% (removed a stray non-LaTeX extraction artifact that preceded \begin{document})
\begin{document}
\setlength{\columnsep}{5pt} \title{\bf Core partial order in rings with involution} \author{Xiaoxiang Zhang\footnote{ E-mail: z990303@seu.edu.cn}, \ Sanzhang Xu\footnote{ E-mail: xusanzhang5222@126.com}, \ Jianlong Chen\footnote{ Corresponding author. E-mail: jlchen@seu.edu.cn }.\\ Department of Mathematics, Southeast University \\ Nanjing 210096, China }
\date{}
\maketitle \begin{quote} {\textbf{Abstract:} \small
Let $R$ be a unital ring with involution.
Several characterizations and properties of core partial order are given.
In particular, we investigate the reverse order law $(ab)^{\tiny\textcircled{\tiny\#}}=b^{\tiny\textcircled{\tiny\#}}a^{\tiny\textcircled{\tiny\#}}$
for two core invertible elements $a,b\in R$.
Some relationships between core partial order and other partial orders are obtained.
\textbf {Keywords:} {\small Core inverse, core partial order, reverse order law, EP element.}
} \end{quote}
\section{ Introduction }\label{sec:intro} The core inverse of a complex matrix was introduced by Baksalary and Trenkler \cite{BT}. Let $M_{n}(\mathbb{C})$ be the ring of all $n\times n$ complex matrices. A matrix $X\in M_{n}(\mathbb{C})$ is called a core inverse of $A\in M_{n}(\mathbb{C})$, if it satisfies $AX=P_{A}$ and $\mathcal{R}(X)\subseteq \mathcal{R}(A)$, where $\mathcal{R}(A)$ denotes the column space of $A$, and $P_{A}$ is the orthogonal projector onto $\mathcal{R}(A)$. If such a matrix $X$ exists, then it is unique and denoted by $A^{\tiny\textcircled{\tiny\#}}$. The core partial order for a complex matrix was also introduced in \cite{BT}. Let $\mathbb{C}^{CM}_{n}=\{A\in M_{n}(\mathbb{C})\mid \mathrm{rank}(A)=\mathrm{rank}(A^{2})\}$, $A\in \mathbb{C}^{CM}_{n}$ and $B\in M_{n}(\mathbb{C})$. The binary relation $\overset{\tiny\textcircled{\tiny\#}}\leq$ is defined as follows: $$A\overset{\tiny\textcircled{\tiny\#}}\leq B~\Leftrightarrow~ A^{\tiny\textcircled{\tiny\#}}A=A^{\tiny\textcircled{\tiny\#}}B~~\mathrm{and}~~AA^{\tiny\textcircled{\tiny\#}}=BA^{\tiny\textcircled{\tiny\#}}.$$ In \cite[Theorem 6]{BT}, it is proved that the core partial order is a matrix partial order. Baksalary and Trenkler gave several characterizations and various relationships between the matrix core partial order and other matrix partial orders by using the decomposition of Hartwig and Spindelb\"{o}ck \cite{HS}. In \cite{RD}, Raki\'{c} and Djordjevi\'{c} generalized the matrix core partial order to the ring case. They gave various equivalent conditions for the core partial order and investigated relationships between the core partial order and other partial orders in general rings. Motivated by \cite{BT,M2,MRT,R,RD}, in this paper, we give some new equivalent conditions and properties for the core partial order in general rings. Moreover, some new relationships between the core partial order and other partial orders are obtained.
As an application, we prove the reverse order law for two core invertible elements under the core partial order.
Let $R$ be a $*$-ring, that is, a ring with an involution $a\mapsto a^*$ satisfying $(a^*)^*=a$, $(ab)^*=b^*a^*$ and $(a+b)^*=a^*+b^*$ for all $a,b\in R$. We say that $x\in R$ is the Moore-Penrose inverse of $a\in R$, if the following hold: $$axa=a, \quad xax=x, \quad (ax)^{\ast}=ax, \quad (xa)^{\ast}=xa.$$ There is at most one $x$ such that the above four equations hold. If such an element $x$ exists, it is denoted by $a^{\dagger}$. The set of all Moore-Penrose invertible elements will be denoted by $R^{\dagger}$. An element $x\in R$ is an inner inverse of $a\in R$ if $axa=a$ holds. The set of all inner inverses of $a$ will be denoted by $a\{1\}$. An element $a\in R$ is said to be group invertible if there exists $x\in R$ such that the following equations hold: $$axa=a, \quad xax=x, \quad ax=xa.$$ The element $x$ which satisfies the above equations is called a group inverse of $a$. If such an element $x$ exists, it is unique and denoted by $a^\#$. The set of all group invertible elements will be denoted by $R^\#$. An element $a\in R$ is said to be an EP element if $a\in R^{\dagger}\cap R^\#$ and $a^{\dagger}=a^\#.$ The set of all EP elements will be denoted by $R^{EP}$. In \cite{RDD}, Raki\'{c}, Din\v{c}i\'{c} and Djordjevi\'{c} generalized the core inverse of a complex matrix to the case of an element in a ring. Let $a,x\in R$, if $$axa=a,~xR=aR,~Rx=Ra^{\ast},$$ then $x$ is called a core inverse of $a$ and if such an element $x$ exists, then it is unique and denoted by $a^{\tiny{\textcircled{\tiny\#}}}$. The set of all core invertible elements in $R$ will be denoted by $R^{\tiny{\textcircled{\tiny\#}}}$. An element $p\in R$ is called a self-adjoint idempotent if $p^{2}=p=p^{\ast}$. An element $q\in R$ is called an idempotent if $q^{2}=q$.
For $a,b\in R$, we have the following definitions: \begin{itemize} \item[{\rm $\bullet$}] the star partial order $a\overset{\ast}\leq b$: $a^{\ast}a=a^{\ast}b$ and $aa^{\ast}=ba^{\ast}$~\cite{D}; \item[{\rm $\bullet$}] the minus partial order $a\overset{-}\leq b$ if and only if there exists an $a^{-}\in a\{1\}$ such that $a^{-}a=a^{-}b$ and $aa^{-}=ba^{-}$~\cite{H2}; \item[{\rm $\bullet$}] the sharp partial order $a\overset{\#}\leq b$: $a^{\#}a=a^\#b$ and $aa^{\#}=ba^{\#}$~\cite{M}. \end{itemize} This paper is organized as follows. In Section 2, some new equivalent characterizations of the core partial order in rings are obtained. In particular, the reverse order law for two core invertible elements in rings is given. In Section 3, some relationships between the core partial order and other partial orders are obtained.
\section{ Equivalent conditions and properties of core partial order }\label{sec:equiv} In this section, some new characterizations of the core partial order in rings are obtained. Let us start this section with two auxiliary lemmas. These two lemmas can be found in \cite[Lemma 2.2]{M} and \cite[Lemma 2.3 and Theorem 2.6]{RD}. \begin{lem} \label{lemma-partial1} Let $a\in R^\#$ and $b\in R$. Then: \begin{itemize} \item[{\rm (1)}] $a^\#a=a^\#b$ if and only if $a^{2}=ab$; \item[{\rm (2)}] $aa^\#=ba^\#$ if and only if $a^{2}=ba$; \item[{\rm (3)}] $a\overset{\#}\leq b$ if and only if $a^{2}=ab=ba$; \item[{\rm (4)}] $a\overset{\#}\leq b$ if and only if there exists an idempotent $p\in R$ such that $a=pb=bp$. \end{itemize} \end{lem}
\begin{lem} \label{lemma-partial2} Let $a\in R^{\tiny{\textcircled{\tiny\#}}}$ and $b\in R$. Then: \begin{itemize} \item[{\rm (1)}] $a^{\tiny{\textcircled{\tiny\#}}} a=a^{\tiny{\textcircled{\tiny\#}}}b$ if and only if $a^{*}a=a^{*}b$; \item[{\rm (2)}] $aa^{\tiny{\textcircled{\tiny\#}}}=ba^{\tiny{\textcircled{\tiny\#}}}$ if and only if $a^{2}=ba$ if and only if $aa^{\#}=ba^{\#}$. \end{itemize} \end{lem}
We will use the following notations $aR=\{ax\mid x\in R\}$, $Ra=\{xa\mid x\in R\}$, $^{\circ}a=\{x\in R\mid xa=0\}$ and $a^{\circ}=\{x\in R\mid ax=0\}$.
In \cite[Lemma 8]{LPT}, Lebtahi et al. proved that $a\overset{-}\leq b$ if and only if there exists $c\in a\{1,2\}$ such that $b-a\in$ $^{\circ}c\cap c^{\circ}$. For the core partial order, we have the following result.
\begin{thm} \label{core-pr1} Let $a\in R^{\tiny{\textcircled{\tiny\#}}}$ and $b\in R$. Then the following conditions are equivalent: \begin{itemize} \item[{\rm (1)}] $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$; \item[{\rm (2)}] $ba^{\tiny{\textcircled{\tiny\#}}}b=a$ and $a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$; \item[{\rm (3)}] $aa^{\tiny{\textcircled{\tiny\#}}}b=a=ba^{\tiny{\textcircled{\tiny\#}}}a$; \item[{\rm (4)}] $b-a\in ^{\circ}\!\!a\cap (a^{\ast})^{\circ}$; \item[{\rm (5)}] $b-a\in(1-aa^{\tiny{\textcircled{\tiny\#}}})R\cap R(1-aa^{\tiny{\textcircled{\tiny\#}}})$; \item[{\rm (6)}] $b-a\in ^{\circ}\!\!(aa^{\tiny{\textcircled{\tiny\#}}}) \cap (aa^{\tiny{\textcircled{\tiny\#}}})^{\circ}$. \end{itemize} \end{thm} \begin{proof} $(1)\Leftrightarrow(2)$ Suppose that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then $ba^{\tiny{\textcircled{\tiny\#}}}b
=aa^{\tiny{\textcircled{\tiny\#}}}b
=aa^{\tiny{\textcircled{\tiny\#}}}a=a$ and
$a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}.$ Conversely, if $ba^{\tiny{\textcircled{\tiny\#}}}b=a$ and $a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$, then $aa^{\tiny{\textcircled{\tiny\#}}}
=ba^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}
=ba^{\tiny{\textcircled{\tiny\#}}}$
and
$a^{\tiny{\textcircled{\tiny\#}}}a
=a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}b
=a^{\tiny{\textcircled{\tiny\#}}}b.$
$(1)\Leftrightarrow(3)$ Suppose that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}b$ and $aa^{\tiny{\textcircled{\tiny\#}}}=ba^{\tiny{\textcircled{\tiny\#}}}$. Thus $aa^{\tiny{\textcircled{\tiny\#}}}b=aa^{\tiny{\textcircled{\tiny\#}}}a=a$ and $ba^{\tiny{\textcircled{\tiny\#}}}a=aa^{\tiny{\textcircled{\tiny\#}}}a=a$. Conversely, if $aa^{\tiny{\textcircled{\tiny\#}}}b=a=ba^{\tiny{\textcircled{\tiny\#}}}a$, then pre-multiplication by $a^{\tiny{\textcircled{\tiny\#}}}$ on $aa^{\tiny{\textcircled{\tiny\#}}}b=a$ yields $a^{\tiny{\textcircled{\tiny\#}}}b=a^{\tiny{\textcircled{\tiny\#}}}a$, similarly we have $ba^{\tiny{\textcircled{\tiny\#}}}=aa^{\tiny{\textcircled{\tiny\#}}}$, thus $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$.
$(1)\Leftrightarrow(4)$ Since $b-a\in ^{\circ}\!\!a\cap (a^{\ast})^{\circ}$ is equivalent to both $a^{*}a=a^{*}b$ and $a^{2}=ba$ hold, thus $(1)\Leftrightarrow(4)$ by Lemma \ref{lemma-partial2}.
$(4)\Leftrightarrow(5)$ By $a\in R^{\tiny{\textcircled{\tiny\#}}}$, we have $^{\circ}a=R(1-aa^{\tiny{\textcircled{\tiny\#}}})$ and
$(a^{\ast})^{\circ}=(1-(a^{\tiny{\textcircled{\tiny\#}}})^{\ast}a^{\ast})R
=(1-(aa^{\tiny{\textcircled{\tiny\#}}})^{\ast})R
=(1-aa^{\tiny{\textcircled{\tiny\#}}})R.$
$(5)\Leftrightarrow(6)$ By $(aa^{\tiny{\textcircled{\tiny\#}}})^{2}=aa^{\tiny{\textcircled{\tiny\#}}}$, we have $(1-aa^{\tiny{\textcircled{\tiny\#}}})R=(aa^{\tiny{\textcircled{\tiny\#}}})^{\circ}$ and $R(1-aa^{\tiny{\textcircled{\tiny\#}}})=^{\circ}\!(aa^{\tiny{\textcircled{\tiny\#}}}).$ \end{proof}
If $p,~q\in R$ are idempotents, then arbitrary $a\in R$ can be written as $$a=paq+pa(1-q)+(1-p)aq+(1-p)a(1-q).$$ The corresponding matrix form is $$a= \left[
\begin{matrix}
a_{11} & a_{12} \\
a_{21} & a_{22}
\end{matrix}
\right]_{p\times q},$$ where $a_{11}=paq$, $a_{12}=pa(1-q)$, $a_{21}=(1-p)aq$ and $a_{22}=(1-p)a(1-q)$. If $a=(a_{ij})_{p\times q}$ and $b=(b_{ij})_{p\times q}$, then $a+b=(a_{ij}+b_{ij})_{p\times q}$.
In \cite[Theorem 2.6]{RD}, Raki\'{c} and Djordjevi\'{c} proved that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ if and only if there exist self-adjoint idempotent $p\in R$ and idempotent $q\in R$ such that $a=pb=bq$ and $qa=a$. We now provide some new characterizations for the core partial order in terms of self-adjoint idempotents.
\begin{thm} \label{core-pr2} Let $a\in R^{\tiny{\textcircled{\tiny\#}}}$ and $b\in R$. Then the following conditions are equivalent: \begin{itemize} \item[{\rm (1)}] $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$; \item[{\rm (2)}] There exists a self-adjoint idempotent $p\in R$ such that $a=pb$, $ap=bp$ and $aR=pR$; \item[{\rm (3)}] There exists self-adjoint idempotent $p\in R$ such that $a=pb$, $ap=bp$; \item[{\rm (4)}] $a= \left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p},~~
b= \left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & b_{4}
\end{smallmatrix}
\right)_{p\times p}$. \end{itemize} \end{thm} \begin{proof} $(1)\Rightarrow(2)$ Let $p=aa^{\tiny{\textcircled{\tiny\#}}}$, then $p^{2}=p=p^{\ast}$ and
$pb=aa^{\tiny{\textcircled{\tiny\#}}}b=aa^{\tiny{\textcircled{\tiny\#}}}a=a$,
$ap
=a^{2}a^{\tiny{\textcircled{\tiny\#}}}=aa^{\tiny{\textcircled{\tiny\#}}}a^{2}a^{\tiny{\textcircled{\tiny\#}}}
=ba^{\tiny{\textcircled{\tiny\#}}}a^{2}a^{\tiny{\textcircled{\tiny\#}}}
=baa^{\tiny{\textcircled{\tiny\#}}}
=bp$, $aR=pR$ by $a=aa^{\tiny{\textcircled{\tiny\#}}}a=pa$.
$(2)\Rightarrow(3)$ It is trivial.
$(3)\Rightarrow(1)$ Suppose that $a=pb$ and $ap=bp$. Then $a^{2}=apb=bpb=ba$ and $a^{*}a=(pb)^{*}pb=b^{*}p^{*}pb=b^{*}p^{\ast}b=(pb)^{*}b=a^{\ast}b$, thus $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ by Lemma \ref{lemma-partial2}.
$(3)\Rightarrow(4)$ Suppose that $a=pb$ and $ap=bp$. Then $pa=a$ and $$\begin{array}{rcl}
pap=ap=a_{1}, &~~~~ & pa(1-p)=a-ap=a_{2},\\ (1-p)ap=0, &~~~~ & (1-p)a(1-p)=0.\\
pbp=ap=a_{1}, &~~~~ & pb(1-p)=a-ap=a_{2},\\ (1-p)bp=ap-ap=0, &~~~~ & (1-p)b(1-p)=b-a=b_{4}. \end{array}$$ Thus $a= \left(
\begin{smallmatrix}
a_{1} & a_{2}\\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p},~~
b= \left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & b_{4}
\end{smallmatrix}
\right)_{p\times p}$.\\ $(4)\Rightarrow(3)$ If there exists $p^{2}=p=p^{\ast}$ such that $pa=a$, $a_{1}=ap$, $a_{2}=a-ap$, $b_{4}=b-a$, then
$$pb=\left(
\begin{smallmatrix}
p & 0\\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
\left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & b_{4}
\end{smallmatrix}
\right)_{p\times p}
=\left(
\begin{smallmatrix}
pa_{1} & pa_{2} \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
=\left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
=a,$$
$$ap=\left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
\left(
\begin{smallmatrix}
p & 0 \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
=\left(
\begin{smallmatrix}
a_{1}p & 0 \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
=\left(
\begin{smallmatrix}
a_{1} & 0 \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p},$$ $$bp=\left(
\begin{smallmatrix}
a_{1} & a_{2} \\
& \\
0 & b_{4}
\end{smallmatrix}
\right)_{p\times p}
\left(
\begin{smallmatrix}
p & 0 \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
=\left(
\begin{smallmatrix}
a_{1}p & 0\\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}
=\left(
\begin{smallmatrix}
a_{1} & 0 \\
& \\
0 & 0
\end{smallmatrix}
\right)_{p\times p}.$$ Hence, $pb=a,~ ap=bp$. \end{proof}
The following characterizations of the minus partial order will be used in the proof of Theorem \ref{core-minus-regular}, which plays an important role in the sequel. \begin{lem} \emph{\cite[Lemma 3.4]{Rb}} \label{minus-regular} Let $a,~b\in R^{-}$. The following conditions are equivalent: \begin{itemize} \item[{\rm (1)}] $a\overset{-}\leq b$; \item[{\rm (2)}] There exists $b^{-}\in b\{1\}$ such that $a=bb^{-}a=ab^{-}b=ab^{-}a$; \item[{\rm (3)}] For arbitrary $b^{-}\in b\{1\}$, we have $a=bb^{-}a=ab^{-}b=ab^{-}a$. \end{itemize} \end{lem}
\begin{thm} \label{core-minus-regular} Let $a,~b \in R^{\tiny{\textcircled{\tiny\#}}}$ with $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then: \begin{itemize} \item[{\rm (1)}] $ba^{\tiny{\textcircled{\tiny\#}}}=ab^{\tiny{\textcircled{\tiny\#}}}$,
$a^{\tiny{\textcircled{\tiny\#}}}b=b^{\tiny{\textcircled{\tiny\#}}}a$; \item[{\rm (2)}] $b^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}$; \item[{\rm (3)}] $b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}
=b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}$. \end{itemize} \end{thm} \begin{proof} Suppose $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$, thus $a\overset{-}\leq b$ by $a^{\tiny{\textcircled{\tiny\#}}}\in a\{1\}$, then $a=bb^{\tiny{\textcircled{\tiny\#}}}a=bb^{\#}a$ by Lemma \ref{minus-regular}.\\ $(1)$ $ba^{\tiny{\textcircled{\tiny\#}}}
=aa^{\tiny{\textcircled{\tiny\#}}}=bb^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=(bb^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}})^{*}
=aa^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}}
=aa^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}
=ab^{\tiny{\textcircled{\tiny\#}}}$.
$a^{\tiny{\textcircled{\tiny\#}}}b
=b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}b
=b^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}b
=b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}b
=b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}a
=b^{\tiny{\textcircled{\tiny\#}}}a.$\\ $(2)$ It is clear that $b^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}
=b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}$
,
$a^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}}
=b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}$
and
$a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}.$\\ $(3)$ Similarly to $(2)$, we have
$b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=b^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$ , $a^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}} =a^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$ and $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}} =b^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}.$ \end{proof}
A complex matrix $A\in M_{n}(\mathbb{C})$ is called range-Hermite (EP matrix), if $\mathcal{R}(A)=\mathcal{R}(A^{\ast})$. \begin{rem}\label{slip1} \emph{In \cite[Theorem $2.4$]{M2}}, it is claimed that the following are equivalent for two complex matrices $A,B$ of index $1$ with the same order: \begin{itemize} \item[{\rm (1)}] $A^{\tiny{\textcircled{\tiny\#}}}BA^{\tiny{\textcircled{\tiny\#}}}=A^{\tiny{\textcircled{\tiny\#}}}$; \item[{\rm (2)}] $A^{\dagger}BA^\#=A^{\tiny{\textcircled{\tiny\#}}}.$ \end{itemize} \emph{While the implication $(2)\Rightarrow(1)$ is always valid, the converse is not true in general. In fact, let $A=B=\left[\begin{matrix}
1 & 1 \\
0 & 0
\end{matrix}
\right]\in M_{2}(\mathbb{C})$\normalsize, we have $A^\#=A$, $A^{\dagger}=\left[\begin{matrix}
1/2 & 0 \\
1/2 & 0
\end{matrix}
\right]$\normalsize and $A^{\tiny{\textcircled{\tiny\#}}}=\left[\begin{matrix}
1 & 0 \\
0 & 0
\end{matrix}
\right]$\normalsize, then the condition $A^{\tiny{\textcircled{\tiny\#}}}BA^{\tiny{\textcircled{\tiny\#}}} =A^{\tiny{\textcircled{\tiny\#}}}AA^{\tiny{\textcircled{\tiny\#}}}=A^{\tiny{\textcircled{\tiny\#}}}$ holds. However, $A^{\dagger}BA^\#\neq A^{\tiny{\textcircled{\tiny\#}}}.$ Note that $(1)\Rightarrow(2)$ holds in case $A$ is an EP matrix.} \end{rem}
\begin{prop} \label{a-b-core} Let $a,b \in R^{\tiny{\textcircled{\tiny\#}}}$. Then $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ if and only if $a^{\tiny{\textcircled{\tiny\#}}}b=b^{\tiny{\textcircled{\tiny\#}}}a$, $ba^{\tiny{\textcircled{\tiny\#}}}=ab^{\tiny{\textcircled{\tiny\#}}}$, $ab^{\tiny{\textcircled{\tiny\#}}}a=a.$ \end{prop} \begin{proof} Suppose that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then
$a^{\tiny{\textcircled{\tiny\#}}}b=b^{\tiny{\textcircled{\tiny\#}}}a$ and
$ba^{\tiny{\textcircled{\tiny\#}}}=ab^{\tiny{\textcircled{\tiny\#}}}$ by Theorem \ref{core-minus-regular}, thus $ab^{\tiny{\textcircled{\tiny\#}}}a=ba^{\tiny{\textcircled{\tiny\#}}}a=aa^{\tiny{\textcircled{\tiny\#}}}a=a.$ Conversely, if $a^{\tiny{\textcircled{\tiny\#}}}b=b^{\tiny{\textcircled{\tiny\#}}}a$, $ba^{\tiny{\textcircled{\tiny\#}}}=ab^{\tiny{\textcircled{\tiny\#}}}$, $ab^{\tiny{\textcircled{\tiny\#}}}a=a$, then
$a^{\tiny{\textcircled{\tiny\#}}}a
=a^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}a
=a^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}b
=a^{\tiny{\textcircled{\tiny\#}}}b$ and
$aa^{\tiny{\textcircled{\tiny\#}}}
=ab^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=ba^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}
=ba^{\tiny{\textcircled{\tiny\#}}}.$ \end{proof}
In \cite[Theorem 2.5]{MRT}, Malik et al. investigated the reverse order law for two core invertible complex matrices under the matrix core partial order. By \cite[Theorem 3.1]{XCZ}, it follows that the equations $axa=a$ and $xax=x$ in \cite[Theorem 2.14]{RDD} can be dropped.
\begin{lem} \emph{\cite[Theorem 3.1]{XCZ}} \label{five-equations-yy} Let $a,x \in R$, then $a\in R^{\tiny\textcircled{\tiny\#}}$ with core inverse $x$ if and only if $(ax)^{\ast}=ax$, $xa^{2}=a$ and $ax^{2}=x$. \end{lem}
\begin{thm} \label{Reverse-order1} Let $a,~b \in R^{\tiny{\textcircled{\tiny\#}}}$ with $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then: \begin{itemize} \item[{\rm (1)}] $(ab)^{\tiny{\textcircled{\tiny\#}}}
=b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=(a^{\tiny{\textcircled{\tiny\#}}})^{2}=(a^{2})^{\tiny{\textcircled{\tiny\#}}}=(ba)^{\tiny{\textcircled{\tiny\#}}}$; \item[{\rm (2)}] $ab\in R^{EP}$ whenever $a\in R^{EP}$. \end{itemize} \end{thm} \begin{proof} $(1)$ Suppose that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then $a^{\tiny{\textcircled{\tiny\#}}}b=b^{\tiny{\textcircled{\tiny\#}}}a$ by Proposition \ref{a-b-core}. Thus, $b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=a^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=(a^{\tiny{\textcircled{\tiny\#}}})^{2}
=(a^{2})^{\tiny{\textcircled{\tiny\#}}}
=(ba)^{\tiny{\textcircled{\tiny\#}}}.$ Let $x=b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}$. Then \begin{eqnarray*} &&abx=abb^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=aba^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=aaa^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=aa^{\tiny{\textcircled{\tiny\#}}}
=(aa^{\tiny{\textcircled{\tiny\#}}})^{*}
=(abb^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}})^{*};\\
&&x(ab)^{2}=b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}(ab)^{2}
=b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}a(ba)b
= a^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}aa^{2}b
= a^{\tiny{\textcircled{\tiny\#}}}a^{2}b
= ab;\\ &&abx^{2}=ab(b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}})^{2}
=a(ba^{\tiny{\textcircled{\tiny\#}}})a^{\tiny{\textcircled{\tiny\#}}}(a^{\tiny{\textcircled{\tiny\#}}})^{2}
=(a^{\tiny{\textcircled{\tiny\#}}})^{2}
= b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}. \end{eqnarray*} Thus $(ab)^{\tiny{\textcircled{\tiny\#}}}=b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}$ by Lemma \ref{five-equations-yy}.
$(2)$ Suppose that $a\in R^{EP}$. Then $a^{\tiny{\textcircled{\tiny\#}}}a=aa^{\tiny{\textcircled{\tiny\#}}}.$ Thus \begin{eqnarray*} &&b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}ab
=b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}b
=a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}b
=a^{\tiny{\textcircled{\tiny\#}}}b
=a^{\tiny{\textcircled{\tiny\#}}}a;\\ &&abb^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}
=abb^{\tiny{\textcircled{\tiny\#}}}a(a^{\tiny{\textcircled{\tiny\#}}})^{2}
=aba^{\tiny{\textcircled{\tiny\#}}}b(a^{\tiny{\textcircled{\tiny\#}}})^{2}
=aaa^{\tiny{\textcircled{\tiny\#}}}a(a^{\tiny{\textcircled{\tiny\#}}})^{2}
=aa^{\tiny{\textcircled{\tiny\#}}}. \end{eqnarray*} By $(1)$, we have $b^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}ab =(ab)^{\tiny{\textcircled{\tiny\#}}}ab =ab(ab)^{\tiny{\textcircled{\tiny\#}}} =abb^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}$, hence $ab\in R^{EP}.$ \end{proof}
\section{ Relationships between the core partial order and other partial orders }\label{sec:rel}
In this section, we consider the relationships between the core partial order and other partial orders. Recall that the left star partial order $a$ $\ast \!\!\leq b$ in $R$ is defined by: $a^{\ast}a=a^{\ast}b$ and $aR\subseteq bR$. The right sharp partial order $a\leq_{\#} b$ in $R^\#$ is defined by: $aa^\#=ba^\#$ and $Ra\subseteq Rb$. Let us start with an auxiliary lemma.
\begin{lem} \emph{\cite{BT}} \label{core-star-sharp} Let $a\in R^{\tiny{\textcircled{\tiny\#}}}$ and $b\in R$. Then $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ if and only if $a$ $\ast\!\!\leq b$ and $a\leq_{\#} b$. \end{lem}
In \cite[Theorem 4.10]{RD}, Raki\'{c} and Djordjevi\'{c} gave the relationship between the core partial order and the minus partial order for $a, b\in R^{\tiny{\textcircled{\tiny\#}}}$. For instance, it is proved that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ if and only if $a\overset{-}\leq b$ and $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$.
By Lemma \ref{core-star-sharp}, the core partial order implies the left star partial order and the right sharp partial order.
Motivated by \cite[Theorem 4.10]{RD}, we have the following theorem.
\begin{thm} \label{core-and-other} Let $a,~b \in R^{\tiny{\textcircled{\tiny\#}}}$. Then the following are equivalent: \begin{itemize} \item[{\rm (1)}] $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$; \item[{\rm (2)}] $a$ $\ast\!\!\leq b$ and $ba^{\tiny{\textcircled{\tiny\#}}}b=a$; \item[{\rm (3)}] $a$ $\ast \!\!\leq b$ and $b^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$; \item[{\rm (4)}] $a$ $\ast \!\!\leq b$ and $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$; \item[{\rm (5)}] $a\leq_{\#} b$ and $ba^{\tiny{\textcircled{\tiny\#}}}b=a$; \item[{\rm (6)}] $a\leq_{\#} b$ and $a^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$. \end{itemize} \end{thm} \begin{proof}
$(1) \Rightarrow (2)$-$(6)$ It is obviously by Theorem \ref{core-pr1}, Theorem \ref{core-minus-regular} and Lemma \ref{core-star-sharp}.\\ $(2) \Rightarrow (1)$ Suppose that $a$ $\ast \!\!\leq b$ and $ba^{\tiny{\textcircled{\tiny\#}}}b=a$. Then $a^{*}a=a^{*}b$ and $aR\subseteq bR$. We have $a^{*}a=a^{*}b$ if and only if $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}b$ by Lemma \ref{lemma-partial2}, thus $aa^{\tiny{\textcircled{\tiny\#}}}=ba^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}} =ba^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}=ba^{\tiny{\textcircled{\tiny\#}}}.$\\ $(3) \Rightarrow (1)$ Suppose that $a$ $\ast\!\!\leq b$. We have $a=bs$ for some $s\in R$, then $a=bs=bb^{\tiny{\textcircled{\tiny\#}}}bs=bb^{\tiny{\textcircled{\tiny\#}}}a$, thus $aa^{\tiny{\textcircled{\tiny\#}}}=bb^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}=ba^{\tiny{\textcircled{\tiny\#}}}$.\\ $(4) \Rightarrow (1)$ Suppose $a$ $\ast \!\!\leq b$ and $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}.$ Then $a^{\ast}a=a^{\ast}b$, thus by Lemma \ref{lemma-partial2}, we have $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}b.$ By $a$ $\ast \!\!\leq b$, we have $a=bb^{\tiny{\textcircled{\tiny\#}}}a$, which gives $$ba^{\tiny{\textcircled{\tiny\#}}}=b(b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}})=ab^{\tiny{\textcircled{\tiny\#}}}.$$ Pre-multiplication of $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$ by $a$ and post-multiplication of $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$ by $bb^{\tiny{\textcircled{\tiny\#}}}$ yield $$aa^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}} =ab^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}}=aa^{\tiny{\textcircled{\tiny\#}}}.$$ 
Since $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}b$, we have $aa^{\tiny{\textcircled{\tiny\#}}}=aa^{\tiny{\textcircled{\tiny\#}}}bb^{\tiny{\textcircled{\tiny\#}}} =aa^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=ab^{\tiny{\textcircled{\tiny\#}}}$. Thus by $ba^{\tiny{\textcircled{\tiny\#}}}=ab^{\tiny{\textcircled{\tiny\#}}}$ and the definition of core partial order, we have $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$.\\ $(5) \Rightarrow (1)$ Suppose that $a\leq_{\#} b$ and $ba^{\tiny{\textcircled{\tiny\#}}}b=a$. Then $aa^\#=ba^\#$ and $Ra\subseteq Rb$, by Lemma \ref{lemma-partial2}, we have $aa^\#=ba^\#$ if and only if $aa^{\tiny{\textcircled{\tiny\#}}}=ba^{\tiny{\textcircled{\tiny\#}}}$, thus $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}ba^{\tiny{\textcircled{\tiny\#}}}b= a^{\tiny{\textcircled{\tiny\#}}}aa^{\tiny{\textcircled{\tiny\#}}}b=a^{\tiny{\textcircled{\tiny\#}}}b.$\\ $(6) \Rightarrow (1)$ By $(5) \Rightarrow (1)$, we only need to prove $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}b$.\\ Since $Ra\subseteq Rb$ is equivalent to $a=ab^{\tiny{\textcircled{\tiny\#}}}b$, we have $a^{\tiny{\textcircled{\tiny\#}}}a=a^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}b=a^{\tiny{\textcircled{\tiny\#}}}b.$ \end{proof}
The right star partial order $a\leq\!\!\ast$ $b$ is defined as: $aa^{\ast}=ba^{\ast}$ and $Ra\subseteq Rb.$
\begin{rem}\label{slip2} \emph{Let $a\in R^{\tiny{\textcircled{\tiny\#}}}$ and $b\in R^{EP}$. In \cite[Theorem $2.9$]{M2}, it is claimed that $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ if and only if $a\leq\!\!\ast$ $b$ and $b^{\tiny{\textcircled{\tiny\#}}}ab^{\tiny{\textcircled{\tiny\#}}}=a^{\tiny{\textcircled{\tiny\#}}}$ in the complex matrix case. But it is not true. In fact, let $A=\left[\begin{matrix}
1 & 1 \\
0 & 0
\end{matrix}
\right], B=\left[\begin{matrix}
1 & 1 \\
0 & 1
\end{matrix}
\right]\in M_{2}(\mathbb{C})$\normalsize, then $A$ is core invertible, $B$ is an EP matrix and the condition $A\overset{\tiny{\textcircled{\tiny\#}}}\leq B$ is satisfied, but $AA^{\ast}\neq BA^{\ast}.$} \end{rem}
The equivalence of $(2)$-$(4)$ in the following proposition for the complex matrices has been proved by Malik et al. in \cite[Lemma 19]{MRT}. \begin{prop} \label{k-core} Let $a\in R^{\tiny{\textcircled{\tiny\#}}}, ~b\in R$ with $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$. Then the following conditions are equivalent: \begin{itemize} \item[{\rm (1)}] $a\overset{\#}\leq b$; \item[{\rm (2)}] $ab=ba$; \item[{\rm (3)}] $a^{2}\overset{\tiny{\textcircled{\tiny\#}}}\leq b^{2}$; \item[{\rm (4)}] $a^{k}\overset{\tiny{\textcircled{\tiny\#}}}\leq b^{k}$, for any $k\geq 2$. \end{itemize} \end{prop} \begin{proof} By Lemma \ref{lemma-partial2}, we have $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$ if and only if both $a^{*}a=a^{*}b$ and $ba=a^{2}$ hold.
$(1)\Rightarrow(2)$ is obvious by Lemma \ref{lemma-partial1}.
$(2)\Rightarrow(4)$ If $ab=ba$, then $ab=ba=a^{2}$ by Lemma \ref{lemma-partial2}. If $k\geq 2$, first show $ab^{k-1}=a^{k}$.
When $k=2$, $ab=ba=a^{2}$;
when $k>2$, $ab^{k-1}=a^{2}b^{k-2}=a^{2}bb^{k-3}=a^{3}b^{k-3}= \cdots=a^{k}.$ Next we prove that $(a^{k})^{\tiny{\textcircled{\tiny\#}}}a^{k}=(a^{k})^{\tiny{\textcircled{\tiny\#}}}b^{k}.$ In fact, $(a^{k})^{\tiny{\textcircled{\tiny\#}}}b^{k}
=(a^{\tiny{\textcircled{\tiny\#}}})^{k}b^{k}
=(a^{\tiny{\textcircled{\tiny\#}}})^{k-1}a^{\tiny{\textcircled{\tiny\#}}}bb^{k-1}
=(a^{\tiny{\textcircled{\tiny\#}}})^{k-1}a^{\tiny{\textcircled{\tiny\#}}}ab^{k-1}
=(a^{\tiny{\textcircled{\tiny\#}}})^{k}ab^{k-1}
=(a^{k})^{\tiny{\textcircled{\tiny\#}}}ab^{k-1}
=(a^{k})^{\tiny{\textcircled{\tiny\#}}}a^{k}.$ Similarly, $b^{k}(a^{k})^{\tiny{\textcircled{\tiny\#}}}=a^{k}(a^{k})^{\tiny{\textcircled{\tiny\#}}}.$
$(4)\Rightarrow(3)$ Taking $k=2$.
$(3)\Rightarrow(1)$
If $a^{2}\overset{\tiny{\textcircled{\tiny\#}}}\leq b^{2}$,
then $(a^{2})^{\tiny{\textcircled{\tiny\#}}}a^{2}=(a^{2})^{\tiny{\textcircled{\tiny\#}}}b^{2}.$ And
$$(a^{2})^{\tiny{\textcircled{\tiny\#}}}a^{2}
=(a^{\tiny{\textcircled{\tiny\#}}})^{2}a^{2}
=a^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}a^{2}
=a^{\tiny{\textcircled{\tiny\#}}}a
=a^{\#}a,$$
$$(a^{2})^{\tiny{\textcircled{\tiny\#}}}b^{2}
=(a^{\tiny{\textcircled{\tiny\#}}})^{2}b^{2}
=a^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}bb
=a^{\tiny{\textcircled{\tiny\#}}}a^{\tiny{\textcircled{\tiny\#}}}ab
=a^{\#}b,$$ thus $a^{\#}a=a^{\#}b$. Hence $a^{2}=aaa^{\#}a=aaa^{\#}b=ab=ba$ by $ba=a^{2}$. \end{proof}
A complex matrix $A$ is called range-Hermite if $\mathcal{R}(A)=\mathcal{R}(A^{\ast})$. In \cite[Theorem 7]{BT}, Baksalary and Trenkler proved that for complex matrices $A$ and $B$, if $A$ is a range-Hermite matrix, then $A\overset{\tiny{\textcircled{\tiny\#}}}\leq B$ if and only if $A\overset{\ast}\leq B$. In \cite[Theorem 3.3]{M2}, Malik proved that for complex matrices $A$ and $B$, if $A$ is a range-Hermite matrix, then $A\overset{\tiny{\textcircled{\tiny\#}}}\leq B$ if and only if $A\overset{\#}\leq B$. It is easy to check that the following proposition is valid for elements in rings by \cite[Theorem 3.1]{RDD}.
\begin{prop} \label{EP-core-other} Let $a\in R^{EP}$ and $b\in R$. Then the following are equivalent: \begin{itemize} \item[{\rm (1)}] $a\overset{\tiny{\textcircled{\tiny\#}}}\leq b$; \item[{\rm (2)}] $a\overset{\#}\leq b$; \item[{\rm (3)}] $a\overset{\ast}\leq b$. \end{itemize} \end{prop}
\noindent {\large\bf Acknowledgements}
This research is supported by the National Natural Science Foundation of China (No.~11371089). The second author is grateful to the China Scholarship Council for supporting his further study at Universidad Polit\'{e}cnica de Valencia, Spain.
\end{document}
\begin{document}
\begin{abstract} In this paper we generalize the Perron Identity for Markov minima. We express the values of binary quadratic forms with positive discriminant in terms of continued fractions associated to broken lines passing through the points where the values are computed. \end{abstract}
\title{Generalized Perron Identity for broken lines} \input epsf \tableofcontents
\section*{Introduction}
Consider a binary quadratic form $f$ with positive discriminant $\Delta(f)$. In this paper we give a geometric interpretation and generalization of the {\it Perron Identity} relating the minimal value of $|f|$ at integer points except the origin and their corresponding continued fractions: \begin{equation} \label{per eq}
\min\limits_{\mathbb{Z}^2\setminus \{(0,0)\}}\big|f\big|=\inf\limits_{i\in \mathbb{Z}}\bigg(\frac{\sqrt{\Delta(f)}}{a_i+[0;a_{i+1}:a_{i+2}:\ldots]+[0;a_{i-1}:a_{i-2}:\ldots]}\bigg).
\end{equation} Here $[a_0;a_1:\ldots]$ and $[0;a_{-1}:a_{-2}:\ldots]$ are regular continued fractions of the slopes of linear factors of corresponding reduced linear forms. Recall that a continued fraction is regular if all its elements are non negative. We discuss this in more detail further in Section~\ref{Basic notions and definitions}.
The Perron Identity was shown by A.~Markov in his paper on minima of binary quadratic forms and the Markov spectrum below 3 in~\cite{mar2}. The statement holds for the entire Markov spectrum (see, e.g., the books by O.~Perron~\cite{per1}, and T.~Cusick and M.~Flahive~\cite{cus1}). Recently Markov numbers were used in relation to the Federer--Gromov stable norm (see~\cite{Fock2007,Veselov2017}). There is not much known about the higher-dimensional analogue of the Markov spectrum. It is believed to be discrete (which is equivalent to the Oppenheim conjecture on best approximations, see Chapter 18 of~\cite{oleg1}). Various values of the three-dimensional Markov spectrum were constructed by H.~Davenport in~\cite{Davenport1938,Davenport1938a,Davenport1939}.
In this paper we show the geometric interpretation of the Perron Identity in terms of sails of the form (Remark~\ref{RefGeometry}) and generalize this expression in the spirit of integer geometry. This establishes a relationship between non-regular continued fractions and the values of the corresponding binary quadratic form at any point on the plane (Theorem~\ref{MainTheorem} and Corollary~\ref{MainCorollary}). The result of this paper is based on recent results of the first author in geometric theory of continued fractions for arbitrary broken lines, see~\cite{oleg3,oleg2,oleg4,oleg1}.
{\noindent {\bf Organization of the paper.} We start in Section~\ref{Basic notions and definitions} with necessary definitions and background. We discuss reduced forms, LLS sequences, and formulate the classical Perron Identity. In Section~\ref{thm sec} we formulate and prove the Generalized Perron Identity for finite broken lines. Finally in Section~\ref{Generalized Perron identity for asymptotic infinite broken lines} we prove the Generalized Perron Identity for infinite broken lines, and discuss the relation with the classical Perron Identity. }
{\noindent {\bf Acknowledgement.} The first author is partially supported by EPSRC grant EP/N014499/1 (LCMH). }
\section{Basic notions and definitions} \label{Basic notions and definitions}
In this section we give necessary notions and definitions. We start in Subsection~\ref{Markov minima and Markov spectrum} with classical definitions of Markov minima and Markov spectrum. Further in Subsection~\ref{Reduced forms, and LLS-sequences} we discuss reduced forms of quadratic binary forms with positive discriminant. In Subsection~\ref{Classical Perron Identity} we discuss the classical Perron Identity. Finally in Subsection~\ref{LLS sequences for broken lines} we introduce LLS sequences for broken lines, which is the central notion in the formulation of the main results.
\subsection{Markov minima and Markov spectrum} \label{Markov minima and Markov spectrum}
Let $f$ be a binary quadratic form with positive discriminant. Recall that in this case $f$ is decomposable into two real factors, namely \[ f(x,y)=(ax-b y)(cx-d y), \] for some real numbers $a$, $b$, $c$, and $d$. The discriminant of this form is \[ \Delta(f)=(ad-bc)^2. \]
The {\it Markov minimum} of the form $f$ is the following number: \[
m(f)=\min\limits_{\mathbb{Z}^2\setminus\{(0,0)\}}|f|. \] The set of all possible values of $\sqrt{\Delta(f)}/m(f)$ is called the {\it Markov Spectrum}. (Note that $\sqrt{\Delta(f)}/m(f)$ is invariant under multiplication of the form $f$ by a non-zero scalar.) The spectrum below 3 corresponds to special forms with integer coefficients; we refer an interested reader to an excellent book~\cite{cus1} by T.~Cusick and M.~Flahive on the Markov spectrum and related subjects.
\subsection{Reduced forms, and LLS-sequences} \label{Reduced forms, and LLS-sequences}
It is clear that $m(f)$ is invariant under the action of the group $\SL(2,\mathbb{Z})$. Therefore in order to study the Markov spectrum one can restrict to so called {\it reduced forms}, which are simple to describe. There are several ways to pick reduced forms; although the algorithmic part is rather similar for all of them, it is the subject of Gauss reduction theory (see, e.g.,~\cite{Lewis1997}, \cite{Manin2002}, \cite{Katok2003}, and~\cite{Karpenkov2010}).
We consider the following family of {\it reduced forms}. For every $\alpha\ge 1$ and $1> \beta \ge 0$ set \[ f_{\alpha,\beta}=(y-\alpha x)(y+\beta x). \]
Every form is a multiple of some reduced form in an appropriate basis of the integer lattice $\mathbb{Z}^2$. However, such a representation is not unique. The following notion provides a complete invariant distinguishing different classes of reduced forms.
\begin{defn} Let $\alpha\ge 1$, $1> \beta \ge 0$ and let \[ \alpha=[a_0;a_1:\ldots] \quad \hbox{and} \quad \beta=[0;a_{-1}:a_{-2}:\ldots] \] be the regular continued fractions for $\alpha$ and $\beta$. Then the sequence \[ (\ldots a_{-2},a_{-1},a_{0}, a_1,a_2,\ldots) \] is called the {\it LLS sequence} of the form $f_{\alpha,\beta}$. \end{defn} This sequence can be either finite or infinite from one or both sides. The name for the LLS sequence (Lattice Length-Sine sequence) is due to its lattice trigonometric properties, e.g., see in~\cite{oleg2} and~\cite{oleg4}.
\begin{prop}\label{PropEquiv} Two reduced forms are {\it equivalent} (i.e., multiple to each other after $\SL(2,\mathbb{Z})$-change of coordinates) if and only if they have the same LLS sequence up to shifts of sequence by $k$-elements for some integer $k$ and a reversing of the order of a sequence. \qed \end{prop}
\begin{rem} This statement follows directly from geometric properties of continued fractions. As we do not use this statement in the proof of the results of this paper we skip the proof here. We refer an interested reader to~\cite{oleg1}. \end{rem}
Due to Proposition~\ref{PropEquiv} we can extend the notion of LLS-sequence to any binary quadratic form with positive discriminant. \begin{defn} Let $f$ be a binary quadratic form with positive discriminant. The {\it LLS sequence} for $f$ is the {\it LLS sequence} for any reduced form $f_{\alpha,\beta}$ equivalent to $f$. We denote it by $\LLS(f)$. \end{defn}
\subsection{Classical Perron Identity} \label{Classical Perron Identity} We are coming to one of the most mysterious statements in theory of Markov minima. It is known as the {\it Perron Identity}.
Let $f$ be a binary quadratic form with positive discriminant $\Delta (f)$. Let also \[ \LLS(f)=(\ldots a_{-2},a_{-1},a_{0}, a_1,a_2,\ldots) \] Then we have the following result by A.~Markov in~\cite{mar2}: \[ \frac{m(f)}{\sqrt{\Delta(f)}}=\inf\limits_{i\in\mathbb{Z}}\bigg(\frac{1}{a_i+[0;a_{i+1}:a_{i+2}:\ldots]+[0;a_{i-1}:a_{i-2}:\ldots]}\bigg). \]
This result is based on the following observation. Let $\alpha\ge 1$, $1> \beta \ge 0$ and let \[ \alpha=[a_0;a_1:\ldots] \quad \hbox{and} \quad \beta=[0;a_{-1}:a_{-2}:\ldots] \] be the regular continued fractions for $\alpha$ and $\beta$. Then \[ f_{\alpha,\beta}(0,1)=\frac{1}{a_0+[0;a_1:a_2:\ldots]+[0;a_{-1}:a_{-2}:\ldots]}. \]
Our goal is to investigate the lattice geometry behind this expression. It will lead us to a more general rule relating continued fractions whose elements are arbitrary non zero real numbers, and the values of the corresponding binary form at any point on the plane (see Theorem~\ref{MainTheorem}, Corollary~\ref{MainCorollary} and Remark~\ref{RefGeometry}).
\subsection{LLS sequences for broken lines} \label{LLS sequences for broken lines} We start with the following general definition. \begin{defn} Consider a quadratic binary form $f$ with positive discriminant. A broken line $A_0\ldots A_n$ is an {\it $f$-broken line} if
the following conditions hold:
\begin{itemize}
\item{$A_0,A_n\ne O$ belong to the two distinct loci of linear factors of~$f$;}
\item{all edges of the broken line are of positive length;}
\item{for every $k=1,\ldots, n$ the line $A_{k-1}A_k$ does not pass through the origin.}
\end{itemize}
\end{defn}
Recall the definition of oriented Euclidean area for parallelograms.
\begin{defn}\label{def} Consider three points $A$, $B$, $C$ in the plane. Then the determinant for the matrix of vectors $AB$ and $AC$ is called the {\it oriented Euclidean area} for the parallelogram spanned by $AB$ and $AC$ and denoted by \[ \det(AB,AC). \] \end{defn}
\begin{defn} Let $\mathcal A=A_0A_1\ldots A_n$ be a broken line with $A_0,A_n\ne O$. Then the sign function of the determinant $\det(OA_1,OA_n)$ is called the {\it signature} of $\mathcal A$ with respect to the origin and denoted by $\sign(\mathcal A)$. \end{defn}
We conclude this section with the following important definition.
\begin{defn}\label{defLLS} Given an $f$-broken line $\mathcal A =A_0\ldots A_n$ define
\[
\begin{aligned}
a_{2k}&=\det(OA_k,OA_{k+1}), \quad k=0,\ldots, n-1;\\
a_{2k-1}&=\frac{\displaystyle \det(A_kA_{k-1},A_{k}A_{k+1})}{\displaystyle a_{2k-2}a_{2k}} ,
\quad k=1,\ldots, n-1.\\
\end{aligned}
\]
The sequence $(a_0,\ldots, a_{2n-2})$ is called the {\it LLS
sequence} for the broken line and denoted by $\LLS(\mathcal A)$.
The expression $[a_0;\ldots: a_{2n-2}]$ is said to be the {\it
continued fraction for the broken line $A_0\ldots
A_n$}. Note that the values $a_i\ne 0$ may be negative.
\end{defn}
The LLS sequence encodes the integer angles and integer lengths of the broken line (see~\cite{oleg1} for further details).
\section{Generalized Perron Identity for finite broken lines}\label{thm sec}
Now we are in position to formulate and to prove the main result of this paper.
\begin{thm}\label{MainTheorem}{\bf(Generalized Perron Identity: case of finite broken lines.)} Consider a binary quadratic form with positive discriminant $f$. Let $\mathcal A=A_0\ldots A_{n+m}$ be an $f$-broken line $($here $n$ and $m$ are arbitrary positive integers$)$, and let \[ \LLS(\mathcal A)=(a_0,a_1,\ldots,a_{2n+2m-2}). \] Then \begin{equation}\label{eq1} f(A_n)=\frac{\sign(\mathcal A) \cdot \sqrt{\Delta(f)}}{a_{2n-1}+[0;a_{2n-2}:\ldots:a_0]+[0;a_{2n}:\ldots:a_{2n+2m-2}]}. \end{equation} \end{thm}
Let us first consider the following example.
\begin{ex} Consider the following binary quadratic form \[ f(x,y)=(x+y)(x-2y). \]
\begin{figure}
\caption{The kernel of $f$ and the $f$-broken line $\mathcal A$.}
\label{ex 2}
\end{figure}
Let $\mathcal A=A_0\ldots A_7$ be the broken line with vertices \[ \begin{array}{llll} A_0=(2,-2), & A_1=(4,-1), & A_2=(3,-2), & A_3=(2,0), \\ A_4=(3,1), & A_5=(4,0), & A_6=(3,-1), & A_7=(4,2), \end{array} \] see Figure~\ref{ex 2}. Let us check Theorem~\ref{MainTheorem} for the broken line $\mathcal A$ at the point $A_4=(3,1)$. We leave the computations of LLS-sequences to the reader as an exercise; the result is as follows: \[ \LLS(\mathcal A)=\bigg(6, -\frac{1}{30}, -5, -\frac{3}{20}, 4, \frac{3}{8}, 2, -\frac{1}{4}, -4, \frac{1}{8}, -4, -\frac{1}{20}, 10 \bigg) \] (here we denote the elements of $\LLS(\mathcal A)$ by $a_0,\ldots, a_{12}$). Finally we have $\Delta(f)=9$ and $\sign(\mathcal A)=1$.
According to Theorem~\ref{MainTheorem} we expect the following. \[ \begin{array}{l} f(A_4)= \displaystyle \frac{\sign(\mathcal A) \cdot \sqrt{\Delta(f)}}{a_{7}+[0;a_{6}:\ldots:a_0]+[0;a_{8}:\ldots:a_{12}]}\\ \displaystyle = \frac{1 \cdot 3} {-\frac{1}{4}+\big[0; 2: \frac{3}{8}: 4: -\frac{3}{20}: -5: -\frac{1}{30}: 6\big]+ \big[0; -4: \frac{1}{8}: -4: -\frac{1}{20}: 10\big]}\\ =4. \end{array} \]
Indeed, direct computation shows that \[ f(A_4)=(3+1)(3-2\cdot 1)=4. \]
\end{ex}
We start the proof with three lemmas.
\begin{lem}\label{l1} Consider a binary quadratic form with positive discriminant $f$. Let $P\ne O$ and $Q\ne O$ annulate distinct linear factors of $f$. Then for every point $A$ it holds \[ f(A)=\sign(POQ)\cdot \frac{\det(OP,OA)\cdot \det(OA,OQ)}{\det(OP,OQ)}\cdot \sqrt{\Delta (f)}. \] \end{lem}
\begin{ex} Consider the following binary quadratic form \[ f(x,y)=(x+y)(x-2y). \] \begin{figure}
\caption{The kernel of $f$ and the $f$-broken line $PAQ$.}
\label{ex 1}
\end{figure}
Let $PAQ$ be an $f$-broken line, with $P=(2,1)$, $A=(3,0)$, and $Q=(2,-2)$, see Figure~\ref{ex 1}. Direct calculations show that \[ \begin{array}{lll} \det(OP,OA)=6,\qquad& \det(OA,OQ)=3,\qquad& \det(OP,OQ)=6, \\ \sign(POQ)=1, \qquad & f(A)=9,\qquad & \Delta (f)=9. \end{array} \] Therefore, we have \[ \begin{aligned} \sign(POQ)\cdot \frac{\det(OP,OA)\cdot \det(OA,OQ)}{\det(OP,OQ)}\cdot \sqrt{\Delta (f)}&= 1\cdot \frac{6\cdot 3}{6}\cdot \sqrt{9}= 9\\ &=f(A). \end{aligned} \] \end{ex}
\begin{proof}[Proof of Lemma~\ref{l1}] The statement is straightforward for the form \[ f_\alpha(x,y)=\alpha xy. \] Assume that $P=(p,0)$, $Q=(0,q)$, and $A=(x,y)$. Then we have \[ f_\alpha(A)=\alpha xy= \frac{py\cdot qx}{pq}\cdot \alpha =\frac{\det(OP,OA)\cdot \det(OA,OQ)}{\det(OP,OQ)}\cdot \sqrt{\Delta(f)}. \] For $P=(0,p)$ and $Q=(q,0)$ we have \[ \begin{aligned} f_\alpha(A)&=\alpha xy= -\frac{(-px)\cdot (-qy)}{-pq}\cdot \alpha\\ &=-\frac{\det(OP,OA)\cdot \det(OA,OQ)}{\det(OP,OQ)}\cdot \sqrt{\Delta(f)}. \end{aligned} \] This concludes the proof for the case of $f_\alpha$.
The general case follows from the invariance of the expressions of the equality of the lemma under the group of linear area preserving transformations (i.e., whose determinants equal 1) of the plane. \end{proof}
Now we prove a particular case of Theorem \ref{MainTheorem}. \begin{lem} \label{main lem} Let $f$ be a binary quadratic form with positive discriminant. Consider an oriented $f$-broken line $\mathcal B=B_0B_1B_2$ with $\LLS(\mathcal B)=(b_0,b_1,b_2)$. Then \[ f(B_1)=\frac{\sign(\mathcal B)\cdot\sqrt{\Delta(f)}}{b_1+[0;b_0]+[0;b_2]}. \] \end{lem}
\begin{proof} Set $B_i=(x_i,y_i)$ for $i=0,1,2$. Then Definition~\ref{defLLS} implies \[ \begin{aligned} b_0&=\det(OB_0, OB_1)=x_0y_1-x_1y_0,\\ b_2&=\det(OB_1,OB_2)=x_1y_2-y_1x_2,\\ b_1&=\frac{\det(B_1B_0,B_1B_2)}{b_0b_2}=\frac{x_0y_2-x_2y_0-x_0y_1+x_1y_0-x_1y_2+y_1x_2}{b_0b_2}.
\end{aligned} \] After a substitution and simplification we get \[ \begin{aligned} \frac{1}{b_1+[0;b_0]+[0;b_2]}&= \frac{(x_0y_1-x_1y_0)(x_1y_2-y_1x_2)}{x_0y_2-x_2y_0}\\ &= \frac{\det(OB_0,OB_1)\cdot\det(OB_1,OB_2)} {\det(OB_0,OB_2)}. \end{aligned} \] Finally recall that \[ \sign(\mathcal B)=\sign(B_0B_1B_2). \] Now Lemma~\ref{main lem} follows directly from Lemma~\ref{l1}. \end{proof}
For the proof of general case we need the following important result. \begin{lem}{\bf (\cite[Corollary~$11.11$, p.~144]{oleg1}.)}\label{geometry2}
Consider a broken line $A_0\ldots A_n$ that has the LLS sequence $(a_0,\ldots,a_{2n})$, with $A_0=(1,0)$,
$A_1=(1,a_0)$, and $A_n=(x,y)$. Let
\[
\alpha=[a_0;a_1:\ldots:a_{2n}]
\]
be the corresponding continued fraction for this broken
line. Then \[
\frac{y}{x}=\alpha.
\]
For the case of an infinite value for $\alpha=[a_0;a_1:\ldots:
a_{2n}]$,
\[
\frac{x}{y}=0.\] \qed \end{lem}
For a proof of Lemma~\ref{geometry2} we refer to~\cite{oleg1}. As a consequence of Lemma~\ref{geometry2} we have the following statement.
\begin{cor}\label{CorGeom} Consider two broken lines $A_0\ldots A_n$ and $B_0\ldots B_m$ that have the LLS sequences $(a_0,\ldots,a_{2n})$ and $(b_0,\ldots,b_{2m})$ respectively. Suppose that the following hold: \begin{itemize} \item $A_0=B_0$; \item the points $A_n$, $B_m$, and $O$ are in a line; \item the points $A_0=B_0$, $A_1$, and $B_1$ are in a line. \end{itemize} Then \[ [a_0;a_1:\ldots:a_{2n}]=[b_0;b_1:\ldots:b_{2m}]. \] \end{cor}
\begin{proof} In coordinates of the basis \[
e_1=OA_0, \qquad e_2=\frac{A_0A_1}{|A_0A_1||OA_0|} \] the coincidence of continued fractions follows from Lemma~\ref{geometry2}. \end{proof}
\begin{proof}[Proof of Theorem \ref{MainTheorem}] Let $f$ be a binary quadratic form with positive discriminant. Denote the linear factors of $f$ by $f_1$ and $f_2$. Consider an $f$-broken line $\mathcal A= A_0\ldots A_{n+m}$. Without loss of generality we assume that $A_0$ and $A_{n+m}$ annulate $f_1$ and $f_2$ respectively.
\begin{figure}
\caption{The original $f$-broken line $\mathcal A$ and the resulting $f$-broken line $BA_nC$.}
\label{ex 3}
\end{figure}
Denote by $B$ the intersection of the line $A_nA_{n-1}$ with the line $f_1=0$. Denote by $C$ the intersection of the line $A_nA_{n+1}$ with the line $f_2=0$. (See Figure~\ref{ex 3}.) Then the continued fraction for the broken line $BA_nC$ is $[b_0;a_{2n-1}:b_2]$ for some real numbers $b_0$ and $b_2$.
By Corollary~\ref{CorGeom} we have \[ \begin{array}{l} [b_0]=[a_{2n-2};\ldots:a_0];\\ {[}b_2]=[a_{2n};\ldots:a_{2n+2m}].\\ \end{array} \] By construction \[ \sign(BA_nC)=\sign(\mathcal A). \] Therefore by Lemma~\ref{main lem} we have \[ \begin{aligned} f(A_n)&=\frac{\sign(BA_nC)\cdot\sqrt{\Delta(f)}}{a_{2n-1}+[0;b_0]+[0;b_2]}\\ &= \frac{\sign(\mathcal A)\cdot \sqrt{\Delta(f)}} {a_{2n-1}+[0;a_{2n-2}:\ldots:a_0]+[0;a_{2n}:\ldots:a_{2n+2m}]}. \end{aligned} \] This concludes the proof of Theorem~\ref{MainTheorem}. \end{proof}
\section{Generalized Perron identity for asymptotic infinite broken lines} \label{Generalized Perron identity for asymptotic infinite broken lines}
In this section we extend the Generalized Perron Identity (of Theorem~\ref{MainTheorem}) to the case of certain infinite broken lines and discuss the relation to the classical Perron Identity.
We start with the following definition.
\begin{defn} Consider a binary quadratic form $f$ with positive discriminant. A broken line $\ldots A_{-2}A_{-1}A_0A_1A_2 \ldots$ infinite in both directions is an {\it asymptotic $f$-broken line} if the following conditions hold (here we assume that $A_k=(x_k,y_k)$ for every integer $k$): \begin{itemize} \item{the two-sided infinite sequence $\big(\frac{y_n}{x_n}\big)$ converges to different slopes of the linear factors in the kernel of $f$ as $n$ increases and decreases respectively;} \item{all edges of the broken line are of positive length;} \item{for every admissible $k$ the line $A_{k-1}A_k$ does not pass through the origin.} \end{itemize} \end{defn}
\begin{rem} Here and below one can consider one side infinite broken lines. All the proofs are similar, so we leave this case as an exercise. \end{rem}
The signature of an asymptotic $f$-broken line is defined as a determinant for two vectors in the kernel of $f$, the first with the starting limit direction and the second with the end limit direction.
Finally we have a definition of LLS-sequences similar to Definition~\ref{defLLS}. \begin{defn}\label{defLLS2} Given an asymptotic $f$-broken line \[\mathcal A =\ldots A_{-2}A_{-1}A_0A_1A_2 \ldots\] define
\[
\begin{aligned}
a_{2k}&=\det(OA_k,OA_{k+1}), \quad k\in\mathbb{Z};\\
a_{2k-1}&=\frac{\displaystyle \det(A_kA_{k-1},A_{k}A_{k+1})}{\displaystyle a_{2k-2}a_{2k}} ,
\quad k\in \mathbb{Z}.\\
\end{aligned}
\]
The sequence $(\ldots, a_{-2},a_{-1},a_0, a_1,a_2\ldots)$ is called the {\it LLS
sequence} for the broken line and denoted by $\LLS(\mathcal A)$. \end{defn}
Let us extend the Generalized Perron Identity (of Theorem~\ref{MainTheorem}) to the case of asymptotic $f$-broken line.
\begin{cor}\label{MainCorollary}{\bf(Generalized Perron Identity: case of infinite broken lines.)} Consider a binary quadratic form with positive discriminant $f$. Let \[ \mathcal A=\ldots A_{-2}A_{-1}A_0A_1A_2\ldots \] be an asymptotic $f$-broken line, and let \[ \LLS(\mathcal A)=(\ldots a_{-2},a_{-1},a_0,a_1,a_2,\ldots). \] Assume also that both continued fractions \[ [0;a_{-1}:a_{-2}:\ldots]\quad \hbox{and} \quad [0;a_1:a_2:\ldots] \] converge. Then \begin{equation} f(A_0)=\frac{\sign(\mathcal A) \cdot \sqrt{\Delta(f)}}{a_{0}+[0;a_{-1}:a_{-2}:\ldots]+[0;a_1:a_2:\ldots]}. \end{equation} \end{cor}
\begin{proof} Without loss of generality we consider the form \[ f=\lambda f_{\alpha,\beta}=\lambda(y-\alpha x)(y+\beta x), \] for some nonzero $\lambda$ and arbitrary $\alpha\ne \beta$.
Let $\mathcal A=\ldots A_{-2}A_{-1}A_0A_1A_2\ldots $ be an asymptotic $f$-broken line, where $A_k=(x_k,y_k)$ for all integer $k$. Also we assume that $x_k\ne 0$ for all $k$ (otherwise, switch to another coordinate system, where the last condition holds).
Set \[ \begin{array}{c} \mathcal A_n=A_{-n}\ldots A_{-2}A_{-1}A_0A_1A_2\ldots A_n;\\ \displaystyle \alpha_n=\frac{y_{-n}}{x_{-n}}; \qquad \beta_n=\frac{y_{n}}{x_{n}}. \end{array} \]
First of all, by definition $\LLS(\mathcal A_n)$ coincides with $\LLS(\mathcal A)$ for all admissible entries.
Secondly, we immediately have that \[ \lim\limits_{n\to \infty} \lambda f_{\alpha_n,\beta_n}(A_0)=\lambda f_{\alpha,\beta}(A_0). \]
Thirdly, the sequence of signatures stabilizes as $n$ tends to infinity. In other words \[ \lim\limits_{n\to \infty} \sign(\mathcal A_n)=\sign(\mathcal A). \]
Fourthly, \[ \lim\limits_{n\to \infty} \Delta(\lambda f_{\alpha_n,\beta_n})=\Delta(\lambda f_{\alpha,\beta}). \]
Finally since both continued fractions \[ [0;a_{-1}:a_{-2}:\ldots],\quad \hbox{and} \quad [0;a_1:a_2:\ldots] \] converge and by the above four observations we have \[ \begin{aligned} f(A_0)&= \lim\limits_{n\to \infty}\lambda f_{\alpha_n,\beta_n}(A_0) \\ &= \lim\limits_{n\to \infty} \frac{\sign(\mathcal A_n) \cdot \sqrt{\Delta(\lambda f_{\alpha_n,\beta_n})}}{a_{0}+[0;a_{-1}:a_{-2}:\ldots:a_{2-2n}]+[0;a_1:a_2:\ldots:a_{2n-2}]} \\ &= \frac{\sign(\mathcal A) \cdot \sqrt{\Delta(f)}}{a_{0}+[0;a_{-1}:a_{-2}:\ldots]+[0;a_1:a_2:\ldots]}. \end{aligned} \] The second equality holds as it holds for the elements in the limits for every positive integer $n$ by Theorem~\ref{MainTheorem}.
This concludes the proof of the corollary. \end{proof}
We conclude this paper with the following important remark.
\begin{rem}\label{RefGeometry}{\bf (Lattice geometry of the Perron Identity.)} Let $f$ be a binary quadratic form with positive discriminant. Consider an angle in the complement to the kernel of $f$. The {\it sail} of this angle is the boundary of the convex hull of all integer points inside the angle except the origin. Note that each form $f$ has four angles in the complement, and, therefore, it has four sails.
It is important that the sail of any angle in the complement to the set $f=0$ is an asymptotic $f$-broken line, so Corollary~\ref{MainCorollary} holds for each of the four sails of $f$. From the general theory of geometric continued fractions, the Markov minimum is an accumulation point of the values at vertices of all sails.
For every vertex $V_i$ of a sail there exists a reduced form $f_{\alpha_i,\beta_i}$ with $\alpha_i\ge 1$ and $1> \beta_i \ge 0$ such that $V_i$ corresponds to $(0,1)$. In particular we have \[ f(V_i)=f_{\alpha_i,\beta_i}(0,1). \] The point $(0,1)$ is a vertex of the sail for $f_{\alpha_i,\beta_i}$. Then from general theory of continued fractions (see Part 1 of~\cite{oleg1}) the sequence $\LLS(f_{\alpha_i,\beta_i})$ coincides with the $\LLS$ sequence for the sail containing $(0,1)$.
Hence the expressions in the Perron Identity~(\ref{per eq}) for which the minimum is computed, i.e., \[ \frac{\sqrt{\Delta(f)}}{a_i+[0;a_{i+1}:a_{i+2}:\ldots]+[0;a_{i-1}:a_{i-2}:\ldots]} \] for $i=\ldots, -2,-1,0,1,2,\ldots$ correspond to the formula of Corollary~\ref{MainCorollary} for vertices $V_i$ of all four sails. We consider the vertex $V_i$, with the sail containing it, as an $f$-broken line. \end{rem}
\end{document}
\begin{document}
\title{
Beyond Bandit Feedback \\ in Online Multiclass Classification
}
\begin{abstract} We study the problem of online multiclass classification in a setting where the learner's feedback is determined by an arbitrary directed graph. While including bandit feedback as a special case, feedback graphs allow a much richer set of applications, including filtering and label efficient classification.
We introduce \textproc{Gappletron}, the first online multiclass algorithm that works with arbitrary feedback graphs. For this new algorithm, we prove surrogate regret bounds that hold, both in expectation and with high probability, for a large class of surrogate losses. Our bounds are of order $B\sqrt{\rho KT}$, where $B$ is the diameter of the prediction space, $K$ is the number of classes, $T$ is the time horizon, and $\rho$ is the domination number (a graph-theoretic parameter affecting the amount of exploration). In the full information case, we show that \textproc{Gappletron} achieves a constant surrogate regret of order $B^2K$. We also prove a general lower bound of order $\max\big\{B^2K,\sqrt{T}\big\}$ showing that our upper bounds are not significantly improvable. Experiments on synthetic data show that for various feedback graphs our algorithm is competitive against known baselines. \end{abstract}
\section{Introduction}
In online multiclass classification a learner interacts with an unknown environment in a sequence of rounds. At each round $t$, the learner observes a feature vector $\x_t \in \mathbb{R}^d$ and outputs a prediction $y_t'$ for the label $y_t \in \{1,\ldots,K\}$ associated with $\x_t$.
If $y_t' \neq y_t$, then the learner is charged with a mistake. \citet{kakade2008efficient} introduced the bandit version of online multiclass classification, where the only feedback received by the learner after each prediction is the loss $\mathbbm{1}[y_t' \not = y_t]$. Hence, if a mistake is made at time $t$ (and $K > 2$), the learner cannot uniquely identify the true label $y_t$ based on the feedback information.
Although bandits are a canonical example of partial feedback, they fail to capture a number of important practical scenarios of online classification. Consider for example spam filtering, where an online learner is to classify emails as spam or non-spam based on their content. Whenever the learner classifies an email as legitimate, the recipient gets to see it, and can inform the learner whether the email was correctly classified or not. However, when the email is classified as spam, the learner does not get any feedback because the email is not checked by the recipient. Another example is label efficient multiclass classification. Here, instead of making a prediction, the learner can ask a human expert for the true label.
At the steps when predictions are made, however, the learner does not receive any feedback information (not even their own loss). A further example is disease prevention: if we predict an outburst of disease in a certain area, we can preemptively stop it by vaccinating the local population. This intervention would prevent us from observing whether our prediction was correct, but would still allow us to observe an outburst occurring in a different area.
Unlike bandits, the amount of feedback obtained by the learner in these examples depends on the predicted class, and can vary from full information to no feedback at all. This scenario has been previously considered in the framework of online learning with feedback graphs \citep{Mannor2011from,AlonCDK15,alon2017nonstochastic}.
A feedback graph is a directed graph $\mathcal{G} = (\mathcal{V}, \mathcal{E})$ where each node in $\mathcal{V}$ receives at least one edge from some other node in $\mathcal{V}$ (possibly from itself). The nodes in $\mathcal{V}$ correspond to actions, and a directed edge $(a,b) \in \mathcal{E}$ indicates that by playing action $a$ the learner observes the loss of action $b$. This generalizes the well-known online learning settings of experts (where $\mathcal{G}$ is the complete graph, including self-loops) and bandits (where $\mathcal{G}$ has only self-loops). Note that it is easy to model spam filtering and label efficient prediction using feedback graphs. For spam filtering, $\mathcal{G}$
contains only two actions $s$ and $n$ (corresponding, respectively, to the learner's predictions for spam and non-spam), and the edge set is $\mathcal{E} = \big\{(n, n), (n, s)\big\}$. For label efficient multiclass prediction, $\mathcal{G}$ contains a node for each class, plus an extra node corresponding to issuing a label request.
It is important to observe that all previous analyses of feedback graphs only apply to the abstract setting of prediction with experts, where any dependence of the loss on feature vectors is ignored. This hampers the application of those results to online multiclass classification. In this work we build on previous results on online learning and classification with bandit feedback to design and analyze the first algorithm for online multiclass classification with arbitrary feedback graphs. In doing so, we also improve the analyses of the previously studied special cases (full information and bandit feedback) of multiclass classification.
In the online multiclass classification setting, the goal is to bound the number of mistakes made by the learner. The mistake bounds take the following form: \begin{align}\label{eq:surrogate regret def}
\sum_{t=1}^T {\id[y_t' \not = y_t]} = \sum_{t=1}^T \ell_t(\U) + \mathcal{R}_T, \end{align} where $\ell_t$ is a surrogate loss, $\U \in \doma{W} \subseteq \mathbb{R}^{d \times K}$ is the matrix of reference predictors, and $\mathcal{R}_T$ is called the surrogate regret. In this work we provide two types of bounds on the surrogate regret: bounds that hold in expectation and bounds that hold with high probability. Note that equation \eqref{eq:surrogate regret def} could also be written as $\sum_{t=1}^T \big({\id[y_t' \not = y_t]} - \ell_t(\U)\big) = \mathcal{R}_T$. However, we prefer the former since $\mathcal{R}_T$ is not a proper regret: because the zero-one loss is non-convex we compare it with a surrogate loss.
Our results build on recent work by \citet{vanderHoeven2020exploiting}, who showed that one can exploit the gap between the surrogate loss and the zero-one loss to derive improved surrogate regret bounds
in the full information and bandit settings of online multiclass classification. We modify the \textproc{Gaptron} algorithm \citep{vanderHoeven2020exploiting} to make it applicable to the feedback graph setting. In the analysis of the resulting algorithm, called \textproc{Gappletron}\footnote{Our algorithm is named after the apple tasting feedback model, which is the original name of the spam filtering graph.}, we use several new insights to show that it has $O(B\sqrt{\rho KT})$ surrogate regret in expectation and $O\big(\!\sqrt{\rho KT(B^2 + \ln(1/\delta))}\big)$ surrogate regret with probability at least $1-\delta$ for any feedback graph with domination number\footnote{The domination number is the cardinality of the smallest dominating set.} $\rho$, and for any $\norm{\myvec(\U)} \leq B$ for some norm $\norm{\cdot}$
(if $\norm{\cdot}$ is the Euclidean norm, then $\norm{\myvec(\U)}$ is the Frobenius norm of $\U$). For example, in both spam filtering and label efficient classification we have $\rho=1$. So in the label efficient setting, where each label request counts as a mistake, with high probability \textproc{Gappletron} makes at most order of $B\sqrt{KT}$ mistakes while requesting at most order of $B\sqrt{KT}$ labels. Note that we are not aware of previously known high-probability bounds on the surrogate regret. Furthermore, whereas the results of \citet{vanderHoeven2020exploiting} only hold for a limited number of surrogate loss functions, our results hold for the larger class of regular surrogate losses.
Interestingly, with feedback graphs the surrogate regret for online multiclass classification has, in general, a better dependence on $T$ than the regret for online learning. Indeed, \citet{AlonCDK15} show that the best possible online learning regret is $\Omega(T^{2/3})$ for certain feedback graphs that are called weakly observable (e.g., the graphs for label efficient classification). In contrast, we prove a $O(T^{1/2})$ upper bound on the surrogate regret for any feedback graph, including weakly observable ones.
Our results cannot be significantly improved in general: we prove a $\Omega(B^2K + \sqrt{T})$ lower bound on the surrogate regret. Due to the new insights required by their proofs, we believe the high-probability bounds and the lower bounds are our strongest technical contributions.
We provide several other new results. In the separable case, when there exists a $\U$ for which $\sum_{t=1}^T \ell_t(\U) = 0$, \textproc{Gappletron} has $O(B\sqrt{\rho T})$ surrogate regret in expectation. Even though $O(B^2K)$ mistake bounds are possible in the separable setting \citep{beygelzimer2019bandit}, ours is the first algorithm that has satisfactory surrogate regret in the non-separable case and has an improved surrogate regret in the separable case. Note that although \textproc{Banditron} \citep{kakade2008efficient} also makes $O(B\sqrt{KT})$ mistakes in the separable case, it suffers $O(K^{1/3}(BT)^{2/3})$ surrogate regret in the non-separable case. Our results for the separable case in the full information setting improve results of \citet{vanderHoeven2020exploiting} by a factor of $K$: \textproc{Gappletron} suffers $O(B^2)$ surrogate regret both in expectation and with high probability, thus matching the bounds of the classical \textproc{Perceptron} algorithm \citep{rosenblatt1958perceptron, Novikov1962} --- see Table~\ref{table:intro} for a summary of our theoretical results. Finally, we also evaluated the performance of \textproc{Gappletron} in several experiments, showing that \textproc{Gappletron} is competitive against known baselines in the full information, bandit, and multiclass spam filtering setting, in which predicting a certain class provides full information feedback and all other predictions do not provide any information (see Figure~\ref{fig:intro} for an experimental result in the bandit setting).
\begin{figure}\label{table:intro}
\label{fig:intro}
\end{figure} \paragraph{Additional related work.} The full information and bandit versions of the online multiclass classification setting have been extensively studied. Here we provide the most relevant references and refer the reader to \citet{vanderHoeven2020exploiting} for a more extensive literature review. Algorithms for the full information setting include: the \textproc{Perceptron}, its multiclass versions \citep{rosenblatt1958perceptron, crammer2003ultraconservative, fink2006online} and many variations thereof, second-order algorithms such as \textproc{Arow} \citep{crammer2009adaptive} and the second-order \textproc{Perceptron} \citep{cesa2005second}, and various algorithms for online logistic regression --- see \citet{foster2018logistic} and references therein. In the bandit setting, we mention the algorithms \textproc{Banditron} \citep{kakade2008efficient}, \textproc{Newtron} \citep{hazan2011newtron}, \textproc{SOBA} \citep{beygelzimer2017efficient}, and \textproc{Obama} \citep{foster2018logistic}.
Online learning with feedback graphs has been investigated both in the adversarial and stochastic regimes. In the adversarial setting, variants where the graph changes over time and is partially known or stochastic have been studied by \citet{cohen2016online,kocak2016online}. Regret bounds that scale with the loss of the best action have been obtained by \citet{lykouris2018small}. Other variants include sleeping experts \citep{cortes2019online}, switching experts \citep{arora2019bandits}, and adaptive adversaries \citep{feng2018online}. Some works use feedback graphs to bound the regret in auctions \citep{cesa2017algorithmic,feng2018learning,han2020learning}. In the stochastic setting, regret bounds for Thompson sampling and UCB have been analyzed by \citet{tossou2017thompson,liu2018analysis,lykouris2020feedback}. Finally, feedback graphs can also be viewed as a special case of the partial monitoring framework for sequential decisions, see \citep{lattimore2018bandit} for an introduction to the area.
\citet{helmbold2000apple} introduced online filtering as ``apple tasting''. However, their analysis applies to a restricted version of online learning in which instances $x_t$ belong to a finite domain, and the labels $y_t$ are such that $y_t = f(x_t)$ for all $t$ and for some fixed $f$ in a known class of functions. Practical applications of online learning to spam filtering have been investigated by \citet{cesa2003margin,sculley2008advances}.
\paragraph{Notation.} Let $\1$ and $\0$ denote, respectively, the all-one and all-zero vectors, and let $\e_k$ be the basis vector in direction $k$. Let $[K] = \{1, \ldots, K\}$ and let $\mathbb{R}_+$ be the non-negative real numbers. We use $\langle \g, \w \rangle$ to denote the inner product between vectors $\g,\w \in \mathbb{R}^d$. The rows of matrix $\W \in \mathbb{R}^{K \times d}$ are denoted by $\W^1, \ldots, \W^K$.
Whenever possible, we use the same symbol $\W$ to denote both a $K\times d$ matrix and a column vector $\myvec(\W) = (\W^1, \ldots, \W^K)$ in $\mathbb{R}^{Kd}$. We use $\|\x\|_2$ to denote the Euclidean norm of a vector $\x$ and $\|\x\|$ to denote an arbitrary norm. The Kronecker product between matrices $\W$ and $\U$ is denoted by $\W \otimes \U$. We assume $\W\in\doma{W}$ for some convex $\doma{W} \subseteq \mathbb{R}^{K \times d}$. This is equivalent to saying that $\myvec(\W)$ belongs to a convex subset of $\mathbb{R}^{Kd}$, for example a $p$-norm ball. As in previous works, we assume instance-label pairs $(\x_t,y_t)$ are generated by an adversary who is oblivious to the algorithm's internal randomization. Finally, for any round $t$, $P_t[\cdot]$ and $\E_t[\cdot]$ denote the conditional probability and expectation, given the randomized predictions $y_1', y_2', \ldots, y_{t-1}'$ and the corresponding feedback.
A feedback graph is any directed graph $\mathcal{G} = (\mathcal{V}, \mathcal{E})$, with edges $\mathcal{E}$ and nodes $\mathcal{V}$, such that for any $y\in\mathcal{V}$ there exists some $y'\in\mathcal{V}$ such that $(y',y) \in \mathcal{E}$, where we allow $y'=y$. In online multiclass classification, $\mathcal{V} = [K]$ and $\mathcal{E}$ specifies which predictions observe which outcomes. Let $\textnormal{out}(y') = \{y \in \mathcal{V} : (y',y) \in \mathcal{E}\}$ be the out-neighbourhood of $y'$. If the learner predicts $y_t'$ at time $t$, then the feedback received by the learner is the set of pairs $\big(y,\mathbbm{1}[y \not = y_t]\big)$ for all $y \in \textnormal{out}(y')$. Due to the structure of the zero-one loss, if a node has $K-1$ outgoing edges, we always add the missing edge to $\mathcal{E}$ as this does not change the information available to the learner. We say that an outcome $y'$ is revealing if predicting that outcome provides the learner with full information feedback, i.e., $\textnormal{out}(y')=[K]$, and we denote the set of revealing outcomes by $\mathcal{Q}$. For example, in label efficient classification, querying the true label $y_t$ corresponds to playing a revealing outcome. We say that a set of nodes $\mathcal{S}$ is a dominating set if for each $y \in \mathcal{V}$ there is a node $y' \in \mathcal{S}$ such that $y \in \textnormal{out}(y')$.
The number of nodes in a smallest dominating set is called the domination number, and we denote it by $\rho$. Note that \textsc{Gappletron} is run using the minimum dominating set $\mathcal{S}$, which is known to be hard to recover in general. However, if the algorithm is fed with any other dominating set $\mathcal{S}'$ of bigger cardinality $\rho'$, our results continue to hold with $\rho$ replaced by $\rho'$ (recall that a dominating set of size at most $(\ln\rho + 2)\rho$ can be efficiently found via a greedy approximation algorithm).
\paragraph{Regular surrogate losses.}
Fix a convex domain $\doma{W}$. Let $\ell: \doma{W} \times \mathbb{R}^d \times [K] \to \mathbb{R}_+$ be any function convex on $\doma{W}$ such that, for all $\W \in \doma{W}$, $\x\in\mathbb{R}^d$, and $y \in [K]\setminus\{y^\star\}$ (with $y^\star = \argmax_k \inner{\W^k}{\x}$) we have \begin{equation}\label{eq:wrong plus right condition}
\frac{K-1}{K}\,\ell(\W, \x, y) + \frac{1}{K} \ell(\W, \x, y^\star) \geq 1. \end{equation} Then $\ell_t = \ell(\cdot, \x_t, y_t)$ is a regular surrogate loss if \begin{equation}\label{eq:gradient condition}
\|\nabla \ell_t(\W)\|^2 \leq 2 L\,\ell_t(\W) \qquad \W\in\doma{W} \end{equation}
for some norm $\|\cdot\|$. When $\|\cdot\|$ is the Euclidean norm, the condition on the gradient is satisfied by all $L$-smooth surrogate loss functions (see, for example, \citep[Lemma 4]{zhou2018fenchel}).
Examples of regular surrogate losses are the smooth hinge loss \citep{rennie2005loss} and the logistic loss with base $K$, defined by $\ell_t(\W_t) = -\log_K q(\W_t, \x_t, y_t)$, where $q$ is the softmax function. Even though the hinge loss is not a regular surrogate loss, in Appendix \ref{app:hinge loss} we show that a particular version of the hinge loss satisfies all the relevant properties of regular surrogate losses. Also, note that in the feedback graph setting, this particular version of the hinge loss we use is random whenever the learner's predictions are randomized.
\section{Gappletron}\label{sec:gappletron}
\begin{algorithm}[h] \caption{\textproc{Gappletron}}\label{alg:gappletron} \begin{algorithmic}[1] \Require Set of revealing actions $\mathcal{Q} \subseteq [K]$, minimum dominating set $\mathcal{S}$, OCO algorithm $\mathcal{A}$ with domain $\doma{W} \subseteq \mathbb{R}^d$, $\gamma \geq 0$, and gap map $a:\mathbb{R}^{K \times d} \times \mathbb{R}^d \rightarrow [0, 1]$ \For{$t = 1 \ldots T$} \State Obtain $\x_t$ \State Let $y_t^\star = \argmax_{k} \inner{\W_t^k}{\x_t}$ \Comment{max-margin prediction} \If{$y_t^\star \in \mathcal{Q}$} \State Set $\gamma_t = 0$ \Else
\State Set $\gamma_t = \min\left\{\half, \gamma\Big/ \sqrt{\big|\{s \leq t: y^\star_s \not \in \mathcal{Q}\}\big|}\right\}$ \Comment{exploration rate} \EndIf \State Set $\zeta_t = \mathbbm{1}[\gamma_t \leq a(\W_t, \x_t)]$ \State $\p_t' = \big(1 - \zeta_t a(\W_t, \x_t) - (1 - \zeta_t) \gamma_t\big) \e_{y_t^\star} + \zeta_t a(\W_t, \x_t) \frac{1}{K}\1 + (1 - \zeta_t) \frac{\gamma_t}{\rho}\1_{S}$ \label{line:sampleprob} \State Predict with label $y_t' \sim \p_t'$ \State Compute ${\displaystyle v_t = \frac{\mathbbm{1}[y_t \in \textnormal{out}(y_t')]}{P_t(y_t \in \textnormal{out}(y_t'))} }$ \Comment{$y_t$ is observed only when $y_t \in \textnormal{out}(y_t')$} \State Set $\widehat{\ell}_t(\W_t) = v_t \ell_t(\W_t)$ \Comment{compute loss estimates} \label{line:lhat} \State Send $\widehat{\ell}_t$ to $\mathcal{A}$ and get $\W_{t+1}$ in return \EndFor \end{algorithmic} \end{algorithm}
In this section we introduce \textproc{Gappletron}, whose pseudocode is presented in Algorithm~\ref{alg:gappletron}. As input, the algorithm takes information about the graph $\mathcal{G}$ in the form of a minimum dominating set $\mathcal{S}$ and a (possibly empty) set of revealing actions $\mathcal{Q}$. \textproc{Gappletron} maintains a parameter $\W_t \in \doma{W} \subseteq \mathbb{R}^{d \times K}$ and uses some full information Online Convex Optimization (OCO) algorithm $\mathcal{A}$ to update the vector form of $\W_t$. Our results hold whenever $\mathcal{A}$ satisfies the condition that $\sum_{t=1}^T \big(\widehat{\ell}_t(\W_t) - \widehat{\ell}_t(\U)\big)$ be at most of order $h(\U)\sqrt{\sum_{t=1}^T \|\nabla \widehat{\ell}_t(\W_t)\|^2}$, where $\widehat{\ell}_t$ are the estimated losses computed at line~\ref{line:lhat} of Algorithm~\ref{alg:gappletron} and
$h : \doma{W} \to \mathbb{R}_+$ is any upper bound on the norm of $\U\in\doma{W}$. Since practically any OCO algorithm can be tuned to have such a guarantee --- see \citep{orabona2018scale} --- this is a mild requirement. Whereas \textproc{Gaptron} is only able to use Online Gradient Descent (OGD) with a fixed learning rate, \textproc{Gappletron} allows for more flexibility, which in turn may lead to different guarantees on the surrogate regret.
For example, if the learner runs an OCO algorithm with a good dynamic regret bound \citep{zinkevich2003}, then \textproc{Gappletron} enjoys a good dynamic surrogate regret bound. Furthermore, the guarantee of $\mathcal{A}$ allows us to derive stronger results in the separable setting while maintaining a similar guarantee as \textproc{Gaptron} in the non-separable setting, which is not possible when using OGD with a fixed learning rate. Additional inputs to \textproc{Gappletron} are $\gamma > 0$, which is used to control the exploration rate of the algorithm in the partial information setting, and the gap map $a$, whose role we explain below.
The predictions of Algorithm~\ref{alg:gappletron} are sampled from $\p_t'$ defined in line~\ref{line:sampleprob},
where $\e_{y_t^\star}$ is the basis vector in the direction of the margin-based linear prediction
$y_t^\star = \argmax_{k} \inner{\W_t^k}{\x_t}$. The gap map $a:\mathbb{R}^{K \times d} \times \mathbb{R}^d \to [0, 1]$ controls the mixture between $\e_{y_t^\star}$ and the uniform exploration term $\frac{1}{K}\1$. For brevity, we sometimes write $a_t$ instead of $a(\W_t, \x_t)$.
The single most important property of \textproc{Gappletron} is presented in the following Lemma.
\begin{lemma} \label{lem: upper bound zo loss} Fix any feedback graph $\mathcal{G}$ and suppose that, for all $t$, $\ell_t$ is a regular surrogate loss with respect to $\ell$. Then \textproc{Gappletron}, run on $\mathcal{G}$ with $a$ such that $a(\W_t, \x_t) = \ell(\W_t, \x_t, y_t^\star)$, satisfies \begin{align*}
{\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} \leq \frac{K-1}{K} \ell_t(\W_t) + \gamma_t. \end{align*} \end{lemma}
\begin{proof} First, observe that ${\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} \leq (1 - a_t)\mathbbm{1}[y_t^\star \not = y_t] + a_t \frac{K-1}{K} + \gamma_t$, since $\zeta$, $(1 - \zeta)$, and the cost of exploration are at most $1$.
To conclude the proof we claim that the first two terms in the right-hand side are upper bounded by $\frac{K-1}{K} \ell_t(\W_t)$. We show that by considering two cases. In the first case $y_t^\star = y_t$ and the inequality simply follows by substituting $a_t = \ell\big(\W_t, \x_t, y_t^\star\big) = \ell_t(\W_t)$. In the second case $y_t^\star \not = y_t$ and we have that \begin{equation*} \begin{split}
&(1 - a_t)\mathbbm{1}[y_t^\star \not = y_t] + a_t\frac{K-1}{K} = 1 - \frac{1}K\ell\big(\W_t, \x_t, y_t^\star\big) \\
& = 1 - \frac{1}{K} \ell\big(\W_t, \x_t, y_t^\star\big) - \frac{K-1}{K}\ell\big(\W_t, \x_t, y_t\big) + \frac{K-1}{K}\ell_t(\W_t) \leq \frac{K-1}{K}\ell_t(\W_t), \end{split} \end{equation*} where the inequality is due to equation \eqref{eq:wrong plus right condition} in the definition of regular surrogate losses. \end{proof}
Although the \textproc{Gaptron} algorithm uses similar predictions, it is not clear how to choose $a$ such that a property similar to the one described in Lemma \ref{lem: upper bound zo loss} holds. Rather, \citet{vanderHoeven2020exploiting} derives a different gap map for the hinge loss, the smooth hinge loss, and the logistic loss, and analyses the surrogate regret separately for each loss. With Lemma \ref{lem: upper bound zo loss} in hand, we simplify the analysis and --- at the same time --- also generalize the results of \citet{vanderHoeven2020exploiting} to other surrogate losses. Furthermore, Lemma \ref{lem: upper bound zo loss} also allows us to derive surrogate regret bounds that hold with high probability.
What Lemma \ref{lem: upper bound zo loss} states is that with regular surrogate losses and $a(\W_t, \x_t) = \ell\big(\W_t, \x_t, y_t^\star\big)$ the expected zero-one loss of \textproc{Gappletron} can be upper bounded by $\frac{K-1}{K} \ell_t(\W_t)$ plus the cost of exploration. While at first this may seem of little interest, note that we want to bound the zero-one loss in terms of $\ell_t$ rather than $\frac{K-1}{K} \ell_t$. Compared to standard algorithms, this gains us a $-\frac{1}{K}\ell_t(\W_t)$ term in \emph{each} round, which we can use to derive our results. To see how,
observe that \textproc{Gappletron} uses an OCO algorithm $\mathcal{A}$ to update $\myvec(\W_t)$ on each round. Suppose that, for some $h:\doma{W} \to \mathbb{R}$ and $\U \in \doma{W}$, Algorithm $\mathcal{A}$ satisfies
\begin{align}\label{eq:algA requirement}
\sum_{t=1}^T\left(\widehat{\ell}_t(\W_t)-\widehat{\ell}_t(\U)\right) \leq h(\U)\sqrt{\sum_{t=1}^T \|\widehat{\g}_t\|^2}, \end{align}
where $\widehat{\g}_t = v_t \nabla \ell_t(\W_t)$. For simplicity, now assume we are in the full information setting (e.g., $v_t = 1$ for all $t$). Since $\ell_t$ is a regular surrogate loss, we can use $
\|\nabla \ell_t(\W)\|^2 \leq 2 L\,\ell_t(\W) $ and $\sqrt{ab} = \half \inf_{\eta > 0} \left\{{a}/{\eta} + \eta b\right\}$ to show that
\begin{align*}
h(\U)\sqrt{\sum_{t=1}^T \|\widehat{\g}_t\|^2} - \sum_{t=1}^T\frac{1}{K}\ell_t(\W_t) \leq h(\U)\sqrt{\sum_{t=1}^T 2L \ell_t(\W_t)} - \sum_{t=1}^T\frac{1}{K}\ell_t(\W_t) \leq \frac{KLh(\U)^2}{2}. \end{align*}
This means that in the full information setting
the surrogate regret of \textproc{Gappletron} is independent of the number of rounds. In the partial information setting some additional steps are required, but the idea remains essentially the same. We formalize the aforementioned ideas in the following Lemma, whose proof is deferred to Appendix \ref{app:gappletron}.
\begin{restatable}{relemma}{lemsurrogategap} \label{lem: surrogate gap} Fix any feedback graph $\mathcal{G}$ and suppose that, for all $t$, $\ell_t$ is a regular surrogate loss with respect to $\ell$. If $\doma{A}$ satisfies equation~\eqref{eq:algA requirement} then, for any realization of the randomized predictions $y_1',\ldots,y_T'$, \textproc{Gappletron}, run on $\mathcal{G}$ with gap map $a$ such that $a(\W_t, \x_t) = \ell(\W_t, \x_t, y_t^\star)$, satisfies \begin{align*}
\sum_{t=1}^T &{\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} \le
\sum_{t=1}^T \widehat{\ell}_t(\U) + \sum_{t=1}^T \gamma_t \\&
+ \inf_{\eta > 0} \bigg\{ \frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) - {v}_t \ell_t(\W_t) + \eta {v}_t^2 L \ell_t(\W_t) \right)\bigg\} \qquad \forall\,\U \in \doma{W}~. \end{align*} \end{restatable}
\section{Bounds that Hold in Expectation}\label{sec:expectation} In this section we present bounds on the surrogate regret that hold in expectation. For brevity we use $\mathcal{M}_T = \sum_{t=1}^T {\id[y_t' \not = y_t]}$.
We now state a simplified version of Theorem~\ref{th:expectation bound}, whose full statement and proof can be found in Appendix \ref{app:expectation}.
\begin{theorem}\label{th: informal expectation bound} Let $\mathcal{G}$ be any feedback graph with domination number $\rho$ and revealing action set $\mathcal{Q}$. Suppose that, for all $t$, $\ell_t$ is a regular surrogate loss with respect to $\ell$. If $\doma{A}$ satisfies equation~\eqref{eq:algA requirement} then \textproc{Gappletron}, run on $\mathcal{G}$ and $\doma{A}$ with gap map $a$ such that $a(\W_t, \x_t) = \ell(\W_t, \x_t, y_t^\star)$, satisfies
\begin{align*}
& \Eb{\mathcal{R}_T} = O\left(\Eb{\max\bigg\{\frac{K^2 L h(\U)^2}{\max\{1, |\mathcal{Q}|\}}, h(\U)\sqrt{\rho K L\big|\{t: y^\star_t \not \in \Qset\}\big|}}\bigg\} \right) \qquad \forall\,\U \in \doma{W}~. \end{align*} Furthermore, for all $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$, \textproc{Gappletron} satisfies: \begin{align*}
& \Eb{\mathcal{M}_T} = O\left( \Eb{\max\left\{h(\U)\sqrt{\rho L\big|\{t: y^\star_t \not \in \Qset\}\big|}, \frac{K L h(\U)^2}{\max\{1, |\mathcal{Q}|\}}\right\}} - \frac{1}{K}\Eb{\sum_{t=1}^T \ell_t(\W_t)}\right). \end{align*} \end{theorem}
In the full information setting we clearly have that $\mathcal{Q} = [K]$. Hence, using OGD as $\doma{A}$ with an appropriate learning rate, the second statement in Theorem \ref{th: informal expectation bound} reduces to $\E\big[\mathcal{M}_T\big] \le 4 L\|\U\|_2^2 - \sum_{t=1}^T \frac{1}{K} \ell_t(\W_t),$ which improves the results of \textproc{Gaptron} in the separable case by at least a factor $K$. Interestingly, compared to standard bounds for the separable case, such as the \textproc{Perceptron} bound, there is a negative term which seems to further lower the cost of learning how to separate the data.
Similarly, in the partial information setting, the bound for the separable case in Theorem \ref{th: informal expectation bound} has a reduced dependency on $K$ compared to the non-separable case, obtaining similar improvements over \textproc{Gaptron} as in the full information setting.
For the non-separable case, Theorem \ref{th: informal expectation bound} generalizes \textproc{Gaptron} in two directions. The most prominent direction is the extension to feedback graphs, where our analysis reveals a surprising phenomenon: Theorem \ref{th: informal expectation bound} in fact shows that the surrogate regret in the label efficient setting (and in any setting where $\rho < K$) is actually smaller than in the bandit setting, where $\rho=K$.
Intuitively, this is due to the fact that our algorithm only updates when $y_t$ is known. In the bandit setting, we need to explore all labels to find $y_t$, while in label efficient classification we can just play whichever action is the revealing action, and find $y_t$. This implies that exploration in label efficient classification is easier than in the bandit setting. Note that in the bandit setting, playing $y_t' \not = y_t$ also provides the learner with information. Perhaps by using this information effectively, one is able to improve our surrogate regret bounds, but as of yet it is not clear how to use knowledge of the wrong label. The second extension is that the bounds in Theorem \ref{th: informal expectation bound} hold for all regular surrogate loss functions with the same gap map defined by the surrogate loss, rather than only for a limited number of loss functions and ad-hoc gap maps as it was the case with \textproc{Gaptron}.
\section{Bounds that Hold with High Probability}\label{sec:high probability}
We
now present bounds on the surrogate regret that hold with high probability. After proving a general surrogate regret bound, we derive a corresponding bound, with improved guarantees, for the full information setting. The bound for the partial information setting can be found in Theorem~\ref{th:high probability} in Appendix \ref{app:high prob}, which implies Theorem~\ref{th:informal high probability} below. Let the maximum loss over all rounds be $\ell_{\textnormal{max}} = \max_{t, \W \in \doma{W}} \ell_t(\W)$.
\begin{theorem}\label{th:informal high probability} With probability at least $1 - \delta$, \textproc{Gappletron} satisfies: \begin{align*}
\mathcal{R}_T = \Ob{\sqrt{\left(Lh(\U)^2 + \ell_{\textnormal{max}} \ln(1/\delta)\right)K\rho T}} \qquad \forall\,\U\in\doma{W} \end{align*} Furthermore, for all $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$, with probability at least $1-\delta$ \textproc{Gappletron} satisfies: \begin{align*}
\mathcal{M}_T = \Ob{\sqrt{ \left(L h(\U)^2 + K \ell_{\textnormal{max}} \ln(1/\delta)\right) \rho T}}. \end{align*} \end{theorem}
Theorem \ref{th:informal high probability} shows that Algorithm \ref{alg:gappletron} has $O(h(\U)\sqrt{\rho KT})$ surrogate regret in the worst case, with high probability. As far as the authors are aware, this is the first high-probability surrogate regret bound for a margin-based classifier in the partial information setting. Similarly to the bounds in expectation, the worst-case surrogate regret is the largest in the bandit setting ($\rho = K$) and the smallest in label efficient classification ($\rho = 1$). Unlike the bounds in expectation, where the surrogate regret was at least a factor $\sqrt{K}$ smaller in the separable case, the improvement in Theorem \ref{th:informal high probability} is less apparent, but the surrogate regret still has a better dependence on $K$ in the separable case. In particular, all the terms with $h(\U)$ have a better dependence on $K$.
In the full information setting the dependence on $\ell_{\textnormal{max}}$ can be removed. This cannot be achieved in the partial information setting, due to the necessity of estimating the surrogate loss. If $\doma{W}$ has a bounded radius $B$ and $\ell_t$ has gradients bounded by $G$, then $\ell_{\textnormal{max}} \leq 1 + BG$ by convexity. The bound for the full information setting can be found in Theorem \ref{th:fullinfo hp}. In the separable case of the full information setting, the bound does not depend on $K$, which is not the case for Theorem \ref{th:informal high probability} due to the need to control the surrogate loss estimates.
\begin{restatable}{retheorem}{thfullinfohp} \label{th:fullinfo hp} Under the conditions of Lemma \ref{lem: surrogate gap}, with probability at least $1 - \delta$, \textproc{Gappletron} satisfies \begin{align*}
\mathcal{M}_T \leq \sum_{t=1}^T \ell_t(\U) + K L h(\U)^2 + \frac{3K+1}{2}\ln\frac{1}{\delta} \qquad \forall\,\U\in\doma{W}~. \end{align*} Furthermore, for all $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$, then with probability at least $1 - \delta$, \textproc{Gappletron} satisfies $
\mathcal{M}_T \leq 4 L h(\U)^2 + \frac{11}{4}\ln\frac{1}{\delta} $.
\end{restatable}
We provide a proof sketch of the full information versions of Theorem~\ref{th:fullinfo hp}. The proof for the partial information setting is essentially the same, with some extra steps to control the estimates of the surrogate losses. Let $z_t = \left({\id[y_t' \not = y_t]} - {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}\right)$. The proof relies on \citep[Theorem 1]{beygelzimer2011contextual} --- see Lemma \ref{lem: freedman} in this paper, which, when translated to our setting, states that with probability at least $1-\delta$,
\begin{equation*}
\sum_{t=1}^T z_t \leq \sqrt{3 \ln\frac{1}{\delta} \sum_{t=1}^T \E_t\left[z_t^2\right]} + 2\ln\frac{1}{\delta}~. \end{equation*}
Since the variance is bounded by the second moment,
$
\E_t\left[z_t^2\right] \leq \E_t\left[{\id[y_t' \not = y_t]}\right] \leq \frac{K-1}{K} \ell_t(\W_t) $,
where the last inequality is due to Lemma \ref{lem: upper bound zo loss}. By using $\sqrt{ab} = \inf_{\eta > 0} \frac{a}{2\eta} + \frac{\eta}{2}b$ and applying Lemma~\ref{lem: surrogate gap}, we find that \begin{equation*}
\mathcal{M}_T \leq \sum_{t=1}^T \ell_t(\U) + \inf_{\eta \in (0, 1]} \left\{\frac{h(\U)^2 - 3\ln\delta}{2\eta} + \sum_{t=1}^T \left(\eta \left(L + \frac{K-1}{K}\right) - \frac{1}{K}\right) \ell_t(\W_t)\right\}, \end{equation*} with probability at least $1 - \delta$. After choosing an appropriate $\eta$, this gives us a $O\big(K h(\U)^2\big)$ surrogate regret bound with high probability.
\section{Lower Bounds}\label{sec:lower bounds}
Corollary \ref{cor:lower} below shows that the bound of Theorem \ref{th: informal expectation bound} cannot be significantly improved. \begin{corollary} \label{cor:lower} Let $A$ be a possibly randomized algorithm for the online multiclass classification setting with feedback graphs. Then, for any $B = \Omega(1)$, the surrogate regret of $A$ with respect to the smooth hinge loss must satisfy \[
\Eb{\mathcal{M}_T} =
\min_{\U\in\doma{W}}\sum_{t=1}^T \ell_t(\U) + \Omega\big(KB^2 + \sqrt{T}\big) \] where $K$ is the number of classes, the feature vectors $\x_t$ satisfy $\norm{\x_t}_2 = \Theta(1)$ for all $t$, and $\doma{W} = \theset{\W}{\norm{\W} \le B}$. \end{corollary}
Corollary \ref{cor:lower} is implied by Theorems~\ref{th:apple lower bound} and~\ref{th:full info lower bound} in Appendix \ref{app:lower bounds}. The proof of Theorem~\ref{th:apple lower bound} builds on the lower bound of \citet{daniely2015strongly} for strongly-adaptive regret. The feedback graph considered in the proof is filtering with two classes: a blind class (no outgoing edges) and a revealing class. In the proof, we show that the algorithm either explores too much, in which case the lower bound trivially holds, or the algorithm explores too little, in which case the environment can trick the algorithm into playing the wrong action by exploiting the blind class.
\section{Experiments}\label{sec:experiments}
\begin{figure}
\caption{Results of the synthetic experiments for the bandit setting. The plot shows the best results of algorithms with parameters suggested by theory, or tuned with all parameters set to 1, except for $T$. The rows indicate different values for $K$ and the columns different values for $d$. Whiskers show the minimum and the maximum error rate over ten repetitions.}
\label{fig:Synbestbandit}
\end{figure} We empirically evaluated the performance of \textproc{Gappletron} on synthetic data in the bandit, multiclass filtering, and full information settings. Similarly to the SynSep and SynNonSep datasets described in \citep{kakade2008efficient}, we generated synthetic datasets with $d \in \{80, 120, 160\}$, $K \in \{6, 9, 12\}$, and the label noise rate in $\{0, 0.05, 0.1\}$. Due to space constraints, we only report part of the experiments for the bandit setting in the main text, see Figure \ref{fig:Synbestbandit}. In the bandit setting we used worst case tuning for the algorithms with the parameters suggested by theory, or set all parameters to 1, except for $T$. Initially we only used theoretical tuning for all algorithms, but we found that two algorithms we compared with did not have satisfactory results. A more detailed description of the results, including how we generated data and tuned the algorithms, can be found in Appendix \ref{app:experiments}.
In the bandit setting, we compared \textproc{Gappletron} with the following baselines: PNewtron (the diagonal version of Newtron, by \citet{hazan2011newtron}), SOBAdiag (the diagonal version of SOBA, by \citet{beygelzimer2017efficient}), and the importance weighted version of Banditron\footnote{This is a version different from the one described by \citet{kakade2008efficient}, in particular, we replaced $\widetilde{U}^t$ in their Algorithm~1 with the gradient of the importance weighted hinge loss.} \citep{kakade2008efficient}. We opted to use the diagonal versions of Newtron and SOBA for computational reasons. We chose the importance weighted version of Banditron because the standard version did not produce satisfactory results. We used three surrogate losses for \textproc{Gappletron}: the logistic loss $\ell_t(\W_t) = -\log_K q(\W_t, \x_t, y_t)$ where $q$ is the softmax, the hinge loss defined in \eqref{eq:multiclass hinge}, and the smooth hinge loss \citep{rennie2005loss}, denoted by GapLog, GapHin, and GapSmH respectively. The OCO algorithm used with all losses is Online Gradient Descent, with learning rate $\eta_t =\left(10^{-8} + \sum_{j=1}^t \|\nabla \widehat{\ell}_j(\W_t)\|_2^2\right)^{-1/2}$ and no projections.
As shown in Figure \ref{fig:Synbestbandit}, on average all versions of \textproc{Gappletron} outperform the baselines in the bandit setting. GapHin appears to be more unstable than the other versions of \textproc{Gappletron}. We suspect this is due to the fact that GapHin explores less than its counterparts. In multiclass spam filtering (Figure \ref{fig:SynbestRA} in Appendix \ref{app:experiments}), we see that GapLog makes more mistakes than its counterparts for $K > 6$. We suspect this is due to the fact that with logistic loss, the gap map is never zero, which implies that GapLog picks an outcome uniformly at random more often than GapHin and GapSmH, while not gaining any information. Due to this behaviour GapLog makes more mistakes than necessary, which we also observe in the full information setting. In the bandit setting, the additional exploration leads to additional stability for GapLog, as indicated by the small range of performance of GapLog. In all cases, increasing the exploration rate increased the stability of \textproc{Gappletron}, which is very much in agreement with Theorem~\ref{th:high probability}.
\section{Future work}
There are several intriguing research directions left open to pursue. While our lower bound holds for general feedback graphs, it is not clear whether our bounds are tight for the bandit setting. Either providing a lower bound or an improved algorithm for the bandit setting thus remains open. Our results show that it is possible to obtain improved bounds for the separable case while maintaining satisfactory results for the non-separable case. However, as \citet{beygelzimer2019bandit} show, it is possible to obtain even better guarantees in the separable case of the bandit setting. An algorithm guaranteeing $O(K\|\U\|^2)$ mistakes in the separable case and $O(K\sqrt{T})$ surrogate regret in the non-separable case, without prior knowledge of the separability, would therefore be an interesting contribution.
\paragraph{Acknowledgements} Van der Hoeven and Cesa-Bianchi are supported by the MIUR PRIN grant Algorithms, Games, and Digital Markets (ALGADIMAR) and by the EU Horizon 2020 ICT-48 research and innovation action under grant agreement 951847, project ELISE (European Learning and Intelligent Systems Excellence). Fusco is supported by the ERC Advanced Grant 788893 AMDROMA ``Algorithmic and Mechanism Design Research in Online Markets'' and the MIUR PRIN project ALGADIMAR ``Algorithms, Games, and Digital Markets''.
\appendix
\section{Hinge loss}\label{app:hinge loss} The multiclass hinge loss is defined by
\begin{equation}\label{eq:multiclass hinge}
\ell_t(\W) =
\begin{cases}
\max\{1 - m_t(\W, y_t), 0\} & \text{ if } m_t^\star < \kappa \\
\max\{1 - m_t(\W, y_t), 0\} & \text{ if } y_t^\star \not = y_t \text{ and } m_t^\star \geq \kappa \\
0 & \text{ if } y_t^\star = y_t \text{ and } m_t^\star \geq \kappa,
\end{cases} \end{equation}
where $m_t(\W, y) = \inner{\W^y}{\x_t} - \max_{k \not= y} \inner{\W^k}{\x_t}$, $m_t^\star = \max_k m_t(\W_t, k)$, and $\kappa \in [0, 1]$. Setting $\kappa = 0$ yields the multiclass hinge loss used in common implementations of the Perceptron. An alternative version of Lemma \ref{lem: upper bound zo loss} which holds for the hinge loss can be found in Lemma \ref{lem: hinge upper bound zo}. This can then be used to derive similar results for the hinge loss as for regular surrogate losses. \begin{lemma}\label{lem: hinge upper bound zo} Let $\ell_t$ be the multiclass hinge loss with $\kappa = \half$ and let $a(\W_t, \x_t) = \ell\big(\W_t, \x_t, y_t^\star\big)$, where $\ell(\cdot,\x_t,y_t) = \ell_t$. Then \textproc{Gappletron} satisfies \begin{align*}
{\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} \leq \max\left\{\frac{2}{3}, \frac{K-1}{K}\right\} \ell_t(\W_t) + \gamma_t. \end{align*}
Furthermore, $\ell_t$ satisfies $\|\nabla \ell_t(\W_t)\|^2 \leq 4 \|\x_t\|^2 \ell_t(\W_t)$. \end{lemma}
\begin{proof}[Proof of Lemma \ref{lem: hinge upper bound zo}] First, we have that \begin{align*}
{\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} & \le \big(1 - \zeta_t a_t - (1-\zeta_t) \gamma_t\big)\mathbbm{1}[y_t^\star \not = y_t] + \zeta_t a_t \frac{K-1}{K} + (1 - \zeta_t) \gamma_t \\
& \le (1 - a_t)\mathbbm{1}[y_t^\star \not = y_t] + a_t \frac{K-1}{K} + \gamma_t, \end{align*} where we used that $\zeta \in \{0,1\}$ and the fact that the number of mistakes while uniformly exploring on the dominating set is upper bounded by 1.
To conclude the proof of the first statement we argue that the first two summands of the right hand side are upper bounded by $\max\left\{\frac{2}{3}, \frac{K-1}{K}\right\} \ell_t(\W_t)$. In order to show that, we split the analysis into two cases. In the first case $y_t^\star = y_t$ and the inequality follows by simply substituting $a_t = \ell\big(\W_t, \x_t, y_t^\star\big) = \ell_t(\W_t)$. In the second case $y_t^\star \not = y_t$ and we have that
\begin{equation*} \begin{split}
m_t^\star + m_t(\W_t, y_t) = & \inner{\W_t^{y_t^\star}}{\x_t} - \max_{k \not= y_t^\star}\inner{\W_t^k}{\x_t} + \inner{\W_t^{y_t}}{\x_t} - \max_{k \not= y_t} \inner{\W_t^k}{\x_t} \\
= & \inner{\W_t^{y_t}}{\x_t} - \max_{k \not= y_t^\star}\inner{\W_t^k}{\x_t}\\
\leq & \inner{\W_t^{y_t}}{\x_t} - \inner{\W_t^{y_t}}{\x_t} = 0 \end{split} \end{equation*}
and thus
\begin{align}\label{eq:mtstar + mt}
m_t^\star \leq -m_t(\W_t, y_t). \end{align}
Since $y_t^\star \not = y_t$ we also have that
\begin{equation}\label{eq:kappamstart} \begin{split}
& (1 - a_t)\mathbbm{1}[y_t^\star \not = y_t] + a_t\frac{K-1}{K} \\
& = \Big(1 - \ell\big(\W_t, \x_t, y_t^\star\big)\Big) + \ell\big(\W_t, \x_t, y_t^\star\big) \frac{K-1}{K} \\
& = 1 - \frac{1}{K} \ell\big(\W_t, \x_t, y_t^\star\big) \\
& = 1 - \frac{1}{K}\mathbbm{1}[m_t^\star < \kappa]\big(1 - m_t(\W_t, y_t^\star)\big). \end{split} \end{equation}
Now, if $m_t^\star < \kappa$ then by equations \eqref{eq:mtstar + mt} and \eqref{eq:kappamstart} we have
\begin{align*}
& (1 - a_t)\mathbbm{1}[y_t^\star \not = y_t] + a_t\frac{K-1}{K} = \frac{K-1}{K} + \frac{1}{K} m_t^\star \leq \frac{K-1}{K}\left(1 + m_t^\star \right) \leq \frac{K-1}{K} \ell_t(\W_t). \end{align*}
If $m_t^\star \geq \kappa$, $a_t = 0$. Therefore, by equations \eqref{eq:mtstar + mt} and \eqref{eq:kappamstart} we have that
\begin{align*}
(1 - a_t)\mathbbm{1}[y_t^\star \not = y_t] + a_t\frac{K-1}{K} = \frac{1 + m_t^\star}{1 + m_t^\star}
\leq \frac{1 - m_t(\W_t, y_t)}{1 + \kappa} = \frac{1}{1 + \kappa} \ell_t(\W_t). \end{align*}
Setting $\kappa = \half$ completes the proof of the first statement.
For the proof of the second statement, first assume that $y_t^\star = y_t$. The case where $m_t^\star \geq \kappa$ is straightforward, so suppose that $m_t^\star < \kappa$, in which case we have that
\begin{align*}
\|\nabla \ell_t(\W_t)\|^2 \leq & 2 \|\x_t\|^2 \\
= & 2\,\frac{1-m_t^\star}{1-m_t^\star} \|\x_t\|^2 \\
\leq & 4\|\x_t\|^2 \ell_t(\W_t). \end{align*}
The case where $y_t^\star \not = y_t$ is evident after observing that $\ell_t(\W_t) \geq 1$ in that case. \end{proof}
\section{Details of Section \ref{sec:gappletron} (Gappletron)} \label{app:gappletron}
\lemsurrogategap*
\begin{proof}[Proof of Lemma \ref{lem: surrogate gap}] By adding and subtracting the surrogate loss of the learner and using the guarantee of $\doma{A}$ we have
\begin{equation*} \begin{split}
& \sum_{t=1}^T \left({\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \widehat{\ell}_t(\U)\right) \\
& = \sum_{t=1}^T \left({\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \widehat{\ell}_t(\W_t)\right) + \sum_{t=1}^T \left(\widehat{\ell}_t(\W_t) - \widehat{\ell}_t(\U)\right) \\
& \leq \sum_{t=1}^T \left({\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \widehat{\ell}_t(\W_t)\right) + h(\U)\sqrt{\sum_{t=1}^T \|\widehat{\g}_t\|^2} \\
& \leq \inf_{\eta > 0} \left\{\frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left( {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \widehat{\ell}_t(\W_t) + \frac{\eta}{2}\|\widehat{\g}_t\|^2\right)\right\}, \end{split} \end{equation*}
where in the last inequality we used $\sqrt{ab} = \inf_{\eta > 0} \left\{\frac{a}{2\eta} + \frac{\eta}{2} b\right\}$. Recalling that $\widehat{\g}_t = v_t \nabla \ell_t(\W_t)$, we continue by applying Lemma \ref{lem: upper bound zo loss}:
\begin{equation}\label{eq:surrogate gap proof} \begin{split}
& \sum_{t=1}^T \left({\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \widehat{\ell}_t(\U)\right) \\
& \leq \inf_{\eta > 0} \left\{\frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) + \gamma_t - \widehat{\ell}_t(\W_t) + \frac{\eta}{2}\|\widehat{\g}_t\|^2\right) \right\} \\
& = \inf_{\eta > 0} \left\{\frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) + \gamma_t - {v}_t \ell_t(\W_t) + \frac{\eta {v}_t^2}{2}\|\nabla \ell_t(\W_t)\|^2 \right)\right\} \\
& \leq \inf_{\eta > 0} \left\{\frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) + \gamma_t - {v}_t \ell_t(\W_t) + \eta {v}_t^2 L \ell_t(\W_t) \right)\right\}, \end{split} \end{equation}
where in the final inequality we used equation \eqref{eq:gradient condition}. The lemma's statement follows from rearranging the last inequality. \end{proof}
\section{Details of Section \ref{sec:expectation} (Bounds that hold in expectation)}\label{app:expectation}
\begin{theorem}\label{th:expectation bound} Under the conditions of Lemma \ref{lem: surrogate gap}, \textproc{Gappletron} with $\gamma = \half h(\U)\sqrt{K\rho L}$ satisfies: \begin{align*}
& \E\left[\sum_{t=1}^T \mathbbm{1}[y_t' \not = y_t]\right] \\
& \le \E\left[\sum_{t=1}^T \ell_t(\U)\right] + \max\bigg\{\frac{2 K^2Lh(\U)^2}{\max\{1, |\mathcal{Q}|\}}, 2\Eb{ h(\U)\sqrt{\rho K L\big|\{t: y^\star_t \not \in \Qset\}\big|}}\bigg\} \end{align*} Furthermore, if there exists a $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$ for all realizations of the learners' actions, \textproc{Gappletron} with $\gamma = h(\U) \sqrt{L \rho}$ satisfies: \begin{align*}
& \Eb{\sum_{t=1}^T {\id[y_t' \not = y_t]}} \\
& \leq \Eb{\max\left\{ 4 h(\U)\sqrt{\rho L\big|\{t: y^\star_t \not \in \Qset\}\big|}, \frac{4 K L h(\U)^2}{\max\{1, |\mathcal{Q}|\}}\right\}} - \frac{1}{K}\Eb{\sum_{t=1}^T \ell_t(\W_t)}. \end{align*} \end{theorem} \begin{proof}[Proof of Theorem \ref{th:expectation bound}] Denote by $v_{\textnormal{max}} = \max\{1, \max_t v_t\}$. Observe that $\E_t[v_t] = 1$ and $\E_t[v_t^2] \leq \E_t[v_{\textnormal{max}}]$. We start by applying Lemma \ref{lem: surrogate gap} and taking expectations:
\begin{align*}
& \E\left[\sum_{t=1}^T \mathbbm{1}[y_t' \not = y_t]\right] \\
&- \E\left[\sum_{t=1}^T \ell_t(\U)\right] - \Eb{\sum_{t=1}^T \gamma_t} \\
&\le \Eb{\inf_{\eta > 0} \bigg\{ \frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) - {v}_t \ell_t(\W_t) + \eta {v}_t^2 L \ell_t(\W_t) \right)\bigg\}} \\
&\le \inf_{\eta > 0} \bigg\{ \Eb{\frac{h(\U)^2}{2\eta}} + \Eb{\sum_{t=1}^T \left(\eta v_{\textnormal{max}} L - \frac{1}{K}\right) \ell_t(\W_t) }\bigg\} \\
&\leq \Eb{\frac{v_{\textnormal{max}} K L h(\U)^2}{2}}, \end{align*}
where the last inequality follows from setting $\eta = \frac{1}{K L v_{\textnormal{max}}}$. By using $\sum_{j = 1}^J \frac{1}{\sqrt{j}} \leq 2\sqrt{J}$ we can see that $\sum_{t=1}^T \gamma_t \leq 2\gamma\sqrt{\big|\{t: y^\star_t \not \in \Qset\}\big|}$. Now, observe that if $y_t^\star \in \mathcal{Q}$ then $P_t(y_t \in \textnormal{out}(y_t')) \geq \frac{|\mathcal{Q}|}{K}$ and if $y_t^\star \not \in \mathcal{Q}$ then $P_t(y_t \in \textnormal{out}(y_t')) \geq \min \left\{\frac{1}{2\rho}, \frac{\gamma_t}{\rho}\right\} \geq \min \left\{\frac{1}{2K}, \frac{\gamma_T}{\rho}\right\}$. This means that \begin{equation}\label{eq:vmax}
v_{\textnormal{max}} \leq \max\left\{\frac{\rho}{\gamma_T}, \frac{K}{\max\{1, |\mathcal{Q}|\}}\right\}. \end{equation}
Recall that $\gamma_T = \min\{\half, {\gamma}/{\sqrt{\big|\{t: y^\star_t \not \in \Qset\}\big|}}\}$. If $\rho > 1$ then $|\mathcal{Q}| = 0$ which means that if $\half < \frac{\gamma}{\sqrt{T}}$ then $v_{\textnormal{max}} \leq 2K$. On the other hand, if $\rho = 1$ then $|\mathcal{Q}| \geq 1$ which means that if $\half < \frac{\gamma}{\sqrt{T}}$ then $v_{\textnormal{max}} \leq \frac{2K}{|\mathcal{Q}|}$. This in turn means that
\begin{equation}\label{eq:vmaxE}
v_{\textnormal{max}} \leq \max\left\{\frac{\rho \sqrt{\big|\{t: y^\star_t \not \in \Qset\}\big|}}{\gamma}, \frac{2K}{\max\{1, |\mathcal{Q}|\}}\right\}. \end{equation}
Rearranging the previous inequality and substituting in $\gamma= \half h(\U)\sqrt{K\rho L}$,
\begin{align*}
\E\left[\sum_{t=1}^T \mathbbm{1}[y_t' \not = y_t]\right] & \le \E\left[\sum_{t=1}^T \ell_t(\U)\right] + \Eb{h(\U)\sqrt{\rho K L \big|\{t: y^\star_t \not \in \Qset\}\big|}} \\
& + \Eb{\max\left\{ h(\U) \sqrt{\rho K L \big|\{t: y^\star_t \not \in \Qset\}\big|}, \frac{2K^2 L h(\U)^2}{2\max\{1, |\mathcal{Q}|\}}\right\}} \\
& \le \E\left[\sum_{t=1}^T \ell_t(\U)\right] + \Eb{\max\left\{ 2 h(\U) \sqrt{\rho K L \big|\{t: y^\star_t \not \in \Qset\}\big|}, \frac{2K^2 L h(\U)^2}{\max\{1, |\mathcal{Q}|\}}\right\}}, \end{align*}
which completes the proof of the first statement of Theorem \ref{th:expectation bound}.
Now, in the case where there exists a $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$ for all realizations of the learners' actions, by the guarantee of $\doma{A}$ we have
\begin{equation*}
\begin{split}
\E\left[\sum_{t=1}^T \ell_t(\W_t)\right] & = \E\left[\sum_{t=1}^T \big(\widehat{\ell}_t(\W_t) - \widehat{\ell}_t(\U)\big)\right]\\
& \le \inf_{\eta > 0} \bigg\{ \Eb{\frac{ h(\U)^2}{2\eta}} + \E\left[\sum_{t=1}^T \frac{\eta v_{\textnormal{max}}}{2}\|\nabla \ell_t(\W_t)\|^2\right]\bigg\} \\
& \le \inf_{\eta > 0} \bigg\{ \Eb{\frac{ h(\U)^2}{2\eta}} + \E\left[\sum_{t=1}^T \eta v_{\textnormal{max}} L \ell_t(\W_t)\right]\bigg\} \\
& \le \Eb{v_{\textnormal{max}} L h(\U)^2} + \half \E\left[\sum_{t=1}^T \ell_t(\W_t)\right],
\end{split} \end{equation*}
where we used that $\ell_t$ is a regular surrogate loss (in particular equation \eqref{eq:gradient condition}) and plugged in ${\eta = 2 (\E[{v_{\textnormal{max}}}]L)^{-1}}$. After reordering, the above gives us
\begin{align}\label{eq:algAtrick sepa}
\E\left[\sum_{t=1}^T \ell_t(\W_t)\right] & \le 2 \Eb{v_{\textnormal{max}} L h(\U)^2}. \end{align}
Now, by using Lemma \ref{lem: upper bound zo loss}, \eqref{eq:vmaxE} and equation \eqref{eq:algAtrick sepa} we have that
\begin{align*}
& \Eb{\sum_{t=1}^T {\id[y_t' \not = y_t]}} \\
& \leq \Eb{\sum_{t=1}^T \frac{K-1}{K}\ell_t(\W_t)} + \Eb{\sum_{t=1}^T \gamma_t} - \Eb{\sum_{t=1}^T \widehat{\ell}_t(\W_t)} + \Eb{\sum_{t=1}^T \widehat{\ell}_t(\W_t)} \\
& \leq \Eb{\sum_{t=1}^T \gamma_t} - \frac{1}{K}\Eb{\sum_{t=1}^T \ell_t(\W_t)} + 2 \E[{v_{\textnormal{max}}}] L h(\U)^2 \\
& \leq 2 \Eb{h(\U)\sqrt{L\rho\big|\{t: y^\star_t \not \in \Qset\}\big|}} - \frac{1}{K}\Eb{\sum_{t=1}^T \ell_t(\W_t)} \\
& ~~ + \Eb{\max\left\{ 2 h(\U)\sqrt{\rho L\big|\{t: y^\star_t \not \in \Qset\}\big|}, \frac{2 K L h(\U)^2}{\max\{1, |\mathcal{Q}|\}}\right\}} \\
& \leq \Eb{\max\left\{ 4 h(\U)\sqrt{\rho L\big|\{t: y^\star_t \not \in \Qset\}\big|}, \frac{4 K L h(\U)^2}{\max\{1, |\mathcal{Q}|\}}\right\}} - \frac{1}{K}\Eb{\sum_{t=1}^T \ell_t(\W_t)}, \end{align*}
which completes the proof for the second statement of Theorem \ref{th:expectation bound}. \end{proof}
\section{Details of Section \ref{sec:high probability} (Bounds that hold with high probability)}\label{app:high prob} We first provide a Lemma due to \citet{beygelzimer2011contextual} which we use to prove our high-probability bounds.
\begin{lemma}\label{lem: freedman} \citep[Theorem 1]{beygelzimer2011contextual} Let $Z_1, \ldots, Z_T$ be a sequence of real-valued random variables. Suppose that $Z_t \leq B$ and $\E_t[Z_t] = 0$. We have with probability $1 - \delta$ \begin{align*}
\sum_{t=1}^T Z_t \leq \sqrt{3\ln\frac{1}{\delta} \sum_{t=1}^T \E_t[Z_t^2]} + 2 B \ln\frac{1}{\delta}. \end{align*} \end{lemma}
\begin{theorem}\label{th:high probability} Under the conditions of Lemma \ref{lem: surrogate gap}, with probability at least $1 - \delta$, \textproc{Gappletron} with $\gamma = \sqrt{K\rho\big(L h(\U)^2 + 5 \ell_{\textnormal{max}} \ln(2/\delta)\big)} $ satisfies: \begin{align*}
\sum_{t=1}^T &{\id[y_t' \not = y_t]} \le
\sum_{t=1}^T \ell_t(\U) + (3K+1)\ln\frac{2}{\delta}
\\ &+ \max\left\{5\sqrt{K\rho T\left(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln\frac{2}{\delta}\right)}, \frac{7 K^2\big(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln(2/\delta)\big)}{\max\{1, |\mathcal{Q}|\}}\right\}. \end{align*} Furthermore, if there exists a $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$
\footnote{Note that $\sum_{t=1}^T \ell_t(\U) = 0$, where $\ell_t$ may depend on the learner's randomness, is a weaker condition than standard separability. For example, if some $\U$ satisfies this condition for the standard multiclass hinge loss, then $\U$ satisfies the same condition also for our version of the multiclass hinge, see~(\ref{eq:multiclass hinge}).}, then with probability at least $1-\delta$ \textproc{Gappletron} run with $\gamma = \sqrt{\big((K + 2) \ell_{\textnormal{max}} \ln(2/\delta) + L h(\U)^2\big) \rho}$ satisfies: \begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} \le
2 \ln\frac{2}{\delta}
+ 5 &\max\left\{\sqrt{\big((K + 2) \ell_{\textnormal{max}} \ln(2/\delta) + L h(\U)^2\big) \rho T}, \right. \\&\quad
\left.\frac{2 K}{\max\{1, |\mathcal{Q}|\}}\big((K + 2) \ell_{\textnormal{max}} \ln(2/\delta) + L h(\U)^2\big)\right\}. \end{align*} \end{theorem} \begin{proof}
Before starting, we find a deterministic upper bound on the right-hand side of~(\ref{eq:vmax}) in the proof of Theorem \ref{th:expectation bound}. First, consider $\gamma_T$. By definition it depends on $|\{t: y^*_t \notin \mathcal{Q}\}|$, which is random, however for any realization we can exploit the trivial bound $|\{t: y^*_t \notin \mathcal{Q}\}|\le T$ to argue that $\gamma_T \ge \min\left\{\frac 12, \frac{\gamma}{\sqrt{T}}\right\}$. Furthermore, if $\rho > 1$ then $|\mathcal{Q}| = 0$ which means that if $\half < \frac{\gamma}{\sqrt{T}}$ then $v_{\textnormal{max}} \leq 2K$. On the other hand, if $\rho = 1$ then $|\mathcal{Q}| \geq 1$ which means that if $\half < \frac{\gamma}{\sqrt{T}}$ then $v_{\textnormal{max}} \leq \frac{2K}{|\mathcal{Q}|}$. With that in mind, we can further bound equation \eqref{eq:vmax}:
\begin{equation} \label{eq:vmax_det}
v_{\textnormal{max}} \leq \max\left\{\frac{\rho}{\gamma_T}, \frac{K}{\max\{1, |\mathcal{Q}|\}}\right\} \le \max\left\{\frac{\rho\sqrt{T}}{\gamma}, \frac{2K}{\max\{1,|\mathcal{Q}|\}} \right\}= V_{\textnormal{max}} \end{equation}
As a first step in the actual proof, we study the concentration of the random variables ${\id[y_t' \not = y_t]}$ around their means ${\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}$. In order to do so, consider their differences
$z_t = {\id[y_t' \not = y_t]} - {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}$, which have zero mean and are bounded in $[-1,1]$. By Lemma \ref{lem: upper bound zo loss} we have \[
\E_t\big[z_t^2\big] \le \E_t\big[{\id[y_t' \not = y_t]}\big] \le \left(\frac{K-1}{K} \ell_t(\W_t) + \gamma_t\right). \] Thus, we can use Lemma \ref{lem: freedman} and, with probability at least $1-\delta'$ we have that \begin{equation}\label{eq:hp zero-one to surrogate}
\begin{split}
\sum_{t=1}^T z_t & \le
\inf_{\eta > 0}\left\{ \frac{3\ln(1/\delta')}{2\eta} + \frac{\eta}{2}\sum_{t=1}^T \left(\frac{K-1}{K} \ell_t(\W_t) + \gamma_t\right)\right\} + 2 \ln\frac{1}{\delta'} \\&\le
\frac{3(K-1)\ln(1/\delta')}{4\eta'} + \sum_{t=1}^T \left(\frac{\eta'}{K} \ell_t(\W_t) + \frac{\eta'}{K-1}\gamma_t\right) + 2 \ln\frac{1}{\delta'},
\end{split} \end{equation}
where the last inequality follows by the substitution $\eta = \frac{2\eta'}{K-1}$ in the argument of the infimum; thus the inequality holds for all $\eta'>0.$
Similarly, we can argue about the concentration of $v_tr_t$ around $r_t$, where $r_t = \ell_t(\U) - \frac{K-\eta'}{K} \ell_t(\W_t)$. Note that $\E_t[v_tr_t-r_t]=0$ and $|v_tr_t - r_t| \leq 2\ell_{\textnormal{max}} V_{\textnormal{max}}$. Moreover \[
\E_t[(v_tr_t - r_t)^2] \le \E_t\big[(v_t r_t)^2\big] \le 2V_{\textnormal{max}} \ell_{\textnormal{max}} |r_t| \le 2V_{\textnormal{max}} \ell_{\textnormal{max}}\left( \ell_t(\W_t) + \ell_t(\U)\right). \] We can finally apply Lemma \ref{lem: freedman} on $v_t r_t - r_t$. Therefore, with probability at least $1-\delta'$ it holds that
\begin{equation}\label{eq: hp vt sur to sur}
\begin{split}
\sum_{t=1}^T (v_t r_t - r_t)
& \le \sqrt{6\ln\frac{1}{\delta'} \sum_{t=1}^TV_{\textnormal{max}} \ell_{\textnormal{max}}\left( \ell_t(\W_t) + \ell_t(\U)\right)} + 4 V_{\textnormal{max}} \ell_{\textnormal{max}} \ln\frac{1}{\delta'} \\
& \le \frac{3 V_{\textnormal{max}} \ell_{\textnormal{max}} K }{2\eta'}\ln\frac{1}{\delta'} + \frac{\eta'}{K} \sum_{t=1}^T \big(\ell_t(\W_t) + \ell_t(\U)\big) + 4 V_{\textnormal{max}} \ell_{\textnormal{max}} \ln\frac{1}{\delta'},
\end{split} \end{equation} where the inequality holds for all $\eta'>0.$
Choosing $\delta'=\frac \delta 2$, we can conclude that both (\ref{eq:hp zero-one to surrogate}) and (\ref{eq: hp vt sur to sur}) hold with probability at least $1-\delta$, for any $\eta'>0$. The rest of the proof consists then in showing that (\ref{eq:hp zero-one to surrogate}) and (\ref{eq: hp vt sur to sur}) deterministically imply the claimed bound. In particular, we study two different cases, i.e., when $\sum_{t=1}^T \ell_t(\U) > \sum_{t=1}^T \ell_t(\W_t)$ and its converse.
We first consider $\sum_{t=1}^T \ell_t(\U) \le \sum_{t=1}^T \ell_t(\W_t)$. By Lemma \ref{lem: surrogate gap} we find that for any $\eta' \in (0, 1]$
\begin{equation*}
\begin{split}
\sum_{t=1}^T &{\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} -\sum_{t=1}^T \gamma_t \\
&\le \sum_{t=1}^T v_t \ell_t(\U) + \inf_{\eta > 0} \bigg\{ \frac{ h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) - {v}_t \ell_t(\W_t) + \eta {v}_t^2 L \ell_t(\W_t) \right)\bigg\} \\
&\le \sum_{t=1}^T v_t \ell_t(\U) + \inf_{\eta > 0} \bigg\{ \frac{ h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) - {v}_t \ell_t(\W_t) + \eta V_{\textnormal{max}} {v}_t L \ell_t(\W_t) \right)\bigg\} \\
&\le \sum_{t=1}^T v_t \ell_t(\U) + \frac{ K L V_{\textnormal{max}} h(\U)^2}{2\eta'} + \sum_{t=1}^T \left(\frac{K-1}{K}\ell_t(\W_t) - \frac{K-\eta'}{K} {v}_t \ell_t(\W_t) \right) \\
&= \sum_{t=1}^T \ell_t(\U) + \sum_{t=1}^T \left(v_t r_t -r_t\right) + \frac{ K L V_{\textnormal{max}} h(\U)^2}{2\eta'} + \sum_{t=1}^T \frac{\eta'-1}{K}\ell_t(\W_t),
\end{split} \end{equation*} where we have scaled down $\eta' = KLV_{\textnormal{max}}\eta$. Substituting in~(\ref{eq: hp vt sur to sur}), we get \begin{align} \notag
\sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} &\le \sum_{t=1}^T \gamma_t + \left(1+\frac{\eta'}K\right)\sum_{t=1}^T \ell_t(\U) + \frac{ K L V_{\textnormal{max}} h(\U)^2}{2\eta'} \\
\label{eq:aux1}
&+\sum_{t=1}^T \frac{2\eta'-1}{K}\ell_t(\W_t) + \frac{3 V_{\textnormal{max}} \ell_{\textnormal{max}} K }{2\eta'}\ln\frac{1}{\delta'} + 4 V_{\textnormal{max}} \ell_{\textnormal{max}} \ln\frac{1}{\delta'}. \end{align} Equations (\ref{eq:hp zero-one to surrogate}) and (\ref{eq:aux1}) are all the ingredients we need to conclude the first case, in fact: \begin{align*}
&\sum_{t=1}^T {\id[y_t' \not = y_t]} - \ell_t(\U)\\
&\le \sum_{t=1}^T \left({\id[y_t' \not = y_t]} - {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}\right) + \sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \ell_t(\U)\\
&\le \frac{4\eta'-1}{K} \sum_{t=1}^T \ell_t(W_t) + \left(1+\frac{\eta'}{K-1}\right)\sum_{t=1}^T \gamma_t + 2 \ln\frac{1}{\delta'}\\
& + \frac{3(K-1)}{4\eta'} \ln\frac{1}{\delta'} + \frac{LKV_{\textnormal{max}} h(\U)^2}{2\eta'}+ 4 V_{\textnormal{max}} \ell_{\textnormal{max}} \ln\frac{1}{\delta'} + \frac{3V_{\textnormal{max}}\ell_{\textnormal{max}} K}{2\eta'}\ln\frac{1}{\delta'}\\
&\le \frac{5}{4}\sum_{t=1}^T \gamma_t+ (3K+1) \ln\frac{1}{\delta'} + 2 V_{\textnormal{max}} K\left(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln\frac{1}{\delta'}\right), \end{align*} where in the last step we make the substitution $\eta'=\frac{1}{4}$. Now, we have that $\sum_{t=1}^T \gamma_t \leq 2 \gamma \sqrt{T}$ and hence we obtain
\begin{equation}\label{eq:hp final 1}
\begin{split}
\sum_{t=1}^T {\id[y_t' \not = y_t]} &- \sum_{t=1}^T \ell_t(\U) \\
&\le 3 \gamma \sqrt{T} + (3K+1) \ln\frac{1}{\delta'} \\
&+ \max\left\{\frac{2 K \rho \sqrt{T}}{\gamma}, \frac{4K^2}{\max\{1,|\mathcal{Q}|\}} \right\}
\left(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln\frac{1}{\delta'}\right) \\
&\le (3K+1) \ln\frac{1}{\delta'} \\
&+\max\left\{5\sqrt{K\rho T\left(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln\frac{1}{\delta'}\right)}, \frac{7 K^2\big(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln(1/\delta')\big)}{\max\{1, |\mathcal{Q}|\}}\right\}.
\end{split} \end{equation}
Consider now the second case, where $\sum_{t=1}^T \ell_t(\U) > \sum_{t=1}^T \ell_t(\W_t)$. We are still assuming~(\ref{eq:hp zero-one to surrogate}) and~(\ref{eq: hp vt sur to sur}) both hold, even though in this case we need only~(\ref{eq:hp zero-one to surrogate}). The $\sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}$ term is in fact upper bounded using Lemma~\ref{lem: upper bound zo loss}. We have:
\begin{equation}\label{eq:hp final 2}
\begin{split}
\sum_{t=1}^T& \big({\id[y_t' \not = y_t]} - \ell_t(\U)\big) \\
&= \sum_{t=1}^T ({\id[y_t' \not = y_t]}-{\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}) + \sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} -\sum_{t=1}^T \ell_t(\U)\\
&\le \sqrt{3\ln\frac{1}{\delta'} \sum_{t=1}^T \left(\frac{K-1}{K} \ell_t(\W_t) + \gamma_t\right)} + 2 \ln\frac{1}{\delta'} + \sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} - \sum_{t=1}^T \ell_t(\U) \\
& \le \sqrt{3\ln\frac{1}{\delta'} \sum_{t=1}^T \left(\ell_t(\W_t) + \gamma_t\right)} + 2 \ln\frac{1}{\delta'} +\sum_{t=1}^T \frac{K-1}{K} \ell_t(\W_t) + \gamma_t -\sum_{t=1}^T \ell_t(\W_t) \\
& \le \half\inf_{\eta > 0}\left\{\frac{3\ln\frac{1}{\delta'}}{\eta} + \eta \sum_{t=1}^T \left(\ell_t(\W_t) + \gamma_t\right)\right\} + 2 \ln\frac{1}{\delta'} +\sum_{t=1}^T \frac{K-1}{K} \ell_t(\W_t) + \gamma_t -\sum_{t=1}^T \ell_t(\W_t) \\
& \le \frac{3}{4} K \ln\frac{1}{\delta'} + \frac{5}{2} \sum_{t=1}^T \gamma_t + 2 \ln\frac{1}{\delta'}\\
& \le 3K \ln\frac{1}{\delta'} + 5 \gamma \sqrt{T}\\
&=3K\ln\frac{1}{\delta'} + 5\sqrt{K\rho T\left(Lh(\U)^2 + 5\ell_{\textnormal{max}} \ln\frac{1}{\delta'}\right)}.
\end{split} \end{equation} The first inequality is due to~(\ref{eq:hp zero-one to surrogate}), while the second one to Lemma \ref{lem: upper bound zo loss}. The third inequality follows from choosing $\eta = \frac{2}{K}$ and finally, the last equality follows by substituting the stated $\gamma.$
In order to prove the second statement, we assume there exists a $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$ for all realizations of the learners' predictions. By the guarantee on $\mathcal{A}$ and inequality~\eqref{eq:gradient condition} we have
\begin{equation*}
\begin{split}
\sum_{t=1}^T \widehat{\ell}_t(\W_t) & = \sum_{t=1}^T \Big(\widehat{\ell}_t(\W_t) - \widehat{\ell}_t(\U)\Big) \\
& \le \inf_{\eta > 0} \bigg\{ \frac{ h(\U)^2}{2\eta} + \frac{\eta}{2} \sum_{t=1}^T v_t^2\|\nabla \ell_t(\W_t)\|^2 \bigg\} \\
& \le \inf_{\eta > 0} \bigg\{ \frac{ h(\U)^2}{2\eta} + \eta V_{\textnormal{max}} L \sum_{t=1}^T v_t \ell_t(\W_t) \bigg\} \\
& \le V_{\textnormal{max}} L h(\U)^2 + \frac{1}{2} \sum_{t=1}^T v_t \ell_t(\W_t),
\end{split} \end{equation*}
which, after recalling that $\widehat{\ell}_t(\W_t) = v_t \ell_t(\W_t)$, and reordering, gives us \begin{equation}\label{eq: hp algA sepa trick}
\sum_{t=1}^T \widehat{\ell}_t(\W_t) \leq 2V_{\textnormal{max}} L h(\U)^2. \end{equation} By Lemma \ref{lem: freedman}, we have that with probability at least $1 - \delta'$
\begin{equation}\label{eq: hp sepa vt sur to sur} \begin{split}
\sum_{t=1}^T \Big(\ell_t(\W_t) - v_t\ell_t(\W_t)\Big) & \le \sqrt{3\ell_{\textnormal{max}} V_{\textnormal{max}}\ln\frac{1}{\delta'}\sum_{t=1}^T\ell_t(\W_t)} + 2\ell_{\textnormal{max}} V_{\textnormal{max}}\ln\frac{1}{\delta'} \\
& \le \frac{3 K \ell_{\textnormal{max}} V_{\textnormal{max}}}{4\eta'}\ln\frac{1}{\delta'} + \frac{\eta'}{K}\sum_{t=1}^T\ell_t(\W_t) + 2\ell_{\textnormal{max}} V_{\textnormal{max}}\ln\frac{1}{\delta'}, \end{split} \end{equation}
for all $\eta'>0$. By equation \eqref{eq:hp zero-one to surrogate}, with probability at least $1 - \delta'$ we have that for all $\eta'>0$
\begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} & = \sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} + \sum_{t=1}^T\left({\id[y_t' \not = y_t]} - {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}\right) \\
& \le \sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} + \frac{3(K-1)\ln(1/\delta')}{4\eta'} + 2 \ln(1/\delta') \\
& ~~ + \sum_{t=1}^T \left(\frac{\eta'}{K} \ell_t(\W_t) + \frac{\eta'}{K-1}\gamma_t\right). \end{align*}
We continue by using Lemma \ref{lem: upper bound zo loss} to bound ${\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}$:
\begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} \leq &\sum_{t=1}^T \frac{K-1}{K} \ell_t(\W_t) + (1 + \frac{\eta'}{K-1})\sum_{t=1}^T \gamma_t + \frac{3(K-1)\ln(1/\delta')}{4\eta'} \\
& + \sum_{t=1}^T \frac{\eta'}{K} \ell_t(\W_t) + 2 \ln(1/\delta') \\
= & (1 + \frac{\eta'}{K-1})\sum_{t=1}^T \gamma_t + \frac{3(K-1)\ln(1/\delta')}{4\eta'} + \frac{\eta' - 1}{K} \sum_{t=1}^T \ell_t(\W_t) \\
& + 2 \ln(1/\delta') + \sum_{t=1}^T v_t\ell_t(\W_t) + \sum_{t=1}^T \left(\ell_t(\W_t) - v_t\ell_t(\W_t)\right). \end{align*}
By equation \eqref{eq: hp sepa vt sur to sur} and the union bound, with probability at least $1 - 2 \delta'$:
\begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} \leq &\sum_{t=1}^T \frac{K-1}{K} \ell_t(\W_t) + (1 + \frac{\eta'}{K-1})\sum_{t=1}^T \gamma_t + \frac{3(K-1)\ln(1/\delta')}{4\eta'} \\
& + \sum_{t=1}^T \frac{\eta'}{K} \ell_t(\W_t) + 2 \ln(1/\delta') + 2\ell_{\textnormal{max}} V_{\textnormal{max}}\ln(1/\delta') \\
= & (1 + \frac{\eta'}{K-1})\sum_{t=1}^T \gamma_t + \frac{3 K \ell_{\textnormal{max}} V_{\textnormal{max}}\ln(1/\delta')}{4\eta'} + \frac{2\eta' - 1}{K} \sum_{t=1}^T \ell_t(\W_t) \\
& + (\frac{3}{4\eta'} (K-1) + \half) \ln(1/\delta') + \sum_{t=1}^T v_t\ell_t(\W_t) + 2\ell_{\textnormal{max}} V_{\textnormal{max}}\ln(1/\delta'). \end{align*}
We use equation \eqref{eq: hp algA sepa trick}, $\sum_{t=1}^T \gamma_t \leq 2\gamma\sqrt{T}$, set $\eta' = \half$, set $\delta' = \half \delta$, and the definition of $V_{\textnormal{max}}$ in equation \eqref{eq:vmax_det} to continue:
\begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} & \le \frac{3}{2}\sum_{t=1}^T \gamma_t + 2 V_{\textnormal{max}} ((K + 2) \ell_{\textnormal{max}} \ln(2/\delta) + L h(\U)^2) + 2 \ln(2/\delta) \\
& \le 2 \ln(2/\delta) + 5 \max\bigg\{\sqrt{((K + 2) \ell_{\textnormal{max}} \ln(2/\delta) + L h(\U)^2) \rho T}, \\
& ~~~~ \frac{2 K}{\max\{1, |\mathcal{Q}|\}}((K + 2) \ell_{\textnormal{max}} \ln(2/\delta) + L h(\U)^2)\bigg\}, \end{align*}
which holds with probability at least $1 - \delta$ and completes the proof of the second statement of Theorem \ref{th:high probability}. \end{proof}
We now restate Theorem \ref{th:fullinfo hp}, after which we prove it.
\thfullinfohp*
\begin{proof}[Proof of Theorem \ref{th:fullinfo hp}] Denote by $z_t = \left({\id[y_t' \not = y_t]} - {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]}\right)$. By Lemma \ref{lem: freedman}, with probability at least $1 - \delta$ we have that \begin{align*}
\sum_{t=1}^T z_t \le \sqrt{3 \ln(1/\delta) \sum_{t=1}^T \E_t\left[z_t^2\right]} + 2 \ln(1/\delta)
= \inf_{\eta > 0} \left\{\frac{3 \ln(1/\delta)}{2 \eta} + \frac{\eta}{2}\sum_{t=1}^T \E_t\left[z_t^2\right]\right\} + 2 \ln(1/\delta). \end{align*} Since the variance is bounded by the second moment, we have that
\begin{equation*}
\E_t\left[z_t^2\right] \leq \E_t\left[{\id[y_t' \not = y_t]}\right] \leq \frac{K-1}{K} \ell_t(\W_t), \end{equation*}
where the last inequality is due to Lemma \ref{lem: upper bound zo loss}. By applying Lemma \ref{lem: surrogate gap}, and recalling that $\gamma_t=0$ and $v_t = 1$ because we are in the full information setting, we find that with probability at least $1 - \delta$
\begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} & \le \sum_{t=1}^T \ell_t(\U) + \inf_{\eta > 0} \left\{ \frac{h(\U)^2}{2\eta} + \sum_{t=1}^T \left(\eta L - \frac{1}{2K}\right)\ell_t(\W_t)\right\} \\
& + \inf_{\eta > 0} \left\{\frac{3 \ln(1/\delta)}{2 \eta} + \sum_{t=1}^T \left(\frac{\eta}{2}\frac{K-1}{K} - \frac{1}{2K} \right)\ell_t(\W_t)\right\} + 2 \ln(1/\delta) \\
& \le \sum_{t=1}^T \ell_t(\U) + K L h(\U)^2 + \left(\frac{3}{2}K+\frac{1}{2}\right)\ln(1/\delta), \end{align*}
which completes the proof in the non-separable case. In the separable case, when there exists a $\U \in \doma{W}$ such that $\sum_{t=1}^T \ell_t(\U) = 0$, we can use Lemma~\ref{lem: upper bound zo loss} to show that, with probability at least $1 - \delta$
\begin{align*}
\sum_{t=1}^T {\id[y_t' \not = y_t]} & \le \inf_{\eta > 0} \left\{\frac{3 \ln(1/\delta)}{2 \eta} + \sum_{t=1}^T \frac{\eta}{2}\frac{K-1}{K}\ell_t(\W_t)\right\} + 2 \ln(1/\delta) + \sum_{t=1}^T {\sum_{y \in [K]} p_t'(y) \id[y \not = y_t]} \\
& \le \inf_{\eta > 0} \left\{\frac{3 \ln(1/\delta)}{2 \eta} + \sum_{t=1}^T \frac{\eta}{2}\frac{K-1}{K}\ell_t(\W_t)\right\} + 2 \ln(1/\delta) + \frac{K-1}{K} \sum_{t=1}^T \ell_t(\W_t) \\
& \le \frac{11}{4} \ln(1/\delta) + 2 \sum_{t=1}^T \ell_t(\W_t) \\
& \le \frac{11}{4} \ln(1/\delta) + 4 L h(\U)^2, \end{align*}
where in the last inequality we used equation \eqref{eq: hp algA sepa trick} with $V_{\textnormal{max}} = 1$. \end{proof}
\section{Details of Section \ref{sec:lower bounds} (Lower Bounds)} \label{app:lower bounds}
\begin{theorem}\label{th:apple lower bound} In the spam filtering classification setting with smooth hinge loss, the surrogate regret of any (possibly randomized) algorithm satisfies \[
\Eb{\sum_{t=1}^T \mathbbm{1}[y_t' \neq y_t]} = \sum_{t=1}^T \ell_t(\widehat{\U}) + \Omega\big(\!\sqrt{T}\big) \] for some label sequence $y_1,\ldots,y_T \in \{1,2\}$, for some sequence of feature vectors $\x_t$ such that $\norm{\x_t}_2 = \sqrt{2}$ for all $t$, and for some $\widehat{\U}$ such that $\norm{\widehat{\U}}_2 \le \sqrt{5}$. \end{theorem}
\begin{proof} We adapt an argument from \citet[Lemma~3]{daniely2015strongly}. Let $R = \big\lfloor\sqrt{T/2}\big\rfloor$ and divide the $T$ rounds into $2R$ segments $T_1,\ldots,T_{2R}$ of size $T/(2R)$ each (without loss of generality, assume that $2R$ divides $T$). For each segment $T_i$, define the components $x_{t,z}$ of the feature vectors $\x_t$ at rounds $t \in T_i$ as follows: $x_{t,z} = 1$ if $z \in \{1,i+1\}$ and $x_{t,z} = 0$ otherwise.
Fix an algorithm $A$ and assume $y_t = 1$ for all $t$. Denote by $N_2$ the rounds in which $A$ predicts $2$. If $\Eb{|N_2|} \ge R$, then $A$ makes more than $R = \Omega\big(\!\sqrt{T}\big)$ mistakes and we are done because \begin{align*}
\sum_{t=1}^T \ell_t(\widehat{\U}) =
\sum_{t=1}^T \max\Big\{\big(1 - \hat{U}^1_1 + \hat{U}^2_1\big)^2,\, 0\Big\} = 0 \end{align*} for $\widehat{\U}$ defined as follows: $\widehat{U}^1_1 = 1$, $\widehat{U}^1_z = 0$ for $z > 1$, and $\widehat{U}^2_z = 0$ for all $z$.
Consider then $\Eb{|N_2|} \le R$. Since there are $2R$ segments, we must have that $\Eb{|N_2 \cap T_j|} \leq \frac{1}{2}$ for some $j \in [2R]$, because otherwise $\Eb{|N_2|} = \Eb{\sum_{i=1}^{2R}|N_2 \cap T_i|} > R$. Now, by Markov's inequality we have that $
\bP(|N_2 \cap T_j| \geq 1) \leq \frac{1}{2} $ which means that $A$ does not predict $2$ in segment $j$ with probability at least $\frac{1}{2}$.
Now set $y_t=2$ for all $t \in T_j$. Since we are in the spam filtering setting, if label $2$ is never predicted in segment $j$, $A$ cannot detect that the label has changed, and so it makes a mistake on each round in $T_j$, which has length $T/(2R)$. Hence \begin{align*}
\Eb{\sum_{t=1}^T \mathbbm{1}[y_t' \neq y_t]} \ge
\frac{T}{2R}\bP\left(\sum_{t=1}^T \mathbbm{1}[y_t' \neq y_t] \ge \frac{T}{2R}\right) \ge
\frac{T}{4R} =
\Omega\big(\!\sqrt{T}\big) \end{align*}
Define a new comparator $\widehat{\U}$ as follows: $\widehat{U}^1_z = 1$ if $z = 1$ and $\widehat{U}^1_z = 0$ otherwise, and $\widehat{U}^2_z = 2$ if $z = j+1$ and $\widehat{U}^2_z = 0$ otherwise. For the same sequence of labels $y_t$, we have that
\begin{align*}
\sum_{t=1}^T \ell_t(\widehat{\U}) =
\!\sum_{t \in T_j} \max\Big\{\big(1 - \widehat{U}^2_1 - \widehat{U}^2_{j+1} + \widehat{U}^1_1 + \widehat{U}^1_{j+1}\big)^2, 0\Big\} +
\!\!\!\!\!\sum_{t \in [T] \setminus T_j}\!\!\!\!\! \max\Big\{\big(1 - \widehat{U}^1_1 + \widehat{U}^2_1\big)^2, 0\Big\} \end{align*} where the sums in the right-hand side are easily seen to be zero. This concludes the proof. \end{proof}
\begin{theorem}\label{th:full info lower bound} Consider the full information setting with smooth hinge loss. For any integer $B\ge 2$, the surrogate regret of any (possibly randomized) algorithm satisfies \[
\Eb{\sum_{t=1}^T \mathbbm{1}[y_t' \neq y_t]} \ge \min_{\U\in\doma{W}}\sum_{t=1}^T \ell_t(\U) + (B^2-1)(K-1) + \frac{K-1}{K} \] for some label sequence $y_1,\ldots,y_T \in [K]$ and for some sequence of feature vectors $\x_t$ such that $\norm{\x_t}_2 = 1$ for all $t$, where $\doma{W} = \theset{\W}{\norm{\W} \le B}$. \end{theorem}
\begin{proof} We sample the labels $y_t$ uniformly at random for the first $M+1$ rounds, where $M = (B^2-1)K^2$. Then we set $y_t = y_{M+1}$ for each $t > M+1$. The feature vectors $\x_t$ have $M+1$ components. For $t=1,\ldots,M$ we set the components $x_{t,z}$ of the feature vectors $\x_t$ as $x_{t,t} = 1$ and $x_{t,z} = 0$ for $z \neq t$. For each $t \ge M + 1$, we set $x_{t,M+1} = 1$ and $x_{t,i} = 0$ for all $i=1,\ldots,M$.
We now define a comparator $\widehat{\U}$ as follows. For each $z=1,\ldots,M$ we set $\widehat{U}^y_z = \frac{1}{K}$ for $y=y_t$ and $\widehat{U}^y_z = 0$ otherwise. Then we set $\widehat{U}^y_{M+1} = 1$ if $y = y_{M+1}$ and $\widehat{U}^y_{M+1} = 0$ otherwise. Note that, deterministically, $\norm{\widehat{\U}}_2^2 = 1 + \sum_{t=1}^M \big(U_t^{y_t}\big)^2 = 1 + \frac{M}{K^2} = B^2$. Now fix any (possibly randomized) algorithm $A$. With these choices, in the first $M$ rounds we have \begin{align*}
\Eb{\sum_{t = 1}^M \mathbbm{1}[y_t' \neq y_t]} - \sum_{t=1}^M \ell_t(\widehat{\U}) =
M\frac{K-1}{K} - M\left(1 - \frac{1}{K}\right)^2 = M\left(\frac{1}{K} - \frac{1}{K^2}\right)~. \end{align*} In the next $T-M$ rounds we have \begin{align*}
\Eb{\sum_{t=M+1}^T \mathbbm{1}[y_t' \neq y_t]} \ge \frac{K-1}{K} \qquad\text{and}\qquad
\sum_{t=M+1}^T \ell_t(\widehat{\U}) = 0~. \end{align*} The above expectations are both with respect to the random draw of $y_1,\ldots,y_{M+1}$ and to $A$'s internal randomization. This implies that there exists a sequence $y_1,\ldots,y_{M+1}$ such that the two above bounds hold in expectation with respect to $A$'s internal randomization. Putting the two bounds together concludes the proof. \end{proof}
\section{Details of Section \ref{sec:experiments} (Experiments)}\label{app:experiments}
\begin{figure}
\caption{Results of the synthetic experiments for the bandit setting. The parameters of algorithms are set to 1, except for $T$. The rows are the different values for $K$ and the columns are the different values for $d$. The whiskers represent the minimum and maximum error rates of the ten repetitions.}
\label{fig:SynOnlyT}
\end{figure}
\begin{figure}
\caption{Results of the synthetic experiments for the bandit setting with theoretical tuning. The rows are the different values for $K$ and the columns are the different values for $d$. The whiskers represent the minimum and maximum error rates of the ten repetitions.}
\label{fig:SynTheo}
\end{figure}
\begin{figure}
\caption{Results of the synthetic experiments for multiclass spam filtering. The parameters of algorithms are set to 1, except for $T$. The rows are the different values for $K$ and the columns are the different values for $d$. The whiskers represent the minimum and maximum error rates of the ten repetitions.}
\label{fig:SynRAonlyT}
\end{figure}
\begin{figure}
\caption{Results of the synthetic experiments for multiclass spam filtering with theoretical tuning. The rows are the different values for $K$ and the columns are the different values for $d$. The whiskers represent the minimum and maximum error rates of the ten repetitions.}
\label{fig:SynRAtheo}
\end{figure}
\begin{figure}
\caption{Results of the synthetic experiments for the full information setting. The rows are the different values for $K$ and the columns are the different values for $d$. The whiskers represent the minimum and maximum error rates of the ten repetitions.}
\label{fig:SynFull}
\end{figure}
\begin{figure}
\caption{Results of the synthetic experiments for the multiclass spam filtering. The plot shows the best results of algorithms with parameters suggested by theory, or tuned with all parameters set to 1, except for $T$. The rows are the different values for $K$ and the columns are the different values for $d$. The whiskers represent the minimum and maximum error rates of the ten repetitions.}
\label{fig:SynbestRA}
\end{figure}
The feature vectors $\x_t \in \{0, 1\}^d$ and class labels are generated as follows. For each class we reserve the first $10 d'$ bits to generate ``keywords''. For each class, $1 d'$ to $5 d'$ of these bits are randomly turned on to represent the keywords for that class. The remaining $30 d'$ bits are reserved for unrelated words, of which $5 d'$ are randomly turned on. For each $t$ we select a class uniformly at random and set $\x_t$ to be the feature vector described above. Then, with probability 0, 0.05, or 0.1, we replace the class with a class chosen uniformly at random. We varied the number of classes between 6, 9, and 12 and varied $d' \in \{2, 3, 4\}$. In the multiclass spam filtering setting we fixed $\mathcal{Q} = \{1\}$, i.e., querying $y_t$ corresponds to predicting label $1$.
As suggested by \citet{hazan2011newtron}, we tuned PNewtron with $\alpha = 10$ and chose the unit ball as domain. For SOBAdiag, we used the adaptive tuning for the exploration rate in the experiment with theoretical tuning and used a fixed exploration rate in the experiment with tuning based only on $T$.
For the experiments in the partial information setting we tuned the algorithms according to what theory suggests for the worst case. Additionally, we also ran experiments with all parameters set to 1, except for $T$. Initially we only tuned the algorithms with theoretical tuning, but we found that in the bandit setting two of the algorithms we compare with did not have satisfactory performance. All parameters based on (an upper bound on) $\|\U\|$ we set to $1$ so as not to advantage or disadvantage algorithms that did not use tuning based on $\|\U\|$. All experiments involving randomness due to the algorithms were repeated ten times.
All experiments were run on a system with 8\,GB of RAM, an Intel i5-6300U CPU, and in Python 3.8.5 on a Windows 10 operating system. The results of the experiments are summarized in Figures \ref{fig:SynOnlyT}, \ref{fig:SynTheo}, \ref{fig:SynRAonlyT}, \ref{fig:SynRAtheo}, \ref{fig:SynFull}, and \ref{fig:SynbestRA}. We also ran experiments for the label efficient graph comparing \textproc{Gappletron} with the label efficient \textproc{Perceptron} of \citet{cesa2006worst}. However, as they assume that labels come without a cost, it was not clear how to tune their algorithm. We tried several parameter values that still guarantee sublinear regret in $T$. However, with none of the choices of parameters did the label efficient \textproc{Perceptron} perform as well as \textproc{Gappletron}, so we chose not to report the results of these experiments.
In the experiments with bandit feedback, as we mentioned, Figure \ref{fig:SynTheo} shows that with theoretical tuning both PNewtron and SOBAdiag performed poorly. We suspect this is due to the tuning with $d$, as when we do not tune with $d$ the performance of these algorithms greatly improved (see Figure \ref{fig:SynOnlyT}). The error rate of \textproc{Banditron} was the lowest with theoretical tuning in roughly half of the experiments. For GapLog and GapSmH the performance also improved when only tuning with $T$, especially in experiments with no noise. GapHin became more unstable when tuning only with $T$, as can be seen from the spread of the results. We suspect this is due to the fact that with the hinge loss, \textproc{Gappletron} explores less than with the smooth hinge loss and the logistic loss. Note that with the smooth hinge loss \textproc{Gappletron} explores less than with the logistic loss, which also seems to become apparent from the range of performance of these two versions of \textproc{Gappletron}. With theoretical tuning, in low noise settings Banditron is on par with the performance of all versions of \textproc{Gappletron}, but with high noise GapLog and GapSmH outperform all other algorithms. With tuning that only depends on $T$, GapLog and GapSmH strictly outperform all other algorithms. Figure \ref{fig:Synbestbandit} contains the results for the best version of each bandit algorithm, which shows that GapLog and GapSmH outperform all other algorithms.
In the multiclass spam filtering setting we compared \textproc{Gappletron} with the importance weighted version of Banditron, which explored the revealing action with probability $\max\{\half, (X^2/T)^{1/3}\}$ or with probability $\max\{\half, (1/T)^{1/3}\}$, where the former is the theoretical tuning and $\|\x_t\|_2 \leq X$.
In multiclass spam filtering with theoretical tuning (Figure \ref{fig:SynRAtheo}), \textproc{Banditron} had the lowest error rate in the no-noise experiments. For experiments with noise we see that as $K$ increases the performance of \textproc{Gappletron} deteriorates compared to the performance of \textproc{Banditron}. We suspect this is due to the $\sqrt{K}$ in the exploration of \textproc{Gappletron}, which does not appear in the exploration of \textproc{Banditron}\footnote{Although no bound exists for this algorithm in literature, one can adapt the proof of \citet{kakade2008efficient} to prove a $O((X\|\U\|)^{1/3}T^{2/3})$ surrogate regret bound.}. In Figure \ref{fig:SynRAonlyT} we can see that with tuning based solely on $T$, the spread of the algorithms seems to increase, as was the case in the bandit setting. Either GapHin or GapSmH had the lowest error rate in these experiments, which is also true when comparing the algorithms across the tuning for the exploration rate (Figure \ref{fig:SynbestRA}). The performance of GapLog gets worse as $K$ increases, as was the case in the full information setting. We suspect this is due to the fact that GapLog explores more than GapHin and GapSmH. While in the bandit setting extra exploration gives additional information, in multiclass spam filtering it does not provide additional information and it only leads to making more mistakes.
In the full information setting we compare \textproc{Gappletron} with the diagonal version of the second-order Perceptron, soPerceptron \citep{cesa2005second}, the multiclass Perceptron, and the passive-aggressive version of the multiclass perceptron \citep{crammer2006online}. In Figure \ref{fig:SynFull}, we can see that if there is no label noise, essentially all algorithms find the separating hyperplane. Note that GapLog has the worst performance in this case. This is due to the fact that with the logistic loss, \textproc{Gappletron} never stops playing at random, leading to sometimes unnecessarily playing the wrong action. We also see this behavior in experiments with label noise, where GapLog performs worse than the other versions of \textproc{Gappletron}, although its performance is still either on par with or better than the non-\textproc{Gappletron} algorithms in these experiments. Overall, in the full information experiments GapSmH appears to have the best performance.
\end{document} |
\begin{document}
\maketitle
\begin{abstract} We compute the analytic torsion of a cone over a sphere of dimension 1, 2, and 3, and we conjecture a general formula for the cone over an odd dimensional sphere. \end{abstract}
\section{Introduction} \label{s0}
An important open problem in geometric and global analysis is to extend the Cheeger M\"uller theorem to spaces with singularities of conical type. The aim of this work is to give some contribution to the quantitative aspect of the problem. To this end, we give explicit formulas for the analytic torsion of the class of low dimensional spaces consisting of cones over spheres. The results also cover the smooth case of the discs, and therefore also provide a contribution to the discussion on the extension of the Cheeger M\"uller theorem to smooth manifolds with boundary, namely to the problem of establishing the correct boundary term.
Let $(W,g)$ be a closed connected Riemannian manifold of dimension $n$ with metric $g$. Let $C W$ denote the completed finite metric cone over $W$, namely the space $[0,l]\times W$, with the metric $dr\otimes dr+r^2 g$, on $(0,l]\times W$, as defined in \cite{Che0} (2.1). An interesting open problem concerning the metric cone is to compute its analytic torsion. The analytic torsion of a smooth connected Riemannian manifold $(M,g)$ of dimension $m$ is defined by \cite{RS}, Section 6, \begin{equation}\label{analytic} \log T(M)=\frac{1}{2}\sum_{q=1}^m(-1)^q q\zeta'(0,\Delta^{(q)}), \end{equation} where $\Delta^{(q)}$ is the Laplace operator on $q$-forms on $M$, and the zeta function is defined by \cite{RS} (1.5) \[ \zeta(s,\Delta^{(q)})=\sum_{\lambda\in {\rm Sp}_+\Delta^{(q)}}\lambda^{-s}, \] for ${\rm Re}(s)>\frac{m}{2}$, and by analytic continuation elsewhere. This definition extends to the case of a cone $CW$ using the Hodge theory and the functional calculus for the Laplace operator on forms developed in \cite{Che0}. More precisely, one would like to obtain formulas for $T(CW)$ as a function of some geometric invariant of $W$. Starting from the result of Cheeger \cite{Che0} \cite{Che2}, and applying absolute or relative boundary conditions \cite{RS}, Section 3, one obtains quite easily the eigenvalues of the Laplace operator on forms, necessary to compute the torsion. These eigenvalues turn out to be sequences of real numbers ${\rm Sp}_+\Delta^{(q)}=\{\lambda^{(q)}_{\mu,k}\}$ that correspond to the zeros of some linear combinations of Bessel functions of the first kind and their derivative. The index $k$ enumerates the zeros, and the index $\mu$ is given by some explicit function of the eigenvalues of the Laplace operator on forms on the section of the cone, namely on $W$. The zeta function of this type of double sequences can be tackled using some recent results of Spreafico \cite{Spr3} \cite{Spr5} \cite{Spr6} \cite{Spr9}. 
The general strategy is to prove that the sequence ${\rm Sp}_+\Delta^{(q)}$ is spectrally decomposable over some sequence ${\rm Sp}_+ \Delta_{W}^{(p)}$ of eigenvalues of the Laplacian on forms on the section. Then, one can apply the result of Spreafico to obtain the value $\zeta'(0,\Delta^{(q)})$. The final formula can be very complicated in general, and not particularly illuminating. The possibility of reducing and simplifying this formula is based on two facts: one fact is the explicit form of the coefficients of the uniform asymptotic expansion of the Bessel function $I_\nu(\nu z)$ (and of its derivative) with respect to the order $\nu$; the second fact is the explicit knowledge of the eigenvalues of the Laplacian on forms on the section. While the first fact is proved to be true in general, the second one is not clear. For this reason it is interesting to study particular cases where the second fact is also true.
In this note, we study the analytic torsion of the cone over an $n$-dimensional sphere. More precisely, we prove in Section \ref{s4} the following theorem, and we state a conjecture for the general case at the end of Section \ref{s5}.
\begin{theo} \label{t1} The analytic torsion of the cone $C_\alpha S^{n}_{l\sin\alpha}$ of angle $\alpha$, and length $l>0$, over the sphere $S^{n}$, with the standard metric induced by the immersion in ${\mathds{R}}^{n+2}$, and absolute boundary conditions is, for $n=1,2$, and $3$: \begin{align*} \log T(C_\alpha S^{1}_{l\sin\alpha})=&\frac{1}{2}\log {\rm Vol}(C_\alpha S^{1}_{l\sin\alpha})+\frac{1}{2}\sin\alpha=\frac{1}{2}\log\pi l^2\sin\alpha+\frac{1}{2}\sin\alpha,\\ \log T(C_\alpha S^{2}_{l\sin\alpha})=&\frac{1}{2}\log {\rm Vol}(C_\alpha S^{2}_{l\sin\alpha})-\frac{1}{2}f({\rm csc}\alpha)+\frac{1}{4}\sin^2\alpha\\ =&\frac{1}{2}\log \frac{4\pi l^3\sin^2\alpha}{3}-\frac{1}{2}f({\rm csc}\alpha)+\frac{1}{4}\sin^2\alpha,\\ \log T(C_\alpha S^{3}_{l\sin\alpha})=&\frac{1}{2}\log {\rm Vol}(C_\alpha S^{3}_{l\sin\alpha}) +\frac{3}{4}\sin\alpha-\frac{1}{12}\sin^3\alpha\\ =&\frac{1}{2}\log \frac{\pi^2l^4\sin^3\alpha}{2}+\frac{3}{4}\sin\alpha-\frac{1}{12}\sin^3\alpha, \end{align*} where the function $f(\nu)$ is given at the end of Section \ref{s4}.
\end{theo}
\section{Geometric setup} \label{s1}
We describe in this section the geometric setup in details. Let $S^n_b$ be the standard sphere of radius $b>0$ in ${\mathds{R}}^{n+1}$, $S^{n}_b=\{x\in{\mathds{R}}^{n+1}~|~|x|=b\}$ (we simply write $S^n$ for $S^n_1$). Imbed $S^n_{l\sin\alpha}$ in ${\mathds{R}}^{n+2}$, with center in the point $\{0,...,0,l\sin\alpha\}$, with $l>0$. Let $C_\alpha S^n_{l\sin\alpha}$ be the cone of angle $\alpha$ over $S^n_{l\sin\alpha}$ in ${\mathds{R}}^{n+2}$. Note that the disc corresponds to $D^{n+1}_l=C_\frac{\pi}{2} S^{n}_l$. We parameterize $C_{\alpha}S^n_{l\sin\alpha}$ by \begin{equation*}\label{}C_{\alpha}S_{l\sin\alpha}^{n}=\left\{ \begin{array}{rcl} x_1&=&r \sin{\alpha} \sin{\theta_n}\sin{\theta_{n-1}}\cdots\sin{\theta_3}\sin{\theta_2}\cos{\theta_1} \\[8pt] x_2&=&r \sin{\alpha} \sin{\theta_n}\sin{\theta_{n-1}}\cdots\sin{\theta_3}\sin{\theta_2}\sin{\theta_1} \\[8pt] x_3&=&r \sin{\alpha} \sin{\theta_n}\sin{\theta_{n-1}}\cdots\sin{\theta_3}\cos{\theta_2} \\[8pt]
&\vdots& \\
x_{n+1}&=&r \sin{\alpha} \cos{\theta_n} \\[8pt] x_{n+2}&=&r \cos{\alpha} \end{array} \right.\end{equation*} with $r \in [0,l]$, $\theta_1 \in [0,2\pi]$, $\theta_2,\ldots,\theta_n \in [0,\pi]$, $\alpha$ is a fixed positive real number, and $0<a=\frac{1}{\nu}= \sin{\alpha}\leq 1$. This is a compact connected space. The metric induced by the immersion in ${\mathds{R}}^{n+2}$ is \begin{align*} g &=dr \otimes dr + r^2 a^2 g_{S^{n}_{1}},\\
\end{align*} and is smooth for $r>0$. Comparing with \cite{Che0}, Section 1, we see that the space $C_\alpha S^n_{l\sin\alpha}$ is a completed metric cone, and $X_\alpha=C_\alpha S^n_{l\sin\alpha}-\{0\}$, is a metric cone over $S^n_{l\sin \alpha}$. Note that $X_\alpha$ is not smooth, since the radius of the sphere is not unitary. Note also that the space $C_\alpha S^n_{l\sin\alpha}$ is simply connected (in fact it has the homotopy type of a point).
In order to define the appropriate self adjoint extension of the Laplace operator on forms, we split the space of forms near the boundary as a direct sum $\Lambda C_\alpha S^n_{l\sin\alpha}=\Lambda S^n_{l\sin\alpha}\oplus N^* C_\alpha S^n_{l\sin\alpha}$, where $N^*$ is the dual to the normal bundle to the boundary. Locally, this reads as follows. Let ${\partial}_r$ denote the outward pointing unit normal vector to the boundary, and $dr$ the corresponding one-form. Near the boundary we have the collar decomposition $C_\alpha S^n_{l\sin\alpha}=(l-\epsilon,l]\times S^n_{l\sin\alpha}$, and if $y$ is a system of local coordinates on the boundary, then $x=(r,y)$ is a local system of coordinates in $C_\alpha S^n_{l\sin\alpha}$. The smooth forms on $C_\alpha S^n_{l\sin\alpha}$ near the boundary decompose as \[ \omega=\omega_{\rm tan}+\omega_{\rm norm}, \] where $\omega_{\rm norm}$ is the orthogonal projection on the subspace generated by $dr$, and $\omega_{\rm tan}$ is in $\Lambda S^n_{l\sin\alpha}$. We write \[ \omega=\omega_1+ \omega_{2}\wedge dr, \] where $\omega_j\in C^\infty(C_\alpha S^n_{l\sin\alpha})\otimes \Lambda S^n_{l\sin\alpha}$, and \[
*\omega_2=dr \wedge *\omega. \]
Define absolute boundary conditions by \[
B_{\rm abs}(\omega)=\omega_{\rm norm}|_{S^n_{l\sin\alpha}}=\omega_2|_{S^n_{l\sin\alpha}}=0, \] and relative boundary conditions by \[
B_{\rm rel}(\omega)=\omega_{\rm tan}|_{S^n_{l\sin\alpha}}=\omega_1|_{S^n_{l\sin\alpha}}=0. \]
Let ${\mathcal{B}}(\omega)=B(\omega)\oplus B((d+d^\dagger)(\omega))$. Then the operator $\Delta=(d+d^\dagger)^2$ with boundary conditions ${\mathcal{B}}(\omega)=0$ is self adjoint. Note that ${\mathcal{B}}$ corresponds to \begin{equation}\label{abs}
{\mathcal{B}}_{\rm abs}(\omega)=0\hspace{20pt}{\rm if~ and~ only~ if}\hspace{20pt}\left\{\begin{array}{l}\omega_{\rm norm}|_{S^n_{l\sin\alpha}}=0,\\
(d\omega)_{\rm norm}|_{S^n_{l\sin\alpha}}=0,\\
\end{array} \right. \end{equation} \begin{equation}\label{rel}
{\mathcal{B}}_{\rm rel}(\omega)=0\hspace{20pt}{\rm if~ and~ only~ if}\hspace{20pt}\left\{\begin{array}{l}\omega_{\rm tan}|_{S^n_{l\sin\alpha}}=0,\\
(d^\dagger\omega)_{\rm tan}|_{S^n_{l\sin\alpha}}=0,\\
\end{array} \right. \end{equation}
\section{The spectrum of the Laplacian on forms} \label{s2}
In this section we give the spectrum of the Laplacian on forms. The results for $n=1$ and $n=2$ are in \cite{HMS}, Lemmas 3 and 4. Thus we just need to study the case of $n=3$. Decomposing with respect to the projections on the eigenspaces of the restriction of the Laplacian on the section of the cone (i.e.\ with respect to the angular momenta), the definition of an appropriate self adjoint extension of the Laplace operator (on functions) on a cone reduces to the analysis of the boundary values of a singular Sturm--Liouville ordinary second order differential equation on the line segment $(0,l]$. The problem was addressed already by Rellich in \cite{Rel}, who parameterized the self adjoint extensions. In particular, it turns out that there are no boundary values (at zero) for the nonzero modes of the angular momentum, while a boundary condition is necessary for the zero modes, and the unique self adjoint extension defined by this boundary condition is the maximal extension, corresponding to the Friedrich extension (see \cite{BS2} or \cite{Che2} for the boundary condition). The same argument works for the Laplacian on forms. However, in the present situation we do not actually need boundary conditions (at zero) for forms of positive degree, since the middle homology of the section of the cone is trivial (compare with \cite{Che0}).
Since the eigenvalues for relative boundary conditions follow by Hodge duality, we just give the eigenvalues for absolute boundary conditions. In the following, we denote by $\{k:\lambda\}$ the set of eigenvalues $\lambda$ with multiplicity $k$.
\begin{lem}\label{eig1} The spectrum of the (Friedrich extension of the) Laplacian operator $\Delta_{C_\alpha S^1_{l\sin\alpha}}^{(q)}$ on $q$-forms with absolute boundary conditions is (where $\nu={\rm csc}\alpha$): \begin{align*} {\rm Sp} \Delta_{C_\alpha S^1_{l\sin\alpha}}^{(0)}=& \left\{j_{1,k}^2/l^{2}\right\}_{k=1}^{\infty}\cup \left\{2:(j_{\nu n,k}')^2/l^{2}\right\}_{n,k=1}^\infty, \\ {\rm Sp} \Delta_{C_\alpha S^1_{l\sin\alpha}}^{(1)}=& \left\{j_{0,k}^2/l^{2}\right\}_{k=1}^{\infty}\cup\left\{j_{1,k}^2/l^{2}\right\}_{k=1}^\infty\cup \left\{2:j_{\nu n,k}^2/l^{2}\right\}_{n,k=1}^\infty \\ & \cup \left\{2:(j_{\nu n,k}')^2/l^{2}\right\}_{n,k=1}^\infty , \\ {\rm Sp} \Delta_{C_\alpha S^1_{l\sin\alpha}}^{(2)}=& \left\{j_{0,k}^2/l^{2}\right\}_{k=1}^\infty\cup \left\{2:j_{\nu n,k}^2/l^{2}\right\}_{n,k=1}^\infty. \\ \end{align*}
\end{lem}
\begin{lem}\label{eig2} The spectrum of the (Friedrich extension of the) Laplacian operator $\Delta_{C_{\alpha} S^2_{l\sin\alpha}}^{(q)}$ on $q$-forms with absolute boundary conditions is: \begin{align*} {\rm Sp} \Delta_{C_{\alpha} S^2_{l\sin\alpha}}^{(0)}=& \left\{(2n+1): \hat j_{\mu_n,k,-}^2/l^{2}\right\}_{n,k=1}^{\infty} \cup \left\{j_{\frac{3}{2},k}^2/l^{2}\right\}_{k=1}^\infty, \\ {\rm Sp} \Delta_{C_{\alpha} S^2_{l\sin\alpha}}^{(1)}=& \left\{j_{\frac{3}{2},k}^2/l^{2}\right\}_{k=1}^\infty\cup \left\{(2n+1):j_{\mu_n, k}^2/l^{2}\right\}_{n,k=1}^\infty\\ &\cup\left\{(2n+1):\hat j_{\mu_n,k,+}^2/l^{2}\right\}_{n,k=1}^\infty\cup \left\{(2n+1):\hat j_{\mu_n,k,-}^2/l^{2}\right\}_{n,k=1}^\infty,\\ {\rm Sp} \Delta_{C_{\alpha} S^2_{l\sin\alpha}}^{(2)}=& \left\{j_{\frac{1}{2},k}^2/l^{2}\right\}_{k=1}^\infty\cup \left\{(2n+1):j_{\mu_n, k}^2/l^{2}\right\}_{n,k=1}^\infty\\ &\cup\left\{(2n+1):\hat j_{\mu_n,k,+}^2/l^{2}\right\}_{n,k=1}^\infty\cup \left\{(2n+1):j_{\mu_n,k}^2/l^{2}\right\}_{n,k=1}^\infty, \\ {\rm Sp} \Delta_{C_{\alpha} S^2_{l\sin\alpha}}^{(3)}=& \left\{(2n+1):j_{\mu_n,k}^2/l^{2}\right\}_{n,k=1}^{\infty} \cup \left\{j_{\frac{1}{2},k}^2/l^{2}\right\}_{k=1}^\infty ,\\ \end{align*} where $\mu_n=\sqrt{\nu^2 n(n+1)+\frac{1}{4}}$, and where the $\hat j_{\nu,k,\pm}$ are the zeros of the function $G^{\pm}_{\nu}(z)=\pm\frac{1}{2}J_{\nu}(z)+zJ'_\nu(z)$. \end{lem}
\begin{lem}\label{eig3} The spectrum of the (Friedrich extension of the) Laplacian operator $\Delta_{C_{\alpha} S^3_{l\sin\alpha}}^{(q)}$ on $q$-forms with absolute boundary conditions is: \begin{align*} {\rm Sp} \Delta_{C_{\alpha} S^3_{l\sin\alpha}}^{(0)}=& \left\{j_{2,k}^2/l^{2}\right\}_{k=1}^\infty \cup \left\{(n+1)^2: \tilde j_{\mu_{0,n},k,-}^2/l^{2}\right\}_{n,k=1}^{\infty}, \\ {\rm Sp} \Delta_{C_{\alpha} S^3_{l\sin\alpha}}^{(1)}=& \left\{j_{2,k}^2/l^{2}\right\}_{k=1}^\infty\cup \left\{2n(n+2):(j'_{\mu_{1,n}, k})^2/l^{2}\right\}_{n,k=1}^\infty\\ &\cup\left\{(n+1)^2:\tilde j_{\mu_{0,n},k,-}^2/l^{2}\right\}_{n,k=1}^\infty\cup \left\{(n+1)^2: j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^\infty,\\ {\rm Sp} \Delta_{C_{\alpha} S^3_{l\sin\alpha}}^{(2)}=& \left\{(n+1)^2: \tilde j_{\mu_{0,n},k,+}^2/l^{2}\right\}_{n,k=1}^\infty \cup \left\{2n(n+2):(j'_{\mu_{1,n}, k})^2/l^{2}\right\}_{n,k=1}^\infty\\ &\cup\left\{2n(n+2): j_{\mu_{1,n},k}^2/l^{2}\right\}_{n,k=1}^\infty\cup \left\{(n+1)^2: j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^\infty, \\ {\rm Sp} \Delta_{C_{\alpha} S^3_{l\sin\alpha}}^{(3)}=& \left\{j_{1,k}^2/l^{2}\right\}_{k=1}^\infty\cup \left\{(n+1)^2:\tilde j_{\mu_{0,n}, k,+}^2/l^{2}\right\}_{n,k=1}^\infty\\ &\cup\left\{(n+1)^2: j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^\infty\cup \left\{2n(n+2): j_{\mu_{1,n},k}^2/l^{2}\right\}_{n,k=1}^\infty,\\ {\rm Sp} \Delta_{C_{\alpha} S^3_{l\sin\alpha}}^{(4)}=& \left\{j_{1,k}^2/l^{2}\right\}_{k=1}^\infty \cup \left\{(n+1)^2: \tilde j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^{\infty}, \end{align*} where \[ \mu_{0,n}=\sqrt{\nu^2 n(n+2)+1}, \qquad \mu_{1,n} = \nu(n+1), \] and where the $\tilde j_{\nu,k,\pm}$ are the zeros of the function $T^{\pm}_{\nu}(z)=\pm J_{\nu}(z)+zJ'_\nu(z)$. \end{lem}
\begin{proof} Recall we parameterize $C_{\alpha}S^{3}_{l\sin\alpha}$ by \[C_{\alpha}S^{3}_{l\sin\alpha}=\begin{cases} x_1=x\sin{\alpha}\sin{\theta_3}\sin{\theta_2}\cos{\theta_1} \\[8pt] x_2=x\sin{\alpha}\sin{\theta_3}\sin{\theta_2}\sin{\theta_1} \\[8pt] x_3=x\sin{\alpha}\sin{\theta_3}\cos{\theta_2} \\[8pt] x_4=x\sin{\alpha}\cos{\theta_3} \\[8pt] x_5=x\cos{\alpha} \end{cases}\] where $(x,\theta_1,\theta_2,\theta_{3})\in [0,l]\times[0,2\pi]\times[0,\pi]\times[0,\pi]$, $0<\alpha\leq \pi/2$ is a fixed real number and $0< a=\sin\alpha\leq 1$. The induced metric is (for $x>0$) \[ g = dx\otimes dx + (a^2x^2\sin^2\theta_2\sin^2\theta_3) d\theta_1\otimes d\theta_1 + (a^2x^2\sin^2\theta_3) d\theta_2 \otimes
d\theta_2 + (a^2x^2) d\theta_3 \otimes d\theta_3. \]
Using the absolute boundary conditions on forms described in equation (\ref{abs}) of the previous section, we obtain the following equations. For the $0$-forms: \begin{equation}\label{abs0S3} \begin{aligned} {\rm abs. }&: \partial_x \omega(l,\theta_1,\theta_2,\theta_3)=0. \end{aligned} \end{equation}
For the $1$-forms: \begin{equation}\label{abs1S3} \begin{aligned} {\rm abs.}&:
\left\{\begin{array}{lll} \omega_x (l,\theta_1,\theta_2,\theta_3) = 0 \\
\partial_x \omega_{\theta_1}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_2}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.\end{array}\right. \end{aligned} \end{equation}
For the $2$-forms, with $i=1,2,3$: \begin{equation}\label{abs2S3} \begin{aligned} {\rm abs.}&:
\left\{\begin{array}{ll} \omega_{x\theta_i} (l,\theta_1,\theta_2,\theta_3) = 0 \\
\partial_x\omega_{\theta_1\theta_2}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_1\theta_3}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.\end{array}\right. \end{aligned} \end{equation}
For the $3$-forms: \begin{equation}\label{abs3S3} \begin{aligned} {\rm abs.}&:
\left\{\begin{array}{lll} \omega_{x\theta_1\theta_2} (l,\theta_1,\theta_2,\theta_3) = 0 \\
\omega_{x\theta_1\theta_3}(l,\theta_1,\theta_2,\theta_3)=0 \\
\omega_{x\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_1\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.\end{array}\right. \end{aligned} \end{equation}
For the $4$-forms: \begin{equation}\label{abs4S3} \begin{aligned} {\rm abs. }&: \omega_{x\theta_1\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0. \end{aligned} \end{equation}
Next we use the description of the eigenfunctions given in Section 3 of \cite{Che2} to determine the eigenvalues. By \cite{IT} the eigenvalues of the coexact forms of the Laplacian over $S^3$ are, with $n\geq 1$:
\begin{center}
\begin{table}[htb] \centering
\begin{tabular}{|c|c|c|} \hline Dimension & Eigenvalue & Multiplicity \\ \hline \hline $0$ & $n(n+2)$ & $(n+1)^2$ \\ \cline{1-3} $1$ & $(n+1)^2$ & $2 n(n+2)$ \\ \cline{1-3} $2$ & $n(n+2)$ & $(n+1)^2$\\ \hline \end{tabular} \end{table}
\end{center}
And by \cite{Che2} we have $\mu_{0,n} = \mu_{2,n} = \sqrt{\nu^2 n(n+2) +1 }$ and $\mu_{1,n} = \nu(n+1)$, and the eigenforms of the Laplacian of $C_{\alpha}S^{3}_{la}$ are as follows. For the $0$-forms: \begin{equation*} \alpha^{(0)}_{n} = x^{-1} J_{\mu_{0,n}}(\lambda x) \phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3), \qquad E^{(0)} = x^{-1} J_{1}(\lambda x) h^{0}(\theta_1,\theta_2,\theta_3). \end{equation*}
For the $1$-forms: \begin{align*} \alpha^{(1)}_{n} &= x^{-1} J_{\mu_{1,n}}(\lambda x) \phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3),\\ \beta^{ (1)}_{n} &= x^{-1} J_{\mu_{0,n}}(\lambda x)d\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3) + \partial_x(x^{-1} J_{\mu_{0,n}}(\lambda x))dx\wedge \phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3),\\ \gamma^{ (1)}_{n} &= x^{-1} \partial_x(x J_{\mu_{0,n}}(\lambda x)) d\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3) +x^{-2} J_{\mu_{0,n}}(\lambda x) dx\wedge \tilde \delta \tilde d \phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3),\\ D^{(1)} &= \partial_x(x^{-1} J_1(\lambda x)) dx \wedge h^{(0)}(\theta_1,\theta_2,\theta_3) \end{align*}
For the $2$-forms: \begin{align*} \alpha^{(2)}_{n} &= x J_{\mu_{0,n}}(\lambda x) \phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\ \beta^{ (2)}_{n} &= J_{\mu_{1,n}}(\lambda x)d\phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3) + \partial_x(J_{\mu_{1,n}}(\lambda x))dx\wedge\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\ \gamma^{ (2)}_{n} &= x \partial_x(J_{\mu_{1,n}}(\lambda x)) d\phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3) +x^{-1} J_{\mu_{1,n}}(\lambda x) dx\wedge \tilde \delta \tilde d \phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3),\\ \delta^{(2)}_n &= J_{\mu_{0,n}}(\lambda x) dx \wedge d\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3). \end{align*}
For the $3$-forms: \begin{align*} \beta^{ (3)}_{n} &= x J_{\mu_{0,n}}(\lambda x)d\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3) + \partial_x(x J_{\mu_{0,n}}(\lambda x))dx\wedge\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\ \gamma^{ (3)}_{n} &= x^{3} \partial_x(x^{-1} J_{\mu_{0,n}}(\lambda x)) d\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3) + J_{\mu_{0,n}}(\lambda x) dx\wedge \tilde \delta \tilde d \phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\ \delta^{(3)}_n &= x J_{\mu_{1,n}}(\lambda x) dx \wedge d\phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3),\\ E^{(3)} & = x^2 J_{2}(\lambda x)h^{3}(\theta_1,\theta_2,\theta_3). \end{align*}
For $4$-forms: \begin{equation*} \delta^{(4)}_{n} = x^{2} J_{\mu_{0,n}}(\lambda x) dx \wedge d\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3), \qquad D^{(4)} =\partial_x( x^{2} J_{2}(\lambda x)) dx \wedge h^{3}(\theta_1,\theta_2,\theta_3). \end{equation*}
Where the $\phi^{(i)}_n(\theta_1,\theta_2,\theta_3)$, for $i=0,1,2$, are coexact eigenforms of the Laplacian on $S^{3}$, and $h^{(0)}(\theta_1,\theta_2,\theta_3)$, and $h^{(3)}(\theta_1,\theta_2,\theta_3)$ are harmonic forms of the Laplacian on $S^{3}$. Using these functions in the boundary conditions given in equation (\ref{abs}), we obtain the result.
\end{proof}
\section{Zeta determinants for some class of double sequences} \label{s3}
We give in this section all the tools necessary in order to evaluate the zeta determinants appearing in the calculation of the analytic torsion. This is based on \cite{Spr3} \cite{Spr4} \cite{Spr5} and \cite{Spr9}. We present here a simplified version of the main result of those works (see in particular the general formulation in Theorem 3.9 of \cite{Spr9} or the Spectral Decomposition Lemma of \cite{Spr5}), that is sufficient for our purpose here.
Let $S=\{a_n\}_{n=1}^\infty$ be a sequence of non-vanishing complex numbers, ordered by increasing moduli, with unique point of accumulation at infinity. The positive real number (possibly infinite) \[
s_0=\limsup_{n\to\infty} \frac{\log n}{\log |a_n|}, \] is called the exponent of convergence of $S$, and denoted by ${\mathsf e}(S)$. We are only interested in sequences with ${\mathsf e}(S)=s_0<\infty$. If this is the case, then there exists a least integer $p$ such that the series $\sum_{n=1}^\infty a_n^{-p-1}$ converges absolutely. We assume $s_0-1< p\leq s_0$; we call the integer $p$ the genus of the sequence $S$, and we write $p={\mathsf g}(S)$. We define the zeta function associated to $S$ by the uniformly convergent series \[ \zeta(s,S)=\sum_{n=1}^\infty a_n^{-s}, \] when ${\rm Re}(s)> {\mathsf e}(S)$, and by analytic continuation otherwise. We call the open subset $\rho(S)={\mathds{C}}-S$ of the complex plane the resolvent set of $S$. For all $\lambda\in\rho(S)$, we define the Gamma function associated to $S$ by the canonical product \begin{equation}\label{gamma} \frac{1}{\Gamma(-\lambda,S)}=\prod_{n=1}^\infty\left(1+\frac{-\lambda}{a_n}\right)\e^{\sum_{j=1}^{{\mathsf g}(S)}\frac{(-1)^j}{j}\frac{(-\lambda)^j}{a_n^j}}. \end{equation}
When necessary in order to define the meromorphic branch of an analytic function, the domain for $\lambda$ will be the open subset ${\mathds{C}}-[0,\infty)$ of the complex plane. We use the notation $\Sigma_{\theta,c}=\left\{z\in {\mathds{C}}~|~|\arg(z-c)|\leq \frac{\theta}{2}\right\}$, with $c\geq \delta> 0$, $0< \theta<\pi$. We use
$D_{\theta,c}={\mathds{C}}-\Sigma_{\theta,c}$, for the complementary (open) domain and $\Lambda_{\theta,c}=\partial \Sigma_{\theta,c}=\left\{z\in {\mathds{C}}~|~|\arg(z-c)|= \frac{\theta}{2}\right\}$, oriented counter clockwise, for the boundary. With this notation, we define now a particular subclass of sequences. Let $S$ be as above, and assume that ${\mathsf e}(S)<\infty$, and that there exist $c>0$ and $0<\theta<\pi$, such that $S$ is contained in the interior of the sector $\Sigma_{\theta,c}$. Furthermore, assume that the logarithm of the associated Gamma function has a uniform asymptotic expansion for large $\lambda\in D_{\theta,c}(S)={\mathds{C}}-\Sigma_{\theta,c}$ of the following form \[ \log\Gamma(-\lambda,S)\sim\sum_{j=0}^\infty a_{\alpha_j,0}(-\lambda)^{\alpha_j} +\sum_{k=0}^{{\mathsf g}(S)} a_{k,1}(-\lambda)^k\log(-\lambda), \] where $\{\alpha_j\}$ is a decreasing sequence of real numbers. Then, we say that $S$ is a {\it totally regular sequence of spectral type with infinite order}. We call the open set $D_{\theta,c}(S)$ the asymptotic domain of $S$.
Next, let $S=\{\lambda_{n,k}\}_{n,k=1}^\infty$ be a double sequence of non-vanishing complex numbers with unique accumulation point at infinity, finite exponent $s_0={\mathsf e}(S)$ and genus $p={\mathsf g}(S)$. Assume if necessary that the elements of $S$ are ordered as $0<|\lambda_{1,1}|\leq|\lambda_{1,2}|\leq |\lambda_{2,1}|\leq \dots$. We use the notation $S_n$ ($S_k$) to denote the simple sequence with fixed $n$ ($k$). We call the exponents of $S_n$ and $S_k$ the relative exponents of $S$, and we use the notation $(s_0={\mathsf e}(S),s_1={\mathsf e}(S_k),s_2={\mathsf e}(S_n))$. We define relative genus accordingly.
\begin{defi} Let $S=\{\lambda_{n,k}\}_{n,k=1}^\infty$ be a double sequence with finite exponents $(s_0,s_1,s_2)$, genus $(p_0,p_1,p_2)$, and positive spectral sector $\Sigma_{\theta_0,c_0}$. Let $U=\{u_n\}_{n=1}^\infty$ be a totally regular sequence of spectral type of infinite order with exponent $r_0$, genus $q$, domain $D_{\phi,d}$. We say that $S$ is spectrally decomposable over $U$ with power $\kappa$, length $\ell$ and asymptotic domain $D_{\theta,c}$, with $c={\rm min}(c_0,d,c')$, $\theta={\rm max}(\theta_0,\phi,\theta')$, if there exist positive real numbers $\kappa$, $\ell$ (integer), $c'$, and $\theta'$, with $0< \theta'<\pi$, such that: \begin{enumerate} \item the sequence $u_n^{-\kappa}S_n=\left\{\frac{\lambda_{n,k}}{u^\kappa_n}\right\}_{k=1}^\infty$ has spectral sector $\Sigma_{\theta',c'}$, and is a totally regular sequence of spectral type of infinite order for each $n$; \item the logarithmic $\Gamma$-function associated to $S_n/u_n^\kappa$ has an asymptotic expansion for large $n$ uniformly in $\lambda$ for $\lambda$ in $D_{\theta,c}$, of the following form \begin{equation}\label{exp} \hspace{30pt}\log\Gamma(-\lambda,u_n^{-\kappa} S_n)=\sum_{h=0}^{\ell} \phi_{\sigma_h}(\lambda) u_n^{-\sigma_h}+\sum_{l=0}^{L} P_{\rho_l}(\lambda) u_n^{-\rho_l}\log u_n+o(u_n^{-r_0}), \end{equation} where $\sigma_h$ and $\rho_l$ are real numbers with $\sigma_0<\dots <\sigma_\ell$, $\rho_0<\dots <\rho_L$, the $P_{\rho_l}(\lambda)$ are polynomials in $\lambda$ satisfying the condition $P_{\rho_l}(0)=0$, $\ell$ and $L$ are the larger integers such that $\sigma_\ell\leq r_0$ and $\rho_L\leq r_0$.
\end{enumerate} \label{spdec} \end{defi}
When a double sequence $S$ is spectrally decomposable over a simple sequence $U$, Theorem 3.9 of \cite{Spr9} gives a formula for the derivative of the associated zeta function at zero. In order to understand such a formula, we need to introduce some other quantities. First, we define the functions \begin{equation}\label{fi1} \Phi_{\sigma_h}(s)=\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda} \phi_{\sigma_h}(\lambda) d\lambda dt. \end{equation}
Next, by Lemma 3.3 of \cite{Spr9}, for all $n$, we have the expansions: \begin{equation}\label{form}\begin{aligned} \log\Gamma(-\lambda,S_n/{u_n^\kappa})&\sim\sum_{j=0}^\infty a_{\alpha_j,0,n} (-\lambda)^{\alpha_j}+\sum_{k=0}^{p_2} a_{k,1,n}(-\lambda)^k\log(-\lambda),\\ \phi_{\sigma_h}(\lambda)&\sim\sum_{j=0}^\infty b_{\sigma_h,\alpha_j,0} (-\lambda)^{\alpha_j}+\sum_{k=0}^{p_2} b_{\sigma_h,k,1}(-\lambda)^k\log(-\lambda), \end{aligned} \end{equation} for large $\lambda$ in $D_{\theta,c}$. We set (see Lemma 3.5 of \cite{Spr9}) \begin{equation}\label{fi2} \begin{aligned} A_{0,0}(s)&=\sum_{n=1}^\infty \left(a_{0, 0,n} -\sum_{h=0}^\ell b_{\sigma_h,0,0}u_n^{-\sigma_h}\right)u_n^{-\kappa s},\\ A_{j,1}(s)&=\sum_{n=1}^\infty \left(a_{j, 1,n} -\sum_{h=0}^\ell b_{\sigma_h,j,1}u_n^{-\sigma_h}\right)u_n^{-\kappa s}, ~~~0\leq j\leq p_2. \end{aligned} \end{equation}
We can now state the formula for the derivative at zero of the double zeta function. We give here a modified version of Theorem 3.9 of \cite{Spr9}, more suitable for our purpose here. This is based on the following fact. The key point in the proof of Theorem 3.9 of \cite{Spr9} is the decomposition given in Lemma 3.5 of that paper of the sum \[ \mathcal{T}(s,\lambda, S,U)=\sum_{n=1}^\infty u_n^{-\kappa s} \log\Gamma(-\lambda, u_n^{-\kappa}S_n), \] in two terms: the regular part $\mathcal{P}(s,\lambda,S,U)$ and the remaining singular part. The regular part is obtained subtracting from ${\mathcal{T}}$ some terms constructed starting from the expansion of the logarithmic Gamma function given in equation (\ref{exp}), namely \[ {\mathcal{P}}(s,\lambda,S,u)={\mathcal{T}}(s,\lambda, S,U)-\sum_{h=0}^{\ell} \phi_{\sigma_h}(\lambda) u_n^{-\sigma_h}-\sum_{l=0}^{L} P_{\rho_l}(\lambda)u_n^{-\rho_l}\log u_n. \]
Now, assume instead we subtract only the terms such that the zeta function $\zeta(s,U)$ has a pole at $s=\sigma_h$ or at $s=\rho_l$. Let $\hat {\mathcal{P}}(s,\lambda, S,U)$ be the resulting function. Then the same argument as the one used in Section 3 of \cite{Spr9} in order to prove Theorem 3.9 applies, and we obtain similar formulas for the values of the residue, and of the finite part of the zeta function $\zeta(s,S)$ and of its derivative at zero, with just two differences: first, in all the sums, all the terms with index $\sigma_h$ such that $s=\sigma_h$ is not a pole of $\zeta(s,U)$ must be omitted; and second, we must substitute the terms $A_{0,0}(0)$ and $A_{0,1}'(0)$, with the finite parts $\Rz_{s=0}A_{0,0}(s)$, and $\Rz_{s=0}A_{0,1}'(s)$. The first modification is an obvious consequence of the substitution of the function ${\mathcal{P}}$ by the function $\hat {\mathcal{P}}$. The second modification follows for the same reason, noting that the functions $A_{\alpha_j,k}(s)$ defined in Lemma 3.5 of \cite{Spr9} are no longer regular at $s=0$ themselves. However, they both admit a meromorphic extension regular at $s=0$, using the extension of the zeta function $\zeta(s,U)$, and the expansion of the coefficients $a_{\alpha_j,k,n}$ for large $n$. Thus we have the following result.
\begin{theo} \label{tt} The formulas of Theorem 3.9 of \cite{Spr9} hold if all the quantities with index $\sigma_h$ such that the zeta function $\zeta(s,U)$ does not have a pole at $s=\sigma_h$ are omitted. In such a case, the result must be read by means of the analytic extension of the zeta function $\zeta(s,U)$. \end{theo}
Next, assuming some simplified pole structure for the zeta function $\zeta(s,U)$, sufficient for the present analysis, we state the main result of this section.
\begin{theo} \label{t4} Let $S$ be spectrally decomposable over $U$ as in Definition \ref{spdec}. Assume that the functions $\Phi_{\sigma_h}(s)$ have at most simple poles for $s=0$. Then, $\zeta(s,S)$ is regular at $s=0$, and \begin{align*} \zeta(0,S)=&-A_{0,1}(0)+\frac{1}{\kappa}{\sum_{h=0}^\ell} \Ru_{s=0}\Phi_{\sigma_h}(s)\Ru_{s=\sigma_h}\zeta(s,U),\\ \zeta'(0,S)=&-A_{0,0}(0)-A_{0,1}'(0)+\frac{\gamma}{\kappa}\sum_{h=0}^\ell\Ru_{s=0}\Phi_{\sigma_h}(s)\Ru_{s=\sigma_h}\zeta(s,U)\\ &+\frac{1}{\kappa}\sum_{h=0}^\ell\Rz_{s=0}\Phi_{\sigma_h}(s)\Ru_{s=\sigma_h}\zeta(s,U)+{\sum_{h=0}^\ell}{^{\displaystyle '}}\Ru_{s=0}\Phi_{\sigma_h}(s)\Rz_{s=\sigma_h}\zeta(s,U), \end{align*} where the notation $\sum'$ means that only the terms such that $\zeta(s,U)$ has a pole at $s=\sigma_h$ appear in the sum.
\end{theo}
This result should be compared with the Spectral Decomposition Lemma of \cite{Spr5} and Proposition 1 of \cite{Spr6}.
\begin{corol} \label{c} Let $S_{(j)}=\{\lambda_{(j),n,k}\}_{n,k=1}^\infty$, $j=1,2$, be two double sequences that satisfy all the requirements of Definition \ref{spdec} of spectral decomposability over a common sequence $U$, with the same parameters $\kappa$, $\ell$, etc., except that the polynomials $P_{(j),\rho}(\lambda)$ appearing in condition (2) do not vanish for $\lambda=0$. Assume that the difference of such polynomials does satisfy this condition, namely that $P_{(1),\rho}(0)-P_{(2),\rho}(0)=0$. Then, the difference of the zeta functions $\zeta(s,S_{(1)})-\zeta(s,S_{(2)})$ is regular at $s=0$ and satisfies the formulas given in Theorem \ref{t4}. \end{corol}
We conclude this section by recalling some results on zeta determinants of some simple sequences that will be necessary in the following. These results can be found in different places, and are known to specialists. We will use the formulation of \cite{Spr1}. For positive real numbers $l$ and $q$, define the non homogeneous quadratic Bessel zeta function by \[ z(s,\nu,q,l)=\sum_{k=1}^\infty \left(\frac{j_{\nu,k}^2}{l^2}+q^2\right)^{-s}, \] for ${\rm Re}(s)>\frac{1}{2}$. Then, $z(s,\nu,q,l)$ extends analytically to a meromorphic function in the complex plane with simple poles at $s=\frac{1}{2}, -\frac{1}{2}, -\frac{3}{2}, \dots$. The point $s=0$ is a regular point and \begin{equation}\label{p00} \begin{aligned} z(0,\nu,q,l)&=-\frac{1}{2}\left(\nu+\frac{1}{2}\right),\\ z'(0,\nu,q,l)&=-\log\sqrt{2\pi l}\frac{I_\nu(lq)}{q^\nu}. \end{aligned} \end{equation}
In particular, taking the limit for $q\to 0$, \[ z'(0,\nu,0,l)=-\log\frac{\sqrt{\pi}l^{\nu+\frac{1}{2}}}{2^{\nu-\frac{1}{2}}\Gamma(\nu+1)}. \]
\section{The analytic torsion } \label{s4}
In this section we give the analytic torsions of $C_\alpha S^n_{l\sin\alpha}$, for $n=1,2$, and $3$. Actually, the case $n=1$ is essentially contained in \cite{Spr6}, and both the cases $n=1$ and $n=2$ are given in \cite{HMS}, Sections 5.4 and 5.5, so we will focus here on the new case of $C_\alpha S^3_{l\sin\alpha}$.
By the analysis in Section \ref{s3}, the relevant zeta functions are \begin{align*} \zeta(s,\Delta^{(1)})&=\sum_{k=1}^\infty \frac{j_{2,k}^{-2s}}{l^{-2s}}+2\sum_{n,k=1}^\infty n(n+2) \frac{(j'_{\mu_{1,n},k})^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,-}^{-2s}}{l^{-2s}}\\ &+\sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},\\ \zeta(s,\Delta^{(2)})&= \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,+}^{-2s}}{l^{-2s}} + 2\sum_{n,k=1}^{\infty} n(n+2) \frac{(j'_{\mu_{1,n},k})^{-2s}}{l^{-2s}} \\ &+2 \sum_{n,k=1}^{\infty} n(n+2) \frac{j_{\mu_{1,n},k}^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},\\ \zeta(s,\Delta^{(3)})&=\sum_{k=1}^\infty \frac{j_{1,k}^{-2s}}{l^{-2s}}+2\sum_{n,k=1}^\infty n(n+2) \frac{j_{\mu_{1,n},k}^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,+}^{-2s}}{l^{-2s}}\\ &+ \sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},\\ \zeta(s,\Delta^{(4)})&=\sum_{k=1}^{\infty} \frac{j_{1,k}^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}}, \end{align*} and by equation (\ref{analytic}), the torsion is ($a=\sin\alpha=\frac{1}{\nu}$) \begin{align*} \log T(C_\alpha S^3_{la})&=-\frac{1}{2}\zeta'(0,\Delta^{(1)}) +\zeta'(0,\Delta^{(2)})-\frac{3}{2}\zeta'(0,\Delta^{(3)})+2\zeta'(0,\Delta^{(4)}). \end{align*}
Define the function \begin{align*} t(s)=&-\frac{1}{2}\zeta(s,\Delta^{(1)}) +\zeta(s,\Delta^{(2)})-\frac{3}{2}\zeta(s,\Delta^{(3)})+2\zeta(s,\Delta^{(4)})\\ =&\frac{1}{2} \sum_{k=1}^{\infty} \frac{j_{1,k}^{-2s}}{l^{-2s}}-\frac{1}{2} \sum_{k=1}^\infty \frac{j_{2,k}^{-2s}}{l^{-2s}}\\ & + \sum_{n,k=1}^\infty n(n+2) \frac{(j'_{\mu_{1,n},k})^{-2s}}{l^{-2s}} - \sum_{n,k=1}^{\infty} n(n+2) \frac{j_{\mu_{1,n},k}^{-2s}}{l^{-2s}}\\ &+ \sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}} -\frac{1}{2}\sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,-}^{-2s}}{l^{-2s}} -\frac{1}{2} \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,+}^{-2s}}{l^{-2s}}\\ =& l^{2s}\left(\frac{1}{2}z_{1}(s) - \frac{1}{2}z_{2}(s) + \hat{Z}(s) -Z(s) +Z_0(s) -\frac{1}{2}Z_+(s) - \frac{1}{2}Z_-(s) \right) , \end{align*} then \begin{align*} \log T(C_\alpha S^3_{la})=t'(0)=&\frac{1}{2}z'_{1}(0) - \frac{1}{2}z'_{2}(0) + \hat{Z}'(0) -Z'(0) +Z'_0(0)\\ &-\frac{1}{2}Z'_+(0)- \frac{1}{2}Z'_-(0) + \log l^2 \left(\frac{1}{2}z_{1}(0) - \frac{1}{2}z_{2}(0)\right.\\ & \left.+\hat{Z}(0) -Z(0) +Z_0(0) -\frac{1}{2}Z_+(0) - \frac{1}{2}Z_-(0)\right). \end{align*}
Using equation (\ref{p00}) of Section \ref{s3}, we compute $z_{1/2}(0)$ and $z'_{1/2}(0)$. We obtain
\begin{equation}\label{ttt} \begin{aligned} \log T(C_\alpha S^3_{la})&= \left(\frac{1}{4} + \hat{Z}(0) -Z(0) +Z_0(0) -\frac{1}{2}Z_+(0) - \frac{1}{2}Z_-(0)\right)\log l^2\\ &+ \left(-\log 2 + \hat{Z}'(0) -Z'(0) +Z'_0(0) -\frac{1}{2}Z'_+(0) - \frac{1}{2}Z'_-(0) \right). \end{aligned} \end{equation}
In order to evaluate the remaining part, we use Corollary \ref{c} of Theorem \ref{t4}. We consider separately the two functions $Z(s)-\hat Z(s)$, and $2Z_0(s)-Z_+(s)-Z_-(s)$. In the first case, the relevant sequences are the double sequences $S=\{n(n+2):j_{\mu_{1,n},k}^2\}$ and $\hat S=\{n(n+2):(j_{\mu_{1,n},k}')^2\}$, and the simple sequence $U_1=\{n(n+2):\mu_{1,n}\}_{n=1}^\infty$, and $Z(s)=\zeta(s,S)$, $\hat Z(s)=\zeta(s,\hat S)$. In the second case, the relevant sequences are the double sequences $S_0=\{(n+1)^2:j_{\mu_{0,n},k}^2\}$ and $S_\pm=\{(n+1)^2:(\tilde j_{\mu_{0,n},k,\pm})^2\}$, and the simple sequence $U_0=\{(n+1)^2:\mu_{0,n}\}_{n=1}^\infty$, and $Z_0(s)=\zeta(s,S_0)$, $Z_\pm(s)=\zeta(s, S_\pm)$.
We start by analysing the two simple sequences $U_j$, $j=0,1$. Recall from Lemma \ref{eig3}, that \[ \mu_{0,n}=\sqrt{\nu^2 n(n+2)+1}, \qquad \mu_{1,n} = \nu(n+1). \]
Consider first the sequence $U_1=\{n(n+2):\mu_{1,n}\}_{n=1}^\infty$. By definition of $\mu_{1,n}$, it is easy to see that \[ \zeta(s,U_1)=\nu^{-s}\left(\zeta_R(s-2)-\zeta_R(s)\right), \] and therefore $U_1$ is a totally regular sequence of spectral type with infinite order, ${\mathsf e}(U_1)={\mathsf g}(U_1)=3$, and $\zeta(s,U_1)$ has simple poles at $s=1$ and $s=3$ with residues: \begin{equation}\label{rrr1}\begin{aligned} &\Rz_{s=1}\zeta(s,U_1)=\frac{1}{\nu}\left(\log\nu-\gamma-\frac{1}{12}\right),&\Ru_{s=1}\zeta(s,U_1)=-\frac{1}{\nu},\\ &\Rz_{s=3}\zeta(s,U_1)=\frac{1}{\nu^3}\left(\gamma-\log\nu-\zeta(3)\right),&\Ru_{s=3}\zeta(s,U_1)=\frac{1}{\nu^3}.\\ \end{aligned} \end{equation}
The analysis for the sequence $U_0$ is a little bit longer. By definition $U_0=\{(n+1)^2:\mu_{0,n}\}_{n=1}^\infty$, where \[ \mu_{0,n}=\sqrt{\nu^2 n(n+2)+1}. \]
For a positive $q$, consider the sequence \[ L_q=\{(n+1)^2:\sqrt{n(n+2)+q}\}_{n=1}^\infty. \]
Then, it is clear that \[ \zeta(s,U_0)=\nu^{-s}\zeta(s,L_{\frac{1}{\nu^2}}). \]
The sequence $L_0$ is the sequence of the square roots of the positive eigenvalues of the Laplace operator on the three sphere $S^3$ of radius 1 (see \cite{Spr0}, and references therein). Thus, \[ \zeta(2s,L_0)=\zeta(s,{\rm Sp}_+\Delta_{S^3}). \]
The zeta function $\zeta(s,{\rm Sp}_+\Delta_{S^3})$ has been studied by various authors. We will refer to \cite{Spr0}. Using the results in \cite{Spr0}, it follows that ${\mathsf e}({\rm Sp}_+\Delta^{(0)}_{S^3})=\frac{3}{2}$, ${\mathsf g}({\rm Sp}_+ \Delta^{(0)}_{S^3})=1$, and that ${\rm Sp}_+ \Delta^{(0)}_{S^3}$ is a totally regular sequence of spectral type with infinite order. Since shifting the sequence does not alter its character (see \cite{Spr4}), it follows that ${\mathsf e}(U_0)={\mathsf g}(U_0)=3$, and that $U_0$ is a totally regular sequence of spectral type with infinite order. In \cite{Spr0}, it is also proved that $\zeta(s,{\rm Sp}_+\Delta_{S^3})$ has simple poles at $s=\frac{3}{2},\frac{1}{2},-\frac{j}{2}$, for all $j>0$, and formulas for the residues are given. In particular: \begin{align*} &\Ru_{s=\frac{3}{2}}\zeta(s,{\rm Sp}_+ \Delta^{(0)}_{S^3})=\frac{1}{2},&\Ru_{s=\frac{1}{2}}\zeta(s,{\rm Sp}_+ \Delta^{(0)}_{S^3})=\frac{1}{4},\\ \end{align*} and hence, $\zeta(s,L_0)$ has simple poles at $s=1$ and $s=3$ with the residues: \begin{align*} &\Ru_{s=3}\zeta(s,L_0)=1,&\Ru_{s=1}\zeta(s,L_0)=\frac{1}{2}.\\ \end{align*}
Expanding the power of the binomial, we have that \begin{align*} \zeta(s,L_q)&=\zeta(s,L_0)-\frac{s}{2}\zeta(s+2,L_0)q+\sum_{j=2}^\infty \binom{-\frac{s}{2}}{j}\zeta(s+2j,L_0)q^j, \end{align*} and therefore, \begin{align*} &\Ru_{s=1}\zeta(s,L_q)=\frac{1}{2}(1-q),&\Ru_{s=3}\zeta(s,L_q)=1,\\ \end{align*} and we have the expansions \begin{equation}\label{resz1} \begin{aligned} \zeta(s,U_0)&=\nu^{-s}\zeta(s,L_q)=\frac{1}{2\nu}\left(1-\frac{1}{\nu^2}\right)\frac{1}{s-1}+K_1(s),& {\rm near}~s=1,\\ \zeta(s,U_0)&=\nu^{-s}\zeta(s,L_q)=\frac{1}{\nu^3}\frac{1}{s-3}+K_3(s),& {\rm near}~s=3,\\ \end{aligned} \end{equation} where the $K_j(s)$ are some regular functions.
Next, we start the analysis of the double sequences. We split it into two parts.
\subsection{Part I} In this first part we deal with $Z(s)-\hat Z(s)$. Thus, we consider the sequences $S$ and $\hat S$. Using classical estimates for the zeros of Bessel function \cite{Wat}, we find that ${\mathsf e}(S)={\mathsf e}(\hat S)=2$, and the relative genus are $(2,0,1)$ for both sequences. The fact that $S_n$ and $\hat S_n$ are totally regular sequences of spectral type with infinite order, will be a consequence of the following analysis. Note that we have the product representations (the first is classical, see for example \cite{Wat}, the second follows using the Hadamard factorization theorem)
\begin{align*} I_\nu(z)&=\frac{z^\nu}{2^\nu\Gamma(\nu+1)}\prod_{k=1}^\infty \left(1+\frac{z^2}{j_{\nu,k}^2}\right),\\ I_\nu'(z)&=\frac{z^{\nu-1}}{2^\nu\Gamma(\nu)}\prod_{k=1}^\infty \left(1+\frac{z^2}{(j_{\nu,k}')^2}\right).\\ \end{align*}
Using these representations, we obtain the following representations for the Gamma functions associated to the sequences $S_n$ and $\hat S_n$. For further use, we give instead the representations for the Gamma functions associated to the sequences $S_n/\mu_{1,n}^2$, and $\hat S_n/\mu_{1,n}^2$, that will do as well. By the definition in equation (\ref{gamma}), with $z=\sqrt{-\lambda}$, we have \begin{align*} \log \Gamma(-\lambda,S_n/(\mu_{1,n})^2)=&-\log\prod_{k=1}^\infty \left(1+\frac{(-\lambda)(\mu_{1,n})^2}{j_{\mu_{1,n},k}^2}\right)\\ =&-\log I_{\mu_{1,n}}(\mu_{1,n}\sqrt{-\lambda})+(\mu_{1,n})\log\sqrt{-\lambda} \\ &+\mu_{1,n}\log (\mu_{1,n})-\mu_{1,n}\log 2-\log\Gamma(\mu_{1,n}+1),\\ \log \Gamma(-\lambda,\hat S_n/(\mu_{1,n})^2)=&-\log\prod_{k=1}^\infty \left(1+\frac{(-\lambda)(\mu_{1,n})^2}{(j_{\mu_{1,n},k}')^2}\right)\\ =&-\log I'_{\mu_{1,n}}(\mu_{1,n}\sqrt{-\lambda})+(\mu_{1,n}-1)\log\sqrt{-\lambda} \\ &+\mu_{1,n}\log (\mu_{1,n})-\mu_{1,n}\log 2-\log\Gamma(\mu_{1,n}). \end{align*}
A first consequence of these representations is that we have a complete asymptotic expansion of the Gamma functions $\log \Gamma(-\lambda,S_n)$, and $\log \Gamma(-\lambda,\hat S_n)$, and therefore $S_n$ and $\hat S_n$ are sequences of spectral type. Considering the expansions, it follows that they are both totally regular sequences of infinite order.
Next, we prove that $S$ and $\hat S$ are spectrally decomposable over $U_1$ with power $\kappa=2$ and length $\ell=4$, as in Definition \ref{spdec}. We have to show that the functions $\log \Gamma(-\lambda,S_n/\mu_{1,n}^2)$, and $\log \Gamma(-\lambda,\hat S_n/\mu_{1,n}^2)$ have the appropriate uniform expansions for large $n$. This follows using the uniform expansions for the Bessel functions given for example in \cite{Olv} (7.18), and Ex. 7.2, \[ I_{\nu}(\nu z)=\frac{\e^{\nu\sqrt{1+z^2}}\e^{\nu\log\frac{z}{1+\sqrt{1+z^2}}}}{\sqrt{2\pi \nu}(1+z^2)^\frac{1}{4}}\left(1+U_1(z)\frac{1}{\nu}+U_2(z)\frac{1}{\nu^2}+U_{3}(z)\frac{1}{\nu^{3}}+O(\frac{1}{\nu^4})\right), \] where \begin{align*} U_1(z)=&\frac{1}{8\sqrt{1+z^2}}-\frac{5}{24(1+z^2)^\frac{3}{2}},\\ U_2(z)=&\frac{9}{128(1+z^2)}-\frac{77}{192(1+z^2)^2}+\frac{385}{1152(1+z^2)^3},\\ U_3(z)=&\frac{75}{1024(1+z^2)^{\frac{3}{2}}} - \frac{4563}{5120(1+z^2)^{\frac{5}{2}}}+ \frac{17017}{9216(1+z^2)^{\frac{7}{2}}}-\frac{85085}{82944(1+z^2)^{\frac{9}{2}}}, \end{align*} and \[ I_{\nu}'(\nu z)=\frac{(1+z^2)^\frac{1}{4}\e^{\nu\sqrt{1+z^2}}\e^{\nu\log\frac{z}{1+\sqrt{1+z^2}}}}{\sqrt{2\pi \nu}z}\left(1+V_1(z)\frac{1}{\nu}+V_2(z)\frac{1}{\nu^2}+\dots+O(\frac{1}{\nu^4})\right), \] \begin{align*} V_1(z)=&-\frac{3}{8\sqrt{1+z^2}}+\frac{7}{24(1+z^2)^\frac{3}{2}},\\ V_2(z)=&-\frac{15}{128(1+z^2)}+ \frac{33}{64(1+z^2)^2} - \frac{455}{1152(1+z^2)^3},\\ V_3(z)=&-\frac{105}{1024(1+z^2)^{\frac{3}{2}}} + \frac{5577}{5120(1+z^2)^{\frac{5}{2}}}-\frac{6545}{3072(1+z^2)^{\frac{7}{2}}}+\frac{95095}{82944(1+z^2)^{\frac{9}{2}}}. \end{align*}
Using the classical expansion for the logarithm of the Euler Gamma function \cite{GZ} 8.344, we obtain, for large $n$, uniformly in $\lambda$, the expansion of $\log \Gamma(-\lambda,\hat S_n/\mu_{1,n}^2)$ and of $\log \Gamma(-\lambda, S_n/\mu_{1,n}^2)$, and consequently of the difference
\begin{align*} \log \Gamma(-\lambda,\hat S_n/\mu_{1,n}^2) &- \log \Gamma(-\lambda, S_n/\mu_{1,n}^2) = \sum_{h=0}^\infty \left( \hat\phi_{h-1}(\lambda) - \phi_{h-1} (\lambda) \right) \mu_{1,n}^{1-h}\\ &= -\frac{1}{2} \log(1-\lambda) -\frac{1}{2} \log \lambda + \left(\hat \phi_1(\lambda) - \phi_1(\lambda)\right)\frac{1}{\mu_{1,n}}\\ &+ \left(\hat \phi_2(\lambda) - \phi_2(\lambda)\right)\frac{1}{\mu_{1,n}^2} + \left(\hat \phi_3(\lambda) - \phi_3(\lambda)\right)\frac{1}{\mu_{1,n}^3} +O\left(\frac{1}{\mu_{1,n}^{4}}\right) \end{align*} with \begin{align*} \hat\phi_1(\lambda)-\phi_1(\lambda)&=\frac{1}{2}\frac{1}{(1-\lambda)^\frac{1}{2}}-\frac{1}{2}\frac{1}{(1-\lambda)^\frac{3}{2}},\\ \hat\phi_2(\lambda)-\phi_2(\lambda)&=\frac{1}{4}\frac{1}{(1-\lambda)}- \frac{1}{(1-\lambda)^{2}}-\frac{3}{4} \frac{1}{(1-\lambda)^3},\\ \hat\phi_3(\lambda)-\phi_3(\lambda)&=\frac{11}{48}\frac{1}{(1-\lambda)^\frac{3}{2}}-\frac{35}{16}\frac{1}{(1-\lambda)^\frac{5}{2}} +\frac{67}{16}\frac{1}{(1-\lambda)^\frac{7}{2}}-\frac{107}{48}\frac{1}{(1-\lambda)^\frac{9}{2}}.\\ \end{align*}
Note that the length $\ell$ of the decomposition is precisely $4$. Indeed, ${\mathsf e}(U_1)=3$, and therefore the largest integer $h$ such that $h-1=\sigma_h\leq 3$ is $4$. However, note that by Theorem \ref{tt}, only the terms with $\sigma_h=1$ and $\sigma_h=3$, namely $h=2,4$, appear in the formula of Theorem \ref{t4}, since the unique poles of $\zeta(s,U_1)$ are at $s=1$ and $s=3$. We now apply the formulas of Theorem \ref{t4}.
First, by the definition in equation (\ref{fi1}), \begin{align*} \hat \Phi_1(s) - \Phi_1(s)=&\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda} \left(\frac{1}{2}\frac{1}{(1-\lambda)^\frac{1}{2}}-\frac{1}{2}\frac{1}{(1-\lambda)^\frac{3}{2}}\right) d\lambda dt,\\ \hat \Phi_2(s) - \Phi_2(s)=&\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda} \left(\frac{1}{4}\frac{1}{(1-\lambda)}- \frac{1}{(1-\lambda)^{2}}-\frac{3}{4} \frac{1}{(1-\lambda)^3}\right)\hspace{-3pt} d\lambda dt,\\ \hat \Phi_3(s) - \Phi_3(s)=&\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda} \left(\frac{11}{48}\frac{1}{(1-\lambda)^\frac{3}{2}}-\frac{35}{16}\frac{1}{(1-\lambda)^\frac{5}{2}}\right) d\lambda dt\\ &+\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda} \left(\frac{67}{16}\frac{1}{(1-\lambda)^\frac{7}{2}}-\frac{107}{48}\frac{1}{(1-\lambda)^\frac{9}{2}}\right) d\lambda dt.\\ \end{align*}
These integrals can be computed using the formula in Appendix \ref{appendixA}. We obtain \begin{align*} \Rz_{s=0}\left(\hat \Phi_1(s) - \Phi_1(s)\right)&=-1 ,&\Ru_{s=0}\left(\hat\Phi_1(s) - \Phi_1(s)\right)&=0,\\ \Rz_{s=0}\left(\hat \Phi_2(s) - \Phi_2(s)\right)&=\frac{1}{8} ,&\Ru_{s=0}\left(\hat\Phi_2(s) - \Phi_2(s)\right)&=0,\\ \Rz_{s=0}\left(\hat \Phi_3(s) - \Phi_3(s)\right)&=-\frac{2}{315} ,&\Ru_{s=0}\left(\hat\Phi_3(s) - \Phi_3(s)\right)&=0. \end{align*}
Second, using these results and the residues of $\zeta(s,U_1)$ given by equation (\ref{rrr1}), it follows that \begin{equation}\label{p1} \begin{aligned}
\hat Z(0)- Z(0)=&-\hat A_{0,1}(0)+ A_{0,1}(0)+\frac{1}{2}\Ru_{s=1}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_1(s)-\Phi_1(s))\\ &+\frac{1}{2}\Ru_{s=3}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_3(s)-\Phi_3(s)),\\ =&-\hat A_{0,1}(0)+ A_{0,1}(0), \end{aligned} \end{equation} and \begin{equation}\label{p2} \begin{aligned}
\hat Z'(0)-Z'(0)=&-\hat A_{0,0}(0)-\hat A_{0,1}'(0)+ A_{0,0}(0)+ A_{0,1}'(0)\\ &+\frac{1}{2}\Rz_{s=1}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_1(s)-\Phi_1(s))\\ &+\frac{1}{2}\Rz_{s=3}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_3(s)-\Phi_3(s)),\\ =&-\hat A_{0,1}(0)+ A_{0,1}(0)+\frac{1}{2\nu}-\frac{1}{315\nu^3}. \end{aligned} \end{equation}
Third, by equation (\ref{fi2}) and Theorem \ref{tt}, the terms $A_{0,0}(0)$ and $A'_{0,1}(0)$, are \begin{align*} A_{0,0}(s)&=\sum_{n=1}^\infty \left(a_{0, 0,n} -b_{1,0,0}u_n^{-1}-b_{3,0,0}u_n^{-3}\right)u_n^{-2 s},\\ A_{0,1}(s)&=\sum_{n=1}^\infty \left(a_{0, 1,n} -b_{1,0,1}u_n^{-1}-b_{3,0,1}u_n^{-3}\right)u_n^{-2 s}. \end{align*}
Hence, we need the expansion for large $\lambda$ of the functions $\log\Gamma(-\lambda,\hat S_n/\mu_{1,n}^2)$, $\hat\phi_{1}(\lambda)$, $\hat\phi_{3}(\lambda)$, $\log\Gamma(-\lambda, S_n/\mu_{1,n}^2)$, $\phi_{1}(\lambda)$ and $\phi_{3}(\lambda)$. Using classical expansions for the Bessel functions and their derivative and the formulas in equation (\ref{form}), we obtain \begin{align*} a_{0,0,n}&=\frac{1}{2}\log 2\pi+\left(\mu_{1,n}+\frac{1}{2}\right)\log\mu_{1,n}-\mu_{1,n}\log 2-\log\Gamma(\mu_{1,n}+1),\\ a_{0,1,n}&=\frac{1}{2}\left(\mu_{1,n}+\frac{1}{2}\right),\\ b_{1,0,0}&=-\frac{1}{12},\hspace{50pt} b_{3,0,0} = \frac{1}{360}, \hspace{50pt}b_{1,0,1}=b_{3,0,1}=0, \end{align*} and \begin{align*} \hat a_{0,0,n}&=\frac{1}{2}\log 2\pi+\left(\mu_{1,n}+\frac{1}{2}\right)\log\mu_{1,n}-\mu_{1,n}\log 2-\log\Gamma(\mu_{1,n}+1),\\ \hat a_{0,1,n}&=\frac{1}{2}\left(\mu_{1,n}-\frac{1}{2}\right),\\ \hat b_{1,0,0}&=-\frac{1}{12}, \hspace{50pt}\hat b_{3,0,0} = \frac{1}{360}, \hspace{50pt}\hat b_{1,0,1}=\hat b_{3,0,1}=0. \end{align*}
This shows that $A_{0,0}(0)=\hat A_{0,0}(0)$, and that \[ \hat A_{0,1}(s)- A_{0,1}(s)=-\frac{1}{2}\sum_{n=1}^\infty n(n+2)\mu_{1,n}^{-2s}=-\frac{1}{2}\zeta(2s, U_1). \]
Thus, \begin{align*} \hat A_{0,1}(0)- A_{0,1}(0)&=-\frac{1}{4},\\ \hat A'_{0,1}(0)- A'_{0,1}(0)&=\frac{1}{2}\log\nu-\zeta'(-2)-\frac{1}{2}\log 2\pi. \end{align*}
Substitution in equations (\ref{p1}) and (\ref{p2}), gives \begin{align*} \hat Z(0)- Z(0)=&\frac{1}{4},\\ \hat Z'(0)-Z'(0)=&-\frac{1}{2}\log\nu+\zeta'(-2)+\frac{1}{2}\log 2\pi+\frac{1}{2\nu}-\frac{1}{315\nu^3}. \end{align*}
\subsection{Part II} In this second part we deal with $2Z_0(s)-Z_+(s)-Z_-(s)$. Thus, we consider the sequences $S_0$ and $S_\pm$. The sequence $S_0$ is analogous to the sequence $S$ analyzed in the previous part. We have that \begin{align*} \log \Gamma(-\lambda,S_{0,n}/\mu_{0,n}^2)=&-\log I_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})+\mu_{0,n}\log\sqrt{-\lambda}+\mu_{0,n} \log\mu_{0,n}\\ &-\mu_{0,n}\log2 -\log\Gamma(\mu_{0,n}) - \log \mu_{0,n}. \end{align*}
Using the uniform expansion of $\log I_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})$, we obtain the uniform expansion for large $n$: \begin{align*} \log &\Gamma(-\lambda, S_{0,n}/\mu_{0,n}^2) \\ &= \sum_{h=0}^\infty \phi_{h-1,0} (\lambda) \mu_{0,n}^{1-h}\\ &= \left( - \sqrt{1-\lambda} + \log(1+\sqrt{1-\lambda}) - \log 2 + 1 - \log \sqrt{-\lambda} \right)\mu_{0,n}\\ &+ \frac{1}{4}\log(1-\lambda) + \left( - U_1(\sqrt{-\lambda})-\frac{1}{12}\right)\frac{1}{\mu_{0,n}}\\ &+ \left( - U_2(\sqrt{-\lambda})+\frac{1}{2}U_1(\sqrt{-\lambda})^2\right) \frac{1}{\mu_{0,n}^2}\\ &+ \left( - U_3(\sqrt{-\lambda})+U_1(\sqrt{-\lambda})U_2(\sqrt{-\lambda})-\frac{1}{3}U_1(\sqrt{-\lambda})^3+\frac{1}{360}\right)\frac{1}{\mu_{0,n}^3} + O\left(\frac{1}{\mu_{0,n}^{4}}\right)\hspace{-1.5pt}, \end{align*} and hence \begin{align*} \phi_{1,0}(\lambda)&=-\frac{1}{8} \frac{1}{(1-\lambda)^{\frac{1}{2}}} + \frac{5}{24}\frac{1}{(1-\lambda)^{\frac{3}{2}}}-\frac{1}{12},\\ \phi_{2,0}(\lambda)&=-\frac{1}{16} \frac{1}{(1-\lambda)}+\frac{3}{8} \frac{1}{(1-\lambda)^{2}}-\frac{5}{16} \frac{1}{(1-\lambda)^3},\\ \phi_{3,0}(\lambda)&=-\frac{25}{384} \frac{1}{(1-\lambda)^{\frac{3}{2}}}+\frac{531}{640} \frac{1}{(1-\lambda)^{\frac{5}{2}}} -\frac{221}{128}\frac{1}{(1-\lambda)^{\frac{7}{2}}}+\frac{1105}{1152}\frac{1}{(1-\lambda)^{\frac{9}{2}}}+\frac{1}{360}. \end{align*}
Using the expansion of $\log I_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})$, and that of the $\phi_{j,0}(\lambda)$ for large $\lambda$, and the definitions in equations (\ref{form}), we compute
\begin{align*} a_{0,0,n,0}&=\frac{1}{2}\log 2\pi+\left(\mu_{0,n}+\frac{1}{2}\right)\log\mu_{0,n}-\mu_{0,n}\log 2-\log\Gamma(\mu_{0,n}+1),\\ a_{0,1,n,0}&=\frac{1}{2}\left(\mu_{0,n}+\frac{1}{2}\right),\\ b_{1,0,0,0}&=-\frac{1}{12}, \hspace{50pt}b_{3,0,0,0} = \frac{1}{360}, \hspace{50pt}b_{1,0,1,0}=b_{3,0,1,0}=0. \end{align*}
The analysis of the sequences $S_\pm$ needs more work. Let us define the functions \[ T^{\pm}_{\nu}(z)=\pm J_{\nu}(z)+zJ'_\nu(z). \]
Recalling the series definition of the Bessel function \[ J_\nu(z)=\frac{z^\nu}{2^\nu}\sum_{k=0}^\infty \frac{(-1)^kz^{2k}}{2^{2k}k!\Gamma(\nu+k+1)}, \] we obtain that near $z=0$ \[ T_\nu^\pm(z) =\left(1\pm\frac{1}{\nu}\right) \frac{z^\nu}{2^\nu\Gamma(\nu)}. \]
This means that the function $\hat T^\pm_\nu(z)=z^{-\nu} T^\pm_\nu(z)$ is an even function of $z$. Let $z_{\nu,k,\pm}$ be the positive zeros of $T^\pm_\nu(z)$ arranged in increasing order. By the Hadamard factorization theorem, we have the product expansion \[ \hat T^\pm_\nu(z)=\hat T^\pm_\nu(0){\prod_{k=-\infty}^{+\infty}}\left(1-\frac{z}{z_{\nu,k,\pm}}\right), \] and therefore \[ T^\pm_\nu(z)=\left(1\pm\frac{1}{\nu}\right)\frac{z^\nu}{2^\nu\Gamma(\nu)} \prod_{k=1}^{\infty}\left(1-\frac{z^2}{z^2_{\nu,k,\pm}}\right). \]
Next, recalling that (when $-\pi<\arg(z)<\frac{\pi}{2}$) \begin{align*} J_\nu(iz)&=\e^{\frac{\pi}{2}i\nu} I_\nu(z),\\ J'_\nu(iz)&=\e^{\frac{\pi}{2}i\nu}\e^{-\frac{\pi}{2}i} I'_\nu(z),\\ \end{align*} we obtain \[ T_\nu^\pm(iz)=\e^{\frac{\pi}{2}i\nu}\left(\pm I_\nu(z)+zI'_\nu(z)\right). \]
Thus, we define (for $-\pi<\arg(z)<\frac{\pi}{2}$) \begin{equation}\label{pop} Q^\pm_\nu(z)=\e^{-\frac{\pi}{2}i\nu}T_\nu^\pm(i z), \end{equation} and hence \begin{align*} Q^\pm_\nu(z)&=\pm I_\nu(z)+zI'_\nu(z)=\left(1\pm\frac{1}{\nu}\right)\frac{z^\nu}{2^\nu\Gamma(\nu)} \prod_{k=1}^{\infty}\left(1+\frac{z^2}{z^2_{\nu,k,\pm}}\right). \end{align*}
Using these representations, we obtain the following representations for the Gamma functions associated to the sequences $S_{\pm,n}$. By the definition in equation (\ref{gamma}), with $z=\sqrt{-\lambda}$, we have \begin{align*} \log \Gamma(-\lambda,S_{\pm,n})=&-\log\prod_{k=1}^\infty \left(1+\frac{(-\lambda)}{\tilde j_{\mu_{0,n},k,\pm}^2}\right)\\ =&-\log Q^\pm_{\mu_{0,n}}(\sqrt{-\lambda})+\mu_{0,n}\log\sqrt{-\lambda}\\ &-\mu_{0,n}\log 2-\log\Gamma(\mu_{0,n})+\log\left(1\pm\frac{1}{\mu_{0,n}}\right). \end{align*}
A first consequence of these representations is that we have a complete asymptotic expansion of the Gamma functions $\log \Gamma(-\lambda,S_{\pm,n})$, and therefore both $S_{+,n}$ and $S_{-,n}$ are sequences of spectral type. Considering the expansions, it follows that they are both totally regular sequences of infinite order.
Next, we prove that $S_\pm$ are spectrally decomposable over $U$ with power $\kappa=2$ and length $\ell=4$, as in Definition \ref{spdec}. We have to show that the functions $\log \Gamma(-\lambda,S_{\pm,n}/u_n^2)$, have the appropriate uniform expansions for large $n$. We have \begin{align*} \log \Gamma(-\lambda,S_{\pm,n}/\mu_{0,n}^2)=&-\log Q^\pm_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})+\mu_{0,n}\log\sqrt{-\lambda}+\mu_{0,n} \log\mu_{0,n}\\ &-\mu_{0,n}\log2 -\log\Gamma(\mu_{0,n})+\log\left(1\pm\frac{1}{\mu_{0,n}}\right). \end{align*}
Recalling the expansions given the previous part, we obtain \begin{align*} Q^\pm_\nu(\nu z) &=\sqrt{\nu}(1+z^2)^\frac{1}{4}\frac{\e^{\nu\sqrt{1+z^2}}\e^{\nu\log\frac{z}{1+\sqrt{1+z^2}}}}{\sqrt{2\pi }}\\ &\hspace{30pt}\left(1+W_{1,\pm}(z)\frac{1}{\nu}+W_{2,\pm}(z)\frac{1}{\nu^2}+W_{3,\pm}(z)\frac{1}{\nu^{3}} + O(\nu^{-4})\right), \end{align*} where $p=\frac{1}{\sqrt{1+z^2}}$, and \begin{align*} W_{1,\pm}(p)=V_1(p)\pm p,\hspace{15pt}W_{2,\pm}(p)=V_2(p)\pm pU_1(p),\hspace{15pt}W_{3,\pm}(p)=V_3(p)\pm pU_2(p), \end{align*} \begin{align*} &W_{1,+}(p)=\frac{5}{8}p+\frac{7}{24}p^3,\\ &W_{2,+}(p)=-\frac{1}{128}p^2+\frac{59}{192}p^4-\frac{455}{1152}p^6, \\ &W_{3,+}(p)=-\frac{33}{1024}p^3+\frac{10571}{15360}p^5-\frac{16555}{9216}p^7 + \frac{95095}{82944}p^9,\\ &W_{1,-}(p)=-\frac{11}{8}p+\frac{7}{24}p^3,\\ &W_{2,-}(p)=-\frac{31}{128}p^2+\frac{139}{192}p^4-\frac{455}{1152}p^6,\\ &W_{3,-}(p)=-\frac{177}{1024}p^3+\frac{22891}{15360}p^5-\frac{22715}{9216}p^7 + \frac{95095}{82944}p^9.\\ \end{align*}
This gives, \begin{align*} \log\Gamma&(-\lambda, S_{n,\pm}/\mu_{0,n}^2)\\ =&\sum_{h=0}^\infty \phi_{h-1,\pm}(\lambda) \mu_n^{1-h}=\\ =&\left(1-\sqrt{1-\lambda}+\log(1+\sqrt{1-\lambda})-\log 2\right)\mu_{0,n}\\ &-\frac{1}{4}\log(1-\lambda)+\left(-W_{1,\pm}(\sqrt{-\lambda})\pm 1 -\frac{1}{12}\right)\frac{1}{\mu_{0,n}}\\ &+\left(-W_{2,\pm}(\sqrt{-\lambda})+\frac{1}{2}W_{1,\pm}^2(\sqrt{-\lambda})-\frac{1}{2}\right)\frac{1}{\mu^2_{0,n}}\\ &+\left(W_{1,\pm}(\sqrt{-\lambda})W_{2,\pm}(\sqrt{-\lambda})-W_{3,\pm}(\sqrt{-\lambda})-\frac{1}{3}W^{3}_{1,\pm}(\sqrt{-\lambda})\pm\frac{1}{3}+\frac{1}{360} \right)\frac{1}{\mu_{0,n}^{3}}\\ & + O\left(\frac{1}{\mu_{0,n}^4}\right), \end{align*} and hence \[ \begin{aligned} \phi_{1,+}(\lambda)&=-\frac{5}{8}\frac{1}{(1-\lambda)^{\frac{1}{2}}}-\frac{7}{24}\frac{1}{(1-\lambda)^{\frac{3}{2}}}+\frac{11}{12},\\ \phi_{1,-}(\lambda)&=\frac{11}{8}\frac{1}{(1-\lambda)^{\frac{1}{2}}}-\frac{7}{24}\frac{1}{(1-\lambda)^{\frac{3}{2}}}+\frac{13}{12},\\ \end{aligned} \] \[ \begin{aligned} \phi_{2,+}(\lambda)&=\frac{3}{16}\frac{1}{1-\lambda}-\frac{1}{8}\frac{1}{(1-\lambda)^2}+\frac{7}{16}\frac{1}{(1-\lambda)^3}-\frac{1}{2},\\ \phi_{2,-}(\lambda)&=\frac{19}{16}\frac{1}{1-\lambda}-\frac{9}{8}\frac{1}{(1-\lambda)^2}+\frac{7}{16}\frac{1}{(1-\lambda)^3}-\frac{1}{2}.\\ \end{aligned} \] \[ \begin{aligned} \phi_{3,+}(\lambda)&=-\frac{17}{384}\frac{1}{(1-\lambda)^{\frac{3}{2}}}-\frac{389}{640}\frac{1}{(1-\lambda)^{\frac{5}{2}}} +\frac{203}{128}\frac{1}{(1-\lambda)^{\frac{7}{2}}}-\frac{1463}{1152}\frac{1}{(1-\lambda)^{\frac{9}{2}}}+\frac{121}{360},\\ \phi_{3,-}(\lambda)&=\frac{527}{384}\frac{1}{(1-\lambda)^{\frac{3}{2}}}-\frac{1989}{640}\frac{1}{(1-\lambda)^{\frac{5}{2}}} +\frac{427}{128}\frac{1}{(1-\lambda)^{\frac{7}{2}}}-\frac{1463}{1152}\frac{1}{(1-\lambda)^{\frac{9}{2}}}-\frac{119}{360}.\\ \end{aligned} \]
By equation (\ref{fi2}) and Theorem \ref{tt}, the terms $A_{0,0}(s)$ and $A_{0,1}(s)$, are \begin{align*} A_{0,0,\pm}(s)&=\sum_{n=1}^\infty \left(a_{0, 0,n,\pm} -b_{1,0,0,\pm}u_n^{-1}-b_{3,0,0,\pm}u_n^{-3}\right)u_n^{-2 s},\\ A_{0,1,\pm}(s)&=\sum_{n=1}^\infty \left(a_{0, 1,n,\pm} -b_{1,0,1,\pm}u_n^{-1}-b_{3,0,1,\pm}u_n^{-3}\right)u_n^{-2 s}. \end{align*}
Hence, we need the expansion for large $\lambda$ of the functions $\log\Gamma(-\lambda,S_{n,\pm}/\mu_{0,n}^2)$, $\phi_{1,\pm}(\lambda)$ and $\phi_{3,\pm}(\lambda)$. Using equations (\ref{pop}) and the definition, we obtain \[ Q^\pm_\nu(z)\sim \frac{\sqrt{z}\e^z}{\sqrt{2\pi}}\left(1+\sum_{k=1}^\infty b_kz^{-k}\right)+O(\e^{-z}), \] for large $z$. Therefore, \begin{align*} \log\Gamma(-\lambda,S_{n,\pm}/\mu_{0,n}^2)=&-\mu_{0,n} \sqrt{-\lambda}+\frac{1}{2}\left(\mu_{0,n}-\frac{1}{2}\right)\log(-\lambda) +\frac{1}{2}\log 2\pi\\ &+\left(\mu_{0,n}-\frac{1}{2}\right)\log\mu_{0,n} -\log 2^{\mu_{0,n}}\Gamma(\mu_{0,n})\\ &+\log\left(1\pm\frac{1}{\mu_n}\right) +O\left(\frac{1}{\sqrt{-\lambda}}\right). \end{align*}
Thus, \begin{align*} a_{0,0,n,\pm}&=\frac{1}{2}\log 2\pi+\left(\mu_{0,n}-\frac{1}{2}\right)\log\mu_{0,n}-\log 2^{\mu_{0,n}}\Gamma(\mu_{0,n}) +\log\left(1\pm\frac{1}{\mu_{0,n}}\right),\\ a_{0,1,n,\pm}&=\frac{1}{2}\left(\mu_{0,n}-\frac{1}{2}\right),\\ b_{1,0,0,+}&=-\frac{11}{12}, \hspace{30pt}b_{3,0,0,+} = \frac{121}{360}, \hspace{30pt}b_{1,0,1,\pm}=b_{3,0,1,\pm} =0,\\ b_{1,0,0,-}&=-\frac{13}{12}, \hspace{30pt}b_{3,0,0,-} = \frac{119}{360}. \end{align*}
Using these coefficients and the ones obtained for the sequence $S_0$, we conclude that \begin{align*} 2A_{0,0,0}(s)-A_{0,0,+}(s)-A_{0,0,-}(s) = - \sum_{n=1}^{\infty} \log\left(1-\frac{1}{\mu^2_{0,n}}\right)\frac{(n+1)^2}{\mu_{0,n}^{2s}}, \end{align*} and
\begin{align*} 2A_{0,1,0}(s)-A_{0,1,+}(s)-A_{0,1,-}(s) = \sum_{n=1}^{\infty} \frac{(n+1)^2}{\mu_{0,n}^{2s}}. \end{align*}
Next, we collect the results obtained for giving the uniform expansion of the sum of the logarithmic Gamma functions: \begin{align*} 2 \log \Gamma(-\lambda,\hat S_{0,n}/\mu_{0,n}^2) - \log\Gamma(-\lambda, S_{n,+}/\mu_{0,n}^2) - \log\Gamma(-\lambda, &S_{n,-}/\mu_{0,n}^2) \\ &=\sum_{h=1}^\infty \phi_{h-1}(\sqrt{-\lambda})\mu_{0,n}^{1-h}, \end{align*} where \[ \phi_{h-1}(\sqrt{-\lambda})=2 \phi_{h-1,0}(\sqrt{-\lambda}) - \phi_{h-1,+}(\sqrt{-\lambda}) - \phi_{h-1,-}(\sqrt{-\lambda}), \] and \begin{align*} \phi_{1}(\sqrt{-\lambda}) &= - \frac{1}{(1-\lambda)^{\frac{1}{2}}} + \frac{1}{(1-\lambda)^{\frac{3}{2}}}, \\ \phi_{2}(\sqrt{-\lambda}) &= - \frac{3}{2} \frac{1}{1-\lambda} + 2 \frac{1}{(1-\lambda)^{2}} - \frac{3}{2} \frac{1}{(1-\lambda)^3} + 1,\\ \phi_{3}(\sqrt{-\lambda}) &=-\frac{35}{24} \frac{1}{(1-\lambda)^{\frac{3}{2}}} +\frac{43}{8} \frac{1}{(1-\lambda)^{\frac{5}{2}}} -\frac{67}{8}\frac{1}{(1-\lambda)^{\frac{7}{2}}} +\frac{107}{24} \frac{1}{(1-\lambda)^{\frac{9}{2}}}. \end{align*}
Let $\Phi_{h-1}(s)=2 \Phi_{h-1,0}(s) - \Phi_{h-1,+}(s) - \Phi_{h-1,-}(s)$. Then, using the definition in equation (\ref{fi1}), and the formula for the integral in Appendix \ref{appendixA}, we have \begin{align*} \Phi_{1}(s) &= \frac{2 \Gamma(s+\frac{1}{2})}{\sqrt{\pi}}, \\ \Phi_{2}(s) &= -\frac{\Gamma(s+1)}{2}(5+5s+\frac{3}{2}s^2),\\ \Phi_{3}(s) &= \frac{\Gamma(s+\frac{3}{2})}{\sqrt{\pi}} \left(\frac{428}{315} +\frac{22}{35}s + \frac{214}{315}s^2\right), \end{align*} and hence \begin{align*} \Rz_{s=0}\Phi_{1}(s)&=2 , &\Ru_{s=0}\Phi_{1}(s)&=0,\\ \Rz_{s=0}\Phi_{2}(s)&=-\frac{5}{2} , &\Ru_{s=0}\Phi_{2}(s)&=0,\\ \Rz_{s=0}\Phi_{3}(s)&=\frac{214}{315} , &\Ru_{s=0}\Phi_{3}(s)&=0. \end{align*}
Using all these results and the residues of the function $\zeta(s,U_0)$ in the formulas given in Theorem \ref{t4}, we obtain \begin{align*} 2Z_0(0)-Z_+(0)-Z_-(0)=&-2A_{0,1,0}(0)+A_{0,1,+}(0)+A_{0,1,-}(0),\\ 2Z'_0(0)-Z'_+(0)-Z'_-(0)=&-2A_{0,0,0}(0)+A_{0,0,+}(0)+A_{0,0,-}(0) -2A'_{0,1,0}(0)\\ &+A'_{0,1,+}(0)+A'_{0,1,-}(0) + \frac{1}{2\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{315\nu^3}. \end{align*}
Recall that \begin{align*} 2A_{0,1,0}(s)-A_{0,1,+}(s)-A_{0,1,-}(s) &= \sum_{n=1}^{\infty} \frac{(n+1)^2}{\mu_{0,n}^{2s}}\\ &=\nu^{-2s}\zeta(2s,U_0)\\ &=\nu^{-2s}\zeta\left(s,{\rm Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right), \end{align*} and this gives (see \cite{Spr0}) \[ 2A_{0,1,0}(0)-A_{0,1,+}(0)-A_{0,1,-}(0) =\zeta\left(0,{\rm Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right)=-1, \] and hence \[ 2Z_0(0)-Z_+(0)-Z_-(0)=-2A_{0,1,0}(0)+A_{0,1,+}(0)+A_{0,1,-}(0)=1. \]
In order to deal with the other term, it is convenient to proceed as follows. Since, \begin{align*} 2A_{0,0,0}(s)-A_{0,0,+}(s)-A_{0,0,-}(s) = -\sum_{n=1}^{\infty}(n+1)^2 \log\frac{\mu_{0,n}^2-1}{\mu_{0,n}^2}\mu_{0,n}^{-2s}, \end{align*} we have that \begin{align*} A(s)=&2A_{0,0,0}(s)-A_{0,0,+}(s)-A_{0,0,-}(s)+2A'_{0,1,0}(s)-A'_{0,1,+}(s)-A'_{0,1,-}(s)\\ =&-\sum_{n=1}^{\infty}(n+1)^2\log(\mu_{0,n}^2-1)\mu_{0,n}^{-2s}. \end{align*}
Recalling the definition of $\mu_{0,n}$, \begin{align*} A(s)=&-\sum_{n=1}^{\infty}(n+1)^2\log(\nu^2n(n+2))\mu_{0,n}^{-2s}\\ =&-2\log \nu\sum_{n=1}^{\infty}(n+1)^2\mu_{0,n}^{-2s} -\sum_{n=1}^{\infty}(n+1)^2\log(n(n+2))\mu_{0,n}^{-2s}\\ =&-2(\log\nu)\nu^{-2s}\zeta\left(s,{\rm Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right)+\nu^{-2s}\zeta'\left(s,{\rm Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right)\\ =&-2(\log\nu)\nu^{-2s}\sum_{j=0}^\infty\binom{-s}{j}\zeta(s+j,{\rm Sp}_+\Delta_{S^3})\nu^{-2j}\\ &+\nu^{-2s}\sum_{j=0}^\infty\binom{-s}{j}\zeta'(s+j,{\rm Sp}_+\Delta_{S^3})\nu^{-2j}, \end{align*} and therefore \begin{align*} A(0)&=-2\zeta(0,{\rm Sp}_+\Delta_{S^3})\log\nu+\zeta'(0,{\rm Sp}_+\Delta_{S^3})\\ &=2\log\nu+2\zeta'(-2)+2\zeta'(0)+\log 2. \end{align*}
This gives \begin{align*} 2Z'_0(0)-Z'_+(0)-Z'_-(0)&=-A(0)\\ &=-2\log\nu-2\zeta'(-2)+\log \pi + \frac{1}{2\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{315\nu^3}. \end{align*}
We can now compute the torsion using equation (\ref{ttt}) \begin{align*} \log T(C_\alpha S^3_{la})=&\left(\frac{1}{4}+\frac{1}{4}+\frac{1}{2}\right)\log l^2\\ &-\log 2 -\frac{1}{2}\log\nu+\zeta'(-2)+\frac{1}{2}\log 2\pi+\frac{1}{2\nu}-\frac{1}{315\nu^3}\\ &-\log\nu-\zeta'(-2)+\frac{1}{2}\log \pi + \frac{1}{4\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{630\nu^3}\\ =&\frac{1}{2}\log \frac{\pi^2l^4}{2\nu^3}+\frac{3}{4}\frac{1}{\nu}-\frac{1}{12\nu^3}. \end{align*}
We conclude this section reviewing briefly the analysis of the case $n=1$, and $n=2$. All details can be found in \cite{HMS}. In the case $n=1$, the torsion is given by \[ \log T(C_\alpha S^1_{l\sin\alpha})=\left(\frac{1}{4}+Z(0)-\hat Z(0)\right)\log l^2+Z'(0)-\hat Z'(0)-\frac{1}{2}\log 2, \] where \[ Z(s)=\sum_{n,k=1}^\infty j_{\nu n,k}^{-2s},\qquad \hat Z(s)=\sum_{n,k=1}^\infty (j'_{\nu n,k})^{-2s}. \]
Therefore, the analysis is very similar to the one performed in the previous part I, with the main difference that now the zeta function $\zeta(s,U)$ is $\nu^{-s}\zeta(s)$. Therefore, we just have a simple pole at $s=1$, and we only need the expansion of the logarithmic Gamma function up to order $\nu^{-1}$.
The case of the sphere is a bit more complicated. Now, \[ \log T(C_\alpha S^2_{l\sin\alpha})= \left(\frac{3}{4} + \frac{1}{2} X_+(0) - \frac{1}{2} X_-(0) \right) \log l^{2} + \frac{1}{2}X_+'(0)-\frac{1}{2}X_-'(0)+\frac{1}{2}\log\frac{4}{3}, \] where \[ X_+(s)=\sum_{n,k=1}^\infty (2n+1) \hat j_{\mu_n,k,+}^{-2s},\qquad X_-(s)=\sum_{n,k=1}^\infty (2n+1)\hat j_{\mu_n,k,-}^{-2s}, \] $\mu_n=\sqrt{\nu^2 n(n+1)+\frac{1}{4}}$, and the $\hat j_{\nu,k,\pm}$ are the zeros of the function $G^{\pm}_{\nu}(z)=\pm\frac{1}{2}J_{\nu}(z)+zJ'_\nu(z)$. The zeta function $\zeta(s,U)$ is now related to the zeta function of the Laplace operator on the 2-sphere: \[ \zeta(2s,U)=\nu^{-2s}\zeta\left(s,{\rm Sp}_+\Delta_{S^2}^{(0)}+\frac{1}{4\nu^2}\right). \]
It is known (see for example \cite{Spr4}), that $\zeta(s,{\rm Sp}_+\Delta_{S^2}^{(0)})$ has one simple pole at $s=1$. This gives \[ \zeta(s,U)=\frac{2}{\nu^2}\frac{1}{s-2}+f(s), \] where $f(s)$ is some regular function. Thus, \begin{align*} X_+(0) - X_-(0)=&-A_{0,1,+}(0)+A_{0,1,-}(0) + \frac{1}{\nu^2} \Ru_{s=0} (\Phi_{2,+}(s) - \Phi_{2,-}(s))\\ X'_+(0)-X'_-(0)=&-(A_{0,0,+}(0)+A_{0,1,+}'(0)-A_{0,0,-}(0)- A_{0,1,-}'(0))\\ &+\frac{1}{\nu^2}\Rz_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s))\\ &+\left(\frac{\gamma}{\nu^2}+K\right)\Ru_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s)).\\ \end{align*}
Next, proceeding as in the part II above, and introducing the functions \[ G^\pm_\nu(z)=\pm\frac{1}{2}J_\nu(z)+zJ'_\nu(z), \] we obtain the product representation \begin{align*} H^\pm_\nu(z)&=\pm\frac{1}{2}I_\nu(z)+zI'_\nu(z)=\left(1\pm\frac{1}{2\nu}\right)\frac{z^\nu}{2^\nu\Gamma(\nu)} \prod_{k=1}^{\infty}\left(1+\frac{z^2}{z^2_{\nu,k,\pm}}\right), \end{align*} where $H^\pm_\nu(z)=\e^{-\frac{\pi}{2}i\nu}G_\nu^\pm(i z)$. This allows to obtain the expansion \begin{align*} \log\Gamma(-\lambda, S_{n,\pm}/\mu^2_n)=&\sum_{h=0}^\infty \phi_{h-1,\pm}(\lambda) \mu_n^{1-h}\\ =&\left(1-\sqrt{1-\lambda}+\log(1+\sqrt{1-\lambda})-\log 2\right)\mu_n\\ &-\frac{1}{4}\log(1-\lambda)+\left(-W_{1,\pm}(\sqrt{-\lambda})\pm \frac{1}{2}-\frac{1}{12}\right)\frac{1}{\mu_n}\\ &+\left(-W_{2,\pm}(\sqrt{-\lambda})+\frac{1}{2}W_{1,\pm}^2(\sqrt{-\lambda})-\frac{1}{8}\right)\frac{1}{\mu^2_n}+O\left(\frac{1}{\mu_n^3}\right), \end{align*} where $p=\frac{1}{(1-\lambda)^\frac{1}{2}}$, and \begin{align*} &W_{1,\pm}(p)=V_1(p)\pm\frac{1}{2}p,&W_{2,\pm}(p)=V_2(p)\pm \frac{1}{2}pU_1(p), \end{align*} \begin{align*} &W_{1,+}(p)=\frac{1}{8}p+\frac{7}{24}p^3,&W_{2,+}(p)=-\frac{7}{128}p^2+\frac{79}{192}p^4-\frac{455}{1152}p^6,\\ &W_{1,-}(p)=-\frac{7}{8}p+\frac{7}{24}p^3,&W_{2,-}(p)=-\frac{28}{128}p^2+\frac{119}{192}p^4-\frac{455}{1152}p^6.\\ \end{align*}
This gives, \[ \phi_{2,+}(\lambda)-\phi_{2,-}(\lambda)=-\frac{1}{2}\left(\frac{1}{1-\lambda}-\frac{1}{(1-\lambda)^2}\right), \] and hence using the definition in equation (\ref{fi1}), \begin{align*} \Phi_{2,+}(s)-\Phi_{2,-}(s)&=-\frac{1}{2}\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda}\left(\frac{1}{1-\lambda}-\frac{1}{(1-\lambda)^2}\right).\\ \end{align*}
Using the formula in Appendix \ref{appendixA}, we obtain \begin{align*} \Phi_{2,+}(s)-\Phi_{2,-}(s)&=\frac{1}{2}\Gamma(s+1),\\ \end{align*} and hence \begin{align*} &\Rz_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s))=\frac{1}{2},&\Ru_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s))=0.\\ \end{align*}
This gives
\begin{align*} Z_+(0)-Z_-(0)&= -A_{0,1,+}(0) + A_{0,1,-}(0)\\ Z_+'(0)-Z_-'(0)&=-(A_{0,0,+}(0)+A_{0,1,+}'(0)-A_{0,0,-}(0)- A_{0,1,-}'(0))+\frac{1}{2\nu^2}.\\ \end{align*}
Eventually, using the expansion for large $z$ of the functions $H_\nu^\pm(z)$, we obtain \begin{align*} \log\Gamma(-\lambda,S_{n,\pm}/\mu_n^2)=&-\mu_n \sqrt{-\lambda}+\frac{1}{2}\left(\mu_n-\frac{1}{2}\right)\log(-\lambda) +\frac{1}{2}\log 2\pi\\ &+\left(\mu_n-\frac{1}{2}\right)\log\mu_n -\log 2^{\mu_n}\Gamma(\mu_n)\\ &+\log\left(1\pm\frac{1}{2\mu_n}\right) +O\left(\frac{1}{\sqrt{-\lambda}}\right), \end{align*} and hence \begin{align*} a_{0,0,n,\pm}&=\frac{1}{2}\log 2\pi+\left(\mu_n-\frac{1}{2}\right)\log\mu_n-\log 2^{\mu_n}\Gamma(\mu_n) +\log\left(1\pm\frac{1}{2\mu_n}\right),\\ a_{0,1,n,\pm}&=\frac{1}{2}\left(\mu_n-\frac{1}{2}\right),\\ b_{2,0,0,\pm}&=-\frac{1}{8},\hspace{30pt}b_{2,0,1,\pm}=0.\\ \end{align*}
This immediately shows that $A_{0,1,+}(s)=A_{0,1,-}(s)$, and therefore $X_+(0)-X_-(0) = 0$. Next, \begin{align*} A_{0,0,+}(s)-A_{0,0,-}(s)&=\sum_{n=1}^\infty (2n+1) \mu_n^{-2s}\left(\log\left(1+\frac{1}{2\mu_n}\right)-\log\left(1-\frac{1}{2\mu_n}\right)\right)\\ &=F(s,\nu). \end{align*}
Note that this series converges uniformly for ${\rm Re}(s)>2$, but, using the analytic extension of the zeta function $\zeta(s,U)$, it has an analytic extension that is regular at $s=0$. Therefore, \begin{align*} X_+'(0)- X_-'(0)=&-\Rz_{s=0}F(s,\nu)+\frac{1}{2\nu^2}=-\log \frac{\nu^2}{\pi}-f(\nu)+\frac{1}{2\nu^2}, \end{align*} and this concludes the proof in this case. A power series representation for the function $f(\nu)$ is (see \cite{HMS} Appendix B) \begin{align*} f(\nu)=&\log\frac{\nu^2}{\pi}+\zeta(\frac{1}{2},{\rm Sp}_+\Delta^{(0)}_{S^2})\frac{1}{\nu}\\ &+\sum_{\substack{j,k=0,\\ j+k\not=0}}^\infty \frac{1}{(2k+1)2^{2k}} \frac{1}{2^{2j}}\binom{-k-\frac{1}{2}}{j}\frac{\zeta(k+j+\frac{1}{2},{\rm Sp}_+\Delta^{(0)}_{S^2})}{\nu^{2k+2j+1}}. \end{align*}
\section{The higher dimensional cases} \label{s5}
In the case of a smooth compact connected Riemannian manifold $(M,g)$ with boundary ${\partial} M$, the analytic torsion is given by the Reidemeister torsion plus some further contributions. It was shown in \cite{Che1}, that this further contribution only depends on the boundary, namely that \[ \log T(M)=\log\tau(M)+C({\partial} M). \]
In the case of a product metric near the boundary, the following formula for this contribution was given by L\"uck \cite{Luc} \[ \log T(M)=\log\tau(M)+\frac{1}{4}\chi({\partial} M)\log 2. \]
In the general case a further contribution appears, that measures how the metric is {\it far} from a product metric: \[ \log T(M)=\log\tau(M)+\frac{1}{4}\chi({\partial} M)\log 2+A({\partial} M). \]
A formula for this new {\it anomaly} contribution has been recently given by Br\"uning and Ma \cite{BM}. More precisely, in \cite{BM} (equation (0.6)) is given a formula for the ratio of the analytic torsion of two metrics, $g_0$ and $g_1$, \begin{equation}\label{bat} \begin{aligned} \log \frac{T(M,g_1)}{T(M,g_0)}= \frac{1}{2}\int_{{\partial} M} \left(B(\nabla_1^{T M})-B(\nabla_0^{T M})\right), \end{aligned} \end{equation} where $\nabla_j^{TM}$ is the curvature form of the metric $g_j$, and the forms $B(\nabla_j^{TX})$ are defined in equation (1.17) of \cite{BM} (see equation \ref{ebm1} below, and observe that we take the opposite sign with respect to the definition in \cite{BM}, since we are considering left actions instead of right actions). Note that we use the formula of \cite{BM} in the particular case of a flat trivial bundle $F$. Taking $g_1=g$, and $g_0$ an opportune deformation of $g$, that is a product metric near the boundary, \[ A({\partial} M)=\log \frac{T(M,g_1)}{T(M,g_0)}, \] and therefore
\begin{equation}\label{pop1} \log T(M)=\log\tau(M)+\frac{1}{4}\chi({\partial} M)\log 2+ \frac{1}{2}\int_{{\partial} M} \left(B(\nabla_1^{T M})-B(\nabla_0^{T M})\right). \end{equation}
Since the whole boundary contribution is a local invariant of the boundary, the formula in equation (\ref{bat}) holds in the case of a cone $M=CW$, and therefore in the case under study: $M=C_\alpha S_{l\sin\alpha}^{m-1}$. We compute the contribution given by the formula in equation (\ref{bat}) with respect to the metric induced by the immersion and an opportune product metric. Our result is stated in the following lemma.
\begin{lem} Consider the two metrics \begin{align*} g_1 &= dr \otimes dr + a^2 r^2 g_{S^{n}},\\ g_0& = dr\otimes dr + a^2 l^{2} g_{S^{n}}, \end{align*} on $C_\alpha S^{n}_{la}$, where $a=\sin\alpha$. Then, ($p>0$) \begin{align*} \log &\frac{T(C_\alpha S^{2p}_{la},g_1)}{T(C_\alpha S^{2p}_{la},g_0)}= \frac{a^{2p}}{8} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \chi(S^{2p}_{la})\\ \log &\frac{T(C_\alpha S^{2p-1}_{la},g_1)}{T(C_\alpha S^{2p-1}_{la},g_0)}\\ &= \sum_{j=0}^{p -1} \frac{2^{p-j}}{j!(2(p-j)-1)!!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \frac{a^{2p-1} (2p-1)!}{4^{p} (p-1)!} \end{align*} \label{ele} \end{lem}
\begin{proof} The proof is a generalization of the proofs of Lemmas 1 and 2 of \cite{HMS}. We first recall some notation from \cite{BZ} Chapter III and \cite{BM} Section 1.1. For two ${\mathds{Z}}/2$-graded algebras ${\mathcal{A}}$ and ${\mathcal{B}}$, let ${\mathcal{A}}\hat\otimes{\mathcal{B}}={\mathcal{A}}\wedge\hat{\mathcal{B}}$ denote the ${\mathds{Z}}/2$-graded tensor product. For two real finite dimensional vector spaces $V$ and $E$, of dimension $m$ and $n$, with $E$ Euclidean and oriented, the Berezin integral is the linear map \begin{align*} \int^B&: \Lambda V^* \hat\otimes \Lambda E^* \to \Lambda V^*, \\ \int^B:&\alpha \hat\otimes \beta\mapsto \frac{(-1)^{\frac{n(n+1)}{2}}}{\pi^\frac{n}{2}}\beta(e_1,\dots, e_n)\alpha, \end{align*} where $\{e_j\}_{j=1}^n$ is an orthonormal basis of $E$. Let $A$ be an antisymmetric endomorphism of $E$. Consider the map \[ {\hat{}} :A\mapsto \hat A=\frac{1}{2} \sum_{j,l=1}^n (e_j,A e_l) \hat e^j\wedge \hat e^l. \]
Note that \begin{equation}\label{pfpf} \int^B \e^{-\frac{\hat A}{2}}=Pf\left(\frac{A}{2\pi}\right), \end{equation} and this vanishes if ${\rm dim}E=n$ is odd.
Let $\omega_j$ be the curvature one form over $C_{\alpha}S^{m-1}_{l\sin\alpha}$ associated to the metric $g_j$. Let $\Theta$ be the curvature two form of the boundary $S^{m-1}$ (with radius 1) and the standard Euclidean metric. Let $\tensor{(\omega_j)}{^{a}_{b}}$ denote the entry in row $a$ and column $b$ of the matrix of one forms $\omega_j$. Then, we introduce the following quantities (see \cite{BM} equations (1.8) and (1.15)) \begin{equation}\label{pippo}\begin{aligned} \mathcal{S}_j&=\frac{1}{2}\sum_{k=1}^{m-1}\tensor{(\omega_j-\omega_0)}{^{r}_{\theta_k}}\hat e^{\theta_k},\\
\hat \Omega&=\mathcal{R}^{T C_{\alpha}S^{m-1}_{l\sin\alpha}}|S^{m-1}_{la}= \frac{1}{2}\sum_{k,l=1}^{m-1}\tensor{\Omega}{^{\theta_k} _{\theta_l}} \hat e^{\theta_k}\wedge \hat e^{\theta_l}\\ \mathcal{R}&=\hat \Theta=\frac{1}{2}\sum_{k,l=1}^{m-1}\tensor{\Theta}{^{\theta_k}_{\theta_l}} \hat e^{\theta_k}\wedge \hat e^{\theta_l}.\\ \end{aligned} \end{equation}
Direct calculations starting from the metrics $g_j$ allow to obtain explicit formulas for all these forms. The calculations in the present case are a slight generalization of the calculations presented in the proof of Lemma 2 of \cite{HMS}, and we refer to that work for further details. We find that the non zero entries of the matrices appearing in equation (\ref{pippo}) are \begin{align*} \tensor{(\omega_1 - \omega_0)}{^{r}_{\theta_i}} &= - a \prod^{m-1}_{j=i+1} \sin{\theta_j} d\theta_i,\\ \tensor{\Omega}{^{\theta_i}_{\theta_k}}&= (1-a^{2})\prod_{j=i+1}^{k} \sin{\theta_j} \prod_{s=k+1}^{m-1} \sin^{2}{\theta_s} d\theta_i \wedge d\theta_k, \hspace{20pt}i<k,\\ \tensor{\Theta}{^{\theta_i}_{\theta_k}}&= \prod_{j=i+1}^{k} \sin{\theta_j} \prod_{s=k+1}^{m-1} \sin^{2}{\theta_s} d\theta_i \wedge d\theta_k, \hspace{20pt}i<k. \end{align*}
Note that for $i<k$ \[ \tensor{((\omega_1 - \omega_0)^2)}{^{\theta_k}_{\theta_i}} = -a^{2} \prod_{j=i+1}^{k} \sin{\theta_j} \prod^{m-1}_{s=k+1} \sin^{2}{\theta_s} d\theta_i \wedge d\theta_k. \]
Then, recalling $\mathcal{R} = \hat\Omega - 2 \mathcal{S}_1^2$ by equation (1.16) of \cite{BM}, it is easy to see that \[ \mathcal{R} =-\frac{2}{a^2} \mathcal{S}_1^2. \]
Following \cite{BM}, equation (1.17), we define \begin{equation}\label{ebm1} B(\nabla_j^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})=\frac{1}{2}\int_0^1\int^B \e^{-\frac{1}{2}\mathcal{R}-u^2 \mathcal{S}_j^2}\sum_{k=1}^\infty \frac{1}{\Gamma\left(\frac{k}{2}+1\right)}u^{k-1} \mathcal{S}_j^k du. \end{equation}
From this definition it follows that $B(\nabla_0^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})$ vanishes identically, since $\mathcal{S}_0$ does. It remains to evaluate $B(\nabla_1^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})$. Equation (\ref{ebm1}) gives \begin{align*} B(\nabla_1^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})&=\frac{1}{2}\int_0^1\int^B \e^{(\frac{1}{a^2}-u^2) \mathcal{S}_1^2}\sum_{k=1}^\infty \frac{1}{\Gamma\left(\frac{k}{2}+1\right)}u^{k-1} \mathcal{S}_1^k du\\ &=\frac{1}{2}\int^B \sum_{j=0,k=1}^\infty \frac{1}{j!\Gamma\left(\frac{k}{2}+1\right)}\int_0^1 \left(\frac{1}{a^2}-u^2\right)^ju^{k-1} d u \mathcal{S}_1^{k+2j} \\ &=\frac{1}{2}\int^B \sum_{j=0,k=1}^\infty \frac{1}{j!\Gamma\left(\frac{k}{2}+1\right)} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(2h+k)a^{2(j-h)}} \mathcal{S}_1^{k+2j}. \end{align*}
Since the Berezin integral vanishes identically whenever $k+2j\not=m-1$, we obtain
\begin{equation}\label{epe1} \begin{aligned} B&(\nabla_1^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})\\ &=\frac{1}{2} \sum_{j=0}^{[\frac{m}{2} -1]} \frac{1}{j!\Gamma\left(\frac{m-2j+1}{2}\right)} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(m-2(j-h)-1)a^{2(j-h)}}\int^B \mathcal{S}_1^{m-1}. \end{aligned} \end{equation}
Now consider the two cases of even and odd $m$ independently. First, assume $m=2p+1$ ($p\geq 0$). Then, using equation (\ref{pfpf}), equation (\ref{epe1}) gives
\begin{align*} B(\nabla_1^{T C_{\alpha}S^{2p}_{l\sin\alpha}})
&=\frac{1}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int^B \mathcal{S}_1^{2p}\\ &=\frac{1}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int^B \frac{(-a^2)^p}{2^p}\mathcal{R}^{p}\\ &=\frac{a^{2p}}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int^B \e^{-\frac{\mathcal{R}}{2}}\\ &=\frac{a^{2p}}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} Pf\left(\frac{\Theta}{2\pi}\right)\\ &=\frac{a^{2p}}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} e(S^{2p},g_E)\\ \end{align*} where $e(S^{2p},g_E)$ is the Euler class of $(S^{2p},g_E)$, and we use the fact that \[ e(S^{2p}_l,g_l)= Pf\left(\frac{\Theta}{2\pi}\right)=\int^{B} \exp(-\frac{\hat\Theta}{2}). \]
Therefore,
\begin{align*} \frac{1}{2}\int_{S^{2p}_{l\sin\alpha}}& B(\nabla_1^{T C_{\alpha}S^{2p}_{l\sin\alpha}})\\
&= \frac{a^{2p}}{8}
\sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int_{S^{2p}_{la}} e(S^{2p}_{la},g_E)\\ &= \frac{a^{2p}}{8} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \chi(S^{2p}_{la}). \end{align*}
Second, assume $m=2p$ ($p\geq 1$). Then, equation (\ref{epe1}) gives
\begin{align*} B(\nabla_1&^{T C_{\alpha}S^{2p-1}_{l\sin\alpha}})\\ &=\frac{1}{2} \sum_{j=0}^{p -1} \frac{1}{j!\Gamma\left(p-j + \frac{1}{2}\right)} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \int^B \mathcal{S}_1^{2p-1}. \end{align*}
Now we evaluate $\int^B \mathcal{S}_1^{2p-1}$. Recalling that \[ \mathcal{R} =-\frac{2}{a^2} \mathcal{S}_1^2, \] we obtain that \begin{align*} \int^B \mathcal{S}_1^{2p-1} &= \int^{B} \mathcal{S}_1 \mathcal{S}_1^{2p-2},\\ &= \frac{(-1)^{p-1} a^{2p-2}}{2^{p-1}}\int^{B} \mathcal{S}_1 \mathcal{R}^{p-1} \end{align*} and using the explicit definitions of these forms given in equation (\ref{pippo}), we have
\begin{align*} \int^B \mathcal{S}_1^{2p-1} =& \frac{(-1)^{p-1} a^{2p-2}}{2^{2p-1}}\int^{B} \left(\sum_{k=1}^{2p-1}\tensor{(\omega_1-\omega_0)}{^{r}_{\theta_k}} {\hat e^{\theta_k}}\right)
\left(\sum_{i,j=1}^{2p-1}\tensor{\Theta}{^{\theta_i}_{\theta_j}} {\hat e^{\theta_i}}\wedge {\hat
e^{\theta_j}}\right)^{p-1}\\ =&\frac{(-1)^{p} a^{2p-1}}{2^{2p-1}} c_B \\ &\times \sum_{\substack{\sigma \in S_{2p}\\ \sigma(1) = 1}} {\rm sgn}(\sigma) \tensor{(\omega_1-\omega_0)}{^{1}_{\sigma(2)}} \tensor{(\Omega_0)}{^{\sigma(3)}_{\sigma(4)}} \ldots \tensor{(\Omega_0)}{^{\sigma(2p-1)}_{\sigma(2p)}}. \end{align*} where $c_B=\frac{(-1)^{p(2p-1)}}{\pi^{\frac{2p-1}{2}}}$. Using the same argument used in the final part of the proof of Lemma 2 of \cite{HMS}, we show that \begin{align*} \int^B \mathcal{S}_1^{2p-1} &= c_B \frac{(-1)^{p} a^{2p-1} (2p-1)!}{2^{p-1} 2^p} \prod_{j=2}^{2p-1} (\sin\theta_{j})^{j-1} d\theta_1 \wedge \ldots \wedge d\theta_{2p-1}. \end{align*}
Then,
\begin{align*} \int_{S^{2p-1}_{la}} \int^B \mathcal{S}_1^{2p-1} &= \frac{(-1)^{p(2p-1)}}{\pi^{\frac{2p-1}{2}}} \frac{(-1)^{p} a^{2p-1} (2p-1)!}{2^{p-1} 2^p (la)^{2p-1}} {\rm Vol}(S^{2p-1}_{la}) \\ &= \frac{(-1)^{p(2p-1)}}{\pi^{\frac{2p-1}{2}}} \frac{(-1)^{p} a^{2p-1} (2p-1)!}{2^{p-1} 2^p (la)^{2p-1}} \frac{2 \pi^{p} (la)^{2p-1}}{(p-1)!}\\ &=\frac{1}{\pi^{-\frac{1}{2}}} \frac{a^{2p-1} (2p-1)!}{2^{p-1} 2^{p-1} } \frac{1}{(p-1)!} \end{align*}
and \begin{align*} \frac{1}{2}\int_{S^{2p-1}_{l\sin\alpha}} &B(\nabla_1^{T C_{\alpha}S^{2p-1}_{l\sin\alpha}})\\ =& \sum_{j=0}^{p -1} \frac{1}{j!\Gamma\left(p-j + \frac{1}{2}\right)} \sum_{h=0}^{j}\binom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \int_{S^{2p-1}_{l\sin\alpha}} \int^B \frac{\mathcal{S}_1^{2p-1}}{4}\\
=& \sum_{j=0}^{p -1} \frac{1}{j!\Gamma\left(p-j + \frac{1}{2}\right)} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \frac{a^{2p-1} (2p-1)!}{\pi^{-\frac{1}{2}} 4^{p} (p-1)!}\\ =& \sum_{j=0}^{p -1} \frac{2^{p-j}}{j!(2(p-j)-1)!!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}}\frac{a^{2p-1} (2p-1)!}{4^{p} (p-1)!}. \end{align*}
\end{proof}
We have now all the terms appearing in equation (\ref{pop1}). In fact, the Reidemeister torsion of the cone over a sphere was computed in \cite{HMS}, Proposition 2, \[ \log\tau(C_\alpha S^{m-1}_{l\sin\alpha})=\frac{1}{2}{\rm Vol}(C_\alpha S^{m-1}_{l\sin\alpha}). \]
Comparing with the results given in Theorem \ref{t1}, we detect the contribution of the singularity. It is easy to see that the formula in equation (\ref{pop1}) holds for the cone over the circle and over the $3$-spheres, while a contribution due to the singularity appears in the case of the sphere. This motivates the following conjecture, that is a theorem for $p<3$.
\begin{conj} \label{c1} The analytic torsion of the cone $C_\alpha S^{2p-1}_{l\sin\alpha}$, of angle $\alpha$, and length $l>0$, over the odd dimensional sphere $S^{2p-1}$, with the standard metric induced by the immersion in ${\mathds{R}}^{m+1}$, and absolute boundary conditions is (where $p>0$): \begin{align*} \log T(C_\alpha &S^{2p-1}_{l\sin\alpha})=\frac{1}{2}\log {\rm Vol} (C_\alpha S^{2p-1}_{l\sin\alpha})\\ +& \sum_{j=0}^{p -1} \frac{2^{p-j}}{j!(2(p-j)-1)!!} \sum_{h=0}^{j} \binom{j}{h} \frac{(-1)^{h}\csc^{2(j-h)}\alpha}{(2(p-j+h)-1)} \frac{ (2p-1)!\sin^{2p-1}\alpha}{4^{p} (p-1)!}. \end{align*}
\end{conj}
\section{Appendix A} \label{appendixA}
We give here a formula for a contour integral appearing in the text. The proof is in \cite{Spr3} Section 4.2. Let
$\Lambda_{\theta,c}=\{\lambda\in{\mathds{C}}~|~|\arg(\lambda-c)|=\theta\}$, $0<\theta<\pi$, $0<c<1$, $a$ real, then \[ \int_0^\infty t^{s-1} \frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda}\frac{1}{(1-\lambda)^a}d\lambda dt=\frac{\Gamma(s+a)}{\Gamma(a)s}. \]
\end{document} |
\begin{document}
\title{On the Rajchman property for self-similar measures on $\mathbb{R}^{d}$}
\author{Ariel Rapaport}
\subjclass[2000]{\noindent Primary: 28A80, Secondary: 42A16.}
\keywords{Self-similar measure, Rajchman measure, P.V. $k$-tuple.} \begin{abstract} We establish a complete algebraic characterization of self-similar iterated function systems $\Phi$ on $\mathbb{R}^{d}$, for which there exists a positive probability vector $p$ so that the Fourier transform of the self-similar measure corresponding to $\Phi$ and $p$ does not tend to $0$ at infinity. \end{abstract}
\maketitle
\section{\label{sec:Introduction-and-the}Introduction and the main result}
\subsection{Introduction}
Let $d\ge1$ be an integer. Given a Borel probability measure $\nu$ on $\mathbb{R}^{d}$, its Fourier transform is denoted by $\widehat{\nu}$. That is, \[ \widehat{\nu}(\xi):=\int e^{i\left\langle \xi,x\right\rangle }\:d\nu(x)\text{ for }\xi\in\mathbb{R}^{d}, \]
where $\left\langle \cdot,\cdot\right\rangle $ is the standard inner product of $\mathbb{R}^{d}$. It is said that $\nu$ is a Rajchman measure if $|\widehat{\nu}(\xi)|\rightarrow0$ as $|\xi|\rightarrow\infty$. The Riemann--Lebesgue lemma says that $\nu$ is Rajchman whenever it is absolutely continuous with respect to the Lebesgue measure. For singular measures determining which ones are Rajchman is a subtle question with a long history (see \cite{Ly}). In this paper we study the Rajchman property in the context of self-similar measures on $\mathbb{R}^{d}$.
Denote the orthogonal group of $\mathbb{R}^{d}$ by $O(d)$. A similarity of $\mathbb{R}^{d}$ is a map $\varphi:\mathbb{R}^{d}\rightarrow\mathbb{R}^{d}$ of the form $\varphi(x)=rUx+a$, where $r>0$, $U\in O(d)$ and $a\in\mathbb{R}^{d}$. When $0<r<1$, the map $\varphi$ is said to be a contracting similarity. A finite collection $\Phi=\{\varphi_{i}\}_{i=1}^{\ell}$ of contracting similarities is called a self-similar iterated function system (IFS) on $\mathbb{R}^{d}$. It is well known (see \cite{Hut}) that there exists a unique nonempty compact $K\subset\mathbb{R}^{d}$ which satisfies the relation \[ K=\cup_{i=1}^{\ell}\varphi_{i}(K)\:. \] It is called the self-similar set, or attractor, corresponding to $\Phi$.
Following \cite{Ho} we make the following definition. \begin{defn} We say that $\Phi$ is affinely irreducible if there does not exist a proper affine subspace $\mathbb{V}$ of $\mathbb{R}^{d}$ so that $\varphi_{i}(\mathbb{V})=\mathbb{V}$ for all $1\le i\le\ell$. \end{defn}
It is easy to see that $\Phi$ is not affinely irreducible if and only if its attractor $K$ is contained in a proper affine subspace $\mathbb{V}$ of $\mathbb{R}^{d}$. Observe that when $d=1$, the IFS $\Phi$ is affinely irreducible if and only if the maps in $\Phi$ do not all have the same fixed point.
It is also well known (again, see \cite{Hut}) that given a probability vector $p=(p_{i})_{i=1}^{\ell}$ there exists a unique Borel probability measure $\mu$ on $\mathbb{R}^{d}$ which satisfies the relation, \[ \mu=\sum_{i=1}^{\ell}p_{i}\cdot\varphi_{i}\mu, \] where $\varphi_{i}\mu:=\mu\circ\varphi_{i}^{-1}$ is the pushforward of $\mu$ via $\varphi_{i}$. The measure $\mu$ is called the self-similar measure corresponding to $\Phi$ and $p$, and it is supported on the attractor $K$. If $p_{i}>0$ for all $1\le i\le\ell$ we say that $p$ is positive and write $p>0$. When $p>0$ the support of $\mu$ is equal to $K$. If $\Phi$ is not affinely irreducible then $\mu(\mathbb{V})=1$ for some proper affine subspace $\mathbb{V}\subset\mathbb{R}^{d}$, in which case it is easy to see that $\mu$ is not Rajchman. For this reason, we shall always assume that our function systems are affinely irreducible.
Before stating our main theorem we mention some relevant previous results, mainly regarding the Fourier decay of self-similar measures on $\mathbb{R}$. We start with the basic case of Bernoulli convolutions. Given $\lambda\in(0,1)$, write $\nu_{\lambda}$ for the distribution of the random sum $\sum_{n\ge0}\pm\lambda^{n}$, where the $\pm$ are independent unbiased random variables. This measure is called the Bernoulli convolution with parameter $\lambda$. It can also be realised as the self-similar measure corresponding to the IFS $\{t\rightarrow\lambda t\pm1\}$ and the probability vector $(\frac{1}{2},\frac{1}{2})$.
Erd\H{o}s \cite{Er1} proved that $\nu_{\lambda}$ is not Rajchman whenever $\lambda^{-1}$ is a Pisot number different from $2$. Recall that a Pisot number, also called a Pisot--Vijayaraghavan number or a P.V. number, is an algebraic integer greater than one whose algebraic (Galois) conjugates are all less than one in modulus. Note that $\nu_{1/2}$ is absolutely continuous and in particular Rajchman. Later Salem \cite{Sa} showed that if $\lambda^{-1}$ is not a Pisot number then $\nu_{\lambda}$ is Rajchman, thus providing a characterization of Rajchman Bernoulli convolution measures. Erd\H{o}s \cite{Er2} proved that $\widehat{\nu_{\lambda}}$ has power decay for a.e. $\lambda\in(0,1)$. That is, there exist
$s>0$ and $C>1$ so that $|\widehat{\nu_{\lambda}}(\xi)|\le C|\xi|^{-s}$ for $\xi\in\mathbb{R}$. Kahane \cite{Kah} later observed that this actually holds for all $\lambda\in(0,1)$ outside a set of zero Hausdorff dimension.
We turn to discuss the Fourier decay of general orientation preserving self-similar measures on the real line, in which case a lot of recent progress has been made. Let $\Phi=\{\varphi_{i}(t)=r_{i}t+a_{i}\}_{i=1}^{\ell}$ be a self-similar IFS on $\mathbb{R}$, with $r_{i}>0$ for $1\le i\le\ell$. Set \[ \Delta:=\{(p_{i})_{i=1}^{\ell}\in(0,1]^{\ell}\::\:p_{1}+...+p_{\ell}=1\}, \] and for $p\in\Delta$ write $\mu_{p}$ for the self-similar measure corresponding to $\Phi$ and $p$. Let $\mathbf{H}\subset\mathbb{R}_{>0}$ be the group generated by the contractions $\{r_{i}\}_{i=1}^{\ell}$, where $\mathbb{R}_{>0}$ is the multiplicative group of positive real numbers. It is desirable to characterize the systems $\Phi$ for which there exists $p\in\Delta$ so that $\mu_{p}$ is non-Rajchman. The following result, due to Li and Sahlsten, reduces this problem to the case in which $\mathbf{H}$ is cyclic. \begin{thm} [Li--Sahlsten, \cite{LS}]\label{thm:li and sahl}Suppose that $\Phi$ is affinely irreducible and that $\mathbf{H}$ is not cyclic. Then $\mu_{p}$ is Rajchman for every $p\in\Delta$. \end{thm}
A related result has recently been obtained by Algom, Rodriguez Hertz and Wang \cite[Corollary 1.2]{AHW}, which verifies the Rajchman property for self-conformal measures under mild assumptions. In \cite{SS}, Sahlsten and Stevens have established power Fourier decay for self-conformal measures under certain conditions.
The proof of Theorem \ref{thm:li and sahl} is based on the classical renewal theorem for transient random walks on $\mathbb{R}$. This approach was initiated by Li \cite{Li}, who established the Rajchman property for the Furstenberg measure on $\mathbb{RP}^{1}$ under mild assumptions. Renewal theory also plays a major role in the proof of the main result of this paper.
The situation in which $\mathbf{H}$ is cyclic has been considered by Brémont (see also the paper by Varj{\'u} and Yu \cite{VY}). We continue to consider the orientation preserving system $\Phi=\{\varphi_{i}(t)=r_{i}t+a_{i}\}_{i=1}^{\ell}$ on $\mathbb{R}$. \begin{thm} [Brémont, \cite{Br}]\label{thm:bermont}Suppose that $\Phi$ is affinely irreducible and that $\mathbf{H}$ is cyclic. Let $r\in(0,1)$ be with $\mathbf{H}=\{r^{n}\}_{n\in\mathbb{Z}}$. Then there exists $p\in\Delta$ so that $\mu_{p}$ is non-Rajchman if and only if, $r^{-1}$ is a Pisot number and $\Phi$ can be conjugated by a suitable similarity to a form such that $a_{i}\in\mathbb{Q}(r)$ for $1\le i\le\ell$. \end{thm}
Brémont also proved that when $\mathbf{H}=\{r^{n}\}_{n\in\mathbb{Z}}$ for a Pisot number $r$ and $a_{i}\in\mathbb{Q}(r)$ for $1\le i\le\ell$, then in fact $\mu_{p}$ is non-Rajchman for every $p\in\Delta$ outside a finite union of proper submanifolds of $\Delta$. Moreover, in this case he also showed that $\mu_{p}$ is absolutely continuous whenever it is Rajchman.
Theorems \ref{thm:li and sahl} and \ref{thm:bermont} provide a complete algebraic characterization of the systems $\Phi$ on $\mathbb{R}$ for which there exists $p\in\Delta$ so that $\mu_{p}$ is non-Rajchman. The purpose of this paper is to extend this characterization to arbitrary self-similar IFSs on $\mathbb{R}^{d}$.
We point out that Solomyak \cite{So} has recently shown that there exists $\mathcal{E}\subset(0,1)^{\ell}$ of zero Hausdorff dimension so that when $\Phi$ is affinely irreducible and $(r_{i})_{i=1}^{\ell}\notin\mathcal{E}$, it holds that $\widehat{\mu_{p}}$ has power decay for all $p\in\Delta$. For explicit parameters, and under additional diophantine assumptions, logarithmic decay rate has recently been obtained in \cite{LS}, \cite{VY} and \cite{AHW}. In the present paper we are only interested in the complete characterization of self-similar IFSs generating non-Rajchman measures, and do not consider the Fourier rate of decay.
Finally, we mention that in the context of self-affine measures on $\mathbb{R}^{d}$, the Rajchman property has recently been considered by Li and Sahlsten \cite{LS2}. Assuming the group generated by the linear parts of the affine maps is proximal and totally irreducible and that the attractor is not a singleton, they have established that all self-affine measures, corresponding to positive probability vectors, are Rajchman. When $d=2,3$, or under additional assumptions on the group generated by the linear parts, they have also obtained power Fourier decay. The proximality assumption makes the situation studied in that paper very different compared to the self-similar setup studied here.
\subsection{\label{subsec:The-main-result.}The main result}
Following \cite{Ca} and \cite[Section 9.2]{BDGPS} we make the following definition, which is necessary in order to state our main result. \begin{defn} Given $k\ge1$, a finite collection $\{\theta_{1},...,\theta_{k}\}$ of distinct algebraic integers is said to be a P.V. $k$-tuple if the following conditions are satisfied. \begin{enumerate}
\item $|\theta_{j}|>1$ for $1\le j\le k$; \item there exists a monic polynomial $P\in\mathbb{Z}[X]$ so that $P(\theta_{j})=0$
for $1\le j\le k$, and $|z|<1$ for $z\in\mathbb{C}\setminus\{\theta_{1},...,\theta_{k}\}$ with $P(z)=0$. \end{enumerate} \end{defn}
We make some remarks regarding this definition. In what follows, let $\{\theta_{1},...,\theta_{k}\}$ be a P.V. $k$-tuple. \begin{itemize} \item For each $1\le j_{0}\le k$ there exists $1\le j_{1}\le k$ so that $\theta_{j_{1}}=\overline{\theta_{j_{0}}}$. Additionally, writing $J$ for the set of $1\le j\le k$ so that $\theta_{j}$ is conjugate to $\theta_{j_{0}}$ over $\mathbb{Q}$, it holds that $\{\theta_{j}\}_{j\in J}$ is also a P.V. tuple. \item Note that a positive real number $\theta$ is a Pisot number precisely when $\{\theta\}$ is a P.V. $1$-tuple. A nonreal complex number $\theta$ so that $\{\theta,\overline{\theta}\}$ is a P.V. $2$-tuple is commonly called a complex Pisot number. \item We shall be interested in P.V. tuples whose elements have the same modulus. Obviously every P.V. $1$-tuple and every P.V. $2$-tuple of the form $\{\theta,\overline{\theta}\}$ has this property. Assuming
$|\theta_{1}|=...=|\theta_{k}|$, for every $m\ge1$ the collection \[ \{z\in\mathbb{C}\::\:z^{m}=\theta_{j}\text{ for some }1\le j\le k\} \] is a P.V. $mk$-tuple with this property. Further examples can be obtained by considering the products of real or complex Pisot numbers with certain primitive roots of unity. For instance, as pointed out in \cite{Bo}, if $\theta$ and $\overline{\theta}$ are the complex Pisot numbers whose minimal polynomial is $X^{3}+X^{2}-1$ and $u$ and $\overline{u}$ are the primitive $6\text{th}$ roots of unity, then $\{\theta u,\overline{\theta}u,\theta\overline{u},\overline{\theta u}\}$ is a P.V. $4$-tuple whose elements are all conjugates over $\mathbb{Q}$ and have the same modulus.
\item Suppose that $|\theta_{1}|=...=|\theta_{k}|$ and that $\theta_{1},...,\theta_{k}$ are conjugates over $\mathbb{Q}$. It is natural to ask whether we can say more about the structure of the P.V. $k$-tuple $\{\theta_{1},...,\theta_{k}\}$ under these additional assumptions. From a result of Boyd \cite{Bo} and Ferguson \cite{Fer} it follows that if $\theta_{j}$ is real for some $1\le j\le k$ then $\{\theta_{1},...,\theta_{k}\}=\{e^{2\pi ij/k}\theta_{1}\}_{j=1}^{k}$. Considering this result and the previous remark, one might think that, under the additional assumptions, for every $1\le j\le k$ at least one of the numbers $\theta_{j}/\theta_{1}$ and $\theta_{j}/\overline{\theta_{1}}$ is a root of unity. In Example \ref{exa:interesting} below we show that this is not the case. \end{itemize}
{} $\quad$As noted above, we shall always assume that our function systems are affinely irreducible. On the other hand, the situation in which there exists a nontrivial linearly invariant subspace should be taken into account. Let $\Phi=\{\varphi_{i}(x)=r_{i}Ux+a_{i}\}_{i=1}^{\ell}$ be a self-similar IFS on $\mathbb{R}^{d}$. Given a linear subspace $\mathbb{V}$ of $\mathbb{R}^{d}$ we write $\pi_{\mathbb{V}}$ for the orthogonal projection onto $\mathbb{V}$. Observe that if $d':=\dim\mathbb{V}>0$, $U_{i}(\mathbb{V})=\mathbb{V}$ for $1\le i\le\ell$, and $S:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ is an isometry (which is necessarily an affine map), then $\{S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}\}_{i=1}^{\ell}$ is a self-similar IFS on $\mathbb{R}^{d'}$. Moreover, in this situation for every $1\le i\le\ell$ there exists $U_{i}'\in O(d')$ and $a_{i}'\in\mathbb{R}^{d'}$ so that \[ S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}(x)=r_{i}U_{i}'x+a_{i}'\text{ for }x\in\mathbb{R}^{d'}\:. \]
We are now ready to state the main result of this paper. In what follows we consider $\mathbb{R}^{d}$ as a subset of $\mathbb{C}^{d}$. We denote the standard inner product of $\mathbb{C}^{d}$ by $\left\langle \cdot,\cdot\right\rangle $, that is $\left\langle z,w\right\rangle =\sum_{j=1}^{d}z_{j}\overline{w_{j}}$ for $z,w\in\mathbb{C}^{d}$. Given a linear operator $A$ on $\mathbb{R}^{d}$ we consider it also as a linear operator on $\mathbb{C}^{d}$ in the natural way, that is by setting $A(x+iy):=Ax+iAy$ for $x,y\in\mathbb{R}^{d}$. \begin{thm} \label{thm:main}Let $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ be an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$, with $0<r_{i}<1$, $U_{i}\in O(d)$ and $a_{i}\in\mathbb{R}^{d}$ for $1\le i\le\ell$. Then there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ such that the self-similar measure corresponding to $\Phi$ and $p$ is non-Rajchman if and only if there exists a linear subspace $\mathbb{V}\subset\mathbb{R}^{d}$, with $d':=\dim\mathbb{V}>0$ and $U_{i}(\mathbb{V})=\mathbb{V}$ for $1\le i\le\ell$, and an isometry $S:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ so that the following conditions are satisfied. \begin{enumerate} \item \label{enu:main thm first cond}For $1\le i\le\ell$ let $U_{i}'\in O(d')$ and $a_{i}'\in\mathbb{R}^{d'}$ be with $S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}(x)=r_{i}U_{i}'x+a_{i}'$. Let $\mathbf{H}\subset GL_{d'}(\mathbb{R})$ be the group generated by $\{r_{i}U_{i}'\}_{i=1}^{\ell}$, and set $\mathbf{N}:=\mathbf{H}\cap O(d')$. Then $\mathbf{N}$ is finite, $\mathbf{N}\triangleleft\mathbf{H}$ and $\mathbf{H}/\mathbf{N}$ is cyclic. \item \label{enu:main thm second cond}For every contracting $A\in\mathbf{H}$ with $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$, there exist $k\ge1$, $\theta_{1},...,\theta_{k}\in\mathbb{C}$ and $\zeta_{1},...,\zeta_{k}\in\mathbb{C}^{d'}\setminus\{0\}$, so that \begin{enumerate} \item $\{\theta_{1},...,\theta_{k}\}$ is a P.V. 
$k$-tuple; \item $A^{-1}\zeta_{j}=\theta_{j}\zeta_{j}$ for $1\le j\le k$; \item for every $1\le i\le\ell$ and $V\in\mathbf{N}$ there exists $P_{i,V}\in\mathbb{Q}[X]$ so that $\left\langle Va_{i}',\zeta_{j}\right\rangle =P_{i,V}(\theta_{j})$ for $1\le j\le k$. \end{enumerate} \end{enumerate} \end{thm}
We make some remarks regarding the theorem. \begin{itemize} \item When $d=1$ and the system $\Phi$ is orientation preserving, Theorem \ref{thm:main} is easily seen to be equivalent to Theorems \ref{thm:li and sahl} and \ref{thm:bermont}. \item As the proof will show, if condition (\ref{enu:main thm first cond}) holds and condition (\ref{enu:main thm second cond}) is satisfied for some contracting $A\in\mathbf{H}$ with $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$, then there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ so that the corresponding self-similar measure is non-Rajchman. \item In condition (\ref{enu:main thm first cond}), since $\mathbf{N}$ is the kernel of the homomorphism sending $rU\in\mathbf{H}$ with $r>0$ and $U\in O(d')$ to $r$, it is obvious that $\mathbf{N}\triangleleft\mathbf{H}$. The statements regarding the finiteness of $\mathbf{N}$ and $\mathbf{H}/\mathbf{N}$ being cyclic are the interesting part of this condition. \item In condition (\ref{enu:main thm second cond}), note that by restricting to a suitable nonempty subset of $\{\theta_{1},...,\theta_{k}\}$ we may assume that $\theta_{1},...,\theta_{k}$ are conjugates over $\mathbb{Q}$. \item As we show in Example \ref{exa:dep on A} below, the parameters $k$ and $\theta_{1},...,\theta_{k}$ in condition (\ref{enu:main thm second cond}) may depend on the choice of $A$. On the other hand, it is not hard to show that if conditions (\ref{enu:main thm first cond}) and (\ref{enu:main thm second cond}) are satisfied and $A_{1},A_{2}\in\mathbf{H}$ are contractions with $\{A_{i}^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$ for $i=1,2$, then for every eigenvalue $\theta$ of $A_{1}$ there exists a root of unity $u$ so that $u\theta$ is an eigenvalue of $A_{2}$.
\item In condition (\ref{enu:main thm second cond}), since $A^{-1}$ is a member of $\mathbf{H}$ all of its eigenvalues have the same modulus. In particular $|\theta_{1}|=...=|\theta_{k}|$. \item Theorem \ref{thm:main} provides many explicit examples of affinely irreducible self-similar function systems for which there exists a positive probability vector so that the corresponding self-similar measure is non-Rajchman. In fact, for every $k\ge1$ and $\theta_{1},...,\theta_{k}\in\mathbb{C}$ such that $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple and
$|\theta_{1}|=...=|\theta_{k}|$, we can construct a corresponding self-similar IFS on $\mathbb{R}^{k}$ with these properties (see Example \ref{exa:many examples} below). \item For a system $\Phi$ satisfying conditions (\ref{enu:main thm first cond}) and (\ref{enu:main thm second cond}), it could be interesting to study the exceptional set of positive probability vectors for which the corresponding self-similar measure is Rajchman. As noted after the statement of Theorem \ref{thm:bermont}, this has been carried out by Brémont \cite{Br} in the case of orientation preserving systems on $\mathbb{R}$. \end{itemize}
{} $\quad$Theorem \ref{thm:main} can be used to verify the Rajchman property in many situations. For instance we have the following simple corollary. \begin{cor} Let $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ be an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$. Suppose that there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ such that the self-similar measure corresponding to $\Phi$ and $p$ is non-Rajchman. Then there exists an algebraic integer $\theta>1$ so that for every $1\le i\le\ell$ there exists a rational integer $n_{i}\ge1$ with $r_{i}=\theta^{-n_{i}}$. \end{cor}
\begin{proof} By Theorem \ref{thm:main} there exist $\mathbb{V}$ and $S$ as in the statement of the theorem. Let $A$ and $\theta_{1}$ be as in condition (\ref{enu:main thm second cond}), and note that $A=rU$
for some $0<r<1$ and $U\in O(d')$. Since $\theta_{1}^{-1}$ is an eigenvalue of $A$ we have $\Vert A\Vert=|\theta_{1}|^{-1}$, where $\Vert\cdot\Vert$ is the operator norm. From $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$ and $\Vert A\Vert<1$, it follows that for $1\le i\le\ell$ there exists $n_{i}\ge1$ and $V_{i}\in\mathbf{N}$ so that $r_{i}U_{i}'=A^{n_{i}}V_{i}$. Thus, \[
r_{i}=\Vert r_{i}U_{i}'\Vert=\Vert A^{n_{i}}V_{i}\Vert=\Vert A\Vert^{n_{i}}=|\theta_{1}|^{-n_{i}}\:. \] Since $\theta_{1}$ is a member of a P.V. $k$-tuple it is an algebraic integer. Since the modulus of an algebraic integer is still an algebraic integer, this completes the proof of the corollary. \end{proof}
\subsection{Examples} \begin{example} \label{exa:many examples}Let $k\ge1$ and $\theta_{1},...,\theta_{k}\in\mathbb{C}$
be such that $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple and $|\theta_{1}|=...=|\theta_{k}|$. In this example we show that it is possible to construct an affinely irreducible self-similar IFS $\Phi$ on $\mathbb{R}^{k}$ so that conditions (\ref{enu:main thm first cond}) and (\ref{enu:main thm second cond}) in Theorem \ref{thm:main} are satisfied with the parameters $k$ and $\theta_{1},...,\theta_{k}$, where we take $\mathbb{V}=\mathbb{R}^{k}$ and $S=Id$.
Since $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple, for every $1\le j_{1}\le k$ there exists $1\le j_{2}\le k$ so that $\theta_{j_{2}}=\overline{\theta_{j_{1}}}$. Thus, there exists $A\in GL_{k}(\mathbb{R})$ so that $\theta_{1},...,\theta_{k}$ are the eigenvalues of $A^{-1}$ and $A=rU$ for some $0<r<1$ and $U\in O(k)$. Let $\zeta_{1},...,\zeta_{k}\in\mathbb{C}^{k}$ be such that $\{\zeta_{1},...,\zeta_{k}\}$ is an orthonormal basis for $\mathbb{C}^{k}$, $A^{-1}\zeta_{j}=\theta_{j}\zeta_{j}$ for $1\le j\le k$, and $\zeta_{j_{2}}=\overline{\zeta_{j_{1}}}$ for $1\le j_{1},j_{2}\le k$ with $\theta_{j_{2}}=\overline{\theta_{j_{1}}}$. Set $\xi:=\sum_{j=1}^{k}\zeta_{j}$, so that $\xi\in\mathbb{R}^{k}$. Let $\Phi:=\{\varphi_{i}\}_{i=0}^{k}$ be the self-similar IFS on $\mathbb{R}^{k}$ with $\varphi_{0}(x)=Ax$ and $\varphi_{i}(x)=Ax+A^{1-i}\xi$ for $1\le i\le k$.
Let us show that $\Phi$ is affinely irreducible. Denote the attractor of $\Phi$ by $K$. Let $y_{0}$ be the zero vector of $\mathbb{R}^{k}$, and for $1\le i\le k$ write $y_{i}:=(I-A)^{-1}A^{1-i}\xi$. For $0\le i\le k$ we have $\varphi_{i}(y_{i})=y_{i}$, and so $y_{0},...,y_{k}\in K$. The matrix $(\left\langle A^{1-i}\xi,\zeta_{j}\right\rangle )_{i,j=1}^{k}$ is equal to the Vandermonde matrix $\{\theta_{j}^{i-1}\}_{i,j=1}^{k}$, and so its determinant is nonzero. It follows that $\{A^{1-i}\xi\}_{i=1}^{k}$ are linearly independent, and so $\{y_{i}\}_{i=1}^{k}$ are also linearly independent. This shows that the affine span of $K$ is equal to $\mathbb{R}^{k}$, which implies that $\Phi$ is affinely irreducible.
It is obvious that condition (\ref{enu:main thm first cond}) in Theorem \ref{thm:main} is satisfied with $\mathbf{N}=\{Id\}$. Moreover, for $1\le i,j\le k$ we have $\left\langle \varphi_{i}(0),\zeta_{j}\right\rangle =\theta_{j}^{i-1}$. From this, and since $\left\langle \varphi_{0}(0),\zeta_{j}\right\rangle =0$ for $1\le j\le k$, it follows that condition (\ref{enu:main thm second cond}) is also satisfied. From Theorem \ref{thm:main} we now get that there exists a probability vector $p=(p_{i})_{i=1}^{k}>0$ so that the self-similar measure corresponding to $\Phi$ and $p$ is non-Rajchman. \end{example}
\begin{example} \label{exa:dep on A}The purpose of this example is to show that the parameters $k$ and $\theta_{1},...,\theta_{k}$, appearing in condition (\ref{enu:main thm second cond}) in Theorem \ref{thm:main}, may depend on the choice of $A$. Set $r_{1}=r_{2}=1/2$, let $U_{1}$ be the identity map of $\mathbb{R}^{2}$, let $U_{2}\in O(2)$ be a planar rotation of angle $\pi/2$ (i.e. $U_{2}(x_{1},x_{2})=(-x_{2},x_{1})$), set $a_{1}=(1,0)$ and $a_{2}=0$, and set $\varphi_{i}(x)=r_{i}U_{i}x+a_{i}$ for $i=1,2$ and $x\in\mathbb{R}^{2}$. It is easy to verify that the IFS $\Phi:=\{\varphi_{1},\varphi_{2}\}$ is affinely irreducible.
Let $\mathbf{H}\subset GL_{2}(\mathbb{R})$ be the group generated by $r_{1}U_{1}$ and $r_{2}U_{2}$, and set $\mathbf{N}:=\mathbf{H}\cap O(2)$. We have $\mathbf{N}=\{U_{2}^{l}\}_{l=1}^{4}$, and for $A_{1}:=r_{1}U_{1}$ and $A_{2}:=r_{2}U_{2}$ it holds that $\{A_{1}^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\{A_{2}^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$. Thus, condition (\ref{enu:main thm first cond}) of Theorem \ref{thm:main} is satisfied (with $\mathbb{V}=\mathbb{R}^{2}$ and $S=Id$). It is also easy to verify that if we take $k=1$, $\theta_{1}=2$ and $\zeta_{1}=(1,0)$ then condition (\ref{enu:main thm second cond}) holds with respect to $A_{1}$, and if we take $k=2$, $\theta_{1}=2i$, $\theta_{2}=-2i$, $\zeta_{1}=(1,i)$ and $\zeta_{2}=(1,-i)$ then condition (\ref{enu:main thm second cond}) holds with respect to $A_{2}$. Moreover, since $2i$ and $-2i$ are conjugates over $\mathbb{Q}$, with $k=1$ condition (\ref{enu:main thm second cond}) cannot hold with respect to $A_{2}$. This shows that the parameters $k$ and $\theta_{1},...,\theta_{k}$ depend on the choice of $A$. \end{example}
\begin{example} \label{exa:interesting}The purpose of this example is to construct a P.V. $k$-tuple $\{\theta_{1},...,\theta_{k}\}$ such that $k\ge3$, $\theta_{1},...,\theta_{k}$ are all conjugates over $\mathbb{Q}$,
$|\theta_{1}|=...=|\theta_{k}|$, and for every $1\le j_{1}<j_{2}\le k$ the number $\theta_{j_{1}}\theta_{j_{2}}^{-1}$ is not a root of unity.
A polynomial $P\in\mathbb{Z}[X]$ of degree $n$ is said to be reciprocal if $P(X)=X^{n}P(X^{-1})$. In this case the roots of $P$ fall into reciprocal pairs, that is $z^{-1}$ is a root of $P$ whenever $z\in\mathbb{C}$ is a root of $P$. We say that $P\in\mathbb{Z}[X]$ is a Salem polynomial if it is the minimal polynomial of a Salem number. This means that
$P$ is irreducible, monic, reciprocal, it has degree at least $4$, there exists $s>1$ with $P(s)=0$, and $|z|=1$ for every $z\in\mathbb{C}\setminus\{s,s^{-1}\}$ with $P(z)=0$. The number $s$ is called a Salem number.
Let $m\ge4$ be even, and let $P$ be a Salem polynomial of degree $2m$. For example, we can take $P(X)$ to be $X^{8}-X^{5}-X^{4}-X^{3}+1$. Let $z_{1}>1$ be the Salem number corresponding to $P$, and let $z_{2},...,z_{m}$ be the roots of $P$ located on the upper half of the unit circle in $\mathbb{C}$. Set $I:=\{1,...,m\}$, and for $J\subset I$ write $\theta_{J}:=\Pi_{j\in J}z_{j}\cdot\Pi_{j\in I\setminus J}z_{j}^{-1}$. The Galois group of $P$ is analysed in \cite[Theorem 1.1]{CM}. From that result it follows that $\{\theta_{J}\}_{J\subset I}$ is a complete set of algebraic conjugates over $\mathbb{Q}$.
Let $\mathbb{F}\subset\mathbb{C}$ be the splitting field of $P$ over $\mathbb{Q}$. Note that given $j_{0}\in I$, $J_{1},J_{2}\subset I$ with $j_{0}\in J_{1}\setminus J_{2}$, and an automorphism $\sigma:\mathbb{F}\rightarrow\mathbb{F}$
with $\sigma(z_{j_{0}})=z_{1}$, we have $|\sigma(\theta_{J_{1}})|=z_{1}>1$
and $|\sigma(\theta_{J_{2}})|=z_{1}^{-1}<1$. Since $\sigma(u)$ is a root of unity whenever $u\in\mathbb{F}$ is a root of unity, it follows that $\theta_{J_{1}}\ne\theta_{J_{2}}e^{2\pi iq}$ for all distinct $J_{1},J_{2}\subset I$ and $q\in\mathbb{Q}$. This shows that $\{\theta_{J}\::\:1\in J\subset I\}$ is a P.V. $2^{m-1}$-tuple, and that it satisfies the required properties. \end{example}
\subsection{\label{subsec:About-the-proof}About the proof}
Let $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ be an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$. Most of the proof of Theorem \ref{thm:main} deals with the direction in which $\Phi$ is assumed to generate a non-Rajchman measure. We present a general outline of the argument for this direction. Everything will be repeated in a rigorous manner in later parts of the paper.
Let $p=(p_{i})_{i=1}^{\ell}$ be a positive probability vector, let $\mu$ be the self-similar measure corresponding to $\Phi$ and $p$, and suppose that $\mu$ is non-Rajchman. Write $G\subset\mathbb{R}\times O(d)$ for the closed subgroup generated by $\{(\log r_{i}^{-1},U_{i})\}_{i=1}^{\ell}$. For $(t,U)=g\in G$ set $\psi g=t$. Since $\psi$ is a proper continuous map, $\psi(G)$ is a closed subgroup of $\mathbb{R}$. Let $\gamma:\psi(G)\rightarrow G$ be a continuous homomorphism with $\psi\circ\gamma=Id$. We define a right action of $G$ on $\mathbb{R}^{d}$ by setting $x.(t,U):=2^{-t}U^{-1}x$ for $(t,U)\in G$ and $x\in\mathbb{R}^{d}$.
Let $X_{1},X_{2},...$ be i.i.d. $G$-valued random elements with $\mathbb{P}\{X_{1}=(\log r_{i}^{-1},U_{i})\}=p_{i}$ for $1\le i\le\ell$, and for $n\ge1$ set $Y_{n}:=X_{1}\cdot...\cdot X_{n}$. For $t>0$ denote by $\tau_{t}$ the stopping time which is equal to the smallest $n\ge1$ for which $\psi Y_{n}\ge t$. Using a result obtained in \cite{BDGHU}, which extends the classical renewal theorem, we show that as $t\rightarrow\infty$ the random elements $\gamma_{-t}Y_{\tau_{t}}$ converge in distribution to a probability measure $\nu$ on $G$ which is absolutely continuous with respect to the Haar measure of $G$. This key fact will be used several times during the paper. In particular, we use it to prove the following lemma. \begin{lem} \label{lem:ATP ini up bd}For every $\epsilon>0$ there exists $T>1$ such that the following holds. Let $t\ge T$ be with $t\in\psi(G)$
and let $\xi\in\mathbb{R}^{d}$ be with $|\xi|\le\epsilon^{-1}$, then \[
|\widehat{\mu}(\xi.\gamma_{-t})|^{2}\le\epsilon+\int\int\left|\int e^{i\left\langle \xi.g,x-y\right\rangle }d\nu(g)\right|\:d\mu(x)\:d\mu(y)\:. \] \end{lem}
This lemma is inspired by the argument in \cite{LS} used in the proof of Theorem \ref{thm:li and sahl}. The lemma is only useful when the group $G$ is nondiscrete.
As we show below, from the affine irreducibility of $\Phi$ it follows that $\mu(\mathbb{V})=0$ for every proper affine subspace $\mathbb{V}$ of $\mathbb{R}^{d}$. Using this fact we prove the following lemma. \begin{lem} \label{lem:ATP ub int sig of fur of curve mass}For every $\epsilon>0$
there exists $S>1$ so that the following holds. Let $s\ge S$ and let $c:[0,1]\rightarrow\mathbb{R}^{d}$ be a smooth curve with $|c'(t)|\ge\epsilon$
and $|c''(t)|\le\epsilon^{-1}$ for all $0\le t\le1$, then \[
\int\int\left|\int_{0}^{1}e^{is\left\langle c(t),x-y\right\rangle }\:dt\right|\:d\mu(x)\:d\mu(y)<\epsilon\:. \] \end{lem}
Now it is not difficult to show that $\psi(G)\ne\mathbb{R}$. Indeed, assuming this is not the case we can represent $\nu$ as an average of smooth $1$-dimensional probability measures, each of which is supported on a single coset of the subgroup $\gamma(\mathbb{R})$. By using this decomposition together with Lemmata \ref{lem:ATP ini up bd} and \ref{lem:ATP ub int sig of fur of curve mass}, we show that $\mu$ must be Rajchman which contradicts our assumption.
Next we want to make a reduction from the case in which $G$ is nondiscrete with $\psi(G)\ne\mathbb{R}$, to the case in which $G$ is discrete. For this we need to choose appropriately the subspace $\mathbb{V}\subset\mathbb{R}^{d}$ appearing in the statement of Theorem \ref{thm:main}. Denote by $G_{0}$ the connected component of $G$ containing the identity element. We choose $\mathbb{V}$ to be the linear subspace consisting of all $x\in\mathbb{R}^{d}$ so that $x.g=x$ for all $g\in G_{0}$. From $G_{0}\triangleleft G$ it follows that $x.g\in\mathbb{V}$ for $x\in\mathbb{V}$ and $g\in G$, which implies that $U_{i}(\mathbb{V})=\mathbb{V}$ for $1\le i\le\ell$.
Note that from $\psi(G)\ne\mathbb{R}$ it follows that the connected Lie group $G_{0}$ is contained in the compact group $\{0\}\times O(d)$. By using this fact, by representing $\nu$ as an average of certain smooth $1$-dimensional measures, and by applying Lemmata \ref{lem:ATP ini up bd} and \ref{lem:ATP ub int sig of fur of curve mass} once more, we prove the following proposition. It will enable us to perform the aforementioned reduction. \begin{prop} \label{prop:ATP reduc}For every $\epsilon>0$ there exists $R>1$
so that $|\widehat{\mu}(\xi)|<\epsilon$ for every $\xi\in\mathbb{R}^{d}$
with $|\pi_{\mathbb{V}^{\perp}}\xi|\ge\max\{R,\epsilon|\pi_{\mathbb{V}}\xi|\}$. \end{prop}
Next we consider the case in which $G$ is discrete. Clearly $\psi(G)\ne\mathbb{R}$ in this case, and so $\psi(G)=\beta\mathbb{Z}$ for some $\beta>0$. Let $U\in O(d)$ be with $(\beta,U)\in G$, and set $A=2^{-\beta}U$. Under the additional technical assumption $a_{1}=0$, we show that condition (\ref{enu:main thm second cond}) in the statement of Theorem \ref{thm:main} holds for the matrix $A$. The proof is a nontrivial extension of the argument used in \cite{VY} for the direction of Theorem \ref{thm:bermont} in which the IFS is assumed to generate a non-Rajchman measure. One of the main ingredients of that argument is a classical theorem of Pisot. This theorem says that if $\theta>1$ and $0\ne\lambda\in\mathbb{R}$ satisfy $\sum_{n\ge0}\Vert\lambda\theta^{n}\Vert^{2}<\infty$, where $\Vert\cdot\Vert$ is the distance to the nearest integer, then $\theta$ is a Pisot number and $\lambda\in\mathbb{Q}(\theta)$. In our proof we shall need to use a generalisation of this result for P.V. $k$-tuples, which is basically contained in Pisot's original paper \cite{Pi}.
Observe that from Proposition \ref{prop:ATP reduc}, and since $\mu$ is not Rajchman, it follows that $d':=\dim\mathbb{V}>0$. Let $S:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ be an isometry. By using Proposition \ref{prop:ATP reduc}, the fact that $\mu$ is not Rajchman, and the self-similarity of $\mu$, we can show that $S\pi_{\mathbb{V}}\mu$ is also not Rajchman. The measure $S\pi_{\mathbb{V}}\mu$ is the self-similar measure corresponding to the self-similar IFS $\Phi':=\{S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}\}_{i=1}^{\ell}$ on $\mathbb{R}^{d'}$ and the probability vector $p$. Let $\mathbf{H}$ be the closed group generated by the linear parts of $\Phi'$, and set $\mathbf{N}:=\mathbf{H}\cap O(d')$. By using our choice of $\mathbb{V}$, it is not hard to show that $\mathbf{H}$ is discrete, $\mathbf{N}$ is finite and $\mathbf{H}/\mathbf{N}$ is cyclic. Moreover, we can choose the isometry $S$ so that the technical assumption $a_{1}=0$ is satisfied for the IFS $\Phi'$. At this point we complete the proof by applying to $\Phi'$ our result for the case in which $G$ is discrete.
\subsection*{Organisation of the paper}
In Section \ref{sec:Preliminaries} we develop notations and establish some basic properties of the group $G$. Assuming irreducibility, we also prove that self-similar measures vanish on proper affine subspaces. In Section \ref{sec:Renewal-theory-and first hit} we state the version of the renewal theorem for $G$, and derive the statement regarding the limit distribution of $\gamma_{-t}Y_{\tau_{t}}$. Section \ref{sec:The-nondiscrete-case} deals with the parts of the argument in which $G$ is assumed to be nondiscrete. In Section \ref{sec:The-discrete-case} we consider the case in which $G$ is discrete. In particular, in this section we construct non-Rajchman self-similar measures when $G$ is discrete and the IFS satisfies assumptions similar to condition (\ref{enu:main thm second cond}) in Theorem \ref{thm:main}. In Section \ref{sec:Proof-of-the main} we connect all the pieces, and complete the proof of Theorem \ref{thm:main}.
\section{\label{sec:Preliminaries}Preliminaries}
\subsection{\label{subsec:General-notations}General notations}
For an integer $m$ we write $\mathbb{Z}_{\ge m}:=\{m,m+1,...\}$. We use the notations $\mathbb{Z}_{>m}$, $\mathbb{Z}_{\le m}$ and $\mathbb{Z}_{<m}$ in a similar way.
Let $d\in\mathbb{Z}_{\ge1}$ be fixed. We denote the standard inner product of $\mathbb{R}^{d}$ or $\mathbb{C}^{d}$ by $\left\langle \cdot,\cdot\right\rangle $, that is \[ \left\langle z,w\right\rangle =\sum_{j=1}^{d}z_{j}\overline{w_{j}}\text{ for }z,w\in\mathbb{C}^{d}\:. \] For a linear subspace $\mathbb{V}$ of $\mathbb{R}^{d}$ or $\mathbb{C}^{d}$, the orthogonal projection onto $\mathbb{V}$ is denoted by $\pi_{\mathbb{V}}$. We write $\mathbb{V}^{\perp}$ for the orthogonal complement of $\mathbb{V}$. The orthogonal group of $\mathbb{R}^{d}$ is denoted by $O(d)$. Given a Borel probability measure $\sigma$ on $\mathbb{R}^{d}$ its Fourier transform $\widehat{\sigma}$ is defined by \[ \widehat{\sigma}(\xi):=\int e^{i\left\langle \xi,x\right\rangle }\:d\sigma(x)\text{ for }\xi\in\mathbb{R}^{d}\:. \]
For a locally compact Hausdorff space $X$, we write $C_{c}(X)$ for the space of continuous functions $f:X\rightarrow\mathbb{R}$ with compact support. We denote by $\mathcal{M}(X)$ the collection of all compactly supported Borel probability measures on $X$. If $Y$ is another topological space, $\sigma$ is a Borel measure on $X$, and $F:X\rightarrow Y$ is Borel measurable, then we write $F\sigma$ for the pushforward of $\sigma$ via $F$. That is, $F\sigma:=\sigma\circ F^{-1}$.
Throughout the paper $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ is an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$, so that $0<r_{i}<1$, $U_{i}\in O(d)$ and $a_{i}\in\mathbb{R}^{d}$ for $1\le i\le\ell$. We consider $\Phi$ as fixed, and so usually the dependence of various parameters on $\Phi$ will not be indicated. We denote by $K$ the attractor of $\Phi$, that is $K$ is the unique nonempty compact subset of $\mathbb{R}^{d}$ with \[ K=\cup_{i=1}^{\ell}\varphi_{i}(K)\:. \] Given a probability vector $p=(p_{i})_{i=1}^{\ell}$ there exists a unique $\mu\in\mathcal{M}(K)$ which satisfies the relation \begin{equation} \mu=\sum_{i=1}^{\ell}p_{i}\cdot\varphi_{i}\mu\:.\label{eq:SS relation} \end{equation} It is called the self-similar measure corresponding to $\Phi$ and $p$. We usually assume that $p_{i}>0$ for $1\le i\le\ell$, in which case we say that $p$ is positive and write $p>0$.
We sometimes write $\Lambda$ for the index set $\{1,...,\ell\}$, and denote the set of finite words over $\Lambda$ by $\Lambda^{*}$. Following \cite{BP}, we say that a finite set of words $\mathcal{W}\subset\Lambda^{*}$ is a minimal cut-set for $\Lambda^{*}$ if every infinite sequence in $\Lambda^{\mathbb{N}}$ has a unique prefix in $\mathcal{W}$. Given a group $Y$, indexed elements $\{y_{i}\}_{i=1}^{\ell}\subset Y$, and a word $i_{1}...i_{n}=w\in\Lambda^{*}$, we often write $y_{w}$ in place of $y_{i_{1}}\cdot...\cdot y_{i_{n}}$. For the empty word $\emptyset$ we write $y_{\emptyset}$ in place of $1_{Y}$, where $1_{Y}$ is the identity of $Y$. Note that if $\mathcal{W}$ is a minimal cut-set for $\Lambda^{*}$, then by the self-similarity relation (\ref{eq:SS relation}) \[ \mu=\sum_{w\in\mathcal{W}}p_{w}\cdot\varphi_{w}\mu\:. \]
For $1\le i\le\ell$ set \[ g_{i}:=(\log r_{i}^{-1},U_{i})\in\mathbb{R}\times O(d), \] where throughout the paper the base of the $\log$ function is always $2$. Let $G$ be the smallest closed subgroup of $\mathbb{R}\times O(d)$ containing the elements $\{g_{i}\}_{i=1}^{\ell}$. We always equip $G$ with the subspace topology inherited from $\mathbb{R}\times O(d)$. Since $G$ is a closed subgroup of the Lie group $\mathbb{R}\times O(d)$, it is itself a Lie group. We denote by $G_{0}$ the connected component of $G$ containing the identity element. We write, \[ x.(t,U):=2^{-t}U^{-1}x\text{ for }(t,U)\in G\text{ and }x\in\mathbb{R}^{d}, \] which defines a right action of $G$ on $\mathbb{R}^{d}$.
Let $\psi:G\rightarrow\mathbb{R}$ be the projection onto the first coordinate, that is $\psi(t,U)=t$ for $(t,U)\in G$, and write $N$ for the kernel of $\psi$. Since the homomorphism $\psi$ is continuous and proper, it is also a closed map. In particular $\psi(G)$ is a closed subgroup of $\mathbb{R}$. Given $T\in\mathbb{R}$ we write $G_{\le T}$ for the set $\psi^{-1}(-\infty,T]$, and use the notations $G_{<T}$, $G_{\ge T}$ and $G_{>T}$ in a similar way.
Let $\mathbf{m}_{\mathbb{R}}$ be the Lebesgue measure of $\mathbb{R}$. Let $\mathbf{m}_{\psi(G)}$ be the Haar measure of $\psi(G)$, normalized so that $\mathbf{m}_{\psi(G)}=\mathbf{m}_{\mathbb{R}}$ if $\psi(G)=\mathbb{R}$ and $\mathbf{m}_{\psi(G)}\{t\}=\beta$ for all $t\in\psi(G)$ if $\psi(G)=\beta\mathbb{Z}$ with $\beta>0$. We show in Corollary \ref{cor:G unimodular and m_G=00003D} below that $G$ is unimodular. It is easy to see that if $\mathbf{m}$ is a Haar measure for $G$ then $\psi\mathbf{m}$ is a Haar measure for $\psi(G)$. We denote by $\mathbf{m}_{G}$ the Haar measure of $G$, normalized so that $\psi\mathbf{m}_{G}=\mathbf{m}_{\psi(G)}$. Our choice of normalization for $\mathbf{m}_{G}$ can be explained by the version of the renewal theorem for $G$ stated below (see Section \ref{subsec:A-version-of ren thm G}).
\subsection{\label{subsec:Basic-properties-of G}Basic properties of $G$} \begin{lem} \label{lem:proper homo from R}There exists a continuous and proper homomorphism $\gamma:\psi(G)\rightarrow G$ such that $\psi\circ\gamma=Id$. If $\psi(G)=\mathbb{R}$, then $\gamma$ is smooth. If $\psi(G)=\beta\mathbb{Z}$ for $\beta>0$, then for any $g\in G$ with $\psi(g)=\beta$ it is possible to define $\gamma$ so that $\gamma(\beta)=g$. \end{lem}
\begin{proof} If $\psi(G)=\beta\mathbb{Z}$ for $\beta>0$ then the lemma is trivial. Given $g\in G$ with $\psi(g)=\beta$, we simply set $\gamma(\beta n)=g^{n}$ for $n\in\mathbb{Z}$. Clearly $\gamma$ satisfies the required properties.
Suppose next that $\psi(G)=\mathbb{R}$. Let $\mathfrak{g}$ and $\mathfrak{o}(d)$ be the Lie algebras of $G$ and $O(d)$ respectively. We may identify $\mathfrak{g}$ as a Lie subalgebra of $\mathbb{R}\times\mathfrak{o}(d)$. Since $\psi(G)=\mathbb{R}$, there exists $X\in\mathfrak{g}$ so that its projection onto the first coordinate of $\mathbb{R}\times\mathfrak{o}(d)$ is equal to $1$. Set $\gamma(t)=\exp(tX)$ for every $t\in\mathbb{R}$, where $\exp:\mathfrak{g}\rightarrow G$ is the exponential map of $G$. It is easy to check that $\gamma$ satisfies the required properties. \end{proof}
We consider the homomorphism $\gamma$ from the previous lemma as fixed. In later sections we shall often write $\gamma_{t}$ in place of $\gamma(t)$. Recall that we write $N$ for the kernel of $\psi$, and let $H$ denote the image of $\gamma$. Since $\gamma$ is continuous and proper, $H$ is a closed subgroup of $G$. Since $N\triangleleft G$, the subgroup $H$ acts on $N$ by conjugation. For $h\in H$ and $n\in N$ we write $n^{h}$ in place of $hnh^{-1}$. Let $N\rtimes H$ be the semidirect product of $N$ by $H$. That is, $N\rtimes H$ is the group whose underlying set is $N\times H$ with the following group operation, \[ (n_{1},h_{1})\cdot(n_{2},h_{2})=(n_{1}n_{2}^{h_{1}},h_{1}h_{2})\text{ for }(n_{1},h_{1}),(n_{2},h_{2})\in N\rtimes H\:. \] We equip $N$ and $H$ with the subspace topologies inherited from $G$, and $N\rtimes H$ with the product topology. It is easy to verify that this makes $N\rtimes H$ into a locally compact group. Let $F:N\rtimes H\rightarrow G$ be with $F(n,h)=nh$ for $(n,h)\in N\rtimes H$. \begin{lem} \label{lem:G splits and F iso}$G$ is a split extension of $N$ by $H$, that is $HN=G$ and $H\cap N=\{1_{G}\}$. Consequently, the map $F$ is an isomorphism of topological groups. \end{lem}
\begin{proof} For $g\in G$ we have $\psi(\gamma(\psi g)^{-1}g)=0$. Hence, \[ g=\gamma(\psi g)\cdot\gamma(\psi g)^{-1}g\in HN, \] which shows that $HN=G$. Next let $g\in H\cap N$, then $\psi g=0$ and there exists $t\in\mathbb{R}$ with $\gamma(t)=g$. Thus, \[ t=\psi(\gamma(t))=\psi g=0, \] and so $1_{G}=\gamma(0)=\gamma(t)=g$, which shows that $H\cap N=\{1_{G}\}$.
It is easy to verify that $F$ is a homomorphism. From $HN=G$ and $H\cap N=\{1_{G}\}$ it follows that $F$ is a group isomorphism. It is obvious that $F$ is continuous. It is also easy to see that $F$ is a proper map, and so it is a closed map. This shows that $F$ is an isomorphism of topological groups, and completes the proof of the lemma. \end{proof}
Since $N$ is a closed subgroup of $\{0\}\times O(d)$ it is compact. Let $\mathbf{m}_{N}$ be the Haar measure of $N$, normalized so that $\mathbf{m}_{N}(N)=1$. By Lemma \ref{lem:proper homo from R} the map $\gamma:\psi(G)\rightarrow H$ is an isomorphism of topological groups. Write $\mathbf{m}_{H}$ for $\gamma\mathbf{m}_{\psi(G)}$, so that $\mathbf{m}_{H}$ is a Haar measure for $H$. \begin{lem} \label{lem:haar for semi prod}$\mathbf{m}_{N}\times\mathbf{m}_{H}$ is a left and right Haar measure for $N\rtimes H$. \end{lem}
\begin{proof} Since $N$ is compact and $H$ is abelian, $\mathbf{m}_{N}$ and $\mathbf{m}_{H}$ are both left and right Haar measures. Let $(n_{0},h_{0})\in N\rtimes H$ and $f\in C_{c}(N\rtimes H)$ be given. Since $N$ is compact, the automorphism $n\rightarrow n^{h_{0}}$ preserves $\mathbf{m}_{N}$ (see e.g. \cite[Section 1.1]{Wa}). Thus, \begin{eqnarray*} \int f((n_{0},h_{0})\cdot(n,h))\:d\mathbf{m}_{N}\times\mathbf{m}_{H}(n,h) & = & \int\int f(n_{0}n^{h_{0}},h_{0}h)\:d\mathbf{m}_{N}(n)\:d\mathbf{m}_{H}(h)\\
& = & \int\int f(n_{0}n,h_{0}h)\:d\mathbf{m}_{N}(n)\:d\mathbf{m}_{H}(h)\\
& = & \int f(n,h)\:d\mathbf{m}_{N}\times\mathbf{m}_{H}(n,h), \end{eqnarray*} which shows that $\mathbf{m}_{N}\times\mathbf{m}_{H}$ is a left Haar measure for $N\rtimes H$. The proof that it is also a right Haar measure is even simpler, and is therefore omitted. \end{proof}
\begin{cor} \label{cor:G unimodular and m_G=00003D}$G$ is unimodular, and $\mathbf{m}_{G}=F(\mathbf{m}_{N}\times\mathbf{m}_{H})$. \end{cor}
\begin{proof} From Lemmata \ref{lem:G splits and F iso} and \ref{lem:haar for semi prod} it follows that $G$ is unimodular and that $F(\mathbf{m}_{N}\times\mathbf{m}_{H})$ is a Haar measure for $G$. For every $(n,h)\in N\rtimes H$ we have $\psi F(n,h)=\psi h$. Hence, \[ \psi F(\mathbf{m}_{N}\times\mathbf{m}_{H})=\psi\mathbf{m}_{H}=\psi\gamma\mathbf{m}_{\psi(G)}=\mathbf{m}_{\psi(G)}\:. \] Since $\mathbf{m}_{G}$ is the unique Haar measure for $G$ whose image under $\psi$ is equal to $\mathbf{m}_{\psi(G)}$ the corollary follows. \end{proof}
\subsection{Self-similar measures vanish on proper affine subspaces}
The following lemma is a consequence of the affine irreducibility of $\Phi$. It is well known, but since we could not find a proof in the existing literature we provide one for completeness. The lemma will be used in Section \ref{sec:The-nondiscrete-case} when we consider the case in which the group $G$ is nondiscrete. \begin{lem} \label{lem:0 mass aff sub}Let $p=(p_{i})_{i=1}^{\ell}>0$ be a probability vector, and let $\mu$ be the self-similar measure corresponding to $\Phi$ and $p$. Then $\mu(\mathbb{V})=0$ for every proper affine subspace $\mathbb{V}$ of $\mathbb{R}^{d}$. \end{lem}
\begin{proof} For $0\le k\le d$ denote by $\mathbf{A}_{k}$ the collection of all $k$-dimensional affine subspaces of $\mathbb{R}^{d}$. Let $m$ be the smallest nonnegative integer for which there exists $\mathbb{V}\in\mathbf{A}_{m}$ with $\mu(\mathbb{V})>0$. Assume by contradiction that $m<d$.
Set \[ \kappa:=\sup\{\mu(\mathbb{V})\::\:\mathbb{V}\in\mathbf{A}_{m}\}, \] and \[ \mathbf{M}=\{\mathbb{V}\in\mathbf{A}_{m}\::\:\mu(\mathbb{V})=\kappa\}\:. \] By the definition of $m$ we have $\kappa>0$. From this, since $\mu$ is a finite measure, and since $\mu(\mathbb{V})=0$ for all $\mathbb{V}\in\cup_{k=0}^{m-1}\mathbf{A}_{k}$, it follows that $\mathbf{M}$ is nonempty and finite. Let $\{\mathbb{V}_{j}\}_{j=1}^{s}$ be an enumeration of the elements in $\mathbf{M}$.
Set $\mathbb{Y}:=\cup_{j=1}^{s}\mathbb{V}_{j}$. For $1\le j\le s$, \[ \kappa=\mu(\mathbb{V}_{j})=\sum_{i=1}^{\ell}p_{i}\cdot\mu(\varphi_{i}^{-1}(\mathbb{V}_{j}))\:. \] By the definition of $\kappa$ we have $\mu(\varphi_{i}^{-1}(\mathbb{V}_{j}))\le\kappa$ for $1\le i\le\ell$, and so $\varphi_{i}^{-1}(\mathbb{V}_{j})\in\mathbf{M}$ for $1\le i\le\ell$. This implies that $\mathbb{Y}$ is invariant with respect to the maps in $\Phi$. From this and since $\mathbb{Y}$ is closed, it follows that the attractor $K$ is contained in $\mathbb{Y}$.
For every $1\le j_{1}<j_{2}\le s$ it holds that $\mathbb{V}_{j_{1}}\cap\mathbb{V}_{j_{2}}$ is either empty, or it is an affine subspace of dimension strictly less than $m$. Thus, by the definition of $m$ we have $\mu(\mathbb{V}_{j_{1}}\cap\mathbb{V}_{j_{2}})=0$ for $1\le j_{1}<j_{2}\le s$. Since $\mu$ is supported on $K$, this implies that $K$ is not contained in \[ \cup_{1\le j_{1}<j_{2}\le s}\mathbb{V}_{j_{1}}\cap\mathbb{V}_{j_{2}}\:. \] Hence, by reordering $\{\mathbb{V}_{j}\}_{j=1}^{s}$ if necessary, we may assume that there exists $x\in K\cap\mathbb{V}_{1}$ with $x\notin\mathbb{V}_{j}$ for $2\le j\le s$. From \[ \inf\{\mathrm{dist}(x,\mathbb{V}_{j})\::\:2\le j\le s\}>0 \] and \[ \inf\{\mathrm{diam}(\varphi_{w}(K))\::\:w\in\Lambda^{*}\text{ and }x\in\varphi_{w}(K)\}=0, \] it follows that there exists $w\in\Lambda^{*}$ such that $\varphi_{w}(K)\cap\mathbb{V}_{j}=\emptyset$ for $2\le j\le s$. From this and $\varphi_{w}(K)\subset K\subset\mathbb{Y}$, we obtain that $K\subset\varphi_{w}^{-1}(\mathbb{V}_{1})$. Since $\dim(\varphi_{w}^{-1}(\mathbb{V}_{1}))=m<d$, this contradicts the affine irreducibility of $\Phi$ and completes the proof of the lemma. \end{proof}
\section{\label{sec:Renewal-theory-and first hit}Renewal theory and first hitting distribution} \begin{defn} Given $q\in\mathcal{M}(G)$ we say that $q$ is adapted if the subgroup generated by the support of $q$ is dense in $G$. \end{defn}
Throughout this section we fix an adapted finitely supported probability measure $q$ on $G$ with $q(G_{>0})=1$, where recall that $G_{>0}:=\psi^{-1}(0,\infty)$. We write $\lambda$ in place of $\int\psi\:dq$, so that $\lambda>0$.
\subsection{\label{subsec:A-version-of ren thm G}A version of the renewal theorem for $G$}
Set $Q:=\sum_{n\ge0}q^{n}$, where $q^{n}$ is the $n$-fold convolution of $q$ with itself for $n\ge1$ and $q^{0}$ is the Dirac mass at $1_{G}$. Since $q(G_{>0})=1$, it is obvious that $Q$ is a Radon measure on $G$. For $g\in G$ let $L_{g}:G\rightarrow G$ be with $L_{g}g'=gg'$ for $g'\in G$, and note that $L_{g}Q:=Q\circ L_{g}^{-1}$ is also a Radon measure on $G$.
The following theorem follows directly from \cite[Theorem A.1]{BDGHU}. It extends the classical renewal theorem for closed subgroups of $\mathbb{R}$ (see e.g. \cite[Chapter 5]{Re}) to the group $G$. \begin{thm} \label{thm:renewal for G}Let $h_{1},h_{2},...\in G$ be with $\psi h_{n}\overset{n}{\rightarrow}-\infty$. Then for every $f:G\rightarrow\mathbb{R}$ which is Borel measurable, bounded, compactly supported and satisfies \[ \mathbf{m}_{G}\{g\in G\::\:f\text{ is not continuous at }g\}=0, \] we have \[ \underset{n\rightarrow\infty}{\lim}\:\int f\:dL_{h_{n}}Q=\lambda^{-1}\int f\:d\mathbf{m}_{G}\:. \] \end{thm}
\subsection{\label{subsec:Limit-distribution-of}First hitting distribution}
Let $X_{1},X_{2},...$ be i.i.d. $G$-valued random elements with distribution $q$. Set $Y_{0}:=1_{G}$, and for $n\ge1$ let $Y_{n}:=X_{1}\cdot...\cdot X_{n}$. For $t>0$ write, \[ \tau_{t}:=\inf\{n\ge1\::\:\psi Y_{n}\ge t\}\:. \] For $g\in G$ set, \[ \rho(g):=\lambda^{-1}\mathbb{P}\{\psi X_{1}>\psi g\ge0\}\:. \] Note that since $\lambda=\mathbb{E}[\psi X_{1}]$, and by our choice of $\mathbf{m}_{G}$, it follows that $\int\rho\:d\mathbf{m}_{G}=1$. Write $\nu$ in place of $\rho\:d\mathbf{m}_{G}$, so that $\nu\in\mathcal{M}(G)$.
Recall the homomorphism $\gamma:\psi(G)\rightarrow G$ from Lemma \ref{lem:proper homo from R}, and that for $t\in\psi(G)$ we often write $\gamma_{t}$ in place of $\gamma(t)$. The idea of the proof of the following proposition is based on \cite[Section 4, Proof of Theorem 3]{St}. \begin{prop} \label{prop:conv in dist}The random elements $\{\gamma_{-t}Y_{\tau_{t}}\}_{t\in\psi(G_{>0})}$ converge in distribution to $\nu$ as $t\rightarrow\infty$. That is, for every continuous and bounded $f:G\rightarrow\mathbb{C}$, \[ \underset{t\rightarrow\infty}{\lim}\:\mathbb{E}\left[f(\gamma_{-t}Y_{\tau_{t}})\right]=\int f\:d\nu\:. \] \end{prop}
For the proof we need the following lemma. \begin{lem} \label{lem:conv in dist first step}For $f\in C_{c}(G)$ we have, \[ \underset{t\rightarrow\infty}{\lim}\:\int1_{G_{<0}}(g)\int f(gh)\:dq(h)\:dL_{\gamma_{-t}}Q(g)=\frac{1}{\lambda}\int1_{G_{<0}}(g)\int f(gh)\:dq(h)\:d\mathbf{m}_{G}(g)\:. \] \end{lem}
\begin{proof} Let $f\in C_{c}(G)$, and for $g\in G$ set \[ \tilde{f}(g)=1_{G_{<0}}(g)\int f(gh)\:dq(h)\:. \] It is clear that $\tilde{f}$ is Borel measurable and bounded. Since $q$ and $f$ are compactly supported, so is $\tilde{f}$. The set of points at which $\tilde{f}$ is discontinuous is contained in the boundary of $G_{<0}$, which we denote by $\partial G_{<0}$. If $\psi(G)=\mathbb{R}$ then, \[ \mathbf{m}_{G}(\partial G_{<0})=\mathbf{m}_{G}(\psi^{-1}\{0\})=\mathbf{m}_{\mathbb{R}}\{0\}=0\:. \] If $\psi(G)\ne\mathbb{R}$ then $\partial G_{<0}=\emptyset$. From Theorem \ref{thm:renewal for G} and since $\psi\circ\gamma=Id$ we now get, \[ \underset{t\rightarrow\infty}{\lim}\:\int\tilde{f}\:dL_{\gamma_{-t}}Q=\lambda^{-1}\int\tilde{f}\:d\mathbf{m}_{G}, \] which completes the proof of the lemma. \end{proof}
\begin{proof}[Proof of Proposition \ref{prop:conv in dist}] Let $f\in C_{c}(G)$ be nonnegative and with $f(g)=0$ for $g\in G_{<0}$. Since $\nu(G_{<0})=0$, it suffices to show \begin{equation} \underset{t\rightarrow\infty}{\lim}\:\mathbb{E}\left[f(\gamma_{-t}Y_{\tau_{t}})\right]=\int f\:d\nu\:.\label{eq:suf to show conv in dist} \end{equation} Note that for $n\ge1$ the distribution of $Y_{n}$ is equal to $q^{n}$. Hence, for $t\in\psi(G_{>0})$ \begin{eqnarray*} \int1_{G_{<0}}(g)\int f(gh)\:dq(h)\:dL_{\gamma_{-t}}Q(g) & = & \sum_{n\ge0}\int1_{G_{<0}}(g)\int1_{G_{\ge0}}(gh)f(gh)\:dq(h)\:dL_{\gamma_{-t}}q^{n}(g)\\
& = & \sum_{n\ge0}\mathbb{E}\left[1_{G_{<0}}(\gamma_{-t}Y_{n})1_{G_{\ge0}}(\gamma_{-t}Y_{n+1})f(\gamma_{-t}Y_{n+1})\right]\\
& = & \sum_{n\ge0}\mathbb{E}\left[1_{\{\psi Y_{n}<t\}}1_{\{\psi Y_{n+1}\ge t\}}f(\gamma_{-t}Y_{n+1})\right]\\
& = & \sum_{n\ge0}\mathbb{E}\left[1_{\{\tau_{t}=n+1\}}f(\gamma_{-t}Y_{n+1})\right]=\mathbb{E}\left[f(\gamma_{-t}Y_{\tau_{t}})\right]\:. \end{eqnarray*} Moreover, by the right-invariance of $\mathbf{m}_{G}$ \begin{eqnarray*} \lambda^{-1}\int1_{G_{<0}}(g)\int f(gh)\:dq(h)\:d\mathbf{m}_{G}(g) & = & \lambda^{-1}\int f(g)\int1_{G_{\ge0}}(g)1_{G_{<0}}(gh^{-1})\:dq(h)\:d\mathbf{m}_{G}(g)\\
& = & \lambda^{-1}\int f(g)\mathbb{P}\{\psi X_{1}>\psi g\ge0\}\:d\mathbf{m}_{G}(g)\\
& = & \int f(g)\rho(g)\:d\mathbf{m}_{G}(g)=\int f\:d\nu\:. \end{eqnarray*} Thus, the equality (\ref{eq:suf to show conv in dist}) follows from Lemma \ref{lem:conv in dist first step}, which completes the proof of the proposition. \end{proof}
We shall need a uniform version of Proposition \ref{prop:conv in dist}. Define a metric $d_{op}$ on $G$ by setting \begin{equation} d_{op}((r,U),(s,V))=\Vert2^{-r}U^{-1}-2^{-s}V^{-1}\Vert\text{ for }(r,U),(s,V)\in G,\label{eq:def of d_op} \end{equation} where $\Vert\cdot\Vert$ is the operator norm. It is clear that the topology induced by $d_{op}$ is equal to the subspace topology inherited from $\mathbb{R}\times O(d)$. Given $C>0$, we say that $f:G\rightarrow\mathbb{C}$ is $C$-Lipschitz with respect to $d_{op}$ if \[
|f(g)-f(g')|\le Cd_{op}(g,g')\text{ for }g,g'\in G\:. \]
Observe that there exists a compact subset $B$ of $G$ so that $\nu(B)=1$ and $\mathbb{P}\{\gamma_{-t}Y_{\tau_{t}}\in B\}=1$ for all $t\in\psi(G_{>0})$. The following corollary follows directly from this, from Proposition \ref{prop:conv in dist}, and from \cite[Lemma A.3.3]{BP}. \begin{cor} \label{cor:conv in dist lip}For every $\epsilon>0$ there exists $T=T(q,\epsilon)>1$ so that the following holds. Let $f:G\rightarrow\mathbb{C}$ be $\epsilon^{-1}$-Lipschitz with respect to $d_{op}$, then \[
\left|\mathbb{E}\left[f(\gamma_{-t}Y_{\tau_{t}})\right]-\int f\:d\nu\right|<\epsilon\text{ for all }t\in\psi(G_{\ge T})\:. \] \end{cor}
\section{\label{sec:The-nondiscrete-case}The nondiscrete case}
In this section we consider the situation in which the group $G$ is nondiscrete. In Section \ref{subsec:The-case psi(G)=00003DR} we show that self-similar measures corresponding to positive probability vectors are always Rajchman whenever $\psi(G)=\mathbb{R}$. In Section \ref{subsec:Reduction-to-the disc case} we assume $\psi(G)\ne\mathbb{R}$, and prove a result which will enable us to make a reduction to the case in which $G$ is discrete.
Recall that $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ is an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$, and that $g_{i}=(\log r_{i}^{-1},U_{i})$ for $1\le i\le\ell$. Throughout this section let $p=(p_{i})_{i=1}^{\ell}$ be a fixed positive probability vector. Since we consider $p$ as fixed for this section, usually the dependence of various parameters on $p$ will not be indicated. Let $\mu$ be the self-similar measure corresponding to $\Phi$ and $p$. Let $\sigma\in\mathcal{M}(\mathbb{R}^{d})$ be defined by, \[ \sigma(f)=\int\int f(x-y)\:d\mu(x)\:d\mu(y)\text{ for }f\in C_{c}(\mathbb{R}^{d})\:. \]
Set \[ q:=\sum_{i=1}^{\ell}p_{i}\delta_{g_{i}}, \] where $\delta_{g_{i}}$ is the Dirac mass at $g_{i}$. By definition $G$ is the closed subgroup generated by the support of $q$, and so $q$ is adapted. As before, we write $\lambda$ in place of $\int\psi\:dq$. Let $\{X_{n}\}_{n\ge1}$, $\{Y_{n}\}_{n\ge0}$, $\{\tau_{t}\}_{t>0}$, $\rho:G\rightarrow[0,\infty)$ and $\nu\in\mathcal{M}(G)$ be as defined in Section \ref{subsec:Limit-distribution-of}, where we assume that these objects are defined with respect to the present choice of $q$.
\subsection{An initial upper bound}
The purpose of this subsection is to prove Lemma \ref{lem:initial upper bd}. Recall that for $(t,U)=g\in G$ and $x\in\mathbb{R}^{d}$ we write $x.g:=2^{-t}U^{-1}x$. \begin{lem} \label{lem:initial upper bd}For every $\epsilon>0$ there exists
$T>1$ such that the following holds. Let $t\in\psi(G_{\ge T})$ and let $\xi\in\mathbb{R}^{d}$ be with $|\xi|\le\epsilon^{-1}$, then \[
|\widehat{\mu}(\xi.\gamma_{-t})|^{2}\le\epsilon+\int\left|\int e^{i\left\langle \xi.g,x\right\rangle }d\nu(g)\right|\:d\sigma(x)\:. \] \end{lem}
We first need the following lemma, whose proof is similar to the proof of \cite[Lemma 3.1]{LS}. \begin{lem} \label{lem:ub on fur by double integral}For every $\xi\in\mathbb{R}^{d}$ and $t\in\psi(G_{>0})$, \[
|\widehat{\mu}(\xi)|^{2}\le\int\mathbb{E}\left[e^{i\left\langle \xi.Y_{\tau_{t}},x\right\rangle }\right]\:d\sigma(x)\:. \] \end{lem}
\begin{proof} Recall that $\Lambda:=\{1,...,\ell\}$, and that for a group $Z$, elements $\{z_{i}\}_{i=1}^{\ell}\subset Z$ and a word $i_{1}...i_{n}=w\in\Lambda^{*}$, we write $z_{w}$ in place of $z_{i_{1}}\cdot...\cdot z_{i_{n}}$. Let, \[ \mathcal{W}=\{i_{1}...i_{n}\in\Lambda^{*}\::\:\psi(g_{i_{1}...i_{n}})\ge t>\psi(g_{i_{1}...i_{n-1}})\}\:. \] Since $\mathcal{W}$ is a minimal cut-set (see Section \ref{subsec:General-notations}), \[ \mu=\sum_{w\in\mathcal{W}}p_{w}\cdot\varphi_{w}\mu\:. \] This implies, \[ \widehat{\mu}(\xi)=\sum_{w\in\mathcal{W}}p_{w}\int e^{i\left\langle \xi,\varphi_{w}(x)\right\rangle }\:d\mu(x). \] Thus by Jensen's inequality, \begin{eqnarray*}
|\widehat{\mu}(\xi)|^{2} & \le & \sum_{w\in\mathcal{W}}p_{w}\left|\int e^{i\left\langle \xi,\varphi_{w}(x)\right\rangle }\:d\mu(x)\right|^{2}\\
& = & \sum_{w\in\mathcal{W}}p_{w}\int e^{i\left\langle \xi,\varphi_{w}(x)\right\rangle }\:d\mu(x)\cdot\int e^{-i\left\langle \xi,\varphi_{w}(y)\right\rangle }\:d\mu(y)\\
& = & \int\int\sum_{w\in\mathcal{W}}p_{w}e^{i\left\langle \xi,r_{w}U_{w}(x-y)\right\rangle }\:d\mu(x)\:d\mu(y)\\
& = & \int\sum_{w\in\mathcal{W}}p_{w}e^{i\left\langle \xi.g_{w},x\right\rangle }\:d\sigma(x)\:. \end{eqnarray*} The lemma now follows since the distribution of $Y_{\tau_{t}}$ is equal to $\sum_{w\in\mathcal{W}}p_{w}\delta_{g_{w}}$. \end{proof}
\begin{proof}[Proof of Lemma \ref{lem:initial upper bd}] Let $\epsilon>0$ and let $t\in\psi(G_{>0})$ be large with respect to $\epsilon$, $p$ and $\Phi$. Fix $\xi\in\mathbb{R}^{d}$ with
$|\xi|\le\epsilon^{-1}$. By Lemma \ref{lem:ub on fur by double integral}, \[
|\widehat{\mu}(\xi.\gamma_{-t})|^{2}\le\int\mathbb{E}\left[e^{i\left\langle \xi.(\gamma_{-t}Y_{\tau_{t}}),x\right\rangle }\right]\:d\sigma(x)\:. \]
Recall the metric $d_{op}$ on $G$ defined in (\ref{eq:def of d_op}). Observe that $\sigma$ is supported on the compact set $K-K$, where
$K$ is the attractor of $\Phi$. From this and since $|\xi|\le\epsilon^{-1}$, there exists a constant $C>1$, which depends only on $\epsilon$ and $\Phi$, so that for every $x\in\mathrm{supp}(\sigma)$ the map which takes $g\in G$ to $e^{i\left\langle \xi.g,x\right\rangle }$ is $C$-Lipschitz with respect to $d_{op}$. Thus, by assuming that $t$ is large enough and by Corollary \ref{cor:conv in dist lip}, \[
|\widehat{\mu}(\xi.\gamma_{-t})|^{2}\le\epsilon+\int\left|\int e^{i\left\langle \xi.g,x\right\rangle }\:d\nu(g)\right|\:d\sigma(x), \] which completes the proof of the lemma. \end{proof}
\subsection{Average Fourier decay of measures on curves}
The purpose of this subsection is to prove Lemma \ref{lem:ub int sig of fur of curve mass}. It will enable us to make use of the upper bound obtained in the previous section.
For $y\in\mathbb{R}^{d}$ and $\delta>0$ we write $B(y,\delta)$ for the closed ball in $\mathbb{R}^{d}$ with centre $y$ and radius $\delta$. Let $\mathbb{RP}^{d-1}$ be the projective space of $\mathbb{R}^{d}$, and for $0\ne x\in\mathbb{R}^{d}$ write $\overline{x}\in\mathbb{RP}^{d-1}$ for the line spanned by $x$. Recall that for a linear subspace $\mathbb{V}\subset\mathbb{R}^{d}$ we denote its orthogonal projection by $\pi_{\mathbb{V}}$. \begin{lem} \label{lem:sig mass of strips}For every $\epsilon>0$ there exists $\delta>0$ such that, \[ \pi_{\overline{x}}\sigma(B(y,\delta))<\epsilon\text{ for all }\overline{x}\in\mathbb{RP}^{d-1}\text{ and }y\in\overline{x}\:. \] \end{lem}
\begin{proof} Assume by contradiction that the lemma fails for some $\epsilon>0$. Then for every $n\ge1$ there exist $\overline{x_{n}}\in\mathbb{RP}^{d-1}$ and $y_{n}\in\overline{x_{n}}$ so that $\pi_{\overline{x_{n}}}\sigma(B(y_{n},\frac{1}{n}))\ge\epsilon$. For every $n\ge1$ we have \[ B(y_{n},\frac{1}{n})\cap\pi_{\overline{x_{n}}}(\mathrm{supp}(\sigma))\ne\emptyset\:. \] From this, and since $\sigma$ is compactly supported, it follows that there exists $M>1$ so that $y_{1},y_{2},...\in B(0,M)$. Thus, there exist $\overline{x}\in\mathbb{RP}^{d-1}$, $y\in\overline{x}$ and an increasing sequence $\{n_{k}\}_{k\ge1}\subset\mathbb{Z}_{\ge1}$, so that $\overline{x_{n_{k}}}\overset{k}{\rightarrow}\overline{x}$ and $y_{n_{k}}\overset{k}{\rightarrow}y$.
For $\eta>0$ and sufficiently large $k\ge1$, \[ \pi_{\overline{x_{n_{k}}}}^{-1}(B(y_{n_{k}},\frac{1}{n_{k}}))\cap\mathrm{supp}(\sigma)\subset\pi_{\overline{x}}^{-1}(B(y,\eta))\:. \] Since $\pi_{\overline{x_{n_{k}}}}\sigma(B(y_{n_{k}},\frac{1}{n_{k}}))\ge\epsilon$, this implies $\pi_{\overline{x}}\sigma(B(y,\eta))\ge\epsilon$. Since this holds for all $\eta>0$ we have $\pi_{\overline{x}}\sigma\{y\}\ge\epsilon$. Hence by the definition of $\sigma$, \[ \epsilon\le\int\int1_{\pi_{\overline{x}}^{-1}\{y\}}(z-\xi)\:d\mu(z)\:d\mu(\xi)=\int\mu(\pi_{\overline{x}}^{-1}\{y+\pi_{\overline{x}}\xi\})\:d\mu(\xi)\:. \] Thus, there exists a proper affine subspace $\mathbb{V}$ of $\mathbb{R}^{d}$ so that $\mu(\mathbb{V})>0$. This contradicts Lemma \ref{lem:0 mass aff sub}, which completes the proof. \end{proof}
\begin{lem} \label{lem:fur of line curves}For every $\epsilon>0$ there exists $S>1$ so that the following holds. Let $s\ge S$ and let $u\in\mathbb{R}^{d}$
be with $|u|\ge1$, then \[
\int\left|\int_{0}^{1}e^{i\left\langle tu,sx\right\rangle }\:dt\right|\:d\sigma(x)<\epsilon\:. \] \end{lem}
\begin{proof}
Let $\epsilon>0$, let $\delta>0$ be small with respect to $\epsilon$, and let $s>4/(\delta\epsilon)$. Fix $u\in\mathbb{R}^{d}$ with $|u|\ge1$. By Lemma \ref{lem:sig mass of strips} we may assume that $\pi_{\overline{u}}\sigma(B(0,\delta))<\epsilon/2$. For every $x\in\mathbb{R}^{d}$ with $|\pi_{\overline{u}}x|\ge\delta$, \[
\left|\int_{0}^{1}e^{i\left\langle tu,sx\right\rangle }\:dt\right|=\left|\frac{1}{s\left\langle u,x\right\rangle }(e^{is\left\langle u,x\right\rangle }-1)\right|\le2s^{-1}\delta^{-1}<\epsilon/2\:. \] Hence, \[
\int\left|\int_{0}^{1}e^{i\left\langle tu,sx\right\rangle }\:dt\right|\:d\sigma(x)<\int1_{\{|\pi_{\overline{u}}x|\ge\delta\}}\left|\int_{0}^{1}e^{i\left\langle tu,sx\right\rangle }\:dt\right|\:d\sigma(x)+\frac{\epsilon}{2}<\epsilon, \] which completes the proof of the lemma. \end{proof}
\begin{lem} \label{lem:ub int sig of fur of curve mass}For every $\epsilon>0$
there exists $S>1$ so that the following holds. Let $s\ge S$ and let $c:[0,1]\rightarrow\mathbb{R}^{d}$ be a smooth curve with $|c'(t)|\ge\epsilon$
and $|c''(t)|\le\epsilon^{-1}$ for all $0\le t\le1$, then \[
\int\left|\int_{0}^{1}e^{i\left\langle c(t),sx\right\rangle }\:dt\right|\:d\sigma(x)<\epsilon\:. \] \end{lem}
\begin{proof} Let $\epsilon>0$ and let $s>1$ be large with respect to $\epsilon$ and $\mathrm{supp}(\sigma)$. Fix a smooth curve $c:[0,1]\rightarrow\mathbb{R}^{d}$
with $|c'(t)|\ge\epsilon$ and $|c''(t)|\le\epsilon^{-1}$ for all $0\le t\le1$. Let $n\ge1$ be such that $(n-1)^{3/2}<s\le n^{3/2}$. By assuming that $s$ is large enough, \begin{equation} \frac{s}{n}>\frac{1}{2}\frac{s}{n-1}>\frac{1}{2}(n-1)^{1/2}>\frac{1}{4}n^{1/2}\ge\frac{1}{4}s^{1/3}\:.\label{eq:s/n >} \end{equation}
Firstly, we have \begin{equation}
\int\left|\int_{0}^{1}e^{i\left\langle c(t),sx\right\rangle }\:dt\right|\:d\sigma(x)\le\sum_{k=0}^{n-1}\int\left|\int_{0}^{1/n}e^{i\left\langle c(t+\frac{k}{n}),sx\right\rangle }\:dt\right|\:d\sigma(x)\:.\label{eq:<=00003D sum int} \end{equation}
For $0\le k<n$ set $v_{k}=c(\frac{k}{n})$ and $u_{k}=c'(\frac{k}{n})$, and note that $|u_{k}|\ge\epsilon$. By Taylor's theorem it follows that for $0\le t\le1/n$, \[
|c(t+\frac{k}{n})-v_{k}-tu_{k}|\le d\frac{\Vert c''\Vert_{\infty}}{2}n^{-2}\le\frac{d}{2\epsilon n^{2}}\:. \] Hence for $x\in\mathrm{supp}(\sigma)$, \begin{eqnarray*}
\left|e^{i\left\langle c(t+\frac{k}{n}),sx\right\rangle }-e^{i\left\langle v_{k}+tu_{k},sx\right\rangle }\right| & \le & \left|\left\langle c(t+\frac{k}{n})-v_{k}-tu_{k},sx\right\rangle \right|\\
& \le & n^{3/2}|x|\cdot|c(t+\frac{k}{n})-v_{k}-tu_{k}|\\
& \le & \frac{d|x|}{2\epsilon n^{1/2}}\le\frac{d|x|}{2\epsilon s^{1/3}}\:. \end{eqnarray*} By taking $s$ to be large enough, we may assume that the last expression is at most $\epsilon$. Thus, \begin{eqnarray}
\sum_{k=0}^{n-1}\int\left|\int_{0}^{1/n}e^{i\left\langle c(t+\frac{k}{n}),sx\right\rangle }\:dt\right|\:d\sigma(x) & \le & \epsilon+\sum_{k=0}^{n-1}\int\left|\int_{0}^{1/n}e^{i\left\langle v_{k}+tu_{k},sx\right\rangle }\:dt\right|\:d\sigma(x)\nonumber \\
& = & \epsilon+\sum_{k=0}^{n-1}\int\left|\int_{0}^{1/n}e^{i\left\langle tu_{k},sx\right\rangle }\:dt\right|\:d\sigma(x)\:.\label{eq:sum in <=00003D} \end{eqnarray} Additionally, for every $0\le k<n$, \[
\int\left|\int_{0}^{1/n}e^{i\left\langle tu_{k},sx\right\rangle }\:dt\right|\:d\sigma(x)=\frac{1}{n}\int\left|\int_{0}^{1}e^{i\left\langle t\epsilon^{-1}u_{k},\epsilon sn^{-1}x\right\rangle }\:dt\right|\:d\sigma(x)\:. \]
From this, since $|\epsilon^{-1}u_{k}|\ge1$, by Lemma \ref{lem:fur of line curves}, by (\ref{eq:s/n >}), and by assuming that $s$ is large enough, \[
\int\left|\int_{0}^{1/n}e^{i\left\langle tu_{k},sx\right\rangle }\:dt\right|\:d\sigma(x)\le\epsilon/n\:. \] From this, (\ref{eq:<=00003D sum int}) and (\ref{eq:sum in <=00003D}), \[
\int\left|\int_{0}^{1}e^{i\left\langle c(t),sx\right\rangle }\:dt\right|\:d\sigma(x)\le2\epsilon, \] which completes the proof of the lemma. \end{proof}
\subsection{\label{subsec:The-case psi(G)=00003DR}The case $\psi(G)=\mathbb{R}$}
Recall that $\psi(t,U)=t$ for $(t,U)\in G$, that $N$ is the kernel of $\psi$, and that $\mathbf{m}_{N}$ is the Haar measure of $N$ normalized so that $\mathbf{m}_{N}(N)=1$. By reordering the maps $\{\varphi_{i}\}_{i=1}^{\ell}$ if necessary, we may assume that $\log r_{i}^{-1}\le\log r_{j}^{-1}$ for $1\le i<j\le\ell$. For $1\le i\le\ell$ set $b_{i}=\log r_{i}^{-1}$ and $\alpha_{i}=\sum_{k=i}^{\ell}p_{k}$, and write $b_{0}=0$. Let $\rho_{0}:\mathbb{R}\rightarrow[0,\infty)$ be such that, \[ \rho_{0}(t)=\begin{cases} 0 & \text{ for }t<0\text{ and }t\ge b_{\ell}\\ \alpha_{i}/\lambda & \text{ for }1\le i\le\ell\text{ and }b_{i-1}\le t<b_{i} \end{cases}\:. \]
\begin{lem} \label{lem:nu =00003D double int}Suppose that $\psi(G)=\mathbb{R}$. Then $\int\rho_{0}(t)\:dt=1$, and for every bounded and continuous $f:G\rightarrow\mathbb{C}$ \[ \nu(f)=\int\int f(\gamma_{t}n)\rho_{0}(t)\:dt\:d\mathbf{m}_{N}(n)\:. \] \end{lem}
\begin{proof} By the definition of $\rho$ (see Section \ref{subsec:Limit-distribution-of}) we have $\rho(g)=\rho_{0}(\psi g)$ for $g\in G$. Since $\psi(G)=\mathbb{R}$, and by the way we defined $\mathbf{m}_{G}$ (see Section \ref{subsec:General-notations}), it follows that $\psi\mathbf{m}_{G}$ is equal to the Lebesgue measure $\mathbf{m}_{\mathbb{R}}$. Thus, \[ 1=\int\rho\:d\mathbf{m}_{G}=\int\rho_{0}\:d\psi\mathbf{m}_{G}=\int\rho_{0}(t)\:dt\:. \] Let $f:G\rightarrow\mathbb{C}$ be bounded and continuous. By Corollary \ref{cor:G unimodular and m_G=00003D} and since $\mathbf{m}_{H}=\gamma\mathbf{m}_{\mathbb{R}}$, \begin{eqnarray*} \nu(f)=\int f\rho\:d\mathbf{m}_{G} & = & \int\int f(nh)\rho(nh)\:d\mathbf{m}_{N}(n)\:d\gamma\mathbf{m}_{\mathbb{R}}(h)\\
& = & \int\int f(n\gamma_{t})\rho_{0}(\psi(n\gamma_{t}))\:d\mathbf{m}_{N}(n)\:dt\:. \end{eqnarray*} Since $\psi\circ\gamma=Id$ (see Lemma \ref{lem:proper homo from R}) we have $\psi(n\gamma_{t})=t$ for $n\in N$ and $t\in\mathbb{R}$. Thus, \[ \nu(f)=\int\int f(\gamma_{t}(\gamma_{t}^{-1}n\gamma_{t}))\rho_{0}(t)\:d\mathbf{m}_{N}(n)\:dt\:. \] For every $t\in\mathbb{R}$ the map $n\rightarrow\gamma_{t}^{-1}n\gamma_{t}$ is a continuous automorphism of $N$. Since $N$ is compact this automorphism preserves $\mathbf{m}_{N}$. The lemma now follows from the last equality. \end{proof} \begin{prop} \label{prop:case Psi(G)=00003DR}Recall that $\mu$ is the self-similar measure corresponding to $\Phi$ and the positive probability vector $p$. Suppose that $\psi(G)=\mathbb{R}$, then $\mu$ is a Rajchman measure. That is, $\widehat{\mu}(\xi)\rightarrow0$ as $\xi\rightarrow\infty$. \end{prop}
\begin{proof} Let $\epsilon>0$, let $r>1$ be large with respect to $\epsilon$,
$\Phi$, $p$ and $\gamma$, and let $T>1$ be large with respect to $r$. Fix $\xi\in\mathbb{R}^{d}$ with $|\xi|\ge2^{T}r$. We prove the proposition by showing that $|\widehat{\mu}(\xi)|^{2}\le\epsilon$.
Write $\mathbb{S}^{d-1}$ for the unit sphere in $\mathbb{R}^{d}$. Let $u\in\mathbb{S}^{d-1}$ and $t\ge T$ be such that $\xi=2^{t}ru$. Note that since $\psi(G)=\mathbb{R}$, the domain of $\gamma$ is $\mathbb{R}$. Let $U\in O(d)$ be such that $\gamma_{-t}=(-t,U)$. By Lemma \ref{lem:initial upper bd}, and by assuming that $T$ is large enough with respect to $r$ and $\epsilon$, \[
|\widehat{\mu}(\xi)|^{2}=|\widehat{\mu}((rUu).\gamma_{-t})|^{2}\le\epsilon/2+\int\left|\int e^{i\left\langle (rUu).g,x\right\rangle }\:d\nu(g)\right|\:d\sigma(x)\:. \] From this, Lemma \ref{lem:nu =00003D double int} and the definition of $\rho_{0}$, \begin{eqnarray}
|\widehat{\mu}(\xi)|^{2} & \le & \epsilon/2+\int\int\left|\int e^{i\left\langle (Uu).(\gamma_{s}n),rx\right\rangle }\rho_{0}(s)\:ds\right|\:d\mathbf{m}_{N}(n)\:d\sigma(x)\nonumber \\
& \le & \epsilon/2+\int\sum_{j=1}^{\ell}\frac{\alpha_{j}}{\lambda}\int\left|\int_{b_{j-1}}^{b_{j}}e^{i\left\langle (Uu).(\gamma_{s}n),rx\right\rangle }\:ds\right|\:d\sigma(x)\:d\mathbf{m}_{N}(n)\:.\label{eq:triple int initial bd} \end{eqnarray} For $1\le j\le\ell$, $n\in N$ and $v\in\mathbb{S}^{d-1}$, let $c_{j,n}^{v}:[0,1]\rightarrow\mathbb{R}^{d}$ be such that \[ c_{j,n}^{v}(s):=v.(\gamma_{s(b_{j}-b_{j-1})+b_{j-1}}n)\:\text{ for }s\in[0,1]\:. \] From (\ref{eq:triple int initial bd}) we get, \begin{equation}
|\widehat{\mu}(\xi)|^{2}\le\epsilon/2+\int\sum_{j=1}^{\ell}\frac{\alpha_{j}(b_{j}-b_{j-1})}{\lambda}\int\left|\int_{0}^{1}e^{i\left\langle c_{j,n}^{Uu}(s),rx\right\rangle }\:ds\right|\:d\sigma(x)\:d\mathbf{m}_{N}(n)\:.\label{eq:triple int bd with c} \end{equation}
By Lemma \ref{lem:proper homo from R} and since $\psi(G)=\mathbb{R}$, it follows that $\gamma$ is smooth. Hence, the curves $c_{j,n}^{v}$ are also smooth. Let $C>1$ be large with respect to $\{b_{j}\}_{j=0}^{\ell}$
and the curve $\gamma$. For $x\in\mathbb{R}^{d}$ set $f(x)=|x|^{2}$. From $\psi\circ\gamma=Id$ and $N\subset\{0\}\times O(d)$, we get that for every $1\le j\le\ell$, $n\in N$, $v\in\mathbb{S}^{d-1}$ and $s\in[0,1]$, \[ f(c_{j,n}^{v}(s))=2^{-2(s(b_{j}-b_{j-1})+b_{j-1})}\:. \] By differentiating the last equality with respect to $s$ and by assuming that $C$ is sufficiently large, it follows that for every $1\le j\le\ell$ with $b_{j}>b_{j-1}$, \[
|\frac{d}{ds}c_{j,n}^{v}(s)|\ge C^{-1}\text{ for }n\in N,\:v\in\mathbb{S}^{d-1}\text{ and }s\in[0,1]\:. \] Additionally, by assuming that $C$ is sufficiently large, \[
|\frac{d^{2}}{ds^{2}}c_{j,n}^{v}(s)|\le C\text{ for }1\le j\le\ell,\:n\in N,\:v\in\mathbb{S}^{d-1}\text{ and }s\in[0,1]\:. \] Hence, from Lemma \ref{lem:ub int sig of fur of curve mass}, from (\ref{eq:triple int bd with c}), and by assuming as we may that $r$ is large enough with respect to $\epsilon$, $C$, $\{\alpha_{j}/\lambda\}_{j=1}^{\ell}$
and $\{b_{j}\}_{j=0}^{\ell}$, we get $|\widehat{\mu}(\xi)|^{2}\le\epsilon$. This completes the proof of the proposition. \end{proof}
\subsection{\label{subsec:Reduction-to-the disc case}Reduction to the discrete case}
Throughout this subsection we assume that $\psi(G)\ne\mathbb{R}$. Recall that we write $G_{0}$ for the connected component of $G$ containing the identity element. Since $G$ is a Lie group it is locally path connected, and so $G_{0}$ is an open and closed normal subgroup of $G$. From $\psi(G)\ne\mathbb{R}$ it follows that $\psi(G)$ is a discrete subgroup of $\mathbb{R}$. This implies that $N$ is also open and closed in $G$, and so $G_{0}\subset N$. In particular $G_{0}$ is compact.
Let $\mathbb{V}$ be the linear subspace of $\mathbb{R}^{d}$ consisting of all $x\in\mathbb{R}^{d}$ so that $x.g=x$ for all $g\in G_{0}$. Recall that $\mathbb{V}^{\perp}$ denotes the orthogonal complement of $\mathbb{V}$. \begin{lem} \label{lem:G inv subspaces}The subspaces $\mathbb{V}$ and $\mathbb{V}^{\perp}$ are $G$-invariant. That is, $v.g\in\mathbb{V}$ and $w.g\in\mathbb{V}^{\perp}$ for all $v\in\mathbb{V}$, $w\in\mathbb{V}^{\perp}$ and $g\in G$. \end{lem}
\begin{proof} Let $g_{0}\in G_{0}$, $g\in G$ and $v\in\mathbb{V}$ be given. Since $G_{0}\triangleleft G$, there exists $g_{0}'\in G_{0}$ with $gg_{0}=g_{0}'g$. Thus, \[ (v.g).g_{0}=v.(gg_{0})=v.(g_{0}'g)=(v.g_{0}').g=v.g\:. \] This shows that $v.g\in\mathbb{V}$, and so $\mathbb{V}$ is $G$-invariant. It is now obvious that $\mathbb{V}^{\perp}$ is also $G$-invariant. \end{proof}
The purpose of this subsection is to prove the following proposition. In Section \ref{sec:Proof-of-the main}, when we complete the proof of our main result, it will enable us to make a reduction to the case in which $G$ is discrete. \begin{prop} \label{prop:cont case psi(G) not R}Recall that $\mu$ is the self-similar measure corresponding to $\Phi$ and the positive probability vector $p$. Suppose that $\psi(G)\ne\mathbb{R}$. Then for every $\epsilon>0$
there exists $R=R(\epsilon,p)>1$ so that $|\widehat{\mu}(\xi)|<\epsilon$
for every $\xi\in\mathbb{R}^{d}$ with $|\pi_{\mathbb{V}^{\perp}}\xi|\ge\max\{R,\epsilon|\pi_{\mathbb{V}}\xi|\}$. \end{prop}
We start working towards the proof of the proposition. If $\mathbb{V}=\mathbb{R}^{d}$ then the proposition holds trivially, and so we may assume that $\dim\mathbb{V}^{\perp}>0$. Write $\mathbb{S}_{\mathbb{V}^{\perp}}$ for the unit sphere of $\mathbb{V}^{\perp}$. That is $\mathbb{S}_{\mathbb{V}^{\perp}}:=\mathbb{V}^{\perp}\cap\mathbb{S}^{d-1}$, where $\mathbb{S}^{d-1}$ is the unit sphere of $\mathbb{R}^{d}$.
Let $\mathfrak{g}_{0}$ be the Lie algebra of $G_{0}$. For the rest of this section, fix some compact neighbourhood $B_{\mathfrak{g}_{0}}$ of $0$ in $\mathfrak{g}_{0}$. That is, $B_{\mathfrak{g}_{0}}$ is a compact subset of $\mathfrak{g}_{0}$ and $0\in\mathrm{Int}(B_{\mathfrak{g}_{0}})$. Since $B_{\mathfrak{g}_{0}}$ is fixed, usually the dependence of various parameters on $B_{\mathfrak{g}_{0}}$ will not be indicated. For $X\in\mathfrak{g}_{0}$ and $y\in\mathbb{R}^{d}$ let $c_{X,y}:\mathbb{R}\rightarrow\mathbb{R}^{d}$ be such that \[ c_{X,y}(t)=y.\exp(tX)\text{ for }t\in\mathbb{R}, \] where $\exp:\mathfrak{g}_{0}\rightarrow G_{0}$ is the exponential map of $G_{0}$. \begin{lem}
\label{lem:lb on der of cur}There exists $\delta>0$ so that for every $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$ there exists $X\in B_{\mathfrak{g}_{0}}$, such that $|c_{X,y}'(t)|\ge\delta$ for all $t\in\mathbb{R}$. \end{lem}
\begin{proof} For $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$, $X\in B_{\mathfrak{g}_{0}}$ and $t,s\in\mathbb{R}$, \[ c_{X,y}(t+s)=(y.\exp(sX)).\exp(tX)=c_{X,y}(s).\exp(tX)\:. \] Differentiating with respect to $s$ at $s=0$ we get \begin{equation} c_{X,y}'(t)=c_{X,y}'(0).\exp(tX)\:.\label{eq:inv of der} \end{equation}
Since $\exp(tX)\in G_{0}\subset N$ and $N\subset\{0\}\times O(d)$, it follows that $|c_{X,y}'(t)|=|c_{X,y}'(0)|$. Thus, it suffices to show that there exists $\delta>0$ so that \begin{equation}
\text{for every }y\in\mathbb{S}_{\mathbb{V}^{\perp}}\text{ there exists }X\in B_{\mathfrak{g}_{0}}\text{ with }|c_{X,y}'(0)|\ge\delta.\label{eq:cont assum} \end{equation}
Assume by contradiction that such a $\delta$ does not exist. Let $M:\mathbb{S}_{\mathbb{V}^{\perp}}\rightarrow[0,\infty)$ be with \[
M(y)=\max_{X\in B_{\mathfrak{g}_{0}}}\:|c_{X,y}'(0)|\text{ for }y\in\mathbb{S}_{\mathbb{V}^{\perp}}\:. \] Since there does not exist $\delta>0$ which satisfies (\ref{eq:cont assum}), we have \begin{equation} \inf_{y\in\mathbb{S}_{\mathbb{V}^{\perp}}}\:M(y)=0\:.\label{eq:inf Q =00003D 0} \end{equation} Since $B_{\mathfrak{g}_{0}}$ is compact and since the map which takes $(X,y)\in\mathfrak{g}_{0}\times\mathbb{S}_{\mathbb{V}^{\perp}}$ to
$|c_{X,y}'(0)|$ is continuous, it is easy to check that $M$ is also continuous. From this, from (\ref{eq:inf Q =00003D 0}) and since $\mathbb{S}_{\mathbb{V}^{\perp}}$ is compact, it follows that there exists $y_{0}\in\mathbb{S}_{\mathbb{V}^{\perp}}$ so that $M(y_{0})=0$. That is, $c_{X,y_{0}}'(0)=0$ for all $X\in B_{\mathfrak{g}_{0}}$. Hence, by (\ref{eq:inv of der}) we have for all $t\in\mathbb{R}$ \[ c_{X,y_{0}}'(t)=c_{X,y_{0}}'(0).\exp(tX)=0\:. \] This shows that for all $X\in B_{\mathfrak{g}_{0}}$ and $t\in\mathbb{R}$, \[ y_{0}=c_{X,y_{0}}(0)=c_{X,y_{0}}(t)=y_{0}.\exp(tX)\:. \] Since $0\in\mathrm{Int}(B_{\mathfrak{g}_{0}})$ we have $\mathfrak{g}_{0}=\cup_{t>0}tB_{\mathfrak{g}_{0}}$, and so $y_{0}=y_{0}.\exp(X)$ for all $X\in\mathfrak{g}_{0}$. Additionally, since $G_{0}$ is a compact and connected Lie group its exponential map is surjective (see e.g. \cite[Corollary 11.10]{Ha}), that is $\exp(\mathfrak{g}_{0})=G_{0}$. Thus $y_{0}=y_{0}.g$ for all $g\in G_{0}$, which gives $y_{0}\in\mathbb{V}$. This contradicts $y_{0}\in\mathbb{S}_{\mathbb{V}^{\perp}}$ and completes the proof of the lemma. \end{proof}
Given a compact Hausdorff topological group $Y$, let $\mathbf{m}_{Y}$ be its Haar measure normalized so that $\mathbf{m}_{Y}(Y)=1$. The following equidistribution lemma is standard. We provide the simple proof for completeness. \begin{lem} \label{lem:equi dist of flow}Let $X\in\mathfrak{g}_{0}$, and let $G_{1}$ be the smallest closed subgroup of $G_{0}$ containing $\{\exp(tX)\}_{t\in\mathbb{R}}$. Then for every $f\in C_{c}(G_{1})$, \[ \mathbf{m}_{G_{1}}(f)=\underset{T\rightarrow\infty}{\lim}\frac{1}{T}\int_{0}^{T}f(\exp(tX))\:dt\:. \] \end{lem}
\begin{proof} Since $G_{1}$ is compact and separable, $\mathcal{M}(G_{1})$ is compact and metrizable with respect to the weak-{*} topology. For $T>0$ let $\alpha_{T}\in\mathcal{M}(G_{1})$ be such that, \[ \alpha_{T}(f)=\frac{1}{T}\int_{0}^{T}f(\exp(tX))\:dt\text{ for }f\in C_{c}(G_{1})\:. \] Let $\{T_{k}\}_{k\ge1}\subset(0,\infty)$ and $\alpha\in\mathcal{M}(G_{1})$ be with $T_{k}\overset{k}{\rightarrow}\infty$ and $\alpha_{T_{k}}\overset{k}{\rightarrow}\alpha$ in the weak-{*} topology. In order to prove the lemma it suffices to show that $\alpha=\mathbf{m}_{G_{1}}$.
Let $s\in\mathbb{R}$, and write $g=\exp(sX)$. Recall that we write $L_{g}h=gh$ for $h\in G$. For $f\in C_{c}(G_{1})$, \begin{eqnarray*}
|\alpha(f)-L_{g}\alpha(f)| & = & \underset{k\rightarrow\infty}{\lim}\:|\alpha_{T_{k}}(f)-\alpha_{T_{k}}(f\circ L_{g})|\\
& = & \underset{k\rightarrow\infty}{\lim}\:\frac{1}{T_{k}}\left|\int_{0}^{T_{k}}f(\exp(tX))\:dt-\int_{s}^{s+T_{k}}f(\exp(tX))\:dt\right|=0\:. \end{eqnarray*} Thus $\alpha=L_{g}\alpha$ for all $g\in\{\exp(tX)\}_{t\in\mathbb{R}}$. Since $G_{1}$ is the closure of $\{\exp(tX)\}_{t\in\mathbb{R}}$, it follows that $\alpha=L_{g}\alpha$ for all $g\in G_{1}$. Hence $\alpha$ is a Haar measure for $G_{1}$. Since it is also a probability measure we get $\alpha=\mathbf{m}_{G_{1}}$, which completes the proof of the lemma. \end{proof}
\begin{lem} \label{lem:exists G_1}For every $\epsilon>0$ there exists $R>1$ so that the following holds. Let $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$, then there exists a closed subgroup $G_{1}$ of $G_{0}$ so that for all $r\ge R$ and $g_{0}\in G_{0}$, \[
\int\left|\int e^{i\left\langle y.gg_{0},rx\right\rangle }\:d\mathbf{m}_{G_{1}}(g)\right|\:d\sigma(x)<\epsilon\:. \] \end{lem}
\begin{proof} It follows from (\ref{eq:inv of der}) that for $X\in B_{\mathfrak{g}_{0}}$, $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$ and $s,t\in\mathbb{R}$, \[ c_{X,y}'(t+s)=(c_{X,y}'(0).\exp(sX)).\exp(tX)=c_{X,y}'(s).\exp(tX)\:. \]
Differentiating with respect to $s$ at $s=0$ and using $\exp(tX)\in G_{0}\subset\{0\}\times O(d)$, we get $|c_{X,y}''(t)|=|c_{X,y}''(0)|$. From this and since $B_{\mathfrak{g}_{0}}$
and $\mathbb{S}_{\mathbb{V}^{\perp}}$ are compact, it follows that there exists a constant $C>1$, depending only on $B_{\mathfrak{g}_{0}}$, so that $|c_{X,y}''(t)|\le C$ for all $X\in B_{\mathfrak{g}_{0}}$, $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$ and $t\in\mathbb{R}$.
Let $\delta>0$ be as obtained in Lemma \ref{lem:lb on der of cur}. Let $\epsilon>0$, and let $r>1$ be large with respect to $\epsilon$,
$\delta$ and $C$. Let $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$, then there exists $X\in B_{\mathfrak{g}_{0}}$ so that $|c_{X,y}'(t)|\ge\delta$ for all $t\in\mathbb{R}$. Let $G_{1}$ be the smallest closed subgroup of $G_{0}$ containing $\{\exp(tX)\}_{t\in\mathbb{R}}$. For $g_{0}\in G_{0}$ and $k\in\mathbb{Z}_{\ge0}$, let $c_{X,y}^{g_{0},k}:[0,1]\rightarrow\mathbb{R}^{d}$ be with \[ c_{X,y}^{g_{0},k}(t)=c_{X,y}(k+t).g_{0}\text{ for }t\in[0,1]\:. \] Let $g_{0}\in G_{0}$, then by Lemma \ref{lem:equi dist of flow}, \begin{eqnarray}
\int\left|\int e^{i\left\langle y.gg_{0},rx\right\rangle }\:d\mathbf{m}_{G_{1}}(g)\right|\:d\sigma(x) & = & \underset{T\rightarrow\infty}{\lim}\frac{1}{T}\int\left|\int_{0}^{T}e^{i\left\langle c_{X,y}(t).g_{0},rx\right\rangle }\:dt\right|\:d\sigma(x)\nonumber \\
& \le & \underset{M\rightarrow\infty}{\limsup}\frac{1}{M}\sum_{k=0}^{M-1}\int\left|\int_{0}^{1}e^{i\left\langle c_{X,y}^{g_{0},k}(t),rx\right\rangle }\:dt\right|\:d\sigma(x)\:.\label{eq:ub by lim of avg} \end{eqnarray} Since $g_{0}\in\{0\}\times O(d)$, we have for $k\in\mathbb{Z}_{\ge0}$ and $0\le t\le1$ \[
|\frac{d}{dt}c_{X,y}^{g_{0},k}(t)|=|c_{X,y}'(k+t)|\ge\delta\:\text{ and }\:|\frac{d^{2}}{dt^{2}}c_{X,y}^{g_{0},k}(t)|=|c_{X,y}''(k+t)|\le C\:. \] From this, from Lemma \ref{lem:ub int sig of fur of curve mass}, by assuming that $r$ is large enough, and by (\ref{eq:ub by lim of avg}) \[
\int\left|\int e^{i\left\langle y.gg_{0},rx\right\rangle }\:d\mathbf{m}_{G_{1}}(g)\right|\:d\sigma(x)<\epsilon, \] which completes the proof of the lemma. \end{proof}
\begin{lem} \label{lem:ub int m_G_0 =000026 sig}For every $\epsilon>0$ there exists $R>1$ so that for all $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$ and $r\ge R$, \[
\int\left|\int e^{i\left\langle y.g_{0},rx\right\rangle }\:d\mathbf{m}_{G_{0}}(g_{0})\right|\:d\sigma(x)<\epsilon\:. \] \end{lem}
\begin{proof} Let $\epsilon>0$, let $r>1$ be large with respect to $\epsilon$, and let $y\in\mathbb{S}_{\mathbb{V}^{\perp}}$. By Lemma \ref{lem:exists G_1} and by assuming that $r$ is large enough, it follows that there exists a closed subgroup $G_{1}$ of $G_{0}$ so that for all $g_{0}\in G_{0}$ \begin{equation}
\int\left|\int e^{i\left\langle y.gg_{0},rx\right\rangle }\:d\mathbf{m}_{G_{1}}(g)\right|\:d\sigma(x)<\epsilon\:.\label{eq:from prev lem} \end{equation} Additionally, for all $f\in C_{c}(G_{0})$ \[ \int f\:d\mathbf{m}_{G_{0}}=\int\int f(gg_{0})\:d\mathbf{m}_{G_{1}}(g)\:d\mathbf{m}_{G_{0}}(g_{0})\:. \] Hence, \begin{eqnarray*}
\int\left|\int e^{i\left\langle y.g_{0},rx\right\rangle }\:d\mathbf{m}_{G_{0}}(g_{0})\right|d\sigma(x) & = & \int\left|\int\int e^{i\left\langle y.gg_{0},rx\right\rangle }\:d\mathbf{m}_{G_{1}}(g)\:d\mathbf{m}_{G_{0}}(g_{0})\right|d\sigma(x)\\
& \le & \int\int\left|\int e^{i\left\langle y.gg_{0},rx\right\rangle }\:d\mathbf{m}_{G_{1}}(g)\right|d\sigma(x)\:d\mathbf{m}_{G_{0}}(g_{0})\:. \end{eqnarray*} This together with (\ref{eq:from prev lem}) completes the proof of the lemma. \end{proof}
\begin{proof}[Proof of Proposition \ref{prop:cont case psi(G) not R}] Let $\epsilon>0$, let $r>1$ be large with respect to $\epsilon$, $p$ and $\Phi$, and let $T>1$ be large with respect to $r$. Fix $\xi\in\mathbb{R}^{d}$ with \[
|\pi_{\mathbb{V}^{\perp}}\xi|\ge\max\{2^{T}r,\epsilon|\pi_{\mathbb{V}}\xi|\}\:. \] Since $\psi(G)\ne\mathbb{R}$, there exists $\beta>0$ so that $\psi(G)=\beta\mathbb{Z}$. Let $t\in\psi(G_{\ge T})$ be such that \begin{equation}
2^{t-\beta}r<|\pi_{\mathbb{V}^{\perp}}\xi|\le2^{t}r\:.\label{eq:choise of t} \end{equation} Since $\psi\gamma_{t}=t$, \[
|\xi.\gamma_{t}|=|2^{-t}\xi|\le2^{-t}(|\pi_{\mathbb{V}^{\perp}}\xi|+|\pi_{\mathbb{V}}\xi|)\le2^{-t}|\pi_{\mathbb{V}^{\perp}}\xi|(1+\epsilon^{-1})\le r(1+\epsilon^{-1})\:. \]
By Lemma \ref{lem:initial upper bd}, from $|\xi.\gamma_{t}|\le r(1+\epsilon^{-1})$, and by assuming that $T$ is large enough with respect to $r$ and $\epsilon$, \[
|\widehat{\mu}(\xi)|^{2}=|\widehat{\mu}((\xi.\gamma_{t}).\gamma_{-t})|^{2}\le\epsilon+\int\left|\int e^{i\left\langle \xi.(\gamma_{t}g),x\right\rangle }\:d\nu(g)\right|\:d\sigma(x)\:. \] From this, since $\nu=\rho\:d\mathbf{m}_{G}$, and since \[ \mathbf{m}_{G}(f)=\int\int f(gg_{0})\:d\mathbf{m}_{G_{0}}(g_{0})\:d\mathbf{m}_{G}(g)\text{ for }f\in C_{c}(G), \] we get, \begin{eqnarray}
|\widehat{\mu}(\xi)|^{2} & \le & \epsilon+\int\left|\int e^{i\left\langle \xi.(\gamma_{t}g),x\right\rangle }\rho(g)\:d\mathbf{m}_{G}(g)\right|\:d\sigma(x)\nonumber \\
& \le & \epsilon+\int\rho(g)\int\left|\int e^{i\left\langle \xi.(\gamma_{t}gg_{0}),x\right\rangle }\:d\mathbf{m}_{G_{0}}(g_{0})\right|\:d\sigma(x)\:d\mathbf{m}_{G}(g)\;.\label{eq:ub 3 int G_0 inside} \end{eqnarray} We have also used here the fact that $\rho(gg_{0})=\rho(g)$ for $g\in G$ and $g_{0}\in G_{0}$, which holds since $G_{0}\subset N$.
Let $g\in G$ and write $w_{g}=(\pi_{\mathbb{V}^{\perp}}\xi).\gamma_{t}g$
and $y_{g}=w_{g}/|w_{g}|$. It follows from Lemma \ref{lem:G inv subspaces} that the subspaces $\mathbb{V}$ and $\mathbb{V}^{\perp}$ are $\gamma_{t}g$-invariant. Hence, \[ \pi_{\mathbb{V}^{\perp}}(\xi.\gamma_{t}g)=\pi_{\mathbb{V}^{\perp}}((\pi_{\mathbb{V}}\xi).\gamma_{t}g+(\pi_{\mathbb{V}^{\perp}}\xi).\gamma_{t}g)=w_{g}\:. \] Additionally, given $g_{0}\in G_{0}$ it follows by the definition of $\mathbb{V}$ that $v.g_{0}=v$ for $v\in\mathbb{V}$. Thus, \[ \xi.(\gamma_{t}gg_{0})=(\pi_{\mathbb{V}}(\xi.\gamma_{t}g)+\pi_{\mathbb{V}^{\perp}}(\xi.\gamma_{t}g)).g_{0}=\pi_{\mathbb{V}}(\xi.\gamma_{t}g)+w_{g}.g_{0}\:. \] From this and (\ref{eq:ub 3 int G_0 inside}), \begin{equation}
|\widehat{\mu}(\xi)|^{2}\le\epsilon+\int\rho(g)\int\left|\int e^{i\left\langle y_{g}.g_{0},|w_{g}|x\right\rangle }\:d\mathbf{m}_{G_{0}}(g_{0})\right|\:d\sigma(x)\:d\mathbf{m}_{G}(g)\:.\label{eq:ub with y_g =000026 w_g} \end{equation} By the definition of $\rho$ there exists a constant $C>1$, which depends only on $\Phi$, so that $\psi g\le C$ for all $g\in G$ with $\rho(g)\ne0$. For such a $g$ it follows by (\ref{eq:choise of t}) that, \[
|w_{g}|=|(\pi_{\mathbb{V}^{\perp}}\xi).\gamma_{t}g|=2^{-\psi g}2^{-t}|\pi_{\mathbb{V}^{\perp}}\xi|>2^{-C-\beta}r\:. \] Thus, from Lemma \ref{lem:ub int m_G_0 =000026 sig}, since $y_{g}\in\mathbb{S}_{\mathbb{V}^{\perp}}$, and by assuming as we may that $r$ is sufficiently large with respect to $\epsilon$, $\beta$ and $C$, we get \[
\int\left|\int e^{i\left\langle y_{g}.g_{0},|w_{g}|x\right\rangle }\:d\mathbf{m}_{G_{0}}(g_{0})\right|\:d\sigma(x)\le\epsilon\:. \] This together with (\ref{eq:ub with y_g =000026 w_g}) shows that
$|\widehat{\mu}(\xi)|^{2}\le2\epsilon$, which completes the proof of the proposition. \end{proof}
\section{\label{sec:The-discrete-case}The discrete case}
Throughout this section we always assume that the group $G$ is discrete. That is, we assume that the subspace topology on $G$, inherited from $\mathbb{R}\times O(d)$, is equal to the discrete topology. Since $N$ is a compact subset of a discrete space, it holds that $N$ is finite. From the discreteness of $G$ it also follows that there exists $\beta>0$ with $\psi(G)=\beta\mathbb{Z}$. Hence, $l_{i}:=\psi(g_{i})/\beta$ is a positive integer for all $1\le i\le\ell$.
Fix some $h\in G$ with $\psi(h)=\beta$, and recall that $H:=\gamma\circ\psi(G)$. By Lemma \ref{lem:proper homo from R} we may assume that $\gamma_{\beta}=h$, which gives $H=\{h^{j}\}_{j\in\mathbb{Z}}$. By Lemma \ref{lem:G splits and F iso} and since $N\triangleleft G$, it follows that for every $g\in G$ \begin{equation} g=nh^{l}=h^{l}n'\text{ for some }l\in\mathbb{Z}\text{ and }n,n'\in N\:.\label{eq:rep of each g} \end{equation} Let $U\in O(d)$ be such that $h=(\beta,U)$. Set $A:=2^{-\beta}U$ and $B:=A^{*}$, where $A^{*}=2^{-\beta}U^{-1}$ is the transpose of $A$. Write, \[ N_{0}:=\{V\in O(d)\::\:(0,V)\in N\}, \] so that $N_{0}$ is a finite subgroup of $O(d)$. Since $N\triangleleft G$, we also have $A^{-1}N_{0}A=N_{0}$. From (\ref{eq:rep of each g}) it follows that for each $1\le i\le\ell$, \begin{equation} r_{i}U_{i}=VA^{l_{i}}=A^{l_{i}}V'\text{ for some }V,V'\in N_{0}\:.\label{eq:rep of riUi} \end{equation}
The following proposition is the main result of this section. Its proof is a nontrivial extension of an argument used in \cite{VY} to prove one of the directions of Theorem \ref{thm:bermont} stated in the introduction. Recall that $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ is an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$, and recall from Section \ref{subsec:The-main-result.} the definition of a P.V. $k$-tuple. We shall consider $A$ as a linear operator on $\mathbb{C}^{d}$ in the natural way, that is by setting $A(x+iy):=Ax+iAy$ for $x,y\in\mathbb{R}^{d}$. \begin{prop} \label{prop:main disc case}Suppose that $G$ is discrete and that $a_{1}=0$. Moreover, assume that there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ so that the self-similar measure corresponding to $\Phi$ and $p$ is non-Rajchman. Let $A$ and $N_{0}$ be as defined above. Then there exist $k\ge1$, $\theta_{1},...,\theta_{k}\in\mathbb{C}$ and $\zeta_{1},...,\zeta_{k}\in\mathbb{C}^{d}\setminus\{0\}$, so that \begin{enumerate} \item \label{enu:dic P.V. tup}$\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple; \item \label{enu:disc eigen vec}$A^{-1}\zeta_{j}=\theta_{j}\zeta_{j}$ for $1\le j\le k$; \item \label{enu:poly disc case}for every $1\le i\le\ell$ and $V\in N_{0}$ there exists $P_{i,V}\in\mathbb{Q}[X]$ so that $\left\langle Va_{i},\zeta_{j}\right\rangle =P_{i,V}(\theta_{j})$ for all $1\le j\le k$; \end{enumerate} \end{prop}
The assumption $a_{1}=0$ might seem somewhat arbitrary. It simplifies the statement of condition (\ref{enu:poly disc case}), and some of the arguments that follow.
The proof of the proposition is carried out in Sections \ref{subsec:a pre prop} and \ref{subsec:Proof-of-Proposition disc case}. In Section \ref{subsec:Construction-of-non-Rajchman} we state and prove a converse to it.
\subsection{\label{subsec:a pre prop}A preliminary proposition}
Throughout this subsection let $p=(p_{i})_{i=1}^{\ell}$ be a fixed positive probability vector. Let $\mu$ be the self-similar measure corresponding to $\Phi$ and $p$. Recall that $G$ is assumed to be discrete, and that we write $B$ in place of $A^{*}$. For a real number $x$ let $\Vert x\Vert$ be the distance from $x$ to its nearest integer, that is \[
\Vert x\Vert:=\inf\{|x-k|\::\:k\in\mathbb{Z}\}\:. \] Recall that $\Lambda:=\{1,...,\ell\}$, and that a finite set of words $\mathcal{W}\subset\Lambda^{*}$ is said to be a minimal cut-set for $\Lambda^{*}$ if every infinite sequence in $\Lambda^{\mathbb{N}}$ has a unique prefix in $\mathcal{W}$. The purpose of this subsection is to prove the following proposition. \begin{prop} \label{prop:ub on sum of dist to int}Let $\mathcal{W}$ be a minimal cut-set for $\Lambda^{*}$, and let $u,u'\in\mathcal{W}$. Suppose that $G$ is generated by $\{g_{w}\}_{w\in\mathcal{W}}$, and that $g_{u}=g_{u'}$. Then for every $\epsilon>0$ there exists $C=C(\epsilon,\mathcal{W},p)>1$ so that for all $V\in N_{0}$, \[
\sum_{j\ge0}\left\Vert \left\langle V(\varphi_{u}(0)-\varphi_{u'}(0)),B^{j}\xi\right\rangle \right\Vert ^{2}\le C\text{ for }\xi\in\mathbb{R}^{d}\text{ with }|\widehat{\mu}(2\pi\xi)|\ge\epsilon\:. \]
\end{prop}
For the rest of this subsection fix $\mathcal{W}\subset\Lambda^{*}$ and $u,u'\in\mathcal{W}$ as in the statement of the proposition. Note that since $\mathcal{W}$ is a minimal cut-set, $(p_{w})_{w\in\mathcal{W}}$ is a probability vector. Let $I_{1},I_{2},...$ be i.i.d. $\mathcal{W}$-valued random words with $\mathbb{P}\{I_{1}=w\}=p_{w}$ for $w\in\mathcal{W}$. Set $Y_{0}=1_{G}$, and for $k\ge1$ let $X_{k}:=g_{I_{k}}$, $Y_{k}:=X_{1}\cdot...\cdot X_{k}$, and \[ \tau_{\beta}(k):=\inf\{m\ge1\::\:\psi Y_{m}\ge k\beta\}\:. \] For $\xi\in\mathbb{R}^{d}$ and $w\in\{u,u'\}$ set, \[
\alpha_{w}(\xi):=\frac{1}{p_{u}+p_{u'}}\left|p_{u}e^{i\left\langle \xi,\varphi_{u}(0)\right\rangle }+p_{u'}e^{i\left\langle \xi,\varphi_{u'}(0)\right\rangle }\right|, \] and for $w\in\mathcal{W}\setminus\{u,u'\}$ write $\alpha_{w}(\xi):=1$. Set $Z_{\xi,0}:=1$, and for $n\ge1$ let \[ Z_{\xi,n}:=\prod_{k=1}^{n}\alpha_{I_{k}}(\xi.Y_{k-1})\:. \]
\begin{lem}
\label{lem:from doob's}For $k\ge1$ and $\xi\in\mathbb{R}^{d}$ we have $|\widehat{\mu}(\xi)|\le\mathbb{E}[Z_{\xi,\tau_{\beta}(k)}]$. \end{lem}
\begin{proof} Since $\mathcal{W}$ is a minimal cut-set and since $g_{u}=g_{u'}$, it follows that for $y\in\mathbb{R}^{d}$ \begin{eqnarray*}
|\widehat{\mu}(y)| & = & \left|\sum_{w\in\mathcal{W}}p_{w}\int e^{i\left\langle y,\varphi_{w}(x)\right\rangle }\:d\mu(x)\right|\\
& = & \left|\sum_{w\in\mathcal{W}}p_{w}e^{i\left\langle y,\varphi_{w}(0)\right\rangle }\widehat{\mu}(y.g_{w})\right|\le\sum_{w\in\mathcal{W}}p_{w}|\widehat{\mu}(y.g_{w})|\cdot\alpha_{w}(y)\:. \end{eqnarray*} Let $n\ge0$, then by applying the last inequality with $y=\xi.Y_{n}$, \begin{eqnarray*}
Z_{\xi,n}|\widehat{\mu}(\xi.Y_{n})| & \le & Z_{\xi,n}\sum_{w\in\mathcal{W}}p_{w}|\widehat{\mu}((\xi.Y_{n}).g_{w})|\cdot\alpha_{w}(\xi.Y_{n})\\
& = & \mathbb{E}\left[Z_{\xi,n}|\widehat{\mu}((\xi.Y_{n}).g_{I_{n+1}})|\cdot\alpha_{I_{n+1}}(\xi.Y_{n})\:\Bigl|\:I_{1},...,I_{n}\right]\\
& = & \mathbb{E}\left[Z_{\xi,n+1}|\widehat{\mu}(\xi.Y_{n+1})|\:\Bigl|\:I_{1},...,I_{n}\right]\:. \end{eqnarray*}
This shows that $\{Z_{\xi,n}|\widehat{\mu}(\xi.Y_{n})|\}_{n\ge0}$ is a submartingale with respect to the filtration $\{\mathcal{F}_{n}\}_{n\ge0}$, where $\mathcal{F}_{n}$ is the $\sigma$-algebra generated by $I_{1},...,I_{n}$. Thus, since $\tau_{\beta}(k)$ is a bounded stopping time with respect to the filtration $\{\mathcal{F}_{n}\}_{n\ge0}$, and by Doob's optional stopping theorem, we get \[
|\widehat{\mu}(\xi)|=\mathbb{E}\left[Z_{\xi,0}|\widehat{\mu}(\xi.Y_{0})|\right]\le\mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}|\widehat{\mu}(\xi.Y_{\tau_{\beta}(k)})|\right]\le\mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\right], \] which completes the proof of the lemma. \end{proof}
\begin{lem} \label{lem:lb on prob =00003D g}There exists a constant $C=C(\mathcal{W},p)>1$ so that, \[ \mathbb{P}\left\{ \gamma_{-\beta k}Y_{\tau_{\beta}(k)}=g\right\} >C^{-1}\text{ for every integer }k\ge C\text{ and }g\in N\:. \] \end{lem}
\begin{proof} Set $q:=\sum_{w\in\mathcal{W}}p_{w}\delta_{g_{w}}$ and $\lambda:=\int\psi\:dq$. For $g\in G$ let, \[ \rho(g):=\lambda^{-1}\mathbb{P}\{\psi X_{1}>\psi g\ge0\}, \] and write $\nu$ in place of $\rho\:d\mathbf{m}_{G}$. Since $\psi(G)=\beta\mathbb{Z}$
and $|N|<\infty$, it follows by our choice of $\mathbf{m}_{G}$ (see Section \ref{subsec:General-notations}) that $\mathbf{m}_{G}\{g\}=\beta/|N|$ for $g\in G$. For $g\in N$ we have $\psi(g)=0$, and so $\rho(g)=\lambda^{-1}$. Since $G$ is generated by $\{g_{w}\}_{w\in\mathcal{W}}$ it holds that $q$ is adapted, and so we can apply Proposition \ref{prop:conv in dist}. It follows that for $g\in N$, \[
\underset{k\rightarrow\infty}{\lim}\mathbb{P}\left\{ \gamma_{-\beta k}Y_{\tau_{\beta}(k)}=g\right\} =\nu\{g\}=\rho(g)\mathbf{m}_{G}\{g\}=\beta/(\lambda|N|)\:. \] Since $N$ is finite this completes the proof of the lemma. \end{proof}
Recall that $\psi(g_{i})/\beta=:l_{i}\in\mathbb{Z}_{>0}$ for $1\le i\le\ell$. Given a word $i_{1}...i_{n}=w\in\Lambda^{*}$ we write $l_{w}$ in place of $l_{i_{1}}+...+l_{i_{n}}$. \begin{lem} \label{lem:ub on cond exp}There exists an integer $C=C(\mathcal{W},p)>1$ so that for every $k\in\mathbb{Z}_{>C}$, $V\in N_{0}$ and $\xi\in\mathbb{R}^{d}$, \[ \mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\right]\le\mathbb{E}\left[Z_{\xi,\tau_{\beta}(k-C)}\right]\left(1-C^{-1}(1-\alpha_{u}(VB^{k-l_{u}}\xi))\right)\:. \] \end{lem}
\begin{proof} Let $C\in\mathbb{Z}_{>1}$ be large with respect to $\Phi$, $p$ and $\mathcal{W}$. Set $l_{\mathrm{max}}=\max_{w\in\mathcal{W}}l_{w}$, and suppose that $C>2l_{\mathrm{max}}$. Fix $k\in\mathbb{Z}_{>C}$, $V\in N_{0}$ and $\xi\in\mathbb{R}^{d}$. Let $n_{V}\in N$ be with $n_{V}=(0,V^{-1})$. Denote by $\mathcal{W}^{*}$ the set of finite words over $\mathcal{W}$. For $w_{1}...w_{m}=\mathbf{w}\in\mathcal{W}^{*}$ we write, \[ \:g_{\mathbf{w}}:=g_{w_{1}}\cdot...\cdot g_{w_{m}}\text{ and }l_{\mathbf{w}}:=l_{w_{1}}+...+l_{w_{m}}\:. \] Let, \[ \mathcal{Y}:=\{w_{1}...w_{m}\in\mathcal{W}^{*}\::\:\psi(g_{w_{1}...w_{m}})\ge\beta(k-C)>\psi(g_{w_{1}...w_{m-1}})\}\:. \] For $\mathbf{y}\in\mathcal{Y}$ set, \[ \eta_{\mathbf{y}}:=\mathbb{P}\left\{ Y_{\tau_{\beta}(k-l_{u})}=h^{k-l_{u}}n_{V}\mid I_{1}...I_{\tau_{\beta}(k-C)}=\mathbf{y}\right\} \:. \] For $m\in\mathbb{Z}_{\ge0}$ and $b\in\mathbb{Z}_{\ge1}$ write, \[ \tau_{\beta,m}(b):=\inf\{j>m\::\:\psi(X_{m+1}\cdot...\cdot X_{j})\ge b\beta\}\:. \]
Fix $w_{1}...w_{m}=\mathbf{y}\in\mathcal{Y}$ for the moment. From (\ref{eq:rep of each g}) and since $\psi(h)=\beta$, it follows that there exists $n_{\mathbf{y}}\in N$ with $g_{\mathbf{y}}=h^{l_{\mathbf{y}}}n_{\mathbf{y}}$. Additionally, by the definition of $\mathcal{Y}$, \begin{equation} k-C\le l_{\mathbf{y}}<k-C+l_{\mathrm{max}}<k-l_{\mathrm{max}}\:.\label{eq:est on l_y} \end{equation} Note that, \[ \mathbb{P}\left\{ Y_{m}=g_{\mathbf{y}}\mid I_{1}...I_{\tau_{\beta}(k-C)}=\mathbf{y}\right\} =1\:. \] From this and since $\psi(g_{\mathbf{y}})=\beta l_{\mathbf{y}}<\beta(k-l_{u})$, \[ \mathbb{P}\left\{ \tau_{\beta}(k-l_{u})=\tau_{\beta,m}(k-l_{u}-l_{\mathbf{y}})\mid I_{1}...I_{\tau_{\beta}(k-C)}=\mathbf{y}\right\} =1\:. \] Hence, by multiplying from the left both sides of the equation $Y_{\tau_{\beta}(k-l_{u})}=h^{k-l_{u}}n_{V}$ by $g_{\mathbf{y}}^{-1}=n_{\mathbf{y}}^{-1}h^{-l_{\mathbf{y}}}$, we get \begin{eqnarray*} \eta_{\mathbf{y}} & = & \mathbb{P}\left\{ X_{m+1}\cdot...\cdot X_{\tau_{\beta,m}(k-l_{u}-l_{\mathbf{y}})}=n_{\mathbf{y}}^{-1}h^{k-l_{u}-l_{\mathbf{y}}}n_{V}\mid I_{1}...I_{\tau_{\beta}(k-C)}=\mathbf{y}\right\} \\
& = & \mathbb{P}\left\{ Y_{\tau_{\beta}(k-l_{u}-l_{\mathbf{y}})}=n_{\mathbf{y}}^{-1}h^{k-l_{u}-l_{\mathbf{y}}}n_{V}\right\} , \end{eqnarray*} where in the last equality we have used the stationarity of the process $\{X_{j}\}_{j\ge1}$. Set, \[ z_{\mathbf{y}}:=\gamma_{-\beta(k-l_{u}-l_{\mathbf{y}})}n_{\mathbf{y}}^{-1}h^{k-l_{u}-l_{\mathbf{y}}}n_{V}, \] then \begin{equation} \eta_{\mathbf{y}}=\mathbb{P}\left\{ \gamma_{-\beta(k-l_{u}-l_{\mathbf{y}})}Y_{\tau_{\beta}(k-l_{u}-l_{\mathbf{y}})}=z_{\mathbf{y}}\right\} \:.\label{eq:alpha_u =00003D} \end{equation} From $\psi\circ\gamma=Id$, $\psi(h)=\beta$ and $n_{\mathbf{y}},n_{V}\in N$ it follows that $z_{\mathbf{y}}\in N$. Also, by (\ref{eq:est on l_y}) we have $k-l_{u}-l_{\mathbf{y}}>C-2l_{\mathrm{max}}$. Hence, by Lemma \ref{lem:lb on prob =00003D g} and by assuming that $C$ is sufficiently large, it follows that $\eta_{\mathbf{y}}>p_{u}^{-1}C^{-1}$. This holds for all $\mathbf{y}\in\mathcal{Y}$, which implies that almost surely \[ \mathbb{P}\left\{ Y_{\tau_{\beta}(k-l_{u})}=h^{k-l_{u}}n_{V}\mid I_{1}...I_{\tau_{\beta}(k-C)}\right\} \ge p_{u}^{-1}C^{-1}\:. \]
From the last inequality we get, \begin{multline} \mathbb{P}\left\{ Y_{\tau_{\beta}(k)-1}=h^{k-l_{u}}n_{V}\text{ and }I_{\tau_{\beta}(k)}=u\mid I_{1}...I_{\tau_{\beta}(k-C)}\right\} \\ =\mathbb{P}\left\{ Y_{\tau_{\beta}(k-l_{u})}=h^{k-l_{u}}n_{V}\text{ and }I_{\tau_{\beta}(k-l_{u})+1}=u\mid I_{1}...I_{\tau_{\beta}(k-C)}\right\} \\ =\mathbb{P}\left\{ Y_{\tau_{\beta}(k-l_{u})}=h^{k-l_{u}}n_{V}\mid I_{1}...I_{\tau_{\beta}(k-C)}\right\} \mathbb{P}\left\{ I_{1}=u\right\} \ge C^{-1}\:.\label{eq:>C^-1} \end{multline} Since $C>l_{\mathrm{max}}$ we have $\tau_{\beta}(k-C)\le\tau_{\beta}(k)-1$. Thus, since $\alpha_{w}(x)\le1$ for all $w\in\mathcal{W}$ and $x\in\mathbb{R}^{d}$, it follows that $Z_{\xi,\tau_{\beta}(k-C)}\ge Z_{\xi,\tau_{\beta}(k)-1}$. Hence, \[ Z_{\xi,\tau_{\beta}(k)}=Z_{\xi,\tau_{\beta}(k)-1}\cdot\alpha_{I_{\tau_{\beta}(k)}}(\xi.Y_{\tau_{\beta}(k)-1})\le Z_{\xi,\tau_{\beta}(k-C)}\cdot\alpha_{I_{\tau_{\beta}(k)}}(\xi.Y_{\tau_{\beta}(k)-1})\:. \] From this and (\ref{eq:>C^-1}) we get, \begin{eqnarray*} \mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\mid I_{1},...,I_{\tau_{\beta}(k-C)}\right] & \le & Z_{\xi,\tau_{\beta}(k-C)}\mathbb{E}\left[\alpha_{I_{\tau_{\beta}(k)}}(\xi.Y_{\tau_{\beta}(k)-1})\mid I_{1},...,I_{\tau_{\beta}(k-C)}\right]\\
& \le & Z_{\xi,\tau_{\beta}(k-C)}\left(1-C^{-1}+C^{-1}\alpha_{u}(\xi.h^{k-l_{u}}n_{V})\right)\\
& = & Z_{\xi,\tau_{\beta}(k-C)}\left(1-C^{-1}+C^{-1}\alpha_{u}(VB^{k-l_{u}}\xi)\right)\:. \end{eqnarray*} This gives, \begin{eqnarray*} \mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\right] & = & \mathbb{E}\left[\mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\mid I_{1},...,I_{\tau_{\beta}(k-C)}\right]\right]\\
& \le & \mathbb{E}\left[Z_{\xi,\tau_{\beta}(k-C)}\right]\left(1-C^{-1}+C^{-1}\alpha_{u}(VB^{k-l_{u}}\xi)\right), \end{eqnarray*} which completes the proof of the lemma. \end{proof}
\begin{proof}[Proof of Proposition \ref{prop:ub on sum of dist to int}]
Let $0<\epsilon<1$, let $C\in\mathbb{Z}_{>1}$ be large with respect to $\epsilon$, $\mathcal{W}$, $p$ and $\Phi$, let $V\in N_{0}$, let $\xi_{0}\in\mathbb{R}^{d}$ be with $|\widehat{\mu}(2\pi\xi_{0})|\ge\epsilon$, and write $\xi=2\pi\xi_{0}$. For $y\in\mathbb{R}^{d}$ set $\Psi(y)=1-\alpha_{u}(y)$, and note that $0\le\Psi(y)\le1$. By Lemma \ref{lem:ub on cond exp} it follows that for $k\in\mathbb{Z}_{>C}$, \[ \mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\right]\le\mathbb{E}\left[Z_{\xi,\tau_{\beta}(k-C)}\right]\left(1-C^{-1}\Psi(VB^{k-l_{u}}\xi)\right)\:. \] Iterating this and using the fact that $0\le Z_{\xi,n}\le1$ for all $n\in\mathbb{Z}_{\ge1}$, we get \[ \mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\right]\le\prod_{j=0}^{\left\lceil k/C\right\rceil -2}(1-C^{-1}\Psi(VB^{k-jC-l_{u}}\xi)), \] where $\left\lceil k/C\right\rceil $ is the smallest integer which is at least as large as $k/C$. Let $n\in\mathbb{Z}_{\ge1}$, then by applying the last inequality for $nC<k\le nC+C$ we get, \begin{eqnarray*} \prod_{k=nC+1}^{nC+C}\mathbb{E}\left[Z_{\xi,\tau_{\beta}(k)}\right] & \le & \prod_{k=nC+1}^{nC+C}\prod_{j=0}^{\left\lceil k/C\right\rceil -2}(1-C^{-1}\Psi(VB^{k-jC-l_{u}}\xi))\\
& = & \prod_{k=1}^{C}\prod_{j=0}^{n-1}(1-C^{-1}\Psi(VB^{nC+k-jC-l_{u}}\xi))\\
& = & \prod_{j=C+1}^{nC+C}(1-C^{-1}\Psi(VB^{j-l_{u}}\xi))\:. \end{eqnarray*} Hence, by Lemma \ref{lem:from doob's} \[
\epsilon^{C}\le|\widehat{\mu}(\xi)|^{C}\le\prod_{j=C+1}^{nC+C}(1-C^{-1}\Psi(VB^{j-l_{u}}\xi))\:. \] From this and the inequality $1+t\le e^{t}$, \[ \epsilon^{C}\le\exp\left(-C^{-1}\sum_{j=C+1}^{nC+C}\Psi(VB^{j-l_{u}}\xi)\right)\:. \] Since this holds for all $n\in\mathbb{Z}_{\ge1}$, \begin{equation} C^{2}\ln\epsilon^{-1}\ge\sum_{j=C+1}^{\infty}\Psi(VB^{j}\xi)\:.\label{eq:ub sum u} \end{equation}
Set $\delta:=p_{u}/(p_{u}+p_{u'})$ and $\delta':=p_{u'}/(p_{u}+p_{u'})$. By Taylor's theorem, given $0\le s\le1/8$ there exists $0\le t\le2\pi s$ so that \[ \cos(2\pi s)-1=-\frac{\cos(t)}{2}(2\pi s)^{2}\le-\frac{\cos(\pi/4)}{2}(2\pi s)^{2}\le-s^{2}\:. \] Hence, \begin{eqnarray*}
|\delta e^{2\pi is}+\delta'|^{2} & = & (\delta\cos(2\pi s)+\delta')^{2}+\delta^{2}\sin^{2}(2\pi s)\\
& = & 1+2\delta\delta'(\cos(2\pi s)-1)\le1-2\delta\delta's^{2}, \end{eqnarray*} and so, \[
1-|\delta e^{2\pi is}+\delta'|\ge1-(1-2\delta\delta's^{2})^{1/2}\ge\delta\delta's^{2}\:. \] It follows that if $y\in\mathbb{R}^{d}$ satisfies $\left\Vert \left\langle y,\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert \le1/8$, then \begin{eqnarray}
\Psi(2\pi y) & = & 1-\left|\delta\exp\left(2\pi i\left\Vert \left\langle y,\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert \right)+\delta'\right|\nonumber \\
& \ge & \delta\delta'\left\Vert \left\langle y,\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert ^{2}\:.\label{eq:if <=00003D1/8} \end{eqnarray} Additionally, for $1/8<s\le1/2$ we have \begin{eqnarray*}
|\delta e^{2\pi is}+\delta'|^{2} & = & 1+2\delta\delta'(\cos(2\pi s)-1)\\
& \le & 1+2\delta\delta'(\cos(\pi/4)-1)\le1-\delta\delta'/2, \end{eqnarray*} and so, \[
1-|\delta e^{2\pi is}+\delta'|\ge1-(1-\delta\delta'/2)^{1/2}\ge\delta\delta'/4\:. \] It follows that if $y\in\mathbb{R}^{d}$ satisfies $\left\Vert \left\langle y,\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert >1/8$, then \begin{eqnarray}
\Psi(2\pi y) & = & 1-\left|\delta\exp\left(2\pi i\left\Vert \left\langle y,\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert \right)+\delta'\right|\nonumber \\
& \ge & \delta\delta'/4>\frac{1}{4}\delta\delta'\left\Vert \left\langle y,\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert ^{2}\:.\label{eq:if >1/8} \end{eqnarray} Now recall that $\xi=2\pi\xi_{0}$, then from (\ref{eq:ub sum u}), (\ref{eq:if <=00003D1/8}) and (\ref{eq:if >1/8}) \[ C^{2}\ln\epsilon^{-1}\ge\sum_{j=C+1}^{\infty}\Psi(2\pi VB^{j}\xi_{0})\ge\frac{1}{4}\delta\delta'\sum_{j=C+1}^{\infty}\left\Vert \left\langle VB^{j}\xi_{0},\varphi_{u}(0)-\varphi_{u'}(0)\right\rangle \right\Vert ^{2}, \] which completes the proof of the proposition. \end{proof}
\subsection{\label{subsec:Proof-of-Proposition disc case}Proof of Proposition \ref{prop:main disc case}}
We continue to assume that $G$ is discrete. In order to apply Proposition \ref{prop:ub on sum of dist to int} we need the following lemma. \begin{lem} \label{lem:good fam of words}Suppose that $a_{1}=0$. Then there exists $\mathcal{W}\subset\Lambda^{*}$, $L\in\mathbb{Z}_{\ge1}$ and $\{u_{j}\}_{j=1}^{\ell},\{u_{j}'\}_{j=1}^{\ell}\subset\mathcal{W}$ so that, \begin{enumerate} \item $\mathcal{W}$ is a minimal cut-set for $\Lambda^{*}$; \item $G$ is generated by $\{g_{w}\}_{w\in\mathcal{W}}$; \item $g_{u_{j}}=g_{u_{j}'}$ for $1\le j\le\ell$; \item $\varphi_{u_{j}}(0)-\varphi_{u_{j}'}(0)=a_{j}-A^{L}a_{j}$ for $1\le j\le\ell$; \item $VA^{L}=A^{L}V$ for $V\in N_{0}$. \end{enumerate} \end{lem}
\begin{proof} For every $k\ge1$ we have $h^{kl_{1}}g_{1}^{-k}\in N$. Since $N$ is finite there exist $k_{1}>k_{2}\ge1$ with $h^{k_{1}l_{1}}g_{1}^{-k_{1}}=h^{k_{2}l_{1}}g_{1}^{-k_{2}}$, and so $g_{1}^{k_{1}-k_{2}}=h^{(k_{1}-k_{2})l_{1}}$. For every $g,g'\in G$ it holds that $[g,g']\in N$, where $[g,g']$ is the commutator of $g$ and $g'$. Since $N$ is finite there exist $m_{1}>m_{2}\ge1$ so that $[g^{m_{1}},g']=[g^{m_{2}},g']$, and so $g^{m_{1}-m_{2}}g'=g'g^{m_{1}-m_{2}}$. It follows that there exists $b\in\mathbb{Z}_{>1}$ so that $g_{1}^{b}=h^{bl_{1}}$, $g_{1}^{b}g_{j}=g_{j}g_{1}^{b}$ for $1\le j\le\ell$, $g_{\ell}^{b}g_{1}=g_{1}g_{\ell}^{b}$, and $h^{b}n=nh^{b}$ for $n\in N$. We set $L:=bl_{1}$.
Recall that $h=(\beta,U)$. For $V\in N_{0}$ we have $(0,V)\in N$, thus \[ (b\beta,VU^{b})=(0,V)h^{b}=h^{b}(0,V)=(b\beta,U^{b}V), \] and so $VU^{b}=U^{b}V$. Since $A=2^{-\beta}U$ this implies that $VA^{L}=A^{L}V$, and so the fifth condition in the statement of the lemma is satisfied.
For $m\ge1$ denote the set of $m$-words over $\Lambda$ by $\Lambda^{m}$. For $1\le j\le\ell$ we write $j^{m}$ for the word $i_{1}...i_{m}\in\Lambda^{m}$ with $i_{k}=j$ for $1\le k\le m$. Given $m_{1},m_{2}\ge1$, $w_{1}\in\Lambda^{m_{1}}$ and $w_{2}\in\Lambda^{m_{2}}$, we write $w_{1}w_{2}\in\Lambda^{m_{1}+m_{2}}$ for the concatenation of $w_{1}$ with $w_{2}$.
Set \[ \mathcal{W}:=(\Lambda^{b+1}\setminus\{\ell^{b}1\})\cup\{\ell^{b}1i\::\:1\le i\le\ell\}\:. \] It is clear that $\mathcal{W}$ is a minimal cut-set for $\Lambda^{*}$. For $1\le j\le\ell$ set $u_{j}:=j1^{b}$ and $u_{j}':=1^{b}j$. Note that since $\Phi$ is affinely irreducible we must have $\ell>1$. From this and $b>1$, it follows that $u_{j},u_{j}'\in\mathcal{W}$.
From $g_{1}^{b}g_{j}=g_{j}g_{1}^{b}$ it follows that the third condition is satisfied. From $g_{1}=(\log r_{1}^{-1},U_{1})$, $h=(\beta,U)$ and $g_{1}^{b}=h^{bl_{1}}$ it follows that, \[ r_{1}^{b}U_{1}^{b}=2^{-\beta bl_{1}}U^{bl_{1}}=A^{L}\:. \] Thus, since $a_{1}=0$ \[ \varphi_{u_{j}}(0)-\varphi_{u_{j}'}(0)=a_{j}-r_{1}^{b}U_{1}^{b}a_{j}=a_{j}-A^{L}a_{j}, \] which shows that the fourth condition is satisfied.
It remains to show that $G$ is generated by $\{g_{w}\}_{w\in\mathcal{W}}$. By definition $G$ is the closed subgroup of $\mathbb{R}\times O(d)$ generated by $\{g_{i}\}_{i=1}^{\ell}$. From this and since $G$ is discrete, it follows that $G$ is generated by $\{g_{i}\}_{i=1}^{\ell}$. Write $G_{1}$ for the group generated by $\{g_{w}\}_{w\in\mathcal{W}}$. For every $1\le i\le\ell$ we have $1\ell^{b},\ell^{b}1i\in\mathcal{W}$. Hence from $g_{\ell}^{b}g_{1}=g_{1}g_{\ell}^{b}$, \[ g_{i}=(g_{1\ell^{b}})^{-1}g_{\ell^{b}1i}\in G_{1}\:. \]
This shows that $G_{1}=G$, which completes the proof of the lemma. \end{proof}
The treatment of the $1$-dimensional case, carried out in \cite{Br} and \cite{VY}, relies on a classical theorem of Pisot (see \cite[Theorem 2.1]{Bu}). In the proof of Proposition \ref{prop:main disc case} we shall need the following extension of this result. It follows directly from \cite[Chapter III, Theorem III]{Pi} together with \cite[Theorem 1]{Ko}. A result similar to \cite[Theorem 1]{Ko} was obtained in \cite[Lemma 2]{Ma}. \begin{thm} \label{thm:gen of pisot}Let $k\ge1$ and $\theta_{1},...,\theta_{k},\lambda_{1},...,\lambda_{k}\in\mathbb{C}$
be with $|\theta_{j}|>1$ and $\lambda_{j}\ne0$ for $1\le j\le k$, and $\theta_{j}\ne\theta_{i}$ for $1\le j<i\le k$. For $n\ge0$ set $\eta_{n}=\sum_{j=1}^{k}\lambda_{j}\theta_{j}^{n}$, and suppose that $\eta_{n}\in\mathbb{R}$ for all $n\ge0$. Moreover assume that $\sum_{n\ge0}\Vert\eta_{n}\Vert^{2}<\infty$. Then, \begin{enumerate} \item $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple; \item $\lambda_{j}\in\mathbb{Q}(\theta_{j})$ for each $1\le j\le k$; \item if $1\le j,i\le k$ are such that $\theta_{j}$ and $\theta_{i}$ are conjugates over $\mathbb{Q}$ and $\sigma:\mathbb{Q}(\theta_{j})\rightarrow\mathbb{Q}(\theta_{i})$ is an isomorphism with $\sigma(\theta_{j})=\theta_{i}$, then $\sigma(\lambda_{j})=\lambda_{i}$. \end{enumerate} \end{thm}
\begin{proof}[Proof of Proposition \ref{prop:main disc case}] Recall that $A=2^{-\beta}U$ and $B=A^{*}$, where $U\in O(d)$. Let $\theta_{1},...,\theta_{s}\in\mathbb{C}$ be the distinct eigenvalues of $A^{-1}$. For $1\le j\le s$ let $\mathbb{V}_{j}\subset\mathbb{C}^{d}$ be the eigenspace of $A^{-1}$ corresponding to $\theta_{j}$. Since $B^{-1}=(A^{-1})^{*}$, the numbers $\theta_{1},...,\theta_{s}$ are also the distinct eigenvalues of $B^{-1}$, and $\mathbb{V}_{j}$ is the eigenspace of $B^{-1}$ corresponding to $\overline{\theta_{j}}$ for each $1\le j\le s$.
Assume that there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ so that the self-similar measure $\mu$ corresponding to $\Phi$ and $p$ is non-Rajchman. There exist $\epsilon>0$ and $\xi_{1},\xi_{2},...\in\mathbb{R}^{d}$
so that $|\xi_{k}|\ge1$ and $|\widehat{\mu}(2\pi\xi_{k})|\ge\epsilon$
for all $k\ge1$, and also $|\xi_{k}|\overset{k}{\rightarrow}\infty$. For $k\ge1$ set \[
n_{k}:=\min\{n\ge1\::\:|B^{n}\xi_{k}|\le1\}, \]
then $2^{-\beta}\le|B^{n_{k}}\xi_{k}|\le1$. Thus, by moving to a subsequence without changing the notation, we may assume that there exists $0\ne\xi\in\mathbb{R}^{d}$ so that $B^{n_{k}}\xi_{k}\overset{k}{\rightarrow}\xi$.
Recall that we assume $a_{1}=0$, and let $\mathcal{W}$, $L$, $\{u_{i}\}_{i=1}^{\ell}$ and $\{u_{i}'\}_{i=1}^{\ell}$ be as obtained in Lemma \ref{lem:good fam of words}. Let $C>1$ be large with respect to $\epsilon$, $\mathcal{W}$, $\Phi$ and $p$. For $1\le i\le\ell$ and $V\in N_{0}$ we have $g_{u_{i}}=g_{u_{i}'}$ and, \[ V(\varphi_{u_{i}}(0)-\varphi_{u_{i}'}(0))=V(a_{i}-A^{L}a_{i})=(I-A^{L})Va_{i}, \] where $I$ is the identity operator here. Set $b_{i,V}:=(I-A^{L})Va_{i}$, then by Proposition \ref{prop:ub on sum of dist to int} it follows that for all $k\ge1$, \[ C\ge\sum_{n\ge0}\Vert\left\langle V(\varphi_{u_{i}}(0)-\varphi_{u_{i}'}(0)),B^{n}\xi_{k}\right\rangle \Vert^{2}=\sum_{n\ge-n_{k}}\Vert\left\langle b_{i,V},B^{n}B^{n_{k}}\xi_{k}\right\rangle \Vert^{2}\:. \]
From $|\xi_{k}|\overset{k}{\rightarrow}\infty$ it follows that $n_{k}\overset{k}{\rightarrow}\infty$. Thus, for every fixed $T\ge1$ and $k\ge1$ large enough with respect to $T$, \[ \sum_{n=0}^{T}\Vert\left\langle b_{i,V},B^{-n}B^{n_{k}}\xi_{k}\right\rangle \Vert^{2}\le C\:. \] From this and since $B^{n_{k}}\xi_{k}\overset{k}{\rightarrow}\xi$, \[ \sum_{n=0}^{T}\Vert\left\langle b_{i,V},B^{-n}\xi\right\rangle \Vert^{2}\le C\:. \] Hence, since this holds for every $T\ge1$, \begin{equation} \sum_{n=0}^{\infty}\Vert\left\langle b_{i,V},B^{-n}\xi\right\rangle \Vert^{2}<\infty\text{ for all }1\le i\le\ell\text{ and }V\in N_{0}\:.\label{eq:< infinity} \end{equation}
Recall that for a linear subspace $\mathbb{V}$ of $\mathbb{C}^{d}$ we denote by $\pi_{\mathbb{V}}$ the orthogonal projection onto $\mathbb{V}$. For $1\le j\le s$ set \[ \zeta_{j}:=(1-\overline{\theta_{j}^{-L}})\pi_{\mathbb{V}_{j}}\xi, \] where we consider $\xi$ as a vector in $\mathbb{C}^{d}$ here. Let $\lambda_{j}:\mathbb{R}^{d}\rightarrow\mathbb{C}$ be with $\lambda_{j}(x)=\left\langle x,\zeta_{j}\right\rangle $ for $x\in\mathbb{R}^{d}$. Regarding $\mathbb{C}$ as a $2$-dimensional vector space over $\mathbb{R}$, the maps $\lambda_{1},...,\lambda_{s}$ are $\mathbb{R}$-linear. Additionally, for $1\le i\le\ell$, $V\in N_{0}$ and $n\ge0$ \begin{eqnarray} \left\langle b_{i,V},B^{-n}\xi\right\rangle & = & \left\langle (I-A^{L})Va_{i},\sum_{j=1}^{s}\overline{\theta_{j}^{n}}\pi_{\mathbb{V}_{j}}\xi\right\rangle \nonumber \\
& = & \sum_{j=1}^{s}\theta_{j}^{n}\left\langle Va_{i},(I-B^{L})\pi_{\mathbb{V}_{j}}\xi\right\rangle =\sum_{j=1}^{s}\theta_{j}^{n}\lambda_{j}(Va_{i}),\label{eq:fin exp dir prod} \end{eqnarray} which in particular implies that $\sum_{j=1}^{s}\theta_{j}^{n}\lambda_{j}(Va_{i})\in\mathbb{R}$. From (\ref{eq:< infinity}) and (\ref{eq:fin exp dir prod}), \begin{equation} \sum_{n=0}^{\infty}\:\Bigl\Vert\sum_{j=1}^{s}\theta_{j}^{n}\lambda_{j}(Va_{i})\Bigr\Vert^{2}<\infty\text{ for all }1\le i\le\ell\text{ and }V\in N_{0}\:.\label{eq:comb of facts} \end{equation}
For every $1\le j\le s$ we have $|\theta_{j}|=2^{\beta}>1$. From this and since $\xi\ne0$, we get that there exists $1\le j_{0}\le s$ so that $\zeta_{j_{0}}\ne0$. Hence $\lambda_{j_{0}}$ is not identically $0$, and so $\ker\lambda_{j_{0}}$ is a proper subspace of $\mathbb{R}^{d}$. Let us show that, \begin{equation} \lambda_{j_{0}}(Va_{i})\ne0\text{ for some }1\le i\le\ell\text{ and }V\in N_{0}\:.\label{eq:func not all 0} \end{equation} Assume by contradiction that this is false. Then, \[ \{a_{i}\}_{i=1}^{\ell}\subset\cap_{V\in N_{0}}V(\ker\lambda_{j_{0}})=:\mathbb{W}\:. \] For $x\in\mathbb{R}^{d}$, \[ \lambda_{j_{0}}(A^{-1}x)=\left\langle A^{-1}x,\zeta_{j_{0}}\right\rangle =\left\langle x,B^{-1}\zeta_{j_{0}}\right\rangle =\theta_{j_{0}}\lambda_{j_{0}}(x), \] from which it follows that $A(\ker\lambda_{j_{0}})=\ker\lambda_{j_{0}}$. Moreover, from $N\triangleleft G$ it follows that $AN_{0}=N_{0}A$, which implies \[ A(\mathbb{W})=\cap_{V\in N_{0}}VA(\ker\lambda_{j_{0}})=\mathbb{W}\:. \] Since $N_{0}$ is a group, we also have $V(\mathbb{W})=\mathbb{W}$ for all $V\in N_{0}$. By (\ref{eq:rep of each g}), for every $1\le i\le\ell$ there exists $V_{i}\in N_{0}$ so that $\varphi_{i}(x)=V_{i}A^{l_{i}}x+a_{i}$. From all of this it follows that $\varphi_{i}(\mathbb{W})=\mathbb{W}$ for all $1\le i\le\ell$. Since $\mathbb{W}\subset\ker\lambda_{j_{0}}$ and since $\ker\lambda_{j_{0}}$ is a proper subspace of $\mathbb{R}^{d}$, this contradicts the affine irreducibility of $\Phi$, which shows that (\ref{eq:func not all 0}) must hold. For $1\le i\le\ell$ and $V\in N_{0}$ set, \[ J_{i,V}:=\{1\le j\le s\::\:\lambda_{j}(Va_{i})\ne0\}\:. \] From (\ref{eq:func not all 0}) it follows that $J_{i,V}\ne\emptyset$ for some $1\le i\le\ell$ and $V\in N_{0}$.
For $1\le i\le\ell$ and $V\in N_{0}$ it follows from (\ref{eq:comb of facts}) and Theorem \ref{thm:gen of pisot} that, \begin{enumerate}
\item $\{\theta_{j}\}_{j\in J_{i,V}}$ is a P.V. $|J_{i,V}|$-tuple or $J_{i,V}=\emptyset$; \item $\lambda_{j}(Va_{i})\in\mathbb{Q}(\theta_{j})$ for $1\le j\le s$; \item $\sigma(\lambda_{j_{1}}(Va_{i}))=\lambda_{j_{2}}(Va_{i})$ for every $j_{1},j_{2}\in J_{i,V}$ and isomorphism $\sigma:\mathbb{Q}(\theta_{j_{1}})\rightarrow\mathbb{Q}(\theta_{j_{2}})$ with $\sigma(\theta_{j_{1}})=\theta_{j_{2}}$ (if such a $\sigma$ exists). \end{enumerate}
Let $1\le i_{0}\le\ell$ and $V_{0}\in N_{0}$ be with $J_{i_{0},V_{0}}\ne\emptyset$, so that $\{\theta_{j}\}_{j\in J_{i_{0},V_{0}}}$ is a P.V. $|J_{i_{0},V_{0}}|$-tuple. By the definition of a P.V. tuple, there exists a nonempty subset
$J$ of $J_{i_{0},V_{0}}$ so that $\{\theta_{j}\}_{j\in J}$ is a P.V. $|J|$-tuple and $\theta_{j_{1}},\theta_{j_{2}}$ are conjugates over $\mathbb{Q}$ for all $j_{1},j_{2}\in J$. For $j\in J$ we have, \[ \left\langle V_{0}a_{i_{0}},\zeta_{j}\right\rangle =\lambda_{j}(V_{0}a_{i_{0}})\ne0, \]
and so $\zeta_{j}\ne0$. Recall that $\zeta_{j}:=(1-\overline{\theta_{j}^{-L}})\pi_{\mathbb{V}_{j}}\xi$, which implies $A^{-1}\zeta_{j}=\theta_{j}\zeta_{j}$ for $j\in J$. It remains to construct the polynomials $P_{i,V}$.
Let $1\le i\le\ell$ and $V\in N_{0}$ be given. Since $\{\theta_{j}\}_{j\in J}$
are algebraic conjugates, since $|\theta_{j}|>1$ for $j\in J$, and since $\{\theta_{j}\}_{j\in J_{i,V}}$ is either empty or a P.V. tuple, it follows that $J\cap J_{i,V}=\emptyset$ or $J\subset J_{i,V}$. If $J\cap J_{i,V}=\emptyset$ we set $P_{i,V}(X):=0$. For $j\in J$ we have $j\notin J_{i,V}$, and so \[ \left\langle Va_{i},\zeta_{j}\right\rangle =\lambda_{j}(Va_{i})=0=P_{i,V}(\theta_{j})\:. \] Next suppose that $J\subset J_{i,V}$, and let $j_{1}\in J$. Since $\theta_{j_{1}}$ is algebraic and from $\lambda_{j_{1}}(Va_{i})\in\mathbb{Q}(\theta_{j_{1}})$, it follows that there exists $P_{i,V}(X)\in\mathbb{Q}[X]$ so that $\lambda_{j_{1}}(Va_{i})=P_{i,V}(\theta_{j_{1}})$. Let $j\in J$, then $\theta_{j_{1}}$ and $\theta_{j}$ are conjugates over $\mathbb{Q}$, and so there exists an isomorphism $\sigma:\mathbb{Q}(\theta_{j_{1}})\rightarrow\mathbb{Q}(\theta_{j})$ with $\sigma(\theta_{j_{1}})=\theta_{j}$. From this and $j_{1},j\in J\subset J_{i,V}$ we get, \[ \left\langle Va_{i},\zeta_{j}\right\rangle =\lambda_{j}(Va_{i})=\sigma(\lambda_{j_{1}}(Va_{i}))=\sigma(P_{i,V}(\theta_{j_{1}}))=P_{i,V}(\sigma(\theta_{j_{1}}))=P_{i,V}(\theta_{j}), \] and so $P_{i,V}$ satisfies the required property. This completes the proof of the proposition. \end{proof}
\subsection{\label{subsec:Construction-of-non-Rajchman}Construction of non-Rajchman self-similar measures}
The purpose of this subsection is to prove the following converse to Proposition \ref{prop:main disc case}. \begin{prop} \label{prop:conv disc case}Suppose that $G$ is discrete, and let $A$ and $N_{0}$ be as defined before the statement of Proposition \ref{prop:main disc case}. Assume that there exist $k\ge1$, $\theta_{1},...,\theta_{k}\in\mathbb{C}$ and $\zeta_{1},...,\zeta_{k}\in\mathbb{C}^{d}\setminus\{0\}$, so that \begin{enumerate} \item $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple; \item $A^{-1}\zeta_{j}=\theta_{j}\zeta_{j}$ for $1\le j\le k$; \item for every $1\le i\le\ell$ and $V\in N_{0}$ there exists $P_{i,V}\in\mathbb{Q}[X]$ so that $\left\langle Va_{i},\zeta_{j}\right\rangle =P_{i,V}(\theta_{j})$ for all $1\le j\le k$. \end{enumerate} Then there exists a positive probability vector $p=(p_{i})_{i=1}^{\ell}$ so that the self-similar measure corresponding to $\Phi$ and $p$ is non-Rajchman. \end{prop}
The proof of the proposition relies on the following lemma. A version of it can be found in \cite[Theorem 3.5]{Ca}, but we provide the short proof for the reader's convenience. \begin{lem} \label{lem:exp decay}Let $\{\theta_{1},...,\theta_{k}\}$ be a P.V. $k$-tuple and let $P\in\mathbb{Z}[X]$. Then there exist $C>1$ and $0<\delta<1$ such that, \[ \Vert P(\theta_{1})\theta_{1}^{n}+...+P(\theta_{k})\theta_{k}^{n}\Vert\le C\delta^{n}\text{ for all }n\ge0\;. \] \end{lem}
\begin{proof} Let $Q\in\mathbb{Z}[X]$ be the monic polynomial of smallest degree with $Q(\theta_{j})=0$ for $1\le j\le k$. Let $\theta_{k+1},...,\theta_{s}$ be the remaining roots of $Q$. Set \[
\delta:=\underset{k<j\le s}{\max}\:|\theta_{j}|\text{ and }C:=\sum_{j=k+1}^{s}|P(\theta_{j})|, \] then $0<\delta<1$ since $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple. Since $\theta_{1},...,\theta_{s}$ are all the roots of $Q$, and by the fundamental theorem of symmetric polynomials, it follows that for all $n\ge0$ \[ P(\theta_{1})\theta_{1}^{n}+...+P(\theta_{s})\theta_{s}^{n}\in\mathbb{Z}\:. \] Hence, \[
\Vert\sum_{j=1}^{k}P(\theta_{j})\theta_{j}^{n}\Vert\le\sum_{j=k+1}^{s}|P(\theta_{j})\theta_{j}^{n}|\le C\delta^{n}, \] which completes the proof of the lemma. \end{proof}
The following lemma is a consequence of the affine irreducibility of $\Phi$. For $(z_{1},...,z_{d})=z\in\mathbb{C}^{d}$ we write $\overline{z}$ in place of $(\overline{z_{1}},...,\overline{z_{d}})$. \begin{lem} \label{lem:zeta equal}Assume the conditions of Proposition \ref{prop:conv disc case} are satisfied. Let $1\le j_{1},j_{2}\le k$ be with $\theta_{j_{2}}=\overline{\theta_{j_{1}}}$, then $\zeta_{j_{2}}=\overline{\zeta_{j_{1}}}$. \end{lem}
\begin{proof} The proof is similar to the argument used in the proof of Proposition \ref{prop:main disc case} to establish (\ref{eq:func not all 0}). Set, \[ \mathbb{V}:=\left\{ x\in\mathbb{R}^{d}\::\:\left\langle x,\zeta_{j_{2}}-\overline{\zeta_{j_{1}}}\right\rangle =0\right\} \text{ and }\mathbb{W}:=\cap_{V\in N_{0}}V(\mathbb{V})\:. \] For $1\le i\le\ell$ and $V\in N_{0}$, \[ \left\langle Va_{i},\zeta_{j_{2}}-\overline{\zeta_{j_{1}}}\right\rangle =P_{i,V}(\theta_{j_{2}})-\overline{P_{i,V}(\theta_{j_{1}})}=0, \] and so $a_{i}\in\mathbb{W}$. For $x\in\mathbb{V}$, \[ \left\langle A^{-1}x,\zeta_{j_{2}}-\overline{\zeta_{j_{1}}}\right\rangle =\left\langle x,B^{-1}\zeta_{j_{2}}-\overline{B^{-1}\zeta_{j_{1}}}\right\rangle =\theta_{j_{2}}\left\langle x,\zeta_{j_{2}}-\overline{\zeta_{j_{1}}}\right\rangle =0, \] and so $A(\mathbb{V})=\mathbb{V}$. Moreover, since $AN_{0}=N_{0}A$, \[ A(\mathbb{W})=\cap_{V\in N_{0}}VA(\mathbb{V})=\mathbb{W}\:. \] Since $N_{0}$ is a group, we also have $V(\mathbb{W})=\mathbb{W}$ for all $V\in N_{0}$. By (\ref{eq:rep of each g}), for every $1\le i\le\ell$ there exists $V_{i}\in N_{0}$ so that $\varphi_{i}(x)=V_{i}A^{l_{i}}x+a_{i}$. From all of this it follows that $\varphi_{i}(\mathbb{W})=\mathbb{W}$ for all $1\le i\le\ell$. Since $\Phi$ is affinely irreducible and $\mathbb{W}\subset\mathbb{V}$, we must have $\mathbb{V}=\mathbb{R}^{d}$. This implies that $\zeta_{j_{2}}=\overline{\zeta_{j_{1}}}$, which completes the proof of the lemma. \end{proof}
The following lemma will enable us to assume that $a_{1}=0$, which will be useful in the proof of Proposition \ref{prop:conv disc case}. \begin{lem} \label{lem:red to a1=00003D0}Assume the conditions of Proposition \ref{prop:conv disc case} are satisfied. Suppose also that $\theta_{1},...,\theta_{k}$ are all conjugates over $\mathbb{Q}$. For $x\in\mathbb{R}^{d}$ set $Tx=x-(I-r_{1}U_{1})^{-1}a_{1}$, where $I$ is the identity operator. Then $T\circ\varphi_{1}\circ T^{-1}(0)=0$, and for every $1\le i\le\ell$ and $V\in N_{0}$ there exists $Q_{i,V}\in\mathbb{Q}[X]$ so that \[ \left\langle VT\circ\varphi_{i}\circ T^{-1}(0),\zeta_{j}\right\rangle =Q_{i,V}(\theta_{j})\text{ for all }1\le j\le k\:. \] \end{lem}
\begin{proof} For $1\le i\le\ell$ we have, \begin{equation} T\circ\varphi_{i}\circ T^{-1}(0)=a_{i}-(I-r_{i}U_{i})(I-r_{1}U_{1})^{-1}a_{1},\label{eq:conj by tran} \end{equation} which shows that $T\circ\varphi_{1}\circ T^{-1}(0)=0$.
Let $V\in N_{0}$. By (\ref{eq:rep of riUi}), since $BN_{0}B^{-1}=N_{0}$ and since $N_{0}$ is finite, there exists $m\ge1$ so that $r_{1}^{m}U_{1}^{-m}=B^{ml_{1}}$ and $B^{m}V=VB^{m}$. Additionally, for every $b\in\mathbb{Z}_{\ge0}$ there exists $V_{b}\in N_{0}$ so that $r_{1}^{b}U_{1}^{-b}V=V_{b}B^{bl_{1}}$. Set $S:=\sum_{b=0}^{m-1}r_{1}^{b}U_{1}^{-b}$, then for $1\le j\le k$, \begin{equation} \left\langle a_{1},SV\zeta_{j}\right\rangle =\sum_{b=0}^{m-1}\left\langle a_{1},V_{b}B^{bl_{1}}\zeta_{j}\right\rangle =\sum_{b=0}^{m-1}\theta_{j}^{-bl_{1}}P_{1,V_{b}^{-1}}(\theta_{j})\:.\label{eq:first dev} \end{equation} On the other hand, since \[ (I-r_{1}U_{1}^{-1})SV\zeta_{j}=(I-r_{1}^{m}U_{1}^{-m})V\zeta_{j}=V(I-B^{ml_{1}})\zeta_{j}=(1-\overline{\theta_{j}^{-ml_{1}}})V\zeta_{j}, \] we have \begin{eqnarray*} \left\langle a_{1},SV\zeta_{j}\right\rangle & = & \left\langle (I-r_{1}U_{1})^{-1}a_{1},(I-r_{1}U_{1}^{-1})SV\zeta_{j}\right\rangle \\
& = & (1-\theta_{j}^{-ml_{1}})\left\langle V^{-1}(I-r_{1}U_{1})^{-1}a_{1},\zeta_{j}\right\rangle \:. \end{eqnarray*} From this and (\ref{eq:first dev}) we get, \[ \left\langle V^{-1}(I-r_{1}U_{1})^{-1}a_{1},\zeta_{j}\right\rangle =(1-\theta_{j}^{-ml_{1}})^{-1}\sum_{b=0}^{m-1}\theta_{j}^{-bl_{1}}P_{1,V_{b}^{-1}}(\theta_{j})\in\mathbb{Q}(\theta_{j})\:. \] Since $\theta_{1},...,\theta_{k}$ are algebraic conjugates, it follows that for every $V\in N_{0}$ there exists $Q_{V}\in\mathbb{Q}[X]$ so that \begin{equation} \left\langle V(I-r_{1}U_{1})^{-1}a_{1},\zeta_{j}\right\rangle =Q_{V}(\theta_{j})\text{ for }1\le j\le k\:.\label{eq:exi Q} \end{equation}
Fix $1\le i\le\ell$ and $V\in N_{0}$. There exists $V'\in N_{0}$ so that $r_{i}VU_{i}=A^{l_{i}}V'$. Hence for $1\le j\le k$, \[ \left\langle r_{i}VU_{i}(I-r_{1}U_{1})^{-1}a_{1},\zeta_{j}\right\rangle =\left\langle V'(I-r_{1}U_{1})^{-1}a_{1},B^{l_{i}}\zeta_{j}\right\rangle =\theta_{j}^{-l_{i}}Q_{V'}(\theta_{j})\:. \] It follows that there exists $R_{i,V}\in\mathbb{Q}[X]$ so that, \[ \left\langle r_{i}VU_{i}(I-r_{1}U_{1})^{-1}a_{1},\zeta_{j}\right\rangle =R_{i,V}(\theta_{j})\text{ for }1\le j\le k\:. \] From this, (\ref{eq:exi Q}) and (\ref{eq:conj by tran}), we get that for $1\le j\le k$ \[ \left\langle VT\circ\varphi_{i}\circ T^{-1}(0),\zeta_{j}\right\rangle =P_{i,V}(\theta_{j})-Q_{V}(\theta_{j})+R_{i,V}(\theta_{j}), \] which completes the proof of the lemma. \end{proof}
\begin{proof}[Proof of Proposition \ref{prop:conv disc case}] There exists $\emptyset\ne J\subset\{1,...,k\}$ so that $\{\theta_{j}\}_{j\in J}$
is a P.V. $|J|$-tuple, and such that $\theta_{j_{1}}$ and $\theta_{j_{2}}$ are conjugates over $\mathbb{Q}$ for all $j_{1},j_{2}\in J$. Thus, by replacing $\{\theta_{j}\}_{j=1}^{k}$ with $\{\theta_{j}\}_{j\in J}$ and $\{\zeta_{j}\}_{j=1}^{k}$ with $\{\zeta_{j}\}_{j\in J}$, without changing the notation, we may assume that $\theta_{1},...,\theta_{k}$ are all conjugates over $\mathbb{Q}$.
Let $T:\mathbb{R}^{d}\rightarrow\mathbb{R}^{d}$ be as in Lemma \ref{lem:red to a1=00003D0}. By that lemma $T\circ\varphi_{1}\circ T^{-1}(0)=0$, and there exists $M\in\mathbb{Z}_{\ge1}$ so that for every $1\le i\le\ell$ and $V\in N_{0}$ there exists $Q_{i,V}\in\mathbb{Z}[X]$ such that, \[ \left\langle VT\circ\varphi_{i}\circ T^{-1}(0),M\zeta_{j}\right\rangle =Q_{i,V}(\theta_{j})\text{ for all }1\le j\le k\:. \] Set $\Phi'=\{T\circ\varphi_{i}\circ T^{-1}\}_{i=1}^{\ell}$, and note that $\Phi'$ is affinely irreducible (since $\Phi$ is), and that the linear parts of the maps in $\Phi'$ are equal to the linear parts of the maps in $\Phi$. Additionally, observe that if $p=(p_{i})_{i=1}^{\ell}$ is a probability vector and $\mu$ is the self-similar measure corresponding to $\Phi$ and $p$, then $T\mu$ is the self-similar measure corresponding to $\Phi'$ and $p$. Moreover, it is clear that $\mu$ is Rajchman if and only if $T\mu$ is Rajchman. From all of this it follows that by replacing $\Phi$ with $\Phi'$, $\{\zeta_{j}\}_{j=1}^{k}$ with $\{M\zeta_{j}\}_{j=1}^{k}$ and $\{P_{i,V}\}$ with $\{Q_{i,V}\}$, without changing the notation, we may assume that $a_{1}=0$ and $P_{i,V}\in\mathbb{Z}[X]$ for all $1\le i\le\ell$ and $V\in N_{0}$.
By Lemma \ref{lem:exp decay}, since $\{P_{i,V}\}\subset\mathbb{Z}[X]$ and since $N_{0}$ is finite, there exists $C>1$ and $0<\delta<1$ so that for all $1\le i\le\ell$, $V\in N_{0}$ and $b\in\mathbb{Z}_{\ge0}$, \begin{equation} \Vert\sum_{j=1}^{k}\theta_{j}^{b}\left\langle Va_{i},\zeta_{j}\right\rangle \Vert=\Vert\sum_{j=1}^{k}\theta_{j}^{b}P_{i,V}(\theta_{j})\Vert\le C\delta^{b}\:.\label{eq:exp decay} \end{equation}
Set $\xi=\sum_{j=1}^{k}\zeta_{j}$. Since $\zeta_{1},...,\zeta_{k}$ are eigenvectors of $A^{-1}$ corresponding to distinct eigenvalues, they are independent. In particular $\xi\ne0$, and $\zeta_{j_{1}}\ne\zeta_{j_{2}}$ for $1\le j_{1}<j_{2}\le k$. Since $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple, for every $1\le j_{1}\le k$ there exists $1\le j_{2}\le k$ with $\theta_{j_{2}}=\overline{\theta_{j_{1}}}$. By Lemma \ref{lem:zeta equal} this implies $\zeta_{j_{2}}=\overline{\zeta_{j_{1}}}$, which shows that $\xi\in\mathbb{R}^{d}$. From (\ref{eq:exp decay}) it follows that for all $1\le i\le\ell$, $V\in N_{0}$ and $b\in\mathbb{Z}_{\ge0}$, \begin{equation} \Vert\left\langle Va_{i},B^{-b}\xi\right\rangle \Vert=\Vert\sum_{j=1}^{k}\theta_{j}^{b}\left\langle Va_{i},\zeta_{j}\right\rangle \Vert\le C\delta^{b}\:.\label{eq:exp decay in prod} \end{equation}
Set, \[ \Delta:=\{(p_{1},...,p_{\ell})\in[0,1]^{\ell}\::\:p_{1}+...+p_{\ell}=1\}\:. \] For $(p_{i})_{i=1}^{\ell}=p\in\Delta$ let $\mu_{p}$ be the self-similar measure corresponding to $\Phi$ and $p$. Additionally, set \[ q_{p}:=\sum_{i=1}^{\ell}p_{i}\delta_{g_{i}}\in\mathcal{M}(G), \] let $X_{p,1},X_{p,2},...$ be i.i.d. $G$-valued random elements with distribution $q_{p}$, and write $\lambda_{p}:=\mathbb{E}[\psi X_{p,1}]$. For $g\in G$ set \[ \rho_{p}(g)=\lambda_{p}^{-1}\mathbb{P}\{\psi X_{p,1}>\psi g\ge0\}, \] and write $\nu_{p}$ in place of $\rho_{p}\:d\mathbf{m}_{G}$. Note that $\nu_{p}\in\mathcal{M}(G)$.
Let $m\ge1$ be large with respect to $\delta$ and $C$. Let $f:\Delta\rightarrow\mathbb{C}$ be such that, \[ f(p)=\int\widehat{\mu_{p}}(2\pi(B^{-m}\xi).g)\:d\nu_{p}(g)\;\text{ for }p\in\Delta\:. \] Let $(1,0,...,0)=:e_{1}\in\Delta$, then $\mu_{e_{1}}$ is the unique member of $\mathcal{M}(\mathbb{R}^{d})$ which satisfies $\mu_{e_{1}}=\varphi_{1}\mu_{e_{1}}$. Since $a_{1}=0$, this relation is also satisfied by $\delta_{0}$, where $\delta_{0}$ is the Dirac mass centred at $0$. This implies that $\mu_{e_{1}}=\delta_{0}$, and so $f(e_{1})=1$. It is easy to see that $f$ is continuous, and so there exists $(p_{1},...,p_{\ell})=p\in\Delta$
with $|f(p)|\ge1/2$ and $p_{i}>0$ for $1\le i\le\ell$. Fix this $p$ until the end of the proof. We shall show that $\mu_{p}$ is non-Rajchman. Since $p$ is positive, this will complete the proof of the proposition.
Let $n\ge1$ be large with respect to $m$ and $p$. Set, \[ \mathcal{W}_{n}:=\{i_{1},...,i_{s}\in\Lambda^{*}\::\:\psi(g_{i_{1}...i_{s}})\ge\beta n>\psi(g_{i_{1}...i_{s-1}})\}\:. \] As noted in the beginning of the present section, by Lemma \ref{lem:proper homo from R} we may assume that $\gamma_{\beta}=h$. Thus, for $y\in\mathbb{R}^{d}$ \[ B^{-1}y=2^{\beta}Uy=y.h^{-1}=y.\gamma_{-\beta}\:. \] Additionally, \[ r_{w}U_{w}^{-1}y=y.g_{w}\text{ for }w\in\Lambda^{*}\text{ and }y\in\mathbb{R}^{d}\:. \] Hence, since $\mathcal{W}_{n}$ is a minimal cut-set for $\Lambda^{*}$, \begin{eqnarray} \widehat{\mu_{p}}(2\pi B^{-m-n}\xi) & = & \sum_{w\in\mathcal{W}_{n}}p_{w}\int e^{2\pi i\left\langle B^{-m-n}\xi,\varphi_{w}(x)\right\rangle }\:d\mu_{p}(x)\nonumber \\
& = & \sum_{w\in\mathcal{W}_{n}}p_{w}e^{2\pi i\left\langle B^{-m-n}\xi,\varphi_{w}(0)\right\rangle }\widehat{\mu_{p}}(2\pi(B^{-m}\xi).(\gamma_{-n\beta}g_{w}))\:.\label{eq:dev of fur} \end{eqnarray}
Let, \[ f_{n}(p):=\sum_{w\in\mathcal{W}_{n}}p_{w}\widehat{\mu_{p}}(2\pi(B^{-m}\xi).(\gamma_{-n\beta}g_{w}))\:. \] For $s\in\mathbb{Z}_{\ge1}$ set $Y_{s}:=X_{p,1}\cdot...\cdot X_{p,s}$, and let \[ \tau_{\beta}(n):=\inf\{s\in\mathbb{Z}_{\ge1}\::\:\psi Y_{s}\ge\beta n\}\:. \] Observe that, \[ f_{n}(p)=\mathbb{E}\left[\widehat{\mu_{p}}(2\pi(B^{-m}\xi).(\gamma_{-n\beta}Y_{\tau_{\beta}(n)}))\right]\:. \] Additionally, since $\mathrm{supp}(q_{p})=\{g_{i}\}_{i=1}^{\ell}$ and since $G$ is generated by $\{g_{i}\}_{i=1}^{\ell}$, the measure $q_{p}$ is adapted. Thus, by Proposition \ref{prop:conv in dist}
and by taking $n$ to be large enough with respect to $m$ and $p$, we may assume that $|f_{n}(p)|\ge|f(p)|-\frac{1}{4}\ge\frac{1}{4}$.
By (\ref{eq:rep of riUi}) it follows that for every $1\le i\le\ell$ there exists $V_{i}\in N_{0}$ so that $r_{i}U_{i}=A^{l_{i}}V_{i}$. Hence for $i_{1}...i_{s}=w\in\mathcal{W}_{n}$, \[ \varphi_{w}(0)=\sum_{j=1}^{s}r_{i_{1}...i_{j-1}}U_{i_{1}...i_{j-1}}a_{i_{j}}=\sum_{j=1}^{s}A^{l_{i_{1}}}V_{i_{1}}...A^{l_{i_{j-1}}}V_{i_{j-1}}a_{i_{j}}\:. \] From $AN_{0}=N_{0}A$ it follows that there exist $V_{w,1},...,V_{w,s}\in N_{0}$ so that \[ \varphi_{w}(0)=\sum_{j=1}^{s}A^{\sigma_{w,j}}V_{w,j}a_{i_{j}}, \] where $\sigma_{w,j}:=l_{i_{1}}+...+l_{i_{j-1}}$ for $1\le j\le s$. From (\ref{eq:exp decay in prod}) we now get that for all $b\in\mathbb{Z}_{\ge\sigma_{w,s}}$, \begin{eqnarray*} \Vert\left\langle B^{-b}\xi,\varphi_{w}(0)\right\rangle \Vert & \le & \sum_{j=1}^{s}\Vert\left\langle B^{\sigma_{w,j}-b}\xi,V_{w,j}a_{i_{j}}\right\rangle \Vert\\
& \le & C\sum_{j=1}^{s}\delta^{b-\sigma_{w,j}}\le C\sum_{j=b-\sigma_{w,s}}^{\infty}\delta^{j}=\frac{C}{1-\delta}\delta^{b-\sigma_{w,s}}\:. \end{eqnarray*} Additionally, since $w\in\mathcal{W}_{n}$ \[ \beta n>\psi(g_{i_{1}...i_{s-1}})=\sum_{j=1}^{s-1}\psi(g_{i_{j}})=\beta\sigma_{w,s}, \] which implies, \[ \Vert\left\langle B^{-m-n}\xi,\varphi_{w}(0)\right\rangle \Vert\le\frac{C\delta^{m}}{1-\delta}\:. \] Hence, \[
\left|1-e^{2\pi i\left\langle B^{-m-n}\xi,\varphi_{w}(0)\right\rangle }\right|\le\frac{2\pi C\delta^{m}}{1-\delta}\text{ for }w\in\mathcal{W}_{n}\:. \] Now from this, from (\ref{eq:dev of fur}) and by assuming that $m$ is large enough with respect to $\delta$ and $C$, \[
|\widehat{\mu_{p}}(2\pi B^{-m-n}\xi)-f_{n}(p)|\le\sum_{w\in\mathcal{W}_{n}}p_{w}\left|1-e^{2\pi i\left\langle B^{-m-n}\xi,\varphi_{w}(0)\right\rangle }\right|\le\frac{1}{8}\:. \]
Since $|f_{n}(p)|\ge1/4$, it follows that $|\widehat{\mu_{p}}(2\pi B^{-m-n}\xi)|\ge1/8$. Note that this inequality holds for all sufficiently large $n\ge1$. Since $\xi\ne0$, this shows that $\mu_{p}$ is not a Rajchman measure, which completes the proof of the proposition. \end{proof}
\section{\label{sec:Proof-of-the main}Proof of the main result}
In this section we prove Theorem \ref{thm:main}, which we now restate. As always, recall that $\Phi=\{\varphi_{i}(x)=r_{i}U_{i}x+a_{i}\}_{i=1}^{\ell}$ is an affinely irreducible self-similar IFS on $\mathbb{R}^{d}$ \begin{thm*} There exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ such that the self-similar measure corresponding to $\Phi$ and $p$ is non-Rajchman if and only if there exists a linear subspace $\mathbb{V}\subset\mathbb{R}^{d}$, with $d':=\dim\mathbb{V}>0$ and $U_{i}(\mathbb{V})=\mathbb{V}$ for $1\le i\le\ell$, and an isometry $S:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ so that the following conditions are satisfied. \begin{enumerate} \item \label{enu:first cond}For $1\le i\le\ell$ let $U_{i}'\in O(d')$ and $a_{i}'\in\mathbb{R}^{d'}$ be with $S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}(x)=r_{i}U_{i}'x+a_{i}'$. Let $\mathbf{H}\subset GL_{d'}(\mathbb{R})$ be the group generated by $\{r_{i}U_{i}'\}_{i=1}^{\ell}$, and set $\mathbf{N}:=\mathbf{H}\cap O(d')$. Then $\mathbf{N}$ is finite, $\mathbf{N}\triangleleft\mathbf{H}$ and $\mathbf{H}/\mathbf{N}$ is cyclic. \item \label{enu:second cond}For every contracting $A\in\mathbf{H}$ with $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$, there exist $k\ge1$, $\theta_{1},...,\theta_{k}\in\mathbb{C}$ and $\zeta_{1},...,\zeta_{k}\in\mathbb{C}^{d'}\setminus\{0\}$, so that \begin{enumerate} \item $\{\theta_{1},...,\theta_{k}\}$ is a P.V. $k$-tuple; \item $A^{-1}\zeta_{j}=\theta_{j}\zeta_{j}$ for $1\le j\le k$; \item for every $1\le i\le\ell$ and $V\in\mathbf{N}$ there exists $P_{i,V}\in\mathbb{Q}[X]$ so that $\left\langle Va_{i}',\zeta_{j}\right\rangle =P_{i,V}(\theta_{j})$ for all $1\le j\le k$. \end{enumerate} \end{enumerate} \end{thm*}
We shall need the following lemma. \begin{lem} \label{lem:aff irr of tag}Let $\mathbb{V}\subset\mathbb{R}^{d}$ be a linear subspace with $d':=\dim\mathbb{V}>0$ and $U_{i}(\mathbb{V})=\mathbb{V}$ for $1\le i\le\ell$, and let $S:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ be an isometry. For $1\le i\le\ell$ set $\varphi_{i}':=S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}$, and write $\Phi'$ for the self-similar IFS $\{\varphi_{i}'\}_{i=1}^{\ell}$. Then $\Phi'$ is affinely irreducible. \end{lem}
\begin{proof} Let $\mathbb{W}$ be an affine subspace of $\mathbb{R}^{d'}$ so that $\varphi_{i}'(\mathbb{W})=\mathbb{W}$ for $1\le i\le\ell$. Set $\mathbb{W}_{0}:=S^{-1}(\mathbb{W})$ and let $1\le i\le\ell$, then $\mathbb{W}_{0}\subset\mathbb{V}$ and $\pi_{\mathbb{V}}\circ\varphi_{i}(\mathbb{W}_{0})=\mathbb{W}_{0}$. From this, $U_{i}(\mathbb{V})=\mathbb{V}$ and $U_{i}(\mathbb{V}^{\perp})=\mathbb{V}^{\perp}$, it follows that for $x\in\mathbb{W}_{0}$ and $y\in\mathbb{V}^{\perp}$ \begin{eqnarray*} \varphi_{i}(x+y) & = & r_{i}U_{i}x+\pi_{\mathbb{V}}a_{i}+r_{i}U_{i}y+\pi_{\mathbb{V}^{\perp}}a_{i}\\
& = & \pi_{\mathbb{V}}\circ\varphi_{i}(x)+\pi_{\mathbb{V}^{\perp}}\circ\varphi_{i}(y)\in\mathbb{W}_{0}+\mathbb{V}^{\perp}, \end{eqnarray*} and so $\varphi_{i}(\mathbb{W}_{0}+\mathbb{V}^{\perp})=\mathbb{W}_{0}+\mathbb{V}^{\perp}$. Since this holds for every $1\le i\le\ell$ and $\Phi$ is affinely irreducible, it follows that $\mathbb{W}_{0}+\mathbb{V}^{\perp}=\mathbb{R}^{d}$. Since $\mathbb{W}_{0}\subset\mathbb{V}$, we must have $\mathbb{W}_{0}=\mathbb{V}$. Hence $\mathbb{W}=\mathbb{R}^{d'}$, which shows that $\Phi'$ is affinely irreducible. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:main}] Suppose first that there exist a linear subspace $\mathbb{V}$ and an isometry $S$ as in the statement of the theorem. For $1\le i\le\ell$ set $\varphi_{i}':=S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}$, and let $\Phi':=\{\varphi_{i}'\}_{i=1}^{\ell}$. For every $1\le i\le\ell$ and $x\in\mathbb{R}^{d'}$ we have $\varphi_{i}'(x)=r_{i}U_{i}'x+a_{i}'$. By Lemma \ref{lem:aff irr of tag} it follows that $\Phi'$ is affinely irreducible.
Let $G'\subset\mathbb{R}\times O(d')$ be the group generated by $\{(\log r_{i}^{-1},U_{i}')\}_{i=1}^{\ell}$. Let $A\in\mathbf{H}$ be contracting and with $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$, and let $\beta>0$ and $U\in O(d')$ be with $A=2^{-\beta}U$. From $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$ it follows that, \[ G'=\{(n\beta,U^{n}V)\::\:n\in\mathbb{Z}\text{ and }V\in\mathbf{N}\}\:. \] Since $\mathbf{N}$ is finite, $G'$ is easily seen to be discrete and closed in $\mathbb{R}\times O(d')$. By Proposition \ref{prop:conv disc case} and by condition (\ref{enu:second cond}) in the statement of the theorem, it now follows that there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ so that the self-similar measure $\mu'\in\mathcal{M}(\mathbb{R}^{d'})$ corresponding to $\Phi'$ and $p$ is non-Rajchman.
Let $\mu\in\mathcal{M}(\mathbb{R}^{d})$ be the self-similar measure corresponding to $\Phi$ and $p$. Since for $1\le i\le\ell$ we have $U_{i}(\mathbb{V}^{\perp})=\mathbb{V}^{\perp}$, it follows that for $x\in\mathbb{R}^{d}$ \[ \pi_{\mathbb{V}}\varphi_{i}(x)=\pi_{\mathbb{V}}(r_{i}U_{i}\pi_{\mathbb{V}}x+r_{i}U_{i}\pi_{\mathbb{V}^{\perp}}x)+\pi_{\mathbb{V}}a_{i}=\pi_{\mathbb{V}}\circ\varphi_{i}\circ\pi_{\mathbb{V}}(x)\:. \] From this and by the self-similarity of $\mu$, \[ S\pi_{\mathbb{V}}\mu=\sum_{i=1}^{\ell}p_{i}\cdot S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ\pi_{\mathbb{V}}\mu=\sum_{i=1}^{\ell}p_{i}\cdot\varphi_{i}'\circ S\circ\pi_{\mathbb{V}}\mu\:. \] Since $\mu'$ is the unique member of $\mathcal{M}(\mathbb{R}^{d'})$ which satisfies the relation \[ \mu'=\sum_{i=1}^{\ell}p_{i}\cdot\varphi_{i}'\mu', \] it follows that $\mu'=S\pi_{\mathbb{V}}\mu$. From this and since $\mu'$ is non-Rajchman, we get that there exist $\epsilon>0$ and
$\xi_{1},\xi_{2},...\in\mathbb{V}$ so that $|\xi_{n}|\overset{n}{\rightarrow}\infty$
and $|\widehat{\pi_{\mathbb{V}}\mu}(\xi_{n})|>\epsilon$. Since $\widehat{\pi_{\mathbb{V}}\mu}(\xi)=\widehat{\mu}(\xi)$ for $\xi\in\mathbb{V}$, this shows that $\mu$ is also non-Rajchman, which completes the proof of the first direction of the theorem.
Suppose next that there exists a probability vector $p=(p_{i})_{i=1}^{\ell}>0$ so that the self-similar measure $\mu$ corresponding to $\Phi$ and $p$ is non-Rajchman. By Proposition \ref{prop:case Psi(G)=00003DR} it follows that $\psi(G)\ne\mathbb{R}$. Recall that $G_{0}$ denotes the connected component of $G$ containing the identity. Let $\mathbb{V}$ be the linear subspace of $\mathbb{R}^{d}$ consisting of all $x\in\mathbb{R}^{d}$ so that $x.g=x$ for all $g\in G_{0}$. By Proposition \ref{prop:cont case psi(G) not R} and since $\mu$ is non-Rajchman, we have $d':=\dim\mathbb{V}>0$. By Lemma \ref{lem:G inv subspaces}, \begin{equation} U_{i}(\mathbb{V})=\mathbb{V}\text{ and }U_{i}(\mathbb{V}^{\perp})=\mathbb{V}^{\perp}\text{ for all }1\le i\le\ell\:.\label{eq:inv subs} \end{equation}
The map $\pi_{\mathbb{V}}\varphi_{1}|_{\mathbb{V}}$ is a strict contraction of $\mathbb{V}$, and so there exists $y\in\mathbb{V}$ with $\pi_{\mathbb{V}}\varphi_{1}(y)=y$. Let $S:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ be an isometry with $Sy=0$. For $1\le i\le\ell$ set $\varphi_{i}':=S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}$, and let $U_{i}'\in O(d')$ and $a_{i}'\in\mathbb{R}^{d'}$ be with $\varphi_{i}'(x)=r_{i}U_{i}'x+a_{i}'$ for $x\in\mathbb{R}^{d'}$. Let $\mathbf{H}$ be the smallest closed subgroup of $GL_{d'}(\mathbb{R})$ containing $\{r_{i}U_{i}'\}_{i=1}^{\ell}$.
Since $S$ is also an affine map, there exists a linear isometry $L:\mathbb{V}\rightarrow\mathbb{R}^{d'}$ so that $Sx=Lx-Ly$ for $x\in\mathbb{V}$. From (\ref{eq:inv subs}) it follows that $L\circ U\circ L^{-1}\in O(d')$ for every $(t,U)\in G$. Note that $U_{i}'=L\circ U_{i}\circ L^{-1}$ for $1\le i\le\ell$. For $(t,U)\in G$ set $F(t,U)=2^{-t}L\circ U\circ L^{-1}$, so that $F:G\rightarrow GL_{d'}(\mathbb{R})$ is a continuous homomorphism. It is easy to verify that $F$ is a proper map, which implies that $F$ is a closed map. From this and since the group generated by $\{r_{i}U_{i}'\}_{i=1}^{\ell}$ is dense in $F(G)$, it follows that $\mathbf{H}=F(G)$ and that $F$ descends to an isomorphism of topological groups from $G/\ker F$ onto $\mathbf{H}$.
Since $\psi(G)\ne\mathbb{R}$, we have $G_{0}\subset\{0\}\times O(d)$. From this and by the definition of $\mathbb{V}$, it follows that $G_{0}\subset\ker F$. Since $G_{0}$ is an open subgroup of $G$, it follows that $\ker F$ is also an open subgroup of $G$. This implies that $G/\ker F$ is discrete, and so that $\mathbf{H}$ is also discrete. From this and by the definition of $\mathbf{H}$, it follows that $\mathbf{H}$ is equal to the group generated by $\{r_{i}U_{i}'\}_{i=1}^{\ell}$ (and not just to the closed subgroup generated by these elements).
Set $\mathbf{N}:=\mathbf{H}\cap O(d')$. Since $\mathbf{N}$ is the kernel of the homomorphism taking $rU\in\mathbf{H}$ to $r$, where $U\in O(d')$ and $r>0$, we have $\mathbf{N}\triangleleft\mathbf{H}$. Since $\mathbf{H}$ is closed in $GL_{d'}(\mathbb{R})$ and $O(d')$ is compact, it follows that $\mathbf{N}$ is compact. From this and since $\mathbf{H}$ is discrete, it follows that $\mathbf{N}$ is finite. Since $\psi(G)\ne\mathbb{R}$ and $\mathbf{H}=F(G)$, there exists $A\in\mathbf{H}$ so that $\Vert A\Vert<1$ and $\Vert A\Vert\ge\Vert B\Vert$ for all $B\in\mathbf{H}$ with $\Vert B\Vert<1$, where $\Vert\cdot\Vert$ is the operator norm here. It is now obvious that $\{A^{n}\mathbf{N}\}_{n\in\mathbb{Z}}=\mathbf{H}/\mathbf{N}$, which shows that condition (\ref{enu:first cond}) in the statement of the theorem is satisfied.
We turn to prove that condition (\ref{enu:second cond}) is also satisfied. First we show that, \begin{equation}
\underset{M\rightarrow\infty}{\lim}\sup\{|\widehat{\pi_{\mathbb{V}}\mu}(\xi)|\::\:\xi\in\mathbb{V}\text{ and }|\xi|\ge M\}>0\:.\label{eq:proj non-raj} \end{equation} Since $\mu$ is non-Rajchman, there exists $\epsilon_{0}>0$ so that \[
\underset{|\xi|\rightarrow\infty}{\limsup}\:|\widehat{\mu}(\xi)|>\epsilon_{0}\:. \] Let $0<\epsilon<1$ be small with respect to $\Phi$, $p$ and $\epsilon_{0}$, let $R>1$ be large with respect to $\epsilon$, and let $\xi\in\mathbb{R}^{d}$
be with $|\xi|>R$ and $|\widehat{\mu}(\xi)|>\epsilon_{0}$. By Proposition \ref{prop:cont case psi(G) not R} we may assume that, \[
|\pi_{\mathbb{V}^{\perp}}\xi|<\max\{\epsilon R/2,\epsilon|\pi_{\mathbb{V}}\xi|\}\:. \]
If $|\pi_{\mathbb{V}^{\perp}}\xi|\ge\epsilon|\pi_{\mathbb{V}}\xi|$
then $|\pi_{\mathbb{V}^{\perp}}\xi|<\epsilon R/2$, and so \[
R<|\pi_{\mathbb{V}}\xi|+|\pi_{\mathbb{V}^{\perp}}\xi|\le(\epsilon^{-1}+1)|\pi_{\mathbb{V}^{\perp}}\xi|<2\epsilon^{-1}(\epsilon R/2)=R, \]
which is not possible. Hence we must have $|\pi_{\mathbb{V}^{\perp}}\xi|<\epsilon|\pi_{\mathbb{V}}\xi|\le\epsilon|\xi|$.
We may assume that $R>\epsilon^{-1/2}$, which gives $|\xi|^{-1}\epsilon^{-1/2}<1$. Set, \[
\mathcal{W}=\{i_{1}...i_{n}\in\Lambda^{*}\::\:r_{i_{1}...i_{n}}\le|\xi|^{-1}\epsilon^{-1/2}<r_{i_{1}...i_{n-1}}\}\:. \] Since $\mathcal{W}$ is a minimal cut-set, \[
\epsilon_{0}<|\widehat{\mu}(\xi)|=\left|\sum_{w\in\mathcal{W}}p_{w}\int e^{i\left\langle \xi,\varphi_{w}(x)\right\rangle }\:d\mu(x)\right|\le\sum_{w\in\mathcal{W}}p_{w}\left|\int e^{i\left\langle r_{w}U_{w}^{-1}\xi,x\right\rangle }\:d\mu(x)\right|, \]
and so there exists $w\in\mathcal{W}$ with $|\widehat{\mu}(r_{w}U_{w}^{-1}\xi)|>\epsilon_{0}$. By (\ref{eq:inv subs}) and the definition of $\mathcal{W}$, \[
|r_{w}U_{w}^{-1}\xi-\pi_{\mathbb{V}}(r_{w}U_{w}^{-1}\xi)|=|\pi_{\mathbb{V}^{\perp}}(r_{w}U_{w}^{-1}\xi)|=r_{w}|\pi_{\mathbb{V}^{\perp}}\xi|\le r_{w}\epsilon|\xi|\le\epsilon^{1/2}\:. \] Since $\mu$ is compactly supported, the map which takes $\eta\in\mathbb{R}^{d}$ to $\widehat{\mu}(\eta)$ is uniformly continuous. Hence, by assuming that $\epsilon$ is sufficiently small with respect to $\Phi$, $p$ and $\epsilon_{0}$, we get \begin{equation}
|\widehat{\pi_{\mathbb{V}}\mu}(r_{w}\pi_{\mathbb{V}}U_{w}^{-1}\xi)|=|\widehat{\mu}(r_{w}\pi_{\mathbb{V}}U_{w}^{-1}\xi)|\ge|\widehat{\mu}(r_{w}U_{w}^{-1}\xi)|-\epsilon_{0}/2>\epsilon_{0}/2\:.\label{eq:lb fur of proj} \end{equation}
Set $r_{\mathrm{min}}:=\min_{1\le i\le\ell}\:r_{i}$, then by the definition of $\mathcal{W}$ we have $r_{w}>r_{\mathrm{min}}|\xi|^{-1}\epsilon^{-1/2}$. Thus, \[
|r_{w}\pi_{\mathbb{V}}U_{w}^{-1}\xi|\ge r_{w}|\xi|-|\pi_{\mathbb{V}^{\perp}}(r_{w}U_{w}^{-1}\xi)|>r_{\mathrm{min}}\epsilon^{-1/2}-\epsilon^{1/2}\:. \] Since $\epsilon$ can be chosen to be arbitrarily small, the last expression can be made arbitrarily large (while keeping $\epsilon_{0}$ fixed). This together with (\ref{eq:lb fur of proj}) gives (\ref{eq:proj non-raj}).
Set $\Phi':=\{\varphi_{i}'\}_{i=1}^{\ell}$, where recall that $\varphi_{i}':=S\circ\pi_{\mathbb{V}}\circ\varphi_{i}\circ S^{-1}$ for $1\le i\le\ell$. By Lemma \ref{lem:aff irr of tag} it follows that $\Phi'$ is affinely irreducible. Since $\pi_{\mathbb{V}}\varphi_{1}(y)=y$ and $Sy=0$, we have $a_{1}'=\varphi_{1}'(0)=0$. As in the proof of the first direction of the theorem, it holds that $S\pi_{\mathbb{V}}\mu$ is the self-similar measure corresponding to $\Phi'$ and $p$. From (\ref{eq:proj non-raj}) it clearly follows that $S\pi_{\mathbb{V}}\mu$ is non-Rajchman. Since the closed subgroup generated by $\{r_{i}U_{i}'\}_{i=1}^{\ell}$ is discrete, it follows that all of the assumptions in Proposition \ref{prop:main disc case} are satisfied for the IFS $\Phi'$. This implies that condition (\ref{enu:second cond}) in the statement of the theorem holds, which completes the proof. \end{proof}
\section*{\textbf{Acknowledgment}}
This research was supported by the Herchel Smith Fund at the University of Cambridge. I would like to thank Han Yu for helpful discussions during the preparation of this work. I would also like to thank Amir Algom for helpful remarks.
$\newline$$\newline$\textsc{Centre for Mathematical Sciences,\newline Wilberforce Road, Cambridge CB3 0WA, UK}$\newline$$\newline$\textit{E-mail: } \texttt{ariel.rapaport2@gmail.com}
\end{document} |
\begin{document}
\baselineskip3.15ex \vskip .3truecm
\maketitle { \small \noindent $^1$ Centre de Math\'ematiques Appliqu\'ees, \'Ecole Polytechnique, 91128 Palaiseau, France.\\ Email: gregoire.allaire@polytechnique.fr\\ \noindent $^2$ Centre de Math\'ematiques Appliqu\'ees, \'Ecole Polytechnique, 91128 Palaiseau, France.\\ Email: mariapia.palombaro@polytechnique.fr } \begin{abstract} \small{ We study the homogenization of a Schr\"{o}dinger equation in a locally periodic medium. For the time and space scaling of semi-classical analysis we consider well-prepared initial data that are concentrated near a stationary point (with respect to both space and phase) of the energy, i.e. the Bloch cell eigenvalue. We show that there exists a localized solution which is asymptotically given as the product of a Bloch wave and of the solution of an homogenized Schr\"{o}dinger equation with quadratic potential.
\vskip.3truecm \noindent {\bf Key words:}
Homogenization, localization, Bloch waves, Schr\"{o}dinger. \vskip.2truecm \noindent {\bf 2000 Mathematics Subject Classification:} 35B27, 35J10. } \end{abstract}
\section{Introduction}
\noindent We study the homogenization of the following Schr\"{o}dinger equation
\begin{equation}\label{start} \left\{ \begin{array}{ll} \displaystyle \frac{i}{\varepsilon}\frac{\partial u_{\varepsilon}}{\partial t} - \hbox{{\rm div}}\left(A\left(x,\frac{x}{\varepsilon}\right)\nabla u_{\varepsilon}\right) + \frac{1}{\varepsilon^{2}} c\left(x,\frac{x}{\varepsilon}\right) u_{\varepsilon}=0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\ [3mm] u_{\varepsilon}(0,x)=u^{0}_{\varepsilon}(x) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\ \end{array} \right. \end{equation}
\noindent where the unknown $u_{\varepsilon}(t,x)$ is a complex-valued function. The coefficients $A(x,y)$ and $c(x,y)$ are real and sufficiently smooth bounded functions defined for $x\in {\mathbb R}^{N}$ (the macroscopic variable) and $y\in {\mathbb T}^{N}$ (the microscopic variable in the unit torus). The period $\varepsilon$ is a small positive parameter which is intended to go to zero. Furthermore the matrix $A$ is symmetric, uniformly positive definite. Of course the usual Schr\"{o}dinger equation is recovered when $A\equiv Id$ but, since there is no additional difficulty, we keep the general form of equation (\ref{start}) in the sequel (which can be interpreted as introducing a non flat locally periodic metric). \par The scaling of (\ref{start}) is that of semi-classical analysis (see e.g. \cite{blp}, \cite{buslaev}, \cite{guillot}, \cite{gerard}, \cite{gerard2}, \cite{sjostrand}, \cite{guillot2}, \cite{pst}, \cite{pr}): if the period is rescaled to 1, it amounts to look at large, time and space, variables of order $\varepsilon^{-1}$. At least in the case when $A\equiv Id$ and $c(x,y)=c_0(x)+c_1(y)$, there is a well-known theory for the asymptotic limit of (\ref{start}) when $\varepsilon$ goes to zero. By using WKB asymptotic expansion or the notion of semi-classical measures (or Wigner transforms) the homogenized problem is in some sense the Liouville transport equation for a classical particle which is the limit of the wave function $u_\varepsilon$. In other words, for an initial data living in the $n$-th Bloch band and under some technical assumptions on the Bloch spectral cell problem (\ref{celleq}), the semi-classical limit of (\ref{start}) is given by the dynamic of the following Hamiltonian system in the phase space $(x,\theta)\in{\mathbb R}^N\times{\mathbb T}^N$ \begin{equation} \label{hamilton} \left\{ \begin{array}{l} \dot x = \nabla_\theta \lambda_n(x,\theta) \\ \dot \theta = - \nabla_x \lambda_n(x,\theta) \end{array} \right. 
\end{equation} where the Hamiltonian $\lambda_n(x,\theta)$ is precisely the $n$-th Bloch eigenvalue of (\ref{celleq}) (see \cite{buslaev}, \cite{guillot}, \cite{gerard}, \cite{gerard2}, \cite{sjostrand}, \cite{guillot2}, \cite{pst}, \cite{pr} for more details). \par Our approach to (\ref{start}) is different since we consider special initial data that are monochromatic, have zero group velocity and zero applied force. Namely the initial data is concentrating at a point $(x^n,\theta^n)$ of the phase space where $\nabla_\theta \lambda_n(x^n,\theta^n)=\nabla_x \lambda_n(x^n,\theta^n)=0$. In such a case, the previous Hamiltonian system (\ref{hamilton}) degenerates (its solution is constant) and is unable to describe the precise dynamic of the wave function $u_\varepsilon$. We exhibit another limit problem which is again a Schr\"{o}dinger equation with quadratic potential. In other words we build a sequence of approximate solutions of (\ref{start}) which are the product of a Bloch wave and of the solution of an homogenized Schr\"{o}dinger equation. Furthermore, if the full Hessian tensor of the Bloch eigenvalue $\lambda_n(x,\theta)$ is positive definite at $(x^n,\theta^n)$, we prove that all the eigenfunctions of an homogenized Schr\"{o}dinger equation are exponentially decreasing at infinity. In other words, we exhibit a localization phenomenon for (\ref{start}) since we build a sequence of approximate solutions that decay exponentially fast away from $x^n$. The root of this localization phenomenon is the macroscopic modulation (i.e. with respect to $x$) of the periodic coefficients which is similar in spirit to the randomness that causes Anderson's localization (see \cite{cl} and references therein). \par Let us describe more precisely the type of well-prepared initial data that we consider. 
For a given point $(x^n,\theta^n)\in{\mathbb R}^N\times{\mathbb T}^N$ and a given function $v^{0}\in H^{1}({\mathbb R}^{N})$ we take \begin{equation}\label{wp} u_{\varepsilon}^{0}(x)=\psi_{n}\Big(x^{n},\frac{x}{\varepsilon},\theta^{n}\Big) e^{2i\pi\frac{\theta^{n}\cdot x}{\varepsilon}} v^{0}\Big(\frac{x-x^n}{\sqrt{\varepsilon}}\Big) \end{equation} where $\psi_{n}(x,y,\theta)$ is a so-called Bloch eigenfunction, solution of the following Bloch spectral cell equation
\begin{equation}\label{celleq} - (\hbox{{\rm div}}_{y} + 2i\pi\theta)(A(x,y)(\nabla_{y} + 2i\pi\theta)\psi_{n}) + c(x,y)\psi_{n} = \lambda_{n}(x,\theta)\psi_{n} \hspace{1cm} \mbox{ in }{\mathbb T}^{N}\,, \end{equation}
\noindent corresponding to the $n$-th eigenvalue or energy level $\lambda_{n}$. The Bloch wave $\psi_{n}$ is periodic with respect to $y$ but $v^0$ is not periodic, so $v^{0}\Big(\frac{x-x^n}{\sqrt{\varepsilon}}\Big)$ means that the initial data is concentrated around $x^n$ with a support of asymptotic size $\sqrt\varepsilon$. The Bloch frequency $\theta^n\in{\mathbb T}^{N}$, the localization point $x^n\in{\mathbb R}^{N}$ and the energy level $n$ are chosen such that $\lambda_{n}(x^n,\theta^n)$ is simple and $\nabla_x\lambda_{n}(x^n,\theta^n) = \nabla_\theta\lambda_{n}(x^n,\theta^n) =0$.
Our main result (Theorem \ref{mainth}) shows that the solution of \eqref{start} is approximately given by \begin{equation}\label{asymptot} u_{\varepsilon}(t,x)\approx \psi_{n}\Big(x^{n},\frac{x}{\varepsilon},\theta^{n}\Big) e^{i\frac{\lambda_{n}(x^n,\theta^n) t}{\varepsilon}}e^{2i\pi\frac{\theta^{n}\cdot x}{\varepsilon}} v\Big(t,\frac{x-x^n}{\sqrt{\varepsilon}}\Big)\,, \end{equation} where $v$ is the unique solution of the homogenized Schr\"odinger equation
\begin{equation}\label{hom0} \left\{ \begin{array}{ll} \displaystyle i \frac{\partial v}{\partial t} - \hbox{{\rm div}}\left(A^{*}\nabla v \right) + \hbox{{\rm div}}(v B^{*} z) + c^{*} v + v D^{*} z \cdot z = 0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\ [3mm] v (0,z)=v^{0}(z) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\ \end{array} \right. \end{equation}
\noindent where $c^*$ is a constant coefficient and $A^*,B^*,D^*$ are constant matrices defined by $$ A^{*}=\frac{1}{8\pi^{2}}\nabla_{\theta}\nabla_{\theta}\lambda_{n}(x^n,\theta^n) \,, \ B^{*}=\frac{1}{2i\pi}\nabla_{\theta}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,, \ D^{*}=\frac{1}{2}\nabla_{x}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,. $$ In Proposition \ref{propoself} we show that the homogenized problem \eqref{hom0} is well-posed since the underlying operator is self-adjoint. Furthermore, under the additional assumption that the Hessian tensor $\nabla\nabla\lambda_{n}(x^n,\theta^n)$ (with respect to both variables $x$ and $\theta$) is positive definite, we prove that \eqref{hom0} admits a countable number of eigenvalues and eigenfunctions which all decay exponentially at infinity (see Proposition \ref{localization}). In such a case, formula \eqref{asymptot} defines a family of approximate (exponentially) localized solutions of \eqref{start}. \par Let us indicate that the case of the first eigenvalue (ground state) $n=1$ with $\theta^1=0$ was already studied in \cite{alpiat1} (for the spectral problem rather than the evolution equation). The case of purely periodic coefficients (i.e. that depend only on $y$ and not on $x$) is completely different and was studied in \cite{alpiat2}. Indeed, in this latter case there is no localization effect and one proves that, for a longer time scale (of order $\varepsilon^{-1}$ with respect to \eqref{start}), the homogenized limit is again a Schr\"{o}dinger equation without the drift and quadratic potential in \eqref{hom0}.
\section{Preliminaries} \noindent In the present section we give our main assumptions, set some notation and a few preliminary results needed in the proof of the main results in Section \ref{mainsec}.
We first assume that the coefficients $A_{ij}(x,y)$ and $c(x,y)$ are real, bounded, and Carath\'eodory functions (measurable with respect to $y$ and continuous in $x$), which are periodic with respect to $y$. In other words, they belong to $C_b\left({\mathbb R}^N;L^\infty({\mathbb T}^N)\right)$. Furthermore, the tensor $A(x,y)$ is symmetric and uniformly coercive. Under these assumptions, it is well-known that, for any values of the parameters ${\theta\in{\mathbb T}^{N}}$ and ${x\in{\mathbb R}^{N}}$, the cell problem \eqref{celleq} defines a compact self-adjoint operator on $L^{2}({\mathbb T}^{N})$ which admits a countable sequence of real increasing eigenvalues ${\displaystyle \{\lambda_{n}(x,\theta)\}_{n\geq 1}}$ (repeated with their multiplicity) with corresponding eigenfunctions ${\displaystyle \{\psi_{n}(x,y,\theta)\}_{n\geq 1}}$ normalized by $$
||\psi_{n}(x,\theta,\cdot)||_{L^{2}({\mathbb T}^{N})} =1\,. $$
Our main assumptions are:
\noindent {\bf Hypothesis H1.} There exist $x^{n}\in{\mathbb R}^{N}$ and $\theta^{n}\in{\mathbb T}^{N}$ such that
\begin{equation}\label{assumpt} \begin{cases} &\hspace{-3mm}(i)\: \lambda_{n}(x^{n},\theta^{n}) \text{ is a simple eigenvalue,} \\ &\hspace{-3mm}(ii)\: (x^{n},\theta^{n}) \text{ is a critical point of } \lambda_{n}(x,\theta), \text{ i.e. } \nabla_{x}\lambda_{n}(x^{n},\theta^{n})=\nabla_{\theta}\lambda_{n}(x^{n},\theta^{n})=0. \\ \end{cases} \end{equation}
\noindent
{\bf Hypothesis H2.} The coefficients $A(x,y)$ and $c(x,y)$ are of class $C^{2}$ with respect to the variable $x$ in a neighborhood of $x=x^{n}$.
\noindent Then we set: \begin{equation*} A_{1,h}(y):=\frac{\partial A}{\partial x_{h}}(x^{n},y)\,,\quad A_{2,lh}(y):=\frac{\partial^{2} A}{\partial x_{l}\partial x_{h}}(x^{n},y)\,,\quad \text{ for } \: l,h=1,\dots,N\,. \end{equation*} Similar notation is used to denote the derivatives of the function $c$ with respect to the $x$-variable. With an abuse of notation we further set $$ A(y):=A(x^{n},y)\,,\quad \lambda_{n}:=\lambda_{n}(x^{n},\theta^{n})\,,\quad \psi_{n}(y):=\psi_{n}(x^{n},y,\theta^{n})\,, $$ and analogous notation holds for all derivatives of $\psi_{n}$ and $\lambda_{n}$ with respect to the $x$-variable and the $\theta$-variable evaluated at $x=x^{n}$ and $\theta=\theta^{n}$. Without loss of generality we will assume in the sequel that $x^{n}=0$.\\ {\bf Notation.} For any function $\rho(y)$ defined on ${\mathbb T}^{N}$ we set $$ \rho^{\varepsilon}(z):=\rho(z/\sqrt{\varepsilon}) $$ where $z:=\sqrt{\varepsilon}y\equiv x/\sqrt\varepsilon$. In the sequel the symbols $\hbox{{\rm div}}_y$ and $\nabla_y$ will stand for the divergence and gradient operators which act with respect to the $y$-variable while div and $\nabla$ will indicate the divergence and gradient operators which act with respect to the $z$-variable. Finally throughout this paper the Einstein summation convention is used.
Under assumption \eqref{assumpt}-(i) it is a classical matter to prove that the $n$-th eigencouple of \eqref{celleq} is smooth with respect to the variable $\theta$ in a neighborhood of $\theta=\theta^n$ (see \cite{kato}) and has the same differentiability property as the coefficients with respect to the variable $x$. Introducing the unbounded operator ${\mathbb A}_n(x,\theta)$ defined on $L^2({\mathbb T}^N)$ by \begin{equation*} {\mathbb A}_n(x,\theta)\psi = - (\hbox{{\rm div}}_y +2i\pi\theta) \Big( A(x,y) (\nabla_y +2i\pi\theta)\psi \Big) + c(x,y) \psi - \lambda_n(x,\theta) \psi , \end{equation*} it is easy to differentiate \eqref{celleq}. Denoting by $(e_k)_{1\leq k\leq N}$ the canonical basis of ${\mathbb R}^N$, the first derivatives satisfy \begin{equation} \label{deriv1t} \begin{array}{ll} \displaystyle {\mathbb A}_n(x,\theta)\frac{\partial\psi_{n}}{\partial\theta_k} = &\displaystyle 2i\pi e_k A(x,y)(\nabla_y +2i\pi\theta)\psi_{n} \\[0.3cm] &\displaystyle + (\hbox{{\rm div}}_y +2i\pi\theta) \left( A(x,y) 2i\pi e_k \psi_{n} \right) + \frac{\partial\lambda_n}{\partial\theta_k}(x,\theta) \psi_{n} \,, \end{array} \end{equation}
\begin{equation} \label{deriv1x} \begin{array}{ll} \displaystyle {\mathbb A}_n(x,\theta)\frac{\partial\psi_{n}}{\partial x_l} = &\displaystyle (\hbox{{\rm div}}_y +2i\pi\theta) \Big( \frac{\partial A}{\partial x_l}(x,y)
(\nabla_y +2i\pi\theta)\psi_{n} \Big) \\[0.3cm] &\displaystyle -\frac{\partial c}{\partial x_l}(x,y)\psi_{n}
+ \frac{\partial \lambda_n}{\partial x_l}(x,\theta)\psi_{n} \,. \end{array} \end{equation}
\noindent Similar formulas hold for second order derivatives. By integrating the cell equations for the second order derivatives against $\psi_{n}$ we obtain the following formulas that will be useful in the sequel (their proofs are safely left to the reader).
\begin{lemma} Assume that assumptions {\bf H1} and {\bf H2} hold true. Then the following equalities hold:
\begin{align}\label{eq4}
& \int_{{\mathbb T}^{N}} \frac{1}{2\pi i}\Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial \theta_{k}}
\cdot (\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n} +
c_{1,h}\,\frac{\partial \psi_{n}}{\partial \theta_{k}} \bar{\psi}_{n}\Big] \: dy \\ \nonumber & +\int_{{\mathbb T}^{N}}\Big[
A_{1,h} e_{k}\psi_{n} \cdot (\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}
+ A e_{k}\frac{\partial \psi_{n}}{\partial x_{h}}\cdot
(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n} \Big]\: dy \\ \nonumber & -\int_{{\mathbb T}^{N}}\Big[
e_{k}\bar{\psi}_{n} A_{1,h}\cdot (\nabla_{y}+2i\pi\theta^{n})\psi_{n}
+ e_{k}\bar{\psi}_{n} A \cdot
(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial x_{h}}\Big]\: dy \\ \nonumber & - \frac{1}{2i\pi}\frac{\partial^{2} \lambda_{n}}{\partial x_{h}\partial \theta_{k}} = 0\,, \end{align}
\begin{align}\label{eq5}
& \int_{{\mathbb T}^{N}} \Big[
A_{2,lh}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot (\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n}
+ \Big(c_{2,lh} - \frac{\partial^{2} \lambda_{n}}{\partial x_{l}\partial x_{h}}\Big)
|\psi_{n}|^{2} \Big] \: dy \\ \nonumber & + \int_{{\mathbb T}^{N}} \Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial x_{l}}
\cdot (\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n} +
c_{1,h}\,\frac{\partial \psi_{n}}{\partial x_{l}}\bar{\psi}_{n} \Big]\: dy \\ \nonumber & + \int_{{\mathbb T}^{N}} \Big[
A_{1,l}(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial x_{h}}\cdot
(\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n}
+ c_{1,l}\,\frac{\partial \psi_{n}}{\partial x_{h}}\bar{\psi}_{n} \Big]
\: dy = 0\,, \end{align}
\begin{align}\label{eq6}
& \int_{{\mathbb T}^{N}} \Big[
2i\pi e_k A(y)
(\nabla_y +2i\pi\theta^{n})\frac{\partial\psi_n}{\partial\theta_l}\bar{\psi}_{n}
- \left( A(y) 2i\pi e_k
\frac{\partial\psi_n}{\partial\theta_l} \right)(\nabla_y -2i\pi\theta^{n})\bar{\psi}_{n}
\Big]\: dy \\ \nonumber & + \int_{{\mathbb T}^{N}} \Big[
2i\pi e_l A(y)
(\nabla_y +2i\pi\theta^{n})\frac{\partial\psi_n}{\partial\theta_k}\bar{\psi}_{n}
-\left( A(y) 2i\pi e_l \frac{\partial\psi_n}{\partial\theta_k} \right)
(\nabla_y -2i\pi\theta^{n})\bar{\psi}_{n} \Big]\: dy \\ \nonumber & -\int_{{\mathbb T}^{N}} \Big[
4\pi^2e_k A(y) e_l |\psi_n|^{2} +4\pi^2e_l A(y) e_k |\psi_n|^{2} \Big]\: dy \\ \nonumber & + \frac{\partial^2\lambda_n}{\partial\theta_l\partial\theta_k}(\theta^{n}) =0\,. \end{align} \end{lemma}
We now give the variational formulations of the above cell problems, rescaled at size $\varepsilon$.
\begin{lemma} Assume that assumptions {\bf H1} and {\bf H2} hold true and let $\varphi(z)$ be a smooth compactly supported function defined from ${\mathbb R}^{N}$ into ${\mathbb C}$. Then the following equalities hold:
\begin{align}\label{eq1}
& \int_{{\mathbb R}^{N}} \Big[ A^{\e}(\nabla_{y}+2i\pi\theta^{n})\psin^{\e}\cdot
(\sqrt{\varepsilon}\nabla-2i\pi\theta^{n})\bar{\varphi}(z) +(c^{\e} - \lan^{\e})\psin^{\e} \bar{\varphi}\Big] \: dz = 0 \,, \end{align}
\begin{align}\label{eq2}
& \int_{{\mathbb R}^{N}}\Big[ A^{\e}(\nabla_{y}+2i\pi\theta^{n})
\frac{\partial \psin^{\e}}{\partial \theta_{k}}
\cdot (\sqrt{\varepsilon}\nabla -2i\pi\theta^{n})\bar{\varphi} + (c^{\e}- \lan^{\e})
\frac{\partial \psin^{\e}}{\partial \theta_{k}} \bar{\varphi}\Big] \: dz \\ \nonumber & +\int_{{\mathbb R}^{N}}\Big[ -2\pi i e_{k} \cdot A^{\e}(\nabla_{y}+2i\pi\theta^{n})
\psin^{\e}\bar{\varphi} + A^{\e} \, 2\pi i e_{k} \psin^{\e} \cdot
(\sqrt{\varepsilon}\nabla -2i\pi\theta^{n})\bar{\varphi}\Big] \: dz=0\,, \end{align}
\begin{align}\label{eq3}
& \int_{{\mathbb R}^{N}}\Big[ A^{\e}(\nabla_{y}+2i\pi\theta^{n})
\frac{\partial \psin^{\e}}{\partial x_{h}}\cdot(\sqrt{\varepsilon}\nabla -2i\pi\theta^{n}) \bar{\varphi}
+(c^{\e} - \lan^{\e}) \frac{\partial \psin^{\e}}{\partial x_{h}}\bar{\varphi} \Big] \: dz \\ \nonumber & +\int_{{\mathbb R}^{N}}\Big[
A^{\e}_{1,h}(\nabla_{y}+2i\pi\theta^{n})\psin^{\e} \cdot
(\sqrt{\varepsilon}\nabla -2i\pi\theta^{n})\bar{\varphi} +
c^{\e}_{1,h}\,\psin^{\e} \bar{\varphi}\Big] \: dz =0\,. \end{align}
\end{lemma}
\begin{proof} Formula \eqref{eq1} follows straightforwardly from equation \eqref{celleq} while \eqref{eq2}-\eqref{eq3} are consequences of \eqref{deriv1t}-\eqref{deriv1x}. \end{proof}
Finally we recall the notion of two-scale convergence introduced in \cite{allaire}, \cite{nguetseng} (that will be used with $\delta=\sqrt\varepsilon$).
\begin{proposition} \label{prop2s} Let $f_{\delta}$ be a sequence uniformly bounded in $L^2({\mathbb R}^N)$. \begin{enumerate} \item There exists a subsequence, still denoted by $f_\delta$, and a limit $f_0(x,y) \in L^2({\mathbb R}^N\times{\mathbb T}^N)$ such that $f_\delta$ {\em two-scale converges} weakly to $f_0$ in the sense that $$ \lim_{\delta\to 0} \int_{{\mathbb R}^N} f_\delta(x)\phi(x,x/\delta)\,dx = \int_{{\mathbb R}^N}\int_{{\mathbb T}^N} f_0(x,y)\phi(x,y)\,dx\,dy $$ for all functions $\phi(x,y)\in L^2\left( {\mathbb R}^N ; C({\mathbb T}^N) \right)$.
\item Assume further that $f_\delta$ two-scale converges weakly to $f_0$ and that $$
\lim_{\delta\to 0} \| f_{\delta} \|_{L^2({\mathbb R}^N)} =
\| f_0 \|_{L^2\left({\mathbb R}^N\times{\mathbb T}^N\right)} . $$ Then $f_\delta$ is said to two-scale converge {\em strongly} to its limit $f_0$ in the sense that, if $f_0$ is smooth enough, e.g. $f_0\in L^2\left( {\mathbb R}^N ; C({\mathbb T}^N) \right)$, we have $$
\lim_{\delta\to 0} \int_{{\mathbb R}^N} \left| f_\delta(x)-
f_0\left(x,x/\delta\right) \right|^2 dx = 0. $$
\item Assume that $\delta \nabla f_\delta$ is also uniformly bounded in $L^2({\mathbb R}^N)^N$. Then there exists a subsequence, still denoted by $f_\delta$, and a limit $f_0(x,y) \in L^2({\mathbb R}^N ;H^1({\mathbb T}^N))$ such that $f_\delta$ two-scale converges to $f_0(x,y)$ and $\delta\nabla f_\delta$ two-scale converges to $\nabla_y f_0(x,y)$. \end{enumerate} \end{proposition}
\section{Main results}\label{mainsec} \noindent We begin by recalling the usual a priori estimates for the solution of the Schr\"{o}dinger equation \eqref{start} which hold true since the coefficients are real. They are obtained by multiplying the equation successively by $\overline u_\varepsilon$ and $\frac{\partial \overline u_\varepsilon}{\partial t}$, and integrating by parts.
\begin{lemma}\label{apriori} There exists $C>0$ independent of $\varepsilon$ such that the solution of \eqref{start} satisfies \begin{align*}
& ||u_{\varepsilon}||_{L^{\infty}({\mathbb R}^+;L^{2}({\mathbb R}^{N}))} = ||u_{\varepsilon}^{0}||_{L^{2}({\mathbb R}^{N})}\,,\\
& \varepsilon||\nabla u_{\varepsilon}||_{L^{\infty}({\mathbb R}^+;L^{2}({\mathbb R}^{N}))} \leq C\Big( ||u_{\varepsilon}^{0}||_{L^{2}({\mathbb R}^{N})}+\varepsilon ||\nabla u_{\varepsilon}^{0}||_{L^{2}({\mathbb R}^{N})}\Big)\,. \end{align*} \end{lemma}
\begin{theorem}\label{mainth} Assume that assumptions {\bf H1} and {\bf H2} hold true and that the initial data $u_{\varepsilon}^{0}$ is of the form \eqref{wp}. Then the solution of \eqref{start} can be written as \begin{equation}\label{form} u_{\varepsilon}(t,x)=e^{i\frac{\lambda_{n} t}{\varepsilon}}e^{2i\pi\frac{\theta^{n}\cdot x}{\varepsilon}} v_{\varepsilon}\Big(t,\frac{x-x^n}{\sqrt{\varepsilon}}\Big)\,, \end{equation} where $v_{\varepsilon}(t,z)$ two-scale converges strongly to $\psi_{n}(y)v(t,z)$, i.e. \begin{equation} \label{eq2s.6c}
\lim_{\varepsilon\to0} \int_{{\mathbb R}^N} \left| v_\varepsilon(t,z) -
\psi_{n}\left(\frac{z}{\sqrt\varepsilon}\right) v(t,z) \right|^2 dz = 0 , \end{equation} uniformly on compact time intervals in ${\mathbb R}^+$, and $v$ is the unique solution of the homogenized Schr\"odinger equation
\begin{equation}\label{hom} \left\{ \begin{array}{ll} \displaystyle i \frac{\partial v}{\partial t} - \hbox{{\rm div}}\left(A^{*}\nabla v \right) + \hbox{{\rm div}}(v B^{*} z) + c^{*} v + v D^{*} z \cdot z = 0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\ [3mm] v (0,z)=v^{0}(z) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\ \end{array} \right. \end{equation}
\noindent where $$ A^{*}=\frac{1}{8\pi^{2}}\nabla_{\theta}\nabla_{\theta}\lambda_{n}(x^n,\theta^n) \,, \ B^{*}=\frac{1}{2i\pi}\nabla_{\theta}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,, \ D^{*}=\frac{1}{2}\nabla_{x}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,, $$ and $c^{*}$ is given by \begin{equation*} c^{*}=\int_{{\mathbb T}^{N}}\hspace{-1mm}\Big[
A(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot \frac{\partial \bar{\psi}_{n}}{\partial x_{k}} e_{k}
- A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}\cdot \psi_{n}\,e_{k}
- A_{1,k}(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}\cdot \psi_{n} e_{k} \Big] dy \,. \end{equation*}
\end{theorem}
\begin{remark} {\rm Notice that even if the tensor $A^*$ might be non-coercive, the homogenized problem \eqref{hom} is well posed. Indeed the operator ${\mathbb A}^{*}:L^2({\mathbb R}^N)\to L^2({\mathbb R}^N)$ defined by \begin{equation}\label{herm} {\mathbb A}^* \varphi = - \hbox{{\rm div}}\left(A^{*}\nabla \varphi \right) + \hbox{{\rm div}}(\varphi B^{*} z) + c^{*} \varphi + \varphi D^{*} z \cdot z \end{equation} is self-adjoint (see Proposition \ref{propoself}) and therefore by using semi-group theory (see {\em e.g.} \cite{brezis} or Chapter X in \cite{reedsimon}), one can show that there exists a unique solution in $C({\mathbb R}^+;L^2({\mathbb R}^N))$, although it may not belong to $L^2({\mathbb R}^+;H^1({\mathbb R}^N))$. } \end{remark}
The next result establishes the conservation of the $L^2$-norm for the solution $v$ of the homogenized equation \eqref{hom} and the self-adjointness of the operator ${\mathbb A}^*$.
\begin{proposition}\label{propoself} Let $v\in C({\mathbb R}^+;L^2({\mathbb R}^N))$ be solution to \eqref{hom}. Then \begin{equation}\label{cons}
||v(t,\cdot)||_{L^2({\mathbb R}^N)}=||v^0||_{L^2({\mathbb R}^N)} \quad \forall \,t\in{\mathbb R}^+\,. \end{equation} Moreover the operator ${\mathbb A}^{*}$ defined in \eqref{herm} is self-adjoint. \end{proposition}
\begin{proof} We multiply the equation \eqref{hom} by $\bar{v}$ and take the imaginary part to obtain \begin{equation}\label{impart}
\frac{1}{2}\frac{d}{dt}\int_{{\mathbb R}^N}|v|^2 \, dz =
{\rm Im}\left(\int_{{\mathbb R}^N} v B^*z\cdot \nabla\bar{v}-c^* |v|^2 \, dz \right)\,. \end{equation} After integrating by parts one finds that the right hand side of \eqref{impart} equals $$
-\Big(\frac{1}{2i}{\rm tr}\, B^* + {\rm Im}c^*\Big)\int_{{\mathbb R}^N}|v|^2 \, dz $$ and therefore \eqref{cons} is proved as soon as we show that \begin{equation}\label{contaccio} \frac{1}{2i}{\rm tr}\, B^* + {\rm Im}c^*=0\,. \end{equation} In order to do this we first rewrite the coefficients $c^*$ and $B^*$ in a suitable form. Denoting by $\langle\cdot\,,\cdot\rangle$ the Hermitian inner product in $L^2({\mathbb T}^N)$ and using equation \eqref{deriv1t} we write
\begin{equation}\label{formc} c^*= \frac{1}{2i\pi}\langle {\mathbb A}_n\frac{\partial \psi_{n}}{\partial \theta_k},\frac{\partial \psi_{n}}{\partial x_k}\rangle -\int_{{\mathbb T}^N}A_{1,k}(\nabla_y-2i\pi\theta^n)\bar{\psi}_{n}\cdot\psi_{n} e_k\, dy\,, \end{equation} \noindent while by equations \eqref{deriv1t}-\eqref{eq4} it follows that
\begin{align}\label{formB}
\frac{1}{2i\pi}\frac{\partial^2 \lambda_{n}}{\partial x_h\partial\theta_k}=
& -\frac{1}{2i\pi}\langle\overline{
{\mathbb A}_n\frac{\partial \psi_{n}}{\partial \theta_k},\frac{\partial \psi_{n}}{\partial x_h}}\rangle
-\frac{1}{2i\pi}\langle\overline{
{\mathbb A}_n\frac{\partial \psi_{n}}{\partial x_h},\frac{\partial \psi_{n}}{\partial\theta_k}}\rangle \\ \nonumber & +2i{\rm Im}\int_{{\mathbb T}^N}A_{1,h}(\nabla_y-2i\pi\theta^n)\bar{\psi}_{n}\cdot\psi_{n} e_k\, dy \,. \end{align} \noindent By formulae \eqref{formc}-\eqref{formB} it is readily seen that equality \eqref{contaccio} holds true.
In order to prove the self-adjointness of the operator ${\mathbb A}^*$, one first checks that ${\mathbb A}^*$ is symmetric, which easily follows by \eqref{contaccio} and the fact that ${\displaystyle \overline{B}^*=-B^*}$, and then observes that up to addition of a multiple of the identity the operator ${\mathbb A}^*$ is monotone (see {\em e.g.} \cite{brezis2}, Chapter VII). \end{proof}
In the next proposition we will denote by $\nabla\nabla \lambda_{n}$ the Hessian matrix of the function $\lambda_{n}(x,\theta)$ evaluated at the point $(x^n,\theta^n)$, namely $$ \nabla\nabla \lambda_{n} = \left( \begin{array}{ll} \nabla_{x}\nabla_{x}\lambda_{n} & \nabla_{\theta}\nabla_{x}\lambda_{n} \\ \nabla_{x}\nabla_{\theta}\lambda_{n} & \nabla_{\theta}\nabla_{\theta}\lambda_{n} \end{array} \right) (x^n,\theta^n)\,. $$
\begin{proposition}\label{localization} Assume that the matrix $\nabla\nabla \lambda_{n}$ is positive definite. Then there exists an orthonormal basis ${\displaystyle \{\varphi_n\}_{n\geq 1}}$ of eigenfunctions of ${\mathbb A}^*$; moreover for each $n$ there exists a real constant $\gamma_n>0$ such that \begin{equation}\label{decay}
e^{\gamma_n |z|}\varphi_n\,,\,e^{\gamma_n |z|}\nabla\varphi_n \in L^2({\mathbb R}^N)\,. \end{equation} \end{proposition}
\begin{proof} Up to shifting the spectrum of the operator ${\mathbb A}^{*}$, we may assume that Re$(c^*)=0$. In order to prove the existence of an orthonormal basis of eigenfunctions we introduce the inverse operator of ${\mathbb A}^{*}$, denoted by $G^*$ \begin{align} \nonumber G^{*}: L^2({\mathbb R}^N) & \to L^2({\mathbb R}^N) \\ \nonumber f & \to \varphi \text{ unique solution in } H^1({\mathbb R}^N) \text{ of} \\ \label{inverse} &\quad\quad {\mathbb A}^* \varphi = f \quad\text{ in }{\mathbb R}^N \end{align} and we show that $G^*$ is compact. Indeed multiplication of \eqref{inverse} by $\bar{\varphi}$ yields \begin{equation}\label{compact} \int_{{\mathbb R}^N}[ A^*\nabla\varphi\cdot\nabla\bar{\varphi}-iB^*{\rm Im}(\varphi z\cdot\nabla\bar{\varphi})+
D^*z\cdot z|\varphi|^2 ] \, dz= \int_{{\mathbb R}^N}f\bar{\varphi}\, dz\,. \end{equation} Upon defining the $2N$-dimensional vector-valued function $\Phi$ $$ \Phi:=\left(\hspace{-2mm} \begin{array}{c} 2i\pi z\varphi \\ \nabla\varphi \end{array}\hspace{-2mm} \right) $$ we rewrite \eqref{compact} in agreement with this block notation \begin{equation*} \int_{{\mathbb R}^N}\frac{1}{8\pi^2} \nabla\nabla\lambda_{n} \Phi\cdot\overline\Phi \, dz= \int_{{\mathbb R}^N}f\bar{\varphi}\, dz \,. \end{equation*} By the positivity assumption on the matrix $\nabla\nabla\lambda_{n}$ it follows that there exists a positive constant $c_0$ such that \begin{equation*}
c_0\Big(||\nabla\varphi||_{L^2({\mathbb R}^N)}^2+||z\varphi||_{L^2({\mathbb R}^N)}^2\Big) \leq
||f||_{L^2({\mathbb R}^N)}||\varphi||_{L^2({\mathbb R}^N)}\,, \end{equation*} which implies by a standard argument $$
||\varphi||_{L^2({\mathbb R}^N)}^2 + ||\nabla\varphi||_{L^2({\mathbb R}^N)}^2+||z\varphi||_{L^2({\mathbb R}^N)}^2 \leq C ||f||_{L^2({\mathbb R}^N)}^2, $$ from which we deduce the compactness of $G^*$ in $L^2({\mathbb R}^N)$-strong. Thus there exists an infinite countable number of eigenvalues for ${\mathbb A}^{*}$.
We are left to prove the exponential decay of the eigenfunctions (this fact is quite standard, see {\em e.g.} \cite{alam}). Let $\varphi_n$ be an eigenfunction and let $\sigma_n$ be the associated eigenvalue \begin{equation}\label{eigenfn} {\mathbb A}^* \varphi_n = \sigma_n \varphi_n\,. \end{equation} Let $R_0>0$ and $\rho\in C^\infty({\mathbb R})$ be a real function such that $0\leq\rho\leq 1$, $\rho(s)=0$ for $s\leq R_0$ and $\rho(s)=1$ for $s\geq R_0+1$ and for every positive integer $k$ define $\rho_k\in C^\infty({\mathbb R}^N)$ in the following way $$
\rho_k(z):=\rho(|z|-k). $$ We now multiply \eqref{eigenfn} by $\bar{\varphi}_n\rho_k^2$ to get \begin{equation*} \int_{{\mathbb R}^N}\rho_k^2\left( A^*\nabla\varphi_n\cdot\nabla\bar{\varphi}_n-iB^*{\rm Im}(\varphi_n z\cdot\nabla\bar{\varphi}_n)+
D^*z\cdot z|\varphi_n|^2 -\sigma_n|\varphi_n|^2 \right) dz= \end{equation*} \begin{equation}\label{trucco}
\int_{{\mathbb R}^N}\left(\rho_k|\varphi_n|^2B^*z\cdot\nabla\rho_k - 2\rho_k \,\bar{\varphi}_n A^*\nabla\varphi_n\cdot\nabla\rho_k \right) dz\,. \end{equation} Next remark that since the left hand side of \eqref{trucco} is real the right hand side must be also real and therefore it is equal to \begin{equation}\label{real} \int_{{\mathbb R}^N}- 2\rho_k \,{\rm Re}(\bar{\varphi}_n A^*\nabla\varphi_n)\cdot\nabla\rho_k \, dz\,. \end{equation} Let $B_k$ denote the ball of radius $R_0+k$ and center $z=0$ and observe that the support of $\nabla\rho_k$ is contained in $B_{k+1}\setminus B_k$. Then putting up together \eqref{trucco} and \eqref{real} and using again the positive definiteness of the matrix $\nabla\nabla\lambda_{n}$ we obtain for $R_0$ sufficiently large ($\sqrt{R_0}>\sigma_n$ does the job) \begin{equation*}
||\varphi_n||_{H^1({\mathbb R}^N\setminus B_{k+1})}^2 \leq
c_1\Big(||\varphi_n||^2_{H^1({\mathbb R}^N\setminus B_k)}-||\varphi_n||^2_{H^1({\mathbb R}^N\setminus B_{k+1})}\Big) \end{equation*} where $c_1$ is a positive constant independent of $k$. Thus we deduce that \begin{equation}\label{estim}
||\varphi_n||_{H^1({\mathbb R}^N\setminus B_{k+1})}^2 \leq
\Big(\frac{c_1}{1+c_1}\Big)^k||\varphi_n||^2_{H^1({\mathbb R}^N\setminus B_0)}\,. \end{equation} Upon defining a positive constant $\gamma_0>0$ by $$ \Big(\frac{c_1}{1+c_1}\Big)^k=e^{-2\gamma_0(k+R_0)} $$ it is finally seen that \eqref{estim} implies the estimate \eqref{decay} for any exponent $0<\gamma_n<\gamma_0$. \end{proof}
\noindent {\bf Proof of Theorem \ref{mainth}.} We rescale the space variable by introducing $$ z=\frac{x}{\sqrt{\varepsilon}} \,, $$ and define the sequence $v_{\varepsilon}$ by \begin{equation}\label{vdef} v_{\varepsilon}(t,z):=e^{-i\frac{\lambda_{n} t}{\varepsilon}} e^{-2i\pi\frac{\theta^{n}\cdot x}{\varepsilon}}u_{\varepsilon}(t,x)\,. \end{equation} By the a priori estimates of Lemma \ref{apriori} it follows that $v_{\varepsilon}(t,z)$ satisfies $$
|| v_\varepsilon ||_{L^\infty\left({\mathbb R}^+;L^2({\mathbb R}^N)\right)} +
\sqrt{\varepsilon} || \nabla v_\varepsilon ||_ {L^\infty\left({\mathbb R}^+;L^2({\mathbb R}^N)\right)} \leq C , $$ and applying the compactness of two-scale convergence (see Proposition \ref{prop2s}), up to a subsequence, there exists a limit $v^*(t,z,y)\in L^2\left({\mathbb R}^+\times{\mathbb R}^N;H^1({\mathbb T}^N)\right)$ such that $v_\varepsilon$ and $\sqrt{\varepsilon}\nabla v_\varepsilon$ two-scale converge to $v^*$ and $\nabla_y v^*$, respectively. Similarly, by definition of the initial data, $v_\varepsilon(0,z)$ two-scale converges to $\psi_n (y) v^0(z)$.
\noindent Although $v_\varepsilon$ is the unknown which will pass to the limit in the sequel, it is simpler to write an equation for another function, namely \begin{equation}\label{wdef} w_{\varepsilon}(t,z):=e^{2i\pi\frac{\theta^{n}\cdot z}{\sqrt{\varepsilon}}}v_{\varepsilon}(t,z) = e^{-i\frac{\lambda_{n} t}{\varepsilon}} u_{\varepsilon}(t,x)\,. \end{equation} By \eqref{wdef} it follows that \begin{equation}\label{formula} \nabla w_{\varepsilon}=e^{2i\pi\frac{\theta^{n}\cdot z}{\sqrt{\varepsilon}}} \Big(\nabla +2i\pi\frac{\theta^{n}}{\sqrt{\varepsilon}}\Big)v_{\varepsilon}\,, \end{equation} and it can be checked that the new unknown $w_{\varepsilon}$ solves the following equation \begin{equation}\label{neweq} \left\{ \begin{array}{ll} \displaystyle i \frac{\partial w_{\varepsilon}}{\partial t} - \hbox{{\rm div}} [A\left(\sqrt{\varepsilon}z,z/\sqrt{\varepsilon}\right)\nabla w_{\varepsilon}] + \frac{1}{\varepsilon} [ c(\sqrt{\varepsilon}z, z/\sqrt{\varepsilon}) - \lambda_{n} ] w_{\varepsilon}=0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\ [3mm] w_{\varepsilon}(0,z)=u^{0}_{\varepsilon}(\sqrt{\varepsilon}z) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\ \end{array} \right. \end{equation} \noindent where the differential operators div and $\nabla$ act with respect to the new variable $z$.
\noindent {\bf First step.} We multiply the equation \eqref{neweq} by the complex conjugate of $$ \varepsilon\phi\Big(t,z,\frac{z}{\sqrt{\varepsilon}}\Big) e^{2i\pi\frac{\theta^n\cdot z}{\sqrt{\varepsilon}}} $$ where $\phi(s,z,y)$ is a smooth test function defined on ${\mathbb R}^+\times{\mathbb R}^N\times{\mathbb T}^N$, with compact support in ${\mathbb R}^+\times{\mathbb R}^N$. Since this test function has compact support (fixed with respect to $\varepsilon$), the effect of the non-periodic variable in the coefficients is negligible for sufficiently small $\varepsilon$. Therefore we can replace the value of each coefficient at $(\sqrt{\varepsilon}z, z/\sqrt{\varepsilon})$ by its Taylor expansion of order two about the point $(0, z/\sqrt{\varepsilon})$. Integrating by parts and using \eqref{wdef} and \eqref{formula} yields \begin{align*} & -i \varepsilon \int_{0}^{+\infty}\hspace{-3mm} \int_{{\mathbb R}^{N}}
v_{\varepsilon} \frac{\partial \bar{\phi}^{\varepsilon}}{\partial t}\: dt\, dz
- i \varepsilon \int_{{\mathbb R}^{N}}
v_{\varepsilon}(0,z)\bar{\phi} \Big(0,z,\frac{z}{\sqrt{\varepsilon}}\Big) \, dz \\ & + \int_{0}^{+\infty}\hspace{-3mm}
\int_{{\mathbb R}^{N}}
[ A^{\e} + A^{\e}_{1,h} \, \sqrt{\varepsilon} z_{h} + \textstyle{\frac{1}{2}} A^{\e}_{2,lh} \, \varepsilon z_{l} z_{h}
+ o(\varepsilon)]
(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\hspace{-1mm} \cdot
(\sqrt{\varepsilon}\nabla-2i\pi\theta^{n})\bar{\phi}^{\varepsilon} \,dz \, dt \\ & + \int_{0}^{+\infty}\hspace{-3mm} \int_{{\mathbb R}^{N}}
[c^{\e} + c^{\e}_{1,h} \sqrt{\varepsilon} z_{h} + \textstyle{\frac{1}{2}} c^{\e}_{2,lh}\,\varepsilon z_{l} z_{h}
+o(\varepsilon)- \lambda_{n} ] \, v_{\varepsilon} \bar{\phi}^{\varepsilon}
\:dz \, dt = 0 . \end{align*} Passing to the two-scale limit we get the variational formulation of $$ - (\hbox{{\rm div}}_y +2i\pi\theta^n) \Big( A(y) (\nabla_y +2i\pi\theta^n)v^* \Big) + c(y)v^* = \lambda_{n} v^* \quad \mbox{ in } {\mathbb T}^N . $$ The simplicity of $\lambda_{n}$ implies that there exists a scalar function $v(t,z)\in L^2\left({\mathbb R}^+\times{\mathbb R}^N\right)$ such that \begin{equation}\label{vstar} v^*(t,z,y) = v(t,z) \psi_n(y) . \end{equation}
\noindent {\bf Second step.} We multiply (\ref{neweq}) by the complex conjugate of \begin{equation*} \Psi_{\varepsilon}(t,z)=e^{2i\pi\theta^{n}\cdot\frac{z}{\sqrt{\varepsilon}}}\Big[ \psin^{\e} \phi(t,z) + \sqrt{\varepsilon} \sum_{k=1}^{N} \Big( \frac{1}{2i\pi} \frac{\partial \psin^{\e}}{\partial \theta_{k}} \frac{\partial \phi}{\partial z_{k}}(t,z) + z_{k} \frac{\partial \psin^{\e}}{\partial x_{k}} \phi(t,z) \Big) \Big]\,, \end{equation*} where $\phi(t,z)$ is a smooth test function with compact support in ${\mathbb R}^+\times{\mathbb R}^N$. \noindent We first look at those terms of the equation involving time derivatives: \begin{align}\label{timeder}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
i\frac{\partial w_\varepsilon}{\partial t}\bar{\Psi}_\varepsilon \, dt\,dz =\\ \nonumber &\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}-i v_{\varepsilon}
\left[
\bpsin^{\e} \frac{\partial \bar{\phi}}{\partial t} +\sqrt{\varepsilon}\sum_{k=1}^{N}
\left(
-\frac{1}{2i\pi}\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\frac{\partial^{2} \bar{\phi}}{\partial t \partial z_{k}} + z_{k}
\frac{\partial \bpsin^{\e}}{\partial x_{k}}\frac{\partial \bar{\phi}}{\partial t}
\right)
\right]\,
dt\, dz \\ \nonumber & -i \int_{{\mathbb R}^{N}}
v_{\varepsilon}(0,z)
\left[
\bpsin^{\e} \bar{\phi} (0,z) +
\sqrt{\varepsilon} \sum_{k=1}^{N}
\left(
-\frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\frac{\partial \bar{\phi}}{\partial z_{k}}(0,z) +
z_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}}\bar{\phi} (0,z)
\right)
\right] \, dz\,. \end{align}
\noindent Recalling the normalization $\int_{{\mathbb T}^{N}}|\psi_{n}|^{2}\,dy=1$, we find that the two-scale limit of the term on the left hand side of \eqref{timeder} is given by the expression \begin{equation}\label{term1} -i \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} v \frac{\partial \bar{\phi}}{\partial t}\: dz\, dt -i \int_{{\mathbb R}^{N}} v^{0}\bar{\phi} (0,z) \, dz \,. \end{equation}
\noindent We further decompose $\Psi_\varepsilon$ as follows $$ \Psi_\varepsilon=\Psi_\varepsilon^1+\Psi_\varepsilon^2\cdot z \quad\text{ with }\quad \Psi_\varepsilon^2 = \sqrt{\varepsilon} e^{2i\pi\theta^{n}\cdot\frac{z}{\sqrt{\varepsilon}}} \sum_{k=1}^{N}\frac{\partial \psin^{\e}}{\partial x_{k}}\phi(t,z)e_k. $$ \noindent Getting rid of all terms multiplied by $o(\varepsilon)$ and taking into account \eqref{wdef} and \eqref{formula} we next pass to the limit in the remaining terms of \eqref{neweq} multiplied by $\bar{\Psi}_{\varepsilon}$. The computation is similar to \cite{alpiat2} but it involves new terms since $\psi_{n}$ and its derivatives also depend on $x$. We first look at those terms which are of zero order with respect to $z$, namely
\begin{align}\label{zero}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
A^{\e} \nabla w_\varepsilon \cdot (\nabla\bar{\Psi}_\varepsilon^1+ \bar{\Psi}_\varepsilon^2) +
\frac{1}{\varepsilon}(c^{\e}-\lambda_{n})w_\varepsilon \bar{\Psi}_\varepsilon^1 \Big] \: dz\, dt \\ \nonumber & = \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
\frac{1}{\varepsilon} A^{\e}\Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n}\Big)v_{\varepsilon}
\cdot (\nabla_{y}-2i\pi\theta^{n})\bpsin^{\e} \bar{\phi} +
\frac{1}{\varepsilon}(c^{\e}-\lambda_{n})\bpsin^{\e} v_{\varepsilon} \bar{\phi}
\Big] \: dz\, dt\\ \nonumber & -\frac{1}{2i\pi}
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
\frac{1}{\sqrt{\varepsilon}}A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n}\Big)v_{\varepsilon} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\frac{\partial \bar{\phi}}{\partial z_{k}} \\ \nonumber & \hspace{2.7cm} + \frac{1}{\sqrt{\varepsilon}} (c^{\e}-\lambda_{n}) v_{\varepsilon}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}\frac{\partial \bar{\phi}}{\partial z_{k}}
\Big] \:dz\, dt \\ \nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{\sqrt{\varepsilon}}A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n}\Big)v_{\varepsilon} \cdot
\bpsin^{\e} \nabla \bar{\phi} \: dz\, dt \\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} -\frac{1}{2\pi i}
A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n}\Big)v_{\varepsilon} \cdot
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\nabla\frac{\partial \bar{\phi}}{\partial z_{k}} \:dz\, dt\,\\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n}\Big)v_{\varepsilon}
\cdot \frac{\partial \bpsin^{\e}}{\partial x_{k}}\bar{\phi} \, e_{k}
\:dz\, dt\,. \end{align}
\noindent Using equation \eqref{eq1} with $\varphi=v_{\varepsilon}\bar{\phi}$ and equation \eqref{eq2} with ${\displaystyle \varphi=v_{\varepsilon}\frac{\partial \bar{\phi}}{\partial z_{k}}}$ we rewrite the first two integrals in the right hand side of \eqref{zero} as follows
\begin{align*} & \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
-\frac{1}{\sqrt{\varepsilon}} A^{\e}(\nabla_{y}-2i\pi\theta^{n})\bpsin^{\e}\cdot v_{\varepsilon}\nabla \bar{\phi}
\,dz\, dt\, \\ & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\frac{1}{2i\pi} A^{\e}(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\cdot v_{\varepsilon}\nabla \frac{\partial \bar{\phi}}{\partial z_{k}}
+ \frac{1}{\sqrt{\varepsilon}}A^{\e} e_{k}\cdot
v_{\varepsilon} \frac{\partial \bar{\phi}}{\partial z_{k}}(\nabla_{y}-2i\pi\theta^{n}) \bpsin^{\e} \\ & \hspace{2cm} - \frac{1}{\sqrt{\varepsilon}}A^{\e} \bpsin^{\e} e_{k}\cdot
\Big(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n}\Big)
\Big(v_{\varepsilon}\frac{\partial \bar{\phi}}{\partial z_{k}}\Big)\Big] \,dz\, dt\,. \end{align*}
\noindent Combining the above terms with the other terms in \eqref{zero} and passing to the two-scale limit in \eqref{zero} yields
\begin{align}\label{term2}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\Big[
\frac{1}{2i\pi}A\psi_{n} (\nabla_{y}-2i\pi\theta^{n})\frac{\partial\bar{\psi}_{n}}{\partial\theta_{k}}
-\frac{1}{2i\pi}A \frac{\partial\bar{\psi}_{n}}{\partial\theta_{k}}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}
- A |\psi_{n}|^{2} e_{k}\Big] \\ \nonumber & \hspace{2.4cm}\cdot v \nabla \frac{\partial \bar{\phi}}{\partial z_{k}}
\:dy \,dz\, dt \\ \nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
A(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot \frac{\partial \bar{\psi}_{n}}{\partial x_{k}}v\bar{\phi} \, e_{k}
\:dy \,dz\, dt\,. \end{align} By equation \eqref{eq6} it can be seen that the first integral of \eqref{term2} equals \begin{equation}\label{tensorA} \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} A^{*}\nabla v \cdot \nabla \bar{\phi} \,dz\, dt\,. \end{equation}
\noindent We now focus on those terms which are linear in $z$: \begin{align}\label{linear} \nonumber & \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
A^{\e}\nabla w_\varepsilon\cdot(\nabla\bar{\Psi}_\varepsilon^2 z)+\frac{1}{\varepsilon}(c^{\e}-\lambda_{n})w_\varepsilon\bar{\Psi}_\varepsilon^2 z
+A^{\e}_{1,k}\sqrt{\varepsilon}z_k\nabla w_\varepsilon\cdot (\nabla\bar{\Psi}_\varepsilon ^1+\bar{\Psi}_\varepsilon^2)\\ \nonumber & \hspace{1.5cm} +\frac{1}{\sqrt{\varepsilon}}c^{\e}_{1,k}z_k w_{\varepsilon}\bar{\Psi}_\varepsilon^1
\Big] \: dz \, dt \\ \nonumber & = \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
\frac{1}{\sqrt{\varepsilon}} A^{\e} \Big(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n}\Big)v_{\varepsilon} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial x_{k}}\,\bar{\phi} z_{k}
+ \frac{1}{\sqrt{\varepsilon}}(c^{\e}-\lambda_{n})v_{\varepsilon}\frac{\partial \bpsin^{\e}}{\partial x_{k}}\,\bar{\phi} z_{k}
\Big] dz \, dt \\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
\frac{1}{\sqrt{\varepsilon}}A^{\e}_{1,k} \Big(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n}\Big)v_{\varepsilon}\hspace{-1mm}
\cdot \hspace{-1mm}
(\nabla_{y}-2i\pi\theta^{n}) \bpsin^{\e} \,\bar{\phi} z_{k}
+ \frac{1}{\sqrt{\varepsilon}} c^{\e}_{1,k} v_{\varepsilon}\bpsin^{\e} \, \bar{\phi} \, z_{k}
\Big]\: dz \, dt \\ \nonumber & +
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
A^{\e}(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\cdot
\frac{\partial \bpsin^{\e}}{\partial x_{k}}\nabla\bar{\phi}\, z_{k}
+ A^{\e}_{1,k} (\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}
\cdot \bpsin^{\e} \nabla\bar{\phi} \, z_{k}\Big] \: dz \, dt \\ \nonumber & -\frac{1}{2i\pi}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
A^{\e}_{1,h} (\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\cdot
(\nabla_{y}-2i\pi\theta^{n})
\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}\frac{\partial \bar{\phi}}{\partial z_{k}}\, z_{h}
+ c^{\e}_{1,h} v_{\varepsilon}
\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}\frac{\partial \bar{\phi}}{\partial z_{k}}\, z_{h}
\Big]\: dz \, dt \\ \nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
\sqrt{\varepsilon}A^{\e}_{1,h}(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\cdot
\Big(-\frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}\nabla\frac{\partial \bar{\phi}}{\partial z_{k}}
+\frac{\partial \bpsin^{\e}}{\partial x_{k}}\bar{\phi} \, e_{k}
\Big) z_{h}
\Big]\: dz \, dt \,. \end{align} By equation \eqref{eq3} with ${\displaystyle \varphi = v_{\varepsilon}\bar{\phi} z_{k}}$ it can be seen that the sum of the first two integrals in the right hand side of \eqref{linear} gives \begin{equation}\label{byeq3} - \int_{0}^{+\infty}\int_{{\mathbb R}^{N}} \Big( A^{\e}(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial x_{k}} \cdot v_{\varepsilon} \nabla (\bar{\phi} z_{k}) + A^{\e}_{1,k} (\nabla_{y}-2i\pi\theta^{n}) \bpsin^{\e} \cdot v_{\varepsilon} \nabla (\bar{\phi} z_{k}) \Big) \: dz \, dt \,. \end{equation}
\noindent Therefore passing to the two-scale limit in \eqref{linear} we find
\begin{align} \label{linear1}
& -\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}\Big[
A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\cdot v \psi_{n} \bar{\phi}\, e_{k}
+ A_{1,k} (\nabla_{y}-2i\pi\theta^{n}) \bar{\psi}_{n}\cdot
v \psi_{n} \bar{\phi} \, e_{k}\Big] \: dy \, dz \, dt\\ \nonumber & - \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}\Big[
A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\cdot v \psi_{n} z_{k} \nabla\bar{\phi}
+ A_{1,k}(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n} \cdot
v \psi_{n} z_{k} \nabla\bar{\phi}\Big] \: dy \, dz \, dt\\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}\Big[
A(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot v
\frac{\partial \bar{\psi}_{n}}{\partial x_{k}} z_{k}\nabla\bar{\phi}
+ A_{1,k}(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot
v \bar{\psi}_{n} z_{k}\nabla\bar{\phi} \Big]\: dy \, dz \, dt\\ \nonumber & -\frac{1}{2i\pi}
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}\Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}
v z_{h}\frac{\partial \bar{\phi}}{\partial z_{k}}\\ \nonumber & \hspace{3cm} + c_{1,h} \psi_{n}
\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}v z_{h}\frac{\partial \bar{\phi}}{\partial z_{k}}
\Big]\: dy \, dz \, dt \,. \end{align} \noindent By equation \eqref{eq4} it follows that the last integral in \eqref{linear1} is equal to \begin{align}\label{linear2}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\Big[
A_{1,h}\psi_{n} e_{k} \cdot(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n} + A\psi_{n} e_{k}
\cdot(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{h}}\psi_{n} \Big]
v z_{h} \frac{\partial \bar{\phi}}{\partial z_{k}}
\:\, dy \, dz \, dt \\ \nonumber & - \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\Big[
A_{1,h}\bar{\psi}_{n} e_{k}\cdot (\nabla_{y}+2i\pi\theta^{n})\psi_{n}
+ A\frac{\partial \bar{\psi}_{n}}{\partial x_{h}}e_{k}\cdot (\nabla_{y}+2i\pi\theta^{n})\psi_{n}\Big]
v z_{h} \frac{\partial \bar{\phi}}{\partial z_{k}}
\:\, dy \, dz \, dt \\ \nonumber & - \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\frac{1}{2i\pi}\frac{\partial^{2} \lambda_{n}}{\partial x_{h}\partial\theta_{k}}
|\psi_{n}|^{2} v z_{h} \frac{\partial \bar{\phi}}{\partial z_{k}}
\:\, dy \, dz \, dt \,. \end{align}
\noindent Next notice that the first and the second line of \eqref{linear2} cancel out with the second and the third line of \eqref{linear1} respectively and therefore \eqref{linear1} reduces to \begin{align}\label{term3}
& -\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}\Big[
A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\cdot v\psi_{n}\bar{\phi} \,e_{k}
+ A_{1,k}(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}\cdot v \psi_{n}
\bar{\phi} \, e_{k}\Big] \:\, dy \, dz \, dt \\ \nonumber & -\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{2i\pi}\frac{\partial^{2} \lambda_{n}}{\partial x_{h}\partial\theta_{k}}
v \frac{\partial \bar{\phi}}{\partial z_{k}} \, z_{h}
\: dz \, dt \,. \end{align}
\noindent Finally we consider all quadratic in $z$ terms: \begin{align} \nonumber & \frac{1}{2}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
A^{\e}_{2,lh} \,\varepsilon z_{l}z_{h}\nabla w_\varepsilon\cdot(\nabla\bar{\Psi}_\varepsilon^1+\bar{\Psi}_\varepsilon^2)
+ c^{\e}_{2,lh} z_{l} z_{h} w_{\varepsilon}\bar{\Psi}_\varepsilon^1\Big] \: dz \, dt \\ \nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\Big[
A^{\e}_{1,k}\sqrt{\varepsilon}z_k\nabla w_\varepsilon\cdot (z\nabla\bar{\Psi}_\varepsilon ^2)
+\frac{1}{\sqrt{\varepsilon}}c^{\e}_{1,k}z_k w_{\varepsilon}z\cdot\bar{\Psi}_\varepsilon^2 \Big]\: dz \, dt \\ \nonumber & = \frac{1}{2} \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e}_{2,lh} \, \sqrt{\varepsilon} z_{l}z_{h}
\Big(\sqrt{\varepsilon}\nabla+2i\pi\theta^{n}\Big)v_{\varepsilon}\cdot
\Big[
\frac{1}{\sqrt{\varepsilon}}(\nabla_{y}-2i\pi\theta^{n})\bpsin^{\e} \bar{\phi}
+\bpsin^{\e} \nabla\bar{\phi} \Big] \: dz \, dt \\ \nonumber & - \frac{1}{2}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e}_{2,lh} \, \sqrt{\varepsilon} z_{l}z_{h}
\Big(\sqrt{\varepsilon}\nabla+2i\pi\theta^{n}\Big)v_{\varepsilon} \\ \nonumber & \hspace{2.4cm}\cdot
\Big[
\frac{1}{2\pi i}
\nabla_{y}\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\frac{\partial \bar{\phi}}{\partial z_{k}}
+\sqrt{\varepsilon}
\Big(
\frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}} \nabla\frac{\partial \bar{\phi}}{\partial z_{k}}
+ e_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}} \bar{\phi}
\Big) \Big] \, dz \, dt \\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e}_{1,h}\,z_{h}\Big(\sqrt{\varepsilon}\nabla+2i\pi\theta^{n}\Big)v_{\varepsilon}
\cdot
\Big[
z_{k}(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial x_{k}} \bar{\phi}
+\sqrt{\varepsilon} z_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}} \nabla \bar{\phi}
\Big] \: \, dz \, dt \\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{2} c^{\e}_{2,lh} z_{l} z_{h} v_{\varepsilon}
\Big(
\bpsin^{\e}\bar{\phi}
- \sqrt{\varepsilon} \frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}} \frac{\partial \bar{\phi}}{\partial z_{k}}
\Big) \: \, dz \, dt \\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
c^{\e}_{1,h} z_{h} v_{\varepsilon}
z_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}} \bar{\phi}
\: dz \, dt \end{align}
\noindent which, on passing to the two-scale limit, give \begin{align} \label{quad}
& \frac{1}{2}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \int_{{\mathbb T}^{N}}
\Big[
A_{2,lh}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot (\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}
+ c_{2,lh} \psi_{n} \bar{\psi}_{n} \Big] v \bar{\phi} \, z_{l} z_{h} \: dy \, dz \, dt \\ \nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \int_{{\mathbb T}^{N}}
\Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
+ c_{1,h} \psi_{n} \frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\, \Big] v \bar{\phi} \, z_{h} z_{k} \: dy \, dz \, dt \,. \end{align} \noindent Now using equation \eqref{eq5} we find that \eqref{quad} reduces to
\begin{equation}\label{term4} \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \frac{1}{2}\frac{\partial^{2} \lambda_{n}}{\partial x_{l}\partial x_{h}} \: v \bar{\phi} \, z_{l} z_{h} \: dz \, dt \,. \end{equation} \noindent Summing up together \eqref{term1}, \eqref{term2}, \eqref{tensorA}, \eqref{term3} and \eqref{term4} yields the weak formulation of \eqref{hom}. By uniqueness of the solution of the homogenized problem \eqref{hom}, we deduce that the entire sequence $v_\varepsilon$ two-scale converges weakly to $\psi_{n}(y)v(t,x)$.
It remains to prove the strong two-scale convergence of $v_\varepsilon$. By Lemma \ref{apriori} we have $$
|| v_\varepsilon(t) ||_{L^2({\mathbb R}^N)} = || u_\varepsilon(t) ||_{L^2({\mathbb R}^N)} =
|| u^0_\varepsilon ||_{L^2({\mathbb R}^N)} \to || \psi_n v^0 ||_{L^2({\mathbb R}^N\times{\mathbb T}^N)}
= || v^0 ||_{L^2({\mathbb R}^N)} $$ by the normalization condition of $\psi_n$. From the conservation of energy of the homogenized equation \eqref{hom} we have $$
|| v(t) ||_{L^2({\mathbb R}^N)} = || v^0 ||_{L^2({\mathbb R}^N)} , $$ and thus we deduce the strong convergence from Proposition \ref{prop2s}. $\Box$
\begin{remark}\label{lastrem} {\rm As usual in periodic homogenization \cite{allaire}, \cite{blp}, the choice of the test function $\Psi_\varepsilon$, in the proof of Theorem \ref{mainth}, is dictated by the formal two-scale asymptotic expansion that can be obtained for the solution $w_\varepsilon$ of \eqref{neweq}, namely $$ w_\varepsilon(t,z) \approx e^{2i\pi\theta^{n}\cdot\frac{z}{\sqrt{\varepsilon}}}\Big[ \psi_{n}\Big(\frac{z}{\sqrt{\varepsilon}}\Big) v(t,z) + \sqrt{\varepsilon} \sum_{k=1}^{N} \Big( \frac{1}{2i\pi} \frac{\partial \psi_{n}}{\partial \theta_{k}}\Big(\frac{z}{\sqrt{\varepsilon}}\Big) \frac{\partial v}{\partial z_{k}}(t,z) + z_{k} \frac{\partial \psi_{n}}{\partial x_{k}}\Big(\frac{z}{\sqrt{\varepsilon}}\Big)v(t,z) \Big) \Big] $$ where $v$ is the homogenized solution of \eqref{hom}. Actually the homogenized equation that one gets by the asymptotic expansion method is \begin{equation}\label{asym} i \frac{\partial v}{\partial t} - \hbox{{\rm div}}\left(A^{*}\nabla v \right) + B^{*} \nabla v \cdot z + \bar{c}^{*} v + v D^{*} z \cdot z = 0 \, , \end{equation} which apparently differs from \eqref{hom} by the following zero-order term $$ \left( {\rm tr}\,(\nabla_{\theta}\nabla_{x}\lambda_{n}) - 4\pi \text{Im}( c^{*}) \right) v \,. $$ By virtue of \eqref{contaccio} the above term vanishes, so that formulae \eqref{asym} and \eqref{hom} are equivalent. } \end{remark}
\begin{center}\textsc{Acknowledgments}\end{center} This work was done while M. Palombaro was post-doc at the Centre de Math\'ematiques Appliqu\'ees of \'Ecole Polytechnique. The hospitality of people there is gratefully acknowledged. This work was partly supported by the MULTIMAT european network MRTN-CT-2004-505226 funded by the EEC.
\end{document}
\begin{document}
\title{A Nivat Theorem for Weighted Timed Automata and Weighted Relative Distance Logic\thanks{The final version appeared in the Proceedings of the 41st International Colloquium on Automata, Languages, and Programming (ICALP 2014) and is available at link.springer.com; DOI: 10.1007/978-3-662-43951-7\_15}} \author{Manfred Droste and Vitaly Perevoshchikov\thanks{Supported by DFG Graduiertenkolleg 1763 (QuantLA)}} \institute{Universit\"at Leipzig, Institut f\"ur Informatik, \\ 04109 Leipzig, Germany\\ \email{\{droste,perev\}@informatik.uni-leipzig.de} }
\maketitle
\begin{abstract} Weighted timed automata (WTA) model quantitative aspects of real-time systems like continuous consumption of memory, power or financial resources. They accept quantitative timed languages where every timed word is mapped to a value, e.g., a real number. In this paper, we prove a Nivat theorem for WTA which states that recognizable quantitative timed languages are exactly those which can be obtained from recognizable boolean timed languages with the help of several simple operations. We also introduce a weighted extension of relative distance logic developed by Wilke, and we show that our weighted relative distance logic and WTA are equally expressive. The proof of this result can be derived from our Nivat theorem and Wilke's theorem for relative distance logic. Since the proof of our Nivat theorem is constructive, the translation process from logic to automata and vice versa is also constructive. This leads to decidability results for weighted relative distance logic. \begin{keywords} Weighted timed automata, linearly priced timed automata, average behavior, discounting, Nivat's theorem, quantitative logic. \end{keywords} \end{abstract}
\section{Introduction}
Timed automata introduced by Alur and Dill \cite{AD94} are a prominent model for real-time systems. Timed automata form finite representations of infinite-state automata for which various fundamental results from the theory of finite-state automata can be transferred to the timed setting. Although time has a quantitative nature, the questions asked in the theory of timed automata are of a qualitative kind. On the other side, quantitative aspects of systems, e.g., costs, probabilities and energy consumption can be modelled using weighted automata, i.e., classical nondeterministic automata with a transition weight function. The behaviors of weighted automata can be considered as quantitative languages (also known as formal power series) where every word carries a value. Semiring-weighted automata have been extensively studied in the literature (cf. \cite{BR88,Eil74,KS86} and the handbook of weighted automata \cite{DKV09}).
Weighted extensions of timed automata are of much interest for the real-time community, since weighted timed automata (WTA) can model continuous time-dependent consumption of resources. In the literature, various models of WTA were considered, e.g., linearly priced timed automata \cite{ATP01,BFHL01, LBBF01}, multi-weighted timed automata with knapsack-problem objective \cite{LR05}, and WTA with measures like average, reward-cost ratio \cite{BBL04, BBL08} and discounting \cite{AT11, FL09, FL092}. In \cite{Qua10, Qua11}, WTA over semi\-rings were studied with respect to the classical automata-theoretic questions. However, various models, e.g., WTA with average and discounting measures as well as multi-weighted automata cannot be defined using semirings. For the latter situations, only several algorithmic problems were handled. But many questions whether the results known from the theories of timed and weighted automata also hold for WTA remain open. Moreover, there is no unified framework for WTA.
The main goal of this paper is to build a bridge between the theories of WTA and timed automata. First, we develop a general model of {\em timed valuation monoids} for WTA. Recall that Nivat's theorem \cite{Ni68} is one of the fundamental characterizations of rational transductions and establishes a connection between rational transductions and rational languages. Our first main result is an extension of Nivat's theorem to WTA over timed valuation monoids. By Nivat's theorem for semiring-weighted automata described recently in \cite{DK}, recognizable quantitative languages are exactly those which can be constructed from recognizable languages using operations like morphisms and intersections. The proof of this result requires the fact that finite automata are determinizable. However, timed automata do not enjoy this property. Nevertheless, for idempotent timed valuation monoids which model all mentioned examples of WTA, we do not need determinization. In this case, our Nivat theorem for WTA is similar to the one for weighted automata. In the non-idempotent case, we give an example showing that this statement does not hold true. But in this case we can establish a connection between recognizable quantitative timed languages and sequentially, deterministically or unambiguously recognizable timed languages.
As an application of our Nivat theorem, we provide a characterization of recognizable quantitative timed languages by means of quantitative logics. The classical B\"uchi-Elgot theorem \cite{Buc60} was extended to both weighted \cite{DG07, DG09, DM12} and timed settings \cite{Wil94,Wil942}. In \cite{Qua10, Qua11}, a semiring-weighted extension of Wilke's relative distance logic \cite{Wil94,Wil942} was considered. Here, we develop a different weighted version of relative distance logic based on our notion of timed valuation monoids. In our second main result, we show that this logic and WTA have the same expressive power. For the proof of this result, we use a new proof technique and our Nivat theorem to derive our result from the corresponding result for unweighted logic \cite{Wil94, Wil942}. Since the proof of our Nivat theorem is constructive, the translation process from weighted relative distance logic to WTA and vice versa is constructive. This leads to decidability results for weighted relative distance logic. In particular, based on the results of \cite{ATP01,BFHL01, LBBF01}, we show the decidability of several weighted extensions of the satisfiability problem for our logic.
\section{Timed Automata}
An {\em alphabet} is a non-empty finite set. Let $\Sigma$ be a non-empty set. A {\em finite word} over $\Sigma$ is a finite sequence $w = a_1 ... a_n$ where $n \ge 0$ and $a_1, ..., a_n \in \Sigma$. If $n \ge 1$, then we say that $w$ is {\em non-empty}. Let $\Sigma^+$ denote the set of all non-empty words over $\Sigma$.
Let $\mathbb R_{\ge 0}$ denote the set of all non-negative real numbers. A {\em finite timed word} over $\Sigma$ is a finite word over $\Sigma \times \mathbb R_{\ge 0}$, i.e., a finite sequence $w = (a_1, t_1) ... (a_n, t_n)$ where $n \ge 0$, $a_1, ..., a_n \in \Sigma$ and $t_1, ..., t_n \in \mathbb R_{\ge 0}$. Let $|w| = n$ and $\langle w \rangle = t_1 + ... + t_n$ and let $\mathbb T \Sigma^+ = (\Sigma \times \mathbb R_{\ge 0})^+$, the set of all non-empty finite timed words. Any set $\mathcal L \subseteq \mathbb T \Sigma^+$ of timed words is called a {\em timed language}.
Let $C$ be a finite set of {\em clock variables} ranging over $\mathbb R_{\ge 0}$. A {\em clock constraint} over $C$ is either $\text{\sc True}$ or (if $C$ is non-empty) a conjunction of formulas of the form $x \bowtie c$ where $x \in C$, $c \in \mathbb N$ and ${\bowtie} \in \{<, \le, =, \ge, >\}$. Let $\Phi(C)$ denote the set of all clock constraints over $C$. A {\em clock valuation} over $C$ is a mapping $\nu: C \to \mathbb R_{\ge 0}$ which assigns a value to each clock variable. Let $\mathbb R_{\ge 0}^C$ be the set of all clock valuations over $C$. The {\em satisfaction relation} ${\models} \subseteq \mathbb R_{\ge 0}^C \times \Phi(C)$ is defined as usual. Now let $\nu \in \mathbb R_{\ge 0}^C$, $t \in \mathbb R_{\ge 0}$ and $\Lambda \subseteq C$. Let $\nu + t$ denote the clock valuation $\nu' \in \mathbb R_{\ge 0}^C$ such that $\nu'(x) = \nu(x) + t$ for all $x \in C$. Let $\nu[\Lambda := 0]$ denote the clock valuation $\nu' \in \mathbb R_{\ge 0}^C$ such that $\nu'(x) = 0$ for all $x \in \Lambda$ and $\nu'(x) = \nu(x)$ for all $x \notin \Lambda$.
\begin{definition} Let $\Sigma$ be an alphabet. A {\em timed automaton} over $\Sigma$ is a tuple ${\mathcal A = (L, C, I, E, F)}$ such that $L$ is a finite set of {\em locations}, $C$ is a finite set of {\em clocks}, $I, F \subseteq L$ are sets of {\em initial} resp. {\em final} locations and ${E \subseteq L \times \Sigma \times \Phi(C) \times 2^C \times L}$ is a finite set of {\em edges}. \end{definition} For an edge $e = (\ell, a, \phi, \Lambda, \ell')$, let $\Label(e) = a$ be the {\em label} of $e$. A {\em run} of $\mathcal A$ is a finite sequence \begin{equation} \label{Eq:DefRun} \rho = (\ell_0, \nu_0) \xrightarrow{t_1} \xrightarrow{e_1} (\ell_1, \nu_1) \xrightarrow{t_2} \xrightarrow{e_2} ... \xrightarrow{t_n} \xrightarrow{e_n} (\ell_n, \nu_n) \end{equation}
where $n \ge 1$, $\ell_0, \ell_1, ..., \ell_n \in L$, $\nu_0, \nu_1, ..., \nu_n \in \mathbb R_{\ge 0}^C$, $t_1, ..., t_n \in \mathbb R_{\ge 0}$ and ${e_1, ..., e_n \in E}$ satisfy the following conditions: $\ell_0 \in I$, $\nu_0(x) = 0$ for all $x \in C$, $\ell_n \in F$ and, for all $1 \le i \le n$, $e_i = (\ell_{i-1}, a_i, \phi_i, \Lambda_i, \ell_i)$ for some $a_i \in \Sigma$, $\phi_i \in \Phi(C)$ and $\Lambda_i \subseteq C$ such that $\nu_{i-1} + t_i \models \phi_i$ and $\nu_i = (\nu_{i-1} + t_i)[\Lambda_i := 0]$. The {\em label} of $\rho$ is the timed word $\Label(\rho) = (\Label(e_1), t_1) ... (\Label(e_n), t_n) \in \mathbb T \Sigma^+$. For any timed word $w \in \mathbb T \Sigma^+$, let $\Run_{\mathcal A}(w)$ denote the set of all runs $\rho$ of $\mathcal A$ such that $\Label(\rho) = w$. Let $\mathcal L(\mathcal A) = \{w \in \mathbb T \Sigma^+ \; | \; \Run_{\mathcal A}(w) \neq \emptyset\}$. We say that an arbitrary timed language $\mathcal L \subseteq \mathbb T \Sigma^+$ is {\em recognizable} if there exists a timed automaton $\mathcal A$ over $\Sigma$ such that $\mathcal L(\mathcal A) = \mathcal L$. Let ${\mathcal A = (L, C, I, E, F)}$ be a timed automaton over $\Sigma$. We say that $\mathcal A$ is {\em unambiguous} if $|\Run_{\mathcal A}(w)| \le 1$ for all $w \in \mathbb T \Sigma^+$. We call $\mathcal A$ {\em deterministic} if $|I| = 1$ and, for all $e_1 = (\ell, a, \phi_1, \Lambda_1, \ell_1) \in E$ and $e_2 = (\ell, a, \phi_2, \Lambda_2, \ell_2) \in E$ with $e_1 \neq e_2$, there exists no clock valuation $\nu \in \mathbb R_{\ge 0}^C$ with $\nu \models \phi_1 \wedge \phi_2$. We call $\mathcal A$ {\em sequential} if $|I| = 1$ and, for all $e_1 = (\ell, a, \phi_1, \Lambda_1, \ell_1) \in E$ and $e_2 = (\ell, a, \phi_2, \Lambda_2, \ell_2) \in E$, we have $e_1 = e_2$; this property can be viewed as a strong form of determinism. 
Based on these notions, we can define {\em sequentially recognizable}, {\em deterministically recognizable} and {\em unambiguously recognizable} timed languages.
\section{Weighted Timed Automata}
In this section, we introduce a general model of weighted timed automata (WTA) over {\em timed valuation monoids}. We will show that our new model covers a variety of situations known from the literature: linearly priced timed automata \cite{ATP01, BFHL01,LBBF01} and WTA with the measures like average \cite{BBL04, BBL08} and discounting \cite{AT11,FL09,FL092}.
A {\em timed valuation monoid} is a tuple $\mathbb M = (M, +, \val, \mathbb 0)$ where $(M, +, \mathbb 0)$ is a commutative monoid and $\val: \mathbb T (M \times M)^+ \to M$ is a {\em timed valuation function}. We will say that $M$ is the {\em domain} of $\mathbb M$. We say that $\mathbb M$ is {\em idempotent} if $+$ is idempotent, i.e., $m + m = m$ for all $m \in M$.
Let $\Sigma$ be an alphabet and $\mathbb M = (M, +, \val, \mathbb 0)$ a timed valuation monoid. A {\em weighted timed automaton} (WTA) over $\Sigma$ and $\mathbb M$ is a tuple $\mathcal A = (L, C, I, E, F, \wt)$ where $(L, C, I, E, F)$ is a timed automaton over $\Sigma$ and $\wt: L \cup E \to M$ is a {\em weight function}. Let $\rho$ be a run of $\mathcal A$ of the form (\ref{Eq:DefRun}). Let $\wt^{\sharp}(\rho) \in \mathbb T (M \times M)^+$ be the timed word $(u_1, t_1) ... (u_n, t_n)$ where, for all $1 \le i \le n$, $u_i = (\wt(\ell_{i-1}), \wt(e_i))$. Then, the {\em weight} of $\rho$ is defined as $\wt_{\mathcal A}(\rho) = \val(\wt^{\sharp}(\rho)) \in M$. The {\em behavior} of $\mathcal A$ is the mapping $||\mathcal A||: \mathbb T \Sigma^+ \to M$ defined by $
{||\mathcal A||(w) = \sum (\wt_{\mathcal A}(\rho) \; | \; \rho \in \Run_{\mathcal A}(w))} $
for all $w \in \mathbb T \Sigma^+$. A {\em quantitative timed language} (QTL) over $\mathbb M$ is a mapping ${\mathbb L: \mathbb T \Sigma^+ \to M}$. We say that $\mathbb L$ is {\em recognizable} if there exists a WTA $\mathcal A$ over $\Sigma$ and $\mathbb M$ such that $\mathbb L = ||\mathcal A||$.
\begin{example} \label{Ex:TVM} All of the subsequent WTA model the property that staying in a location invokes costs depending on the length of the stay; the subsequent transition also invokes costs but happens instantaneously. We assume that, for all $x \in \mathbb R \cup \{\infty\}$, ${x \cdot \infty = \infty \cdot x = \infty}$ and $x + \infty = \infty + x = \infty$. \begin{itemize} \item [(a)] {\em Linearly priced timed automata} were considered in \cite{ATP01, BFHL01, LBBF01}. We can describe this model by the timed valuation monoid \linebreak ${\mathbb M^{\text{sum}} = (\mathbb R \cup \{\infty\}, \min, \val^{\text{sum}}, \infty)}$ where $\val^{\text{sum}}$ is defined by $\val^{\text{sum}}(v) = \sum_{i = 1}^n (m_i \cdot t_i + m'_i)$ for all $v = ((m_1, m'_1), t_1) ... ((m_n, m'_n), t_n) \in \mathbb T (M \times M)^+$.
\item [(b)] The situation of the average behavior for WTA considered in \linebreak \cite{BBL04, BBL08} can be described by means of the timed valuation monoid \linebreak ${\mathbb M^{\text{avg}} = (\mathbb R \cup \{\infty\}, \min, \val^{\text{avg}}, \infty)}$ where $\val^{\text{avg}}$ is defined as follows. Let ${v = ((m_1, m'_1), t_1) ... ((m_n, m'_n), t_n) \in \mathbb T (M \times M)^+}$. If $\langle v \rangle > 0$, then we let $ \val^{\text{avg}}(v) = \frac{\sum_{i = 1}^n (m_i \cdot t_i + m_i')}{\sum_{i = 1}^n t_i}. $ If $\langle v \rangle = 0$, $m_1 = ... = m_n \in \mathbb R$ and $m_1' = ... = m_n' = 0$, then we put $\val^{\text{avg}}(v) = m_1$. Otherwise, we put $\val^{\text{avg}}(v) = \infty$.
\item [(c)] The model of WTA with the discounting measure was investigated in \cite{AT11, FL09, FL092}. These WTA can be considered as WTA over the timed valuation monoid ${\mathbb M^{\text{disc}_{\lambda}} = (\mathbb R \cup \{\infty\}, \min, \val^{\text{disc}_{\lambda}}, \infty)}$ where $0 < \lambda < 1$ is a {\em discounting factor} and $\val^{\text{disc}_{\lambda}}$ is defined for all $v = ((m_1, m_1'), t_1) ... ((m_n, m_n'), t_n) \in \mathbb T (M \times M)^+$ by $ \val^{\text{disc}_\lambda}(v) = \sum_{i = 1}^n \lambda^{t_1 + ... + t_{i-1}} \cdot \big(\int_{0}^{t_i} m_i \cdot \lambda^{\tau} d \tau + \lambda^{t_i} \cdot m_i'\big). $ \end{itemize} Note that the timed valuation monoids $\mathbb M^{\text{sum}}$, $\mathbb M^{\text{avg}}$ and $\mathbb M^{\text{disc}_{\lambda}}$ are idempotent. \end{example}
\section{Closure Properties} \label{Sect:Closure}
In this section, we consider several closure properties of recognizable quantitative timed languages which we will use for the proof of our Nivat theorem and which could be of independent interest. For lack of space, we will omit the proofs.
Let $\Sigma$ be a set, $\Gamma$ an alphabet and $h: \Gamma \to \Sigma$ a mapping. For a timed word ${v = (\gamma_1, t_1) ... (\gamma_n, t_n) \in \mathbb T \Gamma^+}$, we let $h(v) = (h(\gamma_1), t_1) ... (h(\gamma_n), t_n) \in \mathbb T \Sigma^+$. Then, for a QTL $r: \mathbb T \Gamma^+ \to M$ over $\mathbb M$, we define the QTL $h(r): \mathbb T \Sigma^+ \to M$ over $\mathbb M$ by $
h(r)(w) = \sum (r(v) \; | \; v \in \mathbb T \Gamma^+ \text{ and } h(v) = w) $ for all $w \in \mathbb T \Sigma^+$. Observe that for any $w \in \mathbb T \Sigma^+$ there are only finitely many $v \in \mathbb T \Gamma^+$ with $h(v) = w$, hence the sum exists in $(M, +)$.
\begin{lemma} \label{Lemma:Homo} Let $\Sigma, \Gamma$ be alphabets, $\mathbb M = (M, +, \val, \mathbb 0)$ a timed valuation monoid and $h: \Gamma \to \Sigma$ a mapping. If $r: \mathbb T \Gamma^+ \to M$ is a recognizable QTL over $\mathbb M$, then the QTL $h(r)$ is also recognizable. \end{lemma} For the proof of this lemma, we use a similar construction as in \cite{DV10}, Lemma 1.
Let $g: \Sigma \to M \times M$ be a mapping. We denote by ${\val \circ g: \mathbb T \Sigma^+ \to M}$ the QTL over $\mathbb M$ defined for all $w \in \mathbb T \Sigma^+$ by ${(\val \circ g)(w) = \val(g(w))}$. We say that a timed valuation monoid $\mathbb M = (M, +, \val, \mathbb 0)$ is {\em location-independent} if, for any $v = ((m_1, m_1'), t_1) ... ((m_n, m_n'), t_n) \in \mathbb T (M \times M)^+$ and ${v' = ((k_1, k_1'), t_1) ... ((k_n, k_n'), t_n) \in \mathbb T (M \times M)^+}$ with $m_i' = k_i'$ for all $1 \le i \le n$, we have $\val(v) = \val(v')$.
\begin{lemma} \label{Lemma:Comp} Let $\Sigma$ be an alphabet, $\mathbb M = (M, +, \val, \mathbb 0)$ a timed valuation monoid and $g: \Sigma \to M \times M$ a mapping. Then, $\val \circ g$ is unambiguously recognizable. If $\mathbb M$ is location-independent, then $\val \circ g$ is sequentially recognizable. \end{lemma}
However, in general, $\val \circ g$ is not deterministically recognizable (and hence not sequentially recognizable). Let $\Sigma = \{a, b\}$ and $\mathbb M = \mathbb M^{\text{sum}}$ as in Example \ref{Ex:TVM} (a). Let $g(a) = (1, 0)$ and $g(b) = (2, 0)$. Then, one can show that $\val \circ g$ is not deterministically recognizable.
Let $\mathcal L \subseteq \mathbb T \Sigma^+$ be a timed language and $r: \mathbb T \Sigma^+ \to M$ a QTL over $\mathbb M$. The {\em intersection} $(r \cap \mathcal L): \mathbb T \Sigma^+ \to M$ is the QTL over $\mathbb M$ defined by $(r \cap \mathcal L)(w) = r(w)$ if $w \in \mathcal L$ and $(r \cap \mathcal L)(w) = \mathbb 0$ if $w \in \mathbb T \Sigma^+ \setminus \mathcal L$.
\begin{example} \label{Ex:Bad} As opposed to weighted untimed automata, recognizable quantitative timed languages are not closed under the intersection with recognizable timed languages. Let $\Sigma$ be a singleton alphabet and $\mathcal L$ a recognizable timed language over $\Sigma$ which is not unambiguously recognizable. Wilke \cite{Wil94} showed that such a language exists. Consider the non-idempotent and location-independent timed valuation monoid $\mathbb M = (\mathbb N, +, \val, 0)$ where $+$ is the usual addition of natural numbers and $\val(v) = m_1' \cdot ... \cdot m_n'$ for all $v = ((m_1, m_1'), t_1) ... ((m_n, m_n'), t_n) \in \mathbb T (\mathbb N \times \mathbb N)^+$. Let the QTL ${r: \mathbb T \Sigma^+ \to \mathbb N}$ over $\mathbb M$ be defined by $r(w) = 1$ for all $w \in \mathbb T \Sigma^+$. Then, $r$ is recognizable but $r \cap \mathcal L$ is not recognizable. \end{example}
Nevertheless, the intersection enjoys the following closure properties.
\begin{lemma} \label{Lemma:Inter} Let $\Sigma$ be an alphabet, $\mathbb M = (M, +, \val, \mathbb 0)$ a timed valuation monoid, $\mathcal L \subseteq \mathbb T \Sigma^+$ a recognizable timed language and $r: \mathbb T \Sigma^+ \to M$ a recognizable QTL over $\mathbb M$. If $\mathbb M$ is idempotent, then $r \cap \mathcal L$ is recognizable. If $\mathcal L$ is unambiguously recognizable, then $r \cap \mathcal L$ is recognizable. If $\mathcal L, r$ are unambiguously (deterministically, sequentially) recognizable, then $r \cap \mathcal L $ is also unambiguously (deterministically, sequentially) recognizable. \end{lemma} For the proof, we use a kind of product construction for timed automata.
\section{A Nivat Theorem for Weighted Timed Automata}
Nivat's theorem \cite{Ni68} (see also \cite{Be69}, Theorem 4.1) is one of the fundamental characterizations of rational transductions and establishes a connection between rational transductions and rational languages. A version for semiring-weighted automata was given in \cite{DK}; this shows a connection between recognizable quantitative and qualitative languages. In this chapter, we prove a Nivat-like theorem for recognizable quantitative timed languages.
Let $\Sigma$ be an alphabet and $\mathbb M = (M, +, \val, \mathbb 0)$ a timed valuation monoid. Let $\text{\sc Rec}(\Sigma, \mathbb M)$ denote the collection of all QTL recognizable by a WTA over $\Sigma$ and $\mathbb M$. Let $\mathcal N(\Sigma, \mathbb M)$ (with $\mathcal N$ standing for Nivat) denote the set of all QTL ${\mathbb L: \mathbb T \Sigma^+ \to M}$ over $\mathbb M$ such that there exist an alphabet $\Gamma$, mappings ${h: \Gamma \to \Sigma}$ and ${g: \Gamma \to M \times M}$ and a recognizable timed language $\mathcal L \subseteq \mathbb T \Gamma^+$ such that $\mathbb L = h((\val \circ g) \cap \mathcal L)$. Let the collection $\mathcal N^{\text{\sc Seq}}(\Sigma, \mathbb M)$ be defined like $\mathcal N(\Sigma, \mathbb M)$ with the only difference that $\mathcal L$ is sequentially recognizable. The collections $\mathcal N^{\text{\sc Unamb}}(\Sigma, \mathbb M)$ and $\mathcal N^{\text{\sc Det}}(\Sigma, \mathbb M)$ are defined similarly using unambiguously resp. deterministically recognizable timed languages.
Our Nivat theorem for weighted timed automata is the following.
\begin{theorem} \label{Theorem:Nivat} Let $\Sigma$ be an alphabet and $\mathbb M$ a timed valuation monoid. Then, $\text{\sc Rec}(\Sigma, \mathbb M) = \mathcal N^{\text{\sc Seq}}(\Sigma, \mathbb M) = \mathcal N^{\text{\sc Det}}(\Sigma, \mathbb M) = \mathcal N^{\text{\sc Unamb}}(\Sigma, \mathbb M) \subseteq \mathcal N(\Sigma, \mathbb M)$. \linebreak If $\mathbb M$ is idempotent, then $\text{\sc Rec}(\Sigma, \mathbb M) = \mathcal N(\Sigma, \mathbb M)$. \end{theorem}
As opposed to the result of \cite{DK} for weighted untimed automata, the equality $\text{\sc Rec}(\Sigma, \mathbb M) = \mathcal N(\Sigma, \mathbb M)$ does not always hold: let $\Sigma$, $\mathbb M$, $\mathcal L$ and $r$ be defined as in Example \ref{Ex:Bad}. Then, one can show that $r \cap \mathcal L \in \mathcal N(\Sigma, \mathbb M) \setminus \text{\sc Rec}(\Sigma, \mathbb M)$.
The proof of Theorem \ref{Theorem:Nivat} is based on the closure properties of WTA (cf. Sect. \ref{Sect:Closure}) and the following lemma.
\begin{lemma} \label{Lemma:Transitions} Let $\Sigma$ be an alphabet and $\mathbb M$ a timed valuation monoid. Then, $\text{\sc Rec}(\Sigma, \mathbb M) \subseteq \mathcal N^{\text{\sc Seq}}(\Sigma, \mathbb M)$. \end{lemma}
\begin{proof}[Sketch]
Let $\mathcal A = (L, C, I, E, F, \wt)$ be a WTA over $\Sigma$ and $\mathbb M$. Let $\Gamma = E$. We define the mappings $h: \Gamma \to \Sigma$ and ${g: \Gamma \to M \times M}$ for all $\gamma = (\ell, a, \phi, \Lambda, \ell') \in \Gamma$ by $h(\gamma) = a$ and $g(\gamma) = (\wt(\ell), \wt(\gamma))$. Let $\mathcal L$ be the set of all timed words $w = (\gamma_1, \tau_1) ... (\gamma_n, \tau_n)$ such that there exists a run $\rho$ of $\mathcal A$ of the form (\ref{Eq:DefRun}) with $\gamma_i = e_i$ and $\tau_i = t_i$ for all $1 \le i \le n$. It can be shown that $\mathcal L$ is sequentially recognizable and $||\mathcal A|| = h((\val \circ g) \cap \mathcal L) \in \mathcal N^{\text{\sc Seq}}(\Sigma, \mathbb M)$. \qed \end{proof}
Let $\Sigma$ be an alphabet and $\mathbb M$ a timed valuation monoid with the domain $M$. Let $\mathcal H^{\text{\sc Unamb}}(\Sigma, \mathbb M)$ denote the collection of all QTL ${\mathbb L: \mathbb T \Sigma^+ \to M}$ over $\mathbb M$ such that there exist an alphabet $\Gamma$, a mapping $h: \Gamma \to \Sigma$ and an unambiguously recognizable QTL $r: \mathbb T \Gamma^+ \to M$ over $\mathbb M$ such that $\mathbb L = h(r)$. The collections $\mathcal H^{\text{\sc Seq}}(\Sigma, \mathbb M)$ and $\mathcal H^{\text{\sc Det}}(\Sigma, \mathbb M)$ are defined like $\mathcal H^{\text{\sc Unamb}}(\Sigma, \mathbb M)$ with the only difference that $r$ is sequentially resp. deterministically recognizable.
As a corollary of Theorem~\ref{Theorem:Nivat}, we establish the following connections between recognizable and unambiguously, sequentially and deterministically recognizable QTL. For the proof of this corollary, we apply Theorem \ref{Theorem:Nivat} and closure properties of WTA considered in Sect.~\ref{Sect:Closure}. \begin{corollary} \label{Cor:Unamb} Let $\Sigma$ be an alphabet and $\mathbb M$ a timed valuation monoid. Then, $\mathcal H^{\text{\sc Seq}}(\Sigma, \mathbb M) = \mathcal H^{\text{\sc Det}}(\Sigma, \mathbb M) \subseteq \mathcal H^{\text{\sc Unamb}}(\Sigma, \mathbb M) = \text{\sc Rec}(\Sigma, \mathbb M)$. If $\mathbb M$ is location-independent, then $\mathcal H^{\text{\sc Seq}}(\Sigma, \mathbb M) = \text{\sc Rec}(\Sigma, \mathbb M)$. \end{corollary}
However, the equality $\mathcal H^{\text{\sc Seq}}(\Sigma, \mathbb M) = \text{\sc Rec}(\Sigma, \mathbb M)$ does not always hold. Let ${\Sigma = \{a, b\}}$ and $\mathbb M = \mathbb M^{\text{sum}}$ be the timed valuation monoid as in Example \ref{Ex:TVM} (a); note that $\mathbb M$ is not location-independent. Consider the QTL ${\mathbb L: \mathbb T \Sigma^+ \to M}$ over $\mathbb M$ defined for all $w = (a_1, t_1) ... (a_n, t_n)$ by $\mathbb L(w) = t_1$ if $a_1 = a$ and $\mathbb L(w) = 2 \cdot t_1$ otherwise. We can show that $\mathbb L \in \text{\sc Rec}(\Sigma, \mathbb M) \setminus \mathcal H^{\text{\sc Seq}}(\Sigma, \mathbb M)$.
\section{Weighted Relative Distance Logic}
In this section, we develop a weighted relative distance logic. Relative distance logic on timed words was introduced by Wilke in \cite{Wil94, Wil942}. It was shown that restricted relative distance logic and timed automata have the same expressive power. Here, we will derive a weighted version of this result. We will show that the proof of our result can be deduced from Wilke's result and our Nivat theorem for WTA.
We fix a countable set $V_1$ of {\em first-order variables} and a countable set $V_2$ of {\em second-order variables} such that $V_1 \cap V_2 = \emptyset$. Let $V = V_1 \cup V_2$.
\subsection{Relative Distance Logic}
Let $\Sigma$ be an alphabet. The set $\text{\sc Rdl}(\Sigma)$ of {\em relative distance formulas} over $\Sigma$ is defined by the grammar: $$
\varphi \; ::= \; P_a(x) \; | \; x \le y \; | \; X(x) \; | \; \dd_{\leftarrow}^{{\bowtie} c}(X, x) \; | \; \lnot \varphi \; | \; \varphi \vee \varphi \; | \; \exists x. \varphi \; | \; \exists X. \varphi $$ where $a \in \Sigma$, $x, y \in V_1$, $X \in V_2$, ${\bowtie} \in \{<, \le, =, \ge, >\}$ and $c \in \mathbb N$. The formulas of the form $\dd_{\leftarrow}^{{\bowtie c}}(X, x)$ are called {\em past formulas}.
Let $w = (a_1, t_1) ... (a_n, t_n) \in \mathbb T \Sigma^+$ be a timed word. For every $1 \le i \le n$, let $\langle w \rangle_i = t_1 + ... + t_i$. The {\em domain} of $w$ is the set $\dom(w) = \{1, ..., n\}$ of {\em positions} of $w$. Let $y \in \dom(w)$, $Y \subseteq \dom(w)$, ${\bowtie} \in \{<, \le, =, \ge, >\}$ and $c \in \mathbb N$. Then, we write $\dd^{{\bowtie} c, w}_{\leftarrow}(Y, y)$ iff either there exists a position $z \in Y$ such that $z < y$ and, for the greatest such position $z$, $\langle w \rangle_y - \langle w \rangle_z \bowtie c$, or there exists no position $z \in Y$ with $z < y$, and $\langle w \rangle_y \bowtie c$. A {\em $w$-assignment} is a mapping $\sigma: V \to \dom(w) \cup 2^{\dom(w)}$ such that $\sigma(V_1) \subseteq \dom(w)$ and $\sigma(V_2) \subseteq 2^{\dom(w)}$. We define the {\em update} $\sigma[x/i]$ to be the $w$-assignment such that $\sigma[x/i](x) = i$ and $\sigma[x/i](y) = \sigma(y)$ for all $y \in V \setminus \{x\}$. Similarly, for $X \in V_2$ and $I \subseteq \dom(w)$, we define the update $\sigma[X/I]$. Let $\varphi \in \text{\sc Rdl}(\Sigma)$ and $\sigma$ be a $w$-assignment. The definition that the pair $(w, \sigma)$ {\em satisfies} the formula $\varphi$, written $(w, \sigma) \models \varphi$, is given inductively on the structure of $\varphi$ as usual for MSO logic where, for the new formulas $\dd^{{\bowtie} c}_{\leftarrow}(X, x)$, we put $(w, \sigma) \models \dd^{{\bowtie} c}_{\leftarrow}(X, x)$ iff $\dd_{\leftarrow}^{{\bowtie} c, w}(\sigma(X), \sigma(x))$.
A formula $\varphi \in \text{\sc Rdl}(\Sigma)$ is called a {\em sentence} if every variable occurring in $\varphi$ is bound by a quantifier. Note that, for a sentence $\varphi \in \text{\sc Rdl}(\Sigma)$, the relation $(w, \sigma) \models \varphi$ does not depend on $\sigma$, i.e., for any $w$-assignments $\sigma_1, \sigma_2$, $(w, \sigma_1) \models \varphi$ iff ${(w, \sigma_2) \models \varphi}$. Then, we will write $w \models \varphi$. For a sentence $\varphi \in \text{\sc Rdl}(\Sigma)$, let ${\mathcal L(\varphi) = \{w \in \mathbb T \Sigma^+ \; | \; w \models \varphi\}}$, the timed language {\em defined} by $\varphi$. Let $\Delta \subseteq \text{\sc Rdl}(\Sigma)$. We say that a timed language $\mathcal L \subseteq \mathbb T \Sigma^+$ is {\em $\Delta$-definable} if there exists a sentence $\varphi \in \Delta$ such that $\mathcal L(\varphi) = \mathcal L$.
Let $\mathcal V = \{X_1, ..., X_m\} \subseteq V$ with $|\mathcal V| = m$. For $\varphi \in \text{\sc Rdl}(\Sigma)$, let $\exists \mathcal V. \varphi$ denote the formula $\exists X_1. \; ... \; \exists X_m. \varphi$. For a formula $\varphi \in \text{\sc Rdl}(\Sigma)$, let $\mathcal D(\varphi) \subseteq V_2$ denote the set of all variables $X$ for which there exist $x \in V_1$, ${\bowtie} \in \{<, \le, =, \ge, >\}$ and $c \in \mathbb N$ such that $\dd_{\leftarrow}^{\bowtie c}(X, x)$ is a subformula of $\varphi$. Let $\text{\sc Rdl}^{\leftarrow}(\Sigma) \subseteq \text{\sc Rdl}(\Sigma)$ denote the set of all formulas $\varphi$ where quantification of second-order variables is applied only to variables not in $\mathcal D(\varphi)$. We denote by $\exists \text{\sc Rdl}^{\leftarrow}(\Sigma) \subseteq \text{\sc Rdl}(\Sigma)$ the set of all sentences of the form $\exists \mathcal D(\varphi). \varphi$ with $\varphi \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$.
\begin{theorem}[Wilke \cite{Wil942}] \label{Theorem:Wilke} Let $\Sigma$ be an alphabet and $\mathcal L \subseteq \mathbb T \Sigma^+$ a timed language. Then, $\mathcal L$ is recognizable iff $\mathcal L$ is $\exists \text{\sc Rdl}^{\leftarrow}(\Sigma)$-definable. \end{theorem}
\subsection{Weighted Relative Distance Logic}
\label{SSect:wRdl}
In this subsection, we consider a weighted version of relative distance logic. For untimed words, weighted MSO logic over semirings was defined in \cite{DG07}. A weighted MSO logic over (untimed) product valuation monoids was considered in \cite{DM12}. We will use a similar approach to define the syntax and the semantics of our weighted relative distance logic. In \cite{DM12}, valuation monoids were augmented with a product operation and a unit element to define the semantics of weighted formulas. Here, we proceed in a similar way and consider timed {\em product} valuation monoids.
A {\em timed product valuation monoid} (timed pv-monoid) $\mathbb M = (M, +, \val, \diamond, \mathbb 0, \mathbb 1)$ is a timed valuation monoid $(M, +, \val, \mathbb 0)$ equipped with a multiplication $\diamond: M \times M \to M$ and a unit $\mathbb 1 \in M$ such that $m \diamond \mathbb 1 = \mathbb 1 \diamond m = m$ and $m \diamond \mathbb 0 = \mathbb 0 \diamond m = \mathbb 0$ for all $m \in M$, $\val(((\mathbb 1, \mathbb 1), t_1), ..., ((\mathbb 1, \mathbb 1), t_n)) = \mathbb 1$ for all $n \ge 1$ and all $t_1, ..., t_n \in \mathbb R_{\ge 0}$, and $\val(((m_1, m_1'), t_1) ... ((m_n, m_n'), t_n)) = \mathbb 0$ whenever $m'_i = \mathbb 0$ for some $1 \le i \le n$. We say that $\mathbb M$ is {\em idempotent} if $+$ is idempotent.
\begin{example} \label{Ex:TPVM} If we augment the timed valuation monoids $\mathbb M^{\text{sum}}$, $\mathbb M^{\text{avg}}$ and $\mathbb M^{\text{disc}_{\lambda}}$ from Example \ref{Ex:TVM} with the multiplication $\diamond = +$ and the unit $\mathbb 1 = 0$, then we obtain the timed pv-monoids $\mathbb M_0^{\text{sum}}$, $\mathbb M_0^{\text{avg}}$ and $\mathbb M_0^{\text{disc}_{\lambda}}$. Note that these timed pv-monoids are idempotent. \end{example}
Motivated by the examples, for the clarity of presentation, we restrict ourselves to idempotent timed pv-monoids.
Let $\Sigma$ be an alphabet and $\mathbb M = (M, +, \val, \diamond, \mathbb 0, \mathbb 1)$ a timed pv-monoid. The set $\text{\sc wRdl}(\Sigma, \mathbb M)$ of formulas of {\em weighted relative distance logic} over $\Sigma$ and $\mathbb M$ is defined by the grammar $$
\varphi \; ::= \; \mathbb B. \beta \; | \; m \; | \; \varphi \vee \varphi \; | \; \varphi \wedge \varphi \; | \; \exists x. \varphi \; | \; \forall x. (\varphi, \varphi) \; | \; \exists X. \varphi $$ where $\beta \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$, $m \in M$, $x \in V_1$ and $X \in V_2$; the notation $\mathbb B. \beta$ indicates that here $\beta$ will be interpreted in a quantitative way.
Let $\mathbb T \Sigma^+_V$ denote the set of all pairs $(w, \sigma)$ where $w \in \mathbb T \Sigma^+$ and $\sigma$ is a $w$-assignment. For $\varphi \in \text{\sc wRdl}(\Sigma, \mathbb M)$, the {\em semantics} of $\varphi$ is the mapping $[\![\varphi]\!]: \mathbb T \Sigma^+_V \to M$ defined for all $(w, \sigma) \in \mathbb T \Sigma^+_V$ with $w = (a_1, t_1) ... (a_n, t_n)$ inductively on the structure of $\varphi$ as shown in Table~\ref{Table:Semantics}. \begin{table}[t] \begin{scriptsize} \begin{tabular}{@{\hspace{0.42cm}}l@{\hspace{1.3cm}}l} $ \begin{array}{rll} \! [\![\mathbb B. \beta]\!](w, \sigma) & = & \begin{cases} \mathbb 1, & \text{if } (w, \sigma) \models \beta, \\ \mathbb 0, & \text{otherwise} \end{cases} \\ \! [\![m]\!](w, \sigma) & = & m \\ \! [\![\varphi_1 \vee \varphi_2]\!](w, \sigma) & = & [\![\varphi_1]\!](w, \sigma) + [\![\varphi_2]\!](w, \sigma) \\ \end{array} $ & $ \begin{array}{rll} \! [\![\varphi_1 \wedge \varphi_2]\!](w, \sigma) & = & [\![\varphi_1]\!](w, \sigma) \diamond [\![\varphi_2]\!](w, \sigma) \\ \! [\![\exists x. \varphi]\!](w, \sigma) & = & \sum\limits_{i \in \dom(w)} [\![\varphi]\!](w, \sigma[x/i]) \\ \! [\![\exists X. \varphi]\!](w, \sigma) & = & \sum\limits_{I \subseteq \dom(w)} [\![\varphi]\!](w, \sigma[X/I]) \end{array} $ \end{tabular} $ \! [\![\forall x. (\varphi_1, \varphi_2)]\!](w, \sigma) = \val[(([\![\varphi_1]\!](w, \sigma[x/i]), [\![\varphi_2]\!](w, \sigma[x/i])), t_i)]_{i \in \dom(w)} $ \end{scriptsize}
\caption{The semantics of weighted relative distance logic} \label{Table:Semantics} \end{table} Here, $x \in V_1$, $X \in V_2$, $\beta \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$, $m \in M$ and ${\varphi, \varphi_1, \varphi_2 \in \text{\sc wRdl}(\Sigma, \mathbb M)}$.
\begin{remark} In \cite{Qua10, Qua11}, Quaas introduced a weighted version of relative distance logic over a semiring $\mathbb S = (S, +, \cdot, \mathbb 0, \mathbb 1)$ and a family of functions $\mathcal F \subseteq S^{\mathbb R_{\ge 0}}$ where elements of $S$ model discrete weights and functions $f \in \mathcal F$ model continuous weights. If $\mathcal F$ is a one-parametric family of functions $(f_s)_{s \in S}$, then our weighted logic incorporates the logic of Quaas over $\mathbb S$ and $\mathcal F$. However, for more complicated timed valuation functions (like average and discounting) we must have formulas which combine both discrete and continuous weights. Therefore, we use the formulas $\forall x. (\varphi_1, \varphi_2)$. Our approach also extends the idea of \cite{DM12} to define the semantics of formulas with a first-order universal quantifier using the valuation function. \end{remark}
\begin{example} Let $\Sigma = \{a, b\}$, $C(a), C(b) \in \mathbb R$ be the {\em continuous costs} of $a,b$ and $D(a), D(b) \in \mathbb R$ the {\em discrete costs}. Given a timed word ${w = (\gamma_1, t_1) ... (\gamma_n, t_n) \in \mathbb T \Sigma^+}$, the {\em average cost} of $w$ is defined as $A(w) = \frac{\sum_{i = 1}^n (C(\gamma_i) \cdot t_i + D(\gamma_i))}{\sum_{i=1}^n t_i}$. Let $\mathbb M_0^{\text{avg}}$ be defined as in Example \ref{Ex:TPVM}. For $U \in \{C, D\}$, let $\varphi_U(x) = (P_a(x) \wedge U(a)) \vee (P_b(x) \wedge U(b))$. Consider the $\text{\sc wRdl}(\Sigma, \mathbb M_0^{\text{avg}})$-sentence $\varphi = \forall x. (\varphi_C(x), \varphi_D(x))$. Then, for all ${w \in \mathbb T \Sigma^+}$, we have: $[\![\varphi]\!](w) = A(w)$. \end{example}
A sentence $\varphi \in \text{\sc wRdl}(\Sigma, \mathbb M)$ is defined as usual as a formula without free variables. Then, for every sentence $\varphi \in \text{\sc wRdl}(\Sigma, \mathbb M)$, every timed word $w \in \mathbb T \Sigma^+$ and every $w$-assignment $\sigma$, the value $[\![\varphi]\!](w, \sigma)$ does not depend on $\sigma$. Hence, we can consider the semantics of $\varphi$ as a quantitative timed language $[\![\varphi]\!]: \mathbb T \Sigma^+ \to M$ over $\mathbb M$.
Similarly to the results of \cite{DG07}, in general weighted relative distance logic and WTA are not expressively equivalent. We can show that the QTL $\mathbb L: \mathbb T \Sigma^+ \to \mathbb R \cup \{\infty\}$ with $\mathbb L(w) = |w|^2$ is not recognizable over the timed valuation monoid $\mathbb M^{\text{sum}}$. But this QTL is defined by the $\text{\sc wRdl}(\Sigma, \mathbb M^{\text{sum}}_0)$-sentence $\forall x. (0, \forall y. (0, 1))$.
Nevertheless, there is a syntactically restricted fragment of weighted relative distance logic which is expressively equivalent to WTA. Let $\Sigma$ be an alphabet and $\mathbb M = (M, +, \val, \diamond, \mathbb 0, \mathbb 1)$ an idempotent timed pv-monoid. A formula $\varphi \in \text{\sc wRdl}(\Sigma, \mathbb M)$ is called {\em almost boolean} if it is built from boolean formulas $\mathbb B. \beta$ with $\beta \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$ and constants $m \in M$ using disjunctions and conjunctions. We say that a formula $\varphi$ is {\em syntactically restricted} if whenever it contains a subformula $\forall x. (\varphi_1, \varphi_2)$, then $\varphi_1, \varphi_2$ are almost boolean; whenever it contains a subformula $\varphi_1 \wedge \varphi_2$, then either $\varphi_1, \varphi_2$ are almost boolean or $\varphi_1 = \mathbb B. \varphi'$ or $\varphi_2 = \mathbb B. \varphi'$ with $\varphi' \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$; every constant $m \in M$ is in the scope of a first-order universal quantifier. Let $\text{\sc Def}^{\text{res}}(\Sigma, \mathbb M)$ denote the collection of all QTL $\mathbb L: \mathbb T \Sigma^+ \to M$ over $\mathbb M$ such that $\mathbb L = [\![\varphi]\!]$ for some syntactically restricted $\text{\sc wRdl}(\Sigma, \mathbb M)$-sentence $\varphi$.
Our main result for weighted relative distance logic is the following theorem.
\begin{theorem} \label{Thm:Rec_Eq_wRdl} Let $\Sigma$ be an alphabet and $\mathbb M$ an idempotent timed pv-monoid. Then, $\text{\sc Def}^{\text{\rm res}}(\Sigma, \mathbb M) = \text{\sc Rec}(\Sigma, \mathbb M)$. \end{theorem}
Now we give a sketch of the proof of this theorem. Let $\mathcal N^{\exists \text{\sc Rdl}^{\leftarrow}}(\Sigma, \mathbb M)$ denote the collection of all QTL ${\mathbb L: \mathbb T \Sigma^+ \to M}$ over $\mathbb M$ such that there exist an alphabet $\Gamma$, mappings $h: \Gamma \to \Sigma$, $g: \Gamma \to M \times M$ and a $\exists \text{\sc Rdl}^{\leftarrow} (\Gamma)$-definable timed language $\mathcal L$ such that $\mathbb L = h((\val \circ g) \cap \mathcal L)$. For the proof of Theorem \ref{Thm:Rec_Eq_wRdl}, we establish a Nivat-like characterization of definable QTL.
\begin{theorem} \label{Thm:LogicNivat} Let $\Sigma$ be an alphabet and $\mathbb M$ an idempotent timed pv-monoid. Then, $\mathcal N^{\exists \text{\sc Rdl}^{\leftarrow}}(\Sigma, \mathbb M) = \text{\sc Def}^{\text{\rm res}}(\Sigma, \mathbb M)$. \end{theorem}
\begin{proof}[Sketch] To show the inclusion $\subseteq$, let $\mathbb L = h((\val \circ g) \cap \mathcal L)$ where $\Gamma$, $h$, $g$ and $\mathcal L$ are as in the definition of $\mathcal N^{\exists \text{\sc Rdl}^{\leftarrow}}(\Sigma, \mathbb M)$. Let $\beta$ be a $\exists \text{\sc Rdl}^{\leftarrow}(\Sigma)$-sentence defining $\mathcal L$. We introduce a family $\mathcal V = (X_{\gamma})_{\gamma \in \Gamma}$ of second-order variables not occurring in $\beta$. We replace each predicate $P_{\gamma}(x)$ with $\gamma \in \Gamma$ occurring in $\beta$ by the formula ${P_{h(\gamma)}(x) \wedge X_{\gamma}(x)}$; so we obtain a formula $\beta' \in \exists \text{\sc Rdl}^{\leftarrow}(\Sigma)$. Assume that $\beta' = \exists \mathcal D(\beta''). \beta''$ with $\beta'' \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$. We construct a formula $\text{Part} \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$ which demands that the variables $\mathcal V$ form a partition of the domain, and a formula $H \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$ which demands that, whenever a position of a word belongs to $X_{\gamma}$, then this position is labelled by $h(\gamma)$. Then, the following syntactically restricted $\text{\sc wRdl}(\Sigma, \mathbb M)$-sentence defines $\mathbb L$: \[ \exists (\mathcal V \cup \mathcal D(\beta'')). \big[\mathbb B. (\beta'' \wedge \text{Part} \wedge H) \wedge \forall x. \textstyle \big(\bigvee_{\gamma \in \Gamma} \mathbb B. X_{\gamma}(x) \wedge g_1(\gamma), \bigvee_{\gamma \in \Gamma} \mathbb B. X_{\gamma}(x) \wedge g_2(\gamma) \big) \big] \] where, for $i \in \{1,2\}$, $g_i$ is the projection of $g$ to the $i$-th coordinate.
To show the inclusion $\supseteq$, we introduce {\em canonical $\text{\sc wRdl}(\Sigma, \mathbb M)$-sentences} which are of the form $\varphi = \exists \mathcal V. \forall y. (\bigvee_{i = 1}^k \mathbb B. \beta_i \wedge m_i, \bigvee_{i = 1}^k \mathbb B. \beta_i \wedge m'_i)$ where $\mathcal V$ is a set of variables, $m_1, ..., m_k, m_1', ..., m_k' \in M$ and $\beta_1, ..., \beta_k \in \text{\sc Rdl}^{\leftarrow}(\Sigma)$ are such that, for every timed word $w \in \mathbb T \Sigma^+$ and every $w$-assignment $\sigma$, there exists exactly one $i \in \{1, ..., k\}$ such that $(w, \sigma) \models \beta_i$. We can show that every syntactically-restricted sentence can be transformed into a canonical one. It remains to prove that, for a canonical sentence $\varphi$ as above, $[\![\varphi]\!] \in \mathcal N^{\exists \text{\sc Rdl}^{\leftarrow}}(\Sigma, \mathbb M)$. Let $M^1_{\varphi} = \{m_1, ..., m_k\}$ and $M^2_{\varphi} = \{m'_1, ..., m'_k\}$. We put $\Gamma = \Sigma \times M_{\varphi}^1 \times M_{\varphi}^2$. Let $h: \Gamma \to \Sigma$ be the projection to the first coordinate. Let $g: \Gamma \to M \times M$ be the projection to $M_{\varphi}^1 \times M_{\varphi}^2$. Then we can construct a $\exists \text{\sc Rdl}^{\leftarrow}(\Gamma)$-sentence $\beta$ of the form $\exists \mathcal V. \forall y. \beta'$ such that $[\![\varphi]\!] = h((\val \circ g) \cap \mathcal L(\beta))$. \qed \end{proof}
Then, our Theorem \ref{Thm:Rec_Eq_wRdl} follows from Theorem \ref{Thm:LogicNivat}, the Nivat Theorem \ref{Theorem:Nivat} and Wilke's Theorem \ref{Theorem:Wilke}.
\begin{remark} We can also follow the approach of \cite{DG07} to prove our Theorem \ref{Thm:Rec_Eq_wRdl}. Compared to this way, our new proof technique has the following advantages. The proof idea of \cite{DG07} involves technical details like B\"uchi's encodings of assignments and a bulky logical description of accepting runs of timed automata. In our new proof, these details are taken care of by Wilke's proof for unweighted relative distance logic. \end{remark}
Let $\Sigma$ be an alphabet, $\mathbb M^{\text{sum}}$ the timed valuation monoid as in Example \ref{Ex:TVM}(a) and $\mathcal A$ a WTA over $\Sigma$ and $\mathbb M^{\text{sum}}$. As was shown in \cite{ATP01, BFHL01, LBBF01}, $\inf\{||\mathcal A||(w) \; | \; w \in \mathbb T \Sigma^+\}$ is computable. This result and our Theorem \ref{Thm:Rec_Eq_wRdl} imply decidability results for weighted relative distance logic. \begin{itemize} \item Let $\mathbb M_0^{\text{sum}}$ be the timed pv-monoid as in Example \ref{Ex:TPVM}. It is decidable, given an alphabet $\Sigma$, a syntactically restricted sentence ${\varphi \in \text{\sc wRdl}(\Sigma, \mathbb M_0^{\text{sum}})}$ with constants from $\mathbb Q$ and a threshold $\theta \in \mathbb Q$, whether there exists $w \in \mathbb T \Sigma^+$ with $[\![\varphi]\!](w) < \theta$. \item Let $\mathbb M_0^{\text{avg}}$ be the timed pv-monoid as in Example \ref{Ex:TPVM}. It is decidable, given an alphabet $\Sigma$, a syntactically restricted sentence ${\varphi \in \text{\sc wRdl}(\Sigma, \mathbb M_0^{\text{avg}})}$ with constants from $\mathbb Q$ and a threshold $\theta \in \mathbb Q$, whether there exists $w \in \mathbb T \Sigma^+$ with $\langle w \rangle > 0$ and $[\![\varphi]\!](w) < \theta$. \end{itemize}
\section{Conclusion and Future Work}
In this paper, we proved a version of Nivat's theorem for weighted timed automata on finite words which states a connection between the quantitative and qualitative behaviors of timed automata. We also considered several applications of this theorem. Using this theorem, we studied the relations between sequential, unambiguous and non-deterministic WTA. We also introduced a weighted version of Wilke's relative distance logic and established a B\"uchi-like result for this logic, i.e., we showed the equivalence between restricted weighted relative distance logic and WTA. Using our Nivat theorem, we deduced this from Wilke's result.
Because of space constraints, we did not present in this paper the following results. As in \cite{DM12}, for timed pv-monoid with additional properties there are larger fragments of weighted relative-distance logic which are still expressively equivalent to WTA. For the simplicity of presentation, we restricted ourselves to idempotent timed pv-monoids. However, we also obtained a more complicated result for non-idempotent timed pv-monoids. In \cite{Qua10, Qua11}, for weighted relative distance logic over non-idempotent semi\-rings, a strong restriction on the use of a first-order universal quantification was done. Surprisingly, in our result we could avoid this restriction.
Our future work concerns the following directions. The ongoing research should extend the currently obtained results to $\omega$-infinite words. This work should be further extended to the {\em multi-weighted} setting for WTA, e.g., the optimal reward-cost ratio \cite{BBL04, BBL08} or the optimal consumption of several resources where some resources must be restricted \cite{LR05}. A logical characterization of untimed multi-weighted automata was given in \cite{DP13}. It could be also interesting to compare for the weighted and unweighted cases the complexity of translations between logic and automata. We believe that our Nivat theorem will be helpful for this.
\end{comment}
\end{document}
\begin{document}
\newcommand{\comment}[1]{
\par \noindent \marginpar{\textsc{Note}} \framebox{\begin{minipage}[c]{0.95 \textwidth}
#1 \end{minipage}}
\par}
\title[Symmetries of Spaces with Lower Curvature Bounds]{Symmetries of Spaces with Lower Curvature Bounds}
\author[Searle]{Catherine Searle}
\address[Searle]{Department of Mathematics, Statistics, and Physics, Wichita State University, Wichita, Kansas} \email{catherine.searle@wichita.edu}
\date{\today}
\maketitle
\section{Introduction}
Global Riemannian Geometry generalizes the classical Euclidean, Spherical and Hyperbolic geometries. One of the major challenges in this area is to understand how local invariants such as curvature, that is, how much a space ``bends", relate to global topological invariants such as fundamental group, itself a measure of how ``connected" a manifold is. While locally Riemannian manifolds have the topology of Euclidean space, the geometry typically deviates from that of $\mathbb{R}^n$: curvature is the cause of this deviation.
In this article our main focus is on closed manifolds with a lower bound on the {\em sectional} curvature. A simple way to understand a lower {\em sectional} curvature bound is via triangle comparisons. We say that a manifold has a lower curvature bound $\kappa$ if the angle sum of any geodesic triangle, that is, a triangle formed by shortest length curves, is larger than or equal to the angle sum of the corresponding triangle in $M^2(\kappa)$, the $2$-dimensional model space with constant curvature $\kappa$. Thus, we say that a manifold has positive, zero or negative curvature, that is, $\kappa >0$, $\kappa=0$, or $\kappa<0$, respectively, if the angle sum of a geodesic triangle is strictly greater than $\pi$, equal to $\pi$, or strictly less than $\pi$, respectively. In Figure~\ref{triangles} below, we see how a geodesic triangle looks in positive, zero, and negative curvature, that is, for $\kappa>0, \kappa=0$, and $\kappa<0$, respectively.
\begin{figure}
\caption{Geodesic Triangles}
\label{triangles}
\end{figure}
\section{Closed Manifolds of Positive and Non-negative Sectional Curvature}
The classification of closed manifolds of positive and non-negative sectional curvature is a long-standing and very difficult problem in Riemannian geometry. Unless otherwise stated, all curvatures considered here are sectional. For positive curvature, to date, other than some special examples in dimensions less than or equal to $24$, all known simply-connected examples are spherical in nature, that is, they are spheres, or quotients of spheres: $\mathrm{S}^n$, $\mathbb{C}\mathrm{P}^k$, or $\mathbb{H}\mathrm{P}^l$. There are many more examples of Riemannian manifolds of non-negative curvature. We list a few examples: \begin{itemize} \item Homogeneous spaces, $G/H$ \item Products of manifolds of non-negative curvature \item Biquotients, $G//H$ \item Bases of Riemannian submersions of non-negatively curved manifolds. \end{itemize}
When approaching classification problems, one looks for {\em obstructions} and {\em constructions}. Among the obstructions to positive and non-negative curvature we have the following five, now classical, results.
\subsection{Obstructions} We begin by listing obstructions for non-negative sectional curvature. Note that some of these come from results about other types of lower curvature bounds such as Ricci, which is an average of the sectional curvatures, and scalar, which is an average of the Ricci curvatures. In particular, if $\sec(M)\geq \kappa$ then both the Ricci and scalar curvatures are bounded below by $\kappa$.
The first result is due to Cheeger and Gromoll \cite{CG1}
and tells us that when studying manifolds of non-negative curvature, it suffices to limit our attention to those that are closed, that is compact without boundary. \begin{ST}[Cheeger and Gromoll \cite{CG1}] Let $M$ be a complete manifold of non-negative sectional curvature. Then there exists a closed, totally geodesic, embedded submanifold, $S$, the {\em soul} of $M$, and $M$ is diffeomorphic to the normal bundle over $S$. \end{ST} \noindent The next theorem limits the fundamental group of a complete, non-negatively curved manifold and is also due to Cheeger and Gromoll \cite{CG2}.
It was originally stated for compact manifolds of non-negative Ricci curvature. \begin{SplittingT}[Cheeger and Gromoll \cite{CG2}] Let $M$ be a closed manifold of non-negative sectional curvature. Then the universal cover of $M$, $\widetilde M$, splits isometrically as $$\widetilde M=N\times \mathbb{R}^k,$$ where $N$ is a closed, simply-connected, non-negatively curved Riemannian manifold, and $k$ is the abelian rank of $\pi_1(M)$. Moreover, $\pi_1(M)$ contains an abelian subgroup of finite index. \end{SplittingT} \noindent The next theorem, due to Gromov \cite{G}, limits the total Betti number of a manifold of non-negative curvature. As one application, it tells us that we can only take a limited number of connected sums of complex projective spaces and maintain non-negative sectional curvature. \begin{GBNT} Let $M^n$ be a compact $n$-dimensional manifold of non-negative sectional curvature. Then there exists a constant $\mathcal{C}(n)$ such that for any field $\mathbb{F}$, $$\Sigma b_i(M^n; \mathbb{F}) \leq \mathcal{C}(n).$$ \end{GBNT} \noindent We say a manifold is flat if all of its sectional curvatures are identically zero. It is known that compact, flat, spin manifolds all have vanishing $\hat{A}$-genus and $\alpha$-invariant. Thus, the topological obstructions for positive scalar curvature due to Hitchin and Lichnerowicz give us an obstruction for manifolds of non-negative curvature. \begin{LHT} A compact, spin, $n$-dimensional manifold, $M^n$, with $\hat{A}(M)\neq 0$ or $\alpha(M)\neq 0$ does not admit a metric of non-negative sectional curvature. \end{LHT} \noindent For example, via this theorem, there are $9$-dimensional exotic spheres that carry no metric of non-negative sectional curvature.
We now pass to obstructions for strictly positive sectional curvature. The first is due to Bonnet and Myers, which was originally stated for manifolds with uniformly bounded positive Ricci curvature, and tells us that when studying manifolds of positive curvature, we may restrict our attention to those that are compact and simply-connected.
\begin{BMT} Let $M$ be a Riemannian manifold with $\sec(M)\geq \kappa$ for some $\kappa>0$. Then $M$ is compact and $\pi_1(M)$ is finite. \end{BMT} \noindent Finally, a result due to Synge gives us information about the fundamental groups and orientability of closed manifolds of positive curvature.
\begin{Synge}\label{SyT} Let $M$ be a closed manifold of positive sectional curvature. Then the following hold: \begin{enumerate} \item If $M$ is even-dimensional, then $\pi_1(M)$ is either trivial or $\mathbb{Z}_2$; and \item If $M$ is odd-dimensional, then $M$ is orientable. \end{enumerate} \end{Synge} \noindent In particular, Synge's Theorem tells us that $\rrr\mathrm{P}^2\times \rrr\mathrm{P}^2$ does not admit a metric of positive sectional curvature. Likewise, for a closed, orientable, odd-dimensional manifold $M^{2n+1}$, the product manifold, $M^{2n+1}\times \rrr\mathrm{P}^2$ does not admit positive sectional curvature.
It bears mentioning at this point that despite the difference in magnitude of the number of examples of positive and non-negative sectional curvature, when we restrict our attention to the class of closed, simply-connected manifolds, there are no manifolds that admit a metric of non-negative sectional curvature that are known to not admit a metric of positive sectional curvature.
\subsection{Constructions} Turning our attention to constructions, we have the Gray--O'Neill curvature equations and Cheeger deformations. The Gray-O'Neill curvature equations tell us that for a Riemannian submersion, $\pi:E\rightarrow B$, if $\sec(E)\geq\kappa$ for some real number $\kappa$, then $\sec(B)\geq\kappa$. That is, curvatures can only increase. For example, via the Gray--O'Neill equations, it follows that any homogeneous space, $G/H$, admits a submersion metric of non-negative curvature from the bi-invariant metric $g_{\textrm{bi}}$ on the compact Lie group $G$. The same can be seen to be true for bi-quotients, $G//H$, defined as quotients of the free action of $H\subset G\times G$ on $G$ given by $(h_1, h_2)*(g)=h_1gh_2^{-1}$. Observe that while $S^2\times S^2$ covers $\rrr\mathrm{P}^2\times \rrr\mathrm{P}^2$, if $S^2\times S^2$ were to admit positive sectional curvature, see the first Hopf Conjecture below, then the covering map is {\em not} a Riemannian submersion.
Let $G$ be a closed subgroup of the isometry group of $M$, a Riemannian manifold, endowed with any bi-invariant metric $g_G$. Cheeger deformations on a Riemannian manifold, $M$, leverage the power of Riemannian submersions by submersing from a $G$-extension of the manifold, $\pi:M\times G\rightarrow M$, where the base space of the Riemannian submersion is $M=(M\times G)/G$, with the free $G$-action given by $g'(x, g)=(g'x, g'g)$. We obtain a one-parameter family of metrics on the total space, $\{(M\times G, g_M + l^2g_G)\}$, giving us a one-parameter family of metrics on the quotient space $\{((M\times G)/G, g_l)\}$. This family of metrics is called a Cheeger deformation of the original manifold $(M, g_M)$. As $l$ approaches infinity, the deformed metrics $g_l$ converge to the original metric $g_M$, and as $l$ approaches $0$, the sequence of manifolds $\{((M\times G)/G, g_l)\}$ converge to the quotient space $M/G$. Note that positive curvature is preserved under such deformations, while non-negative curvature may be improved to positive curvature. \begin{example} Consider the linear circle action on $\mathbb{C}$ given by complex multiplication, where $\mathbb{C}$ has the flat metric. As $l$ decreases, the manifold admits strictly positive sectional curvature. Notably, in the limit, the manifold collapses to a half line. See Figure \ref{fig:Cheeger} below for three images describing this deformation\footnote{Courtesy of Lawrence Mouill\'e, see \url{https://lawrencemouille.wordpress.com/author/lawrencemouille/} for a .gif of this action}.
\end{example}
\begin{figure}
\caption{A Cheeger deformation of the plane}
\label{fig:image-a}
\label{fig:image-b}
\label{fig:image-c}
\label{fig:Cheeger}
\end{figure}
Cheeger deformations have been useful in deforming $G$-invariant metrics and were originally used by Cheeger \cite{Ch}
to show that $\ccc\mathrm{P}^2\#\ccc\mathrm{P}^2$ admits a metric of non-negative sectional curvature. In particular, while not named explicitly, they feature in the theorem of Lawson and Yau \cite{LY}
showing that the existence of a smooth non-abelian compact Lie group action guarantees positive scalar curvature on a compact manifold. They are also an important tool for many lifting theorems, where one tries to lift a synthetic curvature lower bound on the quotient space $M/G$ to the manifold $M$, such as in work of Searle and Wilhelm \cite{SW}. Additionally, they have recently been used by Cavenaghi, Grama, and Speran\c{c}a \cite{CGSp} who claim to show that the base of a positively curved principal $SU(2)$- or $SO(3)$-bundle must have dimension greater than or equal to $4$. If the result is true, it provides an answer to a special case of the Petersen-Wilhelm conjecture that states that for a fibration of a positively curved manifold, the dimension of the fiber must be strictly less than the dimension of the base.
\subsection{Important Open Questions} Finally, three important open questions for manifolds of positive and non-negative curvature are contained in the following conjectures. \begin{itemize} \item (H. Hopf) {\em $S^2\times S^2$ does not admit a metric of positive sectional curvature.} \item (H. Hopf) {\em Let $M^{2m}$ be an even-dimensional, closed Riemannian manifold of positive, respectively, non-negative sectional curvature. Then $M^{2m}$ has positive, respectively, non-negative Euler characteristic.} \item (Bott){\em Let $M^n$ be a closed, simply-connected Riemannian manifold of non-negative sectional curvature. Then $M$ is rationally elliptic.} \end{itemize} Recall that a closed, simply-connected topological space is called {\em rationally elliptic} if $\pi_*(X)\otimes \,\mathbb{Q}$ and $H_*(X; \mathbb{Q})$ are finite-dimensional $\mathbb{Q}$-vector spaces. If we drop the simply-connected hypothesis, we refer to the space as {\em rationally $\Omega$-elliptic}. Rationally elliptic manifolds have been classified in dimensions less than or equal to $5$ and are diffeomorphic to the known examples of manifolds of non-negative curvature. The classification in dimensions $4$ and $5$ is curvature-free and due to Paternain and Petean \cite{PaPe}, while in dimensions less than or equal to $3$ we only have spheres by the Gauss-Bonnet Theorem in dimension $2$ and the work of Hamilton \cite{Ha}
in dimension $3$. Rationally elliptic manifolds have non-negative Euler characteristic and if the Euler characteristic is positive, then all odd degree Betti numbers vanish. Resolving the Bott Conjecture would then resolve the second Hopf Conjecture for non-negatively curved manifolds.
Note that in dimensions greater than or equal to $4$, the classification of closed, simply-connected, positively and non-negatively curved manifolds is still open. In an attempt to address this issue in dimension $4$, we have the following theorem due to Hsiang and Kleiner \cite{HsKl}. \begin{theorem}[Hsiang and Kleiner \cite{HsKl}] \label{HK} Let $M^4$ be a closed, simply-connected, $4$-manifold of positive sectional curvature admitting an isometric and effective $T^1$-action. Then $M$ is homeomorphic to $S^4$ or $\ccc\mathrm{P}^2$. \end{theorem}
The proof of this theorem reduces to proving that the Euler characteristic of the manifold is bounded between $2$ and $3$ and applying deep classification work of Freedman \cite{Fr}. In particular, the theorem tells us that if $S^2\times S^2$ were to admit a metric of positive curvature, then it must have a finite group of isometries.
The theorem has since been improved to diffeomorphism by work of Grove and Searle \cite{GrS1} and Grove and Wilking \cite{GrWi}. It was extended to non-negative curvature by independent work of Kleiner \cite{Kl} and Searle and Yang \cite{SY}, Galaz-Garc\'ia \cite{G-G}, Galaz-Garc\'ia and Kerin \cite{G-GK}, and Grove and Wilking \cite{GrWi}. There, one sees that three more manifolds occur: $S^2\times S^2$ and $\ccc\mathrm{P}^2\#\pm \ccc\mathrm{P}^2$. More recently, the theorem was extended to almost non-negative curvature by Harvey and Searle \cite{HaS}, who showed that only the same manifolds occur. Grove and Halperin suggested extending the Bott Conjecture to include almost non-negatively curved manifolds. The result in \cite{HaS} confirms this extended Bott Conjecture with the addition of $S^1$-symmetry in dimension $4$.
\subsection{The Symmetry Program}
In the early nineties, Karsten Grove, inspired by Theorem \ref{HK} and observing that the known examples at that time of positive and non-negative curvature were all highly symmetric, proposed his {\em Symmetry Program}:
\begin{SP} Classify closed manifolds of positive and non-negative curvature with ``large" symmetries. \end{SP}
By work of Myers and Steenrod \cite{MySt},
the isometry group of a compact manifold is a compact Lie group, so when we talk about ``symmetries", we mean an isometric action by a compact Lie group. An attractive aspect of this program is the flexibility of the term ``large". For example, for a given $G$-action on a Riemannian manifold $M$, ``large" can mean that the dimension of the quotient space, $M/G$, is small. Another perspective is to consider $G$-actions with large fixed point sets, and yet another is to consider $G$-actions with large rank. The goal of this program is to successively lower the size of the group and in the process find new examples, new obstructions, or new constructions. To date the program has been quite successful in positive and non-negative curvature and has been extended to other types of lower curvature bounds, as well as other spaces that generalize Riemannian manifolds. We will survey some of the results leading to partial classifications, as well as partial resolutions of the three conjectures mentioned earlier.
For further information on the subject of positive and non-negatively curved manifolds with symmetries, there are surveys by Grove \cite{Gr:S}, Ziller \cite{Z1}, Ziller \cite{Z2}, and Wilking \cite{Wi5}.
\section{Preliminaries}
Before we begin, we first establish some notation as well as some useful facts about transformation groups in general. Remark that manifolds are assumed to be closed unless otherwise specified.
\subsection{Transformation Groups}
Let $G$ be a compact Lie group acting on a smooth manifold $M$. We denote by $G_x=\{\, g\in G : gx=x\, \}$ the \emph{isotropy group} at $x\in M$ and by $G(x)=\{\, gx : g\in G\, \}\simeq G/G_x$ the \emph{orbit} of $x$. Orbits are called {\em principal, exceptional,} or {\em singular} depending on the size of their isotropy group, as follows. An orbit is principal if the isotropy group is the smallest possible among all isotropy groups. Orbits are called exceptional when their isotropy group is a non-trivial finite extension of the principal isotropy subgroup, and singular when their isotropy group is of strictly larger dimension than that of the principal orbits. The isotropy subgroups of an orbit are conjugate to one another, that is, given $y\in G(x)$, $y=gx$ for some $g\in G$, and $g^{-1}G_yg=G_x$. It makes sense then to talk of the {\em isotropy type} of an orbit. A $G$-action on a manifold defines a natural stratification, with strata corresponding to the union of orbits of each isotropy type.
For isometric actions of compact Lie groups, the Slice Theorem gives us an explicit description of a sufficiently small tubular neighborhood of any orbit. Namely, given an orbit $G(x)\subset M$, a sufficiently small $\epsilon$-tubular neighborhood, $D_{\epsilon}(G(x))$ is diffeomorphic to $G\times_{G_x} D^{\perp}_x,$ where $D^{\perp}_x$ is the corresponding $\epsilon$-ball at the origin of the normal space $T^{\perp}_x$ to $G(x)$ at the point $x$, called the normal slice to the orbit.
For more details on the theory of transformation groups see Bredon \cite{Br}.
\subsection{Alexandrov Spaces} An Alexandrov space, $(X, \textrm{dist})$, is a finite dimensional length space with curvatures bounded below via triangle comparisons with the corresponding model spaces. All closed Riemannian manifolds are Alexandrov spaces and limits of Gromov-Hausdorff sequences of closed Riemannian manifolds with the same lower curvature bound are, also.
For closed manifolds with sectional curvature bounded below by $\kappa$ and admitting an isometric $G$-action, the quotient space $M/G$ is an Alexandrov space with curvature bounded below by $\kappa$ (see Perelman and Petrunin \cite{PerPet})
with locally totally geodesic orbit strata. There is also a Soul Theorem for Alexandrov spaces of non-negative curvature due to Perelman \cite {Per}. \begin{STA} Let $X^n$ be a complete, $n$-dimensional Alexandrov space with boundary $N$ with $\curv\geq 0$. Then there exists a convex subset $S\subset X$, the {\em soul} of $X$, at maximal distance from $N$ and a deformation retraction of $X$ onto $S$. \end{STA}
For more basic information about Alexandrov spaces, see Burago, Burago, and Ivanov \cite{BBI} and Alexander, Kapovitch, and Petrunin \cite{AKP}.
\subsection{Fixed Point Sets}
We will denote by $M^G=\{\, x\in M : gx=x \,\text{for all} \,g\in G \, \}$ the \emph{fixed point set} of the $G$-action. If $G=T^k$, then we will often simply write $M^T$ for its fixed point set.
The components of the fixed point set of an isometry are closed, totally geodesic submanifolds of $M$ and orientable if $M$ is, by work of Kobayashi \cite{Ko}.
In the special case of a circle action, the components of $M^{S^1}$ are of even codimension. Note as well that for $H$ a proper subgroup of $G$, $M^G\subset M^H$ and $N_G(H)$, the normalizer of $H$ in $G$, acts invariantly on $M^H$ with ineffective kernel $H$. Thus, there is an induced action of $N_G(H)/H$ on $M^H$. If, moreover, $N_G(H)/H$ is connected, then the action is invariant on each connected component of $M^H$. So, for the special case where $G=T^k$ and $M$ is orientable, for every subtorus $T^l\subset T^k$, every $N\subset M^{T^l}$ is also an orientable, closed submanifold admitting an induced $T^{k-l}$-action with the same lower curvature bound and the same parity of dimension as $M$. This sets the stage for induction arguments, something quite unusual in Riemannian geometry.
Finally, we mention some results about the existence of fixed point sets in positive curvature. A result of Weinstein \cite{We} guarantees the existence of a fixed point for any orientation preserving isometry on an even-dimensional, orientable, closed manifold of positive curvature. This result was generalized to general torus actions in even dimensions by Berger \cite{B2}
and in odd dimensions by Sugahara \cite{Su}
as follows. \begin{theorem} Let $M^n$ be a closed, $n$-manifold of positive sectional curvature admitting an isometric, effective $T^k$-action. Then the following hold: \begin{enumerate} \item $($Berger$)$ \cite{B2} If $n=2m$, then $M^{T}\neq \emptyset$; and \item $($Sugahara$)$ \cite{Su} If $n=2m+1$, then either there is a point $p\in M$ such that $T^k(p)\cong S^1$, or $M^{T}\neq \emptyset$. \end{enumerate} \end{theorem} \begin{remark} The results of Berger and Sugahara do not hold for $\mathbb{Z}_p^k$-actions. For example, letting $k=p=2$, there exists a $\mathbb{Z}_2^2$ action on $S^2$ which has no fixed points, generated by the orientation-preserving involutions $$\begin{pmatrix} -1 &&\cr &-1&\cr &&1 \end{pmatrix}\,\,{\textrm{ and }}\,\, \begin{pmatrix} -1 &&\cr &1&\cr && -1 \end{pmatrix}.$$
Likewise, there are free $\mathbb{Z}_2^2$ and $\mathbb{Z}_3^2$ actions on closed, positively curved $7$-manifolds by work of Shankar \cite{Sh}
and Grove and Shankar \cite{GrSha},
respectively and free $\mathbb{Z}_3^2$ actions on closed, positively curved $13$-dimensional manifolds by work of Bazaikin \cite{Ba}. \end{remark}
\begin{remark}\label{bottGGKR} The results of Berger and Sugahara no longer hold when we pass to non-negative curvature: there exist free $T^k$ actions on the $k$-fold product of three-spheres. On the other hand, if the Bott Conjecture holds, then for an effective torus action of sufficiently large rank on a closed, simply-connected manifold of non-negative curvature, the torus action will have non-trivial isotropy.
This is quantified in the work of Galaz-Garc\'ia, Kerin, and Radeschi \cite{G-GKR}, who show that
if $M^n$, a rationally elliptic $n$-dimensional smooth manifold, admits a smooth and effective $T^k$-action, then
$k\leq \lfloor \frac{2n}{3}\rfloor$, and any subtorus acting freely on $M^n$ has rank bounded above by $\lfloor \frac{n}{3}\rfloor$.
\end{remark}
\subsection{What is ``Large"?}
Here we give three examples of what ``large" symmetries can mean. They are: \begin{itemize} \item {\em Small quotient space}, that is, $\textrm{dim}(M/G)$ is small; \item {\em Large fixed point set}, that is, $\textrm{dim}(M^G)$ is large with respect to the dimension of the manifold $M$; and \item {\em Large rank}, that is, we consider group actions $G$ for which $\textrm{rk}(G)$ is large with respect to the maximal possible rank of a group action on a manifold. \end{itemize} In what follows, we will discuss these three different meanings of large and survey results for these definitions in both positive and non-negative curvature. We note that for the last definition, if one passes to discrete groups, for example, $\mathbb{Z}_p^k$, we can define large discrete $p$-symmetry rank, for $p$ a prime, to be large with respect to the maximal possible
number $k$ such that the isometry group of $M$ contains an elementary $p$-group of rank $k$.
\section{Small quotient space} We survey results for $G$-actions on manifolds with small quotient spaces in both positive and non-negative curvature. Here, the general strategy is to leverage knowledge of the quotient space to identify the manifold.
\subsection{Positive Sectional Curvature}
This constraint has been utilized to obtain classifications of closed manifolds of positive sectional curvature of low cohomogeneity, where the cohomogeneity of a $G$-action on a manifold, $M$, is equal to the dimension of the quotient space, or equivalently, the codimension of the principal orbits, $G/H$ of the $G$-action. In particular, homogeneous spaces, those of cohomogeneity $0$, have been classified by Berger \cite{B1}, B\'erard-B\'ergery \cite{B-B}, Aloff and Wallach \cite{AW}, Wallach \cite{W}, Wilking \cite{Wi1}, and Wilking and Ziller \cite{WiZ}.
For cohomogeneity one, closed manifolds of positive sectional curvature have been classified in dimension $5$ by Searle \cite{Se}, in even dimensions by Verdiani \cite{V1, V2}, and in all odd dimensions but $7$ by Grove, Wilking, and Ziller \cite{GrWiZ}. Additionally, a list of possible candidates for dimension $7$ is given in Grove, Wilking, and Ziller. They are grouped into one isolated $7$-manifold, $R$, and two infinite families, $P_k$ and $Q_k$, with $P_1=S^7$ \cite{GrWiZ}. These $7$-dimensional candidates correspond to the total space of the Konishi bundle of the self dual Hitchin orbifold $\mathcal{O}_k$, see Hitchin \cite{H}. Of these candidates, a metric of positive curvature was found to exist on $P_2$, an exotic $T^1S^4$, homeomorphic, but not diffeomorphic, to the unit tangent bundle of the $4$-sphere, independently by Dearricott \cite{D}, and Grove, Verdiani, and Ziller \cite{GrVZ}. Verdiani and Ziller \cite{VZ} have also shown that $R$ does not admit a $G$-invariant cohomogeneity one metric of positive sectional curvature. More recently, Dearricott \cite{D2} claims to have shown that the remaining $P_k$ and all of the $Q_k$ do admit metrics of positive curvature, although none admit a $G$-invariant cohomogeneity one metric of positive curvature.
When working with group actions, one may divide them into those that are {\em polar}, namely, those that admit a section, a closed, totally geodesic immersed submanifold that meets all orbits orthogonally, and those that are {\em non-polar}. Note that all transitive actions and all actions of cohomogeneity one are polar, but there exist cohomogeneity two actions on spheres that are non-polar.
\begin{example} Viewing $\mathbb{R}^3$ as the set of self-adjoint $2 \times 2$ complex matrices, $$\mathbb{R}^3\cong V=\left\{X =\begin{pmatrix} x_1 & x_2 + ix_3 \\ x_2-ix_3 & -x_1 \end{pmatrix}; x_1, x_2, x_3 \in \mathbb{R}\right\},$$ we obtain an action of $U(2)$ action on $S^6\subset \mathbb{R}^3\oplus \mathbb{C}^2$, as follows. Let $A\in U(2)$ act by conjugation on $X\in V$ and by matrix multiplication on $(z_1, z_2)\in \mathbb{C}^2$. The quotient space $S^6/U(2)$ is homeomorphic to a $2$-disk, $D^2$, with a circle's worth of singular orbits with $T^1$-isotropy corresponding to $\partial D^2$ and an isolated vertex orbit with isotropy $T^2$ corresponding to a vertex on $\partial D^2$, see Bredon \cite{Br}. For more examples of non-polar cohomogeneity two actions on spheres, see work of Straume \cite{Str2, Str3}. \end{example}
In Fang, Grove, and Thorbergsson \cite{FGrT}, they show that a closed, simply-connected, Riemannian $n$-manifold $M^n$ of positive curvature with a polar $G$-action of cohomogeneity $\geq 2$ is equivariantly diffeomorphic to a compact rank one symmetric space (CROSS) with the corresponding linear $G$-action. Thus, in order to classify the remaining cohomogeneities up to $n-1$, it remains to consider those actions that are non-polar. While the quotient spaces of polar actions on manifolds of positive curvature necessarily have boundary by work of Wilking \cite{Wi3}, a result later generalized to singular Riemannian foliations by Corro and Moreno \cite{CoMo}, the quotient spaces of non-polar actions may or may not have
boundary.
\begin{example} Some of the best known examples of positively curved manifolds are obtained as the base of a Riemannian submersion and correspond to the quotient space of a non-polar action, for example, $\ccc\mathrm{P}^n$ and $\mathbb{H}\mathrm{P}^k$ are both quotients of the Hopf action by $S^1$ and by $S^3$, respectively, on a sphere. Figure \ref{Hopf} depicts the Hopf fibration of $S^3$.\footnote{Image obtained from \url{https://en.wikipedia.org/w/index.php?title=Hopf_fibration&oldid=1101601722}} \end{example} \begin{figure}
\caption{Stereographic Projection of The Hopf fibration of $S^3$ by $S^1$ and the Projection of its Fibers onto $S^2$}
\label{Hopf}
\end{figure} A natural next step would be to tackle the following problem: \begin{problem} Classify non-polar manifolds of positive curvature of low cohomogeneity, beginning with cohomogeneity two. \end{problem} \subsection{Non-negative sectional curvature}
As mentioned earlier, we can put a metric of non-negative sectional curvature on any homogeneous space $G/H$. In contrast, closed, simply-connected, cohomogeneity one manifolds of non-negative curvature have not yet been classified. They naturally admit a $G$-invariant disk bundle decomposition over each of the two singular orbits. While both disk bundles admit a $G$-invariant metric of non-negative sectional curvature, in general, their union does not. Grove, Verdiani, Wilking, and Ziller \cite{GrVWiZ} showed that some odd-dimensional exotic spheres, while they admit cohomogeneity one actions, do not admit such metrics. Later, C. He \cite{He} showed that a larger class of manifolds that includes those in the work of Grove, Verdiani, Wilking, and Ziller \cite{GrVWiZ} do not admit a cohomogeneity one metric of non-negative curvature. While closed, simply-connected, manifolds admitting cohomogeneity one actions may not admit invariant metrics of non-negative curvature, they do admit invariant metrics of almost non-negative curvature by work of Schwachh\"ofer and Tuschmann \cite{SchTu}. We say that a manifold, $M^n$, is {\em almost non-negatively curved} if there exists a sequence of metrics, $\{g_n\}$ on $M$ and a fixed $D>0$ so that
$ \diam(M,g_{n }) \leq D$ and
$\sec(M,g_n) \geq -\frac{1}{n^2}$.
Moving on to cohomogeneity two actions, we see already
in dimension $4$ that there are examples that do not admit non-negative curvature, such as $\ccc\mathrm{P}^2\#\ccc\mathrm{P}^2\#\ccc\mathrm{P}^2$ with a $T^2$ isometric and effective action.
For a fibration, it is known that if any two elements of the fibration are rationally $\Omega$-elliptic, then so is the third. Similarly, as observed by Grove and Halperin \cite{GrH}, for a manifold decomposing as a union of disk bundles \[M=D(A)\cup_E D(B),\] if one of $A$, $B$, or $E$ is rationally $\Omega$-elliptic (and hence all are, as the common boundary, $E$, is a sphere bundle over both $A$ and $B$), then $M$ is. Compact Lie groups are known to be rationally $\Omega$-elliptic. Moreover, a cohomogeneity one manifold decomposes as a union of disk bundles over its two singular orbits, glued along the principal orbit. Thus one sees that a closed, simply-connected manifold that is homogeneous or of cohomogeneity one is rationally elliptic, regardless of curvature.
Grove and Halperin proposed that the Bott Conjecture will continue to hold for manifolds of almost non-negative curvature.
While classifying non-negatively curved manifolds of cohomogeneities $\geq 1$ seems out of reach at the moment, in light of the Bott Conjecture, asking a different question seems more tractable. \begin{question} Let $M$ be a closed, simply-connected, almost non-negatively curved manifold. If $M$ admits an isometric $G$-action of low cohomogeneity, is $M$ rationally elliptic? \end{question} This question has already been answered affirmatively for cohomogeneity two manifolds of almost non-negative curvature, by Grove, Wilking, and Yeager \cite{GrWiY}. Part of the proof of this result has been considerably simplified by recent work of Khalili Samani and Radeschi \cite{KSR} on {\em singular Riemannian foliations}, an area of study which can be viewed as a generalization of the concept of a group action.
\section{Large Fixed Point Set}
Here we survey results for $G$-actions on manifolds with large fixed point sets. Once again, one of the main strategies employed is to leverage an understanding of the quotient space to obtain general structure theorems that potentially lead to classification theorems. \subsection{Positive Curvature} An important first example of how large fixed point sets in positive curvature may limit the group action under consideration is given by Frankel's theorem, which tells us fixed point set components of sufficiently large dimension are unique in positive curvature. \begin{FT} Let $M^n$ be an $n$-dimensional closed Riemannian manifold admitting a metric of positive sectional curvature. Suppose that $N^{k_1}_1$ and $N^{k_2}_2$ are two totally geodesic, embedded submanifolds of $M$. Then if $k_1+k_2\geq n$, $$N_1\cap N_2\neq \emptyset.$$ \end{FT} We now observe that the dimension of the quotient space, $M/G$, is constrained by the dimension of the fixed point set $M^G$ of $G$ in $M$. In fact, $\textrm{dim} (M/G)\geq \textrm{dim}(M^G) +1$ for any non-trivial, non-transitive action. In light of this, the {\it fixed-point cohomogeneity} of an action, denoted by $\textrm{cohomfix}(M;G)$, is defined by \[ \textrm{cohomfix}(M; G) = \textrm{dim}(M/G) - \textrm{dim}(M^G) -1\geq 0. \] A manifold with fixed-point cohomogeneity $0$ is also called a {\it $G$-fixed point homogeneous manifold}.
In Grove and Searle \cite{GrS2},
they combine the critical point theory for distance functions introduced by Grove and Shiohama \cite{GrSh}
(see also a survey on the subject by Grove \cite{Gr})
and the Soul Theorem for Alexandrov spaces, to prove that for positively curved fixed point homogeneous manifolds there are at most three orbit types. These are given by the principal orbits, the fixed points contained in $F$, the fixed point set component of dimension equal to $\textrm{dim}(M/G)-1$, and the unique orbit at maximal distance from the fixed point set component $F$. They use this to obtain a $G$-equivariant double disk bundle decomposition of the manifold, namely $M$ decomposes as $$M=D(F)\cup D(G(x)),$$ where $G(x)$ is the orbit in $M$ at maximal distance from $F$. They then use this decomposition to classify closed, simply-connected fixed point homogeneous manifolds of positive curvature, proving that for connected $G$, such a manifold is equivariantly diffeomorphic to a CROSS with a linear $G$-action.
This theorem was extended to the case of closed, simply-connected, fixed point cohomogeneity one manifolds of positive curvature by Grove and Kim \cite{GrK}, who showed that such a manifold is also diffeomorphic to a CROSS. The fixed point homogeneous result has also been generalized to the case of involutions: Fang and Grove \cite{FGr} showed that closed, positively curved, $\mathbb{Z}_2$-fixed point homogeneous manifolds are diffeomorphic to spheres and real projective spaces.
\subsection{Non-negative curvature}
For the class of closed, simply-connected non-negatively curved manifolds, the fixed point homogeneous results were generalized to non-negative curvature in low dimensions by Galaz-Garc\'ia \cite{G-G} and by Galaz-Garc\'ia and Spindeler \cite{G-GSp}. In his thesis, Spindeler \cite{Spi} was then able to fully generalize the disk bundle decomposition result in the following theorem. \begin{theorem}[Spindeler]\label{Spindeler} \cite{Spi} Let $G$ act fixed point homogeneously on a closed, non-negatively curved Riemannian manifold $M$. Let $F$ be a fixed point component of maximal dimension. Then there exists a smooth submanifold $N$ of $M$, without boundary, such that $M$ is diffeomorphic to the normal disk bundles $D(F)$ and $D(N)$ of $F$ and $N$ glued together along their common boundaries; \begin{displaymath} M = D(F) \cup_{E} D(N). \end{displaymath} Further, $N$ is $G$-invariant and all points of $M \setminus \{F \cup N\}$ belong to principal $G$-orbits. \end{theorem}
By contrast with the positively curved case, the submanifold $N$ at maximal distance from $F$ is not in general a single orbit.
As noted by Spindeler, a classification of fixed point homogeneous manifolds of non-negative curvature in higher dimensions is currently out of reach since it is equivalent to a classification of non-negatively curved manifolds. To see this, observe that for $N$, a closed, simply-connected non-negatively curved manifold, the product manifold $M^{n}=N^{n-2}\times S^2$ with the product metric admits an isometric $S^1$-fixed point homogeneous action. Since we only have a classification of closed, simply-connected non-negatively curved manifolds through dimension $3$, obtaining a classification in dimension $5$ is the best we can hope for at the moment.
\section{Large Symmetry and Discrete Symmetry Rank}
In this section we discuss two notions of large rank. The first is large {\em symmetry rank}, where the symmetry rank of a $G$-action on $M$ is defined to be the rank of the isometry group of $M$, that is $$\textrm{symrk}(M)=\textrm{rk}(\Isom(M)).$$ The second is large {\em discrete $p$-symmetry rank}, for $p$ a prime, defined to be the largest number $k$ such that the isometry group of $M$ contains an elementary $p$-group of rank $k$. In particular, we will focus on the case where $G$ is abelian. In contrast to the other two types of ``large" group actions, here strategies balance a mix of knowledge of the quotient space with more general connectedness principles, which lead to structure theorems for topological invariants of the manifold.
\subsection{Positive Curvature and Large Symmetry Rank}
Three fundamental results in this direction are the Maximal, Almost Maximal and Half-Maximal Symmetry Rank theorems due to Grove and Searle \cite{GrS1},
Rong \cite{Ro}
and Fang and Rong \cite{FRo},
and Wilking \cite{Wi2},
respectively. We present them together in one theorem.
\begin{theorem}\label{sr} Let $M^n$ be a closed, positively curved manifold admitting an isometric and effective $T^k$-action. Then the following are true:
\begin{enumerate}
\item[1] \label{1} {\bf Maximal Symmetry Rank} (Grove and Searle
\cite{GrS1}) If $k \geq \tfrac{n}{2}$, then $k=\lfloor \tfrac{n+1}{2}\rfloor$ and $M^n$ is diffeomorphic to $S^{2k}$, $\mathbb{R}\mathrm{P}^{2k}$, $\ccc\mathrm{P}^{k}$, or $S^{2k-1}/\mathbb{Z}_l$ for some $l \geq 3$;
\item[2] {\bf Almost Maximal Symmetry Rank}
(Rong \cite{Ro}, and Fang and Rong \cite{FRo})
\label{2} If $n\neq 6, 7$, $\pi_1(M)=0$, and $k= \lfloor\tfrac{n-1}{2}\rfloor$, then $M^n$ is homeomorphic to $S^{n}$, $\ccc\mathrm{P}^{n/2}$, or $\mathbb{H}\mathrm{P}^{2}$ for $k=3$ only; and
\item[3] {\bf Half-Maximal Symmetry Rank}
(Wilking \cite{Wi2})
\label{3} If $n \geq10$, $\pi_1(M)=0$, and $k \geq {\frac{n}{4}} + 1$, then $M^n$ is homeomorphic to $S^n$ or $\mathbb{H}\mathrm{P}^{k-1}$ with $k = \frac{n}{4} + 1$, or homotopy equivalent to $\ccc\mathrm{P}^{n/2}$.
\end{enumerate}
\end{theorem} Observe first that the Maximal Symmetry Rank result can be improved to equivariant diffeomorphism with a linear $T^k$ action by work of Galaz-Garc\'ia \cite{G-G1}. Additionally, by recent work of Kennard, Khalili Samani, and Searle \cite{KKSS}, the Half-Maximal Symmetry Rank result can be improved as follows: dropping the hypothesis of simple-connectivity, one can show that the only additional manifolds that occur are homotopy equivalent to $\rrr\mathrm{P}^n$ and lens spaces of dimension $2k-1$, where $k=\frac{n}{4} + 1$.
Some comments on the proofs are in order, as many results stemming from these have leveraged the same techniques. The proof of the Maximal Symmetry Rank result hinges on the fact that for the maximal symmetry rank, one can always find a circle subgroup of the $T^k$ acting fixed point homogeneously. The Almost Maximal Symmetry Rank result follows from the Half-Maximal Symmetry Rank result and relies on Sullivan's homeomorphism classification of homotopy complex projective spaces and an analysis of the singular set of the group action to improve the classification of the Half-Maximal Symmetry Rank result to homeomorphism, as well as extend the result to dimensions $8$ and $9$. The proof of the Half-Maximal Symmetry Rank result utilizes the theory of Error Correcting Codes, which give information about the dimensions of fixed point sets of involutions, and the following Connectedness Lemma of Wilking \cite{Wi2}.
\begin{theorem}[Wilking]\cite{Wi2} Let $M^n$ be a closed Riemannian manifold with positive sectional curvature.
If $N^{n-k}$ is a closed, totally geodesic submanifold of $M$, then the inclusion map $N^{n-k}\hookrightarrow M^n$
is $(n-2k+1)$-connected. \end{theorem}
Recall that a map $f\colon N\rightarrow M$ is called {\em $m$-connected}, if $\pi_i(f):\pi_i(N)\rightarrow \pi_i(M)$ is an isomorphism for all $1\leq i\leq m-1$ and $\pi_m(f)$ is surjective. Using the Hurewicz isomorphism one can then make a similar statement about homology (and cohomology) groups. In Conclusion 3 of Theorem \ref{sr}, the Connectedness Lemma is leveraged by combining Poincar\'e duality of $N$ with that of $M$: one obtains a certain periodicity of the cohomology ring of $M$, which in turn can be used to obtain homotopy equivalence. These results have led to significant progress on both Hopf Conjectures.
As mentioned in the Introduction, for the first Hopf Conjecture, the seminal result of Hsiang and Kleiner established that in the presence of circle symmetry the conjecture is true.
For the higher dimensional Hopf conjecture, which states that the product of two positively curved manifolds does not admit positive curvature, Amann and Kennard \cite{AKe} have shown that given $M$, a closed, simply-connected $n$-manifold, then the $2n$-dimensional product manifold $M\times M$ does not admit a metric of positive
sectional curvature and an isometric torus action of rank $r> \log_{\frac{4}{3}}(2n-3)$.
For the second Hopf Conjecture, the main strategy to prove this conjecture with symmetries has been to use the fact, due to Kobayashi \cite{Ko}, that $$\chi(M)=\chi(M^{S^1}),$$ for some $S^1$ subgroup of an isometric $G$-action.
P\"uttmann and Searle \cite{PuS}
and independently Rong \cite{Ro}
showed that a $2m$-dimensional manifold $M^{2m}$ admitting an isometric $T^k$-action with $k\geq \lfloor\frac{m-1}{2}\rfloor$ has $\chi(M)>0$. This lower bound for $k$ was quickly improved to $\lfloor \frac{m-2}{4}\rfloor$ by Rong and Su \cite{RoSu},
and to $\lfloor \frac{m}{5}\rfloor$ by Su and Wang \cite{SuWa}.
Using the method of Steenrod squares, Kennard \cite{Ke}
was able to improve this lower bound to $\log_{2}(2m-2)$. More recently Kennard, Wiemeler, and Wilking \cite{KeWieWi} found a general lower bound for $k$, completely independent of dimension. \begin{theorem}[Kennard, Wiemeler, and Wilking \cite{KeWieWi}] If $M^{2m}$ is an even-dimensional, connected, closed, positively curved Riemannian manifold whose isometry group has rank at least five, then $\chi(M) > 0$.
\end{theorem}
It is reasonable to expect that the tools used in the proof of this theorem will be useful in many different contexts.
Finally, Nienhaus \cite{Ni} has announced an improvement on this theorem, claiming to be able to lower the bound to $4$.
\subsection{Non-negative Curvature and Large Symmetry Rank}
In strong contrast to the positive curvature case and in part due to the lack of guaranteed fixed point sets of torus actions, the maximal symmetry rank of a closed, simply-connected, non-negatively curved manifold has not yet been established in all dimensions. \begin{msrconj} Let $T^k$ act isometrically and effectively on $M^n$, a closed, simply connected, non-negatively curved Riemannian manifold. Then $k\leq \lfloor 2n/3\rfloor$ and when $k= \lfloor 2n/3\rfloor$, $M^n$ is equivariantly diffeomorphic to $\mathcal{Z}/T^m$ with a linear $T^k$-action, where $$\mathcal{Z}= \prod_{i\leq r} S^{2n_i-1} \times\prod_{i>r} S^{2n_i},$$
with $n_i\geq 2, \,\, r= 2\lfloor 2n/3\rfloor-n, \,\,0 \leq m \leq 2n \mod 3,$ and the $T^m$-action on $\mathcal{Z}$ is free and linear. \end{msrconj} We can then ask the following question.
\begin{problem}\label{msr} Let $\mathcal{M}_0^n$ denote the class of closed, simply-connected, non-negatively curved $n$-manifolds. For all $M\in \mathcal{M}_0^n$ establish the upper bound for the maximal symmetry rank and classify all $M\in \mathcal{M}_0^n$ of maximal and almost maximal symmetry rank. \end{problem}
Returning to the Maximal Symmetry Rank Conjecture, work of Galaz-Garc\'ia and Searle \cite{G-GS1}, Galaz-Garc\'ia and Kerin \cite{G-GK}, and of Escher and Searle \cite{ES1} shows that the conjecture holds in dimensions $n\leq 9$. The upper bound for dimensions less than or equal to $12$ was established by Galaz-Garc\'ia and Searle \cite{G-GS1} and by Escher and Searle \cite{ES1}.
We observe that $n-k$ is the maximal possible rank of an isotropy group of a torus action: the dimension of the unit normal sphere $S^{\perp}_p$ to the orbit $T(p)$ at $p$ is a function of the rank of $T_p$ and $\textrm{rk}(T_p)\leq \lfloor (\textrm{dim}(S^{\perp}_p)+1)/2\rfloor$ by the Maximal Symmetry Rank Theorem, see Figure \ref{msrimage}. Notably, the $n$-manifolds described in Part 2 of the Maximal Symmetry Rank Conjecture all admit $T^k$-actions such that there is a point $x \in M^n$ for which $\textrm{rk}(T_x)=n-k$, or equivalently, $\textrm{dim}(T(x))=2k-n$. We call such actions {\em isotropy-maximal}.
\begin{figure}
\caption{The Range of Possible Ranks for the Isotropy Subgroup of a Torus Action}
\label{msrimage}
\end{figure}
In particular, for an isotropy-maximal $T^k$-action, $M^{T^{n-k}}\neq \emptyset$. In fact, any component of $M^{T^{n-k}}$ is contained in a {\em generalized characteristic submanifold} of $M$. That is, there is some circle subgroup $S^1\subset T^k$ with a codimension two fixed point set component, $F$, that contains a $T^{n-k}$-fixed point set component. When $M^{T^{n-k}}$ is $0$-dimensional, $F$ is simply called a characteristic submanifold. In particular, one sees that an isotropy-maximal $T^k$-action is an example of a nested $S^1$-fixed point homogeneous action, generating a tower of nested fixed point sets of subtori of the $T^k$-action in $M$.
The case where $\textrm{dim}(M^{T^{n-k}})=0$ corresponds to that of {\em torus manifolds}. Wiemeler in \cite{Wie}
classified closed, simply-connected, non-negatively curved torus manifolds, finding that they are all equivariantly diffeomorphic to a quotient of a free linear torus action of $\mathcal{Z}$, as in the Maximal Symmetry Rank Conjecture.
Escher and Searle in \cite{ES1} generalized this result to all isotropy-maximal torus actions on closed, simply-connected, non-negatively curved $n$-manifolds, showing that they are all equivariantly diffeomorphic to a quotient of a free linear torus action of $\mathcal{Z}$, as in the Maximal Symmetry Rank Conjecture. Indeed, if the Bott Conjecture holds, one can combine the isotropy-maximal classification result of Escher and Searle \cite{ES1} with the work of Galaz-Garc\'ia, Kerin, and Radeschi \cite{G-GKR} mentioned in Remark \ref{bottGGKR}, to show that the Maximal Symmetry Rank Conjecture holds.
Since then, Dong, Escher, and Searle in \cite{DES} have extended the result to almost isotropy-maximal torus actions, where an action is {\em almost isotropy-maximal} if there is a point $x \in M$ such that the dimension of its isotropy group is $n-k-1$, or, equivalently there is a point $x \in M$ whose orbit is of dimension $2k-n+1$. In particular, the manifolds obtained are as in the isotropy-maximal classification.
We observe that $n$-manifolds of almost maximal symmetry rank have also been classified in dimensions less than or equal to $6$ by independent work of Kleiner \cite{Kl} and of Searle and Yang \cite{SY} in dimension $4$, by work of Galaz-Garc\'ia and Searle \cite{G-GS2} in dimension $5$ and by work of Escher and Searle \cite{ES2} in dimension $6$. Notably, it is only in dimension $5$ that we observe any difference with the maximal symmetry rank classification, as the Wu manifold, $SU(3)/SO(3)$, which is not the quotient of a linear torus action on a product of spheres of dimensions greater than or equal to three, appears. The work above suggests that one approach to Problem \ref{msr} would be to begin by classifying torus actions on closed, simply-connected, non-negatively curved manifolds via the rank of the largest possible isotropy group, beginning with those that have rank $n-k-2$.
Remark that by work of B\"ohm and Wilking \cite{BoWi}, a closed, simply-connected, non-negatively curved manifold admits a metric of positive Ricci curvature. Thus, another approach when studying the class $\mathcal{M}_0^n$ is to consider the {\em $k$th-intermediate Ricci curvature}, a curvature interpolating between sectional curvature and Ricci curvature. Of natural interest is the case where this curvature is positive.
We say an $n$-dimensional Riemannian manifold $(M^n,g)$ has {\em positive $k$th-intermediate Ricci curvature} for $k\in\{1, \hdots,n-1\}$ if, for any choice of orthonormal vectors $\{u, e_1, \hdots, e_k\}$, the sum of sectional curvatures $\Sigma_{i=1}^k \sec(u, e_i)$ is positive. Observe that $\Ric_1 > 0$ is equivalent to positive sectional curvature, and $\Ric_{n-1} > 0$ is equivalent to positive Ricci curvature. Furthermore, if $\Ric_k > 0$, then $\Ric_l > 0$ for all $l \geq k$.
One advantage to studying such curvature lower bounds is that many of the results for positive sectional curvature, such as the existence of fixed points and Wilking's Connectedness Lemma extend to this curvature bound. A natural starting point to consider symmetries in the presence of positive intermediate Ricci curvature is to consider those with $\Ric_2>0$. Mouill\'e \cite{Mou} in recent work has been able to extend the Maximal Symmetry Rank Theorem to closed, $n$-manifolds with $\Ric_2>0$, showing that the symmetry rank of such manifolds is the same as for the positive curvature case, and moreover obtains a similar classification result.
Finally, while a product of spheres $\prod_{i=1}^m S^{n_i}$ with the product metric has $\Ric_k>0$ for $k\geq \max_{i\in\{1, \hdots, m\}}\{ 1+ \sum_{j\neq i} n_j\}$, there are also metrics of positive $\Ric_{k'}$ with $k'<k$ on such products. In particular, in Example 2.3 of Mouill\'e \cite{Mou}, he shows that one can put $\Ric_2>0$ metrics on $M^6=S^3\times S^3$ with $T^3$ symmetry, as well as $\Ric_2>0$ metrics on quotients of $M^6$ by free torus actions. These results are consistent with the almost maximal symmetry rank classification results mentioned earlier and suggest a connection between closed, simply-connected manifolds of non-negative curvature with large symmetry rank and $\Ric_k>0$ for small $k$. It would be interesting to explore this relationship, if any exists.
\subsection{Positive and Non-negative Curvature and Large Discrete Symmetry Rank}
As mentioned earlier, a $\mathbb{Z}_p^k$-action need not
have fixed points. To guarantee the existence of fixed points, one may, for example, assume that $p$ is larger than $\mathcal{C}_n$, Gromov's estimate for the total Betti number of a closed $n$-manifold.
Work of Yang \cite{Y}
and Hicks \cite{Hi}
showed that for closed, simply-connected, non-negatively curved $4$-manifolds with discrete $p$-symmetry rank $1$ and $2$, with $p$ suitably chosen, one can bound the total Betti number of $M^4$ above by $7$ and $5$, respectively. In higher dimensions, Fang and Rong \cite{FaRo1}
extend the Maximal Symmetry Rank Theorem as follows.
\begin{theorem}[Fang and Rong] \cite{FaRo1}
Let $M^n$ be a closed, simply-connected, positively curved $n$-manifold admitting an isometric and effective $G$-action, with $G=\mathbb{Z}_p^k$, $p>\mathcal{C}_n$, and $k\geq \lfloor \frac{n}{2} \rfloor$. Then the following hold:
\begin{enumerate}
\item If $n=2m$, then $k=m$ and $M^n$ is homeomorphic to $S^n$ or $\ccc\mathrm{P}^m$.
\item If $n=2m+1$ and the $G$ action is extended to an isometric and effective $S^1\times G$-action, then $k=m$ and $M^n$ is homeomorphic to $S^n$.
\end{enumerate}
\end{theorem}
Fang and Rong \cite{FaRo1} also obtain an analog of the Half Maximal Symmetry Rank Theorem for discrete $p$-symmetry rank bounded below by approximately $3n/4$. Further reductions of the discrete $p$-symmetry rank for $p>\mathcal{C}_n$ have resulted in confirmation of the second Hopf Conjecture for this class of manifolds by Rong and Su \cite{RoSu}, as well as work by Wang \cite{Wa}, extending work of Rong \cite{Ro2} showing that $\pi_1(M^n)$ is cyclic provided $\textrm{symrk}(M^n)>\frac{n}{4}+1$, to $T^1\times \mathbb{Z}_p^k$-actions with $k>\frac{n+1}{4}$.
More recently, Kennard, Khalili Samani, and Searle in \cite{KKSS} consider positively curved Riemannian manifolds admitting an isometric action of $\mathbb{Z}_2^r$ with a fixed point. They extend the Maximal Symmetry Rank results of Grove and Searle \cite{GrS1} and the Half-Maximal Symmetry Rank results of Wilking \cite{Wi2} to obtain the following three results, noting that the first result is an easy consequence of the work of Fang and Grove in \cite{FGr}.
\begin{theorem}[Kennard, Khalili Samani, and Searle \cite{KKSS}]\label{thm:n} Let $M^n$ be a closed, positively curved manifold such that $\mathbb{Z}_2^r$ acts isometrically on $M$ with $x\in M^{\mathbb{Z}_2^r}$. Then the following hold: \begin{enumerate} \item\label{11} If $r \geq n$, then $r=n$ and $M$ is equivariantly diffeomorphic to $S^r$ or $\mathbb{R}{\mathrm P}^r$ with a linear $\mathbb{Z}_2^r$-action. \item\label{21} If $n\geq 24$ and $r \geq \tfrac{n+1}{2}$, then $M$ is homeomorphic to $S^n$, or homotopy equivalent to $\mathbb{R}{\mathrm P}^n$, $\mathbb{C}{\mathrm P}^{r-1}$, or $S^{2r-1}/\mathbb{Z}_k$ for some $k \geq 3$ and $r=\lceil \tfrac{n+1}{2}\rceil$. \item\label{31} If $n \geq 15$ and $r \geq \tfrac{n+3}{4} \,+ \,1$, then at least one of the following occurs:
\begin{enumerate}
\item For any subgroup of $\mathbb{Z}_2^r$ with corank at most four, the fixed point set component $F^m$ at $x$ is homotopy equivalent to $S^m$, $\mathbb{R}{\mathrm P}^m$, $\mathbb{C}{\mathrm P}^{\frac m 2}$, or $S^m/\mathbb{Z}_k$ for some $k \geq 3$; or
\item $M^n$ is a simply connected integer cohomology $\mathbb{H}{\mathrm P}^{r-2}$ and $r=\tfrac{n}{4} +2$.
\end{enumerate}
\end{enumerate} \end{theorem}
In Figure \ref{bothmsr} below, the Torus and $\mathbb{Z}_2$-Torus Symmetry Rank results are displayed graphically for closed, simply-connected, positively curved manifolds. \begin{figure}
\caption{The Torus and $\mathbb{Z}_2$-Torus Symmetry Rank Theorems for simply-connected $M$}
\label{fig:image-a}
\label{fig:image-b}
\label{bothmsr}
\label{msrt}
\end{figure} The proof of Conclusion \ref{11} relies on understanding the quotient space of $M$ by an involution fixing a codimension $1$ submanifold. On the other hand, the techniques used to prove Conclusion \ref{21} include Wilking's Connectedness Lemma, and refined estimates for Error Correcting Codes, and an inductive argument. Conclusion \ref{31} however, is not proven by induction and relies heavily on the Borel Formula.
The following question, albeit ambitious, is natural, given the path taken with torus actions to achieve the recent result of Kennard, Wiemeler, and Wilking. \begin{question} How much of the work in Kennard, Wiemeler, and Wilking \cite{KeWieWi} can be adapted to the case of $\mathbb{Z}_2^k$-actions with $M^{\mathbb{Z}_2^k}\neq\emptyset$? \end{question}
Since $\chi(M)\equiv \chi(M^{\mathbb{Z}_2^k})\mod 2$, one does not expect to achieve an analog of their Euler characteristic result, however, other important results in Kennard, Wiemeler, and Wilking \cite{KeWieWi} may yield analogs for the $\mathbb{Z}_2$ case.
\end{document}
\begin{document}
\title{Randomized control of open quantum systems}
\begin{abstract} The problem of open-loop dynamical control of generic open quantum systems is addressed. In particular, I focus on the task of effectively switching off environmental couplings responsible for unwanted decoherence and dissipation effects. After revisiting the standard framework for dynamical decoupling via deterministic controls, I describe a different approach whereby the controller intentionally acquires a random component. An explicit error bound on worst-case performance of stochastic decoupling is presented. \end{abstract}
\section{Introduction}
The need for accurately controlling the dynamics of a quantum-mechanical system is central to a variety of tasks ranging across contemporary physics, engineering, and information sciences~\cite{blaquerie,brumer,NC}. In particular, motivated by both continuous experimental advances in nanoscale devices and the challenge to practically implement fault-tolerant quantum information processing, control strategies for {\em open} quantum systems undergoing realistic irreversible dynamics~\cite{BF} play an increasingly prominent role.
Dynamical decoupling techniques offer a versatile control toolbox for open quantum-system engineering~\cite{VL,VKL99,Viola02}. In its essence, a decoupling protocol consists in a sequence of {\em open-loop} transformations on the target system (control pulses in the simplest setting), designed in such a way that the effect of unwanted dynamics is coherently averaged out in the resulting controlled evolution. Applied to the removal of unwanted couplings between the target system and its surrounding environment, this paves the way to a general strategy for decoherence control and error-suppressed quantum computation purely based on unitary control means.
Both within formulations of the decoupling problem and more general coherent-control settings, the restriction to purely {\em deterministic} control fields has provided a most natural starting point. In a way, this finds ample justification in the fact that non-deterministic effects (such as stochastic noise and/or random control imperfections) typically deteriorate system performance, motivating the effort for designing intrinsically robust decoupling schemes~\cite{VK} and for assessing open-loop fault-tolerance thresholds~\cite{kaveh}. Yet, no fundamental reasons exist for not lifting such a restriction, by {\em purposefully allowing stochasticity} in the underlying control design. Besides being conceptually intriguing on its own, it is worth recalling that notable examples may be found of situations where noise and randomness might have a beneficial rather than detrimental effect. Of special relevance are phenomena like the self-averaging of intermolecular interactions in gases and liquids via random microscopic motions~\cite{Haeberlen} and quantum stochastic resonance~\cite{SR}, or the idea of dissipation-assisted quantum computation~\cite{beige}.
A first step toward exploring randomized quantum control was recently taken by Viola and Knill~\cite{VK05}, confirming in principle the possibility of enhanced system performance as compared to deterministic control in relevant scenarios. It is the purpose of this paper to further elucidate the random decoupling framework, by first presenting a general control-theoretic formulation and contrasting it to the standard deterministic one (Section II), and then discussing in detail a quantitative error bound on stochastic control performance (Section III). Final remarks conclude in Section IV.
\section{Formulation of the control problem} \subsection{Quantum-control systems}
The standard {open-loop} control problem for an isolated, {\em closed} quantum system $S$ defined on a state space ${\cal H}_S$ of dimension $d_S <\infty$ is described (in units where $\hbar=1$) by a bilinear control system of the form~\cite{BS} \begin{eqnarray} {d U(t) \over dt}&\hspace*{-1mm}=\hspace*{-1mm}& -i \Big( H_0 + H_c (t) \Big) U(t)\:, \nonumber \\ H_c(t)&\hspace*{-1mm}=\hspace*{-1mm}&\sum_{\ell=1}^m H_\ell u_\ell (t)\:. \label{bil} \end{eqnarray} Here, $U(t)$ is the evolution operator (or {\em propagator}) of the system, whereas $H_0\equiv H_S$, $H_\ell$ represent the internal (or {\em drift}) Hamiltonian, and the applied control Hamiltonians, respectively. Both $H_0$ and the $H_\ell$ are Hermitian operators on ${\cal H}_S$ which, without loss of generality, may be assumed to be traceless. The time dependence of the overall control Hamiltonian $H_c(t)$ is modeled through the real functions $u_\ell(t)$, which typically represent electromagnetic fields and are the control inputs of the problem. A broad separation between {\em deterministic} and {\em stochastic} control systems may be drawn depending on whether each control input is a deterministic function of time or some randomness is allowed for at least one input. The state of $S$ is described in general by a Hermitian, positive operator $\rho_S$ on ${\cal H}_S$, normalized with respect to the trace norm in such a way that
${\rm tr}_S (\rho_S)=1$. In what follows, I will assume that $S$ is initially in a {\em pure state}, described by a one-dimensional projector $\pi_S$ of the form $\pi_S=|\psi\rangle \langle \psi|$, with
$|\psi\rangle \in {\cal H}_S$.
It is convenient to focus directly on the {\em control propagator} $U_c(t)$ as the basic object for control design, \begin{equation} {U}_c(t)={\cal T} \hspace*{-.5mm} \exp\left \{ -i \hspace{-0.5mm} \int_0^t du {H}_c(u) \right\} \:, \end{equation} where the symbol ${\cal T}$ denotes as usual time ordering. By effecting a canonical transformation to a time-dependent frame that continuously follows the applied control, \begin{equation} \tilde{\rho}_S(t) = U^\dagger_c(t) \rho_S(t) U_c(t)\:, \end{equation} the explicit action of the control field is removed from the dynamics. The control problem of Eq.~(\ref{bil}) takes the form \begin{eqnarray} {d \tilde{U}(t) \over dt}&\hspace*{-1mm}=\hspace*{-1mm}& -i \tilde{H}(t) \tilde{U}(t)\:, \nonumber \\ \tilde{H}(t)&\hspace*{-1mm}=\hspace*{-1mm}& U_c^\dagger (t) H_0 U_c(t)\:, \label{log} \end{eqnarray} in terms of the propagator $\tilde{U}(t)$ for the transformed state, \begin{equation} \tilde{\rho}_S(t)= \tilde{U}(t) \tilde{\rho}_S(0) \tilde{U}^\dagger (t)\:, \hspace{3mm} \tilde{U}(t)={U}_c^\dagger (t) {U}(t)\:. \end{equation} I will refer to the formulations of Eqs.~(\ref{bil}), (\ref{log}) as {\em physical} and {\em logical} frame formulations, respectively. While from the mathematical point of view the logical frame description has the disadvantage of being highly non-linear in the control inputs, Eq.~(\ref{log}) makes it very convenient to directly map properties of the desired effective evolution back into design constraints for $U_c(t)$, and vice versa.
If the control strategy is {\em cyclic}, that is $U_c(t+T_c)=U_c(t)$ for $T_c>0$, and $H_0$ is time-independent as assumed so far, the periodicity of the control field is transferred to the logical Hamiltonian $\tilde{H}(t)$, and an exact representation of the controlled evolution in terms of {\em average Hamiltonian theory} exists~\cite{VKL99,Haeberlen}, \begin{equation} \tilde{U}(t)=e^{-i \overline{H}t}\:, \hspace{3mm} \overline{H}=\sum_{\kappa=0}^\infty \overline{H}^{(\kappa)} \:, \end{equation} each term $\overline{H}^{(\kappa)}$ being computed from the Magnus series for $\tilde{H}(t)$. As it turns out, the logical formulation is also particularly useful in situations where the control strategy directly incorporates {\em symmetry} criteria.
For a realistic {\em open} quantum system, the influence of the surrounding environment may modify the dynamics in two important ways. $(i)$ $S$ may couple to a {\em classical} environment, effectively resulting in a (possibly random) time-dependent modification of the system parameters, in particular $H_S \mapsto H_S(t)$. Deterministic time-dependent quantum control systems have been recently investigated in~\cite{Clark05}. $(ii)$ $S$ may couple to a {\em quantum} environment $E$, that is a second quantum subsystem defined on a state space ${\cal H}_E$ of dimension $d_E \gg d_S$ and characterized by an internal Hamiltonian $H_E$. Let ${\bf I}_{S,E}$ denote the identity operator on ${\cal H}_{S,E}$, respectively. The drift Hamiltonian $H_0(t)\equiv H_{SE}(t)$ of a general open quantum system may then be expressed as \begin{equation} H_0(t)=H_S(t) \otimes {\bf I}_E + {\bf I}_S \otimes H_E + \sum_a J_a (t) \otimes B_a \:, \label{totaldrift} \end{equation} where the $B_a$'s are linearly independent environment operators and, without loss of generality, we may assume the coupling operators (or {\em error generators}) to be traceless. In typical situations, both the exact time dependence of $H_S(t)$ and $J_a(t)$, as well as the exact form of $H_E, B_a$ are unknown. If $\rho_{SE}(t)$ denotes the {\em joint} state of the composite $S, E$ system, the evolution of $S$ alone is now described by the {\em reduced} state obtained by a partial trace over $E$, \begin{equation} \rho_S(t)= {\rm tr}_E ( \rho_{SE}(t)) \:. \label{ptrace} \end{equation} In general, the evolution of an initially pure state $\pi_S$ of $S$ under the Hamiltonian (\ref{totaldrift}), followed by $(i)$ the ensemble average over the resulting time histories and/or $(ii)$ the partial trace (\ref{ptrace}), results in a {\em mixed} state of $S$, ${\rm tr}(\rho_S^2(t))<1$. 
This implies genuinely {\em non-unitary}, irreversible dynamics for $S$, which physically accounts for quantum decoherence and dissipation effects~\cite{BF}.
For an open system, a control problem {\em formally} similar to (\ref{bil}) may still be formulated for the combined propagator $U(t)$ of $S$ plus $E$, provided that the action of the controller is explicitly restricted to the {\em system variables only} that is, \begin{equation} H_c(t) \equiv H_c(t)\otimes {\bf I}_E\:, \hspace{3mm} U_c(t) \equiv U_c(t)\otimes {\bf I}_E \:. \end{equation} Two frame transformations may be relevant in the open system context. The transformation to a logical frame, which explicitly removes the applied control Hamiltonian, is effected as before, \begin{equation} \tilde{\rho}_{SE}(t) = U^\dagger_c(t) \rho_{SE}(t) U_c(t)\:, \end{equation} leading to a control problem formally similar to (\ref{log}), with \begin{eqnarray} \tilde{H}_{SE} (t) &\hspace*{-1mm}=\hspace*{-1mm}& \left[ U_c^\dagger (t) H_S(t) U_c(t)\right] \otimes {\bf I}_E + {\bf I}_S \otimes H_E + \nonumber \\
&\hspace*{-1mm}+\hspace*{-1mm}& \sum_a \left[ U_c^\dagger (t) J_a (t) U_c(t) \right] \otimes B_a \:. \label{lham} \end{eqnarray} If a formulation which also removes the evolution due to $H_E$ is needed, a simultaneous canonical transformation to a logical interaction frame is effected on the environment variables, \begin{equation} \tilde{\rho}'_{SE}(t) = U_E^\dagger(t) \tilde{\rho}_{SE}(t) U_E(t) \:,\hspace{3mm} U_E(t)=e^{-i H_E t}\:. \end{equation} The corresponding propagator $\tilde{U}'(t)$ still satisfies an equation similar to (\ref{log}), where now \begin{eqnarray} \tilde{H}'_{SE} (t) &\hspace*{-1mm}=\hspace*{-1mm}& \left[U_c^\dagger (t) H_S(t) U_c(t) \right]\otimes {\bf I}_E + \label{hp} \\
&\hspace*{-1mm}+\hspace*{-1mm}& \sum_a \left[U_c^\dagger (t) J_a (t) U_c(t)\right] \otimes \left[U_E^\dagger (t) B_a U_E (t) \right] \:. \nonumber \end{eqnarray} The various propagators are related to each other as follows: \begin{equation} U(t)=U_c(t) \tilde{U}(t)= U_c(t)U_E(t)\tilde{U}'(t)\:. \label{relation} \end{equation}
\subsection{Control tasks and performance indicators}
A {\em dynamical control} problem may be regarded as a steering problem for the evolution operator of the target system in the appropriate frame. For an open system, a task of critical importance is decoherence control, which effectively requires the suppression of the error generators $J_a(t)$. In particular, a {\em decoupling problem} consists in determining a control configuration $\{H_\ell, u_\ell(t)\}$ such that for a given evolution time $T>0$ the joint propagator factorizes e.g., \begin{equation} \tilde{U}(T)= \tilde{X}_S (T) \otimes U_E(T)\:, \label{dec0} \end{equation} in the logical frame, $\tilde{X}_S(T)$ being a unitary operator on $S$. Notice that Eq. (\ref{dec0}) implies decoupling in the physical frame as well. The simplest decoupling objective, on which I will focus henceforth, corresponds to identity design on $S$ (the so-called {\em no-op} gate in quantum computation terminology~\cite{NC}, or complete decoupling or annihilation in decoupling terminology~\cite{VKL99,Viola02}), whereby \begin{equation} \tilde{U}(T)= {\bf I}_S \otimes U_E(T)\:. \label{dec1} \end{equation}
If both $H_S$ and the $J_a$ are constant in time, and $U_c(t)$ is periodic, then the logical Hamiltonian (\ref{lham}) is also periodic and the above equation, once fulfilled at time $T=T_c$, remains valid for arbitrary times $T_N=N T_c$, $N \in {\bf N}$. Under these conditions, the logical and physical frames overlap for every $N$, and the controlled evolution reads as \begin{equation} \tilde{\rho}_S(T_N)=\rho_S(T_N)=\rho_S(0)=\pi_S=
|\psi\rangle\langle \psi|\:. \end{equation} Thus, arbitrary initial states of $S$ are {\em stroboscopically preserved} in both the logical and the physical frames. If either $H_S$ or $J_a$ are time-varying, and/or the control strategy is {\em acyclic}, it is still meaningful to require that \begin{equation} \tilde{\rho}_S(T)=\rho_S(0)=\pi_S \:, \hspace{3mm} T>0, \;\forall \pi_S\:. \label{state} \end{equation} For stochastic control, the above objective is further relaxed to {\em average state preservation in the logical frame} that is, \begin{equation} {\bf E} \left\{ \tilde{\rho}_S(T)\right\} =\rho_S(0)=\pi_S \:, \hspace{3mm} T>0\:,\;\forall \pi_S\:, \label{avstate} \end{equation} with ${\bf E}\{\:\}$ denoting ensemble expectation. Clearly, control schemes involving random operations are intrinsically acyclic, the control path practically never returning the system to the physical frame. If, however, the past control trajectory is recorded, this may be exploited to bring the state of $S$ back to the physical frame at any time if desired.
In order to quantify the accuracy of a given control procedure at achieving the intended objective, suitable performance indicators are needed. Let $\pi^\perp_S={\bf I}_S-|\psi\rangle\langle \psi|$ denote the orthogonal complement of $\pi_S$ in ${\cal H}_S$. Then the above task (\ref{avstate}) is achieved if and only if, on average, the logical (reduced) state of the system has zero component along $\pi^\perp_S$ (irrespective of the state of the environment). This naturally suggests to consider, for each pure initial state $\pi_S$, the following {\em a priori error probability}, \begin{equation} \epsilon_T(\pi_S)={\bf E}\left\{ {\rm tr}_S \left( \pi^\perp_S \tilde{\rho}_S(T)\right)\right\}\:. \label{psep} \end{equation} Note that $\epsilon_T(\pi_S)\geq 0$ for all $\pi_S$ follows from the fact that both $\pi^\perp_S $ and $\tilde{\rho}_S(T)$ are Hermitian semi-positive definite operators. A {\em worst-case pure state error probability} may then be defined by maximizing over pure states that is, \begin{equation} \epsilon_T= \mbox{Max}_{\pi_S \in {\cal H}_S} \left\{ \epsilon_T(\pi_S)\right\} \:. \label{ep} \end{equation}
\subsection{Control assumptions and group-theoretical design}
Control design is strongly influenced by the class of available controls. A particularly simple scenario is provided by so-called {\em quantum bang-bang controls}~\cite{VL,VKL99}, whereby the control inputs $u_\ell(t)$ are able to be turned on and off impulsively with unbounded strength, so as to implement sequences of effectively instantaneous control pulses. While such idealized assumptions must (and can~\cite{VK}) be significantly weakened for realistic applications, the bang-bang setting provides the most convenient starting point for discussing stochastic schemes.
Pictorially, it is helpful to visualize a control protocol in terms of the path that $U_c(t)$ follows in the space of unitary transformations on $S$. For bang-bang controls, such a path is described as a piecewise constant time dependence, with jumps between consecutive values corresponding to the application of an instantaneous control kick. In particular, a large class of decoupling schemes may be obtained by constraining such values to belong to a discrete subgroup ${\cal G}$ of unitary operators, the so-called {\em decoupling group}~\cite{VKL99}. Let ${\cal G} =\{g_\ell\}$, where $g_\ell$, $\ell=0,
\ldots, {|{\cal G}|-1}$, $g_0={\bf I}_S$, denote group elements~\footnote{I am identifying an abstractly defined decoupling group with its image under a {\em projective} representation in ${\cal H}_S$. Loosely speaking, ${\cal G}$ is a ``group up to phase factors'', in general. This is irrelevant for the present discussion.}. {\em Cyclic decoupling according to ${\cal G}$ over $T_c$} is implemented by sequentially steering
$U_c(t)$ through each of the $|{\cal G}|$ group elements that is, \begin{equation} U_c[(j -1)\Delta t +s]=g_j\:, \hspace{3mm} s\in [0,\Delta t)\:, \label{bb} \end{equation}
with $\Delta t=T_c/|{\cal G}|$ and $j=1,\ldots,|{\cal G}|$. One can prove that, in a {\em fast control limit} where \begin{equation} T_c \rightarrow 0\:,\:M\rightarrow\infty\:, \hspace{3mm} T=MT_c >0\:, \label{fast} \end{equation} the leading contribution to the average Hamiltonian resulting from $\tilde{H}_{SE}(t)$ in Eq.~(\ref{lham}) is given by \begin{eqnarray} \overline{H}^{(0)}_{SE}&\hspace{-1mm}=\hspace{-1mm}& \overline{H}_S\otimes {\bf I}_E + {\bf I}_S\otimes H_E + \sum_a \overline{J}_a \otimes B_a\:, \nonumber \\ \overline{X}&\hspace{-1mm}=\hspace{-1mm}& {1 \over T_c}\int_0^{T_c} dt \,U_c^\dagger (t) X U_c(t)\:. \label{timeav} \end{eqnarray} The advantage of group-based decoupling scheme is that the above time averages are directly mapped, via Eq.~(\ref{bb}), to averages over the control group ${\cal G}$, effectively implying a {\em symmetrization of the controlled dynamics according to ${\cal G}$}~\cite{VKL99,Zanardi99,Wocjan}. If, in particular, the action of ${\cal G}$ is {\em irreducible}, then by Schur's lemma \begin{equation}
\overline{X}={1\over |{\cal G}|} \sum_{g_\ell \in {\cal G}} g_\ell^\dagger X g_\ell ={{\rm tr} (X) \over d_S} {\bf I}_S =0\:, \end{equation} immediately implying complete decoupling as in Eq.~(\ref{dec1}).
While cyclic schemes may be very powerful and conceptually simple, they are only applicable (at least in the simple formulation presented here) to time-independent control systems. Also, because averaging requires traversing {\em all} of ${\cal G}$, they tend to become very inefficient as the size of ${\cal G}$ grows. The basic idea that underlies {\em random decoupling according to ${\cal G}$} is to replace sequential cycling with {\em random sampling over ${{\cal G}}$}. In the simplest kind of protocols, the value of the propagator $U_c(t)$ is determined by a group element which is picked uniformly at random in ${\cal G}$ that is, \begin{equation}
\mbox{Prob}\,(g_\ell) = {1\over |{\cal G}|}\:, \hspace{3mm} \forall g_\ell \in {\cal G}\:. \label{pick} \end{equation} Thus, both the past control operations and the times at which they are effected are known, but the future control path is random. Under these conditions, no average Hamiltonian formulation is viable, and averaging effects emerge through {\em ensemble} rather than {\em time} averages, \begin{equation} \langle\langle X(t) \rangle\rangle ={\bf E}\left\{ U_c^\dagger (t) X(t) U_c(t)\right\} \:. \label{randomav} \end{equation} Under the uniformity assumption, such expectation values again reduce to averages over ${\cal G}$, leading to the possibility of {\em stochastic averaging}, \begin{equation}
\langle\langle X(t) \rangle\rangle ={1\over |{\cal G}|} \sum_{g_\ell \in {\cal G}} g_\ell^\dagger X (t) g_\ell =0\:. \end{equation} The two key questions to address for random decoupling are to understand whether stochastic protocols are indeed capable of achieving decoupling and, if so, how they perform compared to deterministic counterparts. I focus here on the first question, by presenting an explicit derivation of an error bound for randomized control directly within the open-system context~\footnote{In~\cite{VK05}, a detailed proof was obtained for the closed-system setting, and used to sketch the main steps leading to the open-system result.}.
\section{Random decoupling} \subsection{General error bounds}
We begin by recalling a few preliminary facts.
\begin{remark} Let
$|| A ||_2=\mbox{Max}\,|\mbox{eig}\,(\sqrt{A^\dagger A})|$ denote the operator $2$-norm of $A$. Then (see e.g.~\cite{bhatia})
\\ $(i)$ \ \ $|| A ||_2 =
\mbox{Max}\,|\mbox{eig}\,(A)|\:,$ $\:\forall A=A^\dagger$; \\
$(ii)$ \ $||A B ||_2 \leq || A ||_2 || B ||_2\:,$ $\:\forall A, B$; \\
$(iii)$ If $U$ is unitary, $|| U^\dagger A U ||_2 = || A ||_2\:, $ $\forall A$. \label{rk} \end{remark}
\begin{lemma} \label{rank} Let $A$ be any rank-$1$ operator on ${\cal H}_S$. Then
$$|{\rm tr}(A)|\leq || A ||_2 \:. $$ \end{lemma}
\vspace*{2mm}
\proof Being rank-$1$, $A$ may be represented, in a suitable orthonormal basis $\{|e_k\rangle\}$, as $A\simeq |v\rangle\langle e_1|$, for a
$d$-dimensional complex vector $|v\rangle=[v_1,\ldots, v_d]$ with norm
$||v||=\sqrt{\sum_k |v_k|^2}$. Then
$$ |{\rm tr}(A)|= |v_1| \leq || v || = \mbox{Max}\,
|\mbox{eig}\,(\sqrt{ |v\rangle\langle v| })| = || A||_2 \:.$$
Q.E.D.
\begin{theorem} \label{main}
Let $S$ be an open quantum system described by a Hamiltonian of the form (\ref{totaldrift}). Suppose that the control protocol satisfies the following assumptions: \\ $(i)$ \ \ ({\em Irreducibility}) ${\cal G}$ acts irreducibly on ${\cal H}_S$. \\ $(ii)$ \ ({\em Uniformity}) $U_c(t)$ is uniformly random for each $t$. \\ $(iii)$ ({\em Independence}) For any $t,s >0$, $U_c(t)$ and $U_c(t+s)$ are independent for $s > \Delta t$.
If, in addition, the total interaction Hamiltonian is uniformly bounded in time, \begin{equation}
\Big|\Big| H_S(t) \otimes {\bf I}_E +
\sum_a J_a(t) \otimes B_a (t) \Big|\Big|_2 < k \:,\hspace{3mm}\forall t\:, \label{b} \end{equation} then \begin{equation} \epsilon_T = O \left({T \Delta t \, k^2}\right) \;\;\;{\mbox{for}}\;\;\; {T \Delta t \,k^2} \ll 1 \:. \label{rbound} \end{equation} \end{theorem}
\vspace*{2mm}
\proof Let $\pi_S$ be an arbitrary pure state of $S$. The first step is to cast the pure-state error probability (\ref{psep}) in a more convenient form to bound. By purifying the initial state of $E$ if necessary, we may assume that $\rho_{SE}(0)= \pi_S \otimes \pi_E$, both $\pi_{S,E}$ being one-dimensional projectors. By using the definition of partial trace and the cyclicity property of the full trace, we have \begin{eqnarray} \epsilon_T(\pi_S)&\hspace{-1mm}=\hspace{-1mm}& {\bf E}\left\{ {\rm tr}_{S} \left( \pi^\perp_S \tilde{\rho}_S(T)\right)\right\} \\ &\hspace{-1mm}=\hspace{-1mm}& {\bf E}\left\{ {\rm tr}_{SE} \left( \pi^\perp_S\otimes {\bf I}_E \tilde{\rho}_{SE} (T) \right)\right\} \nonumber \\ &\hspace{-1mm}=\hspace{-1mm}& {\bf E}\left\{ {\rm tr}_{SE} \left( \pi^\perp_S\otimes {\bf I}_E \tilde{U}(T) \pi_S \otimes \pi_E \tilde{U}^\dagger (T) \right)\right\} \nonumber \\
&\hspace{-1mm}=\hspace{-1mm}& {\bf E}\left\{ {\rm tr}_{SE} \left( \pi^\perp_S\otimes {\bf I}_E \tilde{U}'(T) \pi_S \otimes \pi_E {{\tilde{U}'}} (T)^\dagger \right)\right\} \nonumber \:, \end{eqnarray} where the relation (\ref{relation}) has been used, and $U_E(t)$ drops. Let $H'_{SE}(t)$ denote the interaction Hamiltonian of Eq.~(\ref{b}). Then the task is to bound the error in implementing identity design on the logical interaction propagator at time $T$, \begin{equation} \tilde{U}'(T)={\cal T} \hspace*{-.5mm} \exp\left \{ -i \hspace{-0.5mm} \int_0^T du \tilde{H}'_{SE}(u) \right\} \:, \end{equation} with $\tilde{H}'_{SE}(t)= U_c^\dagger(t) H'_{SE}(t) U_c(t)$ given in Eq.~(\ref{hp}).
The above propagator may be expressed as follows: \begin{equation} \tilde{U}'(T)= \sum_{n=0}^\infty I_n(T)\:, \end{equation} \begin{equation} I_n(T) =(-i)^n \int_{0\leq u_1\ldots\leq u_n\leq T} \hspace*{-3mm} d {\bf u} \,
\tilde{H}'_{SE}({u_n}) \ldots \tilde{H}'_{SE}({u_1})\:, \end{equation} and similarly for $\tilde{U}'(T)^\dagger$, with $d{\bf u}= du_1\ldots du_n$. Thus, we need to calculate \begin{eqnarray} \epsilon_T&\hspace{-7mm}(\pi_S)\hspace{-6mm}&= \nonumber \\ \hspace{-6mm}&=& \hspace{-3mm} {\bf E}\left \{ {{\rm tr}}_{SE} \hspace*{-1mm}\left(
\sum_{n,m=0}^\infty \hspace*{-2mm}\pi_S \otimes \pi_E I_m(T)^\dagger \pi_S^\perp\otimes{\bf I}_E I_n (T) \hspace{-1mm}\right) \hspace{-1mm}\right\} \,. \nonumber \end{eqnarray} The contributions with $n=0$ or $m=0$ vanish because $\pi_S^\perp$ and $\pi_S$ cancel each other upon exploiting the cyclicity of the trace. Because $\epsilon_T(\pi_S)\geq0$, \begin{eqnarray}
|\epsilon_T&\hspace{-7mm}(\pi_S)|\hspace{-6mm}&\leq \nonumber \\ \hspace{-6mm}&\leq& \hspace{-3mm} \sum_{n,m \geq 1} \hspace*{-3mm}\
\Big| {\bf E}\Big \{ {{\rm tr}}_{SE} \Big( \pi_S \otimes \pi_E I_m(T)^\dagger \pi_S^\perp\otimes{\bf I}_E I_n (T)
\Big) \Big \} \Big|\,. \nonumber \end{eqnarray} Under the assumption of sufficiently smooth behavior, the expectation may be moved under the integral. Fix a pair of integers $n,m \geq 1$, then the relevant contribution is \begin{eqnarray} \int_{{W}^{(n,m)}} \hspace{-6mm}d{\bf u}\, d {\bf t}\: {\bf E}\left \{ \pi_S\otimes \pi_E \tilde{H}'_{SE}(t_1) \ldots \tilde{H}'_{SE}(t_m) \pi_S^\perp \otimes {\bf I}_E \nonumber \right. \\ \left. \tilde{H}'_{SE}(u_n)\ldots \tilde{H}'_{SE}(u_1)\right\}\:, \label{nm} \end{eqnarray}
where the integration region $W^{(n,m)}=\{ ({\bf u}, {\bf t})\,|\, 0\leq u_1 \leq \ldots\leq u_n \leq T;\: 0\leq t_1 \leq \ldots \leq t_m\leq T\}$. Let $W_1^{(n,m)}(\Delta t)\subset W^{(n,m)}$ denote the subset of points satisfying that $u_\ell$, $t_\ell$ are each time-ordered and {\em no} $u_\ell$ or $t_\ell$ is further away than $\Delta t$ from the rest, and let $W_2^{(n,m)}(\Delta t)\subset W^{(n,m)}$ denote the remaining region. Because, within $W_2^{(n,m)}(\Delta t)$, at least one of the integrating variables is more than $\Delta t$ away from all the other variables, the independence assumption $(iii)$ allows the expectation relative to such a variable to be taken separately. By the uniformity assumption $(ii)$ on $U_c(t)$ for all $t$, and by the tracelessness assumption on $H'_{SE}(t)$ for all $t$, such an expectation vanishes. Therefore, $W_1^{(n,m)}(\Delta t)$ is the only subset of points contributing to the expectation in Eq. (\ref{nm}). Let $d{\bf w}^{(n,m)}$ denote the corresponding integration measure. Then \begin{eqnarray} \epsilon_T(\pi_S) \leq \sum_{n,m \geq 1} \int_{W_1^{(n,m)}} d{\bf w}^{(n,m)}\,\hspace{3cm} \nonumber \\
\left| {\bf E}\left \{ {{\rm tr}}_{SE} \hspace*{-1mm}\left( \pi_S \otimes \pi_E \tilde{H}'_{SE}(t_1) \ldots \pi_S^\perp\otimes {\bf I}_E \ldots \tilde{H}'_{SE}(u_1) \right) \right\}
\right| \nonumber \nonumber \\ \hspace*{-3cm}\leq \sum_{n,m \geq 1} \int_{W_1^{(n,m)}} d{\bf w}^{(n,m)}\,\hspace*{3cm} \nonumber \\
{\bf E} \left\{ \left| {{\rm tr}}_{SE} \hspace*{-1mm}\left( \pi_S \otimes \pi_E \tilde{H}'_{SE}(t_1) \ldots \pi_S^\perp\otimes {\bf I}_E \ldots \tilde{H}'_{SE}(u_1)
\right) \right|\right\} \nonumber \,, \end{eqnarray} where in the second step Jensen's inequality has been used. By noticing that the argument of the trace is a rank-1 operator, Lemma~\ref{rank} may be used to simplify \begin{eqnarray} \epsilon_T(\pi_S) \leq \sum_{n,m \geq 1} \int_{W_1^{(n,m)}} d{\bf w}^{(n,m)}\,\hspace{3cm} \nonumber \\
\hspace*{5mm}{\bf E} \left\{ \Big|\Big| \pi_S \otimes \pi_E \tilde{H}'_{SE}(t_1) \ldots \pi_S^\perp\otimes
{\bf I}_E \ldots \tilde{H}'_{SE}(u_1)\Big|\Big|_2 \right\} \nonumber \,, \hspace{5mm}\nonumber \\ \leq \sum_{n,m \geq 1} {\mbox{Vol}}({W_1^{(n,m)}}) k^{n+m}\:, \hspace{2.6cm} \nonumber \end{eqnarray} where the inequality $(ii)$ in the Remark~\ref{rk} and the uniform bound $k$ for $H'_{SE}(t)$ in (\ref{b}) have been used, and {Vol}$({W_1^{(n,m)}})$ is the volume of ${W_1^{(n,m)}}$. Note that the dependence upon $\pi_S$ has disappeared at this point.
The above volume may be estimated through a combinatorial argument. First, notice that given the two ordered lists $0\leq u_1 \leq\ldots\leq u_n\leq T$, $0\leq t_1 \leq\ldots\leq t_m\leq T$, there are ${n+m\choose m}$ different merged orderings. Fix a particular one. Then each element needs to be either within $\Delta t$ of the next one or of the previous one. Make a choice for the odd-numbered elements, the first element being labeled $1$. There are at most $2^{\lceil (n+m)/2\rceil}$ such choices. For each of them the contribution to the volume may be bounded by ordering the even-numbered elements, then by inserting the odd ones, ignoring the ordering constraint now. Finally, \begin{eqnarray} {\mbox{Vol}}({W_1^{(n,m)}}) &\hspace{-2mm}\leq \hspace{-2mm}& {n+m\choose m} \frac{\,T^{\lfloor(n+m)/2\rfloor} (2\Delta t)^{\lceil (n+m)/2 \rceil}}{(\lfloor (n+m)/2\rfloor)!} \nonumber \\ &\hspace{-2mm}\leq\hspace{-2mm} & 2^{\lceil (n+m)/2\rceil} T^{\lfloor (n+m)/2\rfloor} (2\Delta t)^{\lceil (n+m)/2\rceil} \nonumber \\ &\hspace{-2mm}\equiv\hspace{-2mm} &V_{nm}\:, \label{vnm} \end{eqnarray} where the inequalities ${n+m\choose m}\leq 2^{n+m-1}$ (for $n+m\geq 2$), and $\lfloor (n+m)/2\rfloor ! \geq 2^{\lfloor (n+m)/2 \rfloor -1}$ have been exploited.
The last step is to sum over $n,m$: \begin{equation} \mbox{Max}_{\pi_S} \{\epsilon_T(\pi_S) \} \equiv \epsilon_T \leq \sum_{n,m =1}^\infty V_{nm} k^{n+m} \:. \label{fin} \end{equation} This may be done by considering separately the four partial sums where both $n$ and $m$ have the same (even or odd) parity, or they have opposite (even-odd or odd-even) parity, respectively, and by evaluating the $\lfloor\, \rfloor$, $\lceil \, \rceil$ in Eq. (\ref{vnm}) accordingly. Lengthy but straightforward calculations yield \begin{equation} \epsilon_T \leq (4T\Delta t k^2) \frac{1+ 8 \Delta t k + 4T\Delta t k^2 } {(1-4T\Delta t k^2)^2} = O(T \Delta t k^2) \:, \end{equation} for values of $T \Delta t k^2 \ll 1$, as quoted in Theorem~\ref{main}.
Q.E.D.
\begin{remark} By setting all the coupling operators $J_a=0$, the error bound for random decoupling of a closed or classically time-dependent control system is obtained. \end{remark}
According to the above Theorem, the performance of stochastic control can be made arbitrarily high by appropriate design, in particular by choosing a sufficiently small $\Delta t$ in the present setting. Remarkably, this implies the possibility to {\em arbitrarily suppress on average decoherence in the logical frame}. Note that, unlike deterministic decoupling, stochastic schemes place {\em no} restriction on the time dependence of $H_0(t)$, only on the maximum eigenvalue of the interaction part, $H'_{SE}(t)$. The latter, however, may diverge in physical situations involving infinite-dimensional environments. Thus, appropriate care is needed to properly define the relevant strength $k$ in such situations~\cite{KLV,terhal}. Physically, the parameter $k^{-1}$ is of the order of the {\em shortest} correlation time present in the interaction to be removed. While this provides the relevant time scale to the purposes of obtaining an {\em upper} error bound, {\em lower} or {\em typical} error bounds may be better in specific situations, depending on the details of both the system and the environment.
\subsection{Example: Control of a single noisy qubit}
A simple illustrative example is provided by a single two-state system (a qubit) dissipatively coupled to a quantum reservoir. In this case ${\cal H}_S={\bf C}^2$ and a basis for the traceless operators on $S$ is given by the Pauli operators, $\sigma_\alpha$, $\alpha=x,y,z$. Consider for simplicity a time-independent open-system dynamics. Eq.~(\ref{totaldrift}) takes then the form \begin{equation} H_0 =\omega_0 \sigma_z \otimes {\bf I}_E + {\bf I}_S \otimes H_E + \sum_\alpha \sigma_\alpha \otimes B_\alpha\:, \end{equation} where $\sigma_z$ represents the energy eigenbasis of the isolated qubit, and $\omega_\alpha$, $B_a$ are appropriate real parameters and Hermitian environment operators, respectively. Complete decoupling may be achieved in the deterministic setting by cycling the control propagator through a (projectively represented) error group for the qubit\footnote{The abstract decoupling group is ${\cal Z}_2 \times {\cal Z}_2$ in this case.} that is, ${\cal G}_P\simeq \{ {\bf I}_S, \sigma_x,\sigma_y,\sigma_z\}$. Thus, $T_c= 4\Delta t$, and Eq.~(\ref{bb}) yields \[ U_c(t)= \left\{ \begin{array}{ll} {\bf I}_S & t\in \Delta t_1\:, \\
\sigma_x & t\in \Delta t_2\:,\\
\sigma_y & t\in \Delta t_3\:,\\
\sigma_z & t\in \Delta t_4\:.\\ \end{array}\right. \] In practice, this corresponds to a series of four equally spaced bang-bang so-called $\pi$- (or $180^\circ$-) pulses, alternating between the $\hat{x}$ and $\hat{z}$ axes. In terms of the control inputs introduced in (\ref{bil}), a $\pi$-pulse along the $\alpha$ axis may be performed by applying a linearly polarized oscillating field \begin{eqnarray*} H_\alpha u_\alpha(t)= \sigma_\alpha\,V(t) \cos[\omega (t-t_P)]\:,\hspace{9mm}\\ V(t)=V[\theta (t-t_P)-\theta (t-t_P-\tau)]\:, \hspace{3mm} V>0\:, \end{eqnarray*} where $\omega=\omega_0$ on resonance, $t_P$, $\tau$ are the time at which the pulse is applied and its duration, respectively, and $2V\tau=\pi$ with $\tau\rightarrow 0$, $V\rightarrow \infty$ to satisfy the bang-bang requirement.
For random decoupling over the Pauli group ${\cal G}_P$, the control prescription (\ref{pick}) corresponds to applying a sequence of $\pi$-pulses which are randomly drawn from ${\cal G}_P$ that is, each of the Pauli operators is applied with probability 0.25 at times $t_j=j\Delta t$, $j\in {\bf N}$. Physically, the relevant strength parameter $k$ may be associated to the high-frequency cut-off $\omega_c$ that is contained in the reservoir power spectrum and determines its frequency response. In general, however, additional time scales related to both $\omega_0$ and the temperature affect the overall control performance. Thus, according to the worst-case bound of Eq.~(\ref{rbound}), decoherence suppression at time $T$ is achieved provided $\Delta t$ is made sufficiently small with respect to $\omega_c^{-1}$. Remarkably, an exact solution for the stochastically controlled dynamics may be obtained in the special case where $B_x=B_y=0$, corresponding to pure decoherence. A detailed analysis of this limiting situation is reported in~\cite{SV05}.
\section{Conclusion}
I have discussed a control-theoretic formulation which explicitly invokes random control design, and which is applicable to arbitrary finite-dimensional, time-dependent open quantum control systems. I focused on random decoupling design for decoherence suppression as a relevant case study, and showed how arbitrarily low error rates may be achieved in principle. Further study is needed to both explore concrete applications of randomized schemes and assess their full potential, as well as to integrate random design within existing control settings. Beside pointing to a still largely unexplored territory in the theory and practice of quantum control, the ideas presented here might allow to take advantage of novel perspectives, as offered for instance by noisy quantum games~\cite{meyer} or randomized algorithms for classical uncertain systems~\cite{Tempo}. It is my hope that the results presented here will prompt the control theory community to further investigate the interplay between randomness and coherence in quantum dynamical systems.
\section{Acknowledgments}
The original formulation of the random decoupling problem on which I build here is joint work with Manny Knill. I wish to thank both him and Seth Lloyd for the pleasure of a longstanding collaboration, as well as Lea Santos for her invaluable help on investigating stochastically controlled systems and for a critical reading of the manuscript.
\end{document} |
\begin{document}
\begin{flushleft}
{\Large\bf A quantitative formula for the imaginary part of a Weyl coefficient\\[5mm]
}
\textsc{
Jakob Reiffenstein
\hspace*{-14pt}
\renewcommand{\fnsymbol{footnote}}{\fnsymbol{footnote}}
\setcounter{footnote}{2}
\footnote{
Department of Mathematics, University of Vienna \\
Oskar-Morgenstern-Platz 1, 1090 Wien, AUSTRIA \\
email: jakob.reiffenstein@univie.ac.at}
} \\[1ex]
\end{flushleft}
{\small
\textbf{Abstract.} We investigate two-dimensional canonical systems $y'=zJHy$ on an interval, with positive semi-definite Hamiltonian $H$. Let $q_H$ be the Weyl coefficient of the system. We prove a formula that determines the imaginary part of $q_H$ along the imaginary axis up to multiplicative constants, which are independent of $H$. We also provide versions of this result for Sturm-Liouville operators and Krein strings. \\ Using classical Abelian-Tauberian theorems, we deduce characterizations of spectral properties such as integrability of a given comparison function w.r.t. the spectral measure $\mu_H$, and boundedness of the distribution function of $\mu_H$ relative to a given comparison function. \\
We study in depth Hamiltonians for which $\arg q_H(ir)$ approaches $0$ or $\pi$ (at least on a subsequence). It turns out that this behavior of $q_H(ir)$ imposes a substantial restriction on the growth of $|q_H(ir)|$. Our results in this context are interesting also from a function theoretic point of view.
\\[3mm]
\textbf{AMS MSC 2020:} 30E99, 34B20, 34L05, 34L40
\\
\textbf{Keywords:} Canonical system, Weyl coefficient, growth estimates, high-energy behaviour
}
\pagenumbering{arabic} \setcounter{page}{1} \setcounter{footnote}{0}
\section[{Introduction}]{Introduction}
\noindent We study two-dimensional \textit{canonical systems} \begin{align} \label{A33} y'(t)=zJH(t)y(t), \quad \quad t \in [a,b) \, \text{ a.e.}, \end{align} where $-\infty < a <b \leq \infty$, $z \in \bb C$ is a spectral parameter and $J:=\smmatrix 0{-1}10$. The \textit{Hamiltonian} $H$ is assumed to be a locally integrable, $\bb R^{2 \times 2}$-valued function on $[a,b)$ that further satisfies \begin{itemize} \item[$\rhd$] $H(t) \geq 0$ and $H(t) \neq 0$, \quad \quad $t \in [a,b)$ a.e.; \item[$\rhd$] $H$ is definite, i.e., if $v \in \mathbb{C}^2$ is s.t. $H(t)v \equiv 0$ on $[a,b)$, then $v=0$; \item[$\rhd$] $\int_a^b \tr H(t) \mkern4mu\mathrm{d} t=\infty$ (limit point case at $b$). \end{itemize} Together with a boundary condition at $a$, the equation (\ref{A33}) becomes the eigenvalue equation of a self-adjoint (possibly multi-valued) operator $A_H$ in a Hilbert space $L^2(H)$ associated with $H$. Throughout this paper, we fix the boundary condition $(1,0)y(a)=0$, which is no loss of generality. \\ Many classical second-order differential operators such as Schr\"odinger and Sturm-Liouville operators, Krein strings, and Jacobi operators can be transformed to the form (\ref{A33}), see, e.g., \cite{remling:2018,teschl:2009,behrndt.hassi.snoo:2020,kaltenbaeck.winkler.woracek:2007,kac:1999}. Canonical systems thus form a unifying framework. \\ All of the above operators have in common that their spectral theory is centered around the Weyl coefficient $q$ of the operator (also referred to as Titchmarsh-Weyl $m$-function). This function is constructed by Weyl's nested disk method and is a Herglotz function, i.e., it is holomorphic on $\bb C \setminus \bb R$ and satisfies there $\frac{\IM q(z)}{\IM z} \geq 0$ as well as $q(\overline{z})=\overline{q(z)}$. 
It can thus be represented as \begin{align} \label{A17} q(z)=\alpha + \beta z + \int_{\bb R} \bigg(\frac{1}{t-z}-\frac{t}{1+t^2} \bigg) \mkern4mu\mathrm{d} \mu(t), \quad \quad z \in \bb C \setminus \bb R \end{align} with $\alpha \in \bb R$, $\beta \geq 0$, and $\mu$ a positive Borel measure on $\bb R$ satisfying $\int_{\bb R} \frac{d\mu(t)}{1+t^2} <\infty$. The measure $\mu$ in the integral representation (\ref{A17}) of the Weyl coefficient is a spectral measure of the underlying operator model if $\beta =0$ (if $\beta > 0$, a one-dimensional component has to be added). The importance of canonical systems in this context lies in the Inverse Spectral Theorem of L. de Branges, stating that each Herglotz function $q$ is the Weyl coefficient of a unique (suitably normalized) canonical system. \\
\noindent Given a Hamiltonian $H$, we are ultimately interested in the description of properties of its spectral measure $\mu_H$ in terms of $H$. The correspondence between $H$ and $\mu_H$ can be best understood using the Weyl coefficient $q_H$, whose imaginary part $\IM q_H$ determines $\mu_H$ via the Stieltjes inversion formula. \\ In their recent paper \cite{langer.pruckner.woracek:heniest}, Langer, Pruckner, and Woracek gave a two-sided estimate for $\IM q_H(ir)$ in terms of the coefficients of $H$: \begin{align} \label{A43} L(r) \lesssim \IM q_H(ir) \lesssim A(r), \quad \quad r>0, \end{align} where $L,A$ are explicit in terms of $H$, and we used the notation $f(r) \lesssim g(r)$ to state that $f(r) \leq Cg(r)$ for a constant $C>0$. Moreover, in (\ref{A43}) the constants implicit in $\lesssim$ are independent of $H$. The exact formulation of this result will be recalled in \Cref{Y98}. \\ It may happen that $L(r)={\rm o} (A(r))$, and $\IM q_H(ir)$ is not determined by (\ref{A43}). A toy example for this is the Hamiltonian \begin{align*} H(t)=t \left(\begin{matrix}
|\log t|^{\phantom{1}} & |\log t|^2 \\
|\log t|^2 & |\log t|^3 \\ \end{matrix} \right), \quad \quad t \in [0,\infty). \end{align*} For $r \to \infty$, a calculation shows that \begin{align*}
L(r) &\asymp (\log r)^{-3}, \quad \quad A(r) \asymp (\log r)^{-1}, \end{align*} where $f(r) \asymp g(r)$ means that both $f(r) \lesssim g(r)$ and $g(r) \lesssim f(r)$. \newline
\noindent The following theorem, which is our main result, improves the estimate (\ref{A43}) by giving a formula for $\IM q_H(ir)$ up to universal multiplicative constants.
\begin{theorem} \label{T1} Let $H$ be a Hamiltonian on $[a,b)$, and denote\footnote{When there is no risk of ambiguity, we write $\Omega$ and $\omega_j$ instead of $\Omega_H$ and $\omega_j^{(H)}$ for short.} \begin{equation}\label{Y08}
H(t) = \begin{pmatrix} h_1(t) & h_3(t) \\ h_3(t) & h_2(t) \end{pmatrix},\quad
\Omega_H(t) = \begin{pmatrix} \omega_1^{(H)}(t) & \omega_3^{(H)}(t) \\ \omega_3^{(H)}(t) & \omega_2^{(H)}(t) \end{pmatrix}
\mathrel{\mathop:}=\int_a^t H(s)\mkern4mu\mathrm{d} s.
\end{equation} Let $\hat t : (0,\infty) \to (a,b)$ be a function satisfying\footnote{We will see later that the equation $\det \Omega_H(t)=\frac{1}{r^2}$ has a unique solution for every $r>0$. A possible choice of $\hat t$ is thus the function that maps $r>0$ to this solution. } \begin{align} \label{A49} \det \Omega_H(\hat t(r)) \asymp \frac{1}{r^2}, \quad \quad r \in (0,\infty). \end{align} Then \begin{align} \label{A2}
\IM q_H(ir) &\asymp \bigg|q_H(ir)-\frac{\omega_3^{(H)}(\hat t(r))}{\omega_2^{(H)}(\hat t(r))} \bigg| \asymp \frac{1}{r\omega_2^{(H)}(\hat t(r))}, \\[1.7ex] \label{A3}
\frac{\IM q_H(ir)}{|q_H(ir)|^2} &\asymp \frac{1}{r\omega_1^{(H)}(\hat t(r))}, \end{align} for $r \in (0,\infty)$. The constants implicit in $\asymp$ in (\ref{A2}) and (\ref{A3}) depend on the constants hidden in $\asymp$ in (\ref{A49}), but not on $H$. \\
If, in addition, $\IM q_H(ir)={\rm o} (|q_H(ir)|)$ for $r \to \infty$ (or $r \to 0$), then\footnote{With $f(r) \sim g(r)$ meaning $\lim \frac{f(r)}{g(r)}=1.$} \begin{align} \label{A11} q_H(ir) \sim \frac{\omega_3^{(H)}(\hat t(r))}{\omega_2^{(H)}(\hat t(r))}, \quad \quad r \to \infty \quad ( r \to 0). \end{align} \end{theorem}
\noindent The two-sided estimate (\ref{A2}) has some useful features: its pointwise nature, its applicability for $r \to \infty$ and $r \to 0$, and the universality of the constants hidden in $\asymp$. However, it is rather different from an asymptotic formula: it does not capture small oscillations of $\IM q_H(ir)$ around $\frac{1}{r\omega_2^{(H)}(\hat t(r))}$. \\ Note also that the first relation in (\ref{A2}) can be seen as a statement about the real part of $q_H(ir)$. In fact, $\IM q_H(ir)$ is also obtained if we subtract $\RE q_H(ir)$ from $q_H(ir)$, then take absolute values. It is an open question whether $\RE q_H(ir)$ can be described more directly in terms of $H$. \newline
\noindent A most important class of operators is that of Sturm-Liouville (in particular, Schr\"odinger) operators. Let us provide a reformulation of \Cref{T1} for these operators right away.
\subsection*{Sturm-Liouville operators}
We provide a version of \Cref{T1} for Sturm-Liouville equations \begin{align} \label{A44} -(py')'+qy=zwy \end{align} on $(a,b)$, where $1/p, q,w \in L^1_{loc}(a,b)$, $w>0$ and $p,q$ are real-valued. Suppose that $a$ is in limit circle case and $b$ is in limit point case. Impose a Dirichlet boundary condition at $a$, i.e., $y(a)=0$. The Weyl coefficient for this problem is the unique number $m(z)$ with \[ c(z,\cdot)+m(z)s(z,\cdot) \in L^2((a,b),w(x)\mkern4mu\mathrm{d} x) \] where $c(z,\cdot)$ and $s(z,\cdot)$ are solutions of (\ref{A44}) with initial values \[ \binom{p(a)c'(z,a)}{c(z,a)}=\binom{0}{1}, \quad \binom{p(a)s'(z,a)}{s(z,a)}=\binom{1}{0}. \] \begin{theorem} \label{T9}
For each $t \in (a,b)$, let $(\cdot,\cdot)_t$ and $\|\cdot\|_t$ denote the scalar product and norm on $L^2((a,t),w(x)\mkern4mu\mathrm{d} x)$, i.e., \[ (f,g)_t=\int_a^t f(x)\overline{g(x)} w(x) \mkern4mu\mathrm{d} x. \] For $\xi \in \mathbb{R}$, let $\hat t_\xi : (0,\infty) \to (a,b)$ be a function satisfying \begin{align} \label{A51}
\|c(\xi,\cdot)\|_{\hat t_\xi(r)}^2 \|s(\xi,\cdot)\|_{\hat t_\xi(r)}^2 - (c(\xi,\cdot),s(\xi,\cdot))_{\hat t_\xi(r)}^2 \asymp \frac{1}{r^2}, \quad \,\, r \in (0,\infty). \end{align} Then
\begin{align} \label{A45}
\IM m(\xi+ir) &\asymp \frac{1}{r \|s(\xi,\cdot)\|_{\hat t_\xi(r)}^2}, \\ \label{A46}
\frac{\IM m(\xi+ir)}{|m(\xi+ir)|^2} &\asymp \frac{1}{r \|c(\xi,\cdot)\|_{\hat t_\xi(r)}^2}, \end{align} for $r \in (0,\infty)$. The constants implicit in $\asymp$ are independent of $p,q,w$ as well as $\xi$, but do depend on the constants pertaining to $\asymp$ in (\ref{A51}). \end{theorem}
\noindent In fact, \Cref{T9} is a direct consequence of \Cref{T1} upon employing a transformation (cf. \cite{remling:2018} for $p=w=1$ and $\xi=0$) that maps solutions of (\ref{A44}) to solutions of the canonical system $y'=(z-\xi)JH_\xi y$, where \[ H_\xi (t) = w(t) \cdot \begin{pmatrix} c(\xi,t)^2 & -s(\xi,t)c(\xi,t) \\ -s(\xi,t)c(\xi,t) & s(\xi,t)^2 \end{pmatrix}, \quad \quad t \in [a,b). \] The Weyl coefficients then satisfy $m(z)=q_{H_\xi}(z-\xi)$.
\subsection*{Historical remarks}
\noindent The origins of the Weyl coefficient in the theory of the Sturm-Liouville differential equation are well summarized in Everitt's paper \cite{everitt:2004}. We give a short account specifically on the history of estimates for the growth of the Weyl coefficient, which date back at least to the 1950s. Particular attention was often given to the deduction of asymptotic formulae for the Weyl coefficient \cite{marchenko:1952,kac:1973a,everitt:1972,kasahara:1975,atkinson:1981,bennewitz:1989}. However, asymptotic results usually depend on rather strong assumptions on the data. When weakening these assumptions, one can still ask for explicit estimates for $q(z)$ as $z \to \infty$ nontangentially in the upper half-plane. There are a number of rather early results that determine $|q(z)|$ up to $\asymp$, e.g., \cite{hille:1963,atkinson:1988,bennewitz:1989}, although these still depend on data subject to additional restrictions. Fundamental progress has been made by Jitomirskaya and Last \cite{jitomirskaya.last:1999}, who considered Schr\"odinger operators with arbitrary (real-valued and locally integrable) potentials. They found a formula up to $\asymp$ for $|q(z)|$, which also covers the case $z \to 0$. An analog of this formula for canonical systems was given in \cite{hassi.remling.snoo:2000}. \\ When it comes to $\IM q(z)$, however, no such formula was available. Only the very recent estimate (\ref{A43}) from \cite[Theorem 1.1]{langer.pruckner.woracek:heniest} made it possible to obtain our main result that determines $\IM q(z)$ up to $\asymp$.
\subsection*{Structure of the paper}
\noindent The proof of \Cref{T1}, together with some immediate corollaries, makes up \Cref{S2}. In \Cref{S5}, we continue with a first application, a criterion for integrability of a given comparison function with respect to $\mu_H$. We also characterize boundedness of the distribution function of $\mu_H$ relative to a given comparison function. \newline
\noindent \Cref{S3} is dedicated to the boundary behavior of Herglotz functions. Cauchy integrals and the relative behavior of their imaginary and real parts have been intensively studied. For example, for a Herglotz function $q$ it is known \cite{poltoratski:2003} that the set of $\xi \in \mathbb{R}$ for which \begin{align} \label{A47}
\lim_{r \to 0} \frac{\IM q(\xi+ir)}{|q(\xi+ir)|}=0 \end{align}
is a zero set w.r.t. $\mu$. In contrast to measure theoretic results like this, we use the de Branges correspondence $H \leftrightarrow q_H$ to investigate this behavior pointwise w.r.t. $\xi$. In \Cref{T7} we show that if $\xi$ is such that (\ref{A47}) holds, then $|q(\xi+ir)|$ is slowly varying (cf. \Cref{A48}). \Cref{T8} is a partial converse of this statement. \newline
\noindent In \Cref{S4} we turn to a finer study of $\IM q_H(ir)$ in the context of the geometric origins of (\ref{A43}) and (\ref{A2}). Namely, the functions $L$ and $A$ describe the imaginary parts of bottom and top of certain Weyl disks containing $q_H(ir)$. We show that there are restrictions on the possible location of $q_H(ir)$ within the disks, and construct a Hamiltonian $H$ for which $q_H(ir)$ oscillates back and forth between the bottoms and tops of the disks. This construction allows us to answer several open problems that were posed in \cite{langer.pruckner.woracek:heniest}. \newline
\noindent We conclude our work with a reformulation of \Cref{T1} for the principal Titchmarsh-Weyl coefficient $q_S$ of a Krein string. This reformulation is the content of \Cref{S6}.
\subsection*{Notation associated to Hamiltonians}
\noindent Let $H$ be a Hamiltonian on $[a,b)$. \newline
\noindent An interval $(c,d) \subseteq [a,b)$ is called $H$-\textit{indivisible} if $H(t)$ takes the form $h(t)\binom{\cos \varphi}{\sin \varphi}\binom{\cos \varphi}{\sin \varphi}^*$ a.e. on $(c,d)$, with scalar-valued $h$ and fixed $\varphi \in [0,\pi)$. The angle $\varphi$ is then called the \textit{type} of the interval.
\begin{definition} Let \begin{align}
\mathring a (H) &:=\inf \Big\{t > a \,\Big| \, (a,t) \text{ is not }H\text{-indivisible of type } 0 \text{ or } \frac{\pi}{2} \Big\}, \\
\hat a (H) &:=\inf \Big\{t > a \,\Big|\, (a,t) \text{ is not }H\text{-indivisible} \Big\}. \end{align} Usually, we write $\mathring a$ and $\hat a$ for short. Since $H$ is assumed to be definite, both of these numbers are smaller than $b$. \end{definition} \noindent Note that $(\omega_1 \omega_2)(t)>0$ if and only if $(a,t)$ is not $H$-indivisible of type $0$ or $\frac{\pi}{2}$, i.e., $t>\mathring a$. Using the assumption $\int_a^b \tr H(t) \mkern4mu\mathrm{d} t=\infty$, we infer that $\omega_1 \omega_2$ is an increasing bijection from $(\mathring a,b)$ to $(0,\infty)$. \\ Similarly, $\det \Omega (t)>0$ is equivalent to $t>\hat a$. We have \[ \frac{d}{dt} \Big(\frac{\det \Omega (t)}{\omega_1(t)} \Big)=\omega_1(t)^{-2} \binom{-\omega_3(t)}{\omega_1(t)}^* H(t)\binom{-\omega_3(t)}{\omega_1(t)} \geq 0 \] and (by symmetry) $\frac{d}{dt} \big(\frac{\det \Omega}{\omega_2}\big) \geq 0$. Since at least one of $\omega_1$ and $\omega_2$ is unbounded, $\det \Omega$ is an increasing bijection from $(\hat a,b)$ to $(0,\infty)$.
\begin{definition} \label{A35} For a Hamiltonian $H$ and a number $\eta >0$, set \\ \begin{minipage}{.5\linewidth} \begin{equation*} \mathring r_{\eta,H} : \left\{\begin{array}{ccc} (\mathring a,b) &\to &(0,\infty) \\[0.5ex] t &\mapsto & \frac{\eta}{ 2\sqrt{(\omega_1 \omega_2)(t)}}, \end{array}\right. \end{equation*} \end{minipage} \begin{minipage}{.5\linewidth} \begin{equation*} \hat r_{\eta,H} : \left\{\begin{array}{ccc} (\hat a,b) &\to &(0,\infty) \\[0.5ex] t &\mapsto & \frac{\eta}{ 2\sqrt{\det \Omega (t)}}. \end{array}\right. \end{equation*} \end{minipage}
\noindent Both of these functions are decreasing and bijective. We define their inverse functions, \begin{align} \mathring t_{\eta,H}:=\mathring r_{\eta,H}^{-1} \,:\, (0,\infty) \to (\mathring a,b), \quad \quad \hat t_{\eta,H}:=\hat r_{\eta,H}^{-1} \,:\, (0,\infty) \to (\hat a,b). \end{align} Note that the functions $\hat t_{\eta,H}$, for any $\eta>0$, satisfy (\ref{A49}). Functions of this form will be the default choice of $\hat t$ for the sake of \Cref{T1}. We will often fix $\eta$ and $H$ and write $\mathring r$, $\mathring t$, $\hat r$, $\hat t$ for short. If $\eta$ is fixed but the Hamiltonian is ambiguous, we may write $\mathring r_H$, $\mathring t_H$, $\hat r_H$, $\hat t_H$ to indicate dependence on $H$. \end{definition}
\section{On the imaginary part of the Weyl coefficient} \label{S2}
\noindent We start by providing the details of the estimate (\ref{A43}), which is the central result in \cite{langer.pruckner.woracek:heniest}.
\begin{theorem}[{\cite[Theorem 1.1]{langer.pruckner.woracek:heniest}}] \label{Y98} Let $H$ be a Hamiltonian on $[a,b)$, and let $\eta \in (0,1-\frac{1}{\sqrt 2})$ be fixed. For $r>0$, let $\mathring t(r)$ be the unique number satisfying \begin{align} \label{A50} (\omega_1^{(H)} \omega_2^{(H)})(\mathring t(r))=\frac{\eta^2}{4r^2}, \end{align} cf. \Cref{A35}. Set\footnote{If $\eta$ and $H$ are clear from the context, we may write $A$ and $L$ for short.} \[ A_{\eta,H}(r):=\frac{\eta}{2r\omega_2^{(H)}(\mathring t(r))}, \quad \quad L_{\eta,H}(r):= \frac{\det \Omega_H (\mathring t(r))}{(\omega_1^{(H)} \omega_2^{(H)})(\mathring t(r))} \cdot A_{\eta,H}(r). \] Then the Weyl coefficient $q_H$ associated with the Hamiltonian $H$ satisfies \begin{align}
|q_H(ir)| &\asymp A_{\eta,H}(r), \label{Y35} \\[1.5ex] L_{\eta,H}(r) \lesssim \IM q_H(ir) &\lesssim A_{\eta,H}(r) \label{Y96} \end{align} for $r \in (0,\infty)$. The constants implicit in these relations are independent of $H$. Their dependence on $\eta$ is continuous. \end{theorem} \noindent In the following proof of \Cref{T1}, we will also show that \Cref{Y98} still holds if $\mathring t: (0,\infty) \to (a,b)$ is a function satisfying $(\omega_1 \omega_2)(\mathring t(r)) \asymp \frac{1}{r^2}$, and \[ A(r):=\frac{1}{r\omega_2^{(H)}(\mathring t(r))}, \quad \quad L(r):= \frac{\det \Omega_H (\mathring t(r))}{(\omega_1^{(H)} \omega_2^{(H)})(\mathring t(r))} \cdot A(r). \] In particular, we can choose any $\eta>0$ in (\ref{A50}).
\begin{proof}[Proof of \Cref{T1}]
Let $\hat t_{\eta,H}$ be defined as in \Cref{A35}. We show that for any $\eta>0$, \Cref{T1} holds for $\hat t_{\eta,H}$ in place of $\hat t$, and that the dependence on $\eta$ of the constants hidden in $\asymp$ in (\ref{A2}) and (\ref{A3}) is continuous. This then implies that \Cref{T1} holds for any function $\hat t$ satisfying (\ref{A49}). \newline
\noindent The proof is divided into steps. \\ \item[\textbf{Step 1.}] We introduce a family of transformations of $H$ that leave the imaginary part of the Weyl coefficient unchanged. If $p \in \bb R$ and \[ H_p(t):=\smmatrix 1p01 H(t) \smmatrix 10p1 = \begin{pmatrix} h_1(t)+2p h_3(t)+p^2 h_2(t) & h_3(t)+ph_2(t) \\ h_3(t)+ph_2(t) & h_2(t) \end{pmatrix}, \] an easy calculation shows that the Weyl coefficient $q_p$ of $H_p$ is given by $q_p(z)=q_0(z)+p=q_H(z)+p$. \\
\item[\textbf{Step 2.}] We prove (\ref{A2})-(\ref{A11}) for fixed $\eta \in (0,1-\frac{1}{\sqrt{2}})$. The following abbreviations are used only in Step 2: \\
\begin{tabular}{|r|l||r|l||r|l|@{}m{0pt}@{}} \hline short form & meaning & short form & meaning &short form & meaning & \\[10pt] \hline \hline \rule{0pt}{3ex}$\mathring t$ & \rule{0pt}{3ex} $\mathring t_{\eta,H}$ & \rule{0pt}{3ex} $\mathring t_p$ & \rule{0pt}{3ex} $\mathring t_{\eta,H_p}$ & \rule{0pt}{3ex} $\Omega_p$ & \rule{0pt}{3ex} $\Omega_{H_p}$& \\[3pt] \hline \rule{0pt}{3ex}$\hat t$ & \rule{0pt}{3ex}$\hat t_{\eta,H}$ & \rule{0pt}{3ex}$\hat t_p$ & \rule{0pt}{3ex}$\hat t_{\eta,H_p}$ & \rule{0pt}{3ex}$\omega_j^{(p)}$ & \rule{0pt}{3ex}$\omega_j^{(H_p)}$ & \\[3pt] \hline \rule{0pt}{3ex}$L_p$& \rule{0pt}{3ex}$L_{\eta,H_p}$ & \rule{0pt}{3ex}$A_p$& \rule{0pt}{3ex}$A_{\eta,H_p}$ & \rule{0pt}{3ex}$\Omega$ & \rule{0pt}{3ex}$\Omega_H$ & \\[3pt] \hline \end{tabular} \\[4pt]
\noindent Let $r>0$ be fixed (this is important). Our first observation is that $\hat t_p(r)=\hat t(r)$ for any $p$ since $\det \Omega_p(t)=\det \Omega (t)$ does not depend on $p$. If we can find $p$ such that $\mathring t_p(r)=\hat t_p(r)=\hat t(r)$, then clearly \[ \frac{L_p(r)}{A_p(r)}=\frac{\det \Omega_p(\mathring t_p(r))}{(\omega_1^{(p)}\omega_2^{(p)})(\mathring t_p(r))}=\frac{\det \Omega_p(\hat t_p(r))}{(\omega_1^{(p)}\omega_2^{(p)})(\mathring t_p(r))}=1. \] We apply \Cref{Y98} with $\eta$ and $H_p$. The estimate (\ref{Y96}) then takes the form \begin{equation} \label{P1} A_p(r) = L_p(r) \lesssim \IM q_H(ir) \lesssim A_p(r) \end{equation} while (\ref{Y35}) turns into \begin{equation} \label{P2}
|q_H(ir)+p| \asymp A_p(r), \end{equation} where \[ A_p(r)= \frac{\eta}{2r\omega_2^{(p)}(\mathring t_p(r))} = \frac{\eta}{2r\omega_2(\hat t(r))}. \] The right choice of $p$ is \[ p=- \frac{\omega_3(\hat t(r))}{\omega_2(\hat t(r))}, \] leading to $\omega_3^{(p)}(\hat t(r))=0$ and thus \[ (\omega_1^{(p)}\omega_2^{(p)})(\hat t(r))=\det \Omega_p (\hat t(r))=\det \Omega (\hat t(r))=\frac{\eta^2}{4r^2}. \] Consequently, $\mathring t_p(r)=\hat t(r)$. Observe that the implicit constants in (\ref{Y35}) and (\ref{Y96}) are independent of $H$ and $r$ and depend continuously on $\eta$. This shows that (\ref{A2}) holds, with constants depending continuously on $\eta$. \\ \item[\textbf{Step 3.}] (\ref{A3}) follows from an application of (\ref{A2}) to $\tilde H:=J^{\top}HJ=\smmatrix {h_2}{-h_3}{-h_3}{h_1}$ and note that $\hat t_{\eta,\tilde H}=\hat t_{\eta,H}$. Thus \[
\frac{\IM q_H(ir)}{|q_H(ir)|^2}=\IM \Big(- \frac{1}{q_H(ir)} \Big)= \IM q_{\tilde H}(ir) \asymp \frac{1}{r \omega_2^{(\tilde H)}(\hat t_{\eta,\tilde H}(r))}=\frac{1}{r \omega_1^{(H)}(\hat t_{\eta,H}(r))}. \]
Formula (\ref{A11}) follows if we divide (\ref{A2}) by $|q_H(ir)|$. Hence, we proved the assertion for $\eta \in (0,1-\frac{1}{\sqrt{2}})$. \newline
\noindent In the remaining steps we treat the missing case $\eta \geq 1-\frac{1}{\sqrt{2}}$. \item[\textbf{Step 4.}] Let $k>0$. For use in Step 5, we show that \begin{align} \label{A31}
\IM q_H(ir) \asymp \IM q_H (ikr), \quad \quad |q_H(ir)| \asymp |q_H (ikr )| \end{align} for $r \in (0,\infty)$, where the constants in $\asymp$ depend continuously on $k$ and are independent of $H$. \\ For the imaginary part, the statement is easy to see from the integral representation (\ref{A17}). For the absolute value, we use the Hamiltonian $\tilde H$ from Step 3 to obtain \[
\frac{\IM q_H(ir)}{|q_H(ir)|^2}=\IM q_{\tilde H}(ir) \asymp \IM q_{\tilde H}(ikr)=\frac{\IM q_H(ikr)}{|q_H(ikr)|^2}. \]
This shows that $|q_H(ir)| \asymp |q_H (ikr )|$ as well.
\item[\textbf{Step 5.}] \noindent Fix a Hamiltonian $H$, and let $\eta_0 \geq 1-\frac{1}{\sqrt{2}}$. Then \[ \mathring t_{\eta_0,H}(r)=\mathring t_{\frac 14,\frac{1}{4\eta_0}H}(r), \quad \quad \hat t_{\eta_0,H}(r)=\hat t_{\frac 14,\frac{1}{4\eta_0}H}(r) \] and \[ A_{\eta_0,H}(r)= A_{\frac 14,\frac{1}{4\eta_0}H}(r), \quad \quad L_{\eta_0,H}(r)= L_{\frac 14,\frac{1}{4\eta_0}H}(r). \]
Since $\frac 14$ is less than $1-\frac{1}{\sqrt{2}}$, we can use \Cref{Y98} with $\eta:=\frac 14$ to obtain \begin{align} \label{A30} L_{\eta_0,H}(r) = L_{\frac 14,\frac{1}{4\eta_0}H}(r) \lesssim &\IM q_{\frac{1}{4\eta_0}H} (ir) \\
\leq &|q_{\frac{1}{4\eta_0}H}(ir)| \asymp A_{\frac 14,\frac{1}{4\eta_0}H}(r) = A_{\eta_0,H}(r) \nonumber \end{align} for $r \in (0,\infty)$. Since $q_{\frac{1}{4\eta_0}H}(z)=q_H \big(\frac{z}{4\eta_0} \big)$ and by Step 4, we see that \Cref{Y98} holds for arbitrary $\eta >0$. It is easy to check that continuous dependence of constants on $\eta$ is retained. Repeating Steps $1-3$ now shows that also \Cref{T1} holds for $\hat t_{\eta,H}$ for any $\eta >0$. Moreover, it is not hard to see that everything still works if $\hat t$ is a function satisfying (\ref{A49}). \end{proof}
\begin{remark} \label{A36} \Cref{Y98} and \Cref{T1}, in the form we stated them, give information about $q_H(z)$ for $z=ir$. However, if $\vartheta \in (0,\pi )$ is fixed, these theorems also hold \begin{itemize} \item [$\rhd$] for $z=re^{i\vartheta}$ uniformly for $r \in (0,\infty )$ and
\item [$\rhd$] for $z=re^{i\varphi}$ uniformly for $r \in (0,\infty)$ and $|\frac{\pi}{2}-\varphi | \leq |\frac{\pi}{2}-\vartheta |$. \end{itemize} We restate the explicit constants coming from \cite{langer.pruckner.woracek:heniest}. Fix $\eta \in (0,1-\frac{1}{\sqrt{2}})$ and set $\sigma :=(1-\eta )^{-2}-1 \in (0,1)$. With \begin{align*}
c_-(\eta ,\vartheta)=\frac{\eta \sin \vartheta}{2(1+|\cos \vartheta |)} \cdot \frac{1-\sigma}{1+\sigma}, \quad \quad c_+(\eta ,\vartheta)=\frac{\sigma+\frac{2}{\eta \sin \vartheta}}{1-\sigma}, \end{align*}
we have\footnote{Since $c_-$ and $c_+$ are clearly monotonic in $\vartheta $, (\ref{A37}) and (\ref{A38}) still hold when $q_H(re^{i\vartheta})$ is replaced by $q_H(re^{i\varphi})$, where $|\frac{\pi}{2}-\varphi | \leq |\frac{\pi}{2}-\vartheta |$.} \begin{align} \label{A37} c_-(\eta ,\vartheta) \cdot \frac{\eta}{2} \cdot \frac{1}{r\omega_2(\hat t_{\eta ,H}(r))} &\leq \IM q_H(re^{i\vartheta}) \leq c_+(\eta ,\vartheta) \cdot \frac{\eta}{2} \cdot \frac{1}{r\omega_2(\hat t_{\eta ,H}(r))}, \\[1ex] \label{A38}
c_-(\eta ,\vartheta) \cdot \frac{\eta}{2} \cdot \frac{1}{r\omega_1(\hat t_{\eta ,H}(r))} &\leq \frac{\IM q_H(re^{i\vartheta})}{|q_H(re^{i\vartheta})|^2} \leq c_+(\eta ,\vartheta) \cdot \frac{\eta}{2} \cdot \frac{1}{r\omega_1(\hat t_{\eta ,H}(r))}. \end{align} In order to show (\ref{A37}), we need to slightly adapt the proof of \Cref{T1} by replacing $ir$ with $re^{i\vartheta}$ in (\ref{P1}) and taking into account the constants provided in
\cite[Theorem 1.1]{langer.pruckner.woracek:heniest}. Then (\ref{A38}) follows as in Step 3 of the proof. \\ For $\vartheta =\frac{\pi}{2}$, the optimal choice of $\eta$ is around $0.13833$ which gives \[ c_+(0.13833, \frac{\pi}{2}) \approx 1.568, \quad c_-(0.13833, \frac{\pi}{2}) \approx 0.002, \quad \frac{c_+(0.13833, \frac{\pi}{2})}{c_-(0.13833, \frac{\pi}{2})} \approx 675.772 . \] While it is possible to derive explicit constants also for $\eta \geq 1-\frac{1}{\sqrt{2}}$, doing so does not result in an improvement of the quotient $c_+ / c_-$. \end{remark}
\subsection*{Immediate consequences of \Cref{T1}}
\noindent \textit{In order to simplify calculations, unless specified otherwise, we will always assume that $\mathring t(r)$ and $\hat t(r)$ are defined implicitly by} \begin{align} \label{A12} (\omega_1 \omega_2)(\mathring t(r))=\frac{1}{r^2}, \quad \det \Omega (\hat t(r))=\frac{1}{r^2}, \end{align} and similarly for $\mathring r$ and $\hat r$ (cf. \Cref{A35} with $\eta=2$).
\noindent We revisit the example from the introduction in more generality. The following example was communicated by Matthias Langer. The calculations can be found in the appendix. \\ \begin{example} \label{A24}
Let $\alpha > 0$ and $\beta_1, \beta_2 \in \bb R$ where $\beta_1 \neq \beta_2$. Set $\beta_3 := \frac{\beta_1 + \beta_2}{2}$ and define, for $t \in (0,\infty)$, \begin{align*} H(t)= t^{\alpha -1}\left(\begin{matrix}
|\log t|^{\beta_1} & |\log t|^{\beta_3} \\
|\log t|^{\beta_3} & |\log t|^{\beta_2} \\ \end{matrix} \right). \end{align*} Then for $r \to \infty$, we have \begin{itemize} \item[] $L(r) \asymp (\log r)^{\frac{\beta_1-\beta_2}{2}-2}$ and
\item[] $A(r) \asymp |q_H(ir)| \asymp (\log r)^{\frac{\beta_1-\beta_2}{2}}$, \end{itemize} i.e., $L(r) = {\rm o} ( A(r))$. Using \Cref{T1}, we can now continue the calculations, leading to \[ \IM q_H(ir) \asymp (\log r)^{\frac{\beta_1-\beta_2}{2}-1} \asymp \sqrt{L(r)A(r)}. \] \end{example}
\noindent It is an immediate consequence of \Cref{T1} that $\IM q_H$ depends monotonically on the off-diagonal of $H$. \begin{corollary} \label{T1+} Let $H=\smmatrix {h_1}{h_3}{h_3}{h_2}$ and $\tilde{H}=\smmatrix {h_1}{\tilde h_3}{\tilde h_3}{h_2}$ be two Hamiltonians on $[a,b)$. If $t>\hat a (H)$ such that \[
\Big|\int_a^t h_3(s) \mkern4mu\mathrm{d} s \Big| \geq \Big|\int_a^t \tilde h_3(s) \mkern4mu\mathrm{d} s \Big|, \] then \[ \IM q_H(i \hat r_H(t)) \lesssim \IM q_{\tilde H}(i \hat r_H(t)) \] with a constant independent of $t$, $H$, and $\tilde H$. \end{corollary} \begin{proof}
Our condition states that $|\omega_3(t)| \geq |\tilde \omega_3(t)|$. Taking into account that $t>\hat a(H)$, this means that $0<\det \Omega (t) \leq \det \tilde \Omega (t)$. Hence $\hat r_H(t) \geq \hat r_{\tilde H}(t)$, and further $\hat t_{\tilde H}(\hat r_H(t)) \leq t$. Now, by (\ref{A2}), \[ \IM q_H(i\hat r_H(t)) \asymp \frac{1}{\hat r_H(t)\omega_2(t)} \leq \frac{1}{\hat r_H(t)\omega_2(\hat t_{\tilde H}(\hat r_H(t)))} \asymp \IM q_{\tilde H}(i\hat r_H(t)). \] \end{proof}
\noindent The following result elaborates on the relative behavior of $\IM q_H$ and $|q_H|$. We obtain a quantitative and pointwise relation between $\frac{\IM q_H}{|q_H|}$ and $\frac{\det \Omega}{\omega_1 \omega_2}$, leading to the equivalence \begin{align} \label{A14}
\lim_{r \to \infty} \frac{\IM q_H(ir)}{|q_H(ir)|}=0 \,\, \Longleftrightarrow \,\, \lim_{t \to \hat a} \frac{\det \Omega (t)}{(\omega_1 \omega_2)(t)}=0. \end{align}
The relation between $\frac{\det \Omega}{\omega_1 \omega_2}$ and $\frac{\IM q_H(ir)}{|q_H(ir)|}$ has been investigated also in \cite{langer.pruckner.woracek:gapsatz-arXiv}. Their proof of (\ref{A14})\footnote{In \cite{langer.pruckner.woracek:gapsatz-arXiv}, $\lim_{t \to a}$ was considered instead of $\lim_{t \to \hat a}$.} is based on compactness arguments. \\ Note that our result shows that (\ref{A14}) holds true for $r \to 0$ and $t \to b$ as well.
\begin{proposition} \label{A4} Let $H$ be a Hamiltonian on $[a,b)$. Then\footnote{$\mathring r(\hat t(r))$ is well-defined because of $\hat t(r) \in (\hat a,b) \subseteq (\mathring a,b)$.} \begin{align} \label{A9}
\frac{\IM q_H(ir)}{|q_H(ir)|} \asymp \frac{\mathring r(\hat t(r))}{r} = \sqrt{\frac{\det \Omega (\hat t(r))}{(\omega_1 \omega_2)(\hat t(r))}} \end{align} for $r \in (0,\infty)$. Moreover, \begin{align} \label{A10}
\big|q_H \big( i \mathring r(\hat t(r)) \big) \big| \asymp |q_H(ir)|, \quad \quad r \in (0,\infty). \end{align} All constants implicit in $\asymp$ do not depend on $H$. \end{proposition} \begin{proof} By definition of $\mathring r$ and using (\ref{A2}) and (\ref{A3}), \[
\mathring r(\hat t(r)) = \frac{1}{\sqrt{(\omega_1 \omega_2)(\hat t(r))}} \asymp r \frac{\IM q_H(ir)}{|q_H(ir)|}. \] We also have \[ \sqrt{\frac{\det \Omega (\hat t(r))}{(\omega_1 \omega_2)(\hat t(r))}} = \frac{1}{\sqrt{r^2(\omega_1 \omega_2)(\hat t(r))}} = \frac{\mathring r(\hat t(r))}{r}, \] and (\ref{A9}) follows. \\ For the proof of (\ref{A10}), we need the formula \[
\omega_1(\mathring t(r)) \asymp \frac{|q_H(ir)|}{r} \] which we get from \Cref{Y98} applied to $J^{\top}HJ$. Combine this with (\ref{Y35}) to get \[
|q_H(ir)|^2 \asymp \frac{\omega_1(\mathring t(r))}{\omega_2(\mathring t(r))}. \] On the other hand, (\ref{A2}) and (\ref{A3}) give \[
|q_H(ir)|^2 \asymp \frac{\omega_1(\hat t(r))}{\omega_2(\hat t(r))}=\frac{\omega_1 \Big(\mathring t \big( \mathring r(\hat t(r))\big)\Big)}
{\omega_2 \Big(\mathring t \big( \mathring r(\hat t(r))\big)\Big)} \asymp \big|q_H \big( i \mathring r(\hat t(r)) \big) \big|^2. \] \end{proof}
\noindent The freedom in the choice of $\eta$ leads to the following formula that we will refer to later on.
\begin{corollary} \label{T5} Let $H$ be a Hamiltonian on $[a,b)$. Then, for any $k>0$, \begin{align} \label{A21}
\IM q_H(ikr) \asymp \bigg|q_H(ikr)-\frac{\omega_3(\hat t(r))}{\omega_2(\hat t(r))} \bigg| &\asymp \bigg|q_H(ir)-\frac{\omega_3(\hat t(r))}{\omega_2(\hat t(r))} \bigg| \end{align} with constants depending on $k$, but not on $H$. \\
If $\IM q_H(ir)={\rm o} (|q_H(ir)|)$ for $r \to \infty$ \emph{[}$r \to 0$\emph{]}, then \begin{align} \label{A20} q_H(ikr) \sim \frac{\omega_3(\hat t(r))}{\omega_2(\hat t(r))}, \quad \quad r \to \infty \quad [r \to 0]. \end{align} \end{corollary} \begin{proof} Apply \Cref{T1} to $H$ using $\hat t_{1,H}$, and to $kH$ using $\hat t_{k,kH}$. Then $\hat t_{1,H}(r)=\hat t_{k,kH}(r)$, and we write $\hat t(r)$ for short. Keeping in mind that $q_{kH}(z)=q_H(kz)$, this leads to \[
\IM q_H(ir) \asymp \bigg|q_H(ir)-\frac{\omega_3^{(H)}(\hat t(r))}{\omega_2^{(H)}(\hat t(r))} \bigg| \asymp \frac{1}{r\omega_2^{(H)}(\hat t(r))} \] as well as \[
\IM q_H(ikr) \asymp \bigg|q_H(ikr)-\frac{k\omega_3^{(H)}(\hat t(r))}{k\omega_2^{(H)}(\hat t(r))} \bigg| \asymp \frac{1}{kr \cdot \omega_2^{(H)}(\hat t(r))}. \]
(\ref{A21}) follows. Now (\ref{A20}) is obtained by dividing (\ref{A21}) by $|q_H(ikr)|$. \end{proof}
\section{Behavior of tails of the spectral measure} \label{S5}
\Cref{T1} that approximately determines the imaginary part of $q_H(ir)$ allows us to determine the growth of the spectral measure $\mu_H$ relative to suitable comparison functions. Let us introduce the measure $\tilde\mu_H$ on $[0,\infty)$ by \begin{equation}\label{Y140}
\tilde\mu_H([0,r)) := \tilde\mu_H(r) := \mu_H((-r,r)), \quad \quad r>0. \end{equation} In \Cref{Y190}, equivalent conditions are given for when the function $r \mapsto \tilde \mu_H(r)$ is integrable w.r.t. a given weight function, and also when the measure $\tilde \mu_H$ is finite w.r.t. a rescaling function. \\ On the other hand, we can view $\tilde\mu_H$ as a function of the positive real parameter $r$, and compare this to a given function $\ms g$. This is what we do in \Cref{Y02}. \\ We note that the content of this section is analogous to \cite[Section 4]{langer.pruckner.woracek:heniest}. The availability of formula (\ref{A2}) leads to improved results in the present article; however, we provide less detail than was given in \cite{langer.pruckner.woracek:heniest}. \newline
\noindent The proofs in this section are based on standard theorems of Abelian-Tauberian type, relating $\mu_H$ to its Poisson integral \begin{align} \mc P [\mu_H](z):= \int_{\bb R} \IM \Big( \frac{1}{t-z} \Big) \mkern4mu\mathrm{d} \mu_H(t). \end{align}
By (\ref{A17}), we have $\mc P [\mu_H](z) = \IM q_H(z) - \beta \IM z $. If $\beta =0$, we can proceed with the application of Abelian-Tauberian theorems without problems. The case $\beta >0$ is equivalent to $a$ being the left endpoint of an $H$-indivisible interval of type $\frac{\pi}{2}$, i.e., $\mathring a(H) >a$ and $h_2$ vanishes a.e. on $[a,\mathring a(H))$. The restricted Hamiltonian $H_-:=H\big|_{[\mathring a(H),b)}$ then has the Weyl coefficient $q_{H_-}(z)=q_H(z)-\beta z$ and thus $\IM q_{H_-}(z) = \mc P [\mu_H](z)$. Hence, we can investigate $\mu_H$ by applying the theorems from this section to $H_-$.
\subsection[{Finiteness of the spectral measure w.r.t. given weight functions}]{Finiteness of the spectral measure w.r.t. given weight functions} \label{Y190}
\begin{theorem} \label{AT0}
Let $H$ be a Hamiltonian defined on $[a,b)$, and assume that $h_2$ does not vanish identically in a neighborhood of $a$. Let $\ms f$ be a continuous, non-decreasing function,
and denote by $\mu_H$ the spectral measure of $H$.
\noindent Then the following statements are equivalent:
\begin{Enumerate}
\item
\begin{equation}
\label{Y161}
\int_1^{\infty} \tilde\mu_H(r)\frac{\ms f(r)}{r^3}\mkern4mu\mathrm{d} r<\infty;
\end{equation}
\item
There is $ a' \in (\hat a,b)$ such that
\[
\int_{\hat a}^{a'}
\frac{1}{\omega_2(t)^2}\binom{\omega_2(t)}{-\omega_3(t)}^*H(t)\binom{\omega_2(t)}{-\omega_3(t)}
\cdot\ms f\bigl(\det \Omega (t)^{-\frac12}\bigr)\mkern4mu\mathrm{d} t<\infty.
\]
\end{Enumerate}
\noindent If, in addition, $\ms f$ is differentiable,
then the above conditions hold if and only if there is $ a'\in(\hat a,b)$ such that
\begin{align*}
\int_{\hat a}^{a'}
\frac{(\det \Omega)'(t)}{\omega_2(t)\det \Omega (t)^{\frac 12}} \ms f'\bigl(\det \Omega (t)^{-\frac12}\bigr)\mkern4mu\mathrm{d} t < \infty.
\end{align*} \end{theorem}
\begin{proof}
First note that finiteness of the integrals in the proposition clearly
does not depend on $a'\in(\hat a,b)$.
\noindent Let $\xi$ be the measure on $[1,\infty)$ such that $\ms f(r)=\xi([1,r))$, $r\ge1$.
It follows from \cite[Lemma~4]{kac:1982} that
\[
\int_{[1,\infty)}\frac{\Poi{\mu_H}(ir)}{r}\mkern4mu\mathrm{d}\xi(r) < \infty
\quad\Longleftrightarrow\quad
\int_1^\infty \frac{\tilde\mu_H(r)\ms f(r)}{r^3}\mkern4mu\mathrm{d} r < \infty.
\]
Since $h_2$ does not vanish identically in a neighborhood of $a$, we have $\Poi{\mu_H}=\IM q_H$. By \Cref{T1}, we have
\[
\frac{\Poi{\mu_H}(ir)}{r}
\asymp \frac{1}{r^2 \omega_2(\hat t(r))}
\asymp \frac{\det \Omega (\hat t(r))}{\omega_2(\hat t(r))}.
\] Hence
\begin{equation}\label{Y162}
\int_1^{\infty} \tilde\mu_H(r)\frac{\ms f(r)}{r^3}\mkern4mu\mathrm{d} r<\infty
\;\;\Longleftrightarrow\;\;
\int_{[1,\infty)}\frac{\det \Omega\bigl(\hat t(r)\bigr)}{\omega_2\bigl(\hat t(r)\bigr)}\mkern4mu\mathrm{d}\xi(r)
< \infty.
\end{equation}
We define a measure $\nu$ on $(0,\infty)$ via $\nu((r,\infty))=\frac{\det \Omega (\hat t(r))}{\omega_2(\hat t(r))}$, $r>0$. Let $\hat\nu$ be the measure on $(\hat a,b)$ satisfying $\hat\nu((\hat a,t))=\nu((\hat r(t),\infty))=\frac{\det \Omega (t)}{\omega_2(t)}$, $t>\hat a$.
Integrating by parts (see, e.g., \cite[Lemma~2]{kac:1965}), we can rewrite the first integral in (\ref{Y162}) as follows:
\begin{align*}
& \int_{[1,\infty)}\frac{\det \Omega (\hat t(r))}{\omega_2(\hat t(r))} \mkern4mu\mathrm{d}\xi(r)
= \int_{[1,\infty)}\nu\bigl((r,\infty)\bigr)\mkern4mu\mathrm{d}\xi(r)
\\
&= \int_{[1,\infty)}\!\ms f(r)\mkern4mu\mathrm{d}\nu(r) = \int_{(\hat a,\hat t(1)]}\!\ms f(\hat r(t))\mkern4mu\mathrm{d} \hat\nu(t)
= \int_{(\hat a,\hat t(1)]}\ms f(\hat r(t))\mkern4mu\mathrm{d} \bigg(\frac{\det \Omega}{\omega_2} \bigg)(t)
\\
&= \int_{\hat a}^{\hat t(1)}\ms f\bigl(\hat r(t)\bigr)\cdot \frac{1}{\omega_2(t)^2}\binom{\omega_2(t)}{-\omega_3(t)}^*H(t)\binom{\omega_2(t)}{-\omega_3(t)}\mkern4mu\mathrm{d} t.
\end{align*}
To prove the additional statement, let us assume that $\ms f$ is differentiable. Using a substitution we can rewrite
the second integral in \eqref{Y162} differently:
\begin{align*}
&\int_{[1,\infty)}\frac{\det \Omega\bigl(\hat t(r)\bigr)}{\omega_2\bigl(\hat t(r)\bigr)}\mkern4mu\mathrm{d}\xi(r)
= \int_1^\infty\frac{\det \Omega\bigl(\hat t(r)\bigr)}{\omega_2\bigl(\hat t(r)\bigr)} \ms f'(r)\mkern4mu\mathrm{d} r
\\
&= \int_{\hat t(1)}^{\hat a} \!\frac{\det \Omega (t)}{\omega_2(t)}\ms f'(\hat r(t))\hat r'(t)\mkern4mu\mathrm{d} t
= \frac 12 \int_{\hat a}^{\hat t(1)}\frac{\det \Omega (t)}{\omega_2(t)}\ms f'(\hat r(t))
\frac{(\det \Omega)'(t)}{\det \Omega (t) ^{\frac32}}\mkern4mu\mathrm{d} t.
\end{align*} \end{proof}
\noindent The following result provides, in particular, information on when the measure $\tilde \mu_H$ is finite w.r.t. a regularly varying rescaling function $\ms g$.
\begin{corollary}
Let $H$ be a Hamiltonian on $[a,b)$, and assume that $h_2$ does not vanish identically in a neighborhood of $a$.
Let $\ms g$ be a continuous function that is regularly varying with index $\alpha \in [0,2]$, and denote by $\mu_H$ the spectral measure of $H$ as in (\ref{A17}).
Then, for $\alpha \in (0,2)$ and every $a'\in (\hat a,b)$, the following statements are equivalent: \begin{alignat*}{2}
&\rm (i)\, && \int_{[1,\infty)} \frac{\mkern4mu\mathrm{d}\tilde\mu_H(r)}{\ms g(r)} < \infty; \\[1.5ex]
&\rm (ii)\, &&\int_{\hat a}^{a'}\mkern-10mu
\frac{1}{\omega_2(t)^2}\binom{\omega_2(t)}{-\omega_3(t)}^* H(t)\binom{\omega_2(t)}{-\omega_3(t)}
\frac{\mkern4mu\mathrm{d} t}{\det \Omega (t)\ms g\bigl(\det \Omega (t)^{-\frac 12}\bigr)} < \infty ; \\[1.5ex]
&\rm (iii)\, &&\int_{\hat a}^{a'}\frac{(\det \Omega)'(t)}{\omega_2(t)\det \Omega (t)\ms g\bigl(\det \Omega (t)^{-\frac 12}\bigr)}\mkern4mu\mathrm{d} t
< \infty. \end{alignat*} If $\alpha =0$, then $(iii) \Rightarrow (i)$ and $(iii) \Leftrightarrow (ii)$, while for $\alpha = 2$ we have $(iii) \Rightarrow (i)$ and $(iii) \Rightarrow (ii)$.
\end{corollary}
\begin{proof} The increasing function $\ms f(r):=\int_1^r \frac{t}{\ms g(t)} \mkern4mu\mathrm{d} t$ is regularly varying by Karamata's Theorem (\cite[Propositions 1.5.8 and 1.5.9a]{bingham.goldie.teugels:1989}). Moreover, \begin{equation} \label{Y137}
\ms f(r)\;
\begin{cases}
\; \asymp\frac{r^2}{\ms g(r)}, & 0 \leq \alpha<2,
\\[2ex]
\; \gg\frac{r^2}{\ms g(r)}, & \alpha=2.
\end{cases} \end{equation} Clearly $(iii)$ is equivalent to \[ \int_{\hat a}^{a'}
\frac{(\det \Omega)'(t)}{\omega_2(t)\det \Omega (t)^{\frac 12}} \ms f'\bigl(\det \Omega (t)^{-\frac12}\bigr)\mkern4mu\mathrm{d} t < \infty \] which is the condition appearing in the additional statement of \Cref{AT0}. Applying \Cref{AT0} and using (\ref{Y137}), this is equivalent to (for $\alpha \in [0,2)$) or implies (for $\alpha =2$) both $(ii)$ and \[ \int_1^{\infty} \tilde\mu_H(r)\frac{\mkern4mu\mathrm{d} r}{r\ms g(r)}<\infty. \] By \cite[Proposition 4.5]{langer.pruckner.woracek:heniest}, this is further equivalent to (for $\alpha \in (0,2]$) or implies (for $\alpha =0$) the first item. \end{proof}
\subsection{Comparative growth of the distribution function} \label{Y02}
In this section we investigate $\limsup$-conditions for the quotient $\frac{\tilde\mu_H(r)}{\ms g(r)}$ instead of integrability conditions. Let us introduce the corresponding classes of measures.
\begin{definition}\label{Y77}
Let $\ms g(r)$ be a regularly varying function with index $\alpha \in [0,2]$
and $\lim_{r\to\infty}\ms g(r)=\infty$.
Then we set
\[
\mc F_{\ms g} \mathrel{\mathop:}= \big\{\mu\mid\mkern3mu \tilde\mu(r) \lesssim \ms g(r), \, r \to \infty \big\},\qquad
\mc F_{\ms g}^0 \mathrel{\mathop:}= \big\{\mu\mid\mkern3mu \tilde\mu(r) = {\rm o} ( \ms g(r)), \, r \to \infty\big\},
\]
where again $\tilde\mu(r)\mathrel{\mathop:}=\mu((-r,r))$. \end{definition}
\noindent It should be mentioned that, for non-decreasing $\ms g$, if \[
\int_{[1,\infty)} \frac{\mkern4mu\mathrm{d}\tilde\mu(r)}{\ms g(r)} < \infty , \] then $\mu \in \mc F_{\ms g}^0 \subseteq \mc F_{\ms g}$. For further discussion of this relation, the reader is referred to \cite{langer.pruckner.woracek:heniest}.
\begin{theorem}\label{Y74}
Let $H$ be a Hamiltonian on $[a,b)$, and assume that $h_2$ does not vanish identically in a neighborhood of $a$.
Let $\ms g(r)$ be a regularly varying function with index $\alpha \in [0,2]$ and $\lim_{r\to\infty}\ms g(r)=\infty$. Denote by $\mu_H$ the spectral measure of $H$. For $\alpha < 2$, the following statements hold:
\begin{alignat*}{4}
&\rm (i)\quad &&\mu_H \in \mathcal{F}_{\ms g} \quad &&
\Leftrightarrow
&& \quad \limsup_{t \to \hat a} \frac{1}{\omega_2(t)\ms g \big( \det \Omega (t)^{-\frac 12} \big)}<\infty; \\[1ex]
&\rm (ii)\quad &&\mu_H \in \mathcal{F}_{\ms g}^0 \quad &&
\Leftrightarrow
&&\quad \lim_{t \to \hat a} \frac{1}{\omega_2(t)\ms g \big( \det \Omega (t)^{-\frac 12} \big)}=0. \end{alignat*} If $\alpha =2$, then the right-hand sides of $(i)$ and $(ii)$ imply the respective left-hand sides. \end{theorem}
\begin{proof}
We use \cite[Lemma 4.16]{langer.pruckner.woracek:heniest} which, adapted to our situation, reads as
\[
c_{\alpha}\limsup_{r\to\infty}\biggl(\frac{r}{\ms g(r)}\Poi{\mu_H}(ir)\biggr) \leq \limsup_{r\to\infty}\frac{\tilde\mu_H(r)}{\ms g(r)}
\leq c_{\alpha}' \limsup_{r\to\infty}\biggl(\frac{r}{\ms g(r)}\Poi{\mu_H}(ir)\biggr),
\]
and the second inequality holds even for $\alpha =2$. Since $h_2$ does not vanish identically in a neighborhood of $a$, we have $\Poi{\mu_H}=\IM q_H$. Therefore, the assertion follows from \Cref{T1} and a substitution $r=\hat r(t)$. \end{proof}
\section{Weyl coefficients with tangential behavior} \label{S3}
In this section, we investigate the scenario \begin{align} \label{A16}
\lim_{r \to \infty} \frac{\IM q_H(ir)}{|q_H(ir)|}=0 \quad \quad \text{or} \quad \quad \liminf_{r \to \infty} \frac{\IM q_H(ir)}{|q_H(ir)|}=0. \end{align} This is equivalent to tangential behavior of $q_H(ir)$, i.e., \[ \lim_{r \to \infty} \arg q_H(ir) \in \{0,\pi\} \quad \text{or} \quad \liminf_{r \to \infty} \min \big\{\arg q_H(ir),\, \pi-\arg q_H(ir) \big\}=0. \]
From \Cref{A4} we get that \begin{align} \label{A25}
\lim_{n \to \infty} \frac{\IM q_H(i r_n)}{|q_H(i r_n)|}=0 \,\, \Longleftrightarrow \,\, \lim_{n \to \infty} \frac{\det \Omega(\hat t(r_n))}{(\omega_1 \omega_2)(\hat t(r_n))}=0 \end{align} for every sequence $r_n \to \infty$. All results in this section can be seen from the canonical systems perspective as well as from the Herglotz functions perspective. \\
\noindent To start with, we observe that the second assertion in (\ref{A16}) implies the first unless the limit inferior is assumed only along very sparse sequences. We formulate this fact in the language of Herglotz functions, and prove it within the canonical systems setting. However, we do not know a purely function theoretic proof (which may very well exist in the literature).
\begin{lemma} Let $q$ be a Herglotz function. Suppose there is a sequence $(r_n)_{n \in \bb N}$ with $r_n \to \infty$, $\sup_{n \in \bb N} \frac{r_{n+1}}{r_n} < \infty$, and \[
\lim_{n \to \infty} \frac{\IM q(ir_n)}{|q(ir_n)|}=0. \]
Then $\lim_{r \to \infty} \frac{\IM q(ir)}{|q(ir)|}=0$. \end{lemma} \begin{proof} Let $H$ be a Hamiltonian (on $[0,\infty)$) such that $q=q_H$. \\ Let $d(t):=\frac{\det \Omega(t)}{(\omega_1 \omega_2)(t)}$. Set $t_n := \hat t(r_n)$; then by (\ref{A9}), \[
d(t_n) \asymp \bigg( \frac{\IM q(ir_n)}{|q(ir_n)|} \bigg)^2 \xrightarrow{n \to \infty} 0. \] Suppose that the assertion was not true, i.e., there is a sequence $\xi_1 > \xi_2 > ...$ converging to $0$, such that $d(\xi_k) \geq C>0$ for all $k$. For $k \in \bb N$, set $n(k):=\max \{n \in \bb N \mid t_n > \xi_k \}$. We obtain \begin{align*} &\Big(\frac{r_{n(k)+1}}{r_{n(k)}} \Big)^2 = \frac{\det \Omega(t_{n(k)})}{\det \Omega (t_{n(k)+1})} \geq \frac{\det \Omega(\xi_k)}{\det \Omega (t_{n(k)+1})} \\ &=\frac{d(\xi_k)}{d(t_{n(k)+1})} \cdot \frac{(\omega_1 \omega_2)(\xi_k)}{(\omega_1 \omega_2)(t_{n(k)+1})} \geq \frac{C}{d(t_{n(k)+1})} \xrightarrow{k \to \infty} \infty \end{align*} which contradicts our assumption. \end{proof}
\noindent Recall formulae (\ref{A10}) and (\ref{A9}). On an intuitive level, they tell us that in the case that $\IM q_H(ir) \not\asymp |q_H(ir)|$, the growth of $|q_H(ir)|$ is restricted since $\mathring r(\hat t(r))$ is then far away from $r$. If read in the other direction, this means that if $|q_H(ir)|$ grows quickly and without oscillating too much, then $\mathring r(\hat t(r))$ and $r$ should be close to each other, and hence the quotient $\frac{\IM q_H(ir)}{|q_H(ir)|}$ should not decay.\\ The following definition introduces the notions needed in Theorems \ref{T7} and \ref{T8}, which confirm this intuition.
\begin{definition} \label{A48} \begin{itemize} \item[$\rhd$] A measurable function $f\colon (0,\infty) \to (0,\infty)$ is called \textit{regularly varying (at infinity) with index $\alpha \in \bb R$} if, for any $\lambda >0$, \begin{align} \lim_{r \to \infty}\frac{f(\lambda r)}{f(r)} = \lambda^{\alpha}. \end{align} If $\alpha =0$, then $f$ is also called \textit{slowly varying (at infinity)}. \item[$\rhd$] A measurable function $f\colon (0,\infty) \to (0,\infty)$ is \textit{positively increasing (at infinity)} if there is $\lambda \in (0,1)$ such that \begin{align} \limsup_{r \to \infty} \frac{f(\lambda r)}{f(r)} <1. \end{align} Let us say explicitly that we do not require $f$ to be monotone. \end{itemize} \end{definition}
\stepcounter{lemma} \begin{subtheorem} \label{T7}
Let $q \neq 0$ be a Herglotz function. If $|q(ir)|$ or $\frac{1}{|q(ir)|}$ is positively increasing at infinity (in particular, if $|q(ir)|$ is regularly varying with index $\alpha \neq 0$), then $\IM q(ir) \asymp |q(ir)|$ as $r \to \infty$. \end{subtheorem}
\begin{subtheorem} \label{T8}
Let $q \neq 0$ be a Herglotz function. If $\IM q(ir) = {\rm o} (|q(ir)|)$ as $r \to \infty$, then, for every $\delta \in [0,1)$, \begin{align} \label{A19}
\lim_{r \to \infty} \frac{ q \Big(ir \Big[\frac{\IM q(ir)}{|q(ir)|}\Big]^{\delta} \Big)}{q(ir)} =1. \end{align}
For $k>0$, we also have $\lim_{r \to \infty} \frac{q(ikr)}{q(ir)}=1$, in particular, $|q(ir)|$ is slowly varying at infinity. \end{subtheorem}
\begin{remark}
In \Cref{T7}, the requirement that $|q(ir)|$ should be positively increasing is meaningful. It is not enough that $|q(ir)|$ grows sufficiently fast, say, $|q(ir)| \gtrsim r^{\delta}$ for $r \to \infty$ and some $\delta > 0$. \\
In fact, for any given $\delta \in (0,1)$, we construct in \Cref{T6} a Hamiltonian\footnote{Choose suitable parameters $p,l \in (0,1)$, such that $\delta = \frac{\log l}{\log (pl)}$, i.e., $p=l^{\delta ^{-1}-1}$.} $H$ whose Weyl coefficient $q_H$ satisfies (see \Cref{R2}) $|q_H(ir)| \gtrsim r^{\delta}$ as $r \to \infty$, but \[
\liminf_{r \to \infty} \frac{\IM q_H(ir)}{|q_H(ir)|} = 0. \]
In other words, $\IM q_H(ir) \not\asymp |q_H(ir)|$. \\
Note also that for the above-mentioned $H$, certainly $|q_H(ir)|$ is not slowly varying \cite[Proposition 1.3.6]{bingham.goldie.teugels:1989}. Hence, in \Cref{T8} it is not enough to require $\IM q(ir_n) = {\rm o} (|q(ir_n)|)$ on some sequence $r_n \to \infty$. \end{remark}
\begin{example}
Let $q(z)=\log z$, so that $|q(ir)| = \big[(\log r)^2+\frac{\pi^2}{4}\big]^{1/2}$, which is increasing. However, $\IM q(ir)$ is constant and hence $\IM q(ir) = {\rm o} (|q(ir)|)$ as $r \to \infty$. This does not contradict \Cref{T7}, since $|q(ir)|$ is not positively increasing. \end{example}
\begin{proof}[Proof of \Cref{T7}]
Assume first that $|q(ir)|$ is positively increasing. Then there are $\lambda, \sigma \in (0,1)$ and $R > 0$ such that \begin{align} \label{A18}
\frac{|q(i\lambda r)|}{|q(ir)|} \leq \sigma, \quad \quad r \geq R. \end{align} Let $H$ be a Hamiltonian with Weyl coefficient $q_H=q$, allowing us to use (\ref{A10}). \\
Suppose that the assertion was not true. Then there is a (w.l.o.g., monotone) sequence $r_n \to \infty$ with $\lim_{n \to \infty} \frac{\IM q(ir_n)}{|q(ir_n)|}=0$. Let $m(n)$ be such that \[ \lambda^{m(n)+1} \leq \frac{\mathring r(\hat t(r_n))}{r_n} < \lambda^{m(n)}. \] Note that $m(n) \to \infty$ because of (\ref{A9}). \\ Furthermore, (\ref{A10}) ensures that there is $\beta >0$ with \[
\beta \leq \frac{|q(i \mathring r(\hat t(r)))|}{|q(ir)|}, \quad r \in (0,\infty). \] We will also need that for $0<r<r'$, \[
\frac{|q(ir)|}{|q(ir')|} \asymp \frac{r' \omega_2(\mathring t(r'))}{r \omega_2(\mathring t(r))} \leq \frac{r'}{r} \] because $\omega_2$ is nondecreasing. \\ Choosing $n$ so big that $\mathring r(\hat t(r_n)) \geq R$, we get the contradiction \begin{align*}
\beta &\leq \frac{\big|q \big(i \mathring r(\hat t(r_n)) \big) \big|}{\big|q \big(ir_n \big)\big|} = \frac{\big|q \big(i \mathring r(\hat t(r_n)) \big) \big|}{\big|q \big(i\lambda^{m(n)}r_n \big)\big|} \cdot \prod_{j=0}^{m(n)-1}\frac{\big|q \big(i\lambda^{j+1} r_n \big)\big|}{\big|q \big(i\lambda^j r_n \big)\big|} \\ &\lesssim \frac{\lambda^{m(n)}r_n}{\mathring r(\hat t(r_n))} \sigma^{m(n)} \leq \frac{\sigma^{m(n)}}{\lambda} \xrightarrow{n \to \infty} 0. \end{align*}
This proves the theorem in the case that $|q(ir)|$ is positively increasing. \\
If, on the other hand, $\frac{1}{|q(ir)|}$ is positively increasing, we may set $\tilde q :=-\frac 1q$, for which $|\tilde q(ir)|$ is positively increasing. We obtain \[
\frac{\IM q(ir)}{|q(ir)|} = \frac{\IM \tilde q(ir)}{|\tilde q(ir)|} \asymp 1. \]
Finally, we note that if $|q(ir)|$ is regularly varying with index $\alpha >0$, then it is also positively increasing. If $|q(ir)|$ is regularly varying with index $\alpha < 0$, then $\frac{1}{|q(ir)|}$ is regularly varying with index $-\alpha > 0$ and thus positively increasing. \end{proof}
\noindent Our proof of \Cref{T8} is elementary --- only folklore facts that follow from the Herglotz integral representation (\ref{A17}) are needed. We would be interested in an elementary proof of \Cref{T7} as well, which so far we have not found. \\
\noindent One fact needed in the following proof is the following: For any Herglotz function $q$ and any $z \in \bb C_+$, we have \begin{equation} \label{A5}
|q'(z)| \leq \frac{\IM q(z)}{\IM z}. \end{equation} This can be seen using the representation (\ref{A17}): We write \[ q'(z)=b+\int_{\bb R} \frac{d\sigma (t)}{(t-z)^2} \] and obtain \[
|q'(z)| \leq b+\int_{\bb R} \frac{d\sigma (t)}{|t-z|^2}= \frac{\IM q(z)}{\IM z}. \]
\begin{proof}[Proof of \Cref{T8}] Let $k \in (0,1)$. Then \begin{align} \label{A15}
&|\log q \big(ikr \big)-\log q(ir)|=\Big|\int_{kr}^r i(\log q)'(is) \mkern4mu\mathrm{d} s \Big| \leq \int_{kr}^r \big|(\log q)'(is)\big| \mkern4mu\mathrm{d} s. \end{align} Apply (\ref{A5}) to $\log q$ and to $i\pi-\log q$ to obtain \begin{align*}
&|(\log q)'(is)| \leq \frac 1s \min \big\{\IM [\log q(is)],\pi - \IM [\log q(is)] \big\} \\
&= \frac 1s \min \big\{\arg q(is), \pi-\arg q(is) \big\} \asymp \frac{\IM q(is)}{s|q(is)|} \end{align*}
for all $s>0$. We will also need monotonicity in $s$ of $s\frac{\IM q(is)}{|q(is)|}$. In fact, it is easy to see from (\ref{A17}) that $s \IM q(is)$ is nondecreasing in $s$. Now we can write \[
s\frac{\IM q(is)}{|q(is)|} = \sqrt{s \IM q(is) \cdot s \IM \Big(-\frac{1}{q(is)} \Big)} \]
and hence $s\frac{\IM q(is)}{|q(is)|}$ is nondecreasing in $s$. Putting together and continuing the estimation in (\ref{A15}), we obtain \begin{align}
&|\log q \big(ikr \big)-\log q(ir)| \lesssim \int_{kr}^r \frac{\IM q(is)}{s|q(is)|} \mkern4mu\mathrm{d} s \leq r \frac{\IM q(ir)}{|q(ir)|} \cdot \int_{kr}^r \frac{ds}{s^2} \nonumber \\
&= r \frac{\IM q(ir)}{|q(ir)|}\Big(\frac{1}{kr}-\frac 1r \Big) \asymp \frac{\IM q(ir)}{|q(ir)|} \xrightarrow{r \to \infty} 0. \label{A28} \end{align}
This shows $\lim_{r \to \infty} \frac{q(ikr)}{q(ir)}=1$. To prove (\ref{A19}), set $k(r):= \frac{\IM q(ir)}{|q(ir)|}$ and repeat the calculations up to the second to last term in (\ref{A28}), but with $k$ replaced by $k(r)^{\delta}$, where $\delta \in [0,1)$. Since \[ r k(r) \Big(\frac{1}{rk(r)^{\delta}}-\frac 1r \Big) \asymp k(r)^{1-\delta} \xrightarrow{r \to \infty} 0, \] we arrive at (\ref{A19}). \end{proof} \noindent Note that $\lim_{r \to \infty} \frac{q(ikr)}{q(ir)}=1$ is also a consequence of (\ref{A20}). The preceding proof, in addition to being elementary, is needed to show (\ref{A19}) which, upon taking absolute values, can be seen as slow variation with a rate.
\section{Maximal oscillation within Weyl disks} \label{S4}
\noindent In order to explain the aim of this section, let us first recall the notion of Weyl disks. Let $W(t,z) \in \bb C^{2 \times 2}$ be the fundamental solution of \begin{equation} \frac{d}{dt} W(t,z)J=zW(t,z)H(t), \end{equation} with initial condition $W(a,z)=I$, solving the transpose of equation (\ref{A33}). We define the \textit{Weyl disks} \begin{equation} \label{A34}
D_{t,z}:=\Big\{ \frac{w_{11}(t,z)\tau + w_{12}(t,z)}{w_{21}(t,z)\tau + w_{22}(t,z)} \Big| \tau \in \overline{\bb C_+} \Big\} \subseteq \overline{\bb C_+}, \end{equation}
where $\bb C_+=\{z \in \bb C | \IM z>0\}$, and the closure is taken in the Riemann sphere $\overline{\bb C}=\bb C \cup \{ \infty \}$. For fixed $z \in \bb C_+$ and $t_1 \leq t_2$, we have $D_{t_1,z} \supseteq D_{t_2,z}$, and the disks shrink down to a single point which is $q_H(z)$: \[ \bigcap_{t \in [a,b)} D_{t,z} = \{ q_H(z) \}. \]
\noindent Now we review the estimate (\ref{A43}) which has a geometric interpretation. Namely, the functions $L(r)$ and $A(r)$ give, up to $\asymp$, the imaginary part of the bottom and top point of $D_{\mathring t(r),ir}$, respectively. The size of $\IM q_H(ir)$ relative to $L(r)$ and $A(r)$ thus corresponds to the vertical position of $q_H(ir)$ within the disk $D_{\mathring t(r),ir}$. \\ In this section we give answers to several questions from \cite{langer.pruckner.woracek:heniest}. For instance, the question was raised whether there is a Hamiltonian $H$ for which $L(r) \asymp \IM q_H(ir) \not\asymp A(r)$ for $r \to \infty$. The answer to this particular question is no, cf. \Cref{T10}. However\footnote{In this section, we use the more transparent notation $f(r) \ll g(r)$ instead of $f(r) = {\rm o} (g(r))$.}, $L(r_n) \asymp \IM q_H(ir_n) \ll A(r_n)$ on a subsequence $r_n \to \infty$ is possible, and we provide examples for this in \Cref{T6} and in \Cref{R3}. The Weyl coefficient of the Hamiltonian constructed in \Cref{T6} exhibits ``maximal'' oscillatory behaviour in the sense that it goes back and forth between the bottoms and tops of the disks $D_{\mathring t(r),ir}$.
\begin{proposition} \label{T10} Let $H$ be a Hamiltonian on $[a,b)$. The following statements hold: \begin{itemize} \item[$(i)$] Suppose that $L(r) \not\asymp A(r)$ as $r \to \infty$. Then there exists a sequence $(r_n)_{n \in \bb N}$ such that $r_n \to \infty$, $L(r_n) \ll A(r_n)$, and \[ \IM q_H(ir_n) \gtrsim \sqrt{L(r_n)A(r_n)}. \] \item[$(ii)$] Suppose that $L(r) \not\asymp A(r)$, but not $L(r) \ll A(r)$ as $r \to \infty$. Then there is also $(r_n')_{n \in \bb N}$ with $r_n' \to \infty$, $L(r_n') \ll A(r_n')$, and \begin{equation} \label{A32} \IM q_H(ir_n') \asymp \sqrt{L(r_n')A(r_n')}. \end{equation} \end{itemize}
\end{proposition} \begin{proof} We shorten notation by setting $d(t):=\frac{\det \Omega(t)}{(\omega_1 \omega_2)(t)}$. By assumption, $\liminf_{t \to \hat a} d(t)=0$. Let $c \in (\hat a,b)$ be fixed and set $t_n := \max\{t \leq c \mid d(t) \leq \frac 1n \}$. With $t_n^+:= \hat t(\mathring r(t_n)) \geq t_n$, we have $d(t_n^+) \geq \frac 1n = d(t_n)$ if $n$ is large enough for $t_n^+ \leq c$ to hold. Using (\ref{A9}), we obtain \[ \bigg(\frac{\IM q_H(i \mathring r(t_n))}{A(\mathring r(t_n))} \bigg)^2 \asymp d(t_n^+) \geq d(t_n)=\frac{L(\mathring r(t_n))}{A(\mathring r(t_n))}. \] Note that $L(\mathring r(t_n)) \ll A(\mathring r(t_n))$ because of $d(t_n) \to 0$. \\ Suppose now that $s:=\limsup_{r \to \infty} \frac{L(r)}{A(r)}>0$. Set $\xi_n := \max \{t \leq t_n \mid d(t)=\frac s2\}$ and find $\tau_n$ between $\xi_n$ and $t_n$ such that $d(\tau_n) = \min \{d(t) \mid t \in [\xi_n, t_n]\}$. Certainly, $d(\tau_n) \leq d(t_n)=\frac 1n$ and $d(\tau_n) \leq d(t)$ for all $t \in [\xi_n,c]$. Also note that by the same arguments as above, \begin{align} \label{A26} \IM q_H(i \mathring r(\tau_n)) \gtrsim \sqrt{L(\mathring r(\tau_n))A(\mathring r(\tau_n))}. \end{align}
\noindent We prove next that $\hat r(\tau_n) \ll \mathring r(\xi_n)$. Note that by passing to a subsequence and possibly switching signs of $\omega_3$ by looking at $J^{\top}HJ$ instead of $H$, we can assume that \[ \lim_{n \to \infty} \frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}=1. \] A calculation shows that $\sqrt{(\omega_1 \omega_2)(t)}-\omega_3(t)$ is increasing. Hence \begin{align} \label{A27} &\bigg(\frac{\hat r(\tau_n)}{\mathring r(\xi_n)} \bigg)^2= \frac{(\omega_1 \omega_2)(\xi_n)}{\det \Omega(\tau_n)} \\ &=\frac{(\omega_1 \omega_2)(\xi_n)\Big(1-\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}\Big)}{\Big(\sqrt{(\omega_1 \omega_2)(\tau_n)}-\omega_3(\tau_n)\Big)^2 \Big(1+\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}\Big)} \nonumber\\ &\leq \frac{(\omega_1 \omega_2)(\xi_n)\Big(1-\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}\Big)}{\Big(\sqrt{(\omega_1 \omega_2)(\xi_n)}-\omega_3(\xi_n)\Big)^2 \Big(1+\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}\Big)} \nonumber\\ &= \frac{\Big(1-\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}\Big)}{\Big(1-\frac{\omega_3(\xi_n)}{\sqrt{(\omega_1 \omega_2)(\xi_n)}}\Big)^2 \Big(1+\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}}\Big)} \lesssim 1-\frac{\omega_3(\tau_n)}{\sqrt{(\omega_1 \omega_2)(\tau_n)}} \to 0. \nonumber \end{align} Let $\tau_n^- := \mathring t(\hat r(\tau_n))$. By the calculation above, $\mathring r(\tau_n^-)=\hat r(\tau_n) < \mathring r(\xi_n)$ for large enough $n$, implying $\tau_n^- > \xi_n$ and hence $d(\tau_n^-) \geq d(\tau_n)$. Consequently, \[ \frac{L(\hat r(\tau_n))}{A(\hat r(\tau_n))} =d(\tau_n^-) \geq d(\tau_n) \asymp \bigg(\frac{\IM q_H(i\hat r(\tau_n))}{A(\hat r(\tau_n))} \bigg)^2. \] This means that \[ \IM q_H(i \hat r(\tau_n)) \leq C \sqrt{L(\hat r(\tau_n)) A(\hat r(\tau_n))} \] for some $C>0$ and all large $n$. Recall (\ref{A26}) and choose $C'>0$, w.l.o.g. 
$C'<C$, such that \[ \IM q_H(i \mathring r(\tau_n)) \geq C' \sqrt{L(\mathring r(\tau_n)) A(\mathring r(\tau_n))} \] for large $n$. By continuity, we find, for each large $n$, an $r_n' \in [\mathring r(\tau_n),\hat r(\tau_n)]$ with \[ \frac{\IM q_H(ir_n')}{\sqrt{L(r_n')A(r_n')}} \in [C',C], \] such that $(r_n')_{n \in \bb N}$ satisfies (\ref{A32}). The only thing left to prove is that $L(r_n') \ll A(r_n')$. \\ Suppose not, then on a subsequence we would have $L(r_n') \asymp A(r_n')$. Consider $\xi_n':=\mathring t(r_n') \leq \tau_n$ which would then satisfy $d(\xi_n') \gtrsim 1$ and hence \[ 1-\frac{\omega_3(\xi_n')}{\sqrt{(\omega_1 \omega_2)(\xi_n')}} \gtrsim 1. \] Now look at (\ref{A27}), but with $\xi_n$ replaced by $\xi_n'$. It follows that, for large $n$, $\hat r(\tau_n) < r_n'$, contradicting the choice of $r_n'$. \end{proof}
\noindent In the following definition, we construct a Hamiltonian by prescribing $f:=\frac{\omega_3}{\sqrt{\omega_1 \omega_2}}$ and choosing $f$ to be a highly oscillating function. It should be mentioned that the method we use for prescription works on a general basis: Any locally absolutely continuous function with values in $(-1,1)$ occurs as $\frac{\omega_3}{\sqrt{\omega_1 \omega_2}}$ for some Hamiltonian. Details can be found in the appendix.
\begin{definition} \label{T6} Let $(t_n)_{n \in \bb N}, (\xi_n)_{n \in \bb N}$ be sequences of positive numbers converging to zero, where $\xi_{n+1}<t_n<\xi_n$ for all $n \in \bb N$. Choose $p,l \in (0,1)$ and set \[ f(t_n)=1-p^n, \quad \quad f(\xi_n)=l^n \] and interpolate between those points using monotone and absolutely continuous functions (e.g., linear interpolation). Set \[ \alpha_1(t):= \left\{ \begin{matrix} \frac{f'(t)}{1-f(t)}, & t \in (\xi_{n+1},t_n), \\ 0, & t \in (t_n,\xi_n) \end{matrix}\right. \] and \[ \alpha_2(t):= \left\{ \begin{matrix} \frac{f'(t)}{1-f(t)}, & t \in (\xi_{n+1},t_n), \\ -2\frac{f'(t)}{f(t)}, & t \in (t_n,\xi_n) \end{matrix}\right. \] For $t \in [0,t_1]$, let $\omega_i(t):=\exp \Big(-\int_t^{t_1} \alpha_i(s) \mkern4mu\mathrm{d} s \Big)$, $i=1,2$, and $\omega_3(t):=\sqrt{(\omega_1 \omega_2)(t)} \cdot f(t)$. Set $h_i(t)=\omega_i'(t)$, $i=1,2,3$, $t \in [0,t_1]$. For $t \in (t_1,\infty)$, let $h_1(t):=1$ and $h_2(t):=h_3(t):=0$. Finally, define \[ H_{p,l} :=\begin{pmatrix} h_1 & h_3 \\ h_3 & h_2 \end{pmatrix}. \] \end{definition}
\begin{lemma} $H_{p,l}$ is a Hamiltonian on $[0,\infty)$, and $\omega_i(t)=\int_0^t h_i(s) \mkern4mu\mathrm{d} s$ for $i=1,2,3$ and $t \in [0,t_1]$. Moreover, $0$ is not the left endpoint of an $H_{p,l}$-indivisible interval. \end{lemma} \begin{proof} We write $H$ instead of $H_{p,l}$ for short. First we show that $H(t) \geq 0$ for all $t \in [0,t_1]$. Start by noting that, for $i=1,2$, \[ \frac{h_i(t)}{\omega_i(t)}=(\log \omega_i)'(t)=\alpha_i(t), \] and calculate \begin{align*} &\frac{h_3(t)^2}{(\omega_1 \omega_2)(t)}=\frac{\big[(\sqrt{\omega_1 \omega_2}f )'(t)\big]^2}{(\omega_1 \omega_2)(t)} = \Big(f'(t)+\frac 12 \Big[\frac{h_1(t)}{\omega_1(t)}+\frac{h_2(t)}{\omega_2(t)} \Big]f(t) \Big)^2 \\ &=\Big(f'(t)+\frac{\alpha_1(t)+\alpha_2(t)}{2} f(t) \Big)^2. \end{align*} If $t \in (t_n,\xi_n)$, then this equates to $0$, as does \[ \frac{(h_1h_2)(t)}{(\omega_1 \omega_2)(t)}=\alpha_1(t)\alpha_2(t)=0. \] For $t \in (\xi_{n+1},t_n)$, \[ \Big(f'(t)+\frac{\alpha_1(t)+\alpha_2(t)}{2} f(t) \Big)^2=\Big(\frac{f'(t)}{1-f(t)}\Big)^2=\alpha_1(t)\alpha_2(t)=\frac{(h_1h_2)(t)}{(\omega_1 \omega_2)(t)}. \] In both cases, $\det H(t)=0$. For $i=1,2$, as $\alpha_i(t) \geq 0$, $t \in [0,t_1]$, certainly $\omega_i(t)$ is increasing and thus $h_i(t) \geq 0$. This suffices to show that $H(t) \geq 0$. \\ $H$ is in limit point case since, for $t>t_1$, the trace of $H(t)$ equals $1$. To show that $\omega_i(t)=\int_0^t h_i(s) \mkern4mu\mathrm{d} s$, $i=1,2,3$, $t \in [0,t_1]$, we need to check that $\lim_{t \to 0} \omega_i(t)=0$. For $i=1$, this follows from \begin{equation} \int_0^{t_1} \alpha_1(s) \mkern4mu\mathrm{d} s = \sum_{n=1}^{\infty} \int_{\xi_{n+1}}^{t_n} \frac{f'(s)}{1-f(s)} \mkern4mu\mathrm{d} s=\sum_{n=1}^{\infty} \big[\log (1-l^{n+1})-\log(p^n) \big]=\infty. \end{equation} For $i=2$, it follows from the fact that $\alpha_2(t) \geq \alpha_1(t)$ for all $t \in [0,t_1]$, and for $i=3$ it follows from the definition of $\omega_3$ and the fact that $f(t) <1$, $t \in [0,t_1]$. 
\\ Finally, $0$ is not the left endpoint of an $H$-indivisible interval because \[ \det \Omega(t)=(\omega_1 \omega_2)(t) \big(1-f(t)^2 \big) >0 \] for all $t \in (0,t_1]$. \end{proof}
\noindent We investigate the behaviour for $r \to \infty$ of $\IM q_{H_{p,l}}(ir)$ as well as $L(r)$ and $A(r)$. A rough description of the situation is:
\begin{center} \label{tikz1} \begin{tikzpicture} \draw[scale=0.5, loosely dashed, domain=1:3.65, smooth, variable=\x] plot ({(\x+2)*(\x+2)}, {4*(\x+2)});
\draw[out=30, in=240] (4.5,6.3) to (6,8.5); \draw[out=60, in=190] (6,8.5) to (6.7,9); \draw[out=10, in=175] (6.7,9) to (7.5,8.9); \draw[out=-5, in=190] (7.5,8.9) to (8,9); \draw[out=10, in=205] (8,9) to (9.5,9);
\draw[out=25, in=240] (9.5,9) to (11.5,11.5); \draw[out=60, in=185] (11.5,11.5) to (12.2,12.1); \draw[out=5, in=205] (12.2,12.1) to (15,10.8); \draw[out=25, in=245] (15,10.8) to (15.95,11.95);
\draw[out=30, in=190] (4.5,5.9) to (5.8,6.5); \draw[out=10, in=200] (5.8,6.5) to (8,6.5); \draw[out=20, in=220] (8,6.5) to (10.2,9.2);
\draw[out=40, in=170] (10.2,9.2) to (12,9.6); \draw[out=-10, in=185] (12,9.6) to (13.4,9.4); \draw[out=5, in=225] (13.4,9.4) to (14.8,10.3); \draw[out=45, in=190] (14.8,10.3) to (15.95,10.95);
\draw[out=30, in=245] (4.5,6.05) to (5.7,7.5); \draw[out=65, in=150] (5.7,7.5) to (7.5,8); \draw[out=-30, in=240] (7.5,8) to (9.35,8.2); \draw[out=60, in=221] (9.35,8.2) to (10.03,9.13); \draw[out=41, in=237] (10.03,9.13) to (11.04,10.4); \draw[out=57, in=180] (11.04,10.4) to (11.7,10.9); \draw[out=0, in=140] (11.7,10.9) to (12.3,10.7); \draw[out=-40, in=172] (12.3,10.7) to (13.52,9.75); \draw[out=-8, in=218] (13.52,9.75) to (15,10.59); \draw[out=38, in=241] (15,10.59) to (15.95,11.67);
\node (A) at (4.5,5) {$\mathring r(\xi_n)$}; \node (B) at (5.1,4.55) {$\hat r(\xi_n)$}; \node (C) at (5.85,5) {$\mathring r(t_n)$}; \node (D) at (9.15,4.55) {$\hat r(t_n)$}; \node (E) at (9.9,5) {$\mathring r(\xi_{n+1})$};
\node (F) at (10.5,4.55) {$\hat r(\xi_{n+1})$}; \node (G) at (11.25,5) {$\mathring r(t_{n+1})$}; \node (H) at (13.7,4.55) {$\hat r(t_{n+1})$}; \node (I) at (14.8,5) {$\mathring r(\xi_{n+2})$}; \node (J) at (15.4,4.55) {$\hat r(\xi_{n+2})$};
\draw[loosely dotted] (4.5,5.3) to (4.5,12.2); \draw[loosely dotted] (5.1,4.85) to (5.1,12.2); \draw[loosely dotted] (5.85,5.3) to (5.85,12.2); \draw[loosely dotted] (9.15,4.85) to (9.15,12.2); \draw[loosely dotted] (9.9,5.3) to (9.9,12.2); \draw[loosely dotted] (10.5,4.85) to (10.5,12.2); \draw[loosely dotted] (11.25,5.3) to (11.25,12.2); \draw[loosely dotted] (13.7,4.85) to (13.7,12.2); \draw[loosely dotted] (14.8,5.3) to (14.8,12.2); \draw[loosely dotted] (15.55,4.85) to (15.55,12.2);
\node[scale=0.8] (J) at (7,6.28) {$L(r)$}; \node[scale=1.15] (K) at (7.1,7.15) {$r^{\frac{\log l}{\log (pl)}}$}; \node[scale=0.8] (L) at (7,8.48) {$\IM q_H(ir)$}; \node[scale=0.8] (M) at (7,9.18) {$A(r)$};
\end{tikzpicture} A sketch of the behaviour of $q_{H_{p,l}}$ \end{center}
\noindent Formal details are given in the following theorem as well as in \Cref{R2}.
\begin{theorem} \label{T2} Let $p,l \in (0,1)$. For the Hamiltonian $H=H_{p,l}$ from \Cref{T6} and for all sufficiently large $n \in \bb N$, we have \begin{equation} \label{A8} \mathring r(\xi_n) < \hat r(\xi_n) <\mathring r(t_n) < \hat r(t_n) < \mathring r(\xi_{n+1}). \end{equation} On the intervals delimited by the terms in (\ref{A8}), the functions $L(r)$, $\IM q_H(ir)$, and $A(r)$ behave in the following way: \begin{itemize} \item[$(i)$] $\IM q_H(ir) \asymp A(r)$ uniformly for $r \in [\mathring r(\xi_n),\hat r(\xi_n)]$, $n \in \bb N$. \item[$(ii)$] $\IM q_H(ir) \asymp A(r)$ uniformly for $r \in [\hat r(\xi_n),\mathring r(t_n)]$, $n \in \bb N$. \\[1ex] Moreover, $L(\mathring r(t_n)) \ll A(\mathring r(t_n))$. \item[$(iii)$] $L(r) \ll A(r)$ uniformly for $r \in [\mathring r(t_n), \hat r(t_n)]$, $n \in \bb N$. \\[1ex] In addition, $L(\mathring r(t_n)) \ll \IM q_H(i \mathring r(t_n)) \asymp A(\mathring r(t_n))$ as well as $L(\hat r(t_n)) \asymp \IM q_H(i \hat r(t_n)) \ll A(\hat r(t_n))$. \item[$(iv)$] $L(r) \asymp \IM q_H(ir) \ll A(r)$ uniformly for $r \in [\hat r(t_n),\mathring r(\xi_{n+1})]$, $n \in \bb N$. \end{itemize} \end{theorem}
\noindent The proof of this theorem involves some (partly tedious) computations, some of which are contained in the forthcoming lemma. \\ The symbol $\approx$ denotes equality up to an additive term that is bounded in $n$ and $t$.
\begin{lemma} \label{R4} For the Hamiltonian $H_{p,l}$, the following formulae hold. \newline
\begin{tabular}{|lcl|} \hline$\log \mathring r(t_n)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}$ \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\[1ex] \hline $\log \mathring r(\xi_n)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}+n \frac{\log (pl)}{2}$ \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline \end{tabular} $\mkern-12mu$
\begin{tabular}{|lcl|} \hline $\log \hat r(t_n)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log (pl)}{2}-n \frac{\log l}{2}$ \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline $\log \hat r(\xi_n)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}+n \frac{\log (pl)}{2}$ \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline \end{tabular}
\begin{tabular}{|lcll|} \hline $\log \mathring r(t)$ & $\mkern-10mu \approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(t)$, & $\mkern-10mu t \in [t_n,\xi_n]$. \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline $\log \mathring r(t)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}-n \frac{\log (pl)}{2}+\log (1-f(t))$, & $\mkern-10mu t \in [\xi_{n+1},t_n]$. \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline $\log \hat r(t)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(t)-\frac {\log (1-f(t))}{2}$, & $\mkern-10mu t \in [t_n,\xi_n]$. \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline $\log \hat r(t)$ & $\mkern-10mu\approx$ & $\mkern-10mu-n^2 \frac{\log(pl)}{2}-n \frac{\log (pl)}{2}+\frac {\log (1-f(t))}{2}$, & $\mkern-10mu t \in [\xi_{n+1},t_n]$. \begin{minipage}[c][9mm][t]{0.1mm}
\end{minipage}\\ \hline \end{tabular} \newline \end{lemma}
\begin{proof} First we calculate \begin{align} &\log(\mathring r(t_n))=-\frac 12 \log [(\omega_1 \omega_2)(t_n)] =\frac 12 \int_{t_n}^{t_1} (\alpha_1(s)+\alpha_2(s)) \mkern4mu\mathrm{d} s \nonumber\\ &= \sum_{k=1}^{n-1} \Big( \int_{t_{k+1}}^{\xi_{k+1}} \frac{-f'(s)}{f(s)} \mkern4mu\mathrm{d} s + \int_{\xi_{k+1}}^{t_k} \frac{f'(s)}{1-f(s)} \mkern4mu\mathrm{d} s \Big) \nonumber\\ &= \sum_{k=1}^{n-1} \Big(\log(1-p^{k+1})-(k+1)\log l + \log(1-l^{k+1}) -k\log p \Big) \nonumber\\ \label{A23} &\approx -n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}. \end{align} This also leads to \begin{align*} \log \hat r(t_n) &=-\frac 12 \log (1-f(t_n)^2)+\log \mathring r(t_n) \approx -\frac 12 \log (1-f(t_n))+\log \mathring r(t_n) \\ &\approx -n^2 \frac{\log (pl)}{2}-n \frac{\log l}{2}. \end{align*} If $t \in [t_n,\xi_n]$, then \begin{align*} \log \mathring r(t)=\log \mathring r(t_n)-\int_{t_n}^t \frac{-f'(s)}{f(s)} \mkern4mu\mathrm{d} s \approx -n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(t). \end{align*} If $t \in [\xi_{n+1},t_n]$, then \begin{align*} &\log \mathring r(t)=\log \mathring r(t_n)+\int_t^{t_n} \frac{f'(s)}{1-f(s)} \mkern4mu\mathrm{d} s \\ &\approx -n^2 \frac{\log(pl)}{2}-n \frac{\log (pl)}{2}+\log (1-f(t)). \end{align*} By adding $-\frac 12 \log (1-f(t)^2) \approx -\frac 12 \log (1-f(t))$, the analogous formula for $\hat r(t)$ follows. Lastly, \begin{align*} \log \mathring r(\xi_n) &\approx -n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(\xi_n) \\ &\approx -n^2 \frac{\log(pl)}{2}+n \frac{\log (pl)}{2}. \end{align*} and \begin{align*} \log \hat r(\xi_n) &=-\frac 12 \log (1-f(\xi_n)^2)+\log \mathring r(\xi_n) \approx \log \mathring r(\xi_n). \end{align*} \end{proof}
\begin{proof}[Proof of \Cref{T2}] It follows from \Cref{R4} that $\hat r(\xi_n)<\mathring r(t_n)$ and $\hat r(t_n) < \mathring r(\xi_{n+1})$ for large enough $n$. The remaining two inequalities in (\ref{A8}) follow from the basic fact that $\mathring r(t) < \hat r(t)$ for all $t \in (0,\infty)$. \\ We will now prove $(i)-(iv)$ in reverse order. \\[1.7ex] \underline{$(iv)$:} $\xi_{n+1} \leq \mathring t(r) \leq t_n$ and $\xi_{n+1} \leq \hat t(r) \leq t_n$. By \Cref{R4}, \begin{align*} &-n^2 \frac{\log(pl)}{2}-n \frac{\log (pl)}{2}+\frac 12 \log \big(1-f(\hat t(r))\big) \approx \log \hat r(\hat t(r))=\log r \\ &=\log \mathring r(\mathring t(r)) \approx -n^2 \frac{\log(pl)}{2}-n \frac{\log (pl)}{2}+\log \big(1-f(\mathring t(r))\big). \end{align*} Hence, \begin{align*} \frac{\IM q_H(ir)}{A(r)} \asymp \sqrt{1-f \big(\hat t(r)\big)^2} \asymp 1-f \big(\mathring t(r)\big)^2 =\frac{L(r)}{A(r)}. \end{align*} In addition, \[ \frac{L \big( \mathring r(\xi_{n+1}) \big)}{A \big( \mathring r(\xi_{n+1}) \big)} \asymp 1-f(\xi_{n+1})^2 \asymp 1, \] while \[ \frac{L \big(\hat r(t_n) \big)}{A \big(\hat r(t_n) \big)} \asymp 1-f \big(\mathring t(\hat r(t_n)) \big)^2 \asymp \sqrt{1-f(t_n)^2} = p^{\frac n2} \ll 1. \] \\[1.7ex] \underline{$(iii)$:} $\xi_{n+1} \leq \mathring t(r) \leq t_n$ and $t_n \leq \hat t(r) \leq \xi_n$. Thus \begin{align*} &-n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(\hat t(r))-\frac 12 \log \big(1-f(\hat t(r))\big) \approx \log \hat r(\hat t(r)) \\ &=\log \mathring r(\mathring t(r)) \approx -n^2 \frac{\log(pl)}{2}-n \frac{\log (pl)}{2}+\log \big(1-f(\mathring t(r))\big). \end{align*} Consequently, \[ \frac 12 \log \big(1-f(\hat t(r))\big) \approx n \log p + \log f(\hat t(r))-\log \big(1-f(\mathring t(r))\big), \] which implies \[ \sqrt{1-f(\hat t(r))} \asymp p^n \frac{f(\hat t(r))}{1-f(\mathring t(r))}. \] Let us check that the term $f(\hat t(r))$ can be neglected. 
Using that $f(\mathring t(r)) \leq 1-p^n$, we get \[ \sqrt{1-f(\hat t(r))} \lesssim f(\hat t(r)) \] which is only possible if $f(\hat t(r))$ stays away from $0$. As $f(\hat t(r)) <1$, this means that $f(\hat t(r)) \asymp 1$, leading to \[ \frac{\IM q_H(ir)}{A(r)} \asymp \sqrt{1-f(\hat t(r))} \asymp \frac{p^n}{1-f(\mathring t(r))}. \] Hence, $\IM q_H(i\mathring r(t_n)) \asymp A(\mathring r(t_n))$. Looking back at case $(iv)$, we know that $\IM q_H(i\hat r(t_n)) \asymp L(\hat r(t_n)) \ll A(\hat r(t_n))$. In particular, since $\frac{L(r)}{A(r)}=1-f(\mathring t(r))^2$ is increasing for $r$ in $[\mathring r(t_n),\hat r(t_n)]$, we have $L(r) \ll A(r)$ uniformly on this interval.\\[1.7ex] \underline{$(ii)$:} $t_n \leq \mathring t(r) \leq \xi_n$ and $t_n \leq \hat t(r) \leq \xi_n$, leading to \begin{align*} &-n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(\hat t(r))-\frac 12 \log \big(1-f(\hat t(r))\big) \approx \log \hat r(\hat t(r)) \\ &=\log \mathring r(\mathring t(r)) \approx -n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(\mathring t(r)). \end{align*} Hence \begin{align*} \sqrt{1-f(\hat t(r))} \asymp \frac{f(\hat t(r))}{f(\mathring t(r))} > f(\hat t(r)). \end{align*} In particular, $1-f(\hat t(r))$ stays away from $0$, which means that \[ \frac{\IM q_H(ir)}{A(r)} \asymp \sqrt{1-f(\hat t(r))} \asymp 1. \] In other words, $\IM q_H(ir) \asymp A(r)$ uniformly for $r \in [\hat r(\xi_n), \mathring r(t_n)]$. As we already know, $L(\mathring r(t_n)) \ll \IM q_H(i\mathring r(t_n)) \asymp A(\mathring r(t_n))$.\\[1.7ex] \underline{$(i)$:} $t_n \leq \mathring t(r) \leq \xi_n$ and $\xi_n \leq \hat t(r) \leq t_{n-1}$. In this case \begin{align*} &-n^2 \frac{\log(pl)}{2}+n \frac{\log (pl)}{2}+\frac 12 \log \big(1-f(\hat t(r))\big) \approx \log \hat r(\hat t(r))=\log \mathring r(\mathring t(r)) \\ &\approx -n^2 \frac{\log(pl)}{2}-n \frac{\log \big(\frac lp \big)}{2}+\log f(\mathring t(r)).
\end{align*} Taking into account that $f(\mathring t(r)) \geq l^n$ by definition, it follows that \[ \frac{\IM q_H(ir)}{A(r)} \asymp \sqrt{1-f(\hat t(r))} \asymp \frac{f(\mathring t(r))}{l^n} \asymp 1. \] Therefore, $\IM q_H(ir) \asymp A(r)$ uniformly for $r \in [\mathring r(\xi_n),\hat r(\xi_n)]$. At the left end of this interval, we even have $L(\mathring r(\xi_n)) \asymp A(\mathring r(\xi_n))$ by case $(iv)$. \end{proof}
\noindent Before we state our next result, we note that by definition of $H_{p,l}$, \begin{equation} \label{A13} \liminf_{t \to 0} \frac{\det \Omega(t)}{(\omega_1 \omega_2)(t)}=\liminf_{t \to 0} \big(1-f(t)^2 \big)=0. \end{equation}
In view of (\ref{A9}), we have $\liminf_{r \to \infty} \frac{\IM q_{H_{p,l}}(ir)}{|q_{H_{p,l}}(ir)|}=0$ and hence $\IM q_{H_{p,l}}(ir) \not\asymp |q_{H_{p,l}}(ir)|$. \\
Nevertheless, the following lemma shows that $|q_{H_{p,l}}(ir)|$ grows faster than a power. Recalling \Cref{T7}, this means that $|q(ir)| \gtrsim r^{\delta}$ for $r \to \infty$ is not a sufficient condition for $\IM q(ir) \asymp |q(ir)|$ as $r \to \infty$. Instead, we see that $|q(ir)|$ being positively increasing really means that not only does $|q(ir)|$ grow sufficiently fast, but also without oscillating too much.
\begin{lemma} \label{R2} Let $\delta := \frac{\log l}{\log (pl)} \in (0,1)$. Then \begin{itemize}
\item[$\rhd$] $|q_{H_{p,l}}(ir)| \gtrsim r^{\delta}$, $r \to \infty$,
\item[$\rhd$] $|q_{H_{p,l}}(i\mathring r(\xi_n))| \asymp \mathring r(\xi_n)^{\delta}$. \end{itemize} \end{lemma} \begin{proof} We start the proof with calculating, for $t \in [t_n,\xi_n]$, \begin{align*} &\log \sqrt{ \frac{\omega_1(t)}{\omega_2(t)} }=\frac 12 \log \Big(\frac{\omega_1(t)}{\omega_2(t)} \Big)=\sum_{k=1}^{n-1} \int_{t_{k+1}}^{\xi_{k+1}} \frac{-f'(s)}{f(s)} \mkern4mu\mathrm{d} s + \int_{t_n}^t \frac{f'(s)}{f(s)} \mkern4mu\mathrm{d} s\\ &=\sum_{k=1}^{n-1} \Big(\log (1-p^{k+1})-(k+1)\log l \Big)+\log f(t)-\log (1-p^n) \\ &\approx -(n^2+n)\frac{\log l}{2}+\log f(t). \end{align*} Now we use our formula for $\log \mathring r(t)$: \begin{align} &\log \sqrt{\frac{\omega_1(t)}{\omega_2(t)} } \nonumber \\ &\approx \frac{\log l}{\log(pl)}\log \mathring r(t)+\frac 12 \bigg(\frac{\log(l)\log(\frac lp)}{\log(pl)} -\log l \bigg)n + \bigg(1-\frac{\log l}{\log(pl)} \bigg)\log f(t)\nonumber\\ \label{A22} &=\frac{\log l}{\log(pl)}\log \mathring r(t)+\frac{\log p}{\log(pl)}\big(\log f(t)-n \log l\big), \quad t \in [t_n,\xi_n]. \end{align} Since $f$ was assumed to be monotone decreasing on $[t_n,\xi_n]$, and $\log f(\xi_n)=n \log l$, \[ \log \sqrt{\frac{\omega_1(t)}{\omega_2(t)} } \gtrapprox \frac{\log l}{\log(pl)}\log \mathring r(t)=\delta \log \mathring r(t), \] where $\gtrapprox$ indicates that the inequality holds up to an additive term that is bounded in $n$ and $t$. Therefore \[
|q_{H_{p,l}}(i \mathring r(t))| \asymp \sqrt{\frac{\omega_1(t)}{\omega_2(t)}} \gtrsim \mathring r(t)^{\delta}, \quad t \in [t_n,\xi_n]. \] Observing that $\frac{\omega_1}{\omega_2}$ is constant on $[\xi_{n+1},t_n]$ (since $\alpha_1-\alpha_2=0$ there), we obtain this estimate also for $t \in [\xi_{n+1},t_n]$: \[
|q_{H_{p,l}}(i\mathring r(t))| \asymp \sqrt{\frac{\omega_1(t)}{\omega_2(t)}}=\sqrt{\frac{\omega_1(\xi_{n+1})}{\omega_2(\xi_{n+1})}} \gtrsim \mathring r(\xi_{n+1})^{\delta} \geq \mathring r(t)^{\delta}. \]
Finally, setting $t=\xi_n$ in (\ref{A22}) yields $|q_{H_{p,l}}(i\mathring r(\xi_n))| \asymp \mathring r(\xi_n)^{\delta}$. \end{proof}
\begin{example} \label{R3} Let $H$ be as in \Cref{T6}, but $f(\xi_n)=1-l^{n-1}$ instead, where $l > \sqrt{p}$. Similarly to \Cref{T2}, one can show that \[ L(\hat r(t_n)) \asymp \IM q_H(i \hat r(t_n)) \ll A(\hat r(t_n)). \] However, for our new Hamiltonian, \[ \lim_{t \to 0} \frac{\det \Omega(t)}{(\omega_1 \omega_2)(t)} = \limsup_{t \to 0} \frac{\det \Omega(t)}{(\omega_1 \omega_2)(t)} = \limsup_{t \to 0} \big( 1-f(t)^2 \big) = 0 \] as opposed to (\ref{A13}).
\end{example}
\section{Reformulation for Krein strings} \label{S6}
Recall that a \textit{Krein string} is a pair $S[L,\mathfrak{m}]$ consisting of a number $L \in (0,\infty ]$ and a nonnegative Borel measure $\mathfrak{m}$ on $[0,L]$, such that $\mathfrak{m}([0,t])$ is finite for every $t \in [0,L)$, and $\mathfrak{m}(\{L\})=0$. To this pair we associate the equation \begin{equation} y_+'(x)+z\int_{[0,x]} y(t)\mkern4mu\mathrm{d} \mathfrak{m}(t)=0, \quad \quad x \in [0,L), \end{equation} where $y_+'$ denotes the right-hand derivative of $y$, and $z$ is a complex spectral parameter. \\ For each string, we can construct a function $q_S$ called the \textit{principal Titchmarsh-Weyl coefficient} of the string (\cite{langer.winkler:1998} following \cite{kac.krein:1968}). This function belongs to the Stieltjes class, i.e., it is analytic on $\bb C \setminus [0,\infty)$, its imaginary part is nonnegative on $\bb C_+$, and its values on $(-\infty ,0)$ are positive. The correspondence between Krein strings and functions of Stieltjes class is bijective, as was shown by M.G.Krein. \newline
\noindent \Cref{A41} below is the reformulation of \Cref{T1} for the Krein string case.
\begin{theorem} \label{A41} Let $S[L,\mathfrak{m}]$ be a Krein string and set \begin{equation} \delta (t) := \bigg(\int_{[0,t)} \xi^2 \mkern4mu\mathrm{d} \mathfrak{m}(\xi) \bigg)\cdot \bigg(\int_{[0,t)} \mkern4mu\mathrm{d} \mathfrak{m}(\xi) \bigg)-\bigg(\int_{[0,t)} \xi \mkern4mu\mathrm{d} \mathfrak{m}(\xi) \bigg)^2. \end{equation} for $t \in [0,L)$. Let \[
\hat \tau (r):=\inf \big\{t>0 \, \big| \, \frac{1}{r^2} \leq \delta (t) \big\}, \quad \quad r \in (0,\infty). \] We set \begin{align} f(r) :=\mathfrak{m}([0,\hat \tau (r)))+ \mathfrak{m}(\{\hat \tau (r)\}) \frac{\frac{1}{r^2}-\delta (\hat \tau (r))}{\delta (\hat \tau (r)+)-\delta (\hat \tau (r))} \end{align} if $\delta$ is discontinuous at $\hat \tau(r)$, and $f(r):=\mathfrak{m}([0,\hat \tau (r)))$ otherwise. Then \begin{align} \IM q_S(ir) \asymp \frac{1}{rf(r)}, \quad \quad r \in (0,\infty ), \end{align} with constants independent of the string. \end{theorem}
\noindent Before proving \Cref{A41}, we need to introduce the concept of dual strings as well as a Hamiltonian associated to a string. Writing \[ m(t) := \mathfrak{m}([0,t)), \quad \quad t \in [0,L) \] we can define the dual string $S[\hat L , \hat{\mathfrak{m}}]$ of $S[L,\mathfrak{m}]$ by setting \[ \hat L :=\left\{ \begin{array}{ll} m(L) & \text{if } L+m(L)=\infty ,\\ \infty & \text{else} \end{array} \right. \] and \[
\hat m (\xi):=\inf \{t >0 \, | \, \xi \leq m (t)\}. \] The function $\hat m$ is increasing and left-continuous and thus gives rise to a nonnegative Borel measure $\hat{\mathfrak{m}}$. \newline
\noindent The Hamiltonian defined by \begin{equation} \label{A39} H(t) := \left\{ \begin{array}{ll} \begin{pmatrix} \hat m(t)^2 & \hat m(t) \\ \hat m(t) & 1 \end{pmatrix} & \text{if } t \in [0,\hat L], \\[3ex] \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} & \text{if } \hat L + \int_0^{\hat L} \hat m(t)^2 \mkern4mu\mathrm{d} t <\infty ,\,\, \hat L<t<\infty \end{array} \right. \end{equation} then satisfies $q_S=q_H$, see e.g. \cite{kaltenbaeck.winkler.woracek:2007}.
\begin{proof}[Proof of \Cref{A41}] In view of \Cref{T1} and the fact that $q_S=q_H$ for the Hamiltonian $H$ defined in (\ref{A39}), our task is to express $\hat t_H(r)$ in terms of the string. If $\delta (\hat \tau (r)) = \frac{1}{r^2}$, this is easy because of \cite[Corollary 3.4]{kaltenbaeck.winkler.woracek:2007} giving \begin{align*} \det \Omega_H(m (\hat \tau (r))) = \delta (\hat \tau (r))=\frac{1}{r^2} \end{align*} and hence $\hat t_H(r)=m (\hat \tau (r))$. \\ Otherwise, we have $\delta (\hat \tau (r))<\frac{1}{r^2}$ and $\delta (\hat \tau (r)+) \geq \frac{1}{r^2}$. Using again \cite[Corollary 3.4]{kaltenbaeck.winkler.woracek:2007}, we have \begin{equation} \label{A40} \det \Omega_H(m (\hat \tau (r)))= \delta (\hat \tau (r)) < \frac{1}{r^2}, \quad \quad \det \Omega_H(m (\hat \tau (r)+))= \delta (\hat \tau (r)+) \geq \frac{1}{r^2} \end{equation} which tells us that $\hat t_H(r) \in \big(m (\hat \tau (r)),m (\hat \tau (r)+) \big]$. By \cite[Lemma 3.1]{kaltenbaeck.winkler.woracek:2007}, $\hat m$ is constant on this interval. Therefore, for $t \in \big(m (\hat \tau (r)),m (\hat \tau (r)+) \big]$, \begin{align*} \det \Omega_H(t) &=\bigg(\int_0^{m(\hat \tau (r))} \hat m (x)^2 \mkern4mu\mathrm{d} x +\big(t-m(\hat \tau (r))\big)\hat m (t)^2 \bigg)\cdot t \\ &-\bigg(\int_0^{m(\hat \tau (r))} \hat m (x) \mkern4mu\mathrm{d} x +\big(t-m(\hat \tau (r))\big)\hat m (t) \bigg)^2 = c_1(r)t+c_2(r) \end{align*} for some constants $c_1(r),c_2(r)$. Using (\ref{A40}), this leads to \[ \det \Omega_H(t)=\delta (\hat \tau (r)) + \frac{t-m(\hat \tau (r))}{m(\hat \tau (r)+)-m(\hat \tau (r))} \big(\delta (\hat \tau (r)+)-\delta (\hat \tau (r))\big). \] If we equate this to $\frac{1}{r^2}$, we find that \[ \hat t_H(r)=m(\hat \tau (r))+ \big(m(\hat \tau (r)+)-m(\hat \tau (r)) \big) \frac{\frac{1}{r^2}-\delta (\hat \tau (r))}{\delta (\hat \tau (r)+)-\delta (\hat \tau (r))}=f(r). 
\] Now we have $\omega_{H;2}(t)=\int_0^t h_2(s) \mkern4mu\mathrm{d} s =t$, and \Cref{T1} now shows \[ \IM q_S(ir)=\IM q_H(ir) \asymp \frac{1}{r\hat t_H(r)}=\frac{1}{rf(r)}. \] \end{proof}
\appendix \appendixpage \section{A construction method for Hamiltonians with prescribed angle of $\boldsymbol{q_H}$} \label{APP} \setcounter{lemma}{0}
Let $H$ be a Hamiltonian on $(a,b)$. Assume for simplicity that $\hat a=a$, i.e., $a$ is not the left endpoint of an $H$-indivisible interval. As discussed at the beginning of \Cref{S3}, the behavior of \[ \frac{\det \Omega (t)}{(\omega_1\omega_2)(t)}=1 - \frac{\omega_3(t)^2}{(\omega_1\omega_2)(t)} >0 \] towards the left endpoint $a$ corresponds to the angle of $q_H(ir)$ for $r \to \infty$. It is thus desirable to be able to construct examples of Hamiltonians with prescribed $\frac{\omega_3(t)}{\sqrt{(\omega_1\omega_2)(t)}}$, which is what we did in \Cref{T6}. We give now a general version of this idea. \\ The following result is formulated for Hamiltonians in limit circle case, making the statement cleaner. When we made use of this construction method in \Cref{T6}, we obtained a Hamiltonian in limit point case by simply appending an infinitely long indivisible interval.
\begin{proposition} \label{T3} Let $f$ be locally absolutely continuous on $(a,b]$ and such that $f(t) \in (-1,1)$ for all $t \in (a,b]$. Then there is a Hamiltonian, in limit circle case at $b$, with the properties \begin{itemize} \item[(i)] $a=\hat a$ is not the left endpoint of an $H$-indivisible interval, and \item[(ii)] $f(t)=\frac{\omega_3(t)}{\sqrt{(\omega_1\omega_2)(t)}}$ for all $t \in (a,b]$. \end{itemize} In addition, let \[
\Delta(f):=\frac{2|f'|}{1-\sgn (f')f} \] which is in $L_{loc}^1((a,b])$. Then all possible choices for $(\omega_1\omega_2)(t)$ are given by functions of the form \begin{align*} \exp \Big(c-\int_t^b g(s) \mkern4mu\mathrm{d} s \Big) \end{align*}
where $c \in \bb R$, $g \in L_{loc}^1((a,b]) \setminus L^1((a,b])$ with $g(t) \geq \Delta(f)(t)$ and $g(t)>0$ for $t \in (a,b]$ a.e. \end{proposition} \begin{proof} If $H$ is given and such that $(i),(ii)$ hold, then clearly $f$ is locally absolutely continuous and takes values in $(-1,1)$. \\ Let $f$ be as in the statement. Then clearly $f' \in L_{loc}^1((a,b])$. Also, the denominator of $\Delta (f)$ is locally bounded below by a positive number, and hence $\Delta(f) \in L_{loc}^1((a,b])$. We check the conditions that $\omega_1(t),\omega_2(t)$ must satisfy in order that they, together with $\omega_3(t):=\sqrt{(\omega_1\omega_2)(t)}f(t)$, give rise to a Hamiltonian through $h_i(t):=\omega_i'(t)$, $i=1,2,3$. Clearly, $\omega_1,\omega_2$ have to be increasing, absolutely continuous on $[a,b]$ and satisfy $\omega_1(0)=\omega_2(0)=0$. Moreover, we want \[ (h_1h_2)(t) \geq h_3(t)^2=\Big(\sqrt{(\omega_1\omega_2)(t)}f'(t)+\frac{h_1(t)\omega_2(t)+\omega_1(t)h_2(t)}{2 \sqrt{(\omega_1\omega_2)(t)}}f(t) \Big)^2. \] This is equivalent to \[ \frac{(h_1h_2)(t)}{(\omega_1\omega_2)(t)} \geq \bigg(f'(t)+\frac 12 \Big(\frac{h_1(t)}{\omega_1(t)}+\frac{h_2(t)}{\omega_2(t)} \Big) f(t)\bigg)^2 \] Setting \[ \alpha_i(t):=\frac{h_i(t)}{\omega_i(t)}, \quad i=1,2, \quad \quad g:=\alpha_1+\alpha_2 \in L_{loc}^1((a,b]), \] the inequality takes the form \[ \alpha_i (g-\alpha_i) \geq \Big(f'+\frac 12 gf \Big)^2 \] which is equivalent to \begin{align} \label{A7} \alpha_i \in \Bigg[\frac g2-\sqrt{\frac{g^2}{4}-\Big(f'+\frac 12 gf \Big)^2},\frac g2+\sqrt{\frac{g^2}{4}-\Big(f'+\frac 12 gf \Big)^2}\Bigg]. \end{align} In particular, \begin{align*}
&\frac{g^2}{4}-\Big(f'+\frac 12gf \Big)^2 \geq 0 \, \Longleftrightarrow \, \frac g2 \geq \Big|f'+\frac 12gf \Big| \\ &\, \Longleftrightarrow g \geq \frac{2f'}{1-f} \text{ and } g \geq \frac{-2f'}{1+f} \\
&\, \Longleftrightarrow g \geq \frac{2|f'|}{1-\sgn(f')f} =\Delta(f). \end{align*} Since $\Delta(f) \in L_{loc}^1((a,b])$, we can find $g \in L_{loc}^1((a,b])$, $g(t) \geq \Delta (f)(t)$ a.e. on $(a,b]$, and additionally, $g \not\in L^1((a,b])$. Choose measurable functions $\alpha_1,\alpha_2$ such that \begin{itemize} \item[$\rhd$] $\alpha_1+\alpha_2=g$, \item[$\rhd$] (\ref{A7}) holds for $\alpha_1$ at almost all $t \in (a,b)$ (and hence for $\alpha_2$), and \item[$\rhd$] $\alpha_1,\alpha_2 \not\in L^1((a,b])$. \end{itemize} Note that $\alpha_1$ and $\alpha_2$ belong to $L_{loc}^1((a,b])$ since $g$ does, and that such a choice is possible because one can always take $\alpha_1=\alpha_2=\frac g2$. \\ From the construction it is clear that for a Hamiltonian $H$ with $\frac{d}{dt} [\log \omega_i(t)]=\alpha_i(t)$, $i=1,2$, there is $c \in \bb R$ such that \[ \omega_i(t):=\exp \Big(c-\int_t^b \alpha_i(s) \mkern4mu\mathrm{d} s \Big), \quad \quad i=1,2. \] \end{proof}
\section{Calculations for \Cref{A24}} \label{APPB}
Let $H$ be the Hamiltonian from \Cref{A24}, \begin{align*} H(t)= t^{\alpha -1}\left(\begin{matrix}
|\log t|^{\beta_1} & |\log t|^{\beta_3} \\
|\log t|^{\beta_3} & |\log t|^{\beta_2} \\ \end{matrix} \right), \quad \quad t \in (0,\infty), \end{align*} where $\alpha > 0$, $\beta_1, \beta_2 \in \bb R$ such that $\beta_1 \neq \beta_2$, and $\beta_3 := \frac{\beta_1 + \beta_2}{2}$. We carry out the calculations to justify the claimed asymptotics from the example. They were communicated by Matthias Langer. \\
In order to calculate $\mathring t(r)$ and $\hat t(r)$, two lemmas are needed.
\begin{lemma} \label{approx_inv}
Let $f: (0,\varepsilon) \to (0,\infty)$ be increasing and $f(t) \sim ct^a |\log t|^b$ as $t \to 0$, for $a>0$, $c>0$, $b \in \bb R$. Then \[
f^{-1}(s) \sim \Big(c^{-1}a^{b}s|\log s|^{-b} \Big)^{\frac 1a}, \quad s \to 0. \] \end{lemma} \begin{proof} We have \begin{align} \label{sim}
\lim_{t \to 0} \,\frac{f(t)}{t^a|\log t|^b} = c. \end{align} Therefore, \[
\lim_{t \to 0} \Big[\log f(t)-a\log t-b \log |\log t| \Big]= \log c \] and further \[
\lim_{t \to 0} \Big[\frac{\log f(t)}{\log t} - a \Big]= \lim_{t \to 0} \Big[\frac{\log f(t)}{\log t} - a - b \frac{\log |\log t|}{\log t} \Big]= 0. \]
In other words, $|\log t| \sim \frac 1a |\log f(t)|$. At the same time, by (\ref{sim}), \[
t \sim \Big(c^{-1} f(t) |\log t|^{-b} \Big)^{\frac 1a} \sim \Big(c^{-1} f(t) [\frac 1a |\log f(t)|]^{-b} \Big)^{\frac 1a} \] which implies the assertion. \end{proof}
\begin{lemma} \label{int_asy} Let $a>-1$ and $b \in \bb R$. Then \begin{align*} \int_0^t &s^a (-\log s)^b \mkern4mu\mathrm{d} s = \frac{1}{a+1}t^{a+1} (-\log t)^b \\ &\cdot \Big[1-\frac{b}{a+1}(-\log t)^{-1} + \frac{b(b-1)}{(a+1)^2}(-\log t)^{-2}+{\rm O} \big((-\log t)^{-3} \big) \Big] \end{align*} \end{lemma} \begin{proof} \begin{align}
&\int_0^t s^a (-\log s)^b \mkern4mu\mathrm{d} s=\frac{1}{a+1}s^{a+1} (-\log s)^b \Big|_0^t - \frac{1}{a+1} \int_0^t s^a b (-\log s)^{b-1} \mkern4mu\mathrm{d} s \nonumber \\ \label{part_int_sa_logs_b} &= \frac{1}{a+1}t^{a+1} (-\log t)^b - \frac{b}{a+1} \int_0^t s^a (-\log s)^{b-1} \mkern4mu\mathrm{d} s \end{align} Using (\ref{part_int_sa_logs_b}) two more times: \begin{align*} &= \frac{1}{a+1}t^{a+1} (-\log t)^b \\ &\quad- \frac{b}{a+1} \Big[\frac{1}{a+1}t^{a+1} (-\log t)^{b-1} - \frac{b-1}{a+1}\int_0^t s^a (-\log s)^{b-2} \mkern4mu\mathrm{d} s \Big] \\ &=\frac{1}{a+1}t^{a+1} (-\log t)^b \Big[1-\frac{b}{a+1}(-\log t)^{-1}+\frac{b(b-1)}{(a+1)^2}(-\log t)^{-2}\Big] \\ &\quad+c(a,b)\int_0^t s^a (-\log s)^{b-3} \mkern4mu\mathrm{d} s. \end{align*} The assertion follows using Karamata's Theorem \cite[Prop. 1.5.8 and 1.5.9a]{bingham.goldie.teugels:1989}. \end{proof}
We are now in position to determine $L(r)$, $\IM q_H(ir)$, and $A(r)$. \newline
\noindent \underline{Calculation of $\mathring t(r)$:}
By Karamata's Theorem we have \[ \omega_i(t) \sim \frac{1}{\alpha}t^{\alpha} (-\log t)^{\beta_i}, \quad \quad i=1,2,3. \] Hence \begin{align} \label{m1m2} (\omega_1\omega_2)(t) \sim \frac{1}{\alpha ^2}t^{2\alpha} (-\log t)^{\beta_1+\beta_2}. \end{align} Applying \Cref{approx_inv} yields \[ (\omega_1\omega_2)^{-1}(s) \sim c \cdot s^{\frac{1}{2\alpha}} (-\log s)^{-\frac{\beta_3}{\alpha}}. \] We arrive at \begin{align} \label{tring} \mathring t(r)=(\omega_1\omega_2)^{-1}(r^{-2}) \sim c' \cdot r^{-\frac{1}{\alpha}} (\log r)^{-\frac{\beta_3}{\alpha}}. \end{align}
\noindent \underline{Calculation of $A(r)$:} \begin{align*} A(r)&= \sqrt{\frac{\omega_1(\mathring t(r))}{\omega_2(\mathring t(r))}} \sim (-\log \mathring t(r))^{\frac{\beta_1-\beta_2}{2}} \sim \Big[-\log \Big(r^{-\frac{1}{\alpha}} (\log r)^{-\frac{\beta_3}{\alpha}} \Big) \Big]^{\frac{\beta_1-\beta_2}{2}} \\ &\sim (\alpha \log r)^{\frac{\beta_1-\beta_2}{2}}. \end{align*}
\noindent \underline{Calculation of $\hat t(r)$:} We use \Cref{int_asy} to calculate $\omega_i(t)$ with more precision: \begin{align*} \omega_i(t)=&\int_0^t s^{\alpha-1} (-\log s)^{\beta_i} \mkern4mu\mathrm{d} s = \frac{1}{\alpha}t^{\alpha} (-\log t)^{\beta_i} \\ &\cdot \Big[1-\frac{\beta_i}{\alpha}(-\log t)^{-1}+\frac{\beta_i (\beta_i -1)}{\alpha ^2}(-\log t)^{-2}+{\rm O} \big((-\log t)^{-3} \big) \Big] \end{align*} We get \begin{align} &\det \Omega (t)=\frac{1}{\alpha^2}t^{2\alpha} (-\log t)^{\beta_1+\beta_2} \nonumber\\ &\cdot \Big[1-\frac{\beta_1+\beta_2}{\alpha}(-\log t)^{-1}+\frac{\beta_1 (\beta_1-1)+\beta_2 (\beta_2-1) + \beta_1 \beta_2}{\alpha^2}(-\log t)^{-2} \nonumber\\ &-\Big(1-\frac{2\beta_3}{\alpha}(-\log t)^{-1}+\frac{2\beta_3 (\beta_3-1) + \beta_3^2}{\alpha^2}(-\log t)^{-2}+{\rm O} \big((-\log t)^{-3} \big) \Big)\Big] \nonumber\\ &=\frac{1}{\alpha^2}t^{2\alpha} (-\log t)^{\beta_1+\beta_2} \Big[\Big(\frac{\beta_1-\beta_2}{2\alpha} \Big)^2 (-\log t)^{-2} + {\rm O} \big((-\log t)^{-3} \big) \Big] \nonumber\\ \label{detm} &\sim c \cdot t^{2\alpha} (-\log t)^{2(\beta_3-1)}. \end{align} By \Cref{approx_inv}, \[ \big(\det \Omega \big)^{-1}(s) \sim c' \cdot s^{\frac{1}{2\alpha}} (-\log s)^{-\frac{\beta_3-1}{\alpha}} \] and further \[ \hat t(r) = \big(\det \Omega \big)^{-1}(r^{-2}) \sim c'' \cdot r^{-\frac{1}{\alpha}} (\log r)^{-\frac{\beta_3-1}{\alpha}}. \]
\noindent \underline{Calculation of $\IM q_H(ir)$:} \begin{align*} \IM q_H(ir)&\asymp \frac{1}{r\omega_2(\hat t(r))} \sim \frac{\alpha}{r} \big(\hat t(r) \big)^{-\alpha} \big(-\log \hat t(r) \big)^{-\beta_2} \\ &\sim c''' \cdot \frac{\alpha}{r} r (\log r)^{\beta_3-1} (\log r)^{-\beta_2} = c''' \cdot (\log r)^{\frac{\beta_1-\beta_2}{2}-1}. \end{align*}
\noindent \underline{Calculation of $L(r)$:} Using (\ref{detm}), (\ref{m1m2}) and (\ref{tring}), we have \[ \frac{\det \Omega (\mathring t(r))}{(\omega_1\omega_2)(\mathring t(r))} \sim \Big(\frac{\beta_1-\beta_2}{2\alpha} \Big)^2 (\alpha \log r)^{-2}. \] Multiplying by $A(r)$, we obtain \[ L(r) \sim c (\log r)^{\frac{\beta_1-\beta_2}{2}-2}. \]
\subsection*{Acknowledgements.} \noindent This work was supported by the Austrian Science Fund and the Russian Federation for Basic Research (grant number I-4600). Furthermore, I would like to thank my supervisor Harald Woracek for his support and the expertise he provided.
\printbibliography
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Quantification of Continuous Variable Entanglement with only Two Types of Simple Measurements}
\author{Gustavo Rigolin} \ead{rigolin@ifi.unicamp.br}
\author{Marcos C. de Oliveira} \ead{marcos@ifi.unicamp.br}
\address{ Instituto de F\'{\i}sica Gleb Wataghin, Universidade Estadual de Campinas, Unicamp, 13083-970, Campinas, S\~ao Paulo, Brasil}
\begin{abstract}
Here we propose an experimental set-up in which it is possible to obtain the entanglement of a two-mode Gaussian state, be it pure or mixed, using only simple linear optical measurement devices. After a proper unitary manipulation of the two-mode Gaussian state only number and purity measurements of just one of the modes suffice to give us a complete and exact knowledge of the state's entanglement. \end{abstract}
\begin{keyword} Quantum Information \sep Entanglement production, characterization, and manipulation \PACS 03.67.-a \sep 03.67.Mn \end{keyword} \end{frontmatter}
\section{Introduction}
Quantum theory of information offers, in principle, a plethora of resources for the processing of computational and informational tasks in an efficient way, otherwise unattainable in the classical world \cite{Ben00,Bra05,Ade07,eisert1}. Initially, all the quantum communication protocols were developed for discrete systems (qubits) and later an equivalent formulation for the quantum continuous variable (CV) setting was developed. CV-systems
are described by canonically conjugate operators like position and momentum or the quadrature amplitudes of the quantized electromagnetic field. The main reason for the development of a CV quantum information theory is of a practical order: the crucial steps in quantum communication protocols are relatively simple to implement via the available experimental techniques of quantum optics \cite{Bra05}. Some of these tasks, e.g. quantum cryptography \cite{Eke91,Coh97,Per00}, superdense coding \cite{Wie92,Ban99,Bra00} and teleportation \cite{Ben93,Vai94,Bra98}, require entanglement as a key ingredient for their successful implementation. Therefore, a great deal of effort has been put into the qualitative and quantitative study of entanglement. However, it is still a difficult problem to obtain in a simple experimental fashion the degree of entanglement of a quantum system. Practically all the useful entanglement measures developed so far can only be computed with a complete knowledge of the quantum state, whose reconstruction is not a trivial experimental task.
A direct scheme for the measurement of the entanglement of a pair of pure qubits, without state reconstruction, was recently proposed \cite{Min05} and experimentally realized \cite{Wal06}. For continuous variable systems (CV-systems), of which Gaussian states, efficiently generated in the laboratory \cite{Bra05,eisert1}, are a famous example, interesting experimental proposals for the measurement of entanglement were presented \cite{Ade04a,Ade04b,Fiu04} and implemented \cite{Gia04} recently. In particular, it was shown that by measuring the marginal (local) and global purities of a two-mode Gaussian state one is able to estimate the state negativity \cite{Wer02}, although its exact value cannot be achieved via this procedure. The negativity, together with the entanglement of formation \cite{Gie03}, are the most important entanglement measures for Gaussian states, where the latter is only analytically computable for symmetric Gaussian states.
Here we present a simple experimental scheme allowing the measurement of the exact value of the entanglement of formation for an arbitrary symmetric two-mode Gaussian state. In fact, the scheme can also be adapted to obtain the exact value of the negativity for an arbitrary two-mode Gaussian state as well, be it symmetric or not. The only prior knowledge needed for entanglement quantification, which is also a requisite of all previous schemes, is upon the Gaussian nature of the two-mode state. Whether it is a pure or a mixed state is irrelevant for our purposes. Furthermore, even if we do not know that we are dealing with a Gaussian state, our method can be used to experimentally apply the Simon separability test \cite{Sim00}, which is a sufficient condition for the existence of CV-entanglement.
A most remarkable feature of our scheme, which differentiates it from previous proposals, is that all the information relevant to the \textit{exact} quantification of the entanglement of the state can be obtained, after a non-local unitary operation on the two-modes, via \textit{local projective measurements}. After combining the two-modes in a beam splitter (non-local unitary operation) the entanglement can be quantified solely by measurements of the purity and the number of photons of just one of the modes. Therefore, all the information concerning the entanglement of the two-modes is transferred to local properties of one of the modes \textit{after} a non-local unitary operation. It is in this sense that the entanglement can be seen as determined by two single types of local projective measurements with no need for quantum state reconstruction or, equivalently, without the knowledge of the two-mode covariance matrix.
\section{Entanglement of a two-mode Gaussian state}
A two-mode Gaussian state $\rho$ is completely cha\-rac\-te\-rized by its covariance matrix, with elements $\gamma_{ij}$ $=$ $\langle R_i R_j + R_j R_i\rangle_{\rho}$ $-$ $2$$\langle R_i\rangle_{\rho}$$\langle R_j \rangle_{\rho}$, where $\langle R_i \rangle_{\rho}$ is the quantum expectation value of the observable $R_i$ and $(R_1,R_2,R_3,R_4)$ $=$ $(X_1,P_1,X_2,P_2)$ are the quadratures of the electromagnetic field modes \cite{footnote1}. Any covariance matrix can be brought via local symplectic transformations, i.e. without affecting its entanglement content, to the standard form \cite{Sim00}
\begin{equation} \gamma = \left( \begin{array}{cc} A & C_{\gamma} \\ C_{\gamma}^{T} & B \end{array} \right), \label{simon-standard} \end{equation} where $A, B$, and $C_{\gamma}$ are $2\times 2$ real diagonal matrices given as \cite{Gie03} $A=\mbox{diag}(n,n)$, $B=\mbox{diag}(m,m)$, and $C_{\gamma}=\mbox{diag}(k_x,-k_p)$. It can be shown \cite{Sim00} that a two-mode Gaussian state is not entangled if $k_x k_p \geq 0$. However, it may or may not be entangled when $k_x k_p < 0$. A Gaussian state is not entangled (separable) if, and only if \cite{Sim00}
\begin{equation}
I_1 I_2 + (1 - |I_3|)^2 - I_4 \geq I_1 + I_2, \label{simon} \end{equation}
where $I_1 = \mbox{det}(A)$, $I_2=\mbox{det}(B)$, $I_3=\mbox{det}(C_{\gamma})$ and $I_4=\mbox{tr}(AJC_\gamma JBJC_\gamma^{T}J)$ are the four invariants of the $Sp(2,R)\otimes Sp(2,R)$ group \cite{Sim00}. Here $J$ is an anti-diagonal $2\times 2$ matrix given by $J=\mbox{adiag}(1,-1)$, $\mbox{det}(M)$ stands for the determinant of the matrix $M$, $\mbox{tr}(M)$ is the trace of $M$, and $M^{T}$ is the transpose of $M$.
For a symmetric state ($I_1=I_2$) the entanglement of formation \cite{Gie03} can be written in terms of the symplectic invariants as \cite{Rig04}
\begin{equation}
E_f(\rho) = f\left( \sqrt{I_1 + |I_3| - \sqrt{I_4 + 2\,I_1\,|I_3|}}\right), \label{eof} \end{equation}
where $f(x)=c_+(x)\log_2(c_+(x)) - c_-(x)\log_2(c_-(x))$ and $c_{\pm}(x)=(x^{-1/2}\pm x^{1/2})^2/4$. There is no analytical expression of $E_f$ for non-symmetric Gaussian states. However, their entanglement can be quantified by the negativity, which can be calculated if the above four symplectic invariants are known \cite{Ade04a,Ade04b}.
\section{The four local invariants}
It is important to note, since this is a crucial ingredient of our experimental proposal, that the four quantities $I_1$, $I_2$, $I_3$, and $I_4$ are invariants by local symplectic transformations in the quadratures, or equivalently, they are invariant by local unitary operations in the density matrix $\rho$ \cite{Sim00}. The knowledge of these four invariants allows us to completely characterize the entanglement properties of a two-mode
Gaussian state: we can discover if it is entangled (Eq.~(\ref{simon})) as well as how much it is entangled (Eq.~(\ref{eof})). Therefore, the measurement of these symplectic invariants is the basis on which our proposal is built, and our goal now is to present an experimental set-up in which they can be easily determined.
For this purpose it is more appropriate to work with the covariance matrix $V$ \cite{footnote4} of the creation, $a_j^{\dagger}=(X_j-\mathrm{i}P_j)/\sqrt{2}$, and annihilation, $a_j=(X_j+\mathrm{i}P_j)/\sqrt{2}$, operators for the two modes ($j=1,2$) \cite{Oli05}. Defining $\mathbf{v}=$ $(v_1, v_2, v_3, v_4)^T$ $=$ $(a_1, a_1^\dagger, a_2, a_2^\dagger)^T$, the matrix elements of $V$ are $V_{ij}=(-1)^{i+j}\langle v_i v_j^\dagger + v_j^\dagger v_i\rangle_{\rho}/2$ \cite{footnote2}. As we did for $\gamma$ we can represent $V$ in terms of four block matrices of dimension two:
\begin{equation} V = \left( \begin{array}{cc} V_1 & C_{V} \\ C_{V}^{\dagger} & V_2 \end{array} \right). \end{equation} In this new representation the four symplectic invariants read: $J_1 = \mbox{det}(V_1)$, $J_2=\mbox{det}(V_2)$, $J_3=\mbox{det}(C_V)$ and $J_4=\mbox{tr}(V_1ZC_VZV_2ZC_V^{\dagger}Z)$, where $Z=\mbox{diag}(1,-1)$. Recalling the definitions of $\gamma_{ij}$, $V_{ij}$, and the relation between $X_j, P_j$ and $a_j, a_j^\dagger$ it is straightforward to see that
\begin{equation} I_1=4J_1,\, I_2=4J_2,\, I_3=4J_3,\, I_4 = 16 J_4. \label{iandj} \end{equation}
Like $\gamma$, under local symplectic transformations $V$ can be brought to the following standard form \cite{footnote-V}:
\begin{equation} \tilde{V} = \left( \begin{array}{cc} \tilde{V}_1 & \tilde{C}_{V} \\ \tilde{C}_{V}^{\dagger} & \tilde{V}_2 \end{array} \right), \label{Vtilde} \end{equation} with
\begin{eqnarray*} \tilde{V}_1 = \left( \begin{array}{cc} \tilde{n}_1 & 0 \\ 0 & \tilde{n}_1 \end{array} \right), \tilde{V}_2 = \left( \begin{array}{cc} \tilde{n}_2 & 0 \\ 0 & \tilde{n}_2 \end{array} \right), \tilde{C}_V = \left( \begin{array}{cc} \tilde{m}_s & \tilde{m}_c \\ \tilde{m}_c & \tilde{m}_s \end{array} \right), \end{eqnarray*}
where $\tilde{m}_s$ and $\tilde{m}_c$ are real parameters. From now on, whenever we deal with the standard form of $V$, we will write its elements as well as any related quantity thereof with a tilde ($\tilde{\,}$).
\section{Experimental proposals}
We only need, then, to experimentally measure $J_1$, $J_2$, $J_3$, and $J_4$ to completely quantify the entanglement of a two-mode Gaussian state. We first show a scheme in which we can determine the first three invariants. With these three invariants, as will be shown in what follows, we can obtain bounds for the entanglement of a two-mode Gaussian state. We then introduce two local unitary operations to the previous scheme, allowing us to determine $J_4$, the remaining invariant.
\subsection{First scheme}
The experimental set-up necessary to measure the first three invariants is very simple and is depicted in Fig.~\ref{fig1}. It can be thought of as a simplification of the scheme presented in Ref. \cite{Aur05} to reconstruct the two-mode covariance matrix of a Gaussian state. Here, however, there is no state reconstruction and we only use linear optical devices to measure the purity and the photon number of the output mode $a'_1$. Loosely speaking the linear optics apparatus (adjustable phase shifter and a beam splitter) can be seen as the agent responsible for transferring the entanglement properties between the two modes $a_1$ and $a_2$ to the mode $a'_1$ \cite{Oli05}. On the other hand, the measuring apparatus (photon counting and/or homodyne detection) are responsible for the local projective measurements which determine these properties.
\begin{figure}
\caption{ Experimental set-up to measure
three symplectic invariants ($J_1$, $J_2$, and $J_3$) of a two-mode Gaussian state. Input modes $a_1$ and $a_2$ pass through a beam splitter (BS) with transmittance $\cos \theta$. Before reaching the BS mode $a_1$ passes through a phase-shifter (PS) acquiring a phase $\varphi$. After the BS measurements (M) are made on the output mode $a'_1$ only. Two types of measurements are required to completely characterize the invariants of the input two-mode Gaussian state: the photon number and the purity (Wigner function at the origin of the phase space) of mode $a'_1$.}
\label{fig1}
\end{figure}
The phase shifter and the beam splitter action on the modes $a_1$ and $a_2$ is modeled by the following non-local bilinear Bogoliubov transformation \cite{Oli05} $\mathbf{v'}=\mathcal{U}\mathbf{v}$, where $\mathbf{v'}$ $=$ $(a'_1, (a'_1)^{\dagger}, a'_2, (a'_2)^{\dagger} )^T$ and
\begin{equation} \mathcal{U} = \left( \begin{array}{cc} \mathcal{R} & \mathcal{S} \\ -\mathcal{S}^{*} & \mathcal{R}^* \end{array} \right). \end{equation}
The $2\times 2$ block matrices $\mathcal{R}=\mbox{diag}(\mathrm{e}^{\mathrm{i}\varphi}\cos \theta, \mathrm{e}^{-\mathrm{i}\varphi}\cos \theta )$ and $\mathcal{S} = \mbox{diag}(\sin \theta, \sin \theta )$ are such that $\cos \theta$ is the transmittance $T$ of the beam splitter and $\varphi$ is the phase shift in mode $a_1$. The two-mode output covariance matrix is $V'=\mathcal{U}^\dagger V \mathcal{U}$. We will only need, however, the mode $a'_1$ local covariance matrix, which reads \cite{Oli05}
\begin{equation} V'_1 = \mathcal{R}^* V_1\mathcal{R} + \mathcal{S}V_2\mathcal{S}^* - \mathcal{S}C^{\dagger}_V\mathcal{R} - \mathcal{R}^* C_V\mathcal{S}^* . \label{v1linha} \end{equation}
Defining $J'_1(\theta,\varphi) = \mbox{det}(V'_1)$, where we explicitly write the dependence of $\mbox{det}(V'_1)$ on the parameters $\theta$ and $\varphi$, and using Eq.~(\ref{v1linha}) we easily see that
\begin{equation} J_1 = J'_1(0,0), \hspace{.5cm} J_2 = J'_1\left(\frac{\pi}{2},0\right). \label{j1andj2} \end{equation}
As expected, $J_1$ and $J_2$ are obtained when the beam splitter has, respectively, transmittance one ($\theta = 0$) and reflectivity one ($\theta = \pi/2$). The determination of $J_3$ is not as trivial, requiring that a set of $J'_1$ measurements be made on the output for distinct beam-splitter transmittances and phase-shift arrangements, as well as the measurement of the mode $a'_1$ average photon number $N'_1(\theta, \varphi) = \langle (a'_1)^{\dagger}a'_1 + 1/2 \rangle$. A straightforward but tedious calculation gives
\begin{equation} J_3 = \frac{1}{4}\left(\mathcal{J} + \mathcal{N}\right), \label{j3} \end{equation}
where
\begin{eqnarray} \mathcal{J} &=& J'_1\left(\frac{\pi}{4},0\right) + J'_1\left(\frac{\pi}{4},\pi\right) + J'_1\left(\frac{\pi}{4},\frac{\pi}{2}\right) + J'_1\left(\frac{\pi}{4},-\frac{\pi}{2}\right) \nonumber \\ &&-J'_1\left(0,0\right) - J'_1\left(\frac{\pi}{2},0\right), \label{j3a} \\ \mathcal{N} &=& [N'_1(0,0)]^2 + \left[N'_1\left(\frac{\pi}{2},0\right)\right]^2 + 2 \left[N'_1\left(\frac{\pi}{4},0\right)\right]^2 + 2 \left[N'_1\left(\frac{\pi}{4},\frac{\pi}{2}\right)\right]^2 \nonumber \\ &&- 2 \left[N'_1\left(0,0\right) + N'_1\left(\frac{\pi}{2},0\right) \right]
\left[N'_1\left(\frac{\pi}{4},0\right) + N'_1\left(\frac{\pi}{4},\frac{\pi}{2}\right) \right]. \label{j3b} \end{eqnarray}
Eqs. (\ref{j1andj2})-(\ref{j3b}) constitute one of our central results and show that $J_1$, $J_2$ and $J_3$ of the bipartite input state can be completely determined by measurements on only one of the beam-splitter output ports. This result contrasts with previous proposals \cite{Ade04a,Ade04b,Fiu04}
where measurements on both modes are always required to obtain the symplectic invariants, in particular $J_3$.
Although we do not have yet the fourth invariant, we can obtain a lower bound for the entanglement of formation as given by Eq.~(\ref{eof}). Noting that $J_4$ (or equivalently $I_4$) is a positive quantity and that the function $f(x)$ (Eq.~(\ref{eof})) is a decreasing function of $x$ \cite{Gie03} we readily obtain the lower bound by setting the fourth invariant to zero. A similar approach allows us to derive a bound for the negativity \cite{Ade04a,Ade04b}.
\textit{Remark:} If, and only if, $V$ is given as or can be brought locally to either one of the following two distinct forms the previous three invariants are enough to completely quantify the entanglement of a two-mode Gaussian state. Indeed, when $\mbox{det}(C_V)\geq 0$ one of the forms reads $V_1=\mbox{diag}(n_1,n_1)$, $V_2=\mbox{diag}(n_2,n_2)$, and $C_{V}=\mbox{diag}(m_s, m_s^{*})$. On the other hand, if $\mbox{det}(C_V)\leq 0$ the matrix $C_V$ is anti-diagonal, $C_{V}=\mbox{adiag}(m_c, m_c^{*})$. Note that we cannot go from one form to the other via local unitary operations since we have different signs for $\mbox{det}(C_V)$. A simple calculation using these two particular covariance matrices gives
\begin{equation}
J_4= 2 |J_3| \sqrt{J_1 J_2}. \label{j4} \end{equation}
Hence, as anticipated above, for these two cases $J_1$, $J_2$, and $J_3$ are all that is needed to completely characterize the entanglement of a two-mode Gaussian state. It is worth mentioning, however, that Eq.~(\ref{j4}) is only valid for the two special forms of $V$ described above. In general, Eq.~(\ref{j4}) is no longer valid. Also, we have not used it in any calculations that led to the construction of this and the next scheme.
\subsection{Second scheme}
In order to get $J_4$ we modify the previous scheme introducing two local unitary operations, as depicted in Fig.~\ref{fig2}.
\begin{figure}
\caption{ Modified experimental set-up to measure
the remaining invariant $J_4$ of a two-mode Gaussian state. After the local unitary transformations $U_1$ and $U_2$, affecting modes $1$ and $2$, respectively, the scheme is identical to the one given in Fig.~\ref{fig1}.}
\label{fig2}
\end{figure}
The unitary transformations $U_1$ and $U_2$ are chosen such that the covariance matrix describing the output modes $\tilde{a}_1$ and $\tilde{a}_2$ is in its standard form $\tilde{V}$ (See Eq.~(\ref{Vtilde})). These unitary transformations preserve the Gaussian character of the input state and they are equivalent to local symplectic transformations $S_1$ and $S_2$ on $V$. We should bear in mind that the correct transformation is dependent on the one-mode covariance matrices $V_1$ and $V_2$, which fortunately can be determined locally by standard homodyne detection. Moreover, the important point here is that $S_1$ and $S_2$ (or $U_1$ and $U_2$) always exist and that they are local symplectic transformations. In the Appendix we show how to express these transformations as a function of the input local covariance matrices $V_1$ and $V_2$.
In the standard form $\tilde{V}$ a direct calculation shows that the fourth invariant is given as
\begin{equation}
J_4 = 2 \tilde{n}_1 \tilde{n}_2 \left( |\tilde{m}_s|^2 +
|\tilde{m}_c|^2 \right). \end{equation}
Our task then reduces to the determination of $\tilde{n}_1$, $\tilde{n}_2$,
$|\tilde{m}_s|^2$, and $|\tilde{m}_c|^2$ by measuring only the output mode $\tilde{a}'_1$, the one obtained after $\tilde{a}_1$ and $\tilde{a}_2$ enter the beam splitter.
We first note that $\tilde{n}_1$ and $\tilde{n}_2$ are trivially related to invariants already measured in the previous scheme: $\tilde{n}_1 = \sqrt{J_1}$ and $\tilde{n}_2 = \sqrt{J_2}$. Second,
$J_3 = |\tilde{m}_s|^2 - |\tilde{m}_c|^2$ is also known, implying that we just need to measure either $|\tilde{m}_s|^2$ or $|\tilde{m}_c|^2$ to obtain $J_4$. Analyzing $\tilde{V}_1$ we see that,
\begin{eqnarray}
|\tilde{m}_c|^2 &=& \left[ \tilde{N}'_1\left(\frac{\pi}{4},0\right) \right]^2 - \tilde{J}'_1\left(\frac{\pi}{4},0\right),\label{mc2} \\ \mbox{Re}\left(\tilde{m}_s\right) &=& \frac{\tilde{n}_1+\tilde{n}_2}{2} - \tilde{N}'_1\left(\frac{\pi}{4},0\right),\\ \mbox{Im}\left(\tilde{m}_s\right) &=& \frac{\tilde{n}_1+\tilde{n}_2}{2} - \tilde{N}'_1\left(\frac{\pi}{4},\frac{\pi}{2}\right), \label{imms} \end{eqnarray}
where $\tilde{J}'_1(\theta,\varphi) = \mbox{det}(\tilde{V}'_1)$ and $\tilde{N}'_1(\theta, \varphi) = \langle (\tilde{a}'_1)^{\dagger}\tilde{a}'_1 + 1/2 \rangle$ is the mode $\tilde{a}'_1$ average photon number, all quantities determined for a given set of parameters $\theta$ and $\varphi$. Note that although in the standard form (\ref{Vtilde}) the matrix elements $\tilde{m}_s$ and $\tilde{m}_c$ are real, we have assumed them to be complex, which simplifies the unitary operations $U_1$ and $U_2$ necessary to transform $V$ to $\tilde{V}$. See the Appendix for more details.
Looking at Eqs. (\ref{mc2})-(\ref{imms}) we see that, similar to the first scheme, only two types of measurements are needed to determine $J_4$, namely $\tilde{J}'_1(\theta,\varphi)$ and $\tilde{N}'_1(\theta, \varphi)$.
Additionally, the scheme just presented can be employed without relying on the first one. Indeed, assuming that we are always dealing with an experimental set-up as depicted in Fig.~\ref{fig2}, the first two invariants are
\begin{eqnarray} J_1 = \tilde{n}_1^2 &=&\left[\tilde{N}'_1(0,0)\right]^2, \\ J_2 = \tilde{n}_2^2 &=& \left[\tilde{N}'_1\left(\frac{\pi}{2},0\right)\right]^2. \end{eqnarray}
With the aid of Eqs.~(\ref{mc2})-(\ref{imms}), the last two invariants, $J_3$ and $J_4$, are readily obtained.
In summary, given a general two-mode Gaussian state the first scheme allows us to determine the first three invariants ($J_1, J_2$, and $J_3$) while the second scheme gives us the fourth as well as the other three invariants. With these four invariants the entanglement content of a two-mode Gaussian state is fully determined.
\section{Experimental feasibility}
Our last task is to explain how $J'_1$ and $N'_1$ (or equivalently $\tilde{J}'_1$ and $\tilde{N}'_1$) are obtained from experimentally measurable quantities. Firstly, $N'_1$ is simply the output mode $a'_1$ photon number (or intensity) and can be easily determined by a photodetection process. The determinant $J'_1$, on the other hand, is connected to the purity of mode $a'_1$ by the following expression \cite{Ade04a,Ade04b}: $\mbox{tr}\{(\rho'_1)^2\} = 1/(2\sqrt{J'_1})$. Moreover, one can prove that \cite{Bra05} $W(0)=1/(2\pi \sqrt{J'_1})$, where $W(0)$ is the Wigner function of mode $a'_1$ at the origin of the phase space. Therefore, any technique developed to measure the purity and/or $W(0)$ can be employed to determine $J'_1$. Two interesting proposals were presented in Refs. \cite{Ban96,Fiu04}. The first one \cite{Ban96}, implemented in Ref. \cite{Ban99b}, shows that the photon counting statistics allows one to obtain $W(0)$ without any sophisticated data processing. The second one \cite{Fiu04}, implemented in Ref. \cite{Wen04}, employs only a tunable beam splitter and a single-photon detector to obtain the purity of mode $a'_1$. Note that neither scheme requires homodyne detection, and both permit direct access to $N'_1$ as well. However, homodyne detection \cite{Grangier06} can also be employed for each set of the parameters $\theta$ and $\varphi$ to reconstruct the output mode $a'_1$ covariance matrix, thus leading to the immediate calculation of $J'_1$ and $N'_1$. Nevertheless, a complete single-mode reconstruction is more than we need to fully characterize the entanglement of a two-mode Gaussian state and also more experimentally demanding than the previous two techniques.
In fact what determines the detection scheme to be employed is the specific scheme detection efficiency in the light frequency range and intensity in use. Obviously a photon counting scheme for the measurement of the Wigner function at the origin of the phase space is more appropriate for low intensity light fields. On the other hand, homodyne reconstruction is more appropriate for continuous bright light fields, unless an alternative procedure allowing access to the photon statistics is implemented, such as for pulsed fields detected by on/off avalanche photodetectors \cite{Zam05}. Photon counting (implemented via avalanche devices with saturated gain where each photon produces a detectable signal (current) at the output) can also be simulated by dual-homodyne schemes \cite{Nem02}. Such schemes may be useful in infrared and optical frequencies since the present day avalanche photodiodes possess very low efficiencies at such communication frequencies \cite{Nem02}.
Whichever scheme is employed, imperfect detection and signal losses blur the measurement outcomes, preventing an exact determination of the invariants as delineated above. However, in any situation here considered, the losses can be modeled by a beam splitter of transmittance $\eta$ followed by an ideal detector, with $\eta$ attributed to the overall efficiency of the detection scheme \cite{Wen04}. Thus, the actual measured quantities can be corrected by an appropriate rescaling. Firstly, for homodyne detection the procedure to measure the detection efficiency $\eta_{hom}$ is well established from squeezing experiments \cite{Grangier87,Wen04}. Since $J'_1=I'_1/4=\left(V'_{1,min}V'_{1,max}\right)/4$ and $N'_1=\left(V'_{1,min}+V'_{1,max}\right)/2$, where $V'_{1,min}$ and $V'_{1,max}$ are the squeezed and anti-squeezed quadratures variances, respectively, one can show that in terms of the actual measured variances ${\cal V}'_{1,min}$ and ${\cal V}'_{1,max}$ they are corrected to $V'_{1,min}=\left({\cal V}'_{1,min}-1+\eta_{hom}\right)/\eta_{hom}$ and $V'_{1,max}=\left({\cal V}'_{1,max}-1+\eta_{hom}\right)/\eta_{hom}$ \cite{Wen04}. Secondly, the determination of $J'_1$ and $N'_1$ via photocounting techniques relies on single photon detection of output mode 1 (given by $a'_1$) after the two input modes are recombined in a beam splitter of transmittance $T=\cos\theta$. The detector efficiency $\eta$ is modeled by a beam splitter of transmittance $\eta$ followed by an ideal detector, which in Refs. \cite{Fiu04,Wen04} responds with only two measurement outcomes, while in Refs. \cite{Ban96,Ban99b} it is a number-resolving photodetector. In either case the effects of non-ideal detection can be simply taken into account by substituting $T\rightarrow \eta T$, whose overall effect is to rescale the proper transmittance necessary to obtain all the invariants.
\section{Conclusion}
We have shown that continuous variable entanglement can be directly detected and quantified with few simple measurements. In particular, we have proposed an experimental scheme in which only two types of measurements, i.e. purity (Wigner function at the origin of the phase space) and photon number of a single-mode, are required to characterize the entanglement of a two-mode Gaussian state. Our proposal is valid for either pure or mixed states as well as for either symmetric or non-symmetric Gaussian states, giving always the exact value of the state's amount of entanglement. Furthermore, our scheme can also be seen as a simple procedure to obtain all the four independent symplectic invariants of a covariance matrix. This allows one to easily test for the existence of entanglement even in non-Gaussian states via Simon's sufficient condition for inseparability. Finally, we remark that the same scheme could be appropriately adapted for analyzing continuous variable entanglement in systems other than light fields, such as for the motional degrees of freedom of trapped ions \cite{Win96}, or even for Bose-Einstein condensates \cite{bruno}. This is the case whenever similar processes to the beam splitter and the phase shifter operations can be applied and whenever proper local projective measurements or local state reconstruction can be implemented. \\
\textit{Note:} After completion of this manuscript we devised a fully local procedure by which we can measure the four invariants of a general
two-mode Gaussian state \cite{Har07}. Contrary to the schemes here presented, in Ref. \cite{Har07} there is no need for a non-local unitary operation (as given here by the beam splitter). The trade-off is the inclusion of a classical communication channel and one-mode parity measurements.
\section*{Acknowledgments} We acknowledge support from Fun\-da\-\c{c}\~ao de Amparo \`a Pesquisa do Estado de S\~ao Paulo (FA\-PESP), Conselho Nacional de Desenvolvimento Cient\'{\i}fico e Tecnol\'ogico (CNPq), and Coordena\c{c}\~ao de Aperfei\c{c}oamento de Pessoal de N\'{\i}vel Superior (CAPES).
\appendix \section{Obtaining the standard form $\tilde{V}$}
The covariance matrix $V$ is generally written as:
\begin{eqnarray} V = \left( \begin{array}{cc} V_1 & C_{V} \\ C_{V}^{\dagger} & V_2 \end{array} \right) &=& \left( \begin{array}{cccc} n_1 & m_1 & m_s & m_c \\ m_1^* & n_1 & m_c^* & m_s^*\\ m_s^* & m_c & n_2 & m_2 \\ m_c^* & m_s & m_2^* & n_2 \end{array} \right), \end{eqnarray}
where $V_j$, $j=1,2$, and $C_V$ are block matrices of dimension two. Unitary local operations preserving the Gaussian character of a state described by $V$ are mapped to the following local symplectic transformation \cite{Sim94}:
\begin{equation} S = \left( \begin{array}{cc} S_1 & 0 \\ 0 & S_2 \end{array} \right), \end{equation}
where $S_j$, $j=1,2$, is given as
\begin{equation} S_j = \left( \begin{array}{cc} \mathrm{e}^{-i\alpha_j}\cosh\theta_j& \mathrm{e}^{i\beta_j}\sinh\theta_j \\ \mathrm{e}^{-i\beta_j}\sinh\theta_j& \mathrm{e}^{i\alpha_j}\cosh\theta_j \end{array} \right), \end{equation}
with $\alpha_j$, $\beta_j$, and $\theta_j$ real parameters. The first one is related to rotations of the quadratures of the quantized electromagnetic field and the last one to local squeezing operations. The new covariance matrix $\tilde{V}$ is connected to $V$ by the following relation \cite{Sim94,Oli04},
\begin{equation} \tilde{V} = S V S^{\dagger}, \end{equation}
which implies that
\begin{eqnarray} \tilde{V}_j &=& S_j V_j S_j^{\dagger}, \label{newV}\\ \tilde{C}_V &=& S_1 C_VS_2^{\dagger}. \end{eqnarray}
Explicitly, Eq.~(\ref{newV}) gives for the non-diagonal term of $\tilde{V}_j$:
\begin{eqnarray} \tilde{m}_j &=& \mathrm{e}^{-2\mathrm{i}\alpha_j}m_j\cosh^2\theta_j +\mathrm{e}^{2\mathrm{i}\beta_j}m_j^*\sinh^2\theta_j \nonumber\\ &&+ \mathrm{e}^{\mathrm{i}(\beta_j-\alpha_j)}n_j\sinh(2\theta_j). \end{eqnarray}
By setting $m_j=|m_j|\mathrm{e}^{\mathrm{i}\mu_j}$ and solving for $\tilde{m}_j=0$ we get
\begin{eqnarray} \alpha_j=\beta_j=\frac{\mu_j+\pi}{2}, \\
\tanh(2\theta_j) =\frac{|m_j|}{n_j}. \end{eqnarray}
Note that this is one of several other solutions. However, for our purposes, one is enough to prove that $V$ can be locally transformed to $\tilde{V}$.
\end{document} |
\begin{document}
\footskip=0pt \footnotesep=2pt
\allowdisplaybreaks
\newtheorem{claim}{Claim}[section]
\theoremstyle{definition} \newtheorem{thm}{Theorem}[section] \newtheorem*{thmm}{Theorem} \newtheorem{mydef}{Definition}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{remark}[thm]{Remark}
\newtheorem{rem}{Remark}[section] \newtheorem*{propp}{Proposition} \newtheorem{cor}[thm]{Corollary} \newtheorem{conj}[thm]{Conjecture} \def\mathcal{F}{\mathcal{F}} \def\lambda{\lambda} \def\tilde{R}{\tilde{R}} \def\displaystyle{\displaystyle} \def\varepsilon{\varepsilon} \def\gamma{\gamma} \def\lesssim{\lesssim} \newcommand{\bd}[1]{\mathbf{#1}} \newcommand{\mathbb{R}}{\mathbb{R}} \newcommand{\mathbb{Z}}{\mathbb{Z}} \newcommand{\Omega}{\Omega} \newcommand{\mathbf{v}}{\mathbf{v}} \newcommand{\mathbf{n}}{\mathbf{n}} \newcommand{\mathbf{U}}{\mathbf{U}} \newcommand{\quad}{\quad} \newcommand{\partial}{\partial} \newcommand{\nabla}{\nabla} \newcommand{\frac}{\frac}
\numberwithin{equation}{section}
\title{On the blowup mechanism of smooth solutions to 1D quasilinear strictly hyperbolic systems with large initial data\footnote{Li Jun (lijun@nju.edu.cn) is supported by NSFC (No.11871030). Xu Gang (gxumath@outlook.com, gxu@njnu.edu.cn) and Yin Huicheng (huicheng@nju.edu.cn, 05407@njnu.edu.cn) are supported by NSFC (No.11731007, No.11971237).}}
\author[1]{Li Jun} \author[2]{Xu Gang} \author[1,2]{Yin Huicheng} \affil[1]{Department of Mathematics, Nanjing University, Nanjing 210093, China} \affil[2]{School of Mathematical Sciences and Institute of Mathematical Sciences, Nanjing Normal University, Nanjing 210023, China}
\date{} \maketitle \centerline{}
\date{} \maketitle \thispagestyle{empty} \begin{abstract} For the first order 1D $n\times n$ quasilinear strictly hyperbolic system $\partial_tu+F(u)\partial_xu=0$ with $u(x, 0)=\varepsilon u_0(x)$, where $\varepsilon>0$ is small, $u_0(x)\not\equiv 0$ and $u_0(x)\in C_0^2(\Bbb R)$, when at least one eigenvalue of $F(u)$ is genuinely nonlinear, it is well-known that on the finite blowup time $T_{\varepsilon}$, the derivatives $\partial_{t,x}u$ blow up while the solution $u$ keeps to be small. For the 1D scalar equation or $2\times 2$ strictly hyperbolic system (corresponding to $n=1, 2$), if the smooth solution $u$ blows up in finite time, then the blowup mechanism can be well understood (\emph{i.e.}, only the blowup of $\partial_{t,x}u$ happens). In the present paper, for the $n\times n$ ($n\ge 3$) strictly hyperbolic system with a class of large initial data, we are concerned with the blowup mechanism of smooth solution $u$ on the finite blowup time and the detailed singularity behaviours of $\partial_{t,x}u$ near the blowup point. Our results are based on the efficient decomposition of $u$ along the different characteristic directions, the suitable introduction of the modulated coordinates and the global weighted energy estimates.
\end{abstract}
\vskip 0.2cm
{\bf Keywords:} Blowup mechanism, strictly hyperbolic system, genuinely nonlinear, geometric blowup, modulated coordinate, global weighted energy estimate.\vskip 0.2 true cm
{\bf 2010 Mathematics Subject Classification.} 35L03, 35L67.
\setcounter{tocdepth}{1} \tableofcontents
\section{Introduction}\label{i}
In this paper, we are concerned with the blowup mechanism of smooth solutions to the following Cauchy problem of the 1D $n\times n$ quasilinear strictly hyperbolic system: \begin{subequations}\label{i-1}\begin{align} &\partial_t u+F(u)\partial_x u=0,\label{i-1a}\\ &u(x, 0)=u_0(x),\label{i-1b} \end{align} \end{subequations} where $t\ge 0$, $x\in\Bbb R$, $u=(u_1, \cdots, u_n)^{\top}$, the $n\times n$ real matrix $F(u)$ is smooth in its argument $u$, and $u_0(x)\in C^2(\Bbb R)$. The strict hyperbolicity of system \eqref{i-1a} means that $F(u)$ has $n$ distinct real eigenvalues \begin{equation}\label{i-2} \lambda_1(u)<\cdots<\lambda_n(u), \end{equation} while the corresponding right eigenvectors are denoted by $\gamma_1(u), \cdots, \gamma_n(u)$ respectively. System \eqref{i-1a} is said to be genuinely nonlinear with respect to some eigenvalue $\lambda_{i_0}(u)$ ($1\le i_0\le n$) if \begin{equation}\label{i-3} \nabla_u\lambda_{i_0}(u)\cdot \gamma_{i_0}(u)\neq 0. \end{equation} Otherwise, \eqref{i-1a} is said to be linearly degenerate with respect to the eigenvalue $\lambda_{i_0}(u)$ if \begin{equation}\label{y-1} \nabla_u\lambda_{i_0}(u)\cdot \gamma_{i_0}(u)\equiv 0. \end{equation} The purpose of this paper is to discuss the blowup mechanism of smooth solutions to problem \eqref{i-1} for a class of large smooth initial data $u_0(x)$ provided that system \eqref{i-1a} is genuinely nonlinear with respect to some eigenvalue $\lambda_{i_0}(u)$ for $i_0\in \{1, \cdots, n\}$.
\subsection{Reviews and problems} For the 1D scalar equation \begin{equation}\label{Y-2}\begin{cases} \partial_t v+f(v)\partial_x v=0,\\ v(x, 0)=v_0(x), \end{cases} \end{equation} where $v_0(x)\not\equiv 0$, $v_0(x)\in C_0^1(\mathbb{R})$, $f(v)$ is a $C^1$ smooth function and $f'(v)\not= 0$ for $v\in \text{supp} v_0(x)$. Set $g(x)=f(v_0(x))$, then by the characteristics method, it is easy to know that the $C^1$ solution $v$ will blow up on the finite positive time $T^*=-\frac{1}{\min g'(x)}$ due to $\min\limits_{x\in\mathbb{R}}g'(x)<0$. Meanwhile,
$v\in C(\mathbb{R}\times [0, T^*])$ and $\lim\limits_{t\nearrow T^*}\|\partial_{t,x}v(\cdot, t)\|_{C(\Bbb R)}=\infty$ hold. This illustrates that the blowup of solution $v$ to problem \eqref{Y-2} corresponds to the geometric blowup by the terminology in \cite{A0}.
For the 1D $2\times 2$ strictly hyperbolic system \begin{equation}\label{Y-3}\begin{cases} \partial_t v+B(v)\partial_x v=0,\\ v(x, 0)=v_0(x), \end{cases} \end{equation} where $v_0(x)\not\equiv 0$, $v_0(x)\in C_0^1(\Bbb R)$, $B(v)\in C^1$ is a $2\times 2$ matrix which admits two distinct real eigenvalues $\lambda_1(v)$ and $\lambda_2(v)$, by introducing two Riemann invariants $w_1=w_1(v)$ and $w_2=w_2(v)$, then \eqref{Y-3} can be decoupled into the following $2\times 2$ strictly hyperbolic system of $w=(w_1,w_2)$: \begin{equation}\label{Y-4}\begin{cases} \partial_t w_1+\lambda_1(w)\partial_x w_1=0,\\ \partial_t w_2+\lambda_2(w)\partial_x w_2=0,\\ w(x, 0)=w_0(x). \end{cases} \end{equation}
If the system in \eqref{Y-3} is genuinely nonlinear with respect to at least one eigenvalue $\lambda_i(v)$ ($i=1,2$), then by \eqref{Y-4} and \cite{PDL1}, one knows that the smooth solution $v$ will blow up at the maximal finite existence time $T^*$, while $\|v\|_{L^{\infty}(\mathbb{R}\times [0, T^*])}$ is bounded and
$\lim\limits_{t\nearrow T^*}\|\partial_{t,x}v(\cdot, t)\|_{C(\Bbb R)}=\infty$ holds. This implies that the blowup of solution $v$ to \eqref{Y-3} also corresponds to the geometric blowup.
For the small data solution problem of the 1D $n\times n$ quasilinear strictly hyperbolic system \begin{equation}\label{Y-1}\begin{cases} \partial_t v+B(v)\partial_x v=0,\\ v(x, 0)=\varepsilon v_0(x), \end{cases} \end{equation} where $\varepsilon>0$ is small, $v_0(x)\not\equiv 0$, $v_0(x)\in C_0^2(\Bbb R)$ and $B(v)\in C^2$ is an $n\times n$ matrix, when the system in \eqref{Y-1} is genuinely nonlinear with respect to at least one eigenvalue of $B(v)$, it follows from the results in \cite{LH} and \cite{FJ} that the lifespan $T_{\varepsilon}$ of the smooth solution $v$ to \eqref{Y-1} satisfies \[\lim\limits_{\varepsilon\to 0^{+}}\varepsilon T_{\varepsilon}=\tau_0>0.\]
Moreover, $\|v\|_{C(\mathbb{R}\times [0, T_\varepsilon])}\le C\varepsilon$ and $\displaystyle\lim_{t\to T_{\varepsilon}-}\|\partial_{t,x}v(\cdot, t)\|_{C(\Bbb R)}=\infty$ hold. This means that the blowup of solution $v$ to \eqref{Y-1} corresponds to the geometric blowup.
Compared with the results on problem \eqref{Y-2} and problem \eqref{Y-3}, two natural problems arise for the system \eqref{i-1a} with $n\geq 3$: when at least one eigenvalue of $F(u)$ is genuinely nonlinear,
{\bf Q1.} Can we find a class of large initial data \eqref{i-1b} such that the blowup of solution $u$ corresponds to the geometric blowup as in the small data solution problem \eqref{Y-1}?
{\bf Q2.} Can we find another class of large initial data \eqref{i-1b} such that the solution $u$ itself blows up in finite time?
In the present paper, we focus on the investigation of {\bf Q1}.
\subsection{Statement of main results} By Proposition \ref{lemA-1} in Section \ref{II}, \eqref{i-1} can be equivalently changed into the following problem \begin{subequations}\label{i-6}\begin{align} &\partial_t w+A(w)\partial_x w=0,\label{i-6a}\\ &w(x, -\varepsilon)=w_0(x),\label{i-6b} \end{align} \end{subequations} where $w(x, t)=(w_1, \cdots, w_n)^{\top}$, $t\geq -\varepsilon$, and $\varepsilon>0$ is a small constant (for the convenience of expression, here the initial temporal variable is shifted from $t=0$ to $t=-\varepsilon$). In addition, the $n$ distinct real eigenvalues of smooth function matrix $A(w)=\left(a_{ij}(w)\right)_{n\times n}$ are denoted by $\mu_1(w), \cdots, \mu_n(w)$. Based on the reduction in Proposition \ref{lemA-1} and the strictly hyperbolic condition \eqref{i-2}, there hold \begin{subequations}\label{i-7}\begin{align} &\mu_1(w)<\cdots<\mu_{i_0-1}(w)<\mu_n(w)<\mu_{i_0}(w)<\cdots<\mu_{n-1}(w),\label{i-7a}\\ &a_{in}(w)=0\ (1\leq i\leq n-1),\label{i-7b}\\
& a_{nn}(w)=\mu_n(w)=\mu_n(0)+\partial_{w_n}\mu_n(0)w_n+\sum\limits_{i=1}^{n-1}\partial_{w_i}\mu_n(0)w_i+O(|w|^2),\label{i-7c}\\ &A(0)=\text{diag}\{\mu_1(0), \cdots, \mu_n(0)\},\label{i-7d} \end{align} \end{subequations} where $\partial_{w_n}\mu_n(0)\neq 0$. This means that the system \eqref{i-6a} is genuinely nonlinear with respect to the eigenvalue $\mu_n(w)$. Let $\ell_i(w)$ and $\gamma_i(w)$ be the left and right eigenvectors of the matrix $A(w)$ corresponding to the eigenvalue $\mu_i(w)$ ($1\le i\le n$), respectively. Together with \eqref{i-7}, without loss of generality, one can assume \begin{subequations}\label{i-71}\begin{align} &\ell_i(w)\cdot\gamma_j(w)=\delta_{i}^{j}\ (1\leq i, j\leq n),\label{i-71a}\\
& \gamma_n(w)={\bf e}_n, \ \gamma_i(0)={\bf e}_i,\ \|\gamma_i(w)\|=1\ (1\leq i\leq n-1),\label{i-71b}\\ &\ell_i(0)={\bf e}_i^{\top}\ (1\leq i\leq n),\label{i-71c} \end{align} \end{subequations}
where $\|\gamma_i(w)\|=\sqrt{\displaystyle\sum_{k=1}^n(\gamma_{i}^k)^2(w)}$ with $\gamma_i(w)=(\gamma_{i}^1(w), ..., \gamma_{i}^n(w))^{\top}$.
To study the blowup mechanism of smooth solution to problem \eqref{i-6} with a class of large initial data $w_0(x)=(w_{10}, \cdots, w_{n0})(x)$, motivated by \cite{TSV1}-\cite{TSV3}, we choose $w_0(x)$ as follows:
At first, let $w_{n0}(x)$ satisfy the following generic nondegenerate condition at $x=0$: \begin{equation}\label{i-8} w_{n0}(0)=\kappa_0\varepsilon^{\frac{1}{3}},\ w_{n0}'(0)=-\frac{1}{\varepsilon}=\min\limits_{x\in\mathbb{R}}w_{n0}'(x),\ w_{n0}''(0)=0,\ w_{n0}'''(0)=\frac{6}{\varepsilon^4}, \end{equation} where $\kappa_0$ is a suitable constant.
Secondly, in order to derive $L^{\infty}$ estimates for the lower order derivatives of $w$ and track the development of possible singularity, we require such assumptions of $w_{0}(x)$: \begin{subequations}\label{i-9}\begin{align}
&|w_{n0}(x)-w_{n0}(0)|\leq 2\varepsilon^{\frac{1}{2}-\frac{1}{30}},\label{i-9a}\\
&|\hat w(x)|\leq \varepsilon^{\frac{3}{2}}\eta^{\frac{1}{6}}(\varepsilon^{-\frac{3}{2}}x),\ \ |\hat w'(x)|\leq \eta^{-\frac{1}{3}}(\varepsilon^{-\frac{3}{2}}x)\quad \text{for $|x|\leq\mathcal{L}\varepsilon^{\frac{3}{2}}$},\label{i-9b}\\
&|\hat w^{(4)}(x)|\leq \varepsilon^{\frac{1}{9}-\frac{9}{2}}\quad\text{for $|x|\leq \varepsilon^{\frac{3}{2}}$},\label{i-9e}\\
&\varepsilon|w_{n0}'(x)|\leq 2\eta^{-\frac{1}{3}}(\varepsilon^{-\frac{3}{2}}x)\quad\text{for $\mathcal{L}\varepsilon^{\frac{3}{2}}\leq|x|\leq 2\mathcal{L}\varepsilon^{\frac{3}{2}}$},\label{i-9c}\\
&\varepsilon|w_{n0}'(x)|\leq \eta^{-1}(\varepsilon^{-\frac{3}{2}}x)\quad\text{for $|x|\geq 2\mathcal{L}\varepsilon^{\frac{3}{2}}$},\label{i-9d} \end{align} \end{subequations} and for $1\leq j\leq n-1$, \begin{equation}\label{i-10}
|w_{j0}(x)|\leq\varepsilon,\ |w_{j0}'(x)|\leq \eta^{-\frac{1}{3}}(\varepsilon^{-\frac{3}{2}}x),\ |w_{j0}''(x)|\leq \varepsilon^{-\frac{11}{6}}\eta^{-\frac{1}{3}}(\varepsilon^{-\frac{3}{2}}x), \end{equation} where $\mathcal{L}=\varepsilon^{-\frac{1}{10}}, \eta(x)=1+x^2$ and $\hat{w}(x)=w_{n0}(x)-\kappa_0\varepsilon^{\frac{1}{3}}-\overline{w}(x)$ with $\overline{w}(x)=\varepsilon^{\frac{1}{2}}\overline{W}(\varepsilon^{-\frac{3}{2}}x)$ and $\overline{W}(y)=(-\frac{y}{2}+(\frac{1}{27}+\frac{y^2}{4})^{\frac{1}{2}})^{\frac{1}{3}} -(\frac{y}{2}+(\frac{1}{27}+\frac{y^2}{4})^{\frac{1}{2}})^{\frac{1}{3}}$.
Thirdly, in order to derive the $L^2$-energy estimates for the $\mu_0$-order derivatives of $w$, we demand that \begin{equation}\label{i-11}
\sum\limits_{j=1}^{n-1}\|\partial_x^{\mu_0}w_{j0}(\cdot)\|_{L^2}+\varepsilon\|\partial_x^{\mu_0}w_{n0}(\cdot)\|_{L^2}\lesssim \varepsilon^{\frac{3}{2}(1-\mu_0)}, \footnote{Hereafter, $A\lesssim B$ means that there exists a generic positive constant $C$ such that $A\leq CB$.} \end{equation} where $\mu_0\ge 6$ is a suitably given constant.
Our main results are stated as: \begin{thm}\label{thmi-1} {\it Under the conditions \eqref{i-7}, and without loss of generality, $\mu_n(0)=0$ and $\partial_{w_n}\mu_n(0)=1$ are assumed, then there exists a positive constant $\varepsilon_0$ such that when $0<\varepsilon<\varepsilon_0$ and $w_0(x)$ satisfies \eqref{i-8}-\eqref{i-11}, the problem \eqref{i-6} admits a unique local smooth solution $w$, which will firstly blow up at the point $(x^*, T^*)$. Moreover, \begin{enumerate}[$(1)$] \item $x^{*}=O(\varepsilon^2),\ T^*=O(\varepsilon^{\frac{4}{3}}).$ \item $w$ lies in the following spaces:\begin{equation}\label{i-12}\begin{cases} w\in C([-\varepsilon, T^*), H^{\mu_0}(\mathbb{R}))\cap C^1([-\varepsilon, T^*), H^{\mu_0-1}(\mathbb{R})),\\[2mm] w_i\in C^1([-\varepsilon, T^*]\times \mathbb{R})\ (1\leq i\leq n-1),\ w_n\in L^{\infty}([0, T^*], C^{\frac{1}{3}}(\mathbb{R})). \end{cases} \end{equation}
\item There exist two smooth functions $\xi(t)$ and $\tau(t)$ such that \begin{equation}\label{i-13}\begin{cases} \lim\limits_{t\nearrow T^*}(\xi(t), \tau(t))=(x^*, T^*),\ \lim\limits_{t\nearrow T^*}\partial_x w_n(\xi(t), t)=-\infty,\\[1mm] -2<(T^*-t)\partial_x w_n(\xi(t), t)<-\frac{1}{2}\quad\text{for $-\varepsilon\leq t<T^*$},\\[1mm]
\text{$|\tau(t)-T^*|\lesssim \varepsilon^{\frac{1}{3}}(T^*-t)$ and $|\xi(t)-x^*|\lesssim \varepsilon (T^*-t)$ for $-\varepsilon\leq t<T^*$}. \end{cases} \end{equation}
\end{enumerate}}
\end{thm}
\begin{rem}\label{remi-H1} {\it By Theorem \ref{thmi-1}, we know that the solution $w\in C(\Bbb R\times [0, T^*])$ of \eqref{i-6a}
blows up at the point $(x^*, T^*)$, i.e., $\lim\limits_{t\nearrow T^*}\|\partial_{t,x}w(\cdot, t)\|_{C(\Bbb R)}=\infty$. This corresponds to the geometric blowup for the problem \eqref{i-6}.} \end{rem}
\begin{rem}\label{rem1-1} {\it The assumptions of $\mu_n(0)=0$ and $\partial_{w_n}\mu_n(0)=1$ in Theorem \ref{thmi-1} can be realized by the translation $(t, x)\mapsto (t, x+\mu_n(0)t)$ and then the spatial scaling $x\mapsto \partial_{w_n}\mu_n(0) x$.} \end{rem}
\begin{rem}\label{rem1-2} {\it We now give some comments on \eqref{i-6}-\eqref{i-7}. It is not difficult to find that there are a great number of $w_0(x)$ to fulfill the constraints \eqref{i-8}-\eqref{i-11}. In addition, it follows from Proposition \ref{lemA-1} that the unknown $w$ admits the good components $(w_1, \cdots, w_{n-1})$ and the bad component $w_n$. The conditions \eqref{i-9b}-\eqref{i-9c} imply that the bad component $w_n$ mainly tracks the possible singularity and it can be thought of as a suitable perturbation of the singular function $\overline{W}$. On the other hand, in order to control the detailed behaviors of $w_n$
near the possible blowup point, we posed the suitable perturbation for the fourth order derivatives of $\hat w$ in \eqref{i-9e} when $|x|\leq \varepsilon^{\frac{3}{2}}$. The conditions \eqref{i-9a} and \eqref{i-9d} are posed to control the behavior of $w_n$
away from the blowup position. In addition, to avoid the influence of the initial data $w_0(x)$ at infinity,
we naturally pose the appropriate decaying condition \eqref{i-10}-\eqref{i-11} for large $|x|$.} \end{rem}
\begin{rem}\label{remi-3} {\it In \cite{TSV1}-\cite{TSV3}, through introducing suitable modulated coordinates and taking the constructive proofs, the authors systematically study the shock formation of multidimensional compressible Euler equations with a class of smooth initial data. Motivated by these papers, we study the geometric blowup mechanism of problem \eqref{i-1}, whose nonlinear structure is more general than the 1D compressible Euler equations. Thanks to the new reformulation in the equivalent problem \eqref{i-6} as well as \eqref{i-7}, we can establish some suitable exponential-growth controls on the bounds of the characteristics corresponding to $\mu_i(w)\ (1\leq i\leq n-1)$ (see Lemma \ref{lem7-1} and Lemma \ref{lem7-2} below) such that the problem \eqref{i-6} can be mainly dominated by the approximate Burgers equation of $w_n$.} \end{rem}
\begin{rem}\label{remi-H2} {\it When \eqref{i-6a} admits the structure of conservation laws, there are some interesting works on the shock construction through the first-in-time blowup point $(x^*, T^*)$ for $t\ge T^*$. For instance, under various nondegenerate conditions with finite orders or infinitely degenerate conditions for the initial data, the shock construction from the blowup point is completed for the 1D scalar equation $\partial_tu+\partial_x(f(u))=0$ in \cite{YL1}; under the generic nondegenerate condition of initial data, the shock surface from the blowup curve has been constructed for the multidimensional scalar equation $\partial_tu+\partial_1(f_1(u))+\cdots+\partial_n(f_n(u))=0$ in \cite{YL2}; under the generic nondegenerate conditions of the initial data, for the 1-D $2\times 2$ $p$-system of polytropic gases, the authors in \cite{K1},\cite{LB} and \cite{CD} obtain the formation and construction of the shock wave starting from the blowup point under some variant conditions; for the 1-D $3\times 3$ strictly hyperbolic conservation laws with the small initial data or the 3-D full compressible Euler equations with symmetric structure and small perturbed initial data, the authors in \cite{CXY}, \cite{YHC} and \cite{CD2} also get the formation and construction of the resulting shock waves, respectively. In the near future, we hope that the shock formation can be constructed from the blowup point $(x^*, T^*)$ in Theorem \ref{thmi-1} when \eqref{i-6a} has the structure of conservation laws.} \end{rem}
\begin{rem}\label{remi-2} {\it In recent years, much progress has been made on the formation of shock waves
for the multi-dimensional Euler equations and the quasilinear wave equations under various restrictions
on the related initial data. One can see
the remarkable articles \cite{TSV1}-\cite{TSV4}, \cite{CD1}, \cite{CD3}-\cite{CD4}, \cite{LS}, \cite{MY} and \cite{SJ}.} \end{rem}
\subsection{Comments on the proof of Theorem \ref{thmi-1}} Let us give comments on the proof of Theorem \ref{thmi-1}. Motivated by \cite{TSV3}, by introducing the modulated coordinate which is smooth before the singularity formation, we can convert the finite time singularity formation of \eqref{i-6} into the global well-posedness of smooth solutions to the resulting new system of $W=(W_1, \cdots, W_n)^{\top}$ (see \eqref{ii-4}-\eqref{ii-5}). To achieve this aim, we take the following strategies:
{\bf $\bullet$} Due to the important form of \eqref{i-6a} with \eqref{i-7}, we divide $W$ into the $n-1$ good components $(W_1, \cdots, W_{n-1})^{\top}$ and the bad unknown $w_n$. Inspired by \cite{TSV2}, we continue to decompose $w_n$ into another bad part $W_0$ and a good part $\kappa(t)$
(see \eqref{ii-3}). The $L^{\infty}$ estimates for the lower order derivatives of $W_0$ are carried out in two different domains $\{(y, s): |y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}\}$ and $\{(y, s): |y|\geq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}\}$ with $\mathcal{L}=\varepsilon^{-\frac{1}{10}}$. In the interior domain $\{(y, s): |y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}\}$, $W_0$ is expected to have the similar behavior as $\overline{W}(y)$, which is the steady solution of 1D Burgers type equation
$(\partial_s-\frac{1}{2})\overline{W}+\left(\frac{3}{2}y+\overline{W}\right)\partial_y\overline{W}=0$. In the exterior domain $\{(y, s): |y|\geq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}\}$, the treatment of $W_0$ is rather delicate since the temporal and spatial decay estimates of good components $(W_1, \cdots, W_{n-1})^{\top}$ are required to be established simultaneously.
{\bf $\bullet$} Due to the partially decoupling form of \eqref{i-6a}, in order to prove Theorem \ref{thmi-1}, we need to establish the $L^{\infty}$ estimates of the lower order derivatives and the $L^2$ estimates of the highest order derivatives for $(W_1, \cdots, W_{n})^{\top}$. To get the related $L^{\infty}$ estimates, by utilizing the characteristics method and delicate analysis, at first, we derive the basic exponential controls on the bounds of the characteristics corresponding to $\mu_i(w)\ (1\leq i\leq n-1)$. Subsequently, the spatial decay rate $(1+y^2)^{-\frac{1}{3}}$ of $\partial_y w_0$ and further the temporal decay of $\partial_y W_0$ are obtained. From these, the $L^{\infty}$ estimates of $(W_1, \cdots, W_{n})^{\top}$ are achieved. On the other hand, we observe that the coefficients in the equations of $w$ admit the key $O(e^{\frac{s}{2}})$ scale because of the strict hyperbolicity of \eqref{i-6a} (see \eqref{v-35}). This will lead to the expected $L^2$ estimates on the highest order derivatives of $(W_1, \cdots, W_{n})^{\top}$. Here, we specially point out that the $L^{\infty}$ estimates of each related quantity depend on the information of the higher order derivatives of $W$ since the related \eqref{i-6a} only admits the partial decoupling form. This is the main reason to apply the $L^2$ estimates for dealing with the highest order derivatives of $W$.
When these are done, the proof of Theorem \ref{thmi-1} can be completed successfully. It is hoped that our analysis methods in the paper will be adopted to study the singularity formation problem for the general multi-dimensional symmetric hyperbolic systems with some classes of large initial data, which is a generalization of the results in \cite{TSV1}-\cite{TSV4} for the multi-dimensional compressible Euler equations.
The rest of the paper is arranged as follows: In Section \ref{II}, we reduce the problem \eqref{i-1} into the equivalent partially decoupling problem \eqref{i-6} via Proposition \ref{lemA-1}. In Section \ref{ii}, under the modulated coordinate, the problem \eqref{i-6} and the choice of initial data $w_0(x)$ are reformulated. Moreover, as the heuristics of the formation of the expected singularity, the rigorous derivation on the resulting Burgers-type equation is also given in this section. The bootstrapping assumptions and their closure of the arguments are arranged in Section \ref{iv}-Section \ref{v} respectively: The descriptions of bootstrapping assumptions on $w$ and the modulated coordinate are made in Section \ref{iv}; the $L^{\infty}$ estimates for the bad unknown $W_n$ and the good components $(W_1,\cdot\cdot\cdot, W_{n-1})^{\top}$ are taken in Section \ref{vi}-Section \ref{vii} respectively. In addition, the closure of bootstrapping assumptions for the modulation variables is completed in Section \ref{viii}; the related energy estimates for the higher order derivatives of $W$ are derived in Section \ref{v}. In Section \ref{V}, we establish the main results in Theorem \ref{thmii-1} and further Theorem \ref{thmi-1}. Finally, a useful interpolation inequality and its application for deriving some delicate estimates are given in Appendix \ref{A}.
\section{Reduction}\label{II}
In this section, our main aim is to reduce \eqref{i-1a} to a partially decoupling form \eqref{i-6a} such that the resulting new unknown functions $w=(w_1, \cdots, w_n)^{\top}$ will admit $n-1$ good components and only one bad component. The good components and the bad component mean that their regularities are in $C^1$ and in $C^{1/3}$ up to the blowup time, respectively.
\begin{prop}\label{lemA-1} {\it Under assumptions \eqref{i-2}-\eqref{i-3}, there exists a constant $\delta_0>0$
such that when $|u|<\delta_0$, the system \eqref{i-1a} can be equivalently reduced into \begin{equation}\label{A-1} \partial_{t}w+A(w)\partial_{x} w=0, \end{equation} where the smooth mapping $u\mapsto w=w(u)$ is invertible and $w(0)=0$. In addition, the inverse mapping of $w(u)$ is denoted as $u=u(w)$, and the $n\times n$ matrix \begin{equation*} A(w)=\left(\frac{\partial w}{\partial u}\right)F(u(w))\left(\frac{\partial w}{\partial u}\right)^{-1}:=\left(a_{ij}(w)\right)_{n\times n} \end{equation*} satisfies \begin{enumerate}[$(1)$] \item $A(w)$ has $n$ distinct eigenvalues $\left\{\mu_i(w)\right\}_{i=1}^{n}$ with \begin{equation*} \mu_i(w)=\lambda_i(u(w))\ (1\leq i<i_0);\ \mu_i(w)=\lambda_{i+1}(u(w))\ (i_0\leq i<n);\ \mu_n(w)=\lambda_{i_0}(u(w)). \end{equation*} \item $a_{in}(w)=0\ (i\neq n),\ a_{nn}(w)=\mu_{n}(w)$ and $\partial_{w_{n}}\mu_{n}(w)\neq 0$. \item $A(0)=\text{diag}\{\mu_1(0), \cdots, \mu_n(0)\}$. \end{enumerate}}
\end{prop}
\begin{proof} At first, we claim that when $|u|\leq \delta_0$ for some constant $\delta_0>0$, there exist $(n-1)$ linearly independent Riemann invariants $\alpha_i(u)\ (i\neq i_0)$ corresponding to $\lambda_{i_0}(u)$ such that \begin{equation}\label{B-2}
\nabla_u \alpha_i(u)\cdot \gamma_{i_0}(u)=0\ (i\neq i_0,\ |u|<\delta_0). \end{equation} Indeed, let $\{\zeta_i\}_{(i\neq i_0)}$ be $(n-1)$ linearly independent column vectors orthogonal to $\gamma_{i_0}(0)$ and set $\alpha_i(u)=\zeta_i^{\top}\cdot u+\bar{\alpha}_i(u)$, then it follows from \eqref{B-2} that the unknowns $\{\bar{\alpha}_i(u)\}_{i\neq i_0}$ should satisfy \begin{equation}\label{B-3} \nabla_u \bar{\alpha}_i(u)\cdot\gamma_{i_0}(u)=-\zeta_i^{\top}\cdot(\gamma_{i_0}(u)-\gamma_{i_0}(0)),\ \bar{\alpha}_i(0)=0\ (i\neq i_0). \end{equation}
It is not difficult to find that there exists a constant $\delta_0>0$ such that \eqref{B-3} is uniquely solved when $|u|<\delta_0$ and $|\bar{\alpha}_i(u)|\lesssim |u|^2$. Hence \eqref{B-2} is obtained.
By $\alpha_i(0)=0\ (i\neq i_0)$, we define a mapping $u\mapsto v=v(u)=(v_1, \cdots, v_n)^{\top}(u)$ with $v(0)=0$ as \begin{equation}\label{B-4} v_i=\alpha_i(u)\ (1\leq i< i_0);\quad v_i=\alpha_{i+1}(u)\ (i_0\leq i<n);\quad v_{n}=\gamma_{i_0}^{\top}(0)\cdot u. \end{equation}
Note that $\{\zeta_i\}_{i\neq i_0}$ are $(n-1)$ linearly independent column vectors which are orthogonal to $\gamma_{i_0}(0)$. Then the transformation $u\mapsto v=v(u)$ is reversible for $|u|<\delta_0$
since its Jacobian matrix $J_{v}(u)$ satisfies $J_{v}(0)=\left(\frac{\partial v}{\partial u}\right)|_{u=0} =\left(\zeta_1,\cdots, \zeta_{i_0-1}, \zeta_{i_0+1}, \cdots, \zeta_n, \gamma_{i_0}(0)\right)^{\top}$ and $J_{v}(0)$ is non-singular. We now denote the inverse mapping of $v=v(u)$ as $u=u(v)$.
By \eqref{B-2} and \eqref{B-4}, the system \eqref{i-1a} is equivalently converted into \begin{equation}\label{B-5} \partial_t v+G(v)\partial_x v=0, \end{equation} where $G(v)=J_{v}(u)F(u(v))J_{v}^{-1}(u):=\left(g_{ij}(v)\right)_{n\times n}$, $G(v)$ has $n$ distinct eigenvalues $\{\lambda_i(u(v))\}_{i=1}^{n}$ and the corresponding right eigenvectors are $\{J_{v}(u)\gamma_i(u(v))\}_{i=1}^{n}$. In addition, \eqref{B-2} shows \begin{equation}\label{A-6} J_{v}(u)\gamma_{i_0}(u(v))=\gamma_{i_0}^{\top}(0)\cdot\gamma_{i_0}(u(v)){\bf e}_{n}\neq {\bf 0}. \end{equation} This implies that \begin{equation}\label{A-7} g_{i n}(v)=0\ (i\neq n),\quad g_{n n }(v)=\lambda_{i_0}(u(v)). \end{equation} It follows from \eqref{B-5} and \eqref{A-7} that the $(n-1)$ order square matrix \begin{equation}\label{A-9} G_{n-1}(v)=\left(g_{ij}(v)\right)_{(n-1)\times (n-1)} \end{equation} has $(n-1)$ eigenvalues \begin{equation*}\label{A-10} \lambda_1(u(v))<\cdots<\lambda_{i_0-1}(u(v))<\lambda_{i_0+1}(u(v))<\cdots<\lambda_n(u(v)). \end{equation*} Then there exists a unique $(n-1)$ order invertible constant square matrix $B_{n-1}=\left(b_{ij}\right)_{(n-1)\times (n-1)}$ such that \begin{equation}\label{B-10} \bar{G}_{n-1}(v)=B_{n-1}G_{n-1}(v)B_{n-1}^{-1}:=\left(\bar{g}_{ij}(v)\right)_{(n-1)\times (n-1)}, \end{equation} where \begin{equation*} \bar{G}_{n-1}(0)=\text{diag}\{\lambda_1(0),\cdots, \lambda_{i_0-1}(0), \lambda_{i_0+1}(0), \cdots, \lambda_n(0)\}. \end{equation*} Furthermore, it is derived from \eqref{i-3} and \eqref{A-6}-\eqref{A-7} that \begin{equation}\label{A-8} \nabla_{v}\lambda_{i_0}(u(v))J_{v}(u)\gamma_{i_0}(u(v))=\nabla_{u}\lambda_{i_0}(u)\cdot\gamma_{i_0}(u)\neq 0. 
\end{equation} Denote the invertible transformation $v\mapsto w=w(v):=(w_1, \cdots, w_n)^{\top}(v)$ as \begin{equation}\label{A-11} (w_1, \cdots, w_{n-1})^{\top}=B_{n-1}(v_1, \cdots, v_{n-1})^{\top},\quad w_n=v_n+\sum\limits_{j=1}^{n-1}b_{nj}w_j, \end{equation} where the constants $\{b_{nj}\}_{j=1}^{n-1}$ will be determined later. Set the inverse mapping of $w=w(v)$ as $v=v(w)$ with $v(0)=0$. By \eqref{B-5}, \eqref{A-9}-\eqref{A-11} and a direct computation, we arrive at \begin{equation}\label{A-12} \partial_t w+A(w)\partial_x w=0, \end{equation} where $A(w)=\left(a_{ij}(w)\right)_{n\times n}$ satisfies \begin{equation}\label{A-13}\begin{cases} a_{ij}(w)=\bar{g}_{ij}(v(w))\ (1\leq i, j\leq n-1),\\[2mm] a_{jn}(w)=0\ (1\leq j\leq n-1), \\[2mm] a_{nn}(w)=g_{nn}(v(w)) \end{cases} \end{equation} and \begin{equation}\label{A-14}\begin{aligned} &(a_{n 1}, \cdots, a_{n n-1})(w)\\ =&(g_{n1}, \cdots, g_{n n-1})(v(w))B_{n-1} +(b_{n1 }, \cdots, b_{n n-1})(\bar{G}_{n-1}-g_{n n}I_{n-1})(v(w)). \end{aligned} \end{equation} Since the $(n-1)$ order square matrix \begin{equation*} \bar{G}_{n-1}(0)-g_{n n}(0)I_{n-1}=\text{diag}\{\lambda_1(0)-\lambda_{i_0}(0), \cdots \lambda_{i_0-1}(0)-\lambda_{i_0}(0), \lambda_{i_0+1}(0)-\lambda_{i_0}(0), \cdots, \lambda_{n}(0)-\lambda_{i_0}(0)\} \end{equation*} is invertible due to \eqref{i-2} and \eqref{A-7}, it is derived from \eqref{A-14} that there exists unique $\{b_{n j}\}_{1\leq j\leq n-1}$ such that \begin{equation}\label{A-15} (a_{n 1}, \cdots, a_{n, n-1})(0)=(0, \cdots, 0). \end{equation}
For the constant invertible square matrix $B=\left(b_{ij}\right)_{n\times n}$ with $b_{i n}=\delta_{i}^{n}\ (1\leq i\leq n)$ and \begin{equation}\label{A-16} w=Bz, \end{equation} one has from \eqref{A-11}-\eqref{A-14} that \begin{equation}\label{A-17} A(w)=BG(B^{-1}w)B^{-1}:=(a_{ij}(w))_{n\times n}, \end{equation} where \begin{equation*} A(0)=BG(0)B^{-1}=\text{diag}\{\lambda_1(0), \cdots, \lambda_{i_0-1}(0), \lambda_{i_0+1}(0), \cdots, \lambda_n(0), \lambda_{i_0}(0)\}. \end{equation*}
The expected invertible mapping $u\mapsto w=w(u)$ is just the composition of two mappings $v=v(u)$ and $w=w(v)$ defined by \eqref{B-4} and \eqref{A-16} respectively. Its inverse mapping is denoted as $u=u(w)$. It is easy to know that the matrix $A(w)$ has $n$ distinct eigenvalues $\{\lambda_i(u(w))\}_{i=1}^n$ and the corresponding right eigenvectors are $\{J_w(u)\gamma_i(u(w))\}_{i=1}^n$. In addition, it is derived from \eqref{A-6}, \eqref{A-8} and \eqref{A-11} that \begin{equation}\label{A-18} J_w(u)\gamma_{i_0}(u(w))=B J_v(u)\gamma_{i_0}(u(v))=\gamma_{i_0}^{\top}(0)\cdot\gamma_{i_0}(u(v)){\bf e}_n\neq {\bf 0} \end{equation} and \begin{equation}\label{A-19}\begin{aligned} &\nabla_w\lambda_{i_0}(u(w))\cdot J_w(u)\gamma_{i_0}(u(w))\\ =&\nabla_v\lambda_{i_0}(u(v))\cdot J_v(w)\cdot J_w(v)\cdot J_v(u)\gamma_{i_0}(u(v))\\ =&\nabla_v\lambda_{i_0}(u(v))\cdot J_v(u)\gamma_{i_0}(u(v))\neq 0. \end{aligned} \end{equation} Then the properties (1)-(3) of $A(w)$ come from \eqref{A-9}-\eqref{B-10}, \eqref{A-13}, \eqref{A-15} and \eqref{A-18}-\eqref{A-19}. \end{proof}
\begin{rem}\label{YH-1} {\it We point out that Proposition \ref{lemA-1} is a generalization of Lemma 2.1 in \cite{CXY}, where \eqref{i-1} with small initial data and $n=3$ is simplified analogously.} \end{rem}
\section{Reformulation under the modulated coordinates}\label{ii}
Motivated by \cite{TSV2}, to show the geometric blowup mechanism in Theorem \ref{thmi-1}, we introduce three modulation variables $\tau(t),\ \xi(t)$ and $\kappa(t)$ as \begin{equation}\label{ii-1}\begin{cases} \tau(t):\quad \text{tracking the exact blowup time},\\ \xi(t): \quad \text{tracking the location of the blowup point},\\ \kappa(t):\quad \text{fixing the speed of the singularity development}. \end{cases} \end{equation} Set the modulated coordinate $(y, s)$ as follows \begin{equation}\label{ii-2} s=s(t)=-\log(\tau(t)-t),\quad y=(x-\xi(t))(\tau(t)-t)^{-\frac{3}{2}}=(x-\xi(t))e^{\frac{3s}{2}}. \end{equation} In addition, the new unknowns $W=(W_1, \cdots, W_n)^{\top}$ and $W_0$ are defined as \begin{equation}\label{ii-3} w_i(x, t)=W_i(y, s) (i\neq n),\quad w_{n}(x, t)=W_n(y, s)=e^{-\frac{s}{2}}W_{0}(y, s)+\kappa(t), \end{equation} where $\kappa(t)=w_n(\xi(t),t)$.
By \eqref{ii-1}-\eqref{ii-3}, when $t<\tau(t)$, the system \eqref{i-6a} can be equivalently rewritten as \begin{equation}\label{ii-4} \partial_s W+\left(\frac{3}{2}y-e^{\frac{s}{2}}\beta_{\tau}\dot{\xi}(t)\right)\partial_y W +e^{\frac{s}{2}}\beta_{\tau}A(w)\partial_y W=0, \end{equation} where $\beta_{\tau}(t)=\frac{1}{1-\dot{\tau}(t)}$, $\dot{\xi}(t)=\xi'(t)$ and $\dot{\tau}(t)={\tau}'(t)$.
In addition, $W_0$ is determined by \begin{equation}\label{ii-5} (\partial_s-\frac{1}{2})W_{0}+\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_{n}(w) -\dot{\xi}(t))\right)\partial_y W_{0}+\sum\limits_{j\neq n}e^{s}\beta_{\tau}a_{nj}(w)\partial_y W_j =-e^{-\frac{s}{2}}\beta_{\tau}\dot{\kappa}(t), \end{equation} where $\dot{\kappa}(t)=\kappa'(t)$.
\subsection{Global steady solution for 1D Burgers equation}\label{ii-a}
The simplest case of problem \eqref{i-6} is (i.e., $n=1$ and $a_n(w)=w_n$) \begin{equation}\label{Bur-1}\begin{cases} \partial_t\overline{\omega}+\overline{\omega}\partial_x\overline{\omega}=0,\ x\in\mathbb{R}, \ t>-\varepsilon,\\ \overline{\omega}(x, -\varepsilon)=\overline{\omega}_0(x),\ x\in\mathbb{R}, \end{cases} \end{equation} where $\overline{\omega}_0(x)\in C^{\infty}(\mathbb{R})$ and $\overline{\omega}_0'(x)\leq 0$. It is assumed that $\overline{\omega}_0(x)$ satisfies the generic nondegenerate condition at $x=0$: \begin{equation}\label{A-2} \overline{\omega}_0(0)=0,\ \overline{\omega}_0'(0)=-\frac{1}{\varepsilon}=\min\limits_{x\in\mathbb{R}}\overline{\omega}_0'(x),\ \overline{\omega}_0''(0)=0,\ \overline{\omega}_0'''(0)=\frac{6}{\varepsilon^4}. \end{equation} By the characteristics method, it is easy to know that under the assumption \eqref{A-2}, the smooth solution $\overline{\omega}(x, t)$ of problem \eqref{Bur-1} will blow up at the first-in-time singularity point $(0, 0)$ and the related characteristic starting from the point $(0, -\varepsilon)$ is $\{(x, t): x=0, \ -\varepsilon<t<0\}$. From this and the procedures in \eqref{ii-1}-\eqref{ii-5}, we define \begin{equation}\label{A-3} \tau_0(t)=0,\ \xi_0(t)=0,\ \kappa_0(t)=0, \end{equation} and \begin{equation}\label{A-4} s=s(t)=-\log(\tau_0(t)-t),\ y=(x-\xi_0(t))(\tau_0(t)-t)^{-\frac{3}{2}}=(x-\xi_0(t))e^{\frac{3s}{2}} \end{equation} and \begin{equation}\label{A-5} \overline{\omega}=e^{-\frac{s}{2}}\overline{W}(y, s)+\kappa_0(t). \end{equation}
Then it follows from \eqref{Bur-1} and \eqref{A-3}-\eqref{A-5} that $\overline{W}(y, s)$ satisfies \begin{equation}\label{ii-6} (\partial_s-\frac{1}{2})\overline{W}+\left(\frac{3}{2}y+\overline{W}\right)\partial_y\overline{W}=0. \end{equation}
In Appendix A.1 of \cite{TSV2}, it is proved that equation \eqref{ii-6} has a group of steady smooth solutions $\overline{W}=\overline{W}(y)$ satisfying such a generic nondegenerate condition \begin{equation}\label{ii-7} \overline{W}(0)=0,\ \overline{W}'(0)=\min\limits_{y\in\mathbb{R}}\overline{W}'(y)<0,\ \overline{W}''(0)=0,\ \overline{W}'''(0)>0. \end{equation} According to the initial data \eqref{A-2} and the transformation \eqref{A-3}-\eqref{A-4}, the solution $\overline{W}(y)$ of \eqref{ii-6} is introduced in \cite{TSV2} \begin{equation}\label{ii-8} \overline{W}(y)=\left(-\frac{y}{2}+\left(\frac{1}{27}+\frac{y^2}{4}\right)^{\frac{1}{2}}\right)^{\frac{1}{3}} -\left(\frac{y}{2}+\left(\frac{1}{27}+\frac{y^2}{4}\right)^{\frac{1}{2}}\right)^{\frac{1}{3}}. \end{equation}
In addition, it is easy to obtain
\begin{subequations}\label{ii-9}\begin{align}
&\overline{W}(0)=0,\ \ \ \overline{W}'(0)=-1,\ \ \ \overline{W}''(0)=0,\ \ \ \overline{W}'''(0)=6,\label{ii-9a}\\
& \|\eta^{-\frac{1}{6}}\overline{W}\|_{L^{\infty}}\leq 1,\ -1\leq\eta^{\frac{1}{3}}\overline{W}'\leq -\frac{1}{6},\ \|\eta^{\frac{5}{6}}\overline{W}''\|_{L^{\infty}}\leq 2,\label{ii-9b}\\
& \|\eta^{\frac{5}{6}}\overline{W}^{(\mu)}\|_{L^{\infty}}\lesssim_{\mu}1\quad\text{for $\mu\geq 3$}.\label{ii-9c}
\end{align}
\end{subequations}
\subsection{Evolution for the modulation variables}\label{ii-b} With the expectation $\lim\limits_{s\nearrow +\infty}W_{0}(y, s)=\overline{W}(y)$ and by the properties of $\overline{W}(y)$, we pose \begin{equation}\label{ii-10} W_0(0, s)=0,\quad \partial_y W_0(0, s)=-1,\quad\partial_y^2 W_0(0, s)=0,\quad \partial_y^3 W_0(0, s)=6. \end{equation}
Next, we derive the equations of the modulation variables in \eqref{ii-1}. For any nonnegative integer $\mu$, acting $\partial^{\mu}=\partial_y^{\mu}$ on both sides of \eqref{ii-4} and \eqref{ii-5} yields \begin{subequations}\label{ii-11}\begin{align} &(\partial_s+\frac{3}{2}\mu)\partial^{\mu}W+(\frac{3}{2}y-e^{\frac{s}{2}}\beta_{\tau}\dot{\xi}(t))\partial_y\partial^{\mu}W +e^{\frac{s}{2}}\beta_{\tau}A(w)\partial_y\partial^{\mu}W=F_{\mu},\label{ii-11a}\\[2mm] &(\partial_s+\frac{3\mu-1}{2})\partial^{\mu}W_0+\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_{n}(w) -\dot{\xi}(t))\right)\partial_y\partial^{\mu}W_0=F_{\mu}^0,\label{ii-11b} \end{align} \end{subequations} where \begin{equation*}\begin{cases} F_{\mu}=-e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu}C_{\mu}^{\beta}\partial^{\beta}A(w)\partial_y\partial^{\mu-\beta}W,\\[2mm] F_{\mu}^0=-e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu}C_{\mu}^{\beta}\partial^{\beta}\mu_{n}(w)\partial_y\partial^{\mu-\beta}W_0 +e^{s}\beta_{\tau}\sum\limits_{j\neq n}\partial^{\mu}\left(a_{nj}(w)\partial_yW_j\right) -e^{-\frac{s}{2}}\beta_{\tau}\dot{\kappa}(t)\delta_{\mu}^0.\\ \end{cases} \end{equation*}
Due to \eqref{ii-10}, it is derived from \eqref{ii-11b} that for $\mu=0, 1, 2$, the modulation variables satisfy the following ordinary differential system: \begin{subequations}\label{ii-12}\begin{align} \dot{\kappa}(t)&=e^{s}\left(\mu_{n}(w^{0})-\dot{\xi}(t)\right)-e^{\frac{3s}{2}}\sum\limits_{j\neq n}a_{nj}(w^{0})(\partial_y W_j)^0,\label{ii-12a}\\[2mm] \dot{\tau}(t)&=1-\partial_{w_n}\mu_{n}(w^{0})+e^{\frac{s}{2}}\sum\limits_{j\neq n}\partial_{w_j}\mu_{n}(w^{0})(\partial_y W_j)^0\nonumber\\[2mm] &\quad -e^{s}\sum\limits_{j\neq n}\left(\partial_y(a_{nj}(w)\partial_y W_j)\right)^0,\label{ii-12b}\\[2mm] \dot{\xi}(t)&=\mu_{n}(w^{0})-\frac{1}{6}(\partial_y^2 \mu_n(w))^{0}+\frac{1}{6}\sum\limits_{j\neq n}e^{\frac{s}{2}}\left(\partial_y^{2}(a_{nj}(w)\partial_y W_j)\right)^0,\label{ii-12c} \end{align} \end{subequations} where the notation $v^{0}$ represents $v(0, s)$ for the function $v(y, s)$.
\subsection{The equation of $\mathcal{W}=W_{0}-\overline{W}$}\label{ii-c}
Set \begin{equation}\label{ii-14} \mathcal{W}=W_{0}-\overline{W}. \end{equation} It follows from \eqref{ii-5} and \eqref{ii-6} that for any nonnegative integer $\mu$, \begin{equation}\label{ii-15} \left(\partial_{s}+\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_{n}(w) -\dot{\xi}(t))\right)\partial_y\right)\partial^{\mu}\mathcal{W}+\mathcal{D}_{\mu}\partial^{\mu}\mathcal{W}=\mathcal{F}_{\mu}, \end{equation} where \begin{equation*} \mathcal{D}_{\mu}=\frac{3\mu-1}{2}+\beta_{\tau}\overline{W}'+e^{\frac{s}{2}}\beta_{\tau}\mu \partial_y \mu_n(w) \end{equation*} and \begin{equation*}\begin{aligned} \mathcal{F}_{\mu}=&-\sum\limits_{1\leq\beta\leq\mu}C_{\mu}^{\beta}\beta_{\tau}\partial_y^{1+\beta}\overline{W}\partial_y^{\mu-\beta}\mathcal{W} -\sum\limits_{2\leq\beta\leq \mu}C_{\mu}^{\beta}e^{\frac{s}{2}}\beta_{\tau}\partial_y^{\beta}\mu_n(w)\partial_y^{\mu-\beta+1}\mathcal{W} -e^{-\frac{s}{2}}\beta_{\tau}\dot{\kappa}(t)\delta_{\mu}^0\\ &-\sum\limits_{j\neq n}e^{s}\beta_{\tau}\partial_y^{\mu}\left(a_{nj}(w)\partial_y W_j\right)-\beta_{\tau}e^{\frac{s}{2}}\partial_y^{\mu}\left(\overline{W}' (\mu_{n}(w)-e^{-\frac{s}{2}}W_0-\dot{\xi}(t))\right)\\ &+(1-\beta_{\tau})\partial_y^{\mu}(\overline{W}' \overline{W}). \end{aligned} \end{equation*}
In addition, in order to establish the weighted estimates of $\mathcal{W}$, it is derived from \eqref{ii-15} that for any real number $\nu$, \begin{equation}\label{ii-16} \left(\partial_s+\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_{n}(w) -\dot{\xi}(t))\right)\partial_y\right)\left[\eta^{\nu}\partial^{\mu}\mathcal{W}\right]+\mathcal{D}_{\mu, \nu}\left[\eta^{\nu}\partial^{\mu}\mathcal{W}\right]=\eta^{\nu}\mathcal{F}_{\mu}, \end{equation} where \begin{equation*} \mathcal{D}_{\mu, \nu}=\mathcal{D}_{\mu}-2\nu y\eta^{-1}\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_{n}(w)-\dot{\xi}(t))\right). \end{equation*}
\subsection{The decomposition on the derivatives of $W$}\label{ii-d}
To deal with the derivatives of $W$, we adopt the method of eigendecomposition in \cite{PDL1}. Set \begin{equation}\label{ii-17} \partial_{y}^{\mu}W=\sum\limits_{m=1}^{n}W_{\mu}^{m}\gamma_{m}(w), \end{equation} where $\left\{\gamma_{m}(w)\right\}_{m=1}^{n}$ have been defined in \eqref{i-71}.
Acting each left eigenvector $\ell_m(w)$ on both sides of \eqref{ii-11a} and substituting the expansion \eqref{ii-17} into \eqref{ii-11a} yield \begin{equation}\label{ii-18} \left(\partial_s+\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_{m}(w)-\dot{\xi}(t))\right)\partial_y +\frac{3}{2}\mu\right)W_{\mu}^{m}=\mathbb{F}_{\mu}^m, \end{equation} where \begin{equation*} \mathbb{F}_{\mu}^m=-\sum\limits_{j=1}^{n}W_{\mu}^{j}\ell_m(w)\cdot\left(\partial_s\gamma_j(w) +((\frac{3}{2}y-e^{\frac{s}{2}}\beta_{\tau}\dot{\xi}(t))I_n+e^{\frac{s}{2}}\beta_{\tau}A(w))\partial_y\gamma_j(w)\right) +\ell_m(w)\cdot F_{\mu}. \end{equation*}
On the other hand, for any real number $\nu$, it is derived from \eqref{ii-18} that \begin{equation}\label{ii-19} \left(\partial_s+\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_m(w) -\dot{\xi}(t))\right)\partial_y\right)\left[\eta^{\nu}W_{\mu}^{m}\right] +\mathbb{D}_{\mu, \nu}^{m}\left[\eta^{\nu}W_{\mu}^{m}\right]=\eta^{\nu}\mathbb{F}_{\mu}^{m}, \end{equation} where \begin{equation*} \mathbb{D}_{\mu, \nu}^{m}=\frac{3\mu}{2}-2\nu y\eta^{-1}\left(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_m(w)-\dot{\xi}(t))\right). \end{equation*}
\subsection{Initial data and main results under the modulation coordinates}\label{ii-e}
Under the constraints \eqref{i-8}-\eqref{i-11} and the definition \eqref{ii-3}, the initial data of $W(y, s)$ on $s=-\log\varepsilon$ can be determined accordingly.
Indeed, due to \eqref{i-8} and the definitions \eqref{ii-1} and \eqref{ii-3} (also see \eqref{ii-10}), the initial data of the modulation variables $\tau(t), \xi(t)$ and $\kappa(t)$ on $t=-\varepsilon$ are \begin{equation}\label{iii-14} \tau(-\varepsilon)=0,\ \xi(-\varepsilon)=0,\ \kappa(-\varepsilon)=w_n(0, -\varepsilon)=\kappa_0\varepsilon^{\frac{1}{3}}. \end{equation}
In addition, for the bad component $W_0(y, s)$, it is derived from \eqref{i-8}-\eqref{i-9} and \eqref{ii-2}-\eqref{ii-3} that on $s=-\log\varepsilon$, \begin{equation}\label{iii-6} W_{0}(0, s)=\partial_{y}^{2}W_{0}(0, s)=0,\ \partial_{y}W_{0}(0, s)=\min\limits_{y\in\mathbb{R}}\partial_{y}W_{0}(y, s)=-1,\ \partial_{y}^{3}W_{0}(0, s)=6 \end{equation} and \begin{subequations}\label{iii-7}\begin{align}
&|W_{0}(y, -\log\varepsilon)|\leq 2\varepsilon^{-\frac{1}{30}},\label{iii-7a}\\
&\text{$|\mathcal{W}(y, -\log\varepsilon)|\leq \varepsilon\eta^{\frac{1}{6}}(y)$
and $|\partial_{y}\mathcal{W}(y, -\log\varepsilon)|\leq \varepsilon\eta^{-\frac{1}{3}}(y)$ for $|y|\leq\mathcal{L}$},\label{iii-7b}\\
&|\partial_y^4 \mathcal{W}(y, -\log\varepsilon)|\leq \varepsilon^{\frac{1}{9}}\quad\text{for $|y|\leq 1$},\label{iii-7d}\\
&|\partial_y W_0(y, -\log\varepsilon)|\leq 2\eta^{-\frac{1}{3}}(y)\boldsymbol{1}_{\{\mathcal{L}\leq |y|\leq 2\mathcal{L}\}}+\eta^{-1}(y)\boldsymbol{1}_{\{|y|\geq 2\mathcal{L}\}}\quad\text{for $|y|\geq\mathcal{L}$}.\label{iii-7c} \end{align} \end{subequations}
For the good components of $W$, it follows from \eqref{i-10} and \eqref{ii-2}-\eqref{ii-3} that for $j\neq n$ \begin{subequations}\label{iii-8}\begin{align}
&|W_{j}(y, -\log\varepsilon)|\leq \varepsilon,\label{iii-8a}\\
&|\partial_y W_j(y, -\log\varepsilon)|\leq \varepsilon^{\frac{3}{2}-3\nu} \eta^{-\nu}(y)\ (\nu\in [0, \frac{1}{3}]),\label{iii-8b}\\
&|\partial_y^2 W_j(y, -\log\varepsilon)|\leq \varepsilon^{\frac{7}{6}}\eta^{-\frac{1}{3}}(y).\label{iii-8c} \end{align} \end{subequations}
Following \eqref{i-11}, the initial energy of $\partial_y^{\mu_0}W$ on $s=-\log\varepsilon$ satisfies \begin{equation}\label{iii-9}
\sum\limits_{j=1}^{n-1}\|\partial_{y}^{\mu_0}W_{j}(\cdot, -\log\varepsilon)\|_{L^{2}(\mathbb{R})}
+\varepsilon^{\frac{3}{2}}\|\partial_{y}^{\mu_0}W_{0}(\cdot, -\log\varepsilon)\|_{L^{2}(\mathbb{R})}\lesssim \varepsilon^{\frac{3}{2}}. \end{equation}
Under the preparations above, the new version of Theorem \ref{thmi-1} under the modulated coordinates can be stated as: \begin{thm}\label{thmii-1} {\it Under the conditions in \eqref{i-7} and the notations in \eqref{ii-1}-\eqref{ii-3}, there exists a positive constant $\varepsilon_0$ such that when $0<\varepsilon<\varepsilon_0$, the system \eqref{ii-4}-\eqref{ii-5} and \eqref{ii-12} with the initial data satisfying \eqref{iii-14}-\eqref{iii-9} has a global-in-time solution $W$ and $\tau(t), \xi(t), \kappa(t)$, which satisfy \begin{enumerate}[$(1)$] \item $\lim\limits_{s\to +\infty}\xi(t)=x^*=O(\varepsilon^2), \lim\limits_{s\to +\infty}\tau(t)=T^*=O(\varepsilon^{\frac{4}{3}})$.
\item $|\dot{\tau}(t)|\lesssim \varepsilon^{\frac{1}{3}},\ |\dot{\xi}(t)|\lesssim \varepsilon,\ |\dot{\kappa}(t)|\lesssim 1$, and $|\tau(t)|\lesssim\varepsilon^{\frac{4}{3}},\ |\xi(t)|\lesssim \varepsilon^2,\ |\kappa(t)-\kappa_0\varepsilon^{\frac{1}{3}}|\lesssim\varepsilon$.
\item $|W_0(y, s)|\lesssim\varepsilon^{\frac{1}{3}}e^{\frac{s}{2}}, |W_i(y, s)|\lesssim\varepsilon\ (1\leq i\leq n-1)$.
\item With respect to $W_0$, \begin{equation*}\begin{cases}
\text{$|W_0(y, s)-\overline{W}(y)|\leq\varepsilon^{\frac{1}{11}}\eta^{\frac{1}{6}}(y)$
and $|\partial_y (W_0(y, s)-\overline{W}(y))|\leq \varepsilon^{\frac{1}{12}}\eta^{-\frac{1}{3}}(y)$ for $|y|\leq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$},\\
|\partial_y W_0(y, s)|\leq\frac{7}{6}\eta^{-\frac{1}{3}}(y)\quad\text{for $|y|\geq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$}. \end{cases} \end{equation*}
\item For $1\leq j\leq n-1$, \begin{equation*}
|\partial_y W_j(y, s)|\lesssim e^{(3\nu-\frac{3}{2})s}\eta^{-\nu}(y)\ (\nu\in [0, \frac{1}{3}]), |\partial_y^2 W_j(y, s)|\lesssim e^{(\nu^+-\frac{7}{6})s}\eta^{-\frac{1}{3}}(y)\ (\nu^+>0). \end{equation*}
\item For $\mu_0$ given in \eqref{iv-50}, \begin{equation*}
\|\partial_y^{\mu_0}W_j(\cdot, s)\|_{L^2(\mathbb{R})}\lesssim e^{-\frac{3}{2}s}\ (1\leq j\leq n-1),\ \|\partial_y^{\mu_0}W_n(\cdot, s)\|_{L^2(\mathbb{R})}\lesssim e^{-\frac{s}{2}}. \end{equation*} \end{enumerate}} \end{thm}
\begin{rem}\label{remii-2} In Theorem \ref{thmii-1}, the spatial decay estimates in $(4)$ and $(5)$ come from the influences of the initial data $W(y, -\log\varepsilon)$ without compact support. \end{rem}
\section{Bootstrap assumptions}\label{iv}
Since the local existence of \eqref{i-6} is already known (one can see \cite{MA} for instance), we use continuous induction to establish the global-in-time estimates in Theorem \ref{thmii-1}. According to the initial data in \eqref{iii-14}-\eqref{iii-9}, we first make the following induction assumptions. In what follows, $M>0$ denotes a suitably large constant, which is independent of $\varepsilon$.
For the modulation variables in \eqref{ii-1}, suppose that \begin{subequations}\label{iv-1}\begin{align}
&|\kappa(t)-\kappa_0\varepsilon^{\frac{1}{3}}|\leq M\varepsilon,\quad|\tau(t)|\leq M\varepsilon^{\frac{4}{3}},\quad\quad |\xi(t)|\leq M\varepsilon^2,\label{iv-1a}\\
&|\dot{\kappa}(t)|\leq M,\quad\quad |\dot{\tau}(t)|\leq M\varepsilon^{\frac{1}{3}},\quad\quad |\dot{\xi}(t)|\leq M\varepsilon.\label{iv-1b} \end{align} \end{subequations}
For the bad unknown $W_0$ and the related $\mathcal{W}$ in \eqref{ii-14}, the bootstrap assumptions are \begin{subequations}\label{iv-2}\begin{align}
&\|W_0(\cdot, s)\|_{L^{\infty}}\leq M\varepsilon^{\frac{1}{3}}e^{\frac{s}{2}},\ |\partial_y^{\mu}W_0(y, s)|\leq M\ (1\leq\mu\leq 4),\label{iv-2a}\\
&|\partial_y W_0(y, s)|\leq \frac{7}{6}\eta^{-\frac{1}{3}}(y)\quad\text{for $|y|\geq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$},\label{iv-2b}\\
&\text{$|\mathcal{W}(y, s)|\leq \varepsilon^{\frac{1}{11}} \eta^{\frac{1}{6}}(y)$
and $|\partial_y\mathcal{W}(y, s)|\leq \varepsilon^{\frac{1}{12}}\eta^{-\frac{1}{3}}(y)$ for $|y|\leq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$}.\label{iv-2c} \end{align} \end{subequations}
For the good unknowns $W_i (1\leq i\leq n-1)$, it is assumed that \begin{subequations}\label{iv-3}\begin{align}
&|W_i(y, s)|\leq M\varepsilon,\ |\partial_y^{\mu} W_i(y, s)|\leq Me^{-\frac{3}{2}s}\ (1\leq\mu\leq 4),\label{iv-3a}\\
&|\partial_y W_i(y, s)|\leq Me^{(3\nu-\frac{3}{2})s}\eta^{-\nu}(y)\ (\nu\in [0, \frac{1}{3}]),\label{iv-3b}\\
&|\partial_y^2 W_i(y, s)|\leq Me^{(\nu^+-\frac{7}{6})s}\eta^{-\frac{1}{3}}(y)\ (\nu^+>0).\label{iv-3c} \end{align} \end{subequations}
In addition, we make the following auxiliary assumptions with $\ell=\frac{1}{M^4}$, \begin{equation}\label{iv-4}
|\partial_y^{\mu}\mathcal{W}(y, s)|\leq \varepsilon^{\frac{1}{10}}\ell^{4-\mu},\ 0\leq \mu\leq 4,\ |y|\leq \ell. \end{equation}
With respect to the energies of the higher order derivatives of $W$, by fixing $\mu_0$ to be the minimum positive integer such that \begin{equation}\label{iv-50} \mu_0\geq 6,\ 3\mu_0-e^{\frac{s}{2}}\beta_{\tau}\partial_y\mu_m(w)\geq \frac{13}{2}\ (1\leq m\leq n-1), \end{equation} we assume \begin{equation}\label{iv-6}
\sum\limits_{m=1}^{n-1}\|\partial_y^{\mu_0}W_m(\cdot, s)\|_{L^2(\mathbb{R})}\leq Me^{-\frac{3}{2}s},\quad \|\partial_y^{\mu_0}W_n(\cdot, s)\|_{L^2(\mathbb{R})}\leq Me^{-\frac{s}{2}}. \end{equation}
\section{Bootstrap estimates on the bad component of $W$}\label{vi}
In this section, we close the bootstrap arguments on $W_0$ and $\mathcal{W}$.
\subsection{The analysis on the characteristics of \eqref{ii-5}}
We now study some properties of the characteristics of \eqref{ii-5}. For any point $(y_0, \zeta_0)$ with $\zeta_0\geq -\log\varepsilon$, the characteristics $y(\zeta)=y(\zeta; y_0, \zeta_0)$ of \eqref{ii-5} starting from $(y_0, \zeta_0)$ is defined as \begin{equation}\label{vi1-1}\begin{cases} \dot{y}(\zeta)=\frac{3}{2}y(\zeta)+e^{\frac{s}{2}}\beta_{\tau}(\mu_n(w)-\dot{\xi}(t(\zeta)))(y(\zeta), \zeta),\ \zeta\geq \zeta_0,\\ y(\zeta_0)=y_0. \end{cases} \end{equation}
\begin{prop}\label{lem5-1} {\it Under the assumptions \eqref{i-7c} and \eqref{iv-1}-\eqref{iv-3}, when $|y_0|\geq \ell$, one has \begin{equation}\label{vi1-2}
|y(\zeta)|\geq |y_0|e^{\frac{\zeta-\zeta_0}{2}}\ (\zeta\geq\zeta_0). \end{equation}
In addition, if $(y(\zeta), \zeta)$ goes through some point $(y, s)$ with $|y|\leq\ell$ and $s\geq\zeta_0$, then \begin{equation}\label{vi1-21}
|y(\zeta)|\leq \ell\ (\zeta_0\leq \zeta\leq s). \end{equation}} \end{prop}
To prove Proposition \ref{lem5-1} and for later uses, we first establish the following results on $\mu_n(w)-\dot{\xi}(t)$. \begin{lem}\label{lem5-0} {\it One has \begin{equation}\label{vi1-6}
|\mu_n(w)-\dot{\xi}(t)|\leq e^{-\frac{s}{2}}(\frac{7}{6}+\varepsilon^{\frac{1}{20}})|y|+M^2e^{-s} \end{equation} and \begin{equation}\label{vi1-61}
|\mu_n(w)-\dot{\xi}(t)-e^{-\frac{s}{2}}W_0|\leq \varepsilon^{\frac{1}{8}}e^{-\frac{s}{2}}|y|^{\frac{1}{2}}+M^2e^{-s}. \end{equation} } \end{lem}
\begin{proof} Note that \begin{equation}\label{vi1-3} (\mu_n(w)-\dot{\xi}(t))(y, s)=(\mu_n(w)-\mu_n(w^0))(y, s)+(\mu_n(w^0)-\dot{\xi}(t))(s). \end{equation} Then it follows from \eqref{ii-3}, \eqref{ii-10}, \eqref{ii-12c}, \eqref{iv-2a} and \eqref{iv-3a} that \begin{equation}\label{vi1-4}
|\mu_n(w^0)-\dot{\xi}(t)|(s)\leq M^2 e^{-s}. \end{equation}
In addition, due to $\partial_{w_n}\mu_n(0)=1$, it is derived from \eqref{i-7c}, \eqref{ii-10}, \eqref{iv-2a}-\eqref{iv-2b}, \eqref{iv-3a} and \eqref{iv-3b} with $\nu=\frac{1}{4}$ that \begin{equation}\label{vi1-5}\begin{aligned}
&|\mu_n(w)-\mu_n(w^0)-e^{-\frac{s}{2}}W_0|(y, s)\\
=&|\mu_n(w)-\mu_n(w^0)-e^{-\frac{s}{2}}\int_0^{y}\partial_y W_0(z, s)dz|(y, s)\\
\leq &\sum\limits_{i=1}^{n}|\int_0^1\partial_{w_i}\mu_n(\beta w+(1-\beta)w^0) d\beta-\delta_n^i|\cdot |\int_0^{y}\partial_y W_i(z, s) dz|\\
\leq &M^2\varepsilon^{\frac{1}{4}}e^{-\frac{s}{2}}\min\{|y|^{\frac{1}{2}}, |y|\}.\end{aligned} \end{equation} By \eqref{iv-2b}-\eqref{iv-2c}, \eqref{ii-9b} and \eqref{ii-10}, we arrive at \begin{equation}\label{vi1-51}
|W_0(y, s)|=|\int_0^y\partial_y W_0(z, s)dz|\leq (\frac{7}{6}+\varepsilon^{\frac{1}{14}})|y|. \end{equation} Substituting \eqref{vi1-4}-\eqref{vi1-51} into \eqref{vi1-3} yields \eqref{vi1-6}-\eqref{vi1-61} and then the proof of Lemma \ref{lem5-0} is completed.\end{proof}
We now start the proof of Proposition \ref{lem5-1}.
\begin{proof} Since \eqref{vi1-21} can be easily derived from \eqref{vi1-2}, it suffices to prove \eqref{vi1-2}. Due to $|\beta_{\tau}|\leq 2$ by \eqref{ii-4} and \eqref{iv-1b}, it follows from \eqref{vi1-1} and \eqref{vi1-6} that \begin{equation}\label{vi1-7}
\frac{d}{d\zeta}y^2(\zeta)\geq \frac{7}{12}y^2(\zeta)-2M^2 e^{-\zeta}|y(\zeta)|\geq \frac{13}{24}y^2(\zeta)-M^2 e^{-\zeta}. \end{equation}
We derive from \eqref{vi1-7} and the assumption $|y_0|\geq\ell$ that \begin{equation*} e^{-\frac{13}{24}\zeta}y^2(\zeta)\geq e^{-\frac{13}{24}\zeta_0}y_0^2-M^2 e^{-(1+\frac{13}{24})\zeta_0}\geq \frac{1}{4}e^{-\frac{13}{24}\zeta_0}\ell^2\ (\zeta\geq \zeta_0) \end{equation*} and \begin{equation*}
|y(\zeta)|\geq \frac{1}{2}\ell\quad\text{when $\zeta\geq\zeta_0$}. \end{equation*} Together with \eqref{vi1-7}, this yields \begin{equation*} \frac{d}{d\zeta}y^2(\zeta)\geq \frac{1}{2}y^2(\zeta). \end{equation*} Then \eqref{vi1-2} is obtained and the proof of Proposition \ref{lem5-1} is completed.\end{proof}
\begin{rem}\label{lem5-2} {\it For each point $(y, s)\in\mathbb{R}\times [-\log\varepsilon, +\infty)$, one can define the following backward characteristics $y=y(\zeta)$ starting from $(y_0, \zeta_0)=(y_0(y, s), \zeta_0)$ \begin{equation}\label{vi1-8}\begin{cases} \dot{y}(\zeta)=\frac{3}{2}y(\zeta)+e^{\frac{s}{2}}\beta_{\tau}(\mu_n(w)-\dot{\xi}(t))(y(\zeta), \zeta),\ \zeta_0\leq \zeta\leq s,\\ y(\zeta_0)=y_0. \end{cases} \end{equation} According to Proposition \ref{lem5-1}, $(y(\zeta), \zeta)$ can be classified into one of the following cases: \begin{enumerate}[{\bf Case} $1$.] \item Set $\zeta_0=-\log\varepsilon$ and there are no additional constraints on $y(\zeta)$.
\item When $|y|\leq \ell$, set $\zeta_0=-\log\varepsilon$, then $|y(\zeta)|\leq \ell$ holds for $-\log\varepsilon\leq\zeta\leq s$ due to \eqref{vi1-21}.
\item When $|y|\geq \ell$, set $(y_0, \zeta_0)$ with $|y_0|>\ell$ and $\zeta_0=-\log\varepsilon$ or $|y_0|=\ell$ and $\zeta_0\geq-\log\varepsilon$, then $|y(\zeta)|\geq |y_0| e^{\frac{\zeta-\zeta_0}{2}}$ holds
for $\zeta_0\leq\zeta\leq s$ due to \eqref{vi1-2}.
\end{enumerate}}
\end{rem}
\subsection{Bootstrap estimate on $W_0$}\label{vi-a}
For each point $(y, s)\in\mathbb{R}\times [-\log\varepsilon, +\infty)$, it follows from the {\bf Case} 1 in Remark \ref{lem5-2} and \eqref{ii-11b} with $\mu=0$ that \begin{equation}\label{vi2-1} (\frac{d}{d\zeta}-\frac{1}{2})W_0(y(\zeta), \zeta)=F_0^0(y(\zeta), \zeta). \end{equation} By \eqref{iv-1b}, \eqref{iv-2a} and \eqref{iv-3a}, we have \begin{equation}\label{vi2-2}\begin{aligned}
|F_0^0(y(\zeta), \zeta)|\leq& e^{-\frac{\zeta}{2}}(|\beta_{\tau}||\dot{\kappa}(t)|)(\zeta)
+\beta_{\tau}e^{\zeta}\sum\limits_{j=1}^{n-1}(|a_{nj}(w)||\partial_y W_j|)(y(\zeta), \zeta)\\ \leq& 2Me^{-\frac{\zeta}{2}}+M^2 e^{-\frac{\zeta}{2}}\leq 2M^2 e^{-\frac{\zeta}{2}}. \end{aligned} \end{equation} Combining \eqref{vi2-1} with \eqref{vi2-2} yields \begin{equation}\label{vi2-3}\begin{aligned}
e^{-\frac{s}{2}}|W_0(y, s)|&=|e^{\frac{1}{2}\log\varepsilon}W_0(y_0, -\log\varepsilon)
+\int_{-\log\varepsilon}^{s}e^{-\frac{\zeta}{2}}F_0^0(y(\zeta), \zeta)d\zeta|\\
&\leq \sqrt{\varepsilon}\|W_0(\cdot, -\log\varepsilon)\|_{L^{\infty}}+2M^2\varepsilon. \end{aligned} \end{equation} Then it is derived from \eqref{vi2-3} and \eqref{iii-7a} that \begin{equation}\label{vi2-4}
\|W_0(\cdot, s)\|_{L^{\infty}}\leq \varepsilon^{\frac{1}{3}}e^{\frac{s}{2}}. \end{equation}
\subsection{Bootstrap estimate on $\mathcal{W}$ when $|y|\leq\ell$}\label{vi-b}
For each $(y, s)\in \mathbb{R}\times [-\log\varepsilon, +\infty)$, by {\bf Case} 1 in Remark \ref{lem5-2}, it follows from \eqref{ii-15} that \begin{equation}\label{vi3-1}\begin{aligned} \partial^{\mu}\mathcal{W}(y, s)=&\partial^{\mu}\mathcal{W}(y_0(y, s), -\log\varepsilon)\exp\left({-\int_{-\log\varepsilon}^{s}\mathcal{D}_{\mu}(y(\alpha), \alpha)d\alpha}\right)\\ &+\int_{-\log\varepsilon}^{s}\mathcal{F}_{\mu}(y(\zeta), \zeta)\exp\left(-{\int_{\zeta}^{s}\mathcal{D}_{\mu}(y(\alpha), \alpha)d\alpha}\right)d\zeta. \end{aligned} \end{equation}
We next estimate $\partial_y^4\mathcal{W}(y, s)$ when $|y|\leq\ell$. In this situation,
$|y(\alpha)|\leq\ell$ holds for $-\log\varepsilon\leq\alpha\leq s$ by \eqref{vi1-21} in Proposition \ref{lem5-1}. For $\mu=4$ in \eqref{ii-15}, one has from \eqref{ii-3} and \eqref{ii-14} that \begin{equation}\label{vi3-11}\begin{aligned} &\mathcal{D}_4(y(\alpha), \alpha)\\ =&\frac{11}{2}+\beta_{\tau}\overline{W}'(y(\alpha))+\sum\limits_{m=1}^{n}4e^{\frac{s}{2}}\beta_{\tau}\partial_{w_m}\mu_{n}(w)\partial_y W_m\\ =&\frac{11}{2}+\beta_{\tau}\overline{W}'(y(\alpha))+4\beta_{\tau}\partial_y W_0(y(\alpha), \alpha)+\sum\limits_{m=1}^{n}4e^{\frac{s}{2}}\beta_{\tau}\left(\partial_{w_m}\mu_n(w)-\delta_n^m\right)\partial_y W_m\\ =&\frac{11}{2}+5\beta_{\tau}\overline{W}'(y(\alpha))+4\beta_{\tau}\partial_y\mathcal{W}(y(\alpha), \alpha)+\sum\limits_{m=1}^{n}4e^{\frac{s}{2}}\beta_{\tau}\left(\partial_{w_m}\mu_n(w)-\delta_n^m\right)\partial_y W_m. \end{aligned} \end{equation}
Due to $\partial_{w_n}\mu_n(0)=1$ and $|y(\alpha)|\leq\ell$ for $-\log\varepsilon\leq\alpha\leq s$, then by \eqref{vi3-11}, \eqref{i-7}, \eqref{ii-3}, \eqref{ii-9b}, \eqref{iv-1}, \eqref{iv-2} and \eqref{iv-3a}, we arrive at \begin{equation}\label{vi3-2} \mathcal{D}_4\geq \frac{11}{2}-5-M\varepsilon^{\frac{1}{12}}\geq\frac{1}{3}. \end{equation}
It follows from \eqref{vi3-1} and \eqref{vi3-2} that \begin{equation}\label{vi3-3}\begin{aligned}
|\partial_y^4\mathcal{W}(y, s)|&\leq|\partial_y^4\mathcal{W}(y_0(y, s), -\log\varepsilon)|+3\|\mathcal{F}_{4}(y(\zeta), \zeta)\chi_{[-\log\varepsilon, s]}(\zeta)\|_{L^{\infty}}. \end{aligned} \end{equation}
When $|y|\leq\ell$, it is derived from \eqref{vi1-21} in Proposition \ref{lem5-1} and \eqref{iii-7d} that $|y_0|=|y_0(y, s)|\leq \ell<1$ and \begin{equation}\label{vi3-4}
|\partial_y^4\mathcal{W}(y_0(y, s), -\log\varepsilon)|\leq \varepsilon^{\frac{1}{9}}. \end{equation}
For $\mathcal{F}_4(y(\zeta), \zeta)$ in \eqref{vi3-3}, it follows from \eqref{ii-15}, \eqref{i-7}, \eqref{ii-9}, \eqref{ii-12c}, \eqref{iv-1}, \eqref{iv-2a}-\eqref{iv-2b} and \eqref{iv-3a}-\eqref{iv-3b} that \begin{equation}\label{vi3-5}\begin{aligned}
&|\mathcal{F}_4(y(\zeta), \zeta)|\\
\leq& \sum\limits_{\nu=0}^{3}M|\partial_y^{\nu}\mathcal{W}(y(\zeta), \zeta)|+M^2\varepsilon^{\frac{1}{2}}+Me^{\frac{s}{2}}\sum\limits_{\nu=0}^{4}|\partial_y^{\nu}(\mu_{n}(w)
-e^{-\frac{s}{2}}W_0-\dot{\xi}(t))|(y(\zeta), \zeta) \end{aligned} \end{equation} and \begin{equation}\label{vi3-6}\begin{aligned}
&\sum\limits_{\nu=0}^{4}|\partial_y^{\nu}(\mu_{n}(w)-e^{-\frac{s}{2}}W_0-\dot{\xi}(t))|(y(\zeta), \zeta)\\
=&\sum\limits_{\nu=0}^{4}|\partial_y^{\nu}\left(\sum\limits_{m=1}^{n}\int_0^1(\partial_{w_m}\mu_n(\beta w+(1-\beta)w^0)d\beta-\delta_n^m)\cdot\int_0^{y}\partial_y W_m(z, \zeta)dz\right)|(y(\zeta), \zeta)\\
\leq& M^2 e^{-\frac{3s}{2}}+M^2\varepsilon^{\frac{1}{3}}e^{-\frac{s}{2}}(1+|y(\zeta)|). \end{aligned} \end{equation}
By the definition of $\ell$ in \eqref{iv-4} and $|y(\zeta)|\leq\ell $ shown in \eqref{vi1-21} when $|y|\leq\ell$, then it is derived from \eqref{vi3-5}-\eqref{vi3-6} and \eqref{iv-4} that \begin{equation*}
|\mathcal{F}_4(y(\zeta), \zeta)|\leq\varepsilon^{\frac{1}{10}}\ell^{\frac{3}{5}}. \end{equation*} Combining this with \eqref{vi3-3}-\eqref{vi3-4} yields \begin{equation}\label{vi-9}
|\partial_y^4\mathcal{W}(y, s)|\leq 2\varepsilon^{\frac{1}{10}}\ell^{\frac{1}{2}}\leq\frac{1}{2}\varepsilon^{\frac{1}{10}}\quad
\text{for $|y|\leq\ell$}. \end{equation}
In addition, since $\partial_y^{\mu}\mathcal{W}(0, s)=0$ for $\mu=1, 2, 3$ by \eqref{ii-9a} and \eqref{ii-10}, we have that for $|y|\leq\ell$, \begin{equation}\label{vi-10}\begin{aligned}
|\partial_y^{\nu}\mathcal{W}(y, s)|=&|\int_{0}^{y}\partial_y^{\nu+1}\mathcal{W}(z, s)dz|\\ \leq& 2\varepsilon^{\frac{1}{10}}\ell^{4-\nu+\frac{1}{2}}\leq \frac{1}{2}\varepsilon^{\frac{1}{10}}\ell^{4-\nu}\quad (0\leq\nu\leq 3). \end{aligned} \end{equation}
\subsection{Bootstrap estimates on $\mathcal{W}$ when $|y|\leq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$}\label{vi-c}
In the region $\{(y, s): |y|\leq \ell\}$, it is derived from \eqref{vi-10} that \begin{equation}\label{vi-11}
|\eta^{-\frac{1}{6}}(y)\mathcal{W}(y, s)|\leq 2\varepsilon^{\frac{1}{10}}\ell^{4+\frac{1}{2}}\quad\text{for $|y|\leq\ell$}. \end{equation}
In the region $\{(y, s): \ell<|y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}\}$, by {\bf Case} 3 in Remark \ref{lem5-2} and \eqref{ii-16}, one has \begin{equation*}\label{vi4-1} (\frac{d}{d\zeta}+\mathcal{D}_{\mu, \nu}(y(\zeta), \zeta))[\eta^{\nu}\partial^{\mu}\mathcal{W}](y(\zeta), \zeta)=[\eta^{\nu}\mathcal{F}_{\mu}](y(\zeta), \zeta) \end{equation*} and \begin{equation}\label{vi4-2}\begin{aligned} \left[\eta^{\nu}\partial^{\mu}\mathcal{W}\right](y, s)&=[\eta^{\nu}\partial^{\mu}\mathcal{W}](y_0, \zeta_0)\exp\left(-\int_{\zeta_0}^{s}\mathcal{D}_{\mu, \nu}(y(\alpha), \alpha)d\alpha\right)\\ &+\int_{\zeta_0}^{s}[\eta^{\nu}\mathcal{F}_{\mu}](y(\zeta), \zeta)\exp\left(-\int_{\zeta}^{s}\mathcal{D}_{\mu, \nu}(y(\alpha), \alpha)d\alpha\right)d\zeta. \end{aligned} \end{equation} For $(\mu, \nu)=(0, -\frac{1}{6})$ in \eqref{ii-16}, it comes from \eqref{ii-12}, \eqref{iv-2a} and \eqref{iv-3a} that \begin{equation}\label{vi4-3} \mathcal{D}_{0, -\frac{1}{6}}=-\frac{1}{2(1+y^2)}+\beta_{\tau}\overline{W}'+\frac{\beta_{\tau}}{3}\frac{y}{1+y^2}e^{\frac{s}{2}}(\mu_{n}(w)-\dot{\xi}(t)). \end{equation} In addition, by \eqref{vi1-61}, \eqref{iv-2b} and \eqref{ii-10}, one has \begin{equation}\label{vi4-4}
|\mu_n(w)-\dot{\xi}(t)|(y(\alpha), \alpha)\leq 4e^{-\frac{\alpha}{2}}|y(\alpha)|^{\frac{1}{2}}+M^2 e^{-\alpha}. \end{equation} Combining \eqref{vi4-3}-\eqref{vi4-4} with \eqref{ii-9b} and \eqref{iv-1b} yields \begin{equation*}
|\mathcal{D}_{0, -\frac{1}{6}}(y(\alpha), \alpha)|\leq 10\eta^{-\frac{1}{4}}(y(\alpha))+M^2 e^{-\frac{\alpha}{2}}, \end{equation*} and then it follows from Proposition \ref{lem5-1} and {\bf Case 3} in Remark \ref{lem5-2} that \begin{equation}\label{vi4-5}
\int_{\zeta_0}^{+\infty}|\mathcal{D}_{0, -\frac{1}{6}}|(y(\alpha), \alpha)d\alpha \leq 2M^2\varepsilon^{\frac{1}{2}}+10\int_{\zeta_0}^{+\infty}\frac{1}{(1+\ell^2 e^{\frac{\alpha-\zeta_0}{2}})^{\frac{1}{4}}}d\alpha\leq 200 \ln\frac{1}{\ell}. \end{equation} From \eqref{vi4-2} with $(\mu, \nu)=(0, -\frac{1}{6})$ and \eqref{vi4-5}, we obtain \begin{equation}\label{vi4-6}\begin{aligned}
&|\eta^{-\frac{1}{6}}\mathcal{W}|(y, s)\\
\leq& \frac{1}{\ell^{200}}\left(\eta^{-\frac{1}{6}}(y(\zeta_0))|\mathcal{W}|(y(\zeta_0), \zeta_0)+\int_{\zeta_0}^{s}\eta^{-\frac{1}{6}}(y(\zeta))|\mathcal{F}_0|(y(\zeta), \zeta)d\zeta\right)\quad\text{for $|y|\geq\ell$}. \end{aligned} \end{equation} With \eqref{ii-9b}, \eqref{iii-6}, \eqref{iv-1}, \eqref{iv-3a} and \eqref{vi1-4}-\eqref{vi1-5} , $\mathcal{F}_0$ in \eqref{ii-15} satisfies \begin{equation}\label{vi4-7}\begin{aligned}
&|\mathcal{F}_0|(y(\zeta), \zeta)\\
\leq& 2e^{\frac{s}{2}}\eta^{-\frac{1}{3}}(y(\zeta))(|\mu_n(w^0)-\dot{\xi}(t)|+|\mu_n(w)-\mu_{n}(w^0)-e^{-\frac{s}{2}}W_0|)\\ &+M^2 e^{-\frac{s}{2}}+2M\varepsilon\eta^{-\frac{1}{6}}(y(\zeta))\\ \leq& 2e^{\frac{s}{2}}\eta^{-\frac{1}{3}}(y(\zeta))(M^2\varepsilon^{\frac{1}{4}}e^{-\frac{\zeta}{2}}\eta^{\frac{1}{4}}(y(\zeta)) +M^2 e^{-\zeta})+M^2 e^{-\frac{\zeta}{2}}+2M\varepsilon\eta^{-\frac{1}{6}}(y(\zeta))\\ \leq& M^3 e^{-\frac{\zeta}{2}}+M^3\varepsilon^{\frac{1}{4}}\eta^{-\frac{1}{12}}(y(\zeta)). \end{aligned} \end{equation}
When $|y|\leq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$ and $\zeta_0=-\log\varepsilon$, one has $|y(\zeta_0)|\leq\mathcal{L}$ due to \eqref{vi1-2} in Proposition \ref{lem5-1}. Thus, combining \eqref{vi4-7} with \eqref{vi4-6}, \eqref{vi-11}, \eqref{iii-7b} and {\bf Case 3} in Remark \ref{lem5-2} shows \begin{equation}\label{vi-16}
|\eta^{-\frac{1}{6}}(y)\mathcal{W}|(y, s)\leq\frac{1}{2}\varepsilon^{\frac{1}{11}}\quad\text{for $|y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$}. \end{equation}
\subsection{Bootstrap estimates on $\partial_y\mathcal{W}$ when $|y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$}\label{vi-d}
As in Subsection \ref{vi-c}, the $L^{\infty}$ estimate of $\eta^{\frac{1}{3}}\partial_y\mathcal{W}$ is still considered in the cases of $|y|\leq \ell$ and $\ell<|y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$. In the region $\{(y, s): \ell<|y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}\}$, by {\bf Case} 3 in Remark \ref{lem5-2}, it follows from \eqref{vi1-2} in Proposition \ref{lem5-1} that when $|y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$, \begin{equation}\label{vi-170}
|y(\zeta)|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{\zeta}{4}},\ \zeta_0\leq \zeta\leq s. \end{equation}
For $|y|\leq\ell$, by \eqref{vi-10}, one has that \begin{equation}\label{vi-17}
\eta^{\frac{1}{3}}(y)|\partial_y\mathcal{W}(y, s)|\leq 2\varepsilon^{\frac{1}{10}}\ell^{3+\frac{1}{2}}\ (|y|\leq\ell). \end{equation}
Next, we estimate $\eta^{\frac{1}{3}}\partial_y\mathcal{W}$ when $\ell\leq |y|\leq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$. For $(\mu, \nu)=(1, \frac{1}{3})$ in \eqref{ii-16}, we have \begin{equation*}\begin{aligned} \mathcal{D}_{1, \frac{1}{3}}=&\frac{1}{1+y^2}+\beta_{\tau}\overline{W}'+\beta_{\tau}e^{\frac{s}{2}}\partial_y \mu_{n}(w)-\frac{2y}{3(1+y^2)}\beta_{\tau}e^{\frac{s}{2}}(\mu_{n}(w)-\dot{\xi}(t))\\ =&\frac{1}{1+y^2}+\beta_{\tau}\overline{W}'+\beta_{\tau}\partial_{w_n}\mu_n(w)\partial_y W_0+\beta_{\tau}e^{\frac{s}{2}}\sum\limits_{j\neq n}\partial_{w_j}\mu_n(w)\partial_y W_j\\ &-\frac{2y}{3(1+y^2)}\beta_{\tau}e^{\frac{s}{2}}(\mu_n(w)-\dot{\xi}(t)). \end{aligned} \end{equation*} Combining this with \eqref{i-7c}, \eqref{ii-9b}, \eqref{iv-1b}, \eqref{iv-2a}-\eqref{iv-2b}, \eqref{iv-3a} and \eqref{vi4-4}-\eqref{vi4-5} yields \begin{equation*}
|\mathcal{D}_{1, \frac{1}{3}}(y(\alpha), \alpha)|\leq 10\eta^{-\frac{1}{4}}(y(\alpha))+M^2 e^{-\frac{\alpha}{2}} \end{equation*} and \begin{equation}\label{vi-18}
\int_{\zeta_0}^{+\infty}|\mathcal{D}_{1, \frac{1}{3}}|(y(\alpha), \alpha)d\alpha\leq 200\ln\frac{1}{\ell}. \end{equation}
It is derived from \eqref{vi4-2} and \eqref{vi-18} that for $|y|\geq\ell$, \begin{equation}\label{vi-19}\begin{aligned}
&|\eta^{\frac{1}{3}}(y)\partial_y\mathcal{W}|(y, s)\\
\leq& \frac{1}{\ell^{200}}\left(\eta^{\frac{1}{3}}(y(\zeta_0))|\partial_y\mathcal{W}|(y(\zeta_0), \zeta_0)+\int_{\zeta_0}^{s}\eta^{\frac{1}{3}}(y(\zeta))|\mathcal{F}_1|(y(\zeta), \zeta)d\zeta\right). \end{aligned} \end{equation} For $\mathcal{F}_1$ in \eqref{vi-19}, one has from \eqref{ii-15}, \eqref{i-7d} and \eqref{iv-1}, \eqref{iv-2a}, \eqref{iv-3a} that \begin{equation}\label{vi-20}\begin{aligned}
|\mathcal{F}_1|&\leq 2|\overline{W}''\mathcal{W}|+Me^{s}|W|\sum\limits_{j=1}^{n-1}|\partial_y^2 W_j|+Me^{s}\sum\limits_{j=1}^{n-1}|\partial_y W_j|^2+Me^{\frac{s}{2}}\sum\limits_{j=1}^{n-1}|\partial_y W_0 \partial_y W_j|\\
&+2e^{\frac{s}{2}}|\overline{W}''||\mu_{n}(w)-e^{-\frac{s}{2}}W_0-\dot{\xi}(t)|+Me^{\frac{s}{2}}|\overline{W}'|\sum\limits_{j=1}^{n-1}|\partial_y W_j|\\
&+2|\overline{W}'||\partial_{w_n}\mu_{n}(w)-1||\partial_y W_0|+2M\varepsilon(|\overline{W}'|^2+|\overline{W}''\overline{W}|) =\sum\limits_{k=1}^{8}I_k. \end{aligned} \end{equation} For $I_1$, it follows from \eqref{ii-9b}, \eqref{vi-16} and \eqref{vi-170} that \begin{equation}\label{vi-21} \eta^{\frac{1}{3}}(y(\zeta))I_1(y(\zeta), \zeta)\leq 2\varepsilon^{\frac{1}{11}}\eta^{-\frac{1}{3}}(y(\zeta)). \end{equation} For $I_2$, we have from \eqref{ii-3}, \eqref{iv-1a}, \eqref{vi2-4}, \eqref{iv-3a} and \eqref{iv-3c} with $\nu^+=\frac{1}{24}$ that \begin{equation}\label{vi-22} \eta^{\frac{1}{3}}(y(\zeta))I_2(y(\zeta), \zeta)\leq M^4\varepsilon^{\frac{1}{3}}e^{-\frac{1}{8}\zeta}. \end{equation} For $I_3$, \eqref{iv-3b} with $\nu=\frac{1}{6}$ shows \begin{equation}\label{vi-23} \eta^{\frac{1}{3}}(y(\zeta))I_3(y(\zeta), \zeta)\leq M^3 e^{-\zeta}. \end{equation} In the similar way, due to $\partial_{w_n}\mu_n(0)=1$, it is derived from \eqref{i-7}, \eqref{iv-2a}, \eqref{iv-3a} and \eqref{ii-9b} that \begin{equation}\label{vi-24} \eta^{\frac{1}{3}}(y(\zeta))(I_4+I_6+I_7+I_8)(y(\zeta), \zeta)\leq M^3 e^{-\zeta} +M^2\varepsilon^{\frac{1}{3}}\eta^{-\frac{1}{3}}(y(\zeta)). \end{equation} In addition, it follows from \eqref{ii-9b} and \eqref{vi1-61} that \begin{equation}\label{vi-25} \eta^{\frac{1}{3}}(y(\zeta))I_5(y(\zeta), \zeta)\leq M^2\varepsilon^{\frac{1}{8}}\eta^{-\frac{1}{2}}(y(\zeta)). \end{equation} Substituting \eqref{vi-21}-\eqref{vi-25} into \eqref{vi-20} yields \begin{equation}\label{vi-26}
\eta^{\frac{1}{3}}(y(\zeta))|\mathcal{F}_1|(y(\zeta), \zeta)\leq 4\varepsilon^{\frac{1}{11}}\eta^{-\frac{1}{3}}(y(\zeta))+\varepsilon^{\frac{1}{11}}e^{-\frac{1}{8}\zeta}. \end{equation} Analogously to obtain \eqref{vi-16}, combining \eqref{vi-26} with \eqref{vi-19}, \eqref{vi-17} and \eqref{iii-7b} derives \begin{equation}\label{vi-27}
\eta^{\frac{1}{3}}(y)|\partial_y\mathcal{W}(y, s)|\leq\frac{1}{2}\varepsilon^{\frac{1}{12}}\quad \text{for $|y|\leq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$}. \end{equation}
\subsection{More delicate estimates for $W_0$}\label{vi-e}
In the Subsection, we mainly estimate the weighted $L^{\infty}$ norms of $\eta^{-\frac{1}{6}}W_0$ and $\eta^{\frac{1}{3}}\partial_y W_0$ in the whole spatial space. Since the proof procedures are very similar to the processes in Subsection \ref{vi-c} and Subsection \ref{vi-d}, we just give the sketch of the related verifications. For $\mu\in\mathbb{N}_0$ and $\nu\in\mathbb{R}$, it is derived from \eqref{ii-5} that \begin{equation}\label{vi-28} \left(\partial_s+(\frac{3}{2}y+\beta_{\tau}e^{\frac{s}{2}}(\mu_n(w)-\dot{\xi}(t)))\partial_y\right)[\eta^{\nu}\partial^{\mu}W_0]+\overline{D}_{\mu, \nu}[\eta^{\nu}\partial^{\mu}W_0]=\eta^{\nu}\overline{F}_{\mu}^{0}, \end{equation} where \begin{equation*} \overline{D}_{\mu, \nu}=\frac{3\mu-1}{2}+\mu\beta_{\tau} e^{\frac{s}{2}}\partial_y\mu_n(w)+\beta_{\tau}\partial_y W_0\boldsymbol{1}_{\mu\geq 2}-\frac{2\nu y}{1+y^2}(\frac{3}{2}y+\beta_{\tau}e^{\frac{s}{2}}(\mu_{n}(w)-\dot{\xi}(t))) \end{equation*} and \begin{equation*}\begin{aligned} \overline{F}_{\mu}^{0}&=-\sum\limits_{2\leq\beta\leq\mu-1} C_{\mu}^{\beta}\beta_{\tau}e^{\frac{s}{2}}\partial_y^{\beta}\mu_n(w)\partial_y^{\mu-\beta+1}W_0 -\beta_{\tau}e^{\frac{s}{2}}\partial_y^{\mu}(\mu_n(w)-w_n)\partial_y W_0\boldsymbol{1}_{\mu\geq 2}\\ &-\sum\limits_{j=1}^{n-1}\beta_{\tau}e^{s}\partial_y^{\mu}(a_{nj}(w)\partial_y W_j)-\beta_{\tau}e^{-\frac{s}{2}}\dot{\kappa}(t)\delta_{\mu}^0. \end{aligned} \end{equation*}
When $|y|\leq\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$, it follows from \eqref{ii-9b}, \eqref{vi-16}
and \eqref{vi-27} that \begin{equation}\label{vi-29}
\eta^{-\frac{1}{6}}(y)|W_0(y, s)|\leq 1+\frac{1}{2}\varepsilon^{\frac{1}{11}},\ \eta^{\frac{1}{3}}(y)|\partial_y W_0(y, s)|\leq 1+\frac{1}{2}\varepsilon^{\frac{1}{12}}. \end{equation}
When $|y|\geq \mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{s}{4}}$, the backward characteristics $y=y(\zeta)$ is defined by \eqref{vi1-8} with $(y_0, \zeta_0)$ satisfying $|y_0|\geq \mathcal{L}, \zeta_0=-\log\varepsilon$ or $|y_0|=\mathcal{L}\varepsilon^{\frac{1}{4}}e^{\frac{\zeta_0}{4}}, \zeta_0>-\log\varepsilon$. In this case, we have $|y(\zeta)|\geq\mathcal{L}e^{\frac{\zeta-\zeta_0}{4}}$ for $\zeta\geq\zeta_0$ due to \eqref{vi1-2}. Moreover, it is derived from \eqref{vi-28} that \begin{equation}\label{vi6-0}\begin{aligned} \left[\eta^{\nu}\partial^{\mu}W_0\right](y, s)&=[\eta^{\nu}\partial^{\mu}W_0](y_0, \zeta_0)\exp \left(-\int_{\zeta_0}^{s}\overline{D}_{\mu, \nu}(y(\alpha), \alpha)d\alpha\right)\\ &+\int_{\zeta_0}^{s}[\eta^{\nu}\overline{F}_{\mu}^0](y(\zeta), \zeta)\exp\left(-\int_{\zeta}^{s}\overline{D}_{\mu, \nu}(y(\alpha), \alpha)d\alpha\right)d\zeta. \end{aligned} \end{equation} In addition, by \eqref{vi4-4} and \eqref{iv-2a}-\eqref{iv-2b}, \eqref{iv-3a}, we have \begin{equation}\label{vi6-1}\begin{aligned}
&|\overline{D}_{0, -\frac{1}{6}}(y(\alpha), \alpha)|\\
=&\left|-\frac{1}{2(1+y(\alpha)^2)}+\frac{y(\alpha)}{3(1+y(\alpha)^2)}e^{\frac{\alpha}{2}}\beta_{\tau}(\mu_n(w)-\dot{\xi}(t))\right|\\
\leq&\frac{1}{2}\eta^{-1}(y(\alpha))+e^{\frac{\alpha}{2}}\eta^{-\frac{1}{2}}(y(\alpha))|\mu_n(w)-\dot{\xi}(t)|\\ \leq&5\eta^{-\frac{1}{4}}(y(\alpha))+M^2 e^{-\frac{\alpha}{2}} \end{aligned} \end{equation} and \begin{equation}\label{vi6-2}\begin{aligned}
&|\overline{D}_{1, \frac{1}{3}}(y(\alpha), \alpha)|\\
=&\left|\frac{1}{1+y(\alpha)^2}+e^{\frac{\alpha}{2}}\beta_{\tau}\partial_y\mu_n(w)
-\frac{2y(\alpha)}{3(1+y(\alpha)^2)}e^{\frac{\alpha}{2}}\beta_{\tau}(\mu_n(w)-\dot{\xi}(t))\right|\\
\leq&\eta^{-1}(y(\alpha))+2e^{\frac{\alpha}{2}}\eta^{-\frac{1}{2}}(y(\alpha))|\mu_n(w)
-\dot{\xi}(t)|+2e^{\frac{\alpha}{2}}\sum\limits_{j=1}^{n}|\partial_{w_j}\mu_n(w)||\partial_y W_j|\\ \leq& 20\eta^{-\frac{1}{4}}(y(\alpha))+M^3 e^{-\frac{\alpha}{2}}. \end{aligned} \end{equation}
Similarly to \eqref{vi4-5}, for $|y(\zeta)|\geq\mathcal{L}e^{\frac{\zeta-\zeta_0}{4}}$, one has from \eqref{vi6-1}-\eqref{vi6-2} that \begin{equation}\label{vi-31}\begin{aligned}
&\int_{\zeta_0}^{s}|\overline{D}_{0, -\frac{1}{6}}|(y(\alpha), \alpha)d\alpha+\int_{\zeta_0}^{s}|\overline{D}_{1, \frac{1}{3}}|(y(\alpha), \alpha)d\alpha\\ \leq& 4M^3\varepsilon^{\frac{1}{2}}+\int_{\zeta_0}^{s}\frac{25}{(1+\mathcal{L}^2 e^{\frac{\zeta-\zeta_0}{2}})^{\frac{1}{4}}}d\zeta\\ \leq & 4M^3\varepsilon^{\frac{1}{2}}+600\ln(1+\mathcal{L}^{-1})\leq \varepsilon^{\frac{1}{20}}. \end{aligned} \end{equation} Based on \eqref{vi-31}, together with \eqref{vi6-0} this yields \begin{equation}\label{vi-32}
\eta^{-\frac{1}{6}}(y)|W_0(y, s)|\leq (1+2\varepsilon^{\frac{1}{20}})\left(\eta^{-\frac{1}{6}}(y_0)|W_0(y_0, \zeta_0)|+\int_{\zeta_0}^{s}\eta^{-\frac{1}{6}}(y(\zeta))|\overline{F}_0^{0}|(y(\zeta), \zeta)d\zeta\right) \end{equation} and \begin{equation}\label{vi-33}
\eta^{\frac{1}{3}}(y)|\partial_y W_0(y, s)|\leq (1+2\varepsilon^{\frac{1}{20}})\left(\eta^{\frac{1}{3}}(y_0)|\partial_y W_0(y_0, \zeta_0)|+\int_{\zeta_0}^{s}\eta^{\frac{1}{3}}(y(\zeta))|\overline{F}_1^{0}|(y(\zeta), \zeta)d\zeta\right). \end{equation} By \eqref{i-7}, \eqref{iv-1}-\eqref{iv-3}, \eqref{iv-3b} with $\nu=\frac{1}{3}$ and \eqref{iv-3c} with $\nu^+=\frac{1}{24}$, then $\overline{F}_0^{0}$ and $\overline{F}_1^{0}$ in \eqref{vi-28} satisfy \begin{equation}\label{vi-34}
|\overline{F}_0^{0}(y(\zeta), \zeta)|\leq 4M^2 e^{-\frac{\zeta}{2}},\ |\overline{F}_1^{0}(y(\zeta), \zeta)|\leq 2M^2e^{-\frac{\zeta}{9}}\eta^{-\frac{1}{3}}(y(\zeta)). \end{equation} Therefore, we derive from \eqref{iii-7b}, \eqref{iii-7c}, \eqref{vi-29} and \eqref{vi-32}-\eqref{vi-34} that \begin{equation}\label{vi-35}
\eta^{-\frac{1}{6}}(y)|W_0(y, s)|\leq 1+\varepsilon^{\frac{1}{21}},\ \eta^{\frac{1}{3}}(y)|\partial_y W_0(y, s)|\leq 1 +\varepsilon^{\frac{1}{21}}. \end{equation} For the estimates of $\partial_y^{\mu}W_0$, together with Lemma \ref{lemA-3} for $u=\partial_y^{\mu}W_0$ and $d=1, p, q=\infty, r=2, j=\mu-1, m=\mu_0-1$, it comes from \eqref{ii-3}, \eqref{iv-6} and \eqref{vi-35} that for $2\leq \mu\leq 4$ and $\alpha =\frac{\mu-1}{\mu_0-\frac{1}{2}}\in (0, \frac{6}{11}]$, \begin{equation}\label{vi-36}
\|\partial_y^{\mu}W_0(\cdot, s)\|_{L^{\infty}}\leq M^{\frac{1}{20}}\|\partial_y^{\mu_0}W_0(\cdot, s)\|_{L^2}^{\alpha}\|\partial_y W_0(\cdot, s)\|_{L^{\infty}}^{1-\alpha} \leq 2^{1-\alpha}M^{\frac{1}{20}+\alpha}\leq M^{\frac{3}{5}}. \end{equation}
\section{Bootstrap estimates on good components of $W$}\label{vii}
In this section, we will apply the characteristics method to establish a series of estimates of $W_m\ (m\neq n)$.
\subsection{Framework for the characteristics method}\label{vii-a}
For $1\leq m\leq n-1$ and any point $(y_0, \zeta_0)\in\mathbb{R}\times [-\log\varepsilon, +\infty)$, we consider the following forward characteristics $y(\zeta):=y(\zeta; y_0, \zeta_0)$ of \eqref{ii-18} which starts from $(y_0, \zeta_0)$: \begin{equation}\label{vii1-0}\begin{cases} \dot{y}(\zeta)=\frac{3}{2}y(\zeta)+e^{\frac{\zeta}{2}}\beta_{\tau}(\mu_m(w)-\dot{\xi}(t))(y(\zeta), \zeta),\\ y(\zeta_0)=y_0. \end{cases} \end{equation} This yields that for $s\geq \zeta_0$, \begin{equation}\label{vii-1} y(\zeta)e^{-\frac{3}{2}\zeta}=y_0 e^{-\frac{3}{2}\zeta_0}+\int_{\zeta_0}^{\zeta}e^{-\alpha}\left(\beta_{\tau}(\cdot)(\mu_m(w)-\dot{\xi}(\cdot))\right)(y(\alpha), \alpha)d\alpha:=G_m(\zeta; y_0, \zeta_0). \end{equation}
Next we discuss the positions of $y(\zeta; y_0, \zeta_0)$ for the different cases of $(y_0, \zeta_0)$.
\begin{lem}\label{lem7-1} {\it For each $i_0\leq m\leq n-1$, $a_m:=\mu_m(0)>0$ is due to \eqref{i-7a} and the assumption $\mu_n(0)=0$ in Remark \ref{rem1-1}. Then for any point $(y_0, \zeta_0)\in\mathbb{R}\times [-\log\varepsilon, +\infty)$, $y(\zeta):=y(\zeta; y_0, \zeta_0)$
can be classified into the following six cases: \begin{enumerate}[{\bf Case} $1$.] \item When $y_0<-4a_m e^{\frac{\zeta_0}{2}}$,\ $y(\zeta)\leq -a_m e^{\frac{3}{2}\zeta-\zeta_0}<0$ holds for $\zeta\geq\zeta_0.$
\item When $ -\frac{a_m}{4}e^{\frac{\zeta_0}{2}}\leq y_0\leq 0$, there exists a number $\zeta^*\geq \zeta_0$ such that $G_m(\zeta^*; y_0, \zeta_0)=0$ and \begin{equation*}\begin{cases} -\frac{3}{2}a_m (e^{-\zeta}-e^{-\zeta^*}) e^{\frac{3}{2}\zeta}\leq y(\zeta)\leq -\frac{a_m}{2}(e^{-\zeta}-e^{-\zeta^*})e^{\frac{3}{2}\zeta}\leq 0 \quad\text{for $\zeta_0\leq \zeta\leq \zeta^*$},\\[2mm] y(\zeta)\geq \frac{a_m}{2}(e^{-\zeta^*}-e^{-\zeta})e^{\frac{3}{2}\zeta}\geq 0\quad\text{for $\zeta\geq \zeta^*$}. \end{cases} \end{equation*} \item When $y_0\geq 0$, one has $y(\zeta)\geq \frac{a_m}{2}(e^{-\zeta_0}-e^{-\zeta})e^{\frac{3}{2}\zeta}\geq 0$ \quad \text{for $\zeta\geq\zeta_0.$}
\item When $(y_0, \zeta_0)\in D^+$ and $(y(\zeta), \zeta)$ lies in the domain $D^+$, there holds \begin{equation*} -4a_m e^{\frac{\zeta}{2}}\leq y(\zeta)\leq -\frac{a_m}{4}e^{\frac{\zeta}{2}}<0\quad\text{for $\zeta\geq \zeta_0$}, \end{equation*} where $D^{+}=\{(y, \zeta): -4a_m e^{\frac{\zeta}{2}}\leq y\leq -\frac{a_m}{4}e^{\frac{\zeta}{2}},\ \zeta\geq -\log\varepsilon\}$.
\item When $(y_0, \zeta_0)\in D^+$ and the characteristics $y=y(\zeta)$ goes through $\partial D^+$ at some point $(\hat y, \hat \zeta)$ with $\hat y=-4a_m e^{\frac{\hat\zeta}{2}}$, we have \begin{equation*}\begin{cases} (y(\zeta), \zeta)\in D^+\quad\text{for $\zeta_0\leq \zeta\leq\hat\zeta$},\\ y(\zeta)\leq -a_m e^{\frac{3}{2}\zeta-\hat\zeta}<0\quad\text{for $\zeta\geq\hat\zeta$.} \end{cases} \end{equation*}
\item When $(y_0, \zeta_0)\in D^+$ and the characteristics $y=y(\zeta)$ goes through $\partial D^+$ at some point $(\hat y, \hat \zeta)$ with $\hat y=-\frac{a_m}{2} e^{\frac{\hat\zeta}{2}}$, there exists $\tilde{\zeta}>\hat\zeta$ such that $G_m(\tilde{\zeta}; y_0, \zeta_0)=0$ and $y=y(\zeta)$ can be divided into the three parts as: \begin{equation*}\begin{cases} (y(\zeta), \zeta)\in D^{+}\quad\text{for $\zeta_0\leq \zeta\leq \hat \zeta$},\\ -2a_m (e^{-\zeta}-e^{-\tilde{\zeta}})e^{\frac{3}{2}\zeta}\leq y(\zeta)\leq -\frac{a_m}{2}(e^{-\zeta}-e^{-\tilde{\zeta}}) e^{\frac{3}{2}\zeta}\leq 0\quad\text{for $\hat \zeta\leq \zeta\leq \tilde{\zeta}$},\\ y(\zeta)\geq\frac{a_m}{2}(e^{-\tilde{\zeta}}-e^{-\zeta})e^{\frac{3}{2}\zeta}\geq 0\quad\text{for $\zeta\geq \tilde{\zeta}$}. \end{cases} \end{equation*} \end{enumerate} } \end{lem}
\begin{proof} Since $a_m>0$ for $i_0\leq m\leq n-1$, by \eqref{ii-3}, \eqref{iv-1}, \eqref{iv-2a} and \eqref{iv-3a}, we then have \begin{equation}\label{vii1-1}
|\beta_{\tau}(\mu_m(w)-\dot{\xi}(t))-a_m|\leq M^3\varepsilon^{\frac{1}{3}}\leq \frac{a_m}{2}. \end{equation}
When $y_0<-4a_m e^{\frac{\zeta_0}{2}}$, it is derived from \eqref{vii-1} and \eqref{vii1-1} that \begin{equation}\label{vii1-2} y(\zeta)e^{-\frac{3}{2}\zeta}\leq -4a_m e^{-\zeta_0}+\frac{3}{2}a_m (e^{-\zeta_0}-e^{-\zeta})\leq -a_m e^{-\zeta_0}. \end{equation} This shows {\bf Case} 1.
When $-\frac{a_m}{4}e^{\frac{\zeta_0}{2}}\leq y_0\leq 0$, it follows from \eqref{vii1-1} that $G_m(\zeta; y_0, \zeta_0)$ in \eqref{vii-1} satisfies \begin{equation}\label{vii1-3}\begin{cases} G_m(\zeta_0; y_0, \zeta_0)=y_0 e^{-\frac{3}{2}\zeta_0}\leq 0,\\ G_m(+\infty; y_0, \zeta_0)\geq y_0 e^{-\frac{3}{2}\zeta_0}+\frac{a_m}{2}e^{-\zeta_0}\geq -\frac{a_m}{4}e^{-\zeta_0}+\frac{a_m}{2}e^{-\zeta_0}>0. \end{cases} \end{equation} Since $G_m(\zeta; y_0, \zeta_0)$ is a continuous function with respect to the variable $\zeta$, \eqref{vii1-3} shows that there exists $\zeta^*\geq \zeta_0$ such that $G_m(\zeta^*; y_0, \zeta_0)=0$. In this situation, we derive from \eqref{vii-1} that \begin{equation}\label{vii1-4} y(\zeta)e^{-\frac{3}{2}\zeta}=G_m(\zeta; y_0, \zeta_0)-G_m(\zeta^*; y_0, \zeta_0)=\int_{\zeta^*}^{\zeta}e^{-\alpha}\left(\beta_{\tau}(\cdot)(\mu_m(w)-\dot{\xi}(\cdot))\right)(y(\alpha), \alpha)d\alpha. \end{equation} Therefore, {\bf Case} 2 is obtained from \eqref{vii1-1}, \eqref{vii1-4} and $a_m>0$ for $i_0\leq m\leq n-1$.
Based on the results established in {\bf Case} 1-{\bf Case} 2, {\bf Case} 3-{\bf Case} 6 in Lemma \ref{lem7-1} can be carried out in the same way due to the formula \eqref{vii-1} and the definition of $D^{+}$, here we omit the details.\end{proof}
\begin{lem}\label{lem7-2} {\it For each $1\leq m\leq i_0-1$, $a_m:=\mu_m(0)<0$ is due to \eqref{i-7a} and the assumption $\mu_n(0)=0$ in Remark \ref{rem1-1}. Then for any point $(y_0, \zeta_0)\in\mathbb{R}\times [-\log\varepsilon, +\infty)$, $y(\zeta)=y(\zeta; y_0, \zeta_0)$
can be classified into the following six cases: \begin{enumerate}[{\bf Case} $1$.] \item When $y_0>-4a_m e^{\frac{\zeta_0}{2}}$, $y(\zeta)\geq -a_m e^{\frac{3}{2}\zeta-\zeta_0}>0.$
\item When $ 0\leq y_0\leq -\frac{a_m}{4}e^{\frac{\zeta_0}{2}}$, there exists a number $\zeta^{*}\geq \zeta_0$ such that $G_m(\zeta^*; y_0, \zeta_0)=0$ and \begin{equation*}\begin{cases} 0\leq -\frac{a_m}{2}(e^{-\zeta}-e^{-\zeta^*})e^{\frac{3}{2}\zeta}\leq y(\zeta)\leq -\frac{3}{2}a_m (e^{-\zeta}-e^{-\zeta^*}) e^{\frac{3}{2}\zeta} \quad\text{for $\zeta_0\leq \zeta\leq \zeta^{*}$},\\[2mm] y(\zeta)\leq \frac{a_m}{2}(e^{-\zeta^*}-e^{-\zeta})e^{\frac{3}{2}\zeta}\leq 0\quad\text{for $\zeta\geq \zeta^{*}$}. \end{cases} \end{equation*}
\item When $y_0\leq 0$, $y(\zeta)\leq \frac{a_m}{2}(e^{-\zeta_0}-e^{-\zeta})e^{\frac{3}{2}\zeta}\leq 0$ \quad\text{for $\zeta\geq \zeta_0.$}
\item When $(y_0, \zeta_0)\in D^-$ and the characteristics $(y(\zeta), \zeta)$ lies in $D^- $, one has \begin{equation*} 0<-\frac{a_m}{4} e^{\frac{\zeta}{2}}\leq y(\zeta)\leq -4a_m e^{\frac{\zeta}{2}}\quad\text{for $\zeta\geq \zeta_0$}, \end{equation*} where $D^{-}=\{(y, \zeta): -\frac{a_m}{4}e^{\frac{\zeta}{2}}\leq y\leq -4a_m e^{\frac{\zeta}{2}},\ \zeta\geq -\log\varepsilon\}$. \item When $(y_0, \zeta_0)\in D^-$ and the characteristics $(y(\zeta), \zeta)$ goes through $\partial D^-$ at some point $(\hat y, \hat \zeta)$ with $\hat y=-4 a_m e^{\frac{\hat\zeta}{2}}$, we have \begin{equation*}\begin{cases} (y(\zeta), \zeta)\in D^{-}\quad\text{for $\zeta_0\leq \zeta\leq \hat \zeta$},\\ y(\zeta)\geq -a_m e^{\frac{3}{2}\zeta-\hat \zeta}>0\quad\text{for $\zeta\geq \hat \zeta$}. \end{cases} \end{equation*}
\item When $(y_0, \zeta_0)\in D^-$ and the characteristics $(y(\zeta), \zeta)$ goes through $\partial D^-$ at some point $(\hat y, \hat \zeta)$ with $\hat y=-\frac{a_m}{2} e^{\frac{\hat\zeta}{2}}$, there exists $\tilde{\zeta}>\hat\zeta$ such that $y=y(\zeta)$ can be divided into the three parts as: \begin{equation*}\begin{cases} (y(\zeta), \zeta)\in D^{-}\quad\text{for $\zeta_0\leq \zeta\leq \hat \zeta$},\\ 0\leq -\frac{a_m}{2} (e^{-\zeta}-e^{-\tilde{\zeta}})e^{\frac{3}{2}\zeta}\leq y(\zeta)\leq -2a_m (e^{-\zeta}-e^{-\tilde{\zeta}})e^{\frac{3}{2}\zeta}\quad\text{for $\hat \zeta\leq \zeta\leq \tilde{\zeta}$},\\ y(\zeta)\leq\frac{a_m}{2}(e^{-\tilde{\zeta}}-e^{-\zeta})e^{\frac{3}{2}\zeta}\leq 0\quad\text{for $\zeta\geq \tilde{\zeta}$}. \end{cases} \end{equation*} \end{enumerate} } \end{lem} \begin{proof} Since the proof of Lemma \ref{lem7-2} is just the same as in Lemma \ref{lem7-1}, we omit the details here. \end{proof}
Based on Lemma \ref{lem7-1} and Lemma \ref{lem7-2}, we now establish the following results. \begin{lem}\label{lem7-3} {\it For $1\leq m\leq n-1$ and each forward characteristics $y(\zeta):=y(\zeta; y_0, \zeta_0)$ defined by \eqref{vii1-0}, when the function $\mathcal{D}(z, \zeta)$ satisfies that for some positive constant $c_0$, \begin{equation}\label{vii-2}
|\mathcal{D}(z, \zeta)|\leq c_0\eta^{-\frac{\kappa}{2}}(z)\ (0<\kappa<1), \end{equation} then \begin{equation}\label{vii-3}
\int_{\zeta_0}^{+\infty}|\mathcal{D}(y(\zeta), \zeta)|d\zeta\leq \frac{16 c_0}{\kappa (1-\kappa)|a_m|^{\kappa}}e^{-\frac{\kappa}{2}\zeta_0}. \end{equation}} \end{lem} \begin{proof} We only consider the {\bf Case} 2 in Lemma \ref{lem7-1}. The estimate \eqref{vii-3} for other {\bf Cases} in Lemma \ref{lem7-1} and Lemma \ref{lem7-2} can be done analogously. In the present situation, we choose $\zeta_{1}, \zeta_{2}\in [-\log\varepsilon, +\infty)$ such that
\begin{equation*}
e^{-\zeta_{1}}=\min\{2e^{-\zeta^{*}}, e^{-\zeta_0}\},\ e^{-\zeta_{2}}=\frac{1}{2}e^{-\zeta^{*}}.
\end{equation*} This implies $\zeta_0\leq \zeta_1\leq \zeta^*<\zeta_2$. Then it is derived from \eqref{vii-2} and {\bf Case} 2 in Lemma \ref{lem7-1} that
\begin{equation*}\label{vii-4}\begin{aligned}
&\int_{\zeta_{0}}^{+\infty}|\mathcal{D}(y(\zeta), \zeta)|d\zeta\\
\leq& \frac{2 c_0}{|a_m|^{\kappa}}\left(\int_{\zeta_{0}}^{\zeta_{1}}\frac{e^{-\frac{3}{2}\kappa \zeta}}{|e^{-\zeta}-e^{-\zeta^{*}}|^{\kappa}}d\zeta+\int_{\zeta_{1}}^{\zeta_{2}}\frac{e^{-\frac{3}{2}\kappa \zeta}}{|e^{-\zeta}-e^{-\zeta^{*}}|^{\kappa}}d\zeta+\int_{\zeta_{2}}^{+\infty}\frac{e^{-\frac{3}{2}\kappa \zeta}}{|e^{-\zeta}-e^{-\zeta^{*}}|^{\kappa}}d\zeta\right)\\
\leq & \frac{2 c_0}{|a_m|^{\kappa}}\left(2\int_{\zeta_{0}}^{\zeta_{1}}e^{-\frac{\kappa}{2}\zeta}d\zeta+2 e^{\kappa \zeta^{*}}\int_{\zeta_{2}}^{+\infty}e^{-\frac{3}{2}\kappa \zeta}d\zeta+4 e^{-\frac{\kappa}{2}\zeta^{*}}\int_{\frac{1}{2}}^{2}|1-t|^{-\kappa}dt\right)\\
\leq& \frac{16 c_0}{\kappa (1-\kappa)|a_m|^{\kappa}}e^{-\frac{\kappa}{2}\zeta_{0}}.
\end{aligned}
\end{equation*} Therefore, the estimate \eqref{vii-3} holds for the case $i_0\leq m\leq n-1$ in \eqref{vii-1} and {\bf Case} 2 in Lemma \ref{lem7-1}.\end{proof}
\subsection{Auxiliary analysis}
As in \cite{PDL1}, we will apply the decomposition \eqref{ii-17} and the reduced system \eqref{ii-18}-\eqref{ii-19} to establish the related estimates for the good components of $W$. To this end, we first show the relation between $\partial_y^{\mu}W$ and $W_{\mu}^m$ ($1\leq m\leq n$).
Due to \eqref{i-71}, \eqref{ii-3}, \eqref{iv-1a}, \eqref{iv-2a} and \eqref{iv-3a}, one has $\ell_m^n(w)=0$
and $\|\ell_m(w)\|_{L^{\infty}}\leq 2$ for $1\leq m \leq n-1$. Combining this with \eqref{ii-17} yields \begin{equation}\label{vii-401}\begin{cases}
|W_{\mu}^m|=|\ell_m(w)\cdot \partial_y^{\mu}W|\leq 2\sum\limits_{j=1}^{n-1}|\partial_y^{\mu}W_j|\ (1\leq m\leq n-1),\\
|W_{\mu}^n|=|\ell_n(w)\cdot \partial_y^{\mu}W|\leq 2\sum\limits_{j=1}^{n}|\partial_y^{\mu}W_j|. \end{cases} \end{equation}
In addition, by $\|\gamma_j(w)\|=1$ for $1\leq j\leq n$ and $\gamma_k^n(w)=0$ for $1\leq k\leq n-1$, then it follows from \eqref{ii-17} that \begin{equation}\label{vii-402}\begin{cases}
|\partial_y^{\mu}W_j|=|\sum\limits_{m=1}^{n}W_{\mu}^m\gamma_m^j(w)|\leq \sum\limits_{k=1}^{n-1}|W_{\mu}^k|\ (1\leq j\leq n-1),\\
|\partial_y^{\mu}W_n|=|\sum\limits_{m=1}^{n}W_{\mu}^m\gamma_m^n(w)|\leq |W_{\mu}^n|. \end{cases} \end{equation}
\subsection{Bootstrap estimates on $W_j\ (j\neq n)$}\label{vii-b}
First, by \eqref{i-7}, \eqref{ii-9b}, \eqref{iv-1a}, \eqref{iv-2} and \eqref{iv-3a}-\eqref{iv-3b} with $\nu=\frac{1}{3}$, it is derived from \eqref{ii-18} and \eqref{ii-3} that \begin{equation}\label{vii-5}\begin{aligned} &\partial_s\gamma_{j}(w)+((\frac{3}{2}y-e^{\frac{s}{2}}\beta_{\tau}\dot{\xi}(t))I_{n} +e^{\frac{s}{2}}\beta_{\tau}A(w))\partial_y\gamma_{j}(w)\\ =&\frac{\partial\gamma_j(w)}{\partial w}\left(\partial_s W+(\frac{3}{2}y-e^{\frac{s}{2}}\beta_{\tau}\dot{\xi}(t))\partial_y W\right)+e^{\frac{s}{2}}\beta_{\tau}A(w)\frac{\partial\gamma_j(w)}{\partial w}\partial_y W\\ =&e^{\frac{s}{2}}\beta_{\tau}[A(w), \frac{\partial\gamma_j(w)}{\partial w}]\partial_y W \end{aligned} \end{equation} and \begin{equation}\label{vii3-1}
\left|e^{\frac{s}{2}}\beta_{\tau}[A(w), \frac{\partial\gamma_j(w)}{\partial w}]\partial_y W\right|\leq M^2 \eta^{-\frac{1}{3}}(y)(1-\delta_{j}^n), \end{equation} where $[A, B]=AB-BA$ for two $n\times n$ matrices $A$ and $B$.
For any $(y, s)\in \mathbb{R}\times [-\log\varepsilon, +\infty)$, the backward characteristics $y(\zeta):=y(\zeta; y, s)$ of \eqref{ii-18} which starts from $(y_0(y, s), -\log\varepsilon)$, is defined as \begin{equation}\label{vii-6}\begin{cases} \dot{y}(\zeta)=\frac{3}{2}y(\zeta)+e^{\frac{\zeta}{2}}\beta_{\tau}(\cdot)(\mu_m(w)-\dot{\xi}(\cdot))(y(\zeta), \zeta),\\ y(-\log\varepsilon)=y_0(y, s). \end{cases} \end{equation} Then it is derived from \eqref{ii-18} with $\mu=0$ and \eqref{vii-6} for $1\leq m\leq n-1$ that \begin{equation}\label{vii-8} W_{0}^m(y, s)=W_{0}^m(y_0(y, s), -\log\varepsilon)+\int_{-\log\varepsilon}^{s}\mathbb{F}_{0}^{m}(y(\zeta), \zeta)d\zeta, \end{equation} where $\mathbb{F}_{0}^m$ is given in \eqref{ii-18}, and it comes from $F_0=0$ in \eqref{ii-11a} and \eqref{vii-5}-\eqref{vii3-1} that \begin{equation}\label{vii-9}
|\mathbb{F}_0^m(y(\zeta), \zeta)|\leq M^3 \eta^{-\frac{1}{3}}(y(\zeta))\sum\limits_{j=1}^{n-1}\|W_{0}^j\|_{L^{\infty}}. \end{equation} In addition, by Lemma \ref{lem7-3} with $\kappa=\frac{2}{3}$ and \eqref{vii-8}-\eqref{vii-9}, we arrive at \begin{equation}\label{vii-10}\begin{aligned}
|W_{0}^m(y, s)|&\leq \|W_{0}^m(\cdot, -\log\varepsilon)\|_{L^{\infty}}
+M^4 \varepsilon^{\frac{1}{3}}\sum\limits_{j=1}^{n-1}\|W_{0}^j\|_{L^{\infty}}\\
&\leq \|W_{0}^m(\cdot, -\log\varepsilon)\|_{L^{\infty}}
+\varepsilon^{\frac{1}{4}}\sum\limits_{j=1}^{n-1}\|W_{0}^j\|_{L^{\infty}}. \end{aligned} \end{equation} On the other hand, due to the arbitrariness of $(y, s)$, summing $m$ in both sides of \eqref{vii-10} from $1$ to $n-1$ yields \begin{equation}\label{vii-11}
\sum\limits_{m=1}^{n-1}\|W_{0}^m\|_{L^{\infty}}\leq 2\sum\limits_{m=1}^{n-1}\|W_{0}^m(\cdot, -\log\varepsilon)\|_{L^{\infty}}. \end{equation} Then it follows from \eqref{vii-11}, \eqref{vii-401}-\eqref{vii-402} and \eqref{iii-8a} that \begin{equation}\label{vii-12}
|W_j(y, s)|\leq 4n\sum\limits_{k=1}^{n-1}\|W_k(\cdot,-\log\varepsilon)\|_{L^{\infty}}\leq 4n^2\varepsilon\ (1\leq j\leq n-1). \end{equation}
\subsection{Bootstrap estimates on $\partial_y W_j\ (j\neq n)$}\label{vii-c}
With the definition \eqref{vii-6}, it is derived from \eqref{ii-18} for $\mu=1$ and $1\leq m\leq n-1$ that \begin{equation}\label{vii-13} e^{\frac{3}{2}s}W_{1}^m(y, s)=\varepsilon^{-\frac{3}{2}}W_{1}^m(y_0(y, s), -\log\varepsilon)+\int_{-\log\varepsilon}^{s}e^{\frac{3}{2}\zeta}\mathbb{F}_1^m(y(\zeta), \zeta)d\zeta. \end{equation}
Since $a_{in}(w)=0$ and $\ell_i^n(w)=0$ for $1\leq i\leq n-1$ (see \eqref{i-7b} and \eqref{i-71}), it follows from \eqref{ii-18}, \eqref{vii-402}-\eqref{vii-5}, \eqref{iv-2}-\eqref{iv-3} and \eqref{ii-11a} with $\mu=1$ that \begin{equation}\label{vii-14}
|\mathbb{F}_1^m(y(\zeta), \zeta)|\leq M^3 \eta^{-\frac{1}{3}}(y(\zeta))\sum\limits_{j=1}^{n-1}|W_{1}^j|(y(\zeta), \zeta). \end{equation}
As in \eqref{vii-11}, combining \eqref{vii-13}-\eqref{vii-14} with Lemma \ref{lem7-3} yields that for $1\leq m\leq n-1$, \begin{equation}\label{vii-15}\begin{aligned}
e^{\frac{3}{2}s}|W_{1}^m|(y, s)&\leq \varepsilon^{-\frac{3}{2}}\|W_{1}^m(\cdot, -\log\varepsilon)\|_{L^{\infty}}+M^4\varepsilon^{\frac{1}{3}}\sum\limits_{j=1}^{n-1}\|e^{\frac{3}{2}\varsigma}W_{1}^j(z, \varsigma)\|_{L^{\infty}_{z, \varsigma}}\\
&\leq \varepsilon^{-\frac{3}{2}}\|W_{1}^m(\cdot, -\log\varepsilon)\|_{L^{\infty}}+\varepsilon^{\frac{1}{4}}\sum\limits_{j=1}^{n-1}\|e^{\frac{3}{2}\varsigma}W_{1}^j(z, \varsigma)\|_{L^{\infty}_{z, \varsigma}}. \end{aligned} \end{equation} Similarly to \eqref{vii-11}-\eqref{vii-12}, it is derived from \eqref{vii-15} and \eqref{iii-8b} with $\nu=0$ that \begin{equation}\label{vii-16}
|\partial_y W_j(y, s)|\leq 4n^2 e^{-\frac{3}{2}s}\ (1\leq j\leq n-1). \end{equation} In addition, as in \eqref{vi-36}, by \eqref{iv-6} and \eqref{vii-16}, we have that for $2\leq\mu\leq 4$ and $\alpha=\frac{\mu-1}{\mu_0-\frac{1}{2}}\in (0, \frac{6}{11}]$, \begin{equation}\begin{aligned}\label{vii-17}
\|\partial_y^{\mu}W_k(\cdot, s)\|_{L^{\infty}}&\leq M^{\frac{1}{20}}\|\partial_y^{\mu_0}W_k(\cdot, s)\|_{L^2}^{\alpha}\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^{1-\alpha}\\ &\leq (4n^2)^{1-\alpha} M^{\frac{1}{20}+\alpha} e^{-\frac{3}{2}s}\leq M^{\frac{3}{5}}e^{-\frac{3}{2}s}\ (1\leq k\leq n-1). \end{aligned} \end{equation}
\subsection{Weighted bootstrap estimates of the good components}\label{vii-d}
For $a=\max\limits_{1\leq m\leq n-1}\{5|\mu_m(0)|+1\}$, set the domain $D^0$ as \begin{equation}\label{vii-18}
D^0=\{(y, s): |y|<4ae^{\frac{s}{2}}, -\log\varepsilon\leq s<+\infty\}. \end{equation} Then $D^{\pm}\subset D^0$ for the domains $D^{\pm}$ defined in Lemma \ref{lem7-1} and Lemma \ref{lem7-2}.
When $(y, s)\in D^0$, it is derived from \eqref{vii-16}-\eqref{vii-17} that \begin{equation}\label{vii-19}\begin{cases}
\sum\limits_{j=1}^{n-1}|\partial_y W_j(y, s)|\leq 4n^2 e^{-\frac{3}{2}s}\leq M^{\frac{1}{5}} e^{(\nu-\frac{3}{2})s}\eta^{-\nu}(y)\ (0\leq \nu\leq \frac{1}{3}),\\[4mm]
\sum\limits_{j=1}^{n-1}|\partial_y^2 W_j(y, s)|\leq M^{\frac{2}{5}} e^{-\frac{3}{2}s}\leq M^{\frac{3}{5}}e^{-\frac{7}{6}s}\eta^{-\frac{1}{3}}(y). \end{cases} \end{equation}
Next we derive the weighted estimates on the good components of $W$ when $(y, s)\notin D^0$. In this situation, as in \eqref{vii-6}, for each $1\leq m\leq n-1$, the backward characteristics $y(\zeta):=y(\zeta; y, s)$ of \eqref{ii-19} which starts from $(y_0(y, s), \zeta_0)\notin D^0$ is defined as \begin{equation}\label{vii-20}\begin{cases} \dot{y}(\zeta)=\frac{3}{2}y(\zeta)+e^{\frac{\zeta}{2}}\beta_{\tau}(\mu_m(w)-\dot{\xi}(t))(y(\zeta), \zeta),\ \zeta_0\leq\zeta\leq s,\\ y(\zeta_0)=y_0(y, s), \end{cases} \end{equation} where either $\zeta_0=-\log\varepsilon$ or $(y_0(y, s),\zeta_0)\in\partial D^0$.
Note that $y(\zeta)$ in \eqref{vii-20} has the following expression \begin{equation}\label{vii-21} y(\zeta)e^{-\frac{3}{2}\zeta}=y_0(y, s)e^{-\frac{3}{2}\zeta_0}+\int_{\zeta_0}^{\zeta}e^{-\alpha}\beta_{\tau}(\mu_m(w)-\dot{\xi}(t))(y(\alpha), \alpha)d\alpha. \end{equation}
It follows from \eqref{vii1-1} and \eqref{vii-21} with $|y_0(y, s)|\geq 4a e^{\frac{\zeta_0}{2}}$ that for $\zeta_0\leq \zeta\leq s$, \begin{equation}\label{vii-22}
|y(\zeta)|\geq e^{\frac{3}{2}\zeta}(4ae^{-\zeta_0}-2|a_m|(e^{-\zeta_0}-e^{-\zeta}))\geq e^{\frac{3}{2}\zeta-\zeta_0}. \end{equation}
Based on the definition of $y(\zeta)=y(\zeta; y, s)$ in \eqref{vii-20}, we derive from \eqref{ii-19} that \begin{equation}\label{vii5-1}\begin{aligned} \left[\eta^{\nu}W_{\mu}^m\right](y, s)&=[\eta^{\nu} W_{\mu}^m](y_0, \zeta_0)\exp(-\int_{\zeta_0}^{s}\mathbb{D}_{\mu,\nu}^m(y(\alpha), \alpha)d\alpha)\\ &+\int_{\zeta_0}^{s}[\eta^{\nu}\mathbb{F}_{\mu}^{m}](y(\zeta), \zeta) \exp(-\int_{\zeta}^{s}\mathbb{D}_{\mu,\nu}^m(y(\alpha), \alpha)d\alpha)d\zeta \end{aligned} \end{equation} and \begin{equation}\label{vii-23}\begin{aligned}
\mathbb{D}_{\mu, \nu}^m(y(\zeta), \zeta)&=\frac{3\mu}{2}-3\nu+\frac{3\nu}{1+y^2}-\frac{2\nu y}{1+y^2}e^{\frac{s}{2}}\beta_{\tau}(\mu_m(w)-\dot{\xi}(t))\bigl|_{(y, s)=(y(\zeta), \zeta)}\\ &:=\frac{3\mu}{2}-3\nu+\mathbb{D}_{\nu}^{m}(y(\zeta), \zeta). \end{aligned} \end{equation} It is derived from \eqref{vii-22}, \eqref{vii-23} and \eqref{vii1-1} that \begin{equation}\label{vii-25}
\int_{\zeta_0}^{+\infty}|\mathbb{D}_{\nu}^{m}|(y(\zeta), \zeta)d\zeta\leq 6|\nu|a\int_{\zeta_0}^{+\infty}
e^{\zeta_0-\zeta}d\zeta\leq 6|\nu| a. \end{equation} Due to \eqref{vii5-1} and \eqref{vii-25}, when $\frac{3\mu}{2}-3\nu>0$, we obtain \begin{equation}\label{vii-26}\begin{aligned}
&e^{(\frac{3\mu}{2}-3\nu)s}\eta^{\nu}(y)|W_{\mu}^{m}(y, s)|\leq e^{6|\nu|a}\biggl( e^{(\frac{3\mu}{2}-3\nu)\zeta_0}\eta^{\nu}(y_0(y, s))|W_{\mu}^{m}(y_0(y, s), \zeta_0)|\\
&\qquad\qquad\qquad+\int_{\zeta_0}^{s}e^{(\frac{3\mu}{2}-3\nu)\zeta}\eta^{\nu}(y(\zeta))|\mathbb{F}_{\mu}^m|(y(\zeta), \zeta)d\zeta\biggr) \end{aligned} \end{equation} and \begin{equation}\label{vii-27}\begin{aligned}
&e^{(\frac{7}{6}-\nu^+)s}\eta^{\frac{1}{3}}(y)|W_{2}^m(y, s)|\leq e^{2a}\biggl(e^{(\frac{7}{6}-\nu^+)\zeta_0}\eta^{\frac{1}{3}}(y_0(y, s))|W_{2}^m(y_0(y, s), \zeta_0)|\\
&\qquad\qquad\qquad+\int_{\zeta_0}^{s}e^{(\frac{7}{6}-\nu^+)\zeta}\eta^{\frac{1}{3}}(y(\zeta))|\mathbb{F}_2^m|(y(\zeta), \zeta)d\zeta\biggr)\ (0\leq\nu^+<\frac{7}{6}). \end{aligned} \end{equation} Here we point out that the factor $\frac{7}{6}-\nu^+$ appearing in \eqref{vii-27} for $0\leq \nu^+<\frac{7}{6}$ is due to \eqref{vii-19}.
With respect to $\mathbb{F}_2^m$, similarly to the argument for \eqref{vii-14}, it is derived from \eqref{ii-11a}, \eqref{ii-18}, \eqref{vii-401}, \eqref{vii-16} and \eqref{iv-2}-\eqref{iv-3} that \begin{equation}\label{vii-28}
|\mathbb{F}_2^m(y(\zeta), \zeta)|\leq M^3 (e^{-\zeta}+\eta^{-\frac{1}{3}}(y(\zeta)))\sum\limits_{j=1}^{n-1}(|W_2^j(y(\zeta), \zeta)|
+|W_1^j(y(\zeta), \zeta)|). \end{equation}
For $\mu=1$, $0\leq \nu\leq \frac{1}{3}$ and $1\leq m\leq n-1$ in \eqref{vii-26}, we obtain from \eqref{iii-8b}, \eqref{vii-402}, \eqref{vii-14}, \eqref{vii-19}, \eqref{vii-20} and Lemma \ref{lem7-3} that \begin{equation*}\label{vii-29}\begin{aligned}
e^{(\frac{3}{2}-3\nu)s}\eta^{\nu}(y)|W_1^m(y, s)|&\leq e^{2a}\left(M^{\frac{2}{5}}+M^3(\varepsilon+\varepsilon^{\frac{1}{3}})\sum\limits_{j=1}^{n-1}\|W_1^j(z, \tau)\|_{L^{\infty}(\{|z|\geq 4a e^{\frac{\tau}{2}}\})}\right)\\
&\leq M^{\frac{2}{5}}e^{2a}+\varepsilon^{\frac{1}{4}}\sum\limits_{j=1}^{n-1}\|W_1^j(z, \tau)\|_{L^{\infty}(\{|z|\geq 4a e^{\frac{\tau}{2}}\})}. \end{aligned} \end{equation*} Combining this with \eqref{vii-402} shows that for $1\leq j\leq n-1$, \begin{equation}\label{vii5-2}
|\partial_y W_j(y, s)|\leq \sum\limits_{m=1}^{n-1}|W_1^m(y, s)|\leq M^{\frac{3}{5}}e^{(3\nu-\frac{3}{2})s}\eta^{-\nu}(y)\ (0\leq \nu\leq \frac{1}{3}).
For the estimates of $\partial_y^2 W_m$ with $1\leq m\leq n-1$, it follows from \eqref{iii-8c}, \eqref{vii-401}, \eqref{vii-19}, \eqref{vii-27}-\eqref{vii5-2} and Lemma \ref{lem7-3} that \begin{equation*}\label{vii-30}\begin{aligned}
&e^{(\frac{7}{6}-\nu^+)s}\eta^{\frac{1}{3}}(y)|W_2^m(y, s)|\leq 2e^{2a}M^{\frac{3}{5}}\varepsilon^{\nu^+}+M^{5}(\varepsilon+\varepsilon^{\frac{1}{3}})\\
&\qquad\qquad+M^4(\varepsilon+\varepsilon^{\frac{1}{3}})\sum\limits_{j=1}^{n-1}\|e^{(\frac{7}{6}-\nu^+)\tau}\eta^{\frac{1}{3}}(z)W_2^j(z, \tau)\|_{L^{\infty}(\{|z|\geq 4ae^{\frac{\tau}{2}}\})}\ (0<\nu^+<\frac{7}{6}). \end{aligned} \end{equation*} Together with \eqref{vii-402}, this yields that for $1\leq j\leq n-1$, \begin{equation}\label{vii-30}
|\partial_y^2 W_j(y, s)|\leq \sum\limits_{m=1}^{n-1}|W_2^m(y, s)|\leq M^{\frac{4}{5}}e^{(\nu^+-\frac{7}{6})s}\eta^{-\frac{1}{3}}(y)\ (0<\nu^+<\frac{7}{6}). \end{equation}
\section{Bootstrap estimates on the modulation variables}\label{viii}
For $\dot{\kappa}(t)$ and $\dot{\tau}(t)$, it follows from \eqref{i-7}, \eqref{ii-3}, \eqref{ii-12a}, \eqref{ii-12c}, \eqref{iii-6} and \eqref{iv-1}-\eqref{iv-3} that \begin{equation}\label{viii-2}\begin{aligned}
|\dot{\kappa}(t)|&\leq e^{s}|\mu_n(w^0)-\dot{\xi}(t)|+e^{\frac{3s}{2}}\sum\limits_{j\neq n}|a_{nj}(w^0)||(\partial_y W_j)^0|\\
&\leq e^{s}|(\partial_y^2 \mu_n(w))^0|+e^{\frac{3s}{2}}\sum\limits_{k=0, 2}\sum\limits_{j\neq n}|(\partial_y^k(a_{nj}(w)\partial_y W_j))^0|\\
&\leq |(\partial_{w_n w_n}(\mu_n(w)))^0|+M\varepsilon^{\frac{1}{3}}\leq M^{\frac{1}{4}}\leq M^{\frac{1}{2}} \end{aligned} \end{equation} and \begin{equation}\label{viii-3}\begin{aligned}
|\dot{\tau}(t)|&\leq |1-(\partial_{w_n}\mu_n(w))^0|+M^2 e^{-\frac{s}{2}}\leq M^2\varepsilon^{\frac{1}{2}}\leq \varepsilon^{\frac{1}{3}}\leq 2\varepsilon^{\frac{1}{3}}. \end{aligned} \end{equation} By the coordinate transformation \eqref{ii-2}, one has $t=t(s)$ and \begin{equation}\label{viii-4} \frac{d}{ds}(\kappa, \tau, \xi)(t(s))=(\dot{\kappa}(t), \dot{\tau}(t), \dot{\xi}(t))\frac{e^{-s}}{\beta_{\tau}}. \end{equation} Combining \eqref{viii-4} with \eqref{viii-2}-\eqref{viii-3} and \eqref{iii-14} shows \begin{equation}\label{viii-5}
|\kappa(t)-\kappa_0\varepsilon^{\frac{1}{3}}|=|\int_{-\log\varepsilon}^{s}(\dot{\kappa}(t)\frac{e^{-s}}{\beta_{\tau}})ds|\leq |\kappa_0\varepsilon|+2M^{\frac{1}{4}}\varepsilon\leq M^{\frac{1}{2}} \varepsilon \end{equation} and \begin{equation}\label{viii-51}
|\tau(t)|=|\tau(-\varepsilon)+\int_{-\log\varepsilon}^{s}(\dot{\tau}(t)\frac{e^{-s}}{\beta_{\tau}})ds|\leq 2\varepsilon^{\frac{4}{3}}. \end{equation}
With respect to $\xi(t)$, by \eqref{i-7}, \eqref{ii-3}, \eqref{iii-6}, \eqref{vii-12}, \eqref{vii-16}-\eqref{vii-17} and \eqref{viii-3}-\eqref{viii-5}, then $\xi(t)$ in \eqref{ii-12c} satisfies \begin{equation}\label{viii-1}
|\dot{\xi}(t)|\leq |\mu_{n}(w^0)|+\frac{1}{6}|(\partial^2 \mu_n(w))^0|+\frac{1}{6}\sum\limits_{j\neq n}e^{\frac{s}{2}}|(\partial^2(a_{nj}(w)\partial_y W_j ))^0|\leq M^{\frac{3}{4}}\varepsilon\leq 2M^{\frac{3}{4}}\varepsilon \end{equation} and \begin{equation}\label{viii-11}
|\xi(t)|=\left|\xi(-\varepsilon)+\int_{-\log\varepsilon}^{s}(\dot{\xi}(t)\frac{e^{-s}}{\beta_{\tau}})ds\right|\leq 2M^{\frac{3}{4}}\varepsilon^2. \end{equation}
\section{Weighted energy estimates}\label{v}
In this section, we establish the spatial $L^2$-energy estimates of $\partial_y^{\mu_0}W$ when $\mu_0$ satisfies \eqref{iv-50}.
\begin{thm}\label{thm8-1} {\it For $\mu_0$ satisfying \eqref{iv-50}, under the assumptions \eqref{iv-1}-\eqref{iv-3} and \eqref{iv-6}, one has \begin{equation}\label{v-1}
\sum\limits_{m=1}^{n-1}\|\partial_y^{\mu_0}W_m(\cdot, s)\|_{L^2(\mathbb{R})}\leq M^{\frac{1}{2}} e^{-\frac{3}{2}s},\ \|\partial_y^{\mu_0}W_{n}(\cdot, s)\|_{L^2(\mathbb{R})}\leq M^{\frac{1}{2}} e^{-\frac{s}{2}}. \end{equation}} \end{thm}
Based on the expansion \eqref{ii-17} and the assumption \eqref{iv-6}, we have from \eqref{vii-401} and \eqref{v-1} that \begin{equation}\label{v-2}
\sum\limits_{m=1}^{n-1}\|W_{\mu_0}^m(\cdot, s)\|_{L^2(\mathbb{R})}\leq 2nM e^{-\frac{3}{2}s},\ \|W_{\mu_0}^n(\cdot, s)\|_{L^2(\mathbb{R})}\leq 2M e^{-\frac{s}{2}}. \end{equation}
\subsection{Framework for energy estimates} To prove Theorem \ref{thm8-1}, we first establish the following framework for energy estimates:
\begin{lem}\label{lem8-1} With $\mu_0$ satisfying \eqref{iv-50}, under the assumption \eqref{v-2} (or see \eqref{iv-6}), one has \begin{enumerate}[(1)] \item When $1\leq m\leq n-1$, for any Lipschitz continuous function $q_m(y)$ and with the notation \begin{equation}\label{v-3} Q_m(y, s)=-(\frac{3}{2}y+e^{\frac{s}{2}}\beta_{\tau}(\mu_m(w)-\dot{\xi}(t)))q_m'(y), \end{equation} then \begin{equation}\label{v-4}\begin{aligned}
&\frac{d}{ds}\int_{\mathbb{R}}e^{q_m(y)}|W_{\mu_0}^m|^2(y, s)dy+\int_{\mathbb{R}}(3\mu_0-\frac{5}{2}-e^{\frac{s}{2}}\beta_{\tau}\partial_y\mu_m(w)+Q_{m})e^{q_m(y)}|W_{\mu_0}^m|^2(y, s)dy\\
\leq& \int_{\mathbb{R}}e^{q_m(y)}|\mathbb{F}_{\mu_0}^m|^2(y, s)dy. \end{aligned} \end{equation} \item For $W_{\mu_0}^n$,
\begin{equation}\label{v-5}\frac{d}{ds}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy+\int_{\mathbb{R}}(2\mu_0-\frac{5}{2}-e^{\frac{s}{2}}\beta_{\tau}\partial_y\mu_n(w))|W_{\mu_0}^n|^2(y, s)dy\leq \frac{1}{\mu_0+1}\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^n|^2(y, s)dy. \end{equation} \end{enumerate} \end{lem}
\begin{proof} For $1\leq m\leq n-1$, multiplying both sides of \eqref{ii-18} with $\mu=\mu_0$ by $2e^{q_m(y)}W_{\mu_0}^m$ and integrating on $\mathbb{R}$ yield \begin{equation}\label{v-6}\begin{aligned}
&\frac{d}{ds}\int_{\mathbb{R}}e^{q_m(y)}|W_{\mu_0}^m|^2(y, s) dy+\int_{\mathbb{R}}(3\mu_0-\frac{3}{2}-e^{\frac{s}{2}}\beta_{\tau}\partial_y\mu_m(w)+Q_m)e^{q_m(y)}|W_{\mu_0}^m|^2(y, s) dy\\ =&2\int_{\mathbb{R}}e^{q_m(y)}(\mathbb{F}_{\mu_0}^m\cdot W_{\mu_0}^m)(y, s) dy\\
\leq &\int_{\mathbb{R}}e^{q_m(y)}|W_{\mu_0}^m|^2(y, s) dy+\int_{\mathbb{R}}e^{q_m(y)}|\mathbb{F}_{\mu_0}^m|^2(y, s) dy. \end{aligned} \end{equation}
Then \eqref{v-4} comes from \eqref{v-3} and \eqref{v-6}. The estimate \eqref{v-5}
can be obtained by a standard energy estimate associated with the equation \eqref{ii-18} for $m=n$ and $\mu=\mu_0$. \end{proof}
Next we analyze the structure of $\mathbb{F}_{\mu_0}^m$. \begin{lem}\label{lem8-2} For $\mu_0$ satisfying \eqref{iv-50} and $1\leq m\leq n-1$, $\mathbb{F}_{\mu_0}^m$ in \eqref{ii-18} satisfies \begin{equation}\label{v-7}\begin{aligned}
\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^m|^2 (y, s) dy&
\leq 4nM^{-\frac{1}{16}}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy+2nM^{\frac{1}{16}}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|\eta^{-\frac{1}{3}}(y)W_{\mu_0}^j|^2(y, s)dy\\
&+2n M^{\frac{1}{4}}e^{-2s}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy+e^{-(3+\frac{1}{4})s}. \end{aligned} \end{equation} \end{lem}
\begin{proof} First, due to \eqref{ii-3}, \eqref{vi2-4}, \eqref{vi-35}, \eqref{vii-12}, \eqref{vii-16} and \eqref{viii-5}, the estimate in \eqref{vii3-1} can be improved as \begin{equation}\label{v-72}
|e^{\frac{s}{2}}\beta_{\tau}[A(w), \frac{\partial\gamma_j(w)}{\partial w}]\partial_y W|\leq M^{\frac{1}{64}}\eta^{-\frac{1}{3}}(y)(1-\delta_j^n). \end{equation} Thus, we can obtain from \eqref{ii-18}, \eqref{vii-5} and \eqref{v-72} that \begin{equation}\label{v-71}\begin{aligned}
\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^m|^2(y, s)dy&\leq M^{\frac{1}{16}}e^{-s}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy+M^{\frac{1}{16}}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|\eta^{-\frac{1}{3}}(y)W_{\mu_0}^j|^2(y, s)dy\\
&+2\int_{\mathbb{R}}|\ell_m\cdot F_{\mu_0}|^2(y, s)dy. \end{aligned} \end{equation} Due to $\ell_k^n(w)=0$ and $a_{kn}(w)=0$ ($1\leq k\leq n-1$) by \eqref{i-7}-\eqref{i-71}, for $F_{\mu_0}$ in \eqref{ii-11a}, one then has \begin{equation}\label{v-8}\begin{aligned}
|\ell_m\cdot F_{\mu_0}|
\leq& \sum\limits_{1\leq \beta\leq \mu_0} 2C_{\mu_0}^{\beta}e^{\frac{s}{2}}|\ell_m\cdot \partial_y^{\beta}A(w)\partial_y^{\mu_0+1-\beta} W|\\
\leq&\sum\limits_{1\leq\beta\leq \mu_0}\sum\limits_{1\leq q\leq \mu_0-\beta+1}2 C_{\mu_0}^{\beta}\|\partial_w^q A(w)\|_{L^{\infty}}e^{\frac{s}{2}}\sum\limits_{k=1}^{n-1}I_{\beta q k}\\ \leq&\left(M^{\frac{1}{16}}e^{s}\sum\limits_{1\leq\beta\leq\mu_0}\sum\limits_{1\leq q\leq \mu_0-\beta+1}\sum\limits_{1\leq k\leq n-1}I_{\beta q k}^2\right)^{1/2}, \end{aligned} \end{equation} where the last inequality comes from \eqref{ii-3}, \eqref{iv-1}-\eqref{iv-3}, and $I_{\beta q k}$ satisfies \begin{equation}\label{v-9}
I_{\beta q k}=\sum\limits_{\gamma_1+\cdots+\gamma_q=\mu_0-\beta+1,\ \gamma_j\geq 1\ (1\leq j\leq q)}|\partial_{y}^{\gamma_1} W|\cdots|\partial_y^{\gamma_q}W|\cdot|\partial_y^{\beta}W_k|. \end{equation} Note that the estimates for ${I_{\beta q k}}'s$ in \eqref{v-9} are taken in Lemma \ref{lemA-4} for $1\leq k\leq n-1$. Substituting the three type estimates in \eqref{A2-3} into \eqref{v-8} shows \begin{equation}\label{v-16}\begin{aligned}
&\int_{\mathbb{R}}|\ell_m\cdot F_{\mu_0}|^2(y, s)dy\\
\leq &2nM^{\frac{1}{16}}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|\eta^{-\frac{1}{3}}(y)W_{\mu_0}^j|^2(y, s)dy+2nM^{-\frac{1}{16}}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy\\
&+4nM^{\frac{3}{8}}e^{-2s}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy+M^{3\mu_0+2}e^{-(3+\frac{1}{2})s}\ (1\leq m\leq n-1). \end{aligned} \end{equation} Therefore, \eqref{v-7} follows from \eqref{v-71}-\eqref{v-8} and \eqref{v-16}, and then the proof of Lemma \ref{lem8-2} is finished.\end{proof}
\begin{lem}\label{lem8-3} {\it Let $\mu_0$ satisfy \eqref{iv-50}, then for $\mathbb{F}_{\mu_0}^n$ in \eqref{ii-18}, one has \begin{equation}\label{v-17}
\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^n|^2(y, s)dy\leq \frac{102}{100}(\mu_0+1)^2\int_{\mathbb{R}}|W_{\mu_0}^n|^2 (y, s)dy+M^{\frac{1}{2}}e^{-s}. \end{equation}} \end{lem}
\begin{proof} Note that under the assumptions \eqref{i-7}-\eqref{i-71}, $\ell_n(w)$ has the decomposition \begin{equation}\label{v-18} \ell_n(w)={\bf e}_n^{\top}+\sum\limits_{j=1}^{n-1}c_j(w)\ell_j(w), \end{equation} where $c_j(w)=-{\bf e}_n^{\top}\cdot\gamma_j(w)\ (1\leq j\leq n-1)$.
Combining \eqref{v-18} with $\gamma_n(w)={\bf e}_n$ in \eqref{i-71}, \eqref{ii-11}, \eqref{ii-18} shows \begin{equation}\label{v-19}\begin{aligned}
|\mathbb{F}_{\mu_0}^n|=&|\ell_n\cdot F_{\mu_0}|
\leq \sum\limits_{j=1}^{n-1}|c_j(w)\ell_j\cdot F_{\mu_0}|+|{\bf e}_n^{\top}\cdot F_{\mu_0}|\\
\leq& \sum\limits_{j=1}^{n-1}|c_j(w)\ell_j\cdot F_{\mu_0}|+e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu_0}C_{\mu_0}^{\beta}|\partial_y^{\beta}\mu_{n}(w)
\partial_y^{\mu_0-\beta+1}W_n|\\
&+e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{j=1}^{n-1}\sum\limits_{1\leq\beta\leq\mu_0}|\partial_y^{\beta}(a_{nj}(w))
\partial_y^{\mu_0-\beta+1}W_j|:=\sum\limits_{i=1}^{5}J_i, \end{aligned} \end{equation} where \begin{equation*}\begin{cases}
J_1=e^{\frac{s}{2}}\beta_{\tau}(\mu_0+1)|\partial_y W_n\partial_y^{\mu_0}W_n|,\\
J_2=e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{2\leq\beta\leq\mu_0-1}C_{\mu_0}^{\beta}|\partial_y^{\beta}W_n\partial_y^{\mu_0-\beta}W_n|,\\
J_3=\sum\limits_{j=1}^{n-1}|c_j(w)\ell_j\cdot F_{\mu_0}|,\\
J_4=e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu_0}C_{\mu_0}^{\beta}|\partial_y^{\beta}(\mu_{n}(w)-W_n)\partial_y^{\mu_0-\beta+1}W_n|,\\
J_5=e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{j=1}^{n-1}\sum\limits_{1\leq\beta\leq\mu_0}|\partial_y^{\beta}(a_{nj}(w))\partial_y^{\mu_0-\beta+1}W_j|. \end{cases} \end{equation*} It is derived from \eqref{v-19} that \begin{equation}\label{v-20}
\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^n|^2(y, s)dy\leq \frac{101}{100}\int_{\mathbb{R}}J_1^2(y, s)dy+M^{\frac{1}{32}}\sum\limits_{k=2}^{5}\int_{\mathbb{R}}J_k^2(y, s)dy. \end{equation} In addition, it follows from \eqref{ii-3}, \eqref{vi-35} and \eqref{viii-3} that \begin{equation}\label{v-21}
\int_{\mathbb{R}}J_1^2(y, s)dy\leq (1+\varepsilon^{\frac{1}{40}})(\mu_0+1)^2\int_{\mathbb{R}}|\partial_y^{\mu_0}W_{n}|^2 (y, s)dy. \end{equation}
For $J_2$, by the H\"older inequality, \eqref{ii-3} and \eqref{vi-35}, we have \begin{equation}\label{v-22}\begin{aligned}
\int_{\mathbb{R}}J_2^2(y, s)dy&\leq M^{\frac{1}{8(\mu_0-1)}}\int_{\mathbb{R}}|\partial_y^{\mu_0-1}W_n|^2(y, s)dy\\
&+M^{\frac{1}{16(\mu_0-1)}}e^{s}\sum\limits_{1<\beta<\mu_0-2}\|\partial_y^{\beta}W_n(\cdot, s)\|_{L^{\frac{2(\mu_0-2)}{\beta-1}}}^2\|\partial_y^{\mu_0-\beta}W_n(\cdot, s)\|_{L^{\frac{2(\mu_0-2)}{\mu_0-\beta-1}}}^2. \end{aligned} \end{equation} Note that by Lemma \ref{lemA-3}, \begin{equation*}\begin{aligned}
&\|\partial_y^{\beta}W_n(\cdot, s)\|_{L^{\frac{2(\mu_0-2)}{\beta-1}}}\leq M^{\frac{1}{64(\mu_0-1)}}\|\partial_y W_n(\cdot, s)\|_{L^{\infty}}^{\frac{\mu_0-\beta-1}{\mu_0-2}}\|\partial_y^{\mu_0-1}W_n(\cdot, s)\|_{L^2}^{\frac{\beta-1}{\mu_0-2}},\\
&\|\partial_y^{\mu_0-\beta}W_n(\cdot, s)\|_{L^{\frac{2(\mu_0-2)}{\mu_0-\beta-1}}}\leq M^{\frac{1}{64(\mu_0-1)}}\|\partial_y W_n(\cdot, s)\|_{L^{\infty}}^{\frac{\beta-1}{\mu_0-2}}\|\partial_y^{\mu_0-1}W_n(\cdot, s)\|_{L^{2}}^{\frac{\mu_0-\beta-1}{\mu_0-2}}. \end{aligned} \end{equation*} Then combining these two estimates with \eqref{ii-3}, \eqref{vi-35}, \eqref{v-22} and Lemma \ref{lemA-3} yields \begin{equation}\label{v-221}\begin{aligned}
\int_{\mathbb{R}}J_2^2(y, s)dy&\leq 2M^{\frac{1}{8(\mu_0-1)}}\int_{\mathbb{R}}|\partial_y^{\mu_0-1}W_n|^2(y, s)dy\\
&\leq M^{\frac{1}{4(\mu_0-1)}}\|\partial_y W_n(\cdot, s)\|_{L^{2}}^{\frac{2}{\mu_0-1}}\|\partial_y^{\mu_0}W_n(\cdot, s)\|_{L^2}^{2\frac{\mu_0-2}{\mu_0-1}}\\
&\leq M^{-\frac{1}{8(\mu_0-2)}}\int_{\mathbb{R}}|\partial_y^{\mu_0}W_n|^2(y, s)dy+2M^{\frac{3}{8}}e^{-s}. \end{aligned} \end{equation}
For $J_3$, due to $\gamma_j(0)={\bf e}_j\ (1\leq j\leq n-1)$ in \eqref{i-71}, one then derives from \eqref{ii-3} and \eqref{iv-1}-\eqref{iv-3} that \begin{equation}\label{v-222}
|c_j(w)|\leq M\|W(\cdot, s)\|_{L^{\infty}}\leq M^4\varepsilon^{\frac{1}{3}}\leq \varepsilon^{\frac{1}{4}}\ (1\leq j\leq n-1). \end{equation} On the other hand, it follows from \eqref{v-2}, \eqref{v-16} and \eqref{v-222} that \begin{equation}\label{v-23} \int_{\mathbb{R}}J_3^2(y, s)dy\leq \varepsilon^{\frac{1}{6}} e^{-3s}. \end{equation} With respect to $J_4$, one has from \eqref{v-19} that \begin{equation}\label{w-1}\begin{aligned}
J_4=&e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq \beta\leq \mu_0}C_{\mu_0}^{\beta}|\partial_y^{\beta}(\mu_n(w)-W_n)\partial_y^{\mu_0-\beta+1}W_n|\\
\leq&e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu_0}C_{\mu_0}^{\beta}|\partial_{w_n}(\mu_n(w)-W_n)|I_{\beta 1 n}\\
+& e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu_0}C_{\mu_0}^{\beta}\sum\limits_{1\leq k\leq n-1}|\partial_{w_k}(\mu_n(w)-W_n)|I_{\beta 1 k}\\ +&e^{\frac{s}{2}}\beta_{\tau}\sum\limits_{1\leq\beta\leq\mu_0}\sum\limits_{2\leq q\leq \mu_0-\beta+1}c_{\beta q n}(w)I_{\beta q n}\\ :=&\sum\limits_{1\leq \beta\leq \mu_0}(J_{41 \beta}+J_{42 \beta}+J_{43 \beta}), \end{aligned} \end{equation}
where $|c_{\beta q n}(w)|\leq M$ due to \eqref{ii-3} and \eqref{iv-1}-\eqref{iv-3}. Since $\partial_{w_n}\mu_n(0)=1$, by \eqref{ii-3} and \eqref{iv-1}-\eqref{iv-3}, we have \begin{equation}\label{w-2}
|J_{41 \beta}|\leq M\varepsilon^{\frac{1}{6}}e^{\frac{s}{2}}I_{\beta 1 n}. \end{equation} In a similar but easier way, \begin{equation}\label{w-3}
|J_{42 \beta}|+|J_{43 \beta}|\leq M\sum\limits_{1\leq k\leq n-1}I_{\beta 1 k}+M^2\sum\limits_{2\leq q\leq \mu_0-\beta+1}I_{\beta q n}. \end{equation} With the help of Lemma \ref{lemA-4}, Lemma \ref{lemA-5} and \eqref{v-2}, we obtain from \eqref{w-1}-\eqref{w-3} that \begin{equation}\label{w-4} \int_{\mathbb{R}}J_4^2(y, s)dy\leq \varepsilon^{\frac{1}{10}}e^{-s}. \end{equation} Analogously to the treatment of $J_4$ in \eqref{w-1}-\eqref{w-4}, one has \begin{equation}\label{v-24} \int_{\mathbb{R}}J_5^2(y, s)dy\leq \varepsilon^{\frac{1}{10}}e^{-s}. \end{equation} Therefore, \eqref{v-17} comes from \eqref{v-21}, \eqref{v-221}, \eqref{v-23}, \eqref{w-4}, \eqref{v-24} and the largeness of $M$. \end{proof}
\subsection{Energy estimates of $W_{\mu_0}^n$}
First, we close the estimate of $W_{\mu_0}^n$ in \eqref{v-2}. Substituting \eqref{v-17} into \eqref{v-5} yields \begin{equation}\label{v-25}
\frac{d}{ds}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy+\int_{\mathbb{R}}(\frac{98\mu_0-102}{100}-\frac{5}{2}-\beta_{\tau}e^{\frac{s}{2}}\partial_y\mu_n(w))|W_{\mu_0}^n|^2(y, s)dy\leq M^{\frac{1}{2}}e^{-s}. \end{equation} When $\mu_0\geq 6$, it is derived from \eqref{i-7}, \eqref{ii-3}, \eqref{vi2-4}, \eqref{vi-35}, \eqref{vii-12}, \eqref{vii-16}, \eqref{viii-3} and \eqref{viii-5} that \begin{equation}\label{v-26} \frac{98\mu_0-102}{100}-\frac{5}{2}-\beta_{\tau}e^{\frac{s}{2}}\partial_y\mu_n(w) \geq\frac{486}{100}-\frac{5}{2}-M e^{-s}-(1+\varepsilon^{\frac{1}{20}})>\frac{6}{5}. \end{equation} In addition, one has from \eqref{v-25}-\eqref{v-26} that \begin{equation*}
\frac{d}{ds}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy
+\frac{6}{5}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy\leq M^{\frac{1}{2}}e^{-s}. \end{equation*} This yields \begin{equation}\label{v-27}
\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy\leq e^{\frac{6}{5}(-\log\varepsilon-s)}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, -\log\varepsilon)dy+5M^{\frac{1}{2}}e^{-s}. \end{equation} Then it follows from \eqref{v-27}, \eqref{ii-3}, \eqref{iii-9} and \eqref{vii-401} that \begin{equation}\label{v-271}
\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy\leq 6M^{\frac{1}{2}} e^{-s}. \end{equation}
\subsection{Energy estimates of $W_{\mu_0}^j\ (1\leq j\leq n-1)$}
We now close the estimates of $W_{\mu_0}^j\ (1\leq j\leq n-1)$ in \eqref{v-2}. Similarly to \eqref{v-26}, there exists a minimal positive integer $\mu_0\geq 6$ such that for all $1\leq m\leq n-1$, \begin{equation}\label{v-28}
3\mu_0-\frac{5}{2}-\beta_{\tau}e^{\frac{s}{2}}\partial_y\mu_m(w)\geq 3\mu_0-\frac{5}{2}-M e^{-s}-(1+\varepsilon^{\frac{1}{4}})|\partial_{w_n}\mu_m(0)|\geq 4. \end{equation} This also ensures the assumption \eqref{iv-50} in turn.
In addition, it follows from \eqref{v-7} and \eqref{v-271} that \begin{equation}\label{v-29}
\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^{m}|^2(y, s)dy\leq \frac{\delta}{2n}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy+c^{*}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|\eta^{-\frac{1}{3}}(y)W_{\mu_0}^j|^2(y, s)dy+24nM^{\frac{3}{4}}e^{-3s}, \end{equation} where $\delta=8n^2 M^{-\frac{1}{16}}$ and $c^*=2n M^{\frac{1}{16}}$. On the other hand, there exist two positive constants $K$ and $K^{*}$ such that \begin{equation}\label{v-30}
\eta^{-\frac{2}{3}}(y)\leq \frac{\delta}{2n c^{*}}\ (|y|\geq K),\ \eta^{-\frac{2}{3}}(y)\leq \frac{K^*}{nc^{*}}\ (|y|\leq K). \end{equation} It is derived from \eqref{v-29} and \eqref{v-30} that \begin{equation}\label{v-31}
\int_{\mathbb{R}}|\mathbb{F}_{\mu_0}^{m}|^2(y, s)dy\leq \frac{\delta}{n}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy+\frac{K^*}{n}\sum\limits_{j=1}^{n-1}\int_{-K}^K|W_{\mu_0}^j|^2(y, s)dy+24n M^{\frac{3}{4}} e^{-3s}. \end{equation}
Next we determine the Lipschitz continuous function $q_m(y)\ (1 \leq m\leq n-1)$ in Lemma \ref{lem8-1}. Due to \eqref{i-7a} and $\mu_n(0)=0$, when $\mu_m(0)<0\ (1\leq m\leq i_0-1)$, then $q_m(y)$ is defined as \begin{equation}\label{v-32} q_m(y):=q_m^-(y)=\begin{cases}0,\ y\leq -K,\\ \frac{y}{K}+1,\ -K\leq y\leq K,\\ 2,\ y\geq K. \end{cases} \end{equation} When $\mu_m(0)>0\ (i_0\leq m\leq n-1)$, $q_m(y)$ is defined as \begin{equation}\label{v-33} q_m(y):=q_m^+(y)=q_m^-(-y). \end{equation} According to the definitions \eqref{v-32}-\eqref{v-33}, we have \begin{equation}\label{v-34} 0\leq q_m(y)\leq 2. \end{equation} On the other hand, $Q_m(y, s)$ in \eqref{v-3} satisfies \begin{equation}\label{v-35}\begin{aligned}
Q_m(y, s)&\geq \left(\frac{1}{K}\beta_{\tau}e^{\frac{s}{2}}|\mu_m(w)-\dot{\xi}(t)|-\frac{3}{2}K\right)\chi_{\{|y|\leq K\}}\\
&\geq \left(\frac{|\mu_m(0)|}{2K}e^{\frac{s}{2}}-\frac{3}{2}K\right)\chi_{\{|y|\leq K\}}\geq K^*\chi_{\{|y|\leq K\}}, \end{aligned} \end{equation} where the last inequality comes from \eqref{i-7}, \eqref{ii-3}, \eqref{vi2-4}, \eqref{vii-12}, \eqref{viii-1} and the fact of $s\geq -\log\varepsilon$.
With \eqref{v-271}-\eqref{v-28}, \eqref{v-31}, \eqref{v-34}-\eqref{v-35} and the largeness of $M$, summing up $m$ on both sides of \eqref{v-4} from $1$ to $n-1$ yields \begin{equation*}\begin{aligned}
\frac{d}{ds}\sum\limits_{m=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^m|^2(y, s)dy
+\frac{7}{2}\sum\limits_{m=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^m|^2(y, s)dy\leq 14n^2 M^{\frac{3}{4}} e^{4-3s}. \end{aligned} \end{equation*} This shows \begin{equation}\label{v-36}
\sum\limits_{m=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^m|^2(y, s)dy\leq e^{\frac{7}{2}(-\log\varepsilon-s)}\sum\limits_{m=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^m|^2(y, -\log\varepsilon)dy+28n^2 M^{\frac{3}{4}}e^{4-3s}. \end{equation} Then it comes from \eqref{iii-9}, \eqref{vii-401} and \eqref{v-36} that \begin{equation}\label{v-37}
\sum\limits_{m=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^m|^2(y, s)dy\leq 30n^2 e^4 M^{\frac{3}{4}} e^{-3s}. \end{equation}
{\bf Proof of Theorem \ref{thm8-1}:} It is derived from \eqref{v-271}, \eqref{v-37} and \eqref{vii-402} that \begin{equation}\label{v-38}
\sum\limits_{j=1}^{n-1}\|\partial_y^{\mu_0}W_j(\cdot, s)\|_{L^2(\mathbb{R})}\leq (n-1)\sum\limits_{k=1}^{n-1}\|W_{\mu_0}^{k}(\cdot, s)\|_{L^2(\mathbb{R})}\leq \sqrt{30}n^2 e^2 M^{\frac{3}{8}}e^{-\frac{3}{2}s} \end{equation} and \begin{equation}\label{v-39}
\|\partial_y^{\mu_0}W_n(\cdot, s)\|_{L^2(\mathbb{R})}\leq \sum\limits_{k=1}^{n}\|W_{\mu_0}^k(\cdot, s)\|_{L^2(\mathbb{R})}\leq \sqrt{30}n^2 e^2 M^{\frac{3}{8}}e^{-\frac{3}{2}s}+\sqrt{6}M^{\frac{1}{4}}e^{-\frac{s}{2}}. \end{equation} Then the estimates in \eqref{v-1} come from \eqref{v-38}, \eqref{v-39} and the largeness of $M$. Therefore, the proof of Theorem \ref{thm8-1} is completed.
\section{Proof of main theorems}\label{V}
In this section, we complete the proofs of Theorem \ref{thmii-1} and Theorem \ref{thmi-1}. \subsection{Proof of Theorem \ref{thmii-1}}
Based on the local existence of \eqref{i-6} (see \cite{MA}), we utilize the continuous induction to prove Theorem \ref{thmii-1}. To this end, under the induction assumptions \eqref{iv-1}-\eqref{iv-3} and \eqref{iv-6} for suitably large $M>16$, the proof of Theorem \ref{thmii-1} is mainly reduced to recover the estimates \eqref{iv-1}-\eqref{iv-3} and \eqref{iv-6} with the smaller coefficient bounds via the bootstrap arguments.
The induction assumptions of $\kappa(t), \tau(t)$ and $\xi(t)$ in \eqref{iv-1a} and their derivatives in \eqref{iv-1b} are recovered in \eqref{viii-5}-\eqref{viii-51}, \eqref{viii-11} and \eqref{viii-2}-\eqref{viii-3}, \eqref{viii-1} with $M$ replaced by the smaller ones $M^{\frac{1}{2}}, 2$ and $2M^{\frac{3}{4}}$ respectively.
In a similar way, the assumptions \eqref{iv-2} for $W_0$ as well as $\mathcal{W}$ are also recovered by \eqref{vi-24}, \eqref{vi-36}, \eqref{vi-35}, \eqref{vi-16} and \eqref{vi-27} with the coefficients replaced by the smaller ones accordingly. In addition, the assumptions \eqref{iv-3} for $W_j\ (j\neq n)$ are also obtained by \eqref{vii-12}, \eqref{vii-17}, \eqref{vii5-2} and \eqref{vii-30} with the coefficient $M$ replaced by the smaller ones. Finally, the energy assumptions \eqref{iv-6} are obviously derived by \eqref{v-1} in Theorem \ref{thm8-1} with $M$ replaced by $M^{\frac{1}{2}}$.
Therefore, Theorem \ref{thmii-1} is proved via the method of continuous induction.
\subsection{Proof of Theorem \ref{thmi-1}}
Due to \eqref{ii-2} and Theorem \ref{thmii-1}, in order to complete the proof of Theorem \ref{thmi-1}, we only need to verify the $C^{\frac{1}{3}}$ optimal regularity of $W_0$ and \eqref{i-13}.
First, we show that the optimal regularity of $W_0(y, s)$ is $C^{\frac{1}{3}}$ with respect to the spatial variable.
By \eqref{ii-2}, for any $t\geq -\varepsilon$ and $x_1, x_2\in\mathbb{R}$, set \begin{equation}\label{V-1} s=s(t), y_i=(x_i-\xi(t))e^{\frac{3s}{2}}\ (i=1, 2). \end{equation} Then for any $\alpha>0$, by \eqref{ii-3}, we arrive at \begin{equation}\label{V-2}
\frac{|w_n(x_1, t)-w_n(x_2, t)|}{|x_1-x_2|^{\alpha}}=e^{\frac{3\alpha s}{2}-\frac{s}{2}}\frac{|W_0(y_1, s)-W_0(y_2, s)|}{|y_1-y_2|^{\alpha}}. \end{equation} When $\alpha=\frac{1}{3}$, it is derived from \eqref{V-2}, \eqref{ii-9b} and \eqref{vi-35} that \begin{equation}\label{V-3}\begin{aligned}
&\sup\limits_{x_1, x_2\in\mathbb{R}, x_1\neq x_2}\frac{|w_n(x_1, t)-w_n(x_2, t)|}{|x_1-x_2|^{\alpha}}\\
\leq& \sup\limits_{y_1, y_2\in\mathbb{R}, y_1\neq y_2}\frac{|W_0(y_1, s)-W_0(y_2, s)|}{|y_1-y_2|^{\frac{1}{3}}}\\
=&\sup\limits_{y_1, y_2\in\mathbb{R}, y_1\neq y_2}\frac{\displaystyle|\int_{y_2}^{y_1}\partial_y W_0(z, s)dz|}{|y_1-y_2|^{\frac{1}{3}}}\\
\leq&2\sup\limits_{y_1, y_2\in\mathbb{R}, y_1\neq y_2}\frac{\displaystyle|\int_{y_2}^{y_1}\eta^{-\frac{1}{3}}(z)dz|}{|y_1-y_2|^{\frac{1}{3}}}\leq 12. \end{aligned} \end{equation}
When $\frac{1}{3}<\alpha<1$, with \eqref{ii-8}, \eqref{ii-14}, \eqref{vi-16} and $y_{1}^*=1, y_2^*=0$ (denote by $(x_1^*, x_2^*)$ the corresponding points given by the transformation \eqref{V-1}, respectively), we have \begin{equation}\label{V-4}\begin{aligned}
\frac{|W_0(y_1^*, s)-W_0(y_2^*, s)|}{|y_1^*-y_2^*|^{\alpha}}&\geq |\overline{W}(1)|-|\mathcal{W}(1, s)-\mathcal{W}(0, s)|\\
&\geq |\overline{W}(1)|-2\varepsilon^{\frac{1}{11}}>\frac{1}{2}|\overline{W}(1)|>0. \end{aligned} \end{equation}
Thus, \eqref{V-4} shows that when $\frac{1}{3}<\alpha<1$, for any $\overline{M}>0$, there exists a constant $s_0\geq -\log\varepsilon$ (and corresponding $t_0$ by \eqref{V-1}) such that when $s\geq s_0$, \begin{equation}\label{V-5}
e^{\frac{3\alpha s}{2}-\frac{s}{2}}\frac{|W_0(y_1^*, s)-W_0(y_2^*, s)|}{|y_1^*-y_2^*|^{\alpha}}\geq \frac{1}{2}e^{\frac{3\alpha s_0}{2}-\frac{s_0}{2}}|\overline{W}(1)|>\overline{M}. \end{equation}
Due to the arbitrariness of $\overline{M}$, it implies from \eqref{V-2} and \eqref{V-5} that $w_n(x, t)\notin C^{\alpha}$ with $\alpha>\frac{1}{3}$. Combining this with \eqref{V-3} shows that the optimal regularity of $w_n(x, t)$ is $C^{\frac{1}{3}}$ with respect to the spatial variable.
Therefore, (1) and (2) in Theorem \ref{thmi-1} are obtained from Theorem \ref{thmii-1} and the above verification of $C^{\frac{1}{3}}$ regularity for $W_0$.
Next we prove \eqref{i-13}. It is derived from \eqref{ii-2}, \eqref{ii-3} and \eqref{ii-10} that \begin{equation}\label{V-6} \partial_x w_n(\xi(t), t)=e^{s}\partial_y W_0(0, s)=-e^{s}=\frac{1}{\tau(t)-t}. \end{equation} By the definition of $\tau(t)$ in \eqref{ii-1}, we have $\tau(T^*)=T^*$ and then \begin{equation}\label{V-7}
|T^*-\tau(t)|=|\tau(T^*)-\tau(t)|\leq \int_t^{T^*}|\dot{\tau}(z)|dz\lesssim \varepsilon^{\frac{1}{3}}(T^*-t), \end{equation} where the last inequality comes from (2) in Theorem \ref{thmii-1}.
Following \eqref{V-7}, one has \begin{equation}\label{V-8}\begin{aligned}
&|\tau(t)-t|\leq |T^*-t|+|\tau(t)-T^*|\leq (1+\varepsilon^{\frac{1}{4}})|T^*-t|,\\
&|\tau(t)-t|\geq |T^*-t|-|\tau(t)-T^*|\geq (1-\varepsilon^{\frac{1}{4}})|T^*-t|. \end{aligned} \end{equation}
Then, we derive from \eqref{V-6}-\eqref{V-8} that \begin{equation}\label{V-9} -2<(T^*-t)\partial_x w_n(\xi(t), t)<-\frac{1}{2}. \end{equation}
Collecting \eqref{V-7}, \eqref{V-9} and Theorem \ref{thmii-1} yields \eqref{i-13} in Theorem \ref{thmi-1} and then the proof of Theorem \ref{thmi-1} is completed.
\appendix
\renewcommand{\appendixname}{}
\section{Appendix}\label{A}
In the Appendix, we introduce a useful interpolation inequality (see \cite{Ad}) and give its applications. \begin{lem}\label{lemA-3} {\bf (Gagliardo-Nirenberg-Sobolev inequality).} {\it Let $u: \mathbb{R}^d\to\mathbb{R}$. Fix $1\leq q, r\leq \infty$ and $j, m\in\mathbb{N}$, and $\frac{j}{m}\leq \alpha\leq 1$. If \begin{equation*} \frac{1}{p}=\frac{j}{d}+\alpha(\frac{1}{r}-\frac{m}{d})+\frac{1-\alpha}{q}, \end{equation*} then one has \begin{equation}\label{A2-1}
\|D^j u\|_{L^p}\leq C\|D^m u\|_{L^r}^{\alpha}\|u\|_{L^q}^{1-\alpha}, \end{equation} where the positive constant $C$ depends on $d, j, m, r$ and $q$.} \end{lem}
Next we estimate the $L^2$-norm of the terms ${I_{\beta q k}}'s\ (1\leq k\leq n)$ with the expression as \begin{equation}\label{A2-2}
I_{\beta q k}=\sum\limits_{\gamma_1+\cdots+\gamma_q=\mu_0-\beta+1,\ \gamma_j\geq 1 (1\leq j\leq q)}|\partial_y^{\gamma_1}W|\cdots |\partial_y^{\gamma_q}W|\cdot|\partial_y^{\beta}W_k|. \end{equation}
The estimates of ${I_{\beta q k}}'s$ are considered in two cases: $1\leq k\leq n-1$ and $k=n$. \begin{lem}\label{lemA-4} {\it For $1\leq k\leq n-1$, we have \begin{subequations}\label{A2-3}\begin{align} &\sum\limits_{\beta=1, \mu_0}\int_{\mathbb{R}}I_{\beta 1 k}^2(y, s)dy\nonumber\\
&\qquad\qquad\leq 2e^{-s}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|\eta^{-\frac{1}{3}}(y)W_{\mu_0}^j|^2(y, s)dy+M^{\frac{1}{16}}e^{-3s}\int_{\mathbb{R}}|W_{\mu_0}^{n}|^2(y, s)dy+M^3 e^{-6s},\label{A2-3a}\\ &\sum\limits_{1< \beta<\mu_0}\int_{\mathbb{R}}I_{\beta 1 k}^2(y, s)dy\nonumber\\
&\qquad\qquad\leq 2 M^{-\frac{1}{8}}e^{-s}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy+M^{\frac{1}{4}}e^{-3s}\int_{\mathbb{R}}|W_{\mu_0}^n|^2(y, s)dy+M^3 e^{-6s},\label{A2-3b}\\ &\int_{\mathbb{R}}I_{\beta q k}^2(y, s)\leq M^{3\mu_0+1}e^{-(4+\frac{1}{4})s}\ (q\geq 2).\label{A2-3c} \end{align} \end{subequations} } \end{lem}
\begin{proof} For the proof of \eqref{A2-3a}, by \eqref{ii-3}, \eqref{vi-35}, \eqref{vii-16}, the expansion in \eqref{ii-17}, \eqref{vii-401}-\eqref{vii-402} and \eqref{v-2}, we have \begin{equation}\label{v-10}\begin{aligned} &\int_{\mathbb{R}}(I^2_{\mu_0 1 k}+I^2_{1 1 k})(y, s)dy\\
\leq&\int_{\mathbb{R}}|\partial_y W|^2\cdot |\partial_y^{\mu_0}W_k|^2(y, s)dy+\int_{\mathbb{R}}|\partial_y W_k|^2|\partial_y^{\mu_0}W|^2(y, s)dy\\
\leq& 2e^{-s}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|\eta^{-\frac{1}{3}}(y)W_{\mu_0}^j|^2(y, s)dy+M^{\frac{1}{16}} e^{-3s}\sum\limits_{j=1}^{n}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy. \end{aligned} \end{equation} Combining \eqref{v-10} with \eqref{v-2} yields \eqref{A2-3a}.
With respect to the case of $1<\beta<\mu_0$ and $q=1$ in \eqref{A2-2}, it is derived from \eqref{ii-3}, \eqref{vi-35}, \eqref{vii-402}, \eqref{vii-16}, \eqref{v-2}, H\"older inequality and Lemma \ref{lemA-3} that \begin{equation}\label{v-11}\begin{aligned} &\sum\limits_{1<\beta<\mu_0}\int_{\mathbb{R}}I_{\beta 1 k}^2(y, s)dy\\
\leq&\sum\limits_{1<\beta<\mu_0}\int_{\mathbb{R}}|\partial_y^{\mu_0+1-\beta}W|^2 |\partial_y^{\beta}W_k|^2(y, s)dy\\
\leq&\sum\limits_{1<\beta<\mu_0}\|\partial_y^{\beta}W_k(\cdot, s)\|_{L^{\frac{2(\mu_0-1)}{\beta-1}}}^2\|\partial_y^{\mu_0-\beta+1}W(\cdot, s)\|_{L^{\frac{2(\mu_0-1)}{\mu_0-\beta}}}^2\\
\leq&M^{\frac{1}{16}} \sum\limits_{1<\beta<\mu_0}\|\partial_y^{\mu_0}W_k(\cdot, s)\|_{L^2}^{2\frac{\beta-1}{\mu_0-1}}\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^{2\frac{\mu_0-\beta}{\mu_0-1}}\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{2\frac{\mu_0-\beta}{\mu_0-1}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2\frac{\beta-1}{\mu_0-1}}\\
\leq&M^{\frac{1}{8}+\frac{\beta-1}{8(\mu_0-1)}}\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^2\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^2+M^{-\frac{1}{8}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^2\|\partial_y^{\mu_0}W_k(\cdot, s)\|_{L^2}^2\\
\leq& 2M^{-\frac{1}{8}} e^{-s}\sum\limits_{j=1}^{n-1}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy+M^{\frac{1}{4}}e^{-3s}\sum\limits_{j=1}^{n}\int_{\mathbb{R}}|W_{\mu_0}^j|^2(y, s)dy. \end{aligned} \end{equation} Then \eqref{A2-3b} comes from \eqref{v-11} and \eqref{v-2}.
For the easier cases of $I_{\beta q k}$ ($q\geq 2$) in \eqref{A2-2}, we will apply the following three type estimates with the help of Lemma \ref{lemA-3}: \begin{subequations}\label{v-13}\begin{align}
&\|\partial_y^{\gamma_j}W(\cdot, s)\|_{L^{\infty}}\leq M^{\frac{1}{16}}\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{\frac{\gamma_j-1}{\mu_0-1-\frac{1}{2}}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{\frac{\mu_0-\frac{1}{2}-\gamma_j}{\mu_0-1-\frac{1}{2}}},\label{v-13a}\\
&\|\partial_y^{\gamma_j}W(\cdot, s)\|_{L^{2}}\leq M^{\frac{1}{16}}\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{\frac{\gamma_j-1-\frac{1}{2}}{\mu_0-1-\frac{1}{2}}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{\frac{\mu_0-\gamma_j}{\mu_0-1-\frac{1}{2}}}\ (\gamma_j\geq 2),\label{v-13b}\\
&\|\partial_y^{\beta}W_k(\cdot, s)\|_{L^2}
\leq M^{\frac{1}{16}}\|\partial_y^{\mu_0}W_k(\cdot, s)\|_{L^2}^{\frac{\beta-1-\frac{1}{2}}{\mu_0-1-\frac{1}{2}}}\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^{\frac{\mu_0-\beta}{\mu_0-1-\frac{1}{2}}}\ (\beta\geq 2).\label{v-13c} \end{align} \end{subequations}
When $\beta\geq 2$ and $q\geq 2$, substituting \eqref{v-13a} and \eqref{v-13c} into \eqref{A2-2} yields \begin{equation}\label{v-131}\begin{aligned} &\int_{\mathbb{R}} I_{\beta q k}^2 (y, s)dy\\
=&\sum\limits_{\gamma_1+\cdots+\gamma_q=\mu_0-\beta+1,\ \gamma_j\geq 1 (1\leq j\leq q)}\|\partial_y^{\gamma_1}W(\cdot, s)\|_{L^{\infty}}^2\cdots\|\partial_y^{\gamma_q}W(\cdot, s)\|_{L^{\infty}}^2\|\partial_y^{\beta}W_k(\cdot, s)\|_{L^{2}}^2\\
\leq & M^{\frac{q+1}{4}}\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{2\frac{\mu_0-\beta+1-q}{\mu_0-1-\frac{1}{2}}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2\frac{q(\mu_0-\frac{1}{2})-(\mu_0-\beta+1)}{\mu_0-1-\frac{1}{2}}}\|\partial_y^{\mu_0}W_k(\cdot, s)\|_{L^{2}}^{2\frac{\beta-1-\frac{1}{2}}{\mu_0-1-\frac{1}{2}}}\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^{2\frac{\mu_0-\beta}{\mu_0-1-\frac{1}{2}}}\\
\leq& M^{\frac{q+1}{2}} \left(\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2q}+\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{2q}\right)\left(\|\partial_y^{\mu_0}W_k(\cdot, s)\|_{L^2}^2+\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^2\right). \end{aligned} \end{equation} Combining this with \eqref{iv-6}, \eqref{vi-35} and \eqref{vii-16} derives \begin{equation}\label{v-14} \int_{\mathbb{R}}I_{\beta q k}^2 (y, s)dy\leq M^{3q+1}e^{-(q+3)s}\leq M^{3\mu_0+1}e^{-(q+3)s}\ (q\geq 2, \beta\geq 2). \end{equation}
When $\beta=1$ and $q\geq 3$, similarly to \eqref{v-14}, we have \begin{equation}\label{v-15}\begin{aligned} &\int_{\mathbb{R}} I_{1 q k}^2 (y, s)dy\\
=&\sum\limits_{\gamma_1+\cdots+\gamma_q=\mu_0-\beta+1,\ \gamma_j\geq 1 (1\leq j\leq q)}\|\partial_y^{\gamma_1}W(\cdot, s)\|_{L^{\infty}}^2\cdots\|\partial_y^{\gamma_q}W(\cdot, s)\|_{L^{\infty}}^2\|\partial_y W_k(\cdot, s)\|_{L^{2}}^2\\
\leq & M^{\frac{q+1}{4}}\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{2\frac{\mu_0-\beta+1-q}{\mu_0-1-\frac{1}{2}}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2\frac{q(\mu_0-\frac{1}{2})-(\mu_0-\beta+1)}{\mu_0-1-\frac{1}{2}}}\|\partial_y W_k(\cdot, s)\|_{L^{2}}^{2}\\
\leq& M^{\frac{q+1}{2}} \left(\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2q}+\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{2q}\right)\|\partial_y W_k(\cdot, s)\|_{L^{2}}^2\\ \leq &M^{3q+1}e^{-(q+\frac{5}{4})s}\ (q\geq 3), \end{aligned} \end{equation} where the last estimate comes from \eqref{iv-6} and \eqref{vii5-2} with $\nu=\frac{7}{24}$.
When $\beta=1$ and $q=2$, without loss of generality, we assume $\gamma_2\geq 2$ due to $\gamma_1+\gamma_2=\mu_0\geq 6$. Then we apply \eqref{v-13a} and \eqref{v-13b} to control $\partial_y^{\gamma_1}W$ and $\partial_y^{\gamma_2}W$ respectively and subsequently obtain the following estimate with \eqref{iv-6} and \eqref{vii-16} \begin{equation}\label{v-151}\begin{aligned} &\int_{\mathbb{R}}I_{1 q k}^2(y, s)dy\\
=&\|\partial_y^{\gamma_1}W(\cdot, s)\|_{L^{\infty}}^2\|\partial_y^{\gamma_2}W(\cdot, s)\|_{L^2}^2\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^2\\
\leq & M^{\frac{1}{4}}\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2\frac{\mu_0-\frac{1}{2}}{\mu_0-1-\frac{1}{2}}}\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^{2}}^{2\frac{\mu_0-2-\frac{1}{2}}{\mu_0-1-\frac{1}{2}}}\|\partial_y W_k(\cdot, s)\|_{L^{\infty}}^2\\ \leq &M^5 e^{-5s}. \end{aligned} \end{equation} Thus, we get \eqref{A2-3c} from \eqref{v-14}-\eqref{v-151} and the proof of Lemma \ref{lemA-4} is finished.\end{proof}
\begin{lem}\label{lemA-5} {\it For $I_{\beta q n}$, one has \begin{equation}\label{A-10} \int_{\mathbb{R}}I_{\beta q n}^2(y, s)dy\leq M^{3q+1}e^{-(q+1)s}. \end{equation} } \end{lem}
\begin{proof} As in Lemma \ref{lemA-4}, the estimates on the two terms $I_{\mu_0 1 n}$ and $I_{1 1 n}$ are crucial in the proof of \eqref{A-10}. For these two terms, similarly to \eqref{v-10}, one has \begin{equation}\label{A-111}
\int_{\mathbb{R}}(I_{\mu_0 1 n}^2+I_{1 1 n}^2)(y, s)dy\leq 2\int_{\mathbb{R}}|\partial_y W|^2\cdot|\partial_y^{\mu_0}W|^2(y, s)dy
\leq 2\|\partial_y W(\cdot, s)\|_{L^{\infty}}^2\cdot\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^2. \end{equation}
When $1<\beta<\mu_0$, similarly to \eqref{v-131}, we arrive at \begin{equation}\label{A-112}
\int_{\mathbb{R}}I_{\beta q n}^2(y, s)dy\leq M^{\frac{q+1}{2}}\left(\|\partial_y W(\cdot, s)\|_{L^{\infty}}^{2q}+\|\partial_y^{\mu_0}W(\cdot, s)\|_{L^2}^{2q}\right)\left(\|\partial_y W_n(\cdot, s)\|_{L^{\infty}}^2+\|\partial_y^{\mu_0}W_n(\cdot, s)\|_{L^2}^2\right). \end{equation} Combining \eqref{A-111}-\eqref{A-112} with \eqref{ii-3}, \eqref{iv-6}, \eqref{vi-35} and \eqref{vii-16} yields \eqref{A-10} and the proof of Lemma \ref{lemA-5} is completed. \end{proof}
\vskip 1 true cm
\end{document}
\begin{document}
\date{\today}
\title [Square function and local smoothing estimates]{A trilinear approach to square function and local smoothing estimates for the wave operator}
\author[J. Lee]{Jungjin Lee}
\address{Department of Mathematical Sciences, School of Natural Science, Ulsan National Institute of Science and Technology, UNIST-gil 50, Ulsan 44919, Republic of Korea} \email{jungjinlee@unist.ac.kr}
\subjclass[2010]{42B10, 42B15, 42B37}
\keywords{Wave equation, square function, smoothing estimates}
\thanks{ The author was supported in part by NRF grant No. 2017R1D1A1B03036053 (Republic of Korea).}
\begin{abstract} The purpose of this paper is to improve the known estimates for Mockenhaupt's square function in $\mathbb R^3$ and for Sogge's local smoothing in $\mathbb R^{2+1}$ spacetime. For this we use the trilinear approach of S. Lee and A. Vargas for the cone multiplier with some trilinear estimates obtained from the $\ell^2$ decoupling theorem and multilinear restriction theorem. \end{abstract}
\maketitle
\section{Introduction}
Let $\Gamma = \{ (\xi,\tau) \in \mathbb R^2 \times \mathbb R : \tau = |\xi|,~ 1 \le \tau \le 2 \}$ be a truncated light cone in $\mathbb R^3$. For given small $0< \delta <1 $, let $\Gamma_\delta$ denote the $\delta$-neighborhood of $\Gamma$. Let $f$ be a function on $\mathbb R^3$ whose Fourier transform is supported in $\Gamma_\delta$. We partition $\Gamma_\delta$ into $O(\delta^{-1/2})$ sectors \(
\Theta = \{ (\xi, \tau) \in \Gamma_\delta : \xi/|\xi| \in \theta \} \) corresponding to an arc $\theta$ of angular length $O(\delta^{1/2})$ in the unit circle, and let $\mathbf \Pi_\delta$ denote the collection of such sectors. We take a collection of Schwartz functions \( \Xi_{\Theta} \) so that its Fourier transform $\widehat \Xi_\Theta$ is supported on a neighborhood of $\Theta$ and $\{\widehat \Xi_\Theta \}_{\Theta \in \mathbf \Pi_\delta}$ forms a partition of unity of $\Gamma_\delta$. The square function $S_\delta f$ is defined as \[
S_\delta f = \Big(\sum_{\Theta \in \mathbf \Pi_\delta} |f_\Theta|^2 \Big)^{1/2} \] where $f_\Theta = f \ast \Xi_\Theta$. For $1 \le p \le \infty$, we say that the square function estimate ${\mathcal{SQ}}(p \rightarrow p; \alpha)$ holds if the estimate \[
\| f \|_p \le C_{\epsilon}\delta^{-\alpha-\epsilon} \| S_\delta f \|_p \] holds for all $\epsilon>0$ and all functions $f$ having Fourier support in $\Gamma_\delta$, where $C_\epsilon$ is a positive constant depending on $\epsilon$ but not on $\delta$.
It was conjectured that the square function estimate ${\mathcal{SQ}}(p \rightarrow p; \alpha)$ holds for $p > 2$ and $\alpha \ge \max(0, \frac{1}{2} - \frac{2}{p})$, see \cite{garrigos2009cone, tao2000bilinearII}. Mockenhaupt \cite{mockenhaupt1993cone} first considered it, and proved the estimate $\mathcal{SQ}(4 \to 4; 1/8=0.125)$. It was observed by Bourgain \cite{bourgain1995cone} that the exponent $\alpha$ could be less than $1/8$, and Tao and Vargas \cite{tao2000bilinearII} gave an explicit exponent $\alpha$ by combining their bilinear cone restriction estimates with Bourgain's arguments. After that, the sharp bilinear cone restriction estimate was obtained by Wolff \cite{wolff2001sharp}, and the estimate $\mathcal{SQ}(4 \to 4; 5/44=0.113\dot6\dot3)$ immediately followed by a theorem in \cite{tao2000bilinearII}.
Garrig\'os and Seeger \cite{garrigos2009cone} have studied \textit{$\ell^p$ decoupling estimates} (called Wolff-type inequalities \cite{wolff2000local}) for cones, and they further improved the exponent $\alpha$ by combining $\ell^p$ decoupling estimates with bilinear arguments in \cite{tao2000bilinearII}. In \cite{wolff2000local}, Wolff introduced an important type of estimate related to the above square function which have become known as {$\ell^p$ decoupling inequalities}. Decoupling inequalities will play an important role in this paper and will be discussed in detail in section 3. Recently, the sharp $\ell^2$ decoupling theorem for the cone was proved by Bourgain and Demeter \cite{bourgain2015proof} using the multilinear restriction theorem due to Bennett, Carbery and Tao \cite{bennett2006multilinear}. So, by results in \cite{garrigos2009cone} the estimate $\mathcal{SQ}(4 \to 4; 3/32=0.09375)$ was obtained. Our first result is to make a further progress on the exponent $\alpha$.
\begin{thm} \label{thm:sqfEst} The estimate $\mathcal{SQ}(4 \to 4;1/16=0.0625)$ holds. \end{thm}
The approach to Theorem \ref{thm:sqfEst} is based on trilinear methods. S. Lee and Vargas \cite{lee2012cone} already employed a trilinear approach to square function estimates by adapting the multilinear arguments of Bourgain and Guth \cite{bourgain2011bounds}, and obtained the sharp estimate $\mathcal{SQ}(3 \to 3;0)$. In \cite{lee2012cone}, it was observed that trilinear square function estimates for the cone are essentially equivalent to linear ones. To get a trilinear square function estimate, the multilinear restriction theorem of Bennett, Carbery and Tao \cite{bennett2006multilinear} will be utilized as in \cite{lee2012cone}. However, to lift the $L^3$ estimate to the $L^4$ estimate we will combine this with the sharp $\ell^2$ decoupling theorem due to Bourgain and Demeter \cite{bourgain2015proof}. Also, we will adapt the induction-on-scales argument of Bourgain and Demeter \cite{bourgain2015proof}. However, since their arguments take advantage of some properties of decoupling norm not derived from the square function, we cannot formulate an iteration as strong as in \cite{bourgain2015proof}. Nevertheless, it is enough to obtain Theorem \ref{thm:sqfEst}.
The square function estimate is related to several deep questions in harmonic analysis such as the cone multiplier, local smoothing conjecture and the $L^p$ regularity conjecture for convolution operator with the helix. In particular, these conjectures follow from the sharp estimate $\mathcal{SQ}(4 \to 4;0)$, see for example \cite{tao2000bilinearII}, \cite{garrigos2009cone}. Theorem \ref{thm:sqfEst} implies the following partial results on these problems.
\begin{cor} \label{cor} \emph{(i)} If $\alpha > 1/16$ then the local smoothing estimate \[
\Big( \int_{1}^{2} \big\| e^{it \sqrt{-\Delta}} f \big\|^4_{L^4(\mathbb R^2)} dt \Big)^{1/4} \le C_{\alpha} \|f\|_{L_\alpha^{4}(\mathbb R^2)} \] holds, where $L_\alpha^p$ is the $L^p$-Sobolev space of order $\alpha$.
\emph{(ii)} If $\alpha > 1/16$ then the cone multiplier operator $T_\alpha$ defined by $\widehat{T_\alpha f} (\xi,\tau)= \rho(\tau) (1-|\xi|^2/ \tau^2)_+^{\alpha}\hat f(\xi)$ is bounded on $L^4$, where $\rho$ is a bump function on $[1,2]$.
\emph{(iii)} If $\alpha < 5/24$ then the convolution operator $T$ defined by\[ Tf(x) = \int f(x_1 -\cos t, x_2 - \sin t, x_3 -t ) \phi(t) dt \] maps $L^4$ to $L_{\alpha}^4$, where $\phi$ is a bump function. \end{cor}
We note that the sharp estimate $L^p \to L^p_{1/p}$, $p >4$, for the averaging operator $T$ may be obtained by combining the theorem due to Pramanik and Seeger \cite{Pramanik2007averages} and the Bourgain--Demeter decoupling estimates.
The proof of Corollary \ref{cor} is well known, and we will not reproduce it here; see for example \cite{tao2000bilinearII}. For other related problems, see \cite{garrigos2009cone}, \cite{bourgain2015proof}.
\
We are further concerned with $L_\alpha^p \to L^q$ type local smoothing estimates \begin{equation} \label{eqn:LS}
\Big( \int_1^2 \big\| e^{it \sqrt{-\Delta}} f \big\|^q_{L^q(\mathbb R^2)} dt \Big)^{1/q} \le C_{p,q,\alpha} \|f\|_{L_\alpha^{p}(\mathbb R^2)}. \end{equation} It is conjectured that this local smoothing estimate holds if \begin{equation}\label{apq_con} \begin{gathered} 1 \le p \le q \le \infty, \\ \frac{1}{p} + \frac{3}{q} = 1, \qquad \alpha \ge \frac{1}{p} -\frac{3}{q} + \frac{1}{2}, \end{gathered} \end{equation} see \cite{schlag1997local, tao2000bilinearII}.
Indeed, the necessity of condition $p \le q$ follows from translation invariance, see \cite{hormander1960estimates}. From the focusing example, Knapp example and delta function, one has three necessary conditions \begin{align} \label{al_1} \alpha &\ge \frac{1}{p} -\frac{3}{q} + \frac{1}{2},\\ \label{al_2} \alpha &\ge \frac{3}{2p} - \frac{3}{2q},\\ \label{al_3} \alpha &\ge \frac{2}{p} -\frac{1}{q} - \frac{1}{2}, \end{align} respectively, see \cite{tao2000bilinearII} for details.
Let $I_1 = (1,1;1/2+\varepsilon),~ I_2 =(2,2;0),~ I_\infty =(\infty, \infty; 1/2+\varepsilon),~ I_{1,\infty} =(1,\infty;3/2+\varepsilon)$ where $\varepsilon >0$ is arbitrary. When $(p,q;\alpha) = I_1, I_2, I_\infty$ and $I_{1,\infty}$, one can obtain \eqref{eqn:LS} from the fixed-time estimates due to Miyachi \cite{miyachi1980some} and Peral \cite{peral1980lp}. First, in case that \eqref{al_3} is dominant, the reciprocal range $(1/p,1/q)$ is the triangular shape with vertices $(1,1)$, $(1/2,1/2)$ and $(1,0)$. In this case, by interpolation, the estimates \eqref{eqn:LS} for such triangular shape range follow from the estimates for $I_1, I_2$ and $I_{1, \infty}$. We see that the conjecture \eqref{apq_con} satisfies both \eqref{al_1} and \eqref{al_2}. If we have the conjecture, by interpolating between \eqref{apq_con} and $I_{\infty}$ the estimates \eqref{eqn:LS} are obtained when \eqref{al_1} is dominant, and analogously the interpolation between \eqref{apq_con} and $I_{2}$ gives the estimates \eqref{eqn:LS} when \eqref{al_2} is dominant. For an endpoint $(p,q;\alpha)=(4,4;0)$, it is known that the local smoothing estimate does not hold, see \cite{Wolff96recentwork}. But, for $q > 4$, $\frac{1}{p} + \frac{3}{q} =1$ and $\alpha = \frac{1}{p} - \frac{3}{q} + \frac{1}{2}$, it is not known whether the local smoothing estimate holds or not.
The critical $L_\alpha^4 \to L^4$ estimate has been considered in Corollary \ref{cor}. We continue to study a sharp $L_\alpha^p \to L^q$ estimate when $p<q$. From Strichartz' estimate $L_{1/2}^2 \to L^6$, this conjecture follows for $q \ge 6$. Schlag and Sogge \cite{schlag1997local} first improved this to $q \ge 5$, and Tao and Vargas \cite{tao2000bilinearII} made further progress by using bilinear approach. By the sharp bilinear cone restriction estimate due to Wolff \cite{wolff2001sharp} and the results in \cite{tao2000bilinearII}, the conjecture was improved to $q \ge 14/3 = 4.\dot6$, and the $\epsilon$-loss of $\alpha$ was removed by S. Lee \cite{lee2003endpoint}. Our second result is to obtain an improved sharp local smoothing estimate.
\begin{thm} \label{thm:localSm} The estimate \eqref{eqn:LS} holds for $q \ge 30/7=4.\dot28571\dot4$ and $p,\alpha$ satisfying the conditions in \eqref{apq_con} except the endpoint $(p,q;\alpha) = (10/3, 30/7 ;1/10)$. \end{thm}
Theorem \ref{thm:localSm} will be proved through the trilinear approach too. The proof is simpler than Theorem \ref{thm:sqfEst}. We will reduce this linear estimate to a trilinear one, and the desired trilinear estimate will be obtained from interpolating between two trilinear estimates deduced from the multilinear restriction theorem \cite{bennett2006multilinear} and the $\ell^2$ decoupling theorem \cite{bourgain2015proof}.
\
Throughout this paper, we write $A \lesssim B$ or $A = O(B)$ if $A \le CB$ for some constant $C >0$ which may depend on $p$, $q$ but not on $\delta$, $R$ and $N$, and $A \sim B$ if $A\lesssim B$ and $B \lesssim A$.
The constants $C$, $C_\varepsilon$, $C_{\epsilon}$, $C_{\epsilon_1}$ and the implicit constants in $\lesssim$ and $\sim$ will be adjusted numerous times throughout the paper. For any finite set $A$, we use $\#A$ to denote its cardinality, and if $A$ is a measurable set, we use $|A|$ to denote its Lebesgue measure. If $R$ is a rectangular box or an ellipsoid and $k$ is a positive real number, we use $kR$ to denote the $k$-dilation of $R$ with center of dilation at the center of $R$.
\section{Reduction to a trilinear estimate}
In this section, we will show that the linear square function estimate is equivalent to a trilinear one. The arguments of this section are a small modification of arguments found in \cite{lee2012cone}. Specifically, we replace $L^3$ arguments by $L^p$ ones for $p \ge 2$.
For an arc $\Omega \subset S^1$ we define a sector $\Gamma^{\Omega}$ and a $\delta$-fattened sector $\Gamma_\delta^{\Omega}$ by \[
\Gamma^\Omega = \{ (\xi,\tau) \in \Gamma : \xi/|\xi| \in \Omega \}, \qquad
\Gamma_\delta^\Omega = \{ (\xi,\tau) \in \Gamma_\delta : \xi/|\xi| \in \Omega \}. \]
Let $\Omega_1, \Omega_2, \Omega_3 \subset S^1$ be arcs whose lengths are comparable to each other. We say that $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are \textit{$\nu$-transverse} if for any unit normal vector $n_i$ to $\Gamma^{\Omega_i}$, $i=1,2,3$, the parallelepiped formed by $n_1, n_2, n_3$ has volume $\ge \nu$, see Figure \ref{fig:transversal}. A key geometric property of the cone $\Gamma$ is that $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are $\nu$-transverse if and only if $\Omega_1, \Omega_2, \Omega_3$ are mutually separated by a distance $\gtrsim \nu^{1/3}$, see \cite{lee2012cone}.
\begin{figure}\label{fig:transversal}
\end{figure}
Let us use the notation $\mathcal{SQ}(p \times p \times p \rightarrow p;\alpha)$ if one has the trilinear square function estimate \[
\Big\| \Big( \prod_{i=1}^{3} |f_i| \Big)^{1/3} \Big\|_p
\le C_{\nu,\epsilon} \delta^{-\alpha-\epsilon} \Big( \prod_{i=1}^{3} \|S_\delta f_i \|_p \Big)^{1/3} \] for all $\epsilon>0$ and all $f_i$ with $\supp \hat f_i \subset \Gamma_\delta^{\Omega_i}$, where $\Omega_1, \Omega_2, \Omega_3$ are any arcs such that their lengths are comparable to each other, and $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are $\nu$-transverse.
It is easy to see that $\mathcal{SQ}(p \rightarrow p;\alpha)$ implies $\mathcal{SQ}(p \times p \times p \rightarrow p;\alpha)$ by H\"older's inequality. We will show that the converse is true. Let \( 1 > \gamma_1 > \gamma_2 > 0 \) be small positive numbers. We define \( \mathbf \Omega(\gamma) \) to be a family of $O(\gamma^{-1})$ arcs of length $\gamma$ covering the unit circle with finite overlap. We take a Schwartz function $\Xi_\Omega$ whose Fourier transform $\widehat \Xi_\Omega$ is a bump function supported on a neighborhood of $\Gamma_\delta^\Omega$. The following is due to S. Lee and Vargas \cite{lee2012cone}*{equation (23)}.
\begin{lem}[Lee--Vargas \cite{lee2012cone}*{equation (23)}] \label{lem:LinTriLcompare} Suppose that $f$ has Fourier support in $\Gamma_\delta$ and let $0 < \gamma_2 < \gamma_1 < 1$. Then for any $x \in \mathbb R^3$, \begin{align}
| f(x) | &\lesssim \max_{\Omega \in \mathbf
\Omega(\gamma_1)}| f_{\Omega}(x) | + \gamma_1^{-1}
\max_{\Omega \in \mathbf \Omega(\gamma_2)} | f_{\Omega}(x)| + \gamma_2^{-50} \max_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf \Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big( \prod_{i=1}^{3} |f_{\Omega_i}(x)| \Big)^{1/3} \end{align} where $f_\Omega = f \ast \Xi_\Omega$. \end{lem}
To obtain the above lemma, S. Lee and Vargas adapted the arguments of Bourgain and Guth \cite{bourgain2011bounds} who made progress on the restriction conjecture by using a multilinear approach.
Using Lemma \ref{lem:LinTriLcompare} we can establish the following relation between the linear and trilinear square function estimates.
\begin{prop} \label{prop:MSQmeanSQ} Let $p \ge 2$ and $\alpha \ge 0$. Suppose that $\mathcal{SQ}(p\times p\times p \rightarrow p;\alpha)$ holds. Then $\mathcal{SQ}(p \rightarrow p;\alpha)$ is valid. \end{prop}
\begin{proof} Let $\epsilon>0$ be given. We assume that $\beta \ge 0$ is the best exponent for which \begin{equation} \label{asshy}
\| f \|_p \le C \delta^{-\beta-\epsilon} \| S_\delta f \|_p \end{equation} holds for all $f$ with $\supp \hat f \subset \Gamma_\delta$, i.e., \[
\beta = \inf_{\delta > 0} \Big( \log_{1/\delta} \sup_{f: \supp \hat f \subset \Gamma_\delta} \frac{ \| f \|_p } {\| S_\delta f \|_p} \Big) - \epsilon. \]
It suffices to show that for any small $0<\epsilon_1<1$, \begin{equation} \label{expRel} \beta \le \alpha+ O(\epsilon_1)+\log_{1/\delta} C_{\epsilon, \epsilon_1}, \end{equation} since if we choose a sufficiently small $\epsilon_1$ then $O(\epsilon_1)$ is bounded by $\epsilon$, which can be absorbed in an $\epsilon$-loss in the estimate $\mathcal{SQ}(p \to p;\alpha)$.
The dependence on $\epsilon$ and $\epsilon_1$ of the constant $C_{\epsilon, \epsilon_1}$ in the above inequality comes from employing $\mathcal{SQ}(p \times p \times p \to p; \alpha)$. Especially $\epsilon_1$ is related to the transversality of trilinear estimates below.
We may assume that $\delta>0$ is sufficiently small, say $0< \delta \le \delta_0$, because the desired estimate is trivially obtained, otherwise, where $\delta_0$ is a small parameter to be fixed later in the proof. Let \( 1 > \gamma_1 > \gamma_2\ge \delta_0^{\epsilon_1/2} \) be dyadic multiples of $\delta^{1/2}$, the value of which is to be fixed later in the argument. By Lemma \ref{lem:LinTriLcompare} and the embedding $\ell^{p} \subset \ell^{\infty}$, \begin{equation} \label{LpTricom} \begin{split}
\| f \|_p^p &\lesssim \sum_{\Omega_1 \in \mathbf
\Omega(\gamma_1)} \| f_{\Omega_1} \|_p^p + \gamma_1^{-p}
\sum_{\Omega_2 \in \mathbf \Omega(\gamma_2)} \| f_{\Omega_2} \|_p^p \\ &\qquad\qquad + \gamma_2^{-50p} \sum_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf \Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big\| \Big( \prod_{i=1}^{3} |f_{\Omega_i}| \Big)^{1/3} \Big\|_p^p, \end{split} \end{equation} where $\Omega_j$ is taken such that if $\theta$ intersects the interior of $\Omega_j$ then $\theta \subset \Omega_j$ for $j=1,2$.
Consider the first and second summation in the right-hand side of \eqref{LpTricom}. For convenience we denote by $\Omega = \Omega_j$ and $\gamma =\gamma_j$. Using Lorentz rescaling we will show \begin{equation} \label{ppsc}
\| f_\Omega \|_p \le C_\epsilon (\delta / \gamma^2)^{-\beta-\epsilon} \| S_\delta f_{\Omega} \|_p. \end{equation} By rotating the unit circle we may assume that $\Omega$ is centered at $(1,0)$. Let $T : \mathbb R^3 \to \mathbb R^3$ be a linear transformation so that \[ T(e_1,1) = (e_1,1),\quad T(-e_1,1) = \gamma^{2}(-e_1,1),\quad T(e_2,0) = \gamma(e_2,0) \]
where $\{e_1, e_2\}$ is a standard basis in $\mathbb R^2$. Then $\hat f_\Omega \circ T$ is supported in $\Gamma_{\delta/\gamma^2}$. From the equation $\widehat{f_\Omega \circ T^{-t}} = |\det T| \hat f_\Omega \circ T$, it follows that $\widehat{f_\Omega \circ T^{-t}}$ has support in $\Gamma_{\delta/\gamma^2}$ where $T^{-t}$ is the inverse transpose of $T$. Since $\gamma \ge \delta^{1/2}$, by \eqref{asshy} it follows that \begin{equation} \label{bsc}
\|f_\Omega \circ T^{-t}\|_p \lesssim (\delta/\gamma^2)^{-\beta-\epsilon} \|S_{\delta/\gamma^2} (f_\Omega \circ T^{-t}) \|_p. \end{equation} By definition, \[
S_{\delta/\gamma^2} (f_\Omega \circ T^{-t}) = \Big( \sum_{\Upsilon \in \mathbf \Pi_{\delta/\gamma^2}} \big|(f_\Omega \circ T^{-t}) \ast \Xi_{\Upsilon} \big|^2 \Big)^{1/2}. \] From $\ \hat \Xi_\Upsilon \circ T^{-1}= \hat \Xi_{T(\Upsilon)}$, it follows that \(
\big((f_\Omega \circ T^{-t}) \ast \Xi_{\Upsilon} \big)\sphat =|\det T|(\hat f_\Omega \circ T) \hat \Xi_{\Upsilon}
= |\det T|(\hat f_\Omega \hat \Xi_{T(\Upsilon)} ) \circ T. \) Thus, by taking the inverse Fourier transform, \[ (f_\Omega \circ T^{-t}) \ast \Xi_{\Upsilon} = (f_\Omega \ast \Xi_{T(\Upsilon)}) \circ T^{-t}. \] Since $f_\Omega \ast \Xi_{T(\Upsilon)}$ has Fourier support in $T(\Upsilon)$ which is a sector of size $1 \times \delta \times C\delta^{1/2}$ in $\Gamma_{\delta}$, we have \[
S_{\delta/\gamma^2} (f_\Omega \circ T^{-t}) = \Big( \sum_{\Upsilon \in \mathbf \Pi_{\delta/\gamma^2}} |(f_\Omega \ast \Xi_{T(\Upsilon)}) \circ T^{-t} |^2 \Big)^{1/2} = (S_{\delta} f_\Omega) \circ T^{-t}. \] We substitute this in \eqref{bsc} and remove $T^{-t}$ by changing variables. Then we obtain \eqref{ppsc}.
By \eqref{ppsc} we have \[
\sum_{\Omega \in \mathbf \Omega(\gamma)} \| f_{\Omega} \|_p^p \le C_\epsilon (\delta /\gamma^{2})^{-p\beta-p\epsilon} \sum_{\Omega \in \mathbf \Omega(\gamma)} \| S_\delta f_{\Omega} \|_p^p. \] Since we can decompose $f_\Omega = \sum_{\Theta \in \mathbf \Pi_\delta: \theta \subset \Omega} f \ast \Xi_\Theta$, we have that for $p \ge 2$, \begin{align*}
\sum_{\Omega \in \mathbf \Omega(\gamma)} \| S_\delta f_{\Omega} \|_p^p &= \sum_{\Omega \in \mathbf \Omega(\gamma)} \int \Big( \sum_{\Theta \in \mathbf \Pi_\delta: \theta \subset \Omega} | f \ast \Xi_\Theta|^2 \Big)^{p/2} \\
&\le \int \Big(\sum_{\Omega \in \mathbf \Omega(\gamma)} \sum_{\Theta \in \mathbf \Pi_\delta : \theta \subset \Omega} | f \ast \Xi_\Theta|^2 \Big)^{p/2} \\
&\le \|S_\delta f\|_p^p. \end{align*} Inserting this into the previous estimate, we obtain \begin{equation} \label{indP}
\sum_{\Omega \in \mathbf \Omega(\gamma)} \| f_{\Omega} \|_p^p \le C_\epsilon (\delta /\gamma^{2})^{-p\beta-p\epsilon} \|S_\delta f\|_p^p. \end{equation}
Consider the trilinear part in \eqref{LpTricom}. By applying $\mathcal{SQ}(p\times p \times p \rightarrow p; \alpha)$, \begin{equation} \label{Tripart} \sum_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf \Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big\| \Big( \prod_{i=1}^{3} |f_{\Omega_i}| \Big)^{1/3} \Big\|_p^p \le C_{\epsilon, \gamma_2} \gamma_2^{-3} \delta^{-p\alpha-p\epsilon} \|S_\delta f \|_p^p. \end{equation}
We substitute \eqref{indP} and \eqref{Tripart} in \eqref{LpTricom}. Then, \[
\|f\|_p \le (C_\epsilon \gamma_1^{2(\beta+\epsilon)} \delta^{-\beta-\epsilon} + C_\epsilon \gamma_1^{-1} \gamma_{2}^{2(\beta+\epsilon)} \delta^{-\beta-\epsilon} + C_{\epsilon, \gamma_2}\gamma_2^{-60} \delta^{-\alpha-\epsilon}) \|S_\delta f \|_p. \] So, by the assumption for $\beta$, \begin{align*} \delta^{-\beta} &\le (C_\epsilon \gamma_1^{2(\beta+\epsilon)} + C_\epsilon \gamma_1^{-1} \gamma_{2}^{2(\beta+\epsilon)} )\delta^{-\beta} + C_{\epsilon,\gamma_2}\gamma_2^{-60} \delta^{-\alpha}. \end{align*} We now choose $\gamma_1, \gamma_2$ and $\delta_0$ so that $C_\epsilon\gamma_1^{2(\beta+\epsilon)} \le 1/4$, $ C_\epsilon \gamma_1^{-1} \gamma_{2}^{2(\beta+\epsilon)} \le 1/4$ and $1> \gamma_1 > \gamma_2 \ge \delta_0^{{\epsilon_1}/{2}}$. Then $\delta^{-\beta} \le C_{\epsilon,\gamma_2} \gamma_2^{-60}\delta^{-\alpha} \le C_{\epsilon, \epsilon_1} \delta^{-30\epsilon_1 - \alpha}$, which means \eqref{expRel}. \end{proof}
\section{Decoupling norms} \label{sec:decoupling} In this section, we will show that the decoupling norm for the cone essentially satisfies the reverse H\"older inequality, and apply this to the interpolation between decoupling estimates. In fact, our interpolation lemmas can be obtained by using known interpolation theorems, so our proof is an alternative one (which is actually weaker). This section is obtained by modifying the arguments for paraboloid decoupling in \cite{bourgain2015proof}*{section 3}. For further discussion for decoupling, see \cite{wolff2000local}, \cite{laba2002local}, \cite{garrigos2008improvements}, \cite{garrigos2010mixed}.
Let $f$ be a function having Fourier support in $\Gamma_\delta$. For such functions, the norm $\| \cdot \|_{p,\delta}$, $1 \le p \le \infty$ is defined by \[
\|f\|_{p,\delta} := \Big( \sum_{\Theta \in \mathbf \Pi_{\delta}} \| f_{\Theta} \|^2_{p} \Big)^{1/2}. \]
It is easy to see that if $m$ is a positive real number then $\|f\|_{p,m\delta} \le C_m \|f\|_{p,\delta}$ by Minkowski's inequality.
We first introduce a wave packet decomposition, which is a fundamental tool for studying Fourier restriction type problems. To decompose $f$ both in frequency space and in spatial space, we define standard bump functions. Let $\phi(x) := (1+|x|^2)^{-M/2}$ where $M$ is a sufficiently large exponent. Let $\psi: \mathbb R^{3}\rightarrow \mathbb R$ be a nonnegative Schwartz function such that $\psi$ is strictly positive in the unit ball $B(0,1)$, Fourier supported in a ball $B(0,1/4)$ and $\sum_{k \in \mathbb Z^3}\psi(x-k) = 1$. For an ellipsoid $E$, we define $a_E$ to be an affine map from the unit ball $B(0,1)$ to $E$. Let $\phi_E = \phi \circ a_E^{-1}$ and $\psi_E = \psi \circ a_E^{-1}$.
\begin{lem} \label{lem:wavepack} Suppose that $f$ is Fourier supported in $\Gamma_\delta$. Then there exists a decomposition \begin{equation} \label{waveDec} f(x) = \sum_{\Theta \in \mathbf \Pi_\delta}\sum_{\pi \in \mathbf P_\Theta} h_{\pi} f_{\pi}(x), \end{equation} where $\mathbf P_\Theta = \mathbf P_\Theta(f)$ is a family of separated rectangles $\pi$ of size $\delta^{-1} \times \delta^{-1/2} \times 1$ with its dual $\pi^* = \Theta$, such that the coefficients $h_\pi > 0$ have the property that \begin{equation} \label{lp_sum}
\Big( \sum_{\Theta \in \mathbf \Pi_\delta}\Big( \sum_{\pi \in \mathbf P_\Theta} |\pi| h_\pi^p \Big)^{2/p} \Big)^{1/2} \sim \|f\|_{p,\delta} \end{equation} for all $1 \le p < \infty$ and \begin{equation} \label{l_infty}
\Big( \sum_{\Theta \in \mathbf \Pi_\delta} \sup_{\pi \in \mathbf P_\Theta} h_\pi^2 \Big)^{1/2} \sim \|f\|_{\infty,\delta}, \end{equation} and the functions $f_\pi$ obey \begin{equation} \label{fourierSupp} \supp \hat f_\pi \subset 4\Theta \end{equation} and \begin{equation} \label{ess_supp}
|f_{\pi}(x)| \lesssim \phi_\pi(x). \end{equation} \end{lem}
\begin{proof} For each $\Theta \in \mathbf \Pi_\delta$, we partition $\mathbb R^3$ into the dual rectangles $\pi$ of $\Theta$. For each $\pi$, we define a coefficient $h_\pi$ and a function $f_\pi$ by \[
h_{\pi} = \frac{1}{|\pi|}\int |f_\Theta(x)| \psi_\pi(x) dx \qquad \text{and} \qquad
f_\pi(x) = h_\pi^{-1} \psi_\pi(x) f_\Theta (x). \]
Then, \eqref{fourierSupp} immediately follows, and a direct calculation gives \eqref{waveDec}. By Bernstein's inequality, \begin{equation*}
|\psi_{\pi}(x) f_\Theta (x)| \lesssim h_\pi, \end{equation*}
so we have $|f_\pi(x)| \lesssim |\psi_\pi(x)|$. This implies \eqref{ess_supp}.
By H\"older's inequality we have $h_\pi \lesssim \Big( \frac{1}{|\pi|}\int |f_\Theta(x)|^p \psi_\pi(x) dx \Big)^{1/p}$, and using Bernstein's lemma we can see that $\Big( \frac{1}{|\pi|}\int |f_\Theta(x)|^p \psi_\pi(x) dx \Big)^{1/p} \lesssim h_\pi$. So, we have \[
\sum_{\pi \in \mathbf P_\Theta} |\pi| h_\pi^p \sim \sum_{\pi \in \mathbf P_\Theta} \int |f_\Theta|^p \psi_\pi
= \|f_\Theta\|_p^p, \]
from which \eqref{lp_sum} follows. Similarly, we have that $h_\pi \sim \sup_{x \in \pi} |f_\Theta(x)|$ and that $\sup_{\pi \in \mathbf P_\Theta} h_\pi \sim \|f_\Theta\|_\infty$. Thus \eqref{l_infty} follows.
\end{proof}
Now we study the reverse H\"older inequality for the decoupling norm. We say that $f$ is a \textit{balanced function} if $f$ is a function of the form \eqref{waveDec} with $h_\pi=1$ such that $f$ satisfies \eqref{fourierSupp}, \eqref{ess_supp} and a property that for any $\Theta, \Theta' \in \mathbf \Pi_\delta$, the nonempty $\mathbf P_{\Theta}(f), \mathbf P_{\Theta'}(f)$ have comparable cardinality. These kinds of functions were first explicitly used by Wolff \cite{wolff2000local}.
\begin{lem} \label{lem:revHol} Suppose that $1 \le p,q,r \le \infty$ and that for some $\theta \in (0,1)$, \[ \frac{1}{r} = \frac{1-\theta}{q} + \frac{\theta}{p}. \] Then \[
\| f \|_{r,\delta} \sim \|f\|_{q, \delta}^{1-\theta} \|f\|_{p, \delta}^{\theta}, \] for all balanced functions $f$. \end{lem}
\begin{proof} Since $f$ is a balanced function, there is a number $\kappa>0$ such that every nonempty $\mathbf P_\Theta(f)$ has cardinality comparable to $\kappa$. Let $\nu$ be the number of nonempty $\mathbf P_\Theta(f)$. Then by \eqref{lp_sum} and \eqref{l_infty}, one has \[
\|f\|_{r,\delta} \sim \nu^{1/2} \kappa^{1/r} |\pi|^{1/r}
= \nu^{\frac{1-\theta}{2}} \kappa^{\frac{1-\theta}{q}} |\pi|^{\frac{1-\theta}{q}} \nu^{\frac{\theta}{2}} \kappa^{\frac{\theta}{p}} |\pi|^{\frac{\theta}{p}} \sim \|f\|_{q, \delta}^{1-\theta} \|f\|_{p, \delta}^{\theta}. \] \end{proof}
As an application we have the following interpolation lemma.
\begin{lem} \label{lem:interp} Let $2 \le p_1, p_2, q_1, q_2 \le \infty$. Assume that \begin{equation} \label{givenEstLL}
\| f \|_{q_1} \le A_1 \| f \|_{p_1,\delta},\qquad
\| f \|_{q_2} \le A_2 \| f \|_{p_2,\delta} \end{equation} for all $f$ with $\supp \hat f \subset \Gamma_\delta$. Suppose that for some $\theta \in (0,1)$, \[ \frac{1}{q} = \frac{1-\theta}{q_1} + \frac{\theta}{q_2}, \qquad \frac{1}{p} = \frac{1-\theta}{p_1} + \frac{\theta}{p_2}, \] and $2 \le p \le q \le \infty$. Then \begin{equation} \label{ineq:intp}
\| f \|_{q} \lesssim \delta^{-\varepsilon}A_1^{1-\theta} A_2^{\theta} \| f \|_{p,\delta} \end{equation} for all $f$ with $\supp \hat f \subset \Gamma_\delta$ and all $\varepsilon >0$. \end{lem}
\begin{proof} For localization we decompose $f = \sum_{k \in \delta^{-1} \mathbb Z^3} \psi_k f$ where $\psi_k := \psi(\delta(x-k))$. Then, \[
\|f\|_q^q \le \sum_{k' \in \delta^{-1} \mathbb Z^3} \Big\| \sum_{k \in \delta^{-1} \mathbb Z^3} \psi_k f \Big\|_{L^q(B(k',2\delta^{-1}))}^q. \] Since $\psi_k$ has rapid decay outside $B(k,\delta^{-1-\varepsilon})$, we have that if $x \in B(k',2\delta^{-1})$ then \[
\Big|\sum_{k \in \delta^{-1}\mathbb Z^3 \setminus B(k',2\delta^{-1-\varepsilon})} \psi_k(x) \Big| \le C_K \delta^{K} \]
for all $K>0$. Using this and a rough estimate $\|f\|_q \lesssim \delta^{-C} \|f\|_{p,\delta}$, we have that for any $\varepsilon>0$ and $K>0$, \[
\|f\|_q^q \le \sum_{k'} \Big\| \sum_{k \sim k'} \psi_k f \Big\|_{L^q(B(k',2\delta^{-1}))}^q + C_K\delta^{K}\|f\|_{p,\delta}^q, \]
where $k\sim k'$ means that $k \in B(k',2\delta^{-1-\varepsilon}) \cap \delta^{-1}\mathbb Z^{3}$. Since the number of $k \in \delta^{-1}\mathbb Z^3$ contained in $B(k',2\delta^{-1-\varepsilon})$ is $O(\delta^{3\varepsilon})$, we have \begin{align*}
\|f\|_q^q &\lesssim \delta^{-3\varepsilon q} \sum_{k'} \sum_{k \sim k'} \|\psi_k f \|_{L^q(B(k',2\delta^{-1}))}^q + C_K\delta^{K} \|f\|_{p,\delta}^q\\
&\lesssim \delta^{-3\varepsilon q} \sum_{k'} \sum_{k \sim k'} \|\psi_k f \|_q^q + C_K\delta^{K} \|f\|_{p,\delta}^q \\
&\lesssim \delta^{-3\varepsilon q-3\varepsilon} \sum_{k} \|\psi_k f \|_q^q + C_K\delta^{K}\|f\|_{p,\delta}^q. \end{align*} Since $p \le q$, we have that for any $\varepsilon >0$ and any $K > 0$, \[
\|f\|_q \lesssim \delta^{-C\varepsilon}\Big( \sum_{k} \| \psi_k f \|_q^{p} \Big)^{1/p} + C_K\delta^{K}\|f\|_{p,\delta}. \]
On the other hand, by Minkowski's inequality and $p \ge 2$ it follows that \[
\Big( \sum_{k} \| \psi_k f \|_{p,2\delta}^{p} \Big)^{1/p} \le \|f\|_{p,2\delta} \lesssim \|f\|_{p,\delta}. \] Thus, by the above two estimates the proof of \eqref{ineq:intp} is reduced to showing \begin{equation*}
\| \psi_k f \|_q \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta} \| \psi_k f \|_{p,2\delta}. \end{equation*}
By translation invariance it is enough to consider $\psi_0 f$. Let $g := \psi_0 f$. By normalization we may assume that $\|g\|_{p,2\delta} = 1$. Then it is reduced to showing \begin{equation} \label{redForm}
\|g\|_q \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta}. \end{equation} Since $\psi_0$ has fast decay outside $B(0,C\delta^{-1})$, we have \(
\|g\|_q \le \|g\|_{L^q(B(0,\delta^{-1-\varepsilon}))} + C_K\delta^{K} \) for all $\varepsilon>0$ and $K>0$. Since $\psi_0$ has Fourier support in $B(0,\delta/2)$, $\widehat g$ is supported in $\Gamma_{2\delta}$. By Lemma \ref{lem:wavepack}, it is decomposed into \[ g(x) = \sum_{\Theta \in \mathbf \Pi_{2\delta}}\sum_{\pi \in \mathbf P_\Theta} h_{\pi} g_{\pi}(x). \] We first remove some minor $\pi$'s. By \eqref{ess_supp}, we can eliminate $\pi$ that is disjoint from $B(0, C\delta^{-1-\varepsilon})$. Let $\mathring{\mathbf P}$ be the collection of $\pi$ intersecting $B(0, C\delta^{-1-\varepsilon})$. Then $\# \mathring{\mathbf P} \lesssim \delta^{-2-3\varepsilon}$. The rectangles $\pi$ with $h_\pi = O(\delta^{500})$ can be also eliminated, since \[
\Big\| \sum_{\pi \in \mathring{\mathbf P} : 0< h_\pi \lesssim \delta^{500}} h_\pi g_\pi \Big\|_q
\lesssim \delta^{500} |\pi| \# \mathring{\mathbf P} \lesssim \delta^{400}. \]
We group the rectangles $\pi$ by value of coefficients $h_\pi$. Since $\|g\|_{p,2\delta} =1$, from \eqref{lp_sum} we can see that $h_\pi \lesssim 1$. For any dyadic number $\delta^{500} \lesssim h \lesssim 1$ we define \( \mathring{\mathbf P}_h := \{ \pi \in \mathring{\mathbf P}: h \le h_\pi < 2h \}. \) It is classified into $\mathring{\mathbf P}_{h, \Theta} := \mathring{\mathbf P}_h \cap \mathbf P_\Theta$, and let \[ \mathring{\mathbf P}_{h}^k := \bigcup_{k \le \# \mathring{\mathbf P}_{h, \Theta} < 2k} \mathring{\mathbf P}_{h, \Theta} \] for dyadic numbers $1 \le k \lesssim \delta^{-2}$. Since there are $O(\log \delta^{-1})$ dyadic numbers $\delta^{500} \lesssim h \lesssim 1$ and $1 \le k \lesssim \delta^{-2}$, by pigeonholing there exist $h$ and $k$ so that \[
\Big\|
\sum_{\delta^{500} \le h \lesssim 1} h \sum_{1 \le k \lesssim \delta^{-2}} \sum_{\pi \in \mathring{\mathbf P}_h^k} g_\pi \Big\|_q \lesssim (\log \delta^{-1})^2 h \Big\|\sum_{\pi \in \mathring{\mathbf P}_h^k} g_\pi \Big\|_q . \] Let $\tilde g := \sum_{\pi \in \mathring{\mathbf P}_h^k} g_\pi $. Then from these estimates, one has \[
\|g\|_q \lesssim \delta^{-\varepsilon} h \|\tilde g\|_q +\delta^{400}. \] Since $\tilde g$ is a balanced function, from H\"older's inequality, \eqref{givenEstLL} and Lemma \ref{lem:revHol} it follows that \[
\|\tilde g\|_q \le \|\tilde g\|_{q_1}^{1-\theta} \|\tilde g\|_{q_2}^{\theta}
\le A_1^{1-\theta} A_2^{\theta}\|\tilde g\|_{p_1,2\delta}^{1-\theta}\|\tilde g\|_{p_2,2\delta}^{\theta} \lesssim A_1^{1-\theta} A_2^{\theta} \|\tilde g\|_{p,2\delta}, \] and by \eqref{lp_sum}, \[
h\| \tilde g \|_{p,2\delta} \lesssim \| g \|_{p,2\delta}. \] Therefore, by combining these estimates we obtain \eqref{redForm}. \end{proof}
\begin{remark} \label{rem:Alt_inter} By using known interpolation theorems we can obtain Lemma \ref{lem:interp} without $\varepsilon$-losses. Indeed, since $f$ in Lemma \ref{lem:interp} has the Fourier support condition, we are not able to apply interpolation theorems directly. To avoid this, we define a linear operator $T$ by \[ T \mathbf f = \sum_{j \in J} f_j \ast \Xi_{\Theta_j} \] for $\mathbf f = \{ f_j\}_{j \in J}$, where $J$ is an index set of $\mathbf\Pi_\delta$. Then the inequality \(
\|f\|_q \le A \|f\|_{p,\delta} \) in Lemma \ref{lem:interp} is equivalent to \(
\| T \mathbf f \|_q \le A \| \mathbf f\|_{\ell^2(L^p)}, \) where $\ell^2(L^p)$ is the space of $L^p$-valued $\ell^2$-sequences. Since the functions $\{f_j\}_{j \in J}$ are not subject to the Fourier support condition, by applying the complex interpolation theorem we get Lemma \ref{lem:interp} without $\varepsilon$-losses. \end{remark}
\
To prove Theorem \ref{thm:sqfEst} we need a trilinear interpolation lemma. Before stating the lemma let us define a notation $\underline\prod$, which will be repeatedly used in the remaining parts of this paper. For $A_1, A_2, A_3 \in \mathbb C$, let $\underline\prod A_i$ denote the geometric mean of their absolute values; that is, \[
\underline\prod A_i := \Big( \prod_{i=1}^{3} |A_i| \Big)^{1/3}. \] From simple calculations it is easy to see the following. If $A$, $A_i$ and $B_i$ are complex numbers for $i=1,2,3$, then \begin{align*}
\underline\prod A &= |A|, \\ \underline\prod CA_i &= C \underline\prod A_i \qquad \text{for $C \ge 0$},\\ \underline\prod (A_i B_i) &= \underline\prod A_i \underline\prod B_i, \\ \underline\prod A_i^{\alpha} &= \Big( \underline\prod A_i \Big)^{\alpha} \qquad \text{for $\alpha \in \mathbb R$}. \end{align*}
Also, if all $A_{i,\Delta} \in \mathbb C$ and $f_i \in L^p$, then by H\"older's inequality it follows that for $1 \le p \le \infty$, \begin{align} \label{Holder1}
\Big( \sum_{\Delta} \underline\prod A_{i,\Delta}^{p} \Big)^{1/p} &\le \underline\prod \Big( \sum_{\Delta} |A_{i,\Delta}|^p \Big)^{1/p}, \\ \label{Holder2}
\Big\| \underline\prod f_i \Big\|_p &\le \underline\prod \|f_i\|_p. \end{align}
Now we state our trilinear interpolation lemma.
\begin{lem} \label{lem:MulInterpolation} Let $2 \le p_1,p_2,q_1,q_2 \le \infty$. Assume that \begin{equation} \label{givenEst}
\Big\| \underline\prod f_i \Big\|_{q_1} \le A_1 \underline\prod \| f_i \|_{p_1,\delta},\qquad
\Big\|\underline\prod f_i \Big\|_{q_2} \le A_2 \underline\prod \| f_i \|_{p_2,\delta} \end{equation} for all $f_i$, $i=1,2,3,$ with $\supp \hat f_i \subset \Gamma_\delta$. Suppose that for some $\theta \in (0,1)$, \[ \frac{1}{q} = \frac{1-\theta}{q_1} + \frac{\theta}{q_2}, \qquad \frac{1}{p} = \frac{1-\theta}{p_1} + \frac{\theta}{p_2} \] and $2 \le p \le q \le \infty$. Then \[
\Big\| \underline\prod f_i \Big\|_{q} \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta} \underline\prod \| f_i \|_{p,\delta} \] for all $f_i$, $i=1,2,3,$ with $\supp \hat f_i \subset \Gamma_\delta$ and all $\varepsilon >0$. \end{lem}
\begin{proof} The proof is similar to that of Lemma \ref{lem:interp}.
We decompose $\underline \prod f_i = \sum_{k \in \delta^{-1} \mathbb Z^3} \psi_k \underline \prod f_i$ where $\psi_k := \psi(\delta(x-k))$. We can reduce it in an analogous manner to the proof of Lemma \ref{lem:interp}. By localization, it suffices to show that \begin{equation} \label{mulinterpol}
\Big\| \underline\prod g_i \Big\|_q \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta} \end{equation}
for all $g_i := \psi_0 f_i$ with $\|g_i\|_{p,2\delta} = 1$. Some minor portions can be removed as in the proof of Lemma \ref{lem:interp}. Since $\psi_0$ decays rapidly outside $B(0,C\delta^{-1})$, we have \(
\| \underline\prod g_i\|_q \le \| \underline\prod g_i\|_{L^q(B(0,\delta^{-1-\varepsilon}))} + C_K\delta^{K} \) for all $\varepsilon>0$ and $K>0$. Since $g_i$ is Fourier supported in $\Gamma_{2\delta}$, by Lemma \ref{lem:wavepack}, \[ g_i(x) = \sum_{\Theta_i \in \mathbf \Pi_{\delta}} \sum_{\pi_i \in \mathbf P_{\Theta_i}} h_{\pi_i} g_{\pi_i}(x). \] By \eqref{ess_supp}, we can eliminate $\pi_i$ that is disjoint from $B(0, C\delta^{-1-\varepsilon})$, so we can restrict $\mathbf P_i$ to the collection $\mathring{\mathbf P}_i$ of $\pi_i$ intersecting $B(0, C\delta^{-1-\varepsilon})$. We can also remove $\pi_i$ with $0< h_{\pi_i} \lesssim \delta^{500}$.
For dyadic $\delta^{500} \lesssim h_i \lesssim 1$, we define \( \mathring{\mathbf P}_{h_i}:= \{ \pi \in \mathring{\mathbf P}_i: h_i \le h_\pi < 2h_i \}. \) Let \( \mathring{\mathbf P}_{\Theta_i}(h_i) := \mathring{\mathbf P}_{h_i} \cap \mathbf P_{\Theta_i} \), and for any dyadic number $1 \le k_i \lesssim \delta^{-2}$ we define \[ \mathring{\mathbf P}_{i}(h_i,k_i) = \bigcup_{k_i \le \# \mathring{\mathbf P}_{\Theta_i}(h_i) < 2k_i} \mathring{\mathbf P}_{\Theta_i}(h_i). \] Then, we have \[
\Big\| \underline\prod g_i \Big\|_q \lesssim \Big\| \underline\prod \Big( \sum_{\delta^{500} \lesssim h_i \lesssim 1} h_i \sum_{1 \lesssim k_i \lesssim \delta^{-2}} \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) \Big\|_q + \delta^{100}. \] We write as \[ \prod_{i=1}^{3} \Big( \sum_{h_i} h_i \sum_{k_i} \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) = \sum_{h_1,h_2,h_3} \sum_{k_1,k_2,k_3} \prod_{i=1}^{3} \Big( h_i \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big). \]
By dyadic pigeonholing, there exist dyadic numbers $h_i$ and $k_i$, $i=1,2,3,$ so that \[
\Big\|
\underline \prod \Big( \sum_{\delta^{500} \lesssim h_i \lesssim 1} h_i \sum_{1 \lesssim k_i \lesssim \delta^{-2}} \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) \Big\|_q \lesssim (\log \delta^{-1})^2 \Big( \underline\prod h_i \Big) \Big\|\underline\prod \Big( \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) \Big\|_q . \] Let $\tilde g_i := \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} $. Then from these estimates we have \[
\Big\| \underline\prod g_i \Big\|_q \lesssim \delta^{-\varepsilon} \Big( \underline\prod h_i \Big) \Big\| \underline\prod \tilde g_i \Big\|_q +\delta^{100}. \] Since $\tilde g_i$ are balanced functions, from H\"older's inequality, \eqref{givenEst} and Lemma \ref{lem:revHol} it follows that \[
\Big\|\underline\prod \tilde g_i \Big\|_q \le \Big\|\underline\prod \tilde g_i \Big\|_{q_1}^{1-\theta} \Big\|\underline\prod \tilde g_i \Big\|_{q_2}^{\theta}
\le A_1^{1-\theta} A_2^{\theta}\underline\prod \|\tilde g_i\|_{p_1,2\delta}^{1-\theta} \underline\prod \|\tilde g_i\|_{p_2,2\delta}^{\theta} \lesssim A_1^{1-\theta} A_2^{\theta} \delta^{-\varepsilon} \underline\prod \| \tilde g_i\|_{p,2\delta} \] and by \eqref{lp_sum}, \[
h_i \| \tilde g_i \|_{p,2\delta} \lesssim \| g_i \|_{p,2\delta}. \] Therefore, these estimates yield \eqref{mulinterpol}. \end{proof} \begin{remark} By using analogous methods to Remark \ref{rem:Alt_inter}, we can obtain Lemma \ref{lem:MulInterpolation} without $\epsilon$-losses by known multilinear interpolation theorems, see, e.g., \cite{bergh1976interpolation}. \end{remark}
\section{Proof of Theorem \ref{thm:sqfEst}.}
This section is devoted to the proof of $\mathcal{SQ}(4 \to 4; 1/16)$. By Proposition \ref{prop:MSQmeanSQ} this follows from the trilinear square function estimate $\mathcal{SQ}(4 \times 4 \times 4 \to 4;1/16)$. To prove this we will utilize the following two theorems. The first one is the multilinear restriction theorem due to Bennett, Carbery and Tao \cite{bennett2006multilinear}.
\begin{thm}[Bennett--Carbery--Tao \cite{bennett2006multilinear}] \label{thm:MRT} Let $f_i$, $i=1,2,3,$ be supported in $\Gamma^{\Omega_i}$. Suppose that $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are $\nu$-transverse.
If $R \gg \nu^{-1}$ then for any $\epsilon >0$ and any ball $Q_R$ of radius $R$, \begin{equation} \label{mrt}
\Big\| \underline\prod \widehat{f_id\sigma_i} \Big\|_{L^3(Q_R)} \le C_\epsilon R^{\epsilon} \underline\prod \| f_i \|_{2}, \end{equation} where $d\sigma_i$ is the induced Lebesgue measure on $\Gamma^{\Omega_i}$.
\end{thm}
Note that if the restriction operator $\mathfrak R$ is defined as the restriction $\mathfrak R f =\hat f \big|_{\Gamma}$ to $\Gamma$ of the Fourier transform $\hat f$, then the \textit{extension operator} $\widehat{f d\sigma}$ is its adjoint operator $\mathfrak R^*f$.
\
The second one is the $\ell^2$ decoupling theorem due to Bourgain and Demeter \cite{bourgain2015proof}.
\begin{thm}[Bourgain--Demeter \cite{bourgain2015proof}]\label{thm:Decoupling} Suppose that the Fourier support of $f$ is contained in $\Gamma_\delta$. Then for any $\epsilon >0$, \begin{equation} \label{FrDecp}
\| f \|_{6} \le C_\epsilon \delta^{-\epsilon}\Big(\sum_{\Theta \in \mathbf \Pi_\delta} \| f_\Theta \|_6^2 \Big)^{1/2}. \end{equation} \end{thm}
To deal with local estimates we define local norms as follows: \[
\|f\|_{L^p(\psi_B)} := \|f \psi_B \|_p, \] and for any function $f$ with $\supp \hat f \subset \Gamma_\delta$, \[
\|f\|_{p,\delta,B} := \Big( \sum_{\Theta \in \mathbf \Pi_{\delta}} \| f_{\Theta} \|^2_{L^p(\psi_{B})} \Big)^{1/2}. \] Note that if $B$ is a ball of radius $\ge 2/\sqrt\delta$ then for $p\ge 2$, \begin{equation} \label{locDecNorm}
\| f \psi_{B} \|_{p,\delta} \lesssim \| f \|_{p,\delta,B}. \end{equation} Indeed, we decompose the Fourier transform of $(f\psi_B)\ast \Xi_\Theta$ as follows: \[ (\hat f \ast \hat \psi_{B}) \hat \Xi_\Theta = ((\hat f \hat \Xi_{C\Theta} ) \ast \hat \psi_{B}) \hat \Xi_\Theta + ((\hat f (1- \hat \Xi_{C\Theta}) ) \ast \hat \psi_{B}) \hat \Xi_\Theta. \] Consider the last term of the above equation. We write as \[ ((\hat f (1- \hat \Xi_{C\Theta}) ) \ast \hat \psi_{B})(x) \hat \Xi_\Theta(x) = \int \hat f(y) (1- \hat \Xi_{C\Theta})(y) \hat \psi_{B}(x-y) \hat \Xi_\Theta(x) dy. \]
For $y \in \Gamma_\delta \setminus C\Theta$ and $x \in \Theta$ we have $|x-y| \ge \sqrt\delta$, and $\hat \psi_{B}$ is supported in a ball of radius $\le \sqrt\delta/2$ with center 0. By considering supports we can see that the above equation is zero. Thus, by Fourier inversion, \[ (f \psi_{B}) \ast \Xi_\Theta = ((f \ast \Xi_{C\Theta}) \psi_{B}) \ast \Xi_\Theta. \] By this equation, Young's inequality and the triangle inequality, we have \begin{align*}
\| (f \psi_{B}) \ast \Xi_\Theta \|_p
\lesssim \|(f \ast \Xi_{C\Theta}) \psi_{B} \|_p
\lesssim \sum_{\Theta' \subset C\Theta}\|(f \ast \Xi_{\Theta'}) \psi_{B} \|_p. \end{align*} From this we can obtain \eqref{locDecNorm}.
\subsection{} We will deduce a trilinear decoupling estimate from Theorem \ref{thm:MRT} and Theorem \ref{thm:Decoupling}. By combining Theorem \ref{thm:MRT} with a localization argument and a slicing argument, it follows that \[
\Big\| \underline \prod f_i \Big\|_{3}
\le C_\epsilon \delta^{1/2 -\epsilon} \underline\prod \|f_i\|_{2} \] for all $f_i$ with $\supp \hat f_i \subset \Gamma_\delta^{\Omega_i}$ (for the details, see \cite{bennett2006multilinear}, \cite{lee2012cone}, \cite{tao1998bilinear}). By orthogonality, if $f$ is a function with $\supp \hat f \subset \Gamma_\delta$, then \[
\| f \|_2 \sim \Big( \sum_{\Theta \in \mathbf \Pi_{\delta}} \| f_{\Theta} \|_2^2 \Big)^{1/2} = \| f\|_{2,\delta}. \] Thus, we have \begin{equation*}
\Big\| \underline \prod f_i \Big\|_{3}
\le C_\epsilon \delta^{1/2-\epsilon} \underline\prod \|f_i\|_{2,\delta}. \end{equation*} On the other hand, from \eqref{FrDecp} and H\"older's inequality we have \[
\Big\| \underline \prod f_i \Big\|_{6}
\le C_\epsilon \delta^{-\epsilon} \underline\prod \|f_i\|_{6,\delta}. \]
We interpolate these two estimates by Lemma \ref{lem:MulInterpolation}. Then, \[
\Big\| \underline \prod f_i \Big\|_{4}
\le C_\epsilon \delta^{1/4-\epsilon}\underline\prod \|f_i\|_{3,\delta}. \]
By H\"older's inequality one has $\|f_i\|_{3,\delta} \le \|f_i\|_{4,\delta}^{2/3} \|f_i\|_{2,\delta}^{1/3}$. Inserting this into the above we obtain
\begin{equation} \label{mainEq}
\Big\| \underline \prod f_i \Big\|_{4}
\le C_\epsilon \delta^{1/4-\epsilon} \Big(\underline\prod \|f_i\|_{4,\delta}\Big)^{2/3} \Big( \underline\prod \|f_i\|_{2,\delta} \Big)^{1/3} . \end{equation}
\subsection{} Set $R=\delta^{-1}$. We take a covering $\{\Delta\}$ of $\mathbb R^3$ by finitely overlapping $2R^{1/2}$-balls. We apply the estimate \eqref{mainEq} to $f_i\psi_{\Delta}$. Since the Fourier support of $f_i \psi_\Delta$ is in $\Gamma_{2\sqrt\delta}$, by \eqref{mainEq} and \eqref{locDecNorm} we obtain \[
\Big\| \underline \prod f_i \Big\|_{L^4(\Delta)}
\le C_\epsilon R^{-1/8 +\epsilon/2} \Big(\underline\prod \|f_i\|_{4,\sqrt\delta,\Delta}\Big)^{2/3} \Big( \underline\prod \|f_i\|_{2,\sqrt\delta,\Delta} \Big)^{1/3}. \] After taking the 4th power in the above, we sum over $\Delta$, and apply H\"older's inequality. Then, \[
\sum_{\Delta} \Big\| \underline \prod f_i \Big\|_{L^4(\Delta)}^4
\le C_\epsilon R^{-1/2+2\epsilon} \Big( \sum_{\Delta} \underline\prod \|f_i\|^4_{4,\sqrt\delta,\Delta} \Big)^{2/3} \Big( \sum_{\Delta} \underline\prod \|f_i\|^4_{2,\sqrt\delta,\Delta} \Big)^{1/3}. \] After taking the 4th root in the above, we apply \eqref{Holder1} to the right-hand sums. Then, \begin{align*}
\Big( \sum_{\Delta} \Big\| \underline \prod f_i \Big\|_{L^4(\Delta)}^4 \Big)^{1/4}
&\le C_\epsilon R^{-1/8+\epsilon/2} \Big( \underline\prod \Big( \sum_{\Delta}\|f_i\|^4_{4,\sqrt\delta,\Delta} \Big)^{1/4}\Big)^{2/3} \Big( \underline\prod \Big( \sum_{\Delta}\|f_i\|^4_{2,\sqrt\delta,\Delta} \Big)^{1/4}\Big)^{1/3}. \end{align*} We have \(
\big( \sum_{\Delta}\|f_i\|^4_{4,\sqrt\delta,\Delta} \big)^{1/4} \lesssim \|f_i\|_{4,\sqrt\delta} \) by Minkowski's inequality. Thus, from the above estimate it follows that \begin{equation} \label{eqn:AB}
\Big\| \underline \prod f_i \Big\|_4 \le C_\epsilon R^{-1/8 + \epsilon/2} \Big( \underline\prod A_i \Big)^{2/3} \Big( \underline\prod B_i \Big)^{1/3}, \end{equation} where \[
A_i := \|f_i\|_{4,\sqrt\delta}, \qquad B_i := \Big( \sum_{\Delta}\|f_i\|^4_{2,\sqrt\delta,\Delta} \Big)^{1/4}. \]
\subsection{}
We will show that \begin{equation}\label{L2part}
B_i \lesssim R^{3/8}\| S_\delta f_i\|_{4}. \end{equation}
By definition we write
\( \|f_i\|_{2,\sqrt\delta,\Delta}^2 = \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \|f_{i,\Upsilon}\|_{L^2(\psi_{\Delta})}^2. \) Since $f_{i,\Upsilon}$ is decomposed as \( f_{i,\Upsilon} = \sum_{\Theta \in \mathbf \Pi_{\delta} : \Theta \subset 2 \Upsilon} f_{i,\Theta}, \) we have \[
\|f_i\|_{2,\sqrt\delta,\Delta}^2 = \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \int \Big| \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon} f_{i,\Theta} \psi_\Delta \Big|^2. \] We see that the Fourier support of $f_{i,\Theta} \psi_\Delta$ is contained in the $\delta^{1/2}$-neighborhood of $\Theta$ which is a rectangular box of size $C\delta^{1/2} \times C\delta^{1/2} \times C$ for some constant $C>1$. So, by orthogonality it follows that \[
\|f_i\|_{2,\sqrt\delta,\Delta}^2
\lesssim \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon}\int | f_{i,\Theta} \psi_\Delta |^2
\lesssim \sum_{\Theta \in \mathbf \Pi_{\delta}} \int | f_{i,\Theta} \psi_\Delta |^2. \]
Since \( \sum_{\Theta \in \mathbf \Pi_{\delta}} \int | f_{i,\Theta} \psi_\Delta |^2 = \int \big( \sum_{\Theta \in \mathbf \Pi_{\delta}} | f_{i,\Theta} |^2 \big)^{\frac{1}{2} \times 2} \psi_\Delta^2 = \| S_\delta f_i \|_{L^2(\psi_\Delta)}^2, \) the above estimate may be written as \[
\|f_i\|_{2,\sqrt\delta,\Delta} \lesssim \| S_\delta f_i \|_{L^2(\psi_\Delta)}. \] By using this estimate and H\"older's inequality, \[
B_i \lesssim \Big( \sum_{\Delta}\|S_\delta f_i\|^4_{L^2(\psi_{\Delta})} \Big)^{1/4}
\lesssim R^{\frac{3}{2}\big( \frac{1}{2} - \frac{1}{4} \big)} \Big( \sum_{\Delta}\|S_\delta f_i\|^4_{L^4(\psi_{\Delta})} \Big)^{1/4} \lesssim R^{3/8}\| S_\delta f_i\|_{4}. \] Thus we obtain \eqref{L2part}.
\subsection{} Let $\alpha \ge 0$ be the best constant such that $\mathcal{SQ}(4 \times 4 \times 4 \rightarrow 4;\alpha)$, i.e., \[
\alpha = \inf_{\delta > 0} \Big( \log_{1/\delta} \sup_{f_i:\supp \hat f_i \subset \Gamma_\delta^{\Omega_i}} \frac{\| \underline\prod f_i \|_4} {\underline\prod \|S_\delta f_i \|_4} \Big). \] To prove $\mathcal{SQ}(4 \times 4 \times 4 \rightarrow 4;1/16)$ it is enough to show that for any $\epsilon>0$, \[ \alpha \le \frac{1}{16} +C\epsilon. \] By H\"older's inequality, \[
A_i \lesssim R^{\frac{1}{4} \big( \frac{1}{2} - \frac{1}{4} \big)} \Big( \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \| f_{i,\Upsilon} \|_{4}^{4} \Big)^{1/4}. \] By the definition of $\alpha$ and Proposition \ref{prop:MSQmeanSQ} one has $\mathcal{SQ}(4 \to 4; \alpha)$. By Lorentz rescaling, as in \eqref{ppsc}, \[
\| f_{i,\Upsilon} \|_4 \le C_\epsilon R^{\alpha/2 + \epsilon} \| S_\delta f_{i,\Upsilon} \|_4. \]
So, we have \[ A_i
\le C_\epsilon R^{\alpha/2 + \epsilon} R^{\frac{1}{4} \big( \frac{1}{2} - \frac{1}{4} \big)} \Big( \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \| S_\delta f_{i,\Upsilon} \|_4^4 \Big)^{1/4} . \] Since \begin{align*}
\sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \| S_\delta f_{i,\Upsilon} \|_4^4 &\lesssim \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \int \Big( \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon} |f_{i,\Theta}|^2 \Big)^{2} \\
&\lesssim \int \Big( \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon} |f_{i,\Theta}|^2 \Big)^{2} \\
&\lesssim \| S_\delta f_i \|_4^4, \end{align*} we obtain \begin{equation} \label{APart}
A_i \le C_\epsilon R^{1/16 +\alpha/2+\epsilon} \| S_\delta f_i \|_4. \end{equation}
Now we insert \eqref{APart} and \eqref{L2part} into \eqref{eqn:AB}. Then, \[
\Big\| \underline \prod f_i \Big\|_{L^4(Q_R)} \le C_\epsilon R^{1/24+\alpha/3 + C\epsilon} \underline\prod \|S_\delta f_i \|_4. \] Since $\alpha$ is the best constant such that $\mathcal{SQ}(4 \times 4 \times 4 \rightarrow 4;\alpha)$ holds, we have \( \alpha \le \frac{1}{24} + \frac{\alpha}{3} +C\epsilon. \) Therefore, \( \alpha \le \frac{1}{16} +C\epsilon. \) This completes the proof.
\section{Proof of Theorem \ref{thm:localSm}.}
In this section, Theorem \ref{thm:localSm} will be proved by using a corresponding trilinear estimate.
Let us define an operator $U_N$ by \[ U_N f(x,t) = \check \eta_N \ast e^{it \sqrt{-\Delta}} f(x) \]
where $\eta_N$ is a bump function supported in $\{\xi \in \mathbb R^2 : |\xi| \sim N\}$ and $\check \eta_N$ is the inverse Fourier transform of $\eta_N$. By the Littlewood--Paley decomposition, to prove Theorem \ref{thm:localSm} it suffices to show that the estimate \[
\|U_Nf \|_{L^{30/7}(\mathbb R^2 \times [1,2])} \le C_\epsilon N^{1/10+\epsilon} \|f\|_{10/3} \] holds for all $\epsilon>0$, all $N \ge 1$ and all $f \in L^{10/3}(\mathbb R^2)$.
For convenience of rescaling we reformulate $U_Nf$ as follows. By a linear transformation $J:(\xi_1,\xi_2,\xi_3) \mapsto (\zeta_1,\zeta_2,\zeta_3) = ({\xi_3-\xi_1},\xi_2,{\xi_3+\xi_1})$ which maps the cone $\{ (\xi_1,\xi_2,\pm \sqrt{\xi_1^2+\xi_2^2}) \}$ to the leaned cone $\{ (\zeta_1, \zeta_2, \zeta_2^2/\zeta_1) \}$, we redefine $U_Nf$ by \begin{equation} \label{eqn:U_N} U_N f(x,t) = \int e^{2\pi i (x \cdot \xi +t {\xi_2^2}/{\xi_1})} \hat f(\xi) \eta_{N}(\xi_1) \varphi(\xi_2/\xi_1) d\xi, \qquad \xi=(\xi_1,\xi_2), \end{equation} where $\varphi$ is a bump function supported in the unit interval. Then, ${U_Nf}$ has Fourier support in \[
\Gamma(N) := \{(\xi_1,\xi_2,\xi_2^2/\xi_1) : |\xi_1|\sim N,~ |\xi_2/\xi_1| \lesssim 1\}. \] The leaned cone $(\xi_1,\xi_2,\xi_2^2/\xi_1)$ is written as $\xi_1(1,\theta,\theta^2)$ where $\theta = \xi_2 /\xi_1$. So one may identify $\theta$ with an angular variable of the cone.
We say that the local smoothing estimate $\mathcal{LS}(p \to q; \alpha)$ holds if \begin{equation} \label{unf}
\| U_Nf \|_{L^q(\mathbb R^2 \times [1,2])} \le C_{\epsilon} N^{\alpha+\epsilon} \|f\|_{p} \end{equation} holds for all $\epsilon>0$, all $N > 1$ and all $f \in L^p(\mathbb R^2)$. To prove Theorem \ref{thm:localSm} it suffices to show \[ \mathcal{LS}(10/3 \to 30/7; 1/10). \] For given $1 \le p < q \le \infty$ and $\frac{1}{p} + \frac{3}{q} =1$, we define \begin{equation} \label{alpCon} \alpha= \alpha(p,q) \ge \frac{1}{p} - \frac{3}{q} +\frac{1}{2} \end{equation} to be the best exponent for which the estimate \eqref{unf} holds for all $N > 1$ and all $f \in L^p(\mathbb R^2)$, i.e., \[
\alpha(p,q) = \inf_{N > 1} \Big( \log_N \sup_{f \in L^p(\mathbb R^2)} \frac{\|U_N f\|_{L^q(\mathbb R^2 \times [1,2])}}{\|f\|_p} \Big). \] Then it is enough to show that for all $\epsilon,~ \epsilon_1>0$, \begin{equation} \label{alG} \alpha\Big(\frac{10}{3}, \frac{30}{7} \Big) \le \frac{1}{10} + C\epsilon_1 + \log_N C_{\epsilon, \epsilon_1}, \end{equation} since we may take $\epsilon = \epsilon_1$, which can be absorbed in an $\epsilon$-loss in \eqref{unf}.
\
\subsection{} Let an arbitrary small $\epsilon_1>0$ be given. Let $N \ge N_0$ and \( 1 > \gamma_1 > \gamma_2 \ge N_0^{-\epsilon_1/2} \). Later, $\gamma_1$, $\gamma_2$ and $N_0$ will be chosen. By rescaling and (a minor variant of) Lemma \ref{lem:LinTriLcompare} one has that for any $(x,t) \in \mathbb R^2 \times [1,2]$, \begin{align*}
|U_N f(x,t) | &\lesssim \max_{\Omega \in \mathbf \Omega(\gamma_1)} | U_N^{\Omega} f(x,t)| + \gamma_1^{-1}
\max_{\Omega \in \mathbf \Omega(\gamma_2)} | U_N^{\Omega} f(x,t) |\\ &\qquad \qquad + \gamma_2^{-50} \max_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf \Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big| \Big( \prod_{i=1}^{3} |U_N^{\Omega_i} f(x,t)| \Big)^{1/3} \Big|, \end{align*} where $U_N^{\Omega}$ is defined as \eqref{eqn:U_N} with $\varphi$ replaced by $\varphi_\Omega$ which is a bump function supported in $\Omega$.
By embedding $\ell^{q} \subset \ell^{\infty}$ it follows that \begin{equation} \label{LpTricom1} \begin{split}
\|U_N f \|_{L^q(\mathbb R^2 \times I)} & \lesssim \Big( \sum_{\Omega_1 \in \mathbf \Omega(\gamma_1)} \| U_N^{\Omega_1} f \|^q_{L^q(\mathbb R^2 \times I)} \Big)^{1/q} + \gamma_1^{-1} \Big(
\sum_{\Omega_2 \in \mathbf \Omega(\gamma_2)} \| U_N^{\Omega_2} f \|^q_{L^q(\mathbb R^2 \times I)} \Big)^{1/q} \\ &\qquad \qquad + \gamma_2^{-50} \Big( \sum_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf \Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big\| \Big( \prod_{i=1}^{3} |U_N^{\Omega_i} f_i| \Big)^{1/3} \Big\|^q_{L^q(\mathbb R^2 \times I)} \Big)^{1/q}, \end{split} \end{equation} where $I=[1,2]$.
We consider the first and second summation in the right-hand side of \eqref{LpTricom1}. From rescaling and the definition of $\alpha$ it follows that \begin{equation} \label{LShyp}
\| U_N^{\Omega_i} f\|_{L^q(\mathbb R^2 \times I)} \le C\gamma_i^{3\big(\frac{1}{q}-\frac{1}{p} \big)} (\gamma_i^2 N)^{\alpha+\epsilon} \| f \|_p. \end{equation} More specifically, by rotating we may assume that $\Omega$ is centered at 0. Then we may write $U_N^{\Omega_i} f$ as \[ U_N^{\Omega_i} f(x,t) = \int e^{2\pi i (x \cdot \xi +t {\xi_2^2}/{\xi_1})} \hat f(\xi) \eta_N(\xi_1) \varphi(\gamma_i^{-1} \xi_2/\xi_1) d\xi. \] Let $\sigma(x_1,x_2,t) = (\gamma_i^2 x_1, \gamma_i x_2, t)$ and $\underline \sigma(x_1,x_2) = (\gamma_i^2 x_1, \gamma_i x_2)$. Then, we have \( U_N^{\Omega_i} f \circ \sigma = U_{\gamma_i^2 N}(f \circ \underline\sigma). \) Thus, using \eqref{unf} and this relation we have \eqref{LShyp}.
If we define $f_\Omega$ by \begin{equation*}
\widehat f_\Omega(\xi_1,\xi_2) = \hat f(\xi_1,\xi_2) \chi_{\{|\xi_1| \sim N\}}(\xi_1) \chi_{\Omega}(\xi_2/\xi_1), \end{equation*} then we may replace $U_N^{\Omega_i} f$ with $U_N^{\Omega_i} f_{\Omega_i}$, where $\chi$ denotes a characteristic function. By \eqref{LShyp}, \begin{equation} \label{eqn:sum_of_U_N}
\Big( \sum_{\Omega_i \in \mathbf \Omega(\gamma_i)} \| U_N^{\Omega_i} f_{\Omega_i} \|_q^q \Big)^{1/q}
\le C \gamma_i^{3\big(\frac{1}{q} - \frac{1}{p} \big)} (\gamma_i^{2}N)^{\alpha+\epsilon} \Big(\sum_{\Omega_i \in \mathbf \Omega(\gamma_i)} \| f_{\Omega_i} \|_p^q \Big)^{1/q}. \end{equation}
We recall the following lemma from \cite{tao2000bilinearII}.
\begin{lem}[\cite{tao2000bilinearII}*{Lemma 7.1}] \label{lem:desum} Let $R_k$ be a collection of rectangles such that the dilates $2R_k$ are almost disjoint, and suppose that $f_k$ are a collection of functions whose Fourier transforms are supported on $R_k$. Then for all $1 \le p \le \infty$ we have \[
\Big( \sum_{k} \|f_k\|_p^{p^*} \Big)^{1/p^*} \lesssim \Big\| \sum_{k} f_k \Big\|_p \lesssim \Big( \sum_k \|f_k \|_p^{p_*} \Big)^{1/p_*}, \] where $p_* = \min(p,p')$, $p^* = \max(p,p')$. \end{lem} It is remarked that Lemma \ref{lem:desum} is elementary, and simply a consequence of interpolation between Plancherel's theorem and Minkowski's inequality for the $L^\infty$ space.
After embedding $\ell^p \subset \ell^q$ in the right-hand side of \eqref{eqn:sum_of_U_N}, we apply Lemma \ref{lem:desum}. Then we obtain \begin{equation} \label{scEst}
\Big( \sum_{\Omega_i \in \mathbf \Omega(\gamma_i)} \| U_N^{\Omega_i} f \|_{L^q(\mathbb R^2 \times I)}^q \Big)^{1/q} \le C \gamma_i^{3\big(\frac{1}{q} - \frac{1}{p} \big)} (\gamma_i^2 N)^{\alpha+\epsilon} \|f\|_p. \end{equation}
\
\subsection{} We consider the last summation in the right-hand side of \eqref{LpTricom1}. We will show that for any $\epsilon>0$, \begin{equation} \label{GTriLS}
\Big\| \underline \prod U_N^{\Omega_i} f \Big\|_{L^{30/7}(\mathbb R^2 \times I)} \le C_\epsilon N^{1/10+\epsilon} \|f\|_{10/3}. \end{equation} First we prove a corresponding local estimate.
\begin{lem} Let $B$ be a unit ball. Then, for any $\epsilon > 0$, \begin{equation} \label{localTLS}
\Big\| \underline\prod |U_N^{\Omega_i} f_i| \Big\|_{L^{30/7}(B \times I)} \le C_\epsilon N^{1/10+\epsilon} \underline\prod \|f_i\|_{10/3}. \end{equation} \end{lem}
\begin{proof} By interpolation it suffices to show \begin{align} \label{LSdec}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^6(B \times I)} &\le C_\epsilon N^{1/6+\epsilon} \underline\prod \|f_i\|_{6},\\ \label{LSMR}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^3(B \times I)} &\le C_\epsilon N^{\epsilon} \underline\prod \|f_i\|_{2}. \end{align} Consider \eqref{LSdec}. By H\"older's inequality it is enough to show \begin{equation} \label{LSdec2}
\| U_Nf \|_{L^6(B \times I)}
\le C_\epsilon N^{1/6+\epsilon} \|f\|_6. \end{equation} Since $\psi_{I}(t)U_Nf(x,t) $ has Fourier support in a $C$-neighborhood of $\Gamma(N)$, from Theorem \ref{thm:Decoupling} and rescaling it follows that \[
\| U_Nf \|_{L^6(B \times I)}
\le C_\epsilon N^{\epsilon} \Big( \sum_{\widetilde\Theta} \|(\psi_I U_Nf) \ast \Xi_{\widetilde\Theta}\|_{6}^2 \Big)^{1/2}, \] where $\widetilde\Theta$ is a sector of size $CN^{1/2} \times CN \times C$. By H\"older's inequality, this is bounded by \[
\le C_\epsilon N^{1/6+\epsilon} \Big( \sum_{\widetilde\Theta} \|(\psi_I U_Nf) \ast \Xi_{\widetilde\Theta}\|_{6}^6 \Big)^{1/6}. \] It is well known (see, e.g., \cite{wolff2000local}*{Lemma 6.1}, \cite{stein1993harmonic}*{XI: 4.13}, \cite{mockenhaupt1992wave}) that for $p \ge 2$, \[
\Big( \sum_{\widetilde\Theta} \|(\psi_I U_Nf )\ast \Xi_{\widetilde\Theta}\|_{p}^p \Big)^{1/p} \lesssim \|f\|_p. \] Thus, we obtain \eqref{LSdec2}.
Consider \eqref{LSMR}. In \eqref{mrt}, the extension operator $\widehat{f_i d\sigma_i}$ can be replaced with $U_1^{\Omega_i} \check f_i$ where $\check f_i$ denotes the inverse Fourier transform of $f_i$. Thus, from Theorem \ref{thm:MRT} and Plancherel's theorem it follows that \[
\Big\| \underline\prod U_1^{\Omega_i} f_i \Big\|_{L^3(Q_N)} \le C_\epsilon N^{\epsilon} \underline\prod \|f_i\|_{2}. \] If $s(x,t) = N^{-1}(x,t)$ and $\underline s (x) = N^{-1}x$, then $U_N^{\Omega} f \circ s = U_1^{\Omega} (f \circ \underline s)$. So, by changing variables and translation invariance, the above estimate gives \eqref{LSMR}. \end{proof}
We now prove that \eqref{localTLS} implies \eqref{GTriLS}. This immediately follows from the next localization lemma.
\begin{lem} Suppose that the local estimate \begin{equation} \label{ifloc}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^q(B \times I)} \le A(N) \underline\prod \| f_i \|_{p} \end{equation} holds for all unit cubes $B$ and all $f_i \in L^p(\mathbb R^2)$. If $p \le q$ then the estimate \begin{equation} \label{thenGl}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^q(\mathbb R^2 \times I)} \le C N^{\epsilon} A(N) \underline\prod \| f_i \|_{p} \end{equation} holds for all $\epsilon>0$ and all $f_i \in L^p(\mathbb R^2)$. \end{lem}
\begin{proof} We write as \[ U_N f(x,t)
= (K_N(t) \ast f)(x) \] where \[ K_N(t)(x) = K_N(x,t)
:= \int e^{2\pi i (x \cdot \xi +t {\xi_2^2}/{\xi_1})} \eta_N(\xi_1) \varphi(\xi_2/\xi_1) d\xi. \] By using a stationary phase method, it follows that for $(x,t) \in \mathbb R^2 \times I$, \[
|K_N(t)(x)| \le C_M N^{2} (1+ |x|)^{-M} \qquad \forall M>0. \] Thus, for $(x,t) \in \mathbb R^2 \times I$, \begin{equation} \label{asyt}
|U_N f(x,t)|
\le C_M ( a_N \ast |f| )(x), \quad \forall M>0, \end{equation}
where $a_N(x) = N^2 (1+ |x|)^{-M}$.
If a unit lattice square $B \subset \mathbb R^2$ is given, then we decompose \begin{equation} \label{ptLoc}
|U_N f|\chi_{B \times I} \lesssim |U_N(f \chi_{N^{\epsilon}B})| \chi_{B \times I} + C_M |\mathcal E_{B^c}f| \chi_{B \times I}, \end{equation} where \[
\mathcal E_{B^c} f := a_N \ast (|f|\chi_{\mathbb R^2 \setminus N^{\epsilon}B}). \]
Consider $|\mathcal E_{B^c}f| \chi_{B \times I}$. If $|x-y| \gtrsim N^{\epsilon}$ then one has $a_N(x-y) \lesssim N^2N^{-\epsilon M} \le N^{-2000C}$. So, we have \begin{align*}
\chi_B(x) \big( a_N \ast (|f|\chi_{\mathbb R^2 \setminus N^{\epsilon}B} ) \big)(x) &= \chi_B(x) \int a_N(x-y)\chi_{\mathbb R^2 \setminus N^{\epsilon}B}(y) |f(y)| dy \\
&\lesssim N^{-1000C} \chi_B(x) \int a_N^{1/2}(x-y) |f(y)| dy \\
&\lesssim N^{-1000C} \chi_B(x) (a^{1/2}_N \ast |f|)(x). \end{align*}
Thus, by Young's inequality we obtain \begin{equation} \label{errest}
\Big( \sum_{B} \| \mathcal E_{B^c} f\|_{L^q(B)}^q \Big)^{1/q}
\lesssim N^{-900C} \|f\|_{p}. \end{equation}
On the other hand, by some rough estimates (cf. Young's inequality) we see that
$\|U_N f \|_{L^q({B \times I})} \lesssim N^C \|f\|_p$. So, by embedding $\ell^p \subset \ell^q$, we have \begin{equation} \label{roughEst}
\Big( \sum_{B} \| U_N(f \chi_{N^\epsilon B}) \|_{L^q({B \times I})}^q \Big)^{1/q}
\lesssim N^{C} \Big( \sum_{B} \|f\|_{L^p(N^\epsilon B)}^q \Big)^{1/q}
\lesssim N^{2C} \|f\|_p. \end{equation}
Now, we consider the estimate \eqref{thenGl} by using \eqref{errest} and \eqref{roughEst}. We define $f_{\Omega_i}$ as \[ \widehat f_{\Omega_i}(\xi_1,\xi_2) = \hat f_i(\xi) \eta_N(\xi_1) \varphi_{\Omega_{i}}(\xi_2/\xi_1). \] Then we may replace $U_N^{\Omega_i} f_i$ with $U_N f_{\Omega_i}$. By \eqref{ptLoc}, \begin{align} \underline\prod U_N f_{\Omega_i} \chi_{B \times I} &\lesssim
\underline\prod \Big( |U_N(f_{\Omega_i} \chi_{N^{\epsilon}B})| \chi_{B \times I} + C_M (\mathcal E_{B^c}f_{\Omega_i} )\chi_{B \times I}\Big) \nonumber\\ \label{dle} &\lesssim
\underline\prod |U_N(f_{\Omega_i} \chi_{N^{\epsilon}B})| \chi_{B \times I} + C_M \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3})\chi_{B \times I}, \end{align} where \begin{align*}
\mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) &:= \sum_{i,j,k \in \{1,2,3\}} \big(\mathcal E_{B^c}f_{\Omega_i} |U_N(f_{\Omega_j} \chi_{N^{\epsilon}B})||U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \\
&+ \sum_{i,j,k \in \{1,2,3\}} \big( \mathcal E_{B^c}f_{\Omega_i} \mathcal E_{B^c}f_{\Omega_j} |U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} + \underline\prod \mathcal E_{B^c}f_{\Omega_i}. \end{align*} By Minkowski's inequality, \begin{equation} \label{errsum} \begin{split}
&\Big( \sum_{B} \| \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) \|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\qquad \lesssim \max_{i,j,k}\Big( \sum_{B} \| \big(\mathcal E_{B^c}f_{\Omega_i} |U_N(f_{\Omega_j} \chi_{N^{\epsilon}B})||U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\qquad\qquad + \max_{i,j,k}\Big( \sum_{B} \| \big( \mathcal E_{B^c}f_{\Omega_i} \mathcal E_{B^c}f_{\Omega_j} |U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\qquad\qquad\qquad +\Big( \sum_{B} \Big\| \underline\prod \mathcal E_{B^c}f_{\Omega_i} \Big\|_{L^q({B \times I})}^q \Big)^{1/q}. \end{split} \end{equation} Consider the right-hand side of \eqref{errsum}. By H\"older's inequality, \begin{multline*}
\Big( \sum_{B} \| \big(\mathcal E_{B^c}f_{\Omega_i} |U_N(f_{\Omega_j} \chi_{N^{\epsilon}B})||U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \|_{L^q({B \times I})}^q \Big)^{1/q} \\
\le \Big( \sum_{B} \| \mathcal E_{B^c}f_{\Omega_i} \|_{L^q({B \times I})}^q \Big)^{1/3q} \Big( \sum_{B} \| U_N(f_{\Omega_j} \chi_{N^{\epsilon}B}) \|_{L^q({B \times I})}^q \Big)^{1/3q} \\
\times \Big( \sum_{B} \| U_N(f_{\Omega_k} \chi_{N^{\epsilon}B}) \|_{L^q({B \times I})}^q \Big)^{1/3q}. \end{multline*} Thus, by \eqref{errest} and \eqref{roughEst} it is bounded by \[
\lesssim N^{-200C} \underline\prod \| f_{\Omega_i} \|_p. \] The second and third summations in the right-hand side of \eqref{errsum} are estimated by an analogous method. Thus, \begin{equation} \label{mulerEst}
\Big( \sum_{B} \| \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) \|_{L^q({B \times I})}^q \Big)^{1/q} \lesssim N^{-200C} \underline\prod \| f_{\Omega_i} \|_p. \end{equation} By \eqref{dle} \begin{align*}
\Big\| \underline\prod U_N f_{\Omega_i} \Big\|_{L^q(\mathbb R^2 \times I)} &= \Big( \sum_{B} \Big\| \underline\prod U_N f_{\Omega_i} \Big\|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\lesssim \Big( \sum_{B} \Big\| \underline\prod U_N(f_{\Omega_i} \chi_{N^{\epsilon}B}) \Big\|_{L^q({B \times I})}^q \Big)^{1/q}
+ \Big( \sum_{B} \| \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) \|_{L^q({B \times I})}^q \Big)^{1/q}. \end{align*} By \eqref{ifloc}, \eqref{mulerEst} and embedding $\ell^p \subset \ell^q$, it follows that \[
\Big\| \underline\prod U_N f_{\Omega_i} \Big\|_{L^q(\mathbb R^2 \times I)} \lesssim (N^{\epsilon}A(N)+ N^{-200C}) \underline\prod \| f_{\Omega_i} \|_p. \]
Since $\|f_{\Omega_i}\|_p \lesssim \|f_i\|_p$ by Young's inequality, we obtain \eqref{thenGl}. \end{proof}
\
\subsection{} Last of all, we will show \eqref{alG}. We substitute \eqref{scEst} and \eqref{GTriLS} in \eqref{LpTricom1} with $(p,q)=(10/3,30/7)$. Then, it follows that \begin{equation}
\|U_N f \|_{L^{30/7}(I \times \mathbb R^2)} \lesssim (\gamma_1^{2\alpha - \frac{1}{5}+2\epsilon} N^{\alpha+\epsilon} + \gamma_1^{-1} \gamma_2^{2\alpha - \frac{1}{5} +2\epsilon} N^{\alpha+\epsilon} + C_{\epsilon, \epsilon_1} \gamma_2^{-60} N^{\frac{1}{10}+\epsilon}) \|f\|_{10/3}. \end{equation} So, by the assumption that $\alpha$ is a best exponent, \begin{align*} N^{\alpha} \le C(\gamma_1^{2\alpha - \frac{1}{5}+2\epsilon} + \gamma_1^{-1} \gamma_2^{2\alpha - \frac{1}{5}+2\epsilon} ) N^{\alpha} + C_{\epsilon, \epsilon_1}\gamma_2^{-60} N^{\frac{1}{10}}. \end{align*} Observe that $2\alpha -\frac{1}{5} \ge 0$ by \eqref{alpCon}. We now choose $\gamma_1$, $\gamma_2$ and $N_0$ so that $C\gamma_1^{2\alpha - \frac{1}{5}+2\epsilon} \le 1/4$, $ C\gamma_1^{-1} \gamma_{2}^{2\alpha - \frac{1}{5}+2\epsilon} \le 1/4$ and $1> \gamma_1 > \gamma_2 \ge N_0^{-{\epsilon_1}/{2}}$. Then $N^{\alpha} \le C_{\epsilon,\epsilon_1} N^{\frac{1}{10} + 30\epsilon_1} $. Thus we obtain \eqref{alG}.
\section{Acknowledgments} The author is indebted to the anonymous referee whose comments helped improve the presentation of the work. The author would like to thank Andreas Seeger for informing his work with Malabika Pramanik.
\
\end{document}
\begin{document}
\title[Solvability of a class of braided fusion categories]{Solvability of a class of braided fusion categories} \author{Sonia Natale} \author{Julia Yael Plavnik} \address{Facultad de Matem\'atica, Astronom\'\i a y F\'\i sica, Universidad Nacional de C\'ordoba, CIEM -- CONICET, (5000) Ciudad Universitaria, C\'ordoba, Argentina} \email{natale@famaf.unc.edu.ar, plavnik@famaf.unc.edu.ar \newline \indent \emph{URL:}\/ http://www.famaf.unc.edu.ar/$\sim$natale} \thanks{The research of S. N. was partially supported by CONICET and Secyt-UNC. The research of J. P. was partially supported by CONICET, ANPCyT and Secyt-UNC} \subjclass{18D10; 16T05} \keywords{Fusion category; braided fusion category; solvability} \date{May 10, 2012}
\begin{abstract} We show that a weakly integral braided fusion category ${\mathcal C}$ such that every simple object of ${\mathcal C}$ has Frobenius-Perron dimension $\leq 2$ is solvable. In addition, we prove that such a fusion category is group-theoretical in the extreme case where the universal grading group of ${\mathcal C}$ is trivial. \end{abstract}
\maketitle
\section{Introduction and main results} Let $k$ be an algebraically closed field of characteristic zero. A fusion category over $k$ is a semisimple tensor category over $k$ having finitely many isomorphism classes of simple objects. In this paper we consider the problem of giving structural results of a fusion category $\mathcal C$ under restrictions on the set $\cd ({\mathcal C})$ of Frobenius-Perron dimensions of its simple objects.
Results of this type were obtained in the paper \cite{NP}. For instance, we showed in \cite[Theorem 7.3]{NP} that under the assumption that ${\mathcal C}$ is braided odd-dimensional and $\cd({\mathcal C}) \subseteq \{p^m:\, m \geq 0\}$, where $p$ is a (necessarily odd) prime number, then ${\mathcal C}$ is solvable. Also, the same is true when ${\mathcal C} = \Rep H$, where $H$ is a semisimple quasitriangular Hopf algebra and $\cd (\mathcal C) = \{1, 2\}$ \cite[Theorem 6.12]{NP}.
Using results of the paper \cite{BN}, we also showed in \cite[Theorem 6.4]{NP} that if ${\mathcal C} = \Rep H$, where $H$ is any semisimple Hopf algebra, and $\cd({\mathcal C}) \subseteq \{ 1, 2\}$, then ${\mathcal C}$ is weakly group-theoretical, and furthermore, it is group-theoretical if ${\mathcal C}$ coincides with the adjoint subcategory ${\mathcal C}_{\ad}$.
Our main results are the following theorems. Recall that a fusion category ${\mathcal C}$ is called \emph{weakly integral} if the Frobenius-Perron dimension of ${\mathcal C}$ is a natural integer.
\begin{theorem}\label{soluble} Let ${\mathcal C}$ be a weakly integral braided fusion category such that $\FPdim X \leq 2$, for every simple object $X$ of ${\mathcal C}$. Then ${\mathcal C}$ is solvable. \end{theorem}
Theorem \ref{soluble} extends the previous result for semisimple quasitriangular Hopf algebras mentioned above. It implies in particular that every weakly integral braided fusion category with Frobenius-Perron dimensions of simple objects at most $2$ is weakly group-theoretical. This gives some further support to the conjecture that every weakly integral fusion category is weakly group-theoretical. See \cite[Question 2]{ENO2}.
It is known that a nilpotent braided fusion category, which is in addition integral (that is, $\cd({\mathcal C}) \subseteq \mathbb Z_+$) is always group-theoretical \cite[Theorem 6.10]{DGNO}. We also show that the same conclusion is true in the opposite extreme case:
\begin{theorem}\label{gp-ttic} Let ${\mathcal C}$ be a weakly integral braided fusion category such that $\FPdim X \leq 2$, for every simple object $X$ of ${\mathcal C}$. Suppose that the universal grading group of ${\mathcal C}$ is trivial.
Then ${\mathcal C}$ is group-theoretical. \end{theorem}
Theorems \ref{soluble} and \ref{gp-ttic} are proved in Section \ref{pruebas}. Our proofs rely on the results of Naidu and Rowell \cite{NaR} for the case where $\mathcal C$ is integral and has a faithful self-dual simple object of Frobenius-Perron dimension $2$.
Being group-theoretical, a braided fusion category ${\mathcal C}$ satisfying the assumptions of Theorem \ref{gp-ttic}, has the so called property \textbf{F}, namely, all asso\-ciated braid group representations on the tensor powers of objects of ${\mathcal C}$ factor over finite groups. See \cite[Corollary 4.4]{ERW}. It is conjectured that every braided weakly integral fusion category does have property \textbf{F} \cite{NaR}. This conjecture has been proved for braided fusion categories ${\mathcal C}$ with $\cd({\mathcal C}) = \{ 1, 2\}$ such that all objects of ${\mathcal C}$ are self-dual or ${\mathcal C}$ is generated by a self-dual simple object \cite[Corollary 4.3 and Remark 4.4]{NaR}.
The paper is organized as follows. In Section \ref{preliminaries} we recall the main facts and terminology about fusion and braided fusion categories used throughout. In Section \ref{examples} we discuss some families of (integral) examples that appear in the literature. We also recall in this section the results of the paper \cite{NaR} related to dihedral group fusion rules that will be used later. In Section \ref{pruebas} we give the proofs of Theorems \ref{soluble} and \ref{gp-ttic}.
\section{Preliminaries}\label{preliminaries}
\subsection{Fusion categories}
Let ${\mathcal C}$ be a fusion category. We shall denote by $\Irr({\mathcal C})$ the set of isomorphism classes of simple objects of ${\mathcal C}$ and by $G({\mathcal C})$ the group of isomorphism classes of invertible objects of ${\mathcal C}$. For an object $X$ of ${\mathcal C}$, we shall indicate by ${\mathcal C}[X]$ the fusion subcategory generated by $X$ and by $G[X]$ the subgroup of $G({\mathcal C})$ consisting of invertible objects $g$ such that $g \otimes X \simeq X$.
If $\mathcal D$ is another fusion category, ${\mathcal C}$ and ${\mathcal D}$ are \emph{Morita equivalent} if ${\mathcal D}$ is equivalent to the dual ${\mathcal C}^*_{\mathcal M}$ with respect to an indecomposable module category $\mathcal M$. Recall that ${\mathcal C}$ is called \emph{pointed} if all its simple objects are inver\-tible and it is called \emph{group-theoretical} if it is Morita equivalent to a pointed fusion category.
There is a canonical faithful grading ${\mathcal C} = \oplus_{g \in U({\mathcal C})}{\mathcal C}_g$, with trivial component ${\mathcal C}_e = {\mathcal C}_{\ad}$, where ${\mathcal C}_{\ad}$ is the \emph{adjoint subcategory} of ${\mathcal C}$, that is, the fusion subcategory generated by $X \otimes X^*$, where $X$ runs through the simple objects of ${\mathcal C}$. The group $U({\mathcal C})$ is called the \emph{universal grading group} of ${\mathcal C}$. ${\mathcal C}$ is called nilpotent if the upper central series $\dots \subseteq {\mathcal C}^{(n+1)} \subseteq {\mathcal C}^{(n)} \subseteq \dots \subseteq {\mathcal C}^{(0)} = {\mathcal C}$ converges to $\vect_k$, where ${\mathcal C}^{(i)} : = ({\mathcal C}^{(i-1)})_{\ad}$, $i \geq 1$. See \cite{gel-nik}.
A \emph{weakly group-theoretical} fusion category is a fusion category ${\mathcal C}$ which is Morita equivalent to a nilpotent fusion category. If ${\mathcal C}$ is Morita equivalent to a cyclically nilpotent fusion category, then ${\mathcal C}$ is called \emph{solvable}. We refer the reader to \cite{ENO, ENO2} for further definitions and facts about fusion categories.
\subsection{Braided fusion categories} Let ${\mathcal C}$ be a \emph{braided} fusion category, that is, ${\mathcal C}$ is equipped with natural isomorphisms $c_{X,Y} : X \otimes Y \rightarrow Y \otimes X$, $X, Y \in {\mathcal C}$, satisfying the hexagon axioms.
Recall that ${\mathcal C}$ is called \emph{premodular} if it is also spherical, that is, ${\mathcal C}$ has a pivotal structure such that left and right categorical dimensions coincide. Equivalently, ${\mathcal C}$ is premodular if it is endowed with a compatible ribbon structure \cite{bruguieres, Mu1}.
We say that the objects $X$ and $Y$ of a braided fusion category ${\mathcal C}$ centralize each other if $c_{Y,X} c_{X,Y} = \id_{X\otimes Y}$. The \emph{centralizer} ${\mathcal D}'$ of a fusion subcategory ${\mathcal D} \subseteq {\mathcal C}$ is defined to be the full subcategory of objects of ${\mathcal C}$ that centralize every object of ${\mathcal D}$. The centralizer ${\mathcal D}'$ results a fusion subcategory of ${\mathcal C}$.
The \emph{Müger (or symmetric) center} $Z_2({\mathcal C})$ of ${\mathcal C}$ is $Z_2({\mathcal C}) = {\mathcal C}'$; this is a symmetric fusion subcategory of ${\mathcal C}$ whose objects are called central, dege\-nerate or transparent. A braided fusion category ${\mathcal C}$ is called \emph{non-degenerate} if its Müger center $Z_2({\mathcal C})$ is trivial. A \emph{modular} category is a non-degenerate premodular category ${\mathcal C}$.
\begin{remark}\label{spherical} Recall that a fusion category ${\mathcal C}$ is called pseudo-unitary if $\dim {\mathcal C} = \FPdim {\mathcal C}$, where $\dim {\mathcal C}$ is the global dimension of ${\mathcal C}$ and $\FPdim {\mathcal C}$ is the Frobenius-Perron dimension of ${\mathcal C}$. If ${\mathcal C}$ is pseudo-unitary then ${\mathcal C}$ has a canonical spherical structure with respect to which categorical dimensions of all simple objects coincide with their Frobenius-Perron dimensions \cite[Proposition 8.23]{ENO}.
In particular, this holds for any weakly integral fusion category, because it is automatically pseudo-unitary \cite[Proposition 8.24]{ENO}. Hence every weakly integral non-degenerate fusion category is canonically a modular category. \end{remark}
\section{Some families of examples}\label{examples}
\subsection{Examples of fusion categories with Frobenius-Perron dimensions $\leq 2$}
In this subsection we discuss examples of weakly integral fusion categories with Frobenius-Perron dimensions of simple objects $\leq 2$ that appear in the literature.
\begin{ejem} Consider a Hopf algebra $H$ fitting into an abelian exact sequence: \begin{equation}\label{exacta} k\rightarrow k^\Gamma \rightarrow H \rightarrow k\mathbb Z_2 \rightarrow k, \end{equation} where $\Gamma$ is a finite group. Let ${\mathcal C} = \Rep H$. Then $\cd ({\mathcal C}) \subseteq \{1,2\}$ and equality holds if the associated action of $\mathbb Z_2$ on $\Gamma$ is not trivial.
All these examples are group-theoretical, in view of \cite[Theorem 1.3]{gp-ttic}. Observe that, as a consequence of \cite[Theorem 6.4]{BN}, any cosemisimple Hopf algebra $H$ such that $\cd({\mathcal C}) \subseteq \{ 1, 2\}$ is group-theoretical if ${\mathcal C} = {\mathcal C}_{\ad}$. See \cite[Theorem 6.4]{NP}.
Non-trivial examples of cosemisimple Hopf algebras fitting into an exact sequence \eqref{exacta} are given by the Hopf algebras $${\mathcal A}^*_{4m}, {\mathcal B}^*_{4m} \quad m\geq 2,$$ of dimension $4m$, due to Masuoka \cite{mas-cocycle}. In these cases, $\Gamma$ is a dihedral group. \end{ejem}
\begin{ejem}
Let ${\mathcal C} = {\mathcal {TY}} (G, \chi, \tau)$ be the Tambara-Yamagami category associated to a finite (necessarily abelian) group $G$, a symmetric non-degene\-rate bicharacter $\chi : G\times G \rightarrow k^\times$ and an element $\tau\in k$ satisfying $|G|\tau^2 = 1$ \cite{TY}. This is a fusion category with isomorphism classes of simple objects parameterized by the set $G\cup\{X\}$, where $X \notin G$, obeying the fusion rules \begin{equation}\label{ty} g \otimes h = gh, \quad g, h\in G,\quad X \otimes X = \oplus_{g\in G} g. \end{equation}
We have $\cd ({\mathcal C}) = \{1,2\}$ if and only if $G$ is of order $4$. Therefore, in this case $\FPdim {\mathcal C} = 8$.
If $G\simeq \mathbb Z_4$, there are two possible fusion categories ${\mathcal C}$. None of them is braided \cite[Theorem 1.2 (1)]{Siehler-braided}.
If $G\simeq \mathbb Z_2 \times \mathbb Z_2$ there are exactly four classes of Tambara-Yamagami categories with irreducibles degrees $1$ or $2$, by \cite[Theorem 4.1]{TY}. Three of them are (equivalent to) the categories of representations of eight-dimensional Hopf algebras: the dihedral group algebra of order $8$, the quaternion group algebra, and the Kac-Paljutkin Hopf algebra $H_8$. The remaining fusion ca\-tegory, which has the same $\chi$ as $H_8$ but $\tau = -1/2$, is not realized as the fusion category of representations of a Hopf algebra. Since in this case $G$ is an elementary abelian $2$-group all of these categories admit a braiding, by \cite[Theorem 1.2 (1)]{Siehler-braided}.
All the fusion categories in this example are group-theoretical. In fact, by \cite[Lemma 4.5]{GNN}, for any symmetric non-degenerate bicharacter $\chi:G\times G \rightarrow k^{\times}$, $G$ contains a Lagrangian subgroup with respect to $\chi$. Therefore ${\mathcal {TY}} (G, \chi, \tau)$ is group-theoretical, by \cite[Theorem 4.6]{GNN}.
\end{ejem}
\begin{ejem} Recall that a near-group category is a fusion category with exactly one isomorphism class of non-invertible simple object. In the notation of \cite{Siehler-braided}, the fusion rules of ${\mathcal C}$ are determined by a pair $(G, \kappa)$, where $G$ is the group of invertible objects of ${\mathcal C}$ and $\kappa$ is a nonnegative integer. Letting $\Irr({\mathcal C}) = G \cup \{X\}$, where $X$ is non-invertible, we have the relation \begin{equation}
X\otimes X = \oplus_{g \in G} g \oplus \kappa X. \end{equation}
Near-group categories with fusion rule $(G, 0)$ for some finite group $G$ are thus Tambara-Yamagami categories, discussed in the previous example. Let us consider near-group categories with fusion rule $(G, \kappa)$ for some finite group $G$ and a positive integer $\kappa$.
We have $\cd ({\mathcal C}) = \{1,2\}$ if and only if $G$ is of order $2$ and $\kappa = 1$, that means ${\mathcal C}$ is of type $(\mathbb Z_2, 1)$. Therefore, in this case $\FPdim {\mathcal C} = 6$ and since $\kappa > 0$, then ${\mathcal C}$ is group-theoretical, by \cite[Theorem 1.1]{EGO}. By \cite[Theorem 1.5]{Thornton}, there are up to equivalence exactly two non-symmetric braided near-group categories with fusion rule $(\mathbb Z_2, 1)$. \end{ejem}
\begin{ejem} Examples of weakly integral braided fusion categories which are not integral and whose Frobenius-Perron dimensions of simple objects are $\leq 2$ are given by the Ising categories, studied in \cite[Appendix B]{DGNOI}. In this case, there is a unique non-invertible simple object $X$ with $X^{\otimes 2} = \textbf{1} \oplus a$, where $a$ generates the group of invertible objects, isomorphic to $\mathbb Z_2$ (note that these are also Tambara-Yamagami ca\-tegories). We have here $\cd({\mathcal C}) = \{ 1, \sqrt 2 \}$ and $\FPdim {\mathcal C} = 4$. Every braided Ising category is modular \cite[Corollary B.12]{DGNOI}.
Other examples come from braided fusion categories with generalized Tambara-Yamagami fusion rules of type $(G, \mathbb Z_2)$, where $G$ is a finite group. See \cite{liptrap}. In these examples, ${\mathcal C}$ is not pointed, the group of invertible objects is $G$, and $\mathbb Z_2 \simeq \Gamma \subseteq G$ is a subgroup such that $X \otimes X^* \simeq \oplus_{h \in \Gamma} h$, for all non-invertible object $X$ of ${\mathcal C}$. Hence we also have $\cd({\mathcal C}) = \{ 1, \sqrt 2 \}$.
Since they are not integral, these examples are not group-theoretical. \end{ejem}
\begin{ejem} Let ${\mathcal C}$ be a braided group-theoretical fusion category. Then ${\mathcal C}$ is an equivariantization of a pointed fusion category, that is, ${\mathcal C} \simeq {\mathcal D}^G$, where ${\mathcal D}$ is a pointed fusion category and $G$ is a finite group acting on ${\mathcal D}$ by tensor autoequivalences \cite{NNW}. In this case, ${\mathcal C}$ contains the category $\Rep G$ of finite-dimensional representations of $G$ as a fusion subcategory.
Suppose that $\cd({\mathcal C}) = \{1, p\}$, where $p$ is any prime number. Then also $\cd(G) \subseteq \{1, p\}$. In particular, the group $G$ must have a normal abelian $p$-complement; moreover, either $G$ contains an abelian normal subgroup of index $p$ or the center $Z(G)$ has index $p^3$. See \cite[Theorems 6.9, 12.11]{isaacs}. \end{ejem}
\subsection{Fusion rules of dihedral type}\label{fusion_D_n}
Let $D_n$ be the dihedral group of order $2n$, $n\geq 1$. Recall that $D_n$ has a presentation by generators $t,z$ and relations $t^2 = 1 = z^n$, $tz = z^{-1}t$.
The following proposition describes the fusion rules of $\Rep D_n$ (\textit{c.f.} \cite{mas-cocycle}).
\begin{proposition}\label{D_n} \begin{enumerate} \item Suppose $n$ is odd. Then the isomorphism classes of simple objects of $\Rep D_n$ are represented by $2$ invertible objects, $\textbf{1}$ and $g$, and $r = (n-1)/2$ simple objects $X_1, \ldots, X_r$, of dimension $2$, such that
\begin{align*}
& g\otimes X_i = X_i = X_i\otimes g, \qquad \forall i=1, \ldots, r, \\
& X_i\otimes X_j = \left\{
\begin{array}{ll} X_{i+j}\oplus X_{|i-j|}, \quad & \text{if} \quad i+j \leq r, \\
X_{n-(i+j)}\oplus X_{|i-j|}, \quad & \text{if} \quad i+j > r;
\end{array}
\right.
\end{align*} where $X_0 = \textbf{1}\oplus g$.
\item Suppose $n$ is even, that is $n = 2m$. Then the isomorphism classes of simple objects of $\Rep D_n$ are represented by $4$ invertible objects, $\textbf{1}$, $g$, $h$, $f = gh$, and $m-1$ simple objects $X_1, \ldots, X_{m-1}$, of dimension $2$, such that
\begin{align*}
& g\otimes X_i = X_i = X_i\otimes g, \qquad \forall i=1, \ldots, m-1, \\
& h\otimes X_i = X_{m-i} = X_i\otimes h, \qquad \forall i=1, \ldots, m-1, \\
& X_i\otimes X_j = \left\{ \begin{array}{ll} X_{i+j}\oplus X_{|i-j|}, \quad & \text{if} \quad i+j \leq m, \\
X_{2m-(i+j)}\oplus X_{|i-j|}, \quad & \text{if} \quad i+j >
m; \end{array} \right.
\end{align*} where $X_0 = \textbf{1}\oplus g$ and $X_m = h\oplus f$. \end{enumerate} In particular, the group of invertible objects in $\Rep D_n$ is isomorphic to $\mathbb Z_2$ if $n$ is odd, and to $\mathbb Z_2\times\mathbb Z_2$ if $n$ is even. \end{proposition}
\begin{remark}\label{ndivide4} Suppose that $4$ divides $n = 2m$. Then $X_{m/2}$ is fixed under (left and right) multiplication by all invertible objects of $\Rep D_n$. \end{remark}
Let ${\mathcal C}$ be a fusion category with $\cd ({\mathcal C}) = \{1, 2\}$. Suppose that the Grothendieck ring of ${\mathcal C}$ is commutative (for example, this is the case if ${\mathcal C}$ is braided). Assume in addition that the following conditions hold:
\begin{enumerate} \item[(a)] All objects are self-dual, that is $X \simeq X^*$, for every object $X$ of ${\mathcal C}$.
\item[(b)] ${\mathcal C}$ has a faithful simple object.
\end{enumerate} Then, it is shown in \cite[Theorem 4.2]{NaR} that ${\mathcal C}$ is Grothendieck equivalent to $\Rep D_n$. Moreover, ${\mathcal C}$ is necessarily group-theoretical.
\medbreak It is possible to remove the assumption that all the objects are self-dual, but the condition of self-duality on the faithful simple object is still necessary. Namely, suppose that ${\mathcal C}$ is not self-dual, but satisfies \begin{enumerate} \item[(b')] ${\mathcal C}$ has a faithful self-dual simple object. \end{enumerate}
In this case ${\mathcal C}$ is still group-theoretical and it is Grothendieck equivalent to $\Rep \widetilde D_n$, $n$ odd. See \cite[Remark 4.4]{NaR}. Here $\widetilde D_n$ is the generalized quaternion (binary dihedral) group of order $4n$, that is, the group presented by generators $a, s$, with relations $a^{2n} = 1$, $s^2 = a^n$, $s^{-1}as = a^{-1}$. (Observe that for $n$ odd, $\widetilde D_n$ is isomorphic to the semidirect product $\mathbb Z_n \rtimes \mathbb Z_4$, with respect to the action given by inversion, considered in \cite{NaR}. For even $n$, $\Rep \widetilde D_n$ is Grothendieck equivalent to $\Rep D_{2n}$, while $\mathbb Z_n \rtimes \mathbb Z_4$ has no faithful representation of degree $2$.)
\begin{lemma}\label{centers} Let $n \geq 2$. Then $(\Rep \widetilde D_{n})_{\ad} = \Rep D_{n}$. In addition, \begin{equation*}(\Rep D_{n})_{\ad} = \left\{ \begin{array}{ll} \Rep D_{n/2}, \quad & \text{if} \quad n \quad \text{is even}, \\
\Rep D_{n}, \quad & \text{if}\quad n \quad \text{is odd}. \end{array} \right. \end{equation*} \end{lemma}
\begin{proof} Recall that when ${\mathcal C} = \Rep G$, where $G$ is a finite group, then ${\mathcal C}_{\ad} = \Rep G/Z(G)$ \cite{gel-nik}. The first claim follows from the fact that the center of $\widetilde D_n$ equals $\{1, s^2\} \simeq \mathbb Z_2$. On the other hand, the center $Z(D_n)$ is trivial if $n$ is odd, and equals $\{ 1, z^{n/2}\} \simeq \mathbb Z_2$ if $n$ is even.
This implies the second claim and finishes the proof of the lemma.
\end{proof}
\section{Proof of the main results}\label{pruebas}
In this section we shall prove Theorems \ref{soluble} and \ref{gp-ttic}.
\begin{proposition}\label{equiv} Let ${\mathcal C}$ be a premodular fusion category. Suppose ${\mathcal C}$ has an invertible object $g$ of order $n$ and a simple object $X$ such that \begin{flalign} \label{(1)}& g\otimes X = X, \textrm{ and } & \\ \label{(2)}& g \textrm{ centrali\-zes } X.& \end{flalign} Then we have \begin{enumerate} \item[(i)] ${\mathcal C}$ is an equivariantization by the cyclic group $\mathbb Z_n$ of a fusion category $\widetilde {\mathcal C}$. \item[(ii)] If $g \in {\mathcal C}'$, then $\widetilde {\mathcal C}$ is braided. \end{enumerate} \end{proposition}
\begin{proof}
Condition \eqref{(1)} ensures the existence of a fiber functor on the fusion category ${\mathcal C} [g]$ generated by $g$. Then ${\mathcal C} [g]$ is equivalent to $\Rep \mathbb Z_n$ as fusion categories.
Moreover, they are equivalent as braided fusion categories. Indeed, \eqref{(1)} implies ${\mathcal C}[g]\subseteq {\mathcal C}[X]$ and therefore ${\mathcal C}[g]\subseteq Z_2({\mathcal C}[X])$, by \eqref{(2)}. Hence ${\mathcal C}[g]$ is symmetric. Then the only possible twists in ${\mathcal C}$ are $\theta_h = 1$ and $\theta_h = -1$ for all $h\in\langle g \rangle$. But $\theta_h$ is not equal to $-1$ since $h$ centralizes $X$ and $h\otimes X = X$ \cite[Lemma 5.4]{Mu}. Then $\theta_h = 1$ for all $h\in\langle g \rangle$. Therefore ${\mathcal C} [g] \simeq \Rep \mathbb Z_n$ as braided fusion categories, as claimed.
Let $\Gamma = \langle g \rangle \subseteq G({\mathcal C})$. It follows from \cite[Theorem 4.18 (i)]{DGNOI} that the de-equivariantization $\widetilde {\mathcal C} = {\mathcal C}_{\Gamma}$ of ${\mathcal C}$ by $\Gamma$ is a fusion category and there is a canonical equivalence ${\mathcal C}\simeq {\widetilde {\mathcal C}}^{\Gamma}$ between the category ${\mathcal C}$ and the $\Gamma$-equivariantization of $\widetilde {\mathcal C}$, which shows (i).
Furthermore, if $g \in {\mathcal C}'$ then $\widetilde {\mathcal C}$ is braided and the equivalence ${\mathcal C} \simeq {\widetilde {\mathcal C}}^{\Gamma}$ is of braided fusion categories \cite{bruguieres, Mu} (see also \cite[Theorem 4.18 (ii)]{DGNOI}). Thus we get (ii). This proves the proposition. \end{proof}
\begin{lemma}\label{generadores} Let ${\mathcal C}$ be a fusion category with commutative Grothendieck ring. Suppose that ${\mathcal C} = {\mathcal C}_{\ad}$. If ${\mathcal D}_1, \ldots, {\mathcal D}_s$ are fusion subcategories that generate ${\mathcal C}$ as a fusion category, then ${\mathcal D}_1^{(m)}, \ldots, {\mathcal D}_s^{(m)}$ generate ${\mathcal C}$ as a fusion category, $\forall m\geq 0$. \end{lemma}
\begin{proof} Since ${\mathcal D}_1, \ldots, {\mathcal D}_s$ generate ${\mathcal C}$, then $({\mathcal D}_1)_{\ad}, \ldots, ({\mathcal D}_s)_{\ad}$ generate ${\mathcal C}$. In fact, let $X$ be a simple object of ${\mathcal C}$. There exist simple objects $X_{i_1}, \ldots, X_{i_t}$, with $X_{i_l} \in {\mathcal D}_{i_l}$, $1 \leq i_1, \dots, i_t \leq s$, such that $X$ is a direct summand of $X_{i_1}\otimes \ldots \otimes X_{i_t}$. Then $X\otimes X^*$ is a direct summand of $$X_{i_1}\otimes \ldots \otimes X_{i_t}\otimes X_{i_t}^*\otimes \ldots \otimes X_{i_1}^* \simeq (X_{i_1}\otimes X_{i_1}^*)\otimes \ldots \otimes (X_{i_t}\otimes X_{i_t}^*),$$ where we have used that ${\mathcal C}$ has a commutative Grothendieck ring.
Notice that the object in the right hand side belongs to the fusion subcategory generated by $({\mathcal D}_1)_{\ad}, \ldots, ({\mathcal D}_s)_{\ad}$.
Since $X$ was arbitrary, it follows that $({\mathcal D}_1)_{\ad}, \ldots, ({\mathcal D}_s)_{\ad}$ gene\-rate ${\mathcal C}_{\ad}$. But ${\mathcal C} = {\mathcal C}_{\ad}$ by assumption, then we have proved that $({\mathcal D}_1)_{\ad}, \ldots, ({\mathcal D}_s)_{\ad}$ generate ${\mathcal C}$. The statement follows from this by induction on $m$, since ${\mathcal D}_j^{(m)} = ({\mathcal D}_j^{(m-1)})_{\ad}$, for all $j = 1, \ldots, s$, $m\geq 1$. \end{proof}
\subsection{Braided fusion categories with irreducible degrees $1$ and $2$ }
Throughout this subsection ${\mathcal C}$ is a braided fusion category with $\cd ({\mathcal C}) = \{1,2\}$. We regard ${\mathcal C}$ as a premodular category with respect to its canonical spherical structure. See Remark \ref{spherical}.
\begin{remark}\label{orderG[X]} Note that $G[X]\neq \textbf{1}$, for all
$X$ such that $\FPdim X = 2$. Moreover, $|G[X]| = 2$ or $4$. In particular the (abelian) group $G({\mathcal C})$ is not trivial. \end{remark}
\begin{proposition}\label{equi_g} Let $g$ be a non-trivial invertible object such that $g^2 = 1$ and $\theta_g = 1$. Assume that $g$ generates the Müger center ${\mathcal C}'$ of ${\mathcal C}$ as a fusion category.
Then ${\mathcal C}$ is the equivariantization of a modular fusion category $\widetilde {\mathcal C}$ by the group $\mathbb Z_2$. Furthermore $\cd(\widetilde {\mathcal C}) \subseteq \{1,2\}$. \end{proposition}
\begin{proof} By assumption ${\mathcal C}'\simeq \Rep \mathbb Z_2$ is tannakian. Then the de-equivarianti\-zation $\widetilde {\mathcal C}$ of ${\mathcal C}$ by ${\mathcal C}'$ is a modular category and there is an action of $\mathbb Z_2$ on $\widetilde {\mathcal C}$ such that ${\mathcal C} \simeq \widetilde {\mathcal C} ^{\mathbb Z_2}$ \cite{bruguieres, Mu}.
Since $\cd(\widetilde {\mathcal C} ^{\mathbb Z_2}) = \cd({\mathcal C}) = \{1,2\}$, then $ \cd(\widetilde {\mathcal C}) \subseteq \{1,2\}$, by \cite[Proof of Proposition 6.2]{ENO2}, \cite[Lemma 7.2]{NP}.
\end{proof}
\begin{lemma}\label{noad} Suppose that ${\mathcal C}\neq {\mathcal C}_{\ad}$ and ${\mathcal C}_{\ad}$ is solvable. Then ${\mathcal C}$ is solvable. \end{lemma}
\begin{proof}
Since ${\mathcal C}$ is braided, its universal grading group $U({\mathcal C})$ is abelian \cite[Theorem 6.2]{gel-nik}. The category ${\mathcal C}$ is a $U({\mathcal C})$-extension of ${\mathcal C}_{\ad}$ and an extension of a solvable category by a solvable group is again solvable \cite[Proposition 4.5 (i)]{ENO2}. Then ${\mathcal C}$ is solvable, as claimed. \end{proof}
\begin{lemma}\label{ad} Assume ${\mathcal C} = {\mathcal C}_{\ad}$. Then $\FPdim {\mathcal C}' \geq 2$. \end{lemma}
\begin{proof} Suppose on the contrary that $\FPdim {\mathcal C}' = 1$, that is, ${\mathcal C}$ is modular. Then, by \cite[Theorem 6.2]{gel-nik}, $U({\mathcal C})\simeq \widehat{G({\mathcal C})} \simeq G({\mathcal C})$. By Remark \ref{orderG[X]}, ${\mathcal C}_{\ad}\subsetneq {\mathcal C}$, against the assumption. Hence $\FPdim {\mathcal C}'\geq 2$, as claimed. \end{proof}
\begin{lemma}\label{lema-dn} Suppose ${\mathcal C}$ is generated by a simple object $X$ such that $X\simeq X^*$ and $\FPdim X = 2$. Then we have \begin{enumerate} \item[(i)] ${\mathcal C}$ is not modular. \end{enumerate} Assume ${\mathcal C} = {\mathcal C}_{\ad}$. Then we have in addition \begin{enumerate} \item[(ii)] There is a group isomorphism $G({\mathcal C})\simeq \mathbb Z_2$. \item[(iii)] $G({\mathcal C})\subseteq {\mathcal C}'$. \end{enumerate}
\end{lemma}
\begin{proof} By \cite[Theorem 4.2; Remark 4.4]{NaR}, ${\mathcal C}$ is Grothendieck equivalent to $\Rep D_n$ or $\Rep \widetilde D_{2n+1}$, for some $n\geq 1$. Since the universal grading group is a Grothendieck invariant, then in the first case $U({\mathcal C})$ is isomorphic to $\mathbb Z_2$ if $n$ is even and is trivial if $n$ is odd. But $G({\mathcal C})$, which is also a Grothendieck invariant, is isomorphic to $\mathbb Z_2 \times \mathbb Z_2$ if $n$ is even and is isomorphic to $\mathbb Z_2$ if $n$ is odd, by Proposition \ref{fusion_D_n}. Then $U({\mathcal C})$ is not isomorphic to $\widehat{G({\mathcal C})}$, for any $n$. Therefore ${\mathcal C}$ is not modular, by \cite[Theorem 6.2]{gel-nik}. Similarly, if ${\mathcal C}$ is Grothendieck equivalent to $\Rep \widetilde D_{2n+1}$, we have $U({\mathcal C}) \simeq \mathbb Z_2$ and $G({\mathcal C}) \simeq \mathbb Z_4$. Hence ${\mathcal C}$ is not modular in this case either. This shows (i).
Notice that the assumption ${\mathcal C} = {\mathcal C}_{\ad}$ implies that ${\mathcal C}$ is Grothendieck equivalent to $\Rep D_n$, for some $n$ odd. Then
(ii) follows immediately from the fusion rules of $\Rep D_n$, with $n$ odd (see Proposition
\ref{fusion_D_n}). Since, by (i), ${\mathcal C}'$ is not trivial, then $G({\mathcal C}') \neq
\textbf{1}$, because $\cd({\mathcal C}') \subseteq \{ 1, 2\}$ (\textit{c.f.} Remark \ref{orderG[X]}). By part (i), $G({\mathcal C}') =
G({\mathcal C})$ and (iii) follows. \end{proof}
\begin{remark} \label{d_n_impar}If ${\mathcal C}$ is a fusion category as in Lemma \ref{lema-dn}, then the assumption ${\mathcal C} = {\mathcal C}_{\ad}$ is equivalent to saying that ${\mathcal C}$ is Grothendieck equivalent to $\Rep D_n$, for some $n \geq 1$ \emph{odd}. \end{remark}
\begin{lemma}\label{genconad} Suppose that ${\mathcal C} = {\mathcal C}_{\ad}$. Then ${\mathcal C}$ is generated by fusion subca\-tegories ${\mathcal D}_1, \dots, {\mathcal D}_s$, $s \geq 1$, where ${\mathcal D}_i$ is Grothendieck equivalent to $\Rep D_{n_i}$ and $n_i$ is an odd natural number, for all $i = 1, \dots, s$.
\end{lemma}
\begin{proof} Let ${\mathcal C} = {\mathcal C}[X_1, \ldots, X_s]$ for some simple objects $X_1, \ldots, X_s$. Let ${\mathcal D}_i = {\mathcal C}[X_i]$ be the fusion subcategory generated by $X_i$, $i = 1, \ldots, s$. By Lemma \ref{generadores}, $({\mathcal D}_1)_{\ad}, \ldots, ({\mathcal D}_s)_{\ad}$ generate ${\mathcal C}$ as a fusion category. Hence, it is enough to consider only those simple objects $X_i$ whose Frobenius-Perron dimension equals $2$ (otherwise, $\FPdim X_i = 1$ and $X_i\otimes X_i^* \simeq \textbf{1}$).
Moreover, iterating the application of Lemma \ref{generadores}, we may further assume that $|G[X_i]| = 2$, for all $i = 1, \dots, s$. Thus we have a decomposition $X_i \otimes X_i^* \simeq \textbf{1} \oplus g_i \oplus X_i'$, where $G[X_i] = \{ \textbf{1}, g_i\}$ and $X_i'$ is a self-dual simple object of Frobenius-Perron dimension $2$. Since $X_i\otimes X_i^*$ generates $({\mathcal D}_i)_{\ad}$, the above reductions allow us to assume that ${\mathcal D}_i = {\mathcal C}[X_i]$ with $X_i$ simple objects of ${\mathcal C}$ such that $\FPdim X_i = 2$ and $X_i \simeq X_i^*$, $\forall i = 1, \ldots, s$.
We claim that we can choose the $X_i$'s in such a way that $({\mathcal D}_i)_{\ad}\simeq {\mathcal D}_i$. By \cite[Theorem 4.2; Remark 4.4]{NaR}, ${\mathcal D}_i$ is Grothendieck equivalent to $\Rep D_{n_i}$ or to $\Rep \widetilde D_{2n_i+1}$.
Iterating the application of Lemma \ref{generadores} and using Lemma \ref{centers}, we obtain that
${\mathcal C} = {\mathcal C}[{\mathcal D}_1, \ldots, {\mathcal D}_s]$, with ${\mathcal D}_j$ a fusion subcategory of ${\mathcal C}$ Grothendieck equivalent to $\Rep D_{n_j}$, $n_j$ odd, for all $j = 1, \ldots, s$, as we wanted. \end{proof}
\subsection{Proof of Theorems \ref{soluble} and \ref{gp-ttic}} Let ${\mathcal C}$ be a weakly integral fusion category. It follows from \cite[Theorem 3.10]{gel-nik} that either ${\mathcal C}$ is integral, or ${\mathcal C}$ is a $\mathbb Z_2$-extension of a fusion subcategory ${\mathcal D}$. In particular, if ${\mathcal C} = {\mathcal C}_{\ad}$, then ${\mathcal C}$ is necessarily integral.
\begin{lemma}\label{prod-simples-categorico} Let ${\mathcal C}$ be fusion category and let $X, X'$ be simple objects of ${\mathcal C}$. Then the following are equivalent: \begin{enumerate} \item[(i)] The tensor product $X^*\otimes X'$ is simple.
\item[(ii)] For every simple object $Y \neq \textbf{1}$ of ${\mathcal C}$, either $m(Y, X\otimes X^*) = 0$ or $m(Y, X'\otimes X'^*) = 0$. \end{enumerate}
In particular, if $X^*\otimes X'$ is not simple, then ${\mathcal C}[X]_{\ad} \cap {\mathcal C}[X']_{\ad}$ is not trivial. \end{lemma}
\begin{proof} The equivalence between (i) and (ii) is proved in \cite[Lemma 6.1]{BN} in the case where ${\mathcal C}$ is the category of (co)representations of a semisimple Hopf algebra. Note that the proof \textit{loc. cit.} works in this more general context as well.
\end{proof}
\begin{proof}[Proof of Theorem \ref{soluble}] The proof is by induction on $\FPdim {\mathcal C}$. As pointed out at the beginning of this subsection, if ${\mathcal C}$ is not integral, then it is a $\mathbb Z_2$-extension of a fusion subcategory ${\mathcal D}$. Since ${\mathcal D}$ also satisfies the assumptions of the theorem, then ${\mathcal D}$ is solvable, by induction. Hence ${\mathcal C}$ is solvable as well.
We may thus assume that ${\mathcal C}$ is integral. Therefore $\cd({\mathcal C}) = \{1, 2 \}$ and the results of the previous subsection apply. By Lemma \ref{noad}, we may assume that ${\mathcal C} = {\mathcal C}_{\ad}$. Then it follows from Lemma \ref{genconad} that ${\mathcal C} = {\mathcal C}[{\mathcal D}_1, \ldots, {\mathcal D}_s]$, with ${\mathcal D}_j$ Grothendieck equivalent to $\Rep D_{n_j}$, $n_j$ odd, $\forall j = 1, \ldots, s$.
By Lemma \ref{lema-dn}, $G({\mathcal D}_j) = \{\textbf{1}, g_j\}$, $\forall j = 1, \ldots, s$. We claim that $g_i = g_j$ $\forall 1\leq i, j \leq s$. Indeed, let ${\mathcal D}_j = {\mathcal C}[X^{(j)}]$, where $X^{(j)} = X_1^{(j)}$ in the notation of Proposition \ref{D_n}. Then we have $(X^{(j)})^{\otimes 2} = \textbf{1}\oplus g_j \oplus X_2^{(j)}$. Fix $1\leq i, j \leq s$. Since ${\mathcal C}$ has no simple objects of Frobenius-Perron dimension $4$ then $g_i = g_j$ or $X_2^{(j)}\simeq X_2^{(i)}$, by Lemma \ref{prod-simples-categorico}. In the first case we are done. In the second case, we note that $\{1, g_j\} = G[X_2^{(j)}] = G[X_2^{(i)}] = \{1, g_i\}$. Then $g_j = g_i$, as claimed. Let $g = g_j = g_i$.
By Lemma \ref{lema-dn}, $g\in {\mathcal D}_i'$, for all $i = 1, \ldots, s$. Since ${\mathcal D}_i$, $1\leq i \leq s$, generate ${\mathcal C}$ then $g \in {\mathcal C}'$. It follows from Theorem \ref{equiv} (ii) that ${\mathcal C}$ is the equivariantization by $\mathbb Z_2$ of a braided fusion category $\widetilde {\mathcal C}$. In particular, $\FPdim \widetilde {\mathcal C} = \FPdim {\mathcal C} / 2$ and $\cd (\widetilde {\mathcal C}) \subseteq \{1,2\}$, by \cite[Proof of Proposition 6.2 (1)]{ENO2}, \cite[Lemma 7.2]{NP}. By inductive hypothesis, $\widetilde {\mathcal C}$ is solvable. Then ${\mathcal C}$, being the equivariantization of a solvable fusion category by a solvable group is itself solvable \cite[Proposition 4.5 (i)]{ENO2}. \end{proof}
\begin{theorem}\label{morita-ccad} Let ${\mathcal C}$ be a weakly integral braided fusion category such that $\FPdim X \leq 2$ for every simple object $X$ of ${\mathcal C}$. Assume in addition that ${\mathcal C} = {\mathcal C}_{\ad}$. Then ${\mathcal C}$ is tensor Morita equivalent to a pointed fusion category ${\mathcal C}(A \rtimes \mathbb Z_2, \tilde \omega)$, where $A$ is an abelian group endowed with an action of $\mathbb Z_2$ by group automorphisms, and $\tilde \omega$ is a certain $3$-cocycle on the semidirect product $A \rtimes \mathbb Z_2$. \end{theorem}
\begin{proof} The assumption ${\mathcal C} = {\mathcal C}_{\ad}$ implies that ${\mathcal C}$ is integral. Hence we may assume that $\cd({\mathcal C}) = \{ 1, 2\}$. By Lemma \ref{genconad}, ${\mathcal C}$ is generated by fusion subcategories ${\mathcal D}_1, \dots, {\mathcal D}_s$, $s \geq 1$, where ${\mathcal D}_i$ is Grothendieck equivalent to $\Rep D_{n_i}$ and $n_i$ is an odd natural number, for all $i = 1, \dots, s$. Furthermore, as in the proof of Theorem \ref{soluble}, the assumption that ${\mathcal C} = {\mathcal C}_{\ad}$ implies that $G({\mathcal D}_i) = \{ \textbf{1}, g\}$, for all $1\leq i \leq s$, and ${\mathcal C}[g] \simeq \Rep \mathbb Z_2$ is a tannakian subcategory of the M\" uger center ${\mathcal C}'$. So that ${\mathcal C} \simeq \tilde {\mathcal C}^{\mathbb Z_2}$ is an equivariantization of a braided fusion category $\tilde {\mathcal C}$.
Equivariantization under a group action gives rise to exact sequences of fusion categories \cite[Subsection 5.3]{tensor-exact}. In our situation we have an exact sequence of braided tensor functors \begin{equation}\label{sec-c}\Rep \mathbb Z_2 \to {\mathcal C} \overset{F}\to \tilde {\mathcal C}.\end{equation} In addition, since ${\mathcal C}[g] \subseteq {\mathcal D}_i$, then \eqref{sec-c} induces by restriction an exact sequence \begin{equation}\label{sec-di}\Rep \mathbb Z_2 \to {\mathcal D}_i \to \tilde {\mathcal C}_i,\end{equation} for all $i = 1, \dots, s$, where $\tilde {\mathcal C}_i$ is the essential image of ${\mathcal D}_i$ in $\tilde {\mathcal C}$ under the functor $F$. Hence $\tilde {\mathcal C}_i$ is a fusion subcategory of $\tilde {\mathcal C}$, for all $i$, and moreover $\tilde {\mathcal C}_1, \dots, \tilde {\mathcal C}_s$ generate $\tilde {\mathcal C}$ as a fusion category. Note in addition that $\cd(\tilde {\mathcal C}), \cd(\tilde {\mathcal C}_i) \subseteq \{ 1, 2\}$, for all $i = 1, \dots, s$. On the other hand, exactness of the sequence \eqref{sec-di} implies that $2n_i = \FPdim {\mathcal D}_i = 2 \FPdim \tilde {\mathcal C}_i$ \cite[Proposition 4.10]{tensor-exact}. Hence $\FPdim \tilde {\mathcal C}_i = n_i$ is an odd natural number.
Since $\tilde {\mathcal C}_i$ is an integral braided fusion category, then the Frobenius-Perron dimension of every simple object of $\tilde {\mathcal C}_i$ divides the Frobenius-Perron dimension of $\tilde {\mathcal C}_i$ \cite[Theorem 2.11]{ENO2}. Thus we get that $\FPdim Y = 1$, for all $Y \in \Irr (\tilde {\mathcal C}_i)$. That is, $\tilde {\mathcal C}_i$ is a pointed braided fusion category, for all $i = 1, \dots, s$. Since $\tilde {\mathcal C}_1, \dots, \tilde {\mathcal C}_s$ generate $\tilde {\mathcal C}$ as a fusion category, then $\tilde {\mathcal C}$ is also pointed. Therefore $\tilde {\mathcal C} \simeq {\mathcal C}(A, \omega)$ as fusion categories, where $A$ is an abelian group and $\omega \in H^3(A, k^{\times})$.
Group actions on pointed categories were classified by Tambara \cite{tambara}. In view of \cite[Theorem 4.1]{tambara} and \cite[Proposition 3.2]{nik}, the fusion category ${\mathcal C} \simeq \tilde {\mathcal C}^{\mathbb Z_2}$ is tensor Morita equivalent to a pointed category ${\mathcal C}(A \rtimes \mathbb Z_2, \tilde \omega)$, where the semidirect product $A \rtimes \mathbb Z_2$ is with respect of the induced action of $\mathbb Z_2$ on the group $A$ of invertible objects of $\tilde {\mathcal C}$, and $\tilde \omega$ is a certain $3$-cocycle on $A \rtimes \mathbb Z_2$. \end{proof}
\begin{proof}[Proof of Theorem \ref{gp-ttic}.]
The proof is an immediate consequence of Theorem \ref{morita-ccad}. \end{proof}
\begin{remark} Let ${\mathcal C}$ be a braided fusion category such that $\cd({\mathcal C}) = \{ 1, 2 \}$. Suppose that ${\mathcal C}$ is nilpotent. By \cite[Theorem 1.1]{DGNO} ${\mathcal C}$ admits a unique decomposition (up to the order of factors) into a tensor product ${\mathcal C}_1 \boxtimes \dots \boxtimes {\mathcal C}_m$, where ${\mathcal C}_i$ are braided fusion categories of Frobenius-Perron dimension $p_i^{m_i}$, for some pairwise distinct prime numbers $p_1, \dots, p_m$. Then ${\mathcal C}_i$ is an integral braided fusion category, for all $i = 1, \dots, m$, and by \cite[Theorem 2.11]{ENO2}, we get that ${\mathcal C}_i$ is pointed whenever $p_i > 2$. Hence ${\mathcal C} \simeq {\mathcal C}_1 \boxtimes \mathcal B$ as braided fusion categories, where ${\mathcal C}_1$ is a braided fusion category of Frobenius-Perron dimension $2^m$ such that $\cd({\mathcal C}_1) = \{ 1, 2 \}$, and $\mathcal B$ is a pointed braided fusion category. \end{remark}
\end{document} |
\begin{document}
\title{Effects of spatial dispersion on Self--induced transparency in two--level media} \author{Zoran Ivi\'c} \affiliation{University of Belgrade, Vin\v ca Institute, PO Box 522, 11001 Belgrade, Serbia} \affiliation{National University of Science and Technology MISiS, Leninsky prosp. 4, Moscow 119049, Russia} \author{Dalibor \v Cevizovi\'c} \author{\v Zeljko Pr\v zulj} \affiliation{University of Belgrade, Vin\v ca Institute, PO Box 522, 11001 Belgrade, Serbia} \author{Nikos Lazarides} \affiliation{National University of Science and Technology MISiS, Leninsky prosp. 4, Moscow 119049, Russia} \affiliation{Crete Center for Quantum Complexity and Nanotechnology, Department of Physics, University of Crete, P. O. Box 2208, Heraklion 71003, Greece} \author{G.P.Tsironis} \affiliation{National University of Science and Technology MISiS, Leninsky prosp. 4, Moscow 119049, Russia} \affiliation{Crete Center for Quantum Complexity and Nanotechnology, Department of Physics, University of Crete, P. O. Box 2208, Heraklion 71003, Greece} \begin{abstract} We study the effects of dispersion in carrier waves on the properties of soliton self--induced transparency (SIT) in two--level media. We found substantial impact of dispersion effects on typical SIT soliton features. For example, the degree of SIT pulse velocity slowing down (acceleration) is determined by the ratio of the incoming pulse frequency over atomic transition frequency -- $x=\omega/\omega_0$. Specifically, an immediate pulse stopping is predicted for absorbing media when pulse duration time exceeds some critical value. In the sharp line limit stopping may emerge only for frequency ratio above unity, while for the inhomogeneously broadened systems it appears irrespective of the value of $x$. Analysis performed on the basis of the McCall \& Hahn \textit{Area theorem} implies that pulse stopping is achieved when Beer's absorption coefficient approaches infinity, that is, pulse energy is fully absorbed in the medium.
In the case of amplifying media super-luminal motion is predicted as in the case of resonance. However, there is a lowest value in the frequency ratio below which the pulse velocity tends to the sub-luminal region. These new features of the SIT phenomenon open novel ways on how it may be exploited for the control of electromagnetic wave radiation in two-level media. This may be achieved by varying frequency ratio. \end{abstract} \maketitle \section{Introduction} Propagation of short, intense, electromagnetic (EM) pulses, resonantly interacting with two--level atomic media, lead to the emergence of a number of remarkable \textit{coherent cooperative quantum} phenomena such as Dicke superradiance, self--induced transparency (SIT), electromagnetically induced transparency (EIT), photon--echo, coherent population oscillation ... \cite{sr1,sr2,aleb,sit1,sit2,sit3,nlphys,eit1,eit2,pheh3,titt,cpo1,cpo2}. During the last two decades these phenomena attracted particular attention due to the potential for practical applications, i.e., for realization of \textit{quantum memories} \cite{QM1,QM2} that are devices fundamental for the future technologies for quantum communication and processing \cite{comm1,comm2,qc4,devo,geo,para}. In that context the achievement of control over the propagation of EM radiation by matter \cite{QMeng,slow1,zag1,slowrev}, and similarly, the manipulation of atoms, natural and the artificial ones (superconducting or quantum dot qubits), by light \cite{qbmanip}, became a really important issue in physics.
Great practical successes were achieved using EIT which turns out to be very useful allowing substantial slowing down and even stopping of \textit{light pulses} in media composed of natural atoms (atomic vapors \cite{frez2,comm2,frez3,frez4,frez5,frez6,frez7,frezth1,frezth2}). In this way, the information carried by the pulse may be temporarily transferred to the medium. Pulses can then be `revived' with their original information intact. Nevertheless, EIT -- based techniques have certain limitations in potential practical applications due to the narrow transparency spectrum \cite{OEX1} and vulnerability due to inevitable coupling with the environment leading to relaxation processes (\textit{homogeneous broadening}) and dephasing (\textit{inhomogeneous broadening}). Relaxation effects may be suppressed by exploiting high-intensity pulses. However, this may cause damage to the medium. For that reason, the development of analogous techniques but adjusted for the microwave domain may be useful. These novel trends rely on engineered media, quantum metamaterials (QMM), built with artificial ``atoms'' made typically of superconducting circuits. This had motivated the investigations of a possible emergence of \textit{collective coherent quantum} phenomena in QMMs and their implementation in design of quantum technological devices \cite{qbeit1,qbeit2,qbeit3,alu,arxiv,fqb,mand,scir}. The use of QMMs has shown that, in parallel with EIT--based techniques, the use of SIT could be very useful. Specifically, employing SIT could make it possible to phase out \textit{inhomogeneous broadening}, the main obstacle to quantum coherence, and actually turn it into an advantage. This is due to the fact that inhomogeneous broadening is required for the emergence of SIT. We note that the whole concept of SIT relies on inhomogeneous broadening, since the Area theorem, which is fundamental to it, relies on inhomogeneous broadening.
Additionally, relaxation effects (homogeneous broadening) may be avoided using (ultra)short light pulses with the duration far less than both transverse and longitudinal relaxation times.
To recall, SIT is a lossless propagation of an optical pulse through an otherwise opaque optical medium composed of a large number of \textit{inhomogeneously broadened} two-level atoms. According to the McCall -- Hahn \textit{area theorem}, which is the main theoretical result of the whole concept, pulses travel through the media with no gain or loss when their area $\theta(x)\sim \int dt E(x,t) =n\pi$, where $E(x,t)$ is the magnitude of electric field. For even (odd) $n$ pulses are stable (unstable). When the pulse area is below $\pi$, the pulse gradually weakens with traveled distance and finally disappears being absorbed by the medium; on the other side, pulses whose input areas are slightly above $\pi$ increase their area up to a $2\pi$ after which point continue stable propagation as $2\pi$--soliton. Finally, all pulses with the areas $n\pi$ for $n>2$ split in two, three, etc solitons.
While the area theorem is a purely theoretical result, obtained under very restrictive assumptions, it is nevertheless surprising how accurately it describes most of the main features of SIT as confirmed by numerous experimental results \cite{aleb}. For that reason, at least concerning the original problem -- SIT in a system of \textit{atomic vapors}, its further theoretical consideration could be superfluous. However, recent proposals \cite{alu,arxiv,fqb,mand,scir} of the practical applications of the SIT effect in media built of artificial atoms, quantum metamaterials (QMM) for example, reopen the importance of theoretical studies, especially of those effects which have not been considered. One such issue is the study of the dispersion effects, i.e., the dependence of the carrier wave frequency on its wave vector -- $\omega = \omega (k)$. In the original context \cite{sit1,sit2,sit3,nlphys} it was assumed that the dispersion law satisfies the simple relation $ \omega (k)= ck$, where $c$ is the speed of light in the medium. However, as shown recently in the context of SIT in QMM \cite{fqb,mand,scir} and exciton SIT \cite{tal1,tal2} this issue cannot be neglected easily and, under some circumstances, turns out to be of particular interest resulting in some very peculiar features.
In this article, we study the effects of dispersion on SIT within the original model introduced by McCall and Hahn \cite{sit1,sit2}. We base our study on a system of \textit{reduced Maxwell--Bloch equations} (RMBE) obtained from the original one by eliminating fast oscillating terms in accordance with \textit{slowly varying envelope and phase approximation} (SVEA) \cite{aleb} leading, in final instance, to well known solutions \cite{aleb,sit1,sit2,sit3,nlphys}. A brief and very instructive pedagogical overview may be found in \cite{nlphys}. \section{Evaluation of the dispersion law} The starting point of our analysis is RMBEs found through consistent application of SVEA.
\begin{eqnarray}\label{SE02A} \begin{split}
\dot{S}_x&=&-(\Delta+\dot{\phi})S_y,\hspace{8em} & \text{a)}\\
\dot{S}_y&=&(\Delta+\dot{\phi})S_x+\frac{\kappa}{2}\mathcal{E} S_z,\hspace{4em} & \text{b)}\\
\dot{S}_z &=&-\frac{\kappa}{2}\mathcal{E} S_y.\hspace{9em} & \text{c)} \end{split} \end{eqnarray} Here, $S_i(x,t)$, $\mathcal{E}(x,t)$ and $\phi(x,t)$ are new \textit{slow} dynamical variables corresponding, respectively, to transformed atomic functions, envelope and phase of the EM pulse. More precisely, $S_x$ and $S_y$ correspond to 'dispersive' and 'absorptive' components of induced polarization of the medium: $P(x,t)=\mathcal{N} d \langle s_x(z,t)\rangle$, where $ s_x$ -- quantum mechanical expectation value of a $x$ component of Pauli spin matrix ($\sigma_x$) in a state being the superposition of ground and the excited ones. In terms of 'slow' variables it reads $s_x=S_x\cos\Psi(x,t)+S_y\sin\Psi(x,t); \;\; \Psi(x,t)=kx-\omega t+\phi(x,t)$ ; $\mathcal{N}$ stays for the concentration of atoms, $d$ is a dipole transition matrix element, $\Delta=\omega_0-\omega(k)$ with $\omega$ and $k$ are the frequency and the wave vector of a carrier wave and $\omega_0$ corresponding to atom transition frequency. The angular brackets in the last system refer to the fact that in practice we deal with a system of the inhomogeneously broadened atoms. That is, $\omega_0$ corresponds to an atomic transition frequency that is \textit{different for each atom}. In the case of a system composed of a large number of atoms, all of these frequencies may be taken continuously distributed around some mean value. Since the large wavelength ($\lambda \gg d$) EM pulse interacts simultaneously with large number of IH broadened "atoms" the collective back--action on the propagating pulse must be described in terms of the average "polarization" as follows--$\langle ....\rangle=\int_{0}^{\Delta}(....) \mathcal{G}(\Delta') d\Delta'$ where $\mathcal{G}(\Delta)$ is normalized to unity ($\int^{\infty}_0 d\Delta \mathcal{G}(\Delta)=1$) line--shape function. Finally, $c=c_0/n$ is the speed of light in TL medium with index of refraction $n$. 
In addition to the above system, there are two more equations arising as a result of the transformation of a single \textit{second order} equation to a two first order ones for the amplitude and phase.
\begin{equation}\label{SE02B} \begin{split} \frac{c^2}{2\omega}\Big(k^2-\frac{\omega^2}{c^2}\Big)\mathcal{E}+\Big(\dot{\phi}+\frac{kc^2}{\omega}{\phi}'\Big)\mathcal{E}=\frac{\gamma\omega^2_0}{2\omega}\langle S_x\rangle,\qquad & \text{a)}\\ \dot{\mathcal{E}}+\frac{kc^2}{\omega}{\mathcal{E}}'=\frac{\gamma\omega^2_0}{2\omega}\langle S_y\rangle, \; \gamma = 4\pi \mathcal{N}d.\hspace{3em} & \text{b)} \end{split} \end{equation}
The necessity for the consideration of dispersion effects may be viewed on the basis of equations (2) and (3). We first recall that the simplest analytic solutions of these equations exist in resonance $\omega = \omega_0$ and for stationary phase $\dot \phi\equiv \phi'=0.$ In that case the system of equations (2) and (3) greatly simplifies and may be solved using trigonometric parameterization in terms of the Bloch angle ($S_y=S_0\sin\theta$ and $S_z=S_0\cos\theta$) \cite{aleb,sit1,sit2,sit3,nlphys}, which satisfies the sine--Gordon equation. In addition, employing the usual initial conditions that population inversion $S_0=S_z(-\infty)\equiv\pm 1$, from system (2), we obtain that $S_x=const\equiv 0.$ Its immediate consequence is the dispersion law $\omega = \pm kc$ (see eq. 3.a) that holds only at resonance. Out of resonance, this relation is only approximate and a correct treatment requires determination of its true form. In this case parametrization in terms of Bloch angles is again possible but demands an \textit{ad hoc} assumption known as the \textit{factorization ansatz}, introducing the \textit{spectral response function}: $S_y(\Delta)=F(\Delta)S_y(\Delta=0)$. This approach leads once again to the SG equation for the Bloch angle. Knowing that $S_y(\Delta=0)=S_0\sin\theta$ and that $\dot{\theta}=-\frac{\kappa}{2}\mathcal{E}$, the SG equation for the Bloch angle becomes \begin{equation}\label{SG} \ddot{\theta}+\frac{kc^2}{\omega}\dot{\theta'}=\frac{\gamma\omega^2_0\kappa}{4\omega}{\langle F(\Delta)\rangle}\sin\theta. \end{equation} Solutions of this equation are well known; they are $2\pi$ solitons. For the spectral function we use the known relation \cite{aleb,sit1,sit2} connecting it with the detuning $\Delta$ and pulse duration time $\tau_p$.
However, eq. (4) and thus its solutions still contain a single yet undetermined parameter $k$ that appears in the evaluation of the soliton delay ratio ($v/c$), absorption coefficient ($\alpha$) and the area theorem. In other words, all these functions are functions of $k$ whose explicit knowledge is required to examine the potential usability of SIT in practical applications. Thus, the relation (\ref{SG}) is useless in that respect. In order to find the dispersion law, we need to go back to the system and assume that the pulse propagates undistorted in a soliton form. This enables us to take that all system variables depend on spatial coordinates and the time only through the variable $\tau=t-\frac{x}{v}$ (i.e., passing to the moving frame). Accordingly, systems (\ref{SE02A}) and (\ref{SE02B}) become \begin{eqnarray}\label{mf1} \begin{split}
{S}_{x,\tau}&=&-(\Delta+\phi_{\tau})S_y,\hspace{8em} & \text{a)}\\
{S}_{y,\tau}&=&(\Delta+{\phi}_{\tau})S_x+\frac{\kappa}{2}\mathcal{E} S_z,\hspace{4em} & \text{b)}\\
S_{z,\tau} &=&-\frac{\kappa}{2}\mathcal{E} S_y.\hspace{9em} & \text{c)} \end{split} \end{eqnarray} \begin{equation}\label{mf2} \begin{split} \Big({\phi}_{\tau}+\frac{G}{2\omega\Gamma}\Big)\mathcal{E}=\frac{\gamma\omega^2_0}{2\omega\Gamma}\langle S_x\rangle,\qquad & \text{a)}\\ \mathcal{E}_{\tau}=\frac{\gamma\omega^2_0}{2\omega\Gamma}\langle S_y\rangle, \hspace{5em} & \text{b)}\\ \Gamma=1-\frac{c^2 k}{\omega v}, \;\; G=\frac{\omega^2-c^2 k^2}{2\omega}. \;\;\;\; & \text{c)} \end{split} \end{equation}
The last two equations, combined with the third and first one of system \ref{mf1}, may be easily integrated to give $S_z$ and the phase. It is trivial in the \textit{sharp line} limit, while for the finite broadening we have to employ the \textit{factorization ansatz} which enables one to express $\langle S_y\rangle$ through $S_y$. For that purpose we take $S_y$ in factorized form and its average as: $\langle S_y(\Delta)\rangle =\langle F(\Delta)S_y(\Delta=0)\rangle \equiv \langle F(\Delta)\rangle S_y(\Delta = 0)\;\; \mathrm{with}\;\; F(\Delta)=\frac{1}{1+\Delta^2\tau^2_p}$. Now we multiply the last expression by $1=\frac{F(\Delta)}{F(\Delta)}$. This simple manipulation yields $\langle S_y\rangle = \frac{\langle F(\Delta)\rangle }{F(\Delta)}S_y$.
Employing this approach in the last equation we may combine it with the third one in \ref{mf1} which finally yields: \begin{equation}\label{ESz} S_z=S_0-\frac{\omega\kappa\Gamma}{2\omega^2_0\gamma}\frac{F(\Delta)}{\langle F(\Delta)\rangle}\mathcal{E}^2. \end{equation}
We note that $S_0$ is the initial population of TLS where $S_0=-1$ means that all TLS's is in their ground state, while $S_0=+1$ means that we have all TLS's in the excited state.
We may now focus on the equation for the phase--the first one in \ref{mf2}. We first differentiate it with respect to $\tau$, then we use the first equation in \ref{mf1} to eliminate $\langle \dot S_x \rangle$. Also, in a final step we use the same trick as above to express $\langle S_y\rangle$ through $S_y$. This finally yields:
\begin{equation}\label{EQ04} \phi_{\tau\tau}\mathcal{E}+2\phi_{\tau}\mathcal{E}_{\tau}+ \Big(\tilde{\Delta}-\frac{G}{\Gamma}\Big)\mathcal{E}_{\tau}=0,\;\; \tilde \Delta=\frac{\langle\Delta F(\Delta)\rangle}{\langle F(\Delta) \rangle} \nonumber \end{equation}
Its integration yields:
\begin{equation}\label{EQ06} \phi_{\tau}=\frac{1}{2}\Big(\tilde{\Delta}-\frac{G}{\Gamma}\Big) \nonumber \end{equation}
\noindent Here, we used the initial condition $\lim_{\tau\rightarrow -\infty}\mathcal{E}=0$. At this place, it is necessary to recall that the phase $\phi$ was introduced through the overall phase $\psi(x,t)=kx-\omega t+\phi(x,t)$ in which linear terms in $x$ and $t$ are already accounted for independently of $\phi$, which, therefore, cannot contain terms \textit{linear} in ($x$ and $t$). That is, we must take $\phi_{\tau}=0$. In such a way, the last relation implies:
\begin{equation}\label{DR} \tilde{\Delta}-\frac{G}{\Gamma}=0 \end{equation}
This is a quadratic equation for the pulse wave vector $k$. It may be solved for $k$ as a function of $\tilde{\Delta}$, the ratio $\omega/\omega_0$ and the pulse velocity as a parameter. However, in view of the analysis of experimental data, it is more convenient to examine $k$ as a function of the pulse duration instead of its velocity.
For that purpose we use known solutions: \begin{eqnarray}\label{soliton} \nonumber\mathcal{E}&=&\mathcal{E}_0\mathrm{sech}(\tau/\tau_p)\\ \mathcal{E}_0&=&\sqrt{\frac{4\gamma S_0}{\kappa\omega\Gamma}\langle F(\Delta)\rangle},\; \;\mathcal{E}_0\tau_p=\frac{4}{\kappa},\end{eqnarray} to obtain $\Gamma=\frac{\gamma \kappa S_0}{4\omega_0 x}\langle F(\Delta)\rangle \omega^2_0\tau_p^2 $ and eliminate it from (\ref{DR}). Our final results, expressed in terms of a dimensionless variables: $K=\frac{kc}{\omega_0}$-- the carrier wave quasimomentum, the pulse velocity $V=\frac{v}{c}$ and frequency ratio $x=\frac{\omega}{\omega_0}$, read: \begin{eqnarray}\label{DL1} \nonumber K_{\pm}&=&\sqrt{x^2-2\frac{\tilde\Delta\nu S_0}{\omega_0}\langle F(\Delta)\rangle\omega^2_0\tau^2_p},\\ V_{\pm}&=&\frac{K}{ x-\nu S_0\langle F(\Delta)\rangle\omega^2_0\tau^2_p}.\end{eqnarray} In the last expressions sign $+$ ($-$) corresponds to initial population inversion $S_0=-1$ ($S_0=1$). Here $\nu = \frac{\gamma\kappa}{4\omega_0}$, hereafter called material parameter, characterizes the strength of field--atom interaction. Apparently, the pulse propagation is determined both on its characteristics (duration time) and by the properties of the material.
At this stage, before detailed discussion of the pulse propagation, we perform some preliminary calculations, in order to see how the initial conditions are reflected on the nature of the solution. To this end we derive an alternate form of the dispersion law. \begin{equation}\label{dlaw1} K_{\pm}=-\frac{\tilde{\Delta}}{\omega_0 V}\pm \sqrt{(x+\frac{\tilde{\Delta}}{\omega_0})^2+\frac{\tilde{\Delta}^2}{\omega^2_0}(\frac{1}{V^2}-1)}. \end{equation} From the expression for the pulse amplitude we immediately find that the existence of solutions requires $\Gamma S_0>0$. In the case of simple dispersion $\omega = ck$ it may be addressed to sub--luminal or super--luminal propagation for $S_0=-1$ or $S=1$. The same conclusion holds here and may be easily proved on the basis of (\ref{dlaw1}). Note that condition $\Gamma S_0>0$ may be rewritten as \begin{equation}\label{cond} \Big(1- \frac{K}{xV}\Big)S_0>0.\end{equation} That is, $S_0 = -1$ requires $K>xV$, which, together (\ref{dlaw1}), after some straightforward calculation, yields $x^2(1-V^2)>0$ implying sub--luminal propagation. The same reasoning results with $x^2(1-V^2)<0$ for $S_0=1$ leading to super--luminal motion. \begin{figure}
\caption{\textit{Sharp line limit}: illustration of the soliton dispersion law $k(\omega)$ for a few different values of scaled pulse width: $\tau_0=\omega_0\tau_p$. Upper pane -- absorbing media $S_0=-1$. Lower pane -- amplifying media $S_0=1$. }
\end{figure} \section{Discussion} \subsection{Sharp line limit} In this case line shape function tends to delta function so that: $\tilde\Delta=\Delta$ and $\langle F(\Delta)\rangle=F(\Delta)$ which significantly simplifies further calculations and both, dispersion law and velocity as function of frequency ratio $x=\omega/\omega_0$ attain simple analytic forms: \begin{eqnarray}\label{analyt} \nonumber K_{\pm}&=&\sqrt{x^2-\frac{2\nu S_0(1-x)\tau^2_0}{1+(1-x)^2\tau^2_0}},\\ V_{\pm}&=&\frac{K}{x-\frac{\nu S_0\tau^2_0}{1+(1-x)^2\tau^2_0}} \end{eqnarray}
We have graphically presented our results in Figs.~1 and 2 for absorbing ($S_0=-1$) and amplifying ($S_0=1$) initial conditions.
In both cases, around resonance $\omega/\omega_0\sim 1$ and for short pulses $\tau_0<1$ we observe similar results to those obtained within the linear approximation $\omega = ck$. That is, for absorbing media, only subluminal motion is possible ($v<c$), while the dispersion law attains a simple linear functional dependence and the velocity gradually decreases as a function of duration time. When the pulse duration, for a given $\nu$, exceeds some critical value corresponding to a minimum of the function \begin{equation}\label{crit} \tau^2_0={\frac{x^2}{2\nu (1-x)-(1-x)^2x^2}}, \end{equation} sudden vanishing of $k$ is observed when $\omega/\omega_0 \geq 1$. This indicates immediate pulse stopping. For example, $\tau^{crit}_0\sim 1.94$ for $\nu=1$. Above the critical pulse width two branches in the dispersion law appear. The first branch lies in the interval $0<x<x_1$, while the second one is within $x_2<x<\infty$. $x_1$ and $x_2$ are solutions of the cubic equation $x^3-x^2-2\nu =0$ where $K(x)=0$.
Such behavior of the dispersion law is reflected in the pulse delay, i.e., the $v/c$ dependence, as follows: for $\omega/\omega_0 <1$ we observe the expected behavior, similar to that in the resonance case. That is, $v<c$ always and slowly decreases as a function of pulse duration. However, for $\omega/\omega_0\equiv x_1 >1$ the velocity suddenly vanishes when the pulse duration approaches $\tau^{crit}_0$. Nevertheless, for $\omega/\omega_0<1$ the velocity still decreases as a function of duration time, but now towards some finite value, and the pulse can never be stopped. When $\omega/\omega_0 < 0.5 $ the pulse velocity becomes practically constant. Out of resonance, but for $\omega>\omega_0$, the pulse velocity vanishes for large pulses. As indicated by the behavior of $K(x)$, when $x$ exceeds unity pulse \textit{stopping} occurs.
In the case of amplifying media $S_0=1$ super--luminal motion is predicted as in the case of resonance. However, in contrast to the resonant case, here there exists a particular lowest value of the frequency ratio ($\omega /\omega_0 \sim 1/3 + (\nu/6)\tau^2_0\langle F(\Delta)\rangle$) below which the pulse velocity tends to the sub--luminal region, which contradicts the preceding discussion that for $S_0=1$ only super--luminal pulses exist. For intermediate values of the frequency ratio $v(\tau_0)> c$ exhibits the expected behavior: it increases with $\tau_0$ until it reaches a critical value, different for each $\omega/ \omega_0$, when a sudden drop is observed. \begin{figure}
\caption{\textit{Sharp line limit}: Pulse velocity delay $v/c$ versus dimensionless pulse width $\omega_0\tau_p$ for a few values of the frequency ratio. Upper pane -- absorbing media $S_0=-1$. Lower pane -- amplifying media $S_0=1$. }
\end{figure} \subsection{Influence of the inhomogeneous broadening} For simplicity we took the line shape function in the Lorentzian form: \begin{equation}\label{lsf} \mathcal{G}(\Delta)=\frac{2\tau^*}{\pi}\frac{1}{1+\Delta^2\tau^{*2}} \end{equation} where $\tau^*$ stands for the inhomogeneous broadening relaxation time. This choice enables easy analytic evaluation of the average values in the expressions for the dispersion law and velocity delay: \begin{eqnarray}\label{exact} \nonumber\langle F(\Delta)\rangle&=&\frac{1}{1+\frac{\tau_p}{\tau^*}},\\ \langle \Delta F(\Delta)\rangle&=&\frac{2}{\pi\tau^*}\frac{\ln \frac{\tau^*}{\tau_p}}{1-\frac{\tau^2_p}{\tau^{*2}}},\\ \nonumber\tilde\Delta&=&\frac{2}{\pi\tau^*}\frac{\ln \frac{\tau^*}{\tau_p}}{1-\frac{\tau_p}{\tau^*}}. \end{eqnarray} Accordingly, the dispersion law and pulse delay become: \begin{eqnarray}\label{ihb} \nonumber K_{\pm}&=&\sqrt{x^2+\frac{4S_0\nu \omega_0\tau^*}{\pi}y^4\frac{\ln y}{y^2-1}}, \;\; y=\frac{\tau_p}{\tau^*},\\ V_{\pm}&=&\frac{K_{\pm}}{x-\frac{\nu S_0(\omega_0\tau^*)^{2}y^2}{1+y}} \end{eqnarray} Our results are visualized in Fig.~3. In the upper pane we have plotted, in dimensionless units, the dispersion law as a function of the frequency ratio ($x=\omega/\omega_0$) for a few different values of the ratio of the pulse width over the inhomogeneous broadening relaxation time -- $y=\tau_p/\tau^*$. In the lower pane we have presented the pulse velocity as a function of $y=\tau_p/\tau^*$. The dispersion law exhibits substantially different behavior with respect to that observed within the sharp line limit. This particularly concerns the near resonance case ($\omega/\omega_0 \sim 1$) where our results do not tend to the known ones obtained in the resonance. This is the consequence of the appearance of the constant shifts, determined by the ratio $y$, in the expressions for the dispersion law and pulse delay (\ref{ihb}). In both cases these shifts tend to zero when $\tau^*\gg \tau_p$ and our results approach those obtained within the strict resonance.
The dispersion relation for finite values of $y$ exhibits very specific behavior for absorbing and amplifying media. That is, for absorbing media, for each particular value of $y$ there is a minimal value of the frequency ratio ($x=\omega/\omega_0$) below which there are no solutions for $K$, that is, the pulse does not exist. When $x$ exceeds this minimal value $K(x)$ monotonically increases, approaching linear dependence for large $x$. For large $y$ this limit is approached for the un--physically large values of $x\gg 1$ where the presented theory of SIT does not hold.
For the amplifying media, starting from some minimal value, $K$ monotonically increases approaching, again, linear dependence for large $x$. In contrast to absorbing media pulse exists for all $x$.
In the absorbing media, for each $x$, the pulse velocity exhibits behavior similar to that in the sharp line limit: as a function of $y$ it gradually decays towards zero, reaching it at some critical value specific to each $x$.
In the amplifying media super--luminal pulse motion is predicted: for each particular value of the frequency ratio, $v(y)$ exhibits qualitatively the same behavior as in the case of resonance with no dispersion effects accounted for (represented by the curve constructed of $\diamond$). That is, as a function of $y$, $v/c$ monotonically increases from unity to infinity, while each curve may be recovered from some particular one, say $x=1$, by simply rescaling the frequency ratio. \begin{figure}
\caption{\textit{Illustration of the impact of the inhomogeneous broadening on SIT pulse properties}: Upper pane -- carrier wave quasi--momentum versus frequency ratio. The dispersion relation for absorbing media ($S_0=-1$) is visualized by full lines, while dotted lines correspond to amplifying media. The blue full line corresponds to the resonance case $\omega = kc$. Lower pane -- pulse velocity delay. Full lines correspond to absorbing media $S_0=-1$. Dotted lines correspond to amplifying media. Curves indicated by $\diamond$ stand for resonance.}
\end{figure} \section{Concluding remarks} Our study reveals some new features of the SIT phenomenon stemming from the spatial dispersion of the carrier wave $\omega(k)$. We found that the properties of the SIT pulse substantially depend on the frequency ratio ($x=\omega/\omega_0$). This particularly concerns the delay of the pulse velocity, which may be controlled by varying $x$. In that sense the most interesting consequence is the possibility of full stopping of the SIT pulse. In particular, in the sharp line limit, for each value of $x>1$ the EM pulse is fully stopped (absorbed) by the medium provided that its width (duration time) exceeds some critical value. In the case of inhomogeneously broadened media each EM pulse gets stopped irrespective of the value of $x$, that is, there is no limitation on the value of $x$, which may be arbitrarily low provided that the pulse is wide enough.
In order to relate dispersion with absorption coefficient we derive the Area theorem from the system (\ref{SE02B}) \begin{equation}\label{at} \frac{\partial \theta}{\partial x}=\frac{\beta }{2} \sin \theta,\;\; \beta=\frac{\gamma\omega^2_0\mathcal{G}(0)}{\kappa^2c^2 k}\equiv \frac{2\gamma\omega^2_0\tau^*}{\kappa^2c^2 k}. \end{equation} Apparently, absorption coefficient due to $1/k$ dependence tends to infinity, indicating its full absorption for $k\mapsto 0.$
In conclusion, our results point to possible new means of the control of propagation of EM waves. It relies on the prediction of possible dramatic influence of the frequency ratio $x=\omega/\omega_0$ on carrier wave of SIT pulse and its velocity whose vanishing may be expected for a convenient choice of $x$ and pulse duration time. In systems built of natural atoms, this may not be easily realized due to small values of material constant. Nevertheless, tunability of the parameters of artificial atoms may enhance the predicted effect and make it possible. Also, by applying an additional driving field like in EIT, mixed induced transparency (SIT+EIT), may be achieved where the best features of both effects were exploited as shown in \cite{park}.
\begin{acknowledgments} This work was partially supported by the Ministry of Education, Science and Technological Development of Republic Serbia, Grants No. III - 45010 and OI - 171009, the Ministry of Science and Higher Education of the Russian Federation in the framework of Increase Competitiveness Program of NUST "MISiS" (No. K2-2019-010), implemented by a governmental decree dated 16th of March 2013, N 211. NL also acknowledges support by General Secretariat for Research and Technology (GSRT) and the Hellenic Foundation for Research and Innovation (HFRI) (Grant no.: 203). \end{acknowledgments}
\end{document} |
\begin{document}
\newcommand{{\mathbb{R}}}{{\mathbb{R}}} \newcommand{{\mathbb{Z}}}{{\mathbb{Z}}} \newcommand{{\mathbb{C}}}{{\mathbb{C}}} \newcommand{{\mathbb{Q}}}{{\mathbb{Q}}} \newcommand{{\mathbb{F}}}{{\mathbb{F}}} \renewcommand{{\mathbb{H}}}{{\mathbb{H}}}
\renewcommand{{\mathbf{A}}}{{\mathbf{A}}} \newcommand{{\mathbf{B}}}{{\mathbf{B}}} \newcommand{{\mathbf{C}}}{{\mathbf{C}}} \newcommand{{\mathbf{D}}}{{\mathbf{D}}} \newcommand{{\mathbf{E}}}{{\mathbf{E}}} \newcommand{{\mathbf{F}}}{{\mathbf{F}}} \newcommand{{\mathbf{G}}}{{\mathbf{G}}}
\newcommand{{\mathcal{M}}}{{\mathcal{M}}} \newcommand{{\mathcal{M}}}{{\mathcal{M}}}
\newcommand{{{\sf X}}}{{{\sf X}}}
\newcommand{{\mathbb{G}}}{{\mathbb{G}}}
\newcommand{{\mathrm{i}}}{{\mathrm{i}}}
\renewcommand{{\boldsymbol{a}}}{{\boldsymbol{a}}} \newcommand{{\boldsymbol{b}}}{{\boldsymbol{b}}} \newcommand{{\boldsymbol{c}}}{{\boldsymbol{c}}} \newcommand{{\boldsymbol{d}}}{{\boldsymbol{d}}} \newcommand{{\boldsymbol{t}}}{{\boldsymbol{t}}} \newcommand{{\boldsymbol{q}}}{{\boldsymbol{q}}} \newcommand{{\boldsymbol{p}}}{{\boldsymbol{p}}}
\newcommand{{\bar{z}}}{{\bar{z}}} \newcommand{{\bar{g}}}{{\bar{g}}} \newcommand{{\bar{n}}}{{\bar{n}}} \newcommand{{\bar{x}}}{{\bar{x}}}
\newcommand{{\widetilde{H}}}{{\widetilde{H}}} \newcommand{{\widetilde{T}}}{{\widetilde{T}}} \newcommand{{\tilde t}}{{\tilde t}}
\newcommand{{\varkappa}}{{\varkappa}} \newcommand{\varepsilon}{\varepsilon} \newcommand{\varepsilon^\vee}{\varepsilon^\vee}
\renewcommand{{\mathfrak{g}}}{{\mathfrak{g}}} \newcommand{{\mathfrak{t}}}{{\mathfrak{t}}}
\newcommand{\hookrightarrow}{\hookrightarrow} \newcommand{\isoto}{\overset{\sim}{\to}} \newcommand{\twoheadrightarrow}{\twoheadrightarrow} \newcommand{\labelto}[1]{\xrightarrow{\makebox[1.5em]{\scriptsize ${#1}$}}}
\newcommand{{\bf{GL}}}{{\bf{GL}}} \newcommand{{\bf{SL}}}{{\bf{SL}}} \newcommand{{\bf{Sp}}}{{\bf{Sp}}} \newcommand{{\bf{PSp}}}{{\bf{PSp}}} \newcommand{{{\bf SO}}}{{{\bf SO}}} \newcommand{{\bf{PSO}}}{{\bf{PSO}}} \newcommand{{{\bf Spin}}}{{{\bf Spin}}} \newcommand{{\bf{HSpin}}}{{\bf{HSpin}}} \newcommand{{\bf{PGL}}}{{\bf{PGL}}} \newcommand{{\bf SU}}{{\bf SU}} \newcommand{{\bf PSU}}{{\bf PSU}}
\newcommand{{\rm Hom}}{{\rm Hom}} \newcommand{{\rm Inn}}{{\rm Inn}} \newcommand{{\rm Aut}}{{\rm Aut}} \newcommand{{\rm Lie\,}}{{\rm Lie\,}} \newcommand{{\rm Gal}}{{\rm Gal}} \newcommand{{\rm coker\,}}{{\rm coker\,}} \newcommand{{\rm tors}}{{\rm tors}} \newcommand{{\rm Ext}}{{\rm Ext}} \newcommand{{\rm Stab}}{{\rm Stab}} \newcommand{{\rm res}}{{\rm res}} \newcommand{{\rm Ad}}{{\rm Ad}} \newcommand{{\rm Cl}}{{\rm Cl}} \newcommand{{\rm ad}}{{\rm ad}} \newcommand{{\rm im\,}}{{\rm im\,}} \newcommand{{{\rm id}}}{{{\rm id}}} \newcommand{{\rm diag}}{{\rm diag}}
\newcommand{\operatorname{Orb}}{\operatorname{Orb}} \newcommand{{\rm Orb}}{{\rm Orb}} \newcommand{\Orbs}[1]{ \# \mathrm{Orb}( {#1} ) }
\newcommand{ *+[F]{1} }{ *+[F]{1} } \newcommand{{ \lower0.20ex\hbox{{\text{\Large$\circ$}}}}}{{ \lower0.20ex\hbox{{\text{\Large$\circ$}}}}} \newcommand{{\lower0.20ex\hbox{\text{\Large$\bullet$}}}}{{\lower0.20ex\hbox{\text{\Large$\bullet$}}}} \newcommand{\bc}[1]{{\overset{#1}{{ \lower0.20ex\hbox{{\text{\Large$\circ$}}}}}}} \newcommand{\bcu}[1]{{\underset{#1}{{ \lower0.20ex\hbox{{\text{\Large$\circ$}}}}}}} \newcommand{\bcb}[1]{{\overset{#1}{{\lower0.20ex\hbox{\text{\Large$\bullet$}}}}}} \newcommand{\bcbu}[1]{{\underset{#1}{{\lower0.20ex\hbox{\text{\Large$\bullet$}}}}}} \newcommand{\sxymatrix}[1]{ \xymatrix@1@R=5pt@C=9pt{#1} } \newcommand{\mxymatrix}[1]{ \xymatrix@1@R=0pt@C=9pt{#1} } \newcommand{ \ar@{-}[r] }{ \ar@{-}[r] } \newcommand{ \ar@{-}[l] }{ \ar@{-}[l] } \newcommand{ \ar@{-}[d] }{ \ar@{-}[d] } \newcommand{ \ar@{-}[u] }{ \ar@{-}[u] } \newcommand{\ar@{=>}[r]}{\ar@{=>}[r]}
\newcommand{{\Rightarrow}}{{\Rightarrow}} \newcommand{\! > \!}{\! > \!} \newcommand{ \!\! < \!\! }{ \!\! < \!\! } \newcommand{\!\Leftarrow\!}{\!\Leftarrow\!} \newcommand{{\sxymatrix{\boxone}}}{{\sxymatrix{ *+[F]{1} }}} \renewcommand{\!-\!}{\!-\!}
\newcommand{\half}{{\tfrac{1}{2}}}
\newcommand{\Ss}{\sideset{}{'}\sum_{k\succeq i}} \newcommand{\Ssd}{\sideset{}{'}\sum_{k\succeq i,\,k\in D^\tau}}
\newcommand{{K}}{{K}}
\newcommand{0}{0}
\newcommand{\alpha'}{\alpha'} \newcommand{{\widetilde{D}}}{{\widetilde{D}}} \newcommand{{\widetilde{D'}}}{{\widetilde{D'}}} \newcommand{m'}{m'} \newcommand{{i'}}{{i'}} \newcommand{{j'}}{{j'}} \newcommand{{\Pi'}}{{\Pi'}} \newcommand{{n'}}{{n'}}
\newcommand{\mathrm{SAut}}{\mathrm{SAut}}
\newcommand{\kern 0.8pt}{\kern 0.8pt} \newcommand{\kern 1.0pt}{\kern 1.0pt} \newcommand{\kern 2.0pt}{\kern 2.0pt}
\newcommand{\bfseries}{\bfseries}
\begin{abstract} Let $G$ be a simply connected absolutely simple algebraic group defined over the field of real numbers ${\mathbb{R}}$. Let $H$ be a simply connected semisimple ${\mathbb{R}}$-subgroup of $G$. We consider the homogeneous space $X=G/H$. We ask: how many connected components does $X({\mathbb{R}})$ have?
We give a method of answering this question. Our method is based on our solutions of generalized Reeder puzzles. \end{abstract}
\maketitle
\setcounter{section}{-1}
\section{Introduction} In this paper by a semisimple or reductive group we always mean a {\em connected} semisimple or reductive group, respectively. Let $G$ be a {\em simply connected} absolutely simple algebraic group over the field of real numbers ${\mathbb{R}}$. Let $H\subset G$ be a {\em simply connected} semisimple ${\mathbb{R}}$-subgroup. We consider the homogeneous space $X=G/H$, which is an algebraic variety over ${\mathbb{R}}$. The topological space $X({\mathbb{R}})$ of ${\mathbb{R}}$-points of $X$ need not be connected. We ask \begin{question}\label{q:1} How many connected components does $X({\mathbb{R}})$ have? \end{question}
The group of ${\mathbb{R}}$-points $G({\mathbb{R}})$ acts on the left on $X({\mathbb{R}})$, and we consider the orbits of this action. By Lemma \ref{lem:orbits-components} below, the set of connected components of $X({\mathbb{R}})$ is the set of orbits $G({\mathbb{R}})\backslash X({\mathbb{R}})$ of $G({\mathbb{R}})$ in $X({\mathbb{R}})$. On the other hand, there is a canonical bijection \begin{equation}\label{e:Serre}
G({\mathbb{R}})\backslash X({\mathbb{R}})\isoto \ker\left[H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G)\right],
\end{equation} see Serre \cite[Section I.5.4, Corollary 1 of Proposition 36]{Serre}, where $H^1({\mathbb{R}},G)$ denotes the first (nonabelian) Galois cohomology of $G$. We see that Question \ref{q:1} is equivalent to the following question: \begin{question}\label{q:2} What is the cardinality of the finite set $\ker\left[H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G)\right]$? \end{question} In this paper we give a method of answering Question \ref{q:2} and hence, Question \ref{q:1}. Namely, we give an explicit description of the Galois cohomology sets $H^1({\mathbb{R}},G)$ for all simply connected ${\mathbb{R}}$-groups $G$, permitting one to compute the kernel in Question \ref{q:2}. We describe $H^1({\mathbb{R}},G)$ using our solutions of generalized Reeder puzzles.
Let $G$ be a {\em simply connected,} absolutely simple, simply-laced, compact ${\mathbb{R}}$-group. Let $T\subset G$ be a maximal torus. Let $\Pi$ be a basis of the root system $R(G_{\mathbb{C}}, T_{\mathbb{C}})$. Let $D=D(G_{\mathbb{C}},T_{\mathbb{C}},\Pi)$ be the Dynkin diagram of $G$ with the set of vertices numbered by $1,2,\dots,n$, then $D$ is simply-laced, i.e. it has no multiple edges. By a {\em labeling of} $D$ we mean a family ${\boldsymbol{a}}=(a_i)_{i=1,\dots,n}$, where $a_i\in{\mathbb{Z}}/2{\mathbb{Z}}$. In other words, at any vertex $i$ we write a label $a_i=0,1$. We consider the set $L(D)$ of the labelings of $D$, it is an $n$-dimensional vector space over the field ${\mathbb{Z}}/2{\mathbb{Z}}$.
For any vertex $i$ we define the {\em move} ${\mathcal{M}}_i$ applied to a labeling ${\boldsymbol{a}}$: if the vertex $i$ has an {\em odd} number of neighbors with 1, ${\mathcal{M}}_i$ {\em changes} $a_i$ (from 0 to 1 or from 1 to 0), otherwise it does nothing. Clearly ${\mathcal{M}}_i({\mathcal{M}}_i({\boldsymbol{a}}))={\boldsymbol{a}}$. We say that two labelings ${\boldsymbol{a}},{\boldsymbol{a}}'$ are {\em equivalent} if we can pass from ${\boldsymbol{a}}$ to ${\boldsymbol{a}}'$ by a finite sequence of moves. This is indeed an equivalence relation on $L(D)$. We denote the corresponding set of equivalence classes by ${\rm Cl}(D)$. It is the set of orbits of the Weyl group $W$ acting on $L(D)$, and we denote it also by ${\rm Orb}(D)$ in Sections \ref{sec:An}--\ref{sec:G2} below. The set ${\rm Cl}(D)$ has a neutral element $[0]$, the class of the zero labeling $0$. To solve the puzzle means to describe the set of equivalence classes ${\rm Cl}(D)$ and to describe each equivalence class.
This is Reeder's original puzzle \cite{Reeder}, except that Reeder formulated his puzzle for any simply-laced graph, not necessarily a simply-laced Dynkin diagram. For a compact, simply connected, simply-laced group $G$ with Dynkin diagram $D$, the pointed set ${\rm Cl}(D)$ is in a bijection with $H^1({\mathbb{R}},G)$. In order to deal with non-simply-laced and noncompact groups, we generalize the puzzle.
We permit non-simply-laced Dynkin diagrams. Then, when counting the number of neighbors with 1 of a given vertex $i$, we do not count the {\em shorter} neighbors of $i$ connected with $i$ by a {\em double} edge. In other words, ``the long roots don't see the short roots''.
We consider also colored Dynkin diagrams, which correspond to non-compact inner forms of compact groups. A {\em coloring} of a Dynkin diagram $D$ is a family \[{\boldsymbol{t}}=(t_i)_{i=1,\dots,n},\quad t_i\in{\mathbb{Z}}/2{\mathbb{Z}}.\] If $t_i=1$, we color vertex $i$ in black, otherwise we leave it white. When vertex $i$ is white, the move ${\mathcal{M}}_i$ acts as above. When $i$ is black, the move ${\mathcal{M}}_i$ changes $a_i$ if $i$ has an {\em even} number of neighbors with 1, and does nothing otherwise. We write sometimes $L(D,{\boldsymbol{t}})$ for the set $L(D)$ with this Reeder puzzle. We denote the corresponding set of equivalence classes by ${\rm Cl}(D,{\boldsymbol{t}})$. If ${\boldsymbol{t}}=\boldsymbol{0}=(0,\dots, 0)$, we have ${\rm Cl}(D,\boldsymbol{0})={\rm Cl}(D)$. Note that if $D$ has a {\em black} vertex $i$, then the move ${\mathcal{M}}_i$ takes the zero labeling $0$ to a nonzero labeling and hence does not respect the group structure in $L(D)$.
We recall the definition of $H^1({\mathbb{R}},G)$, cf. \cite[Section III.4.5]{Serre}. Let $G$ be a linear algebraic group over ${\mathbb{R}}$. We denote by $G({\mathbb{C}})$ the set of ${\mathbb{C}}$-points of $G$. The first Galois cohomology set $H^1({\mathbb{R}},G)$ is, by definition,
$Z^1({\mathbb{R}},G)/\sim$, where the set of $1$-cocycles $Z^1({\mathbb{R}},G)$ is defined by $Z^1({\mathbb{R}},G)=\{z\in G({\mathbb{C}})\ |\ z{\bar{z}}=1\}$, and two $1$-cocycles $z,z'\in Z^1({\mathbb{R}},G)$ are cohomologous (we write $z\sim z'$) if
$z'=gz\bar g^{-1}$ for some $g\in G({\mathbb{C}})$. Here the bar denotes the complex conjugation in $G({\mathbb{C}})$; note that $G({\mathbb{R}})=\{g\in G({\mathbb{C}})\ |\ \bar g=g\}$. By definition, the neutral element $[1]\in H^1({\mathbb{R}},G)$ is the class of the neutral cocycle $1\in Z^1({\mathbb{R}},G)\subset G({\mathbb{C}})$.
We write $G({\mathbb{R}})_2$ for the set of elements $g\in G({\mathbb{R}})$ such that $g^2=1$. Then $g\bar{g}=g^2=1$, hence $G({\mathbb{R}})_2\subset Z^1({\mathbb{R}},G)$, and so we obtain a canonical map $G({\mathbb{R}})_2\to H^1({\mathbb{R}},G)$.
Let $G$ be a simply connected absolutely simple ${\mathbb{R}}$-group. For simplicity we assume in the Introduction that $G$ is an {\em inner form of a compact group.} Then $G$ has a compact maximal torus $T$, see Subsection \ref{subsec:t}. Choose a basis $\Pi$ of the root system $R=R(G_{\mathbb{C}},T_{\mathbb{C}})$. We obtain an isomorphism \[ \gamma\colon L(D)\isoto T({\mathbb{R}})_2\subset Z^1({\mathbb{R}}, G),\] see formula \eqref{e:gamma} in Section \ref{sec:compact}. This isomorphism induces a map $L(D)\to H^1({\mathbb{R}},G)$, which is surjective by a result of Kottwitz \cite[Lemma 10.2]{Kottwitz}. By Theorem \ref{thm:inner} the fibers of this map are equivalence classes of the Reeder puzzle for $(D,{\boldsymbol{t}})$ for a certain coloring ${\boldsymbol{t}}$ of $D$. In other words, we obtain a bijection \[ {\rm Cl}(D,{\boldsymbol{t}})\isoto H^1({\mathbb{R}},G). \] Moreover, for a suitable basis $\Pi$ the coloring ${\boldsymbol{t}}$ can be obtained from a Kac diagram by removing vertex 0, see Section \ref{ss:Kac}. In Sections \ref{sec:An}--\ref{sec:G2} we solve case by case the generalized Reeder puzzles for all such pairs $(D,{\boldsymbol{t}})$. Namely, in each case we give a set $\Xi$ of representatives for all equivalence classes in ${\rm Cl}(D,{\boldsymbol{t}})$ and describe explicitly the equivalence class $[0]\subset L(D,{\boldsymbol{t}})$ of the zero labeling $0$.
Now let $H$ be a simply connected semisimple ${\mathbb{R}}$-subgroup of a simply connected absolutely simple ${\mathbb{R}}$-group $G$. For simplicity, we assume in the Introduction that $H$ is absolutely simple and that $G$ and $H$ are inner forms of compact groups. Then $G$ contains a compact maximal torus $T_G$ and $H$ contains a compact maximal torus $T_H$. We may and shall assume that $T_H\subset T_G$. We denote by $(D_H,{\boldsymbol{t}}_H)$ and $(D_G,{\boldsymbol{t}}_G)$ the corresponding Reeder puzzles. For a good choice of bases $\Pi_H$ and $\Pi_G$ we obtain colorings ${\boldsymbol{t}}_H$ and ${\boldsymbol{t}}_G$ coming from Kac diagrams (in particular, not more than one vertex of each of $D_H$ and $D_G$ is black).
We describe our method of answering Question \ref{q:2}. The embedding $T_H\hookrightarrow T_G$ induces an embedding $T_H({\mathbb{R}})_2\hookrightarrow T_G({\mathbb{R}})_2\,$. Thus we obtain an injective homomorphism \[\iota\colon L(D_H)\to L(D_G),\] which can be computed explicitly. Using results of Sections \ref{sec:An}--\ref{sec:G2} for the group $H$, we construct a finite subset $\Xi\subset L(D_H,{\boldsymbol{t}}_H)$ containing exactly one representative of each equivalence class for the corresponding Reeder puzzle. For any $\xi\in\Xi\subset L(D_H,{\boldsymbol{t}}_H)$ we compute $\iota(\xi)\in L(D_G,{\boldsymbol{t}}_G)$. Using results of Sections \ref{sec:An}--\ref{sec:G2} for the group $G$, namely, the description of the equivalence class $[0]$ of $0$ in $L(G,{\boldsymbol{t}}_G)$\,, we can check whether $\iota(\xi)\in L(D_G,{\boldsymbol{t}}_G)$ lies in $[0]$ or not. We obtain a subset $\Xi_0$ of $\Xi$ consisting of all $\xi\in\Xi$ such that $\iota(\xi)\in[0]$. One can show (see Section \ref{sec:examples}) that $\Xi_0$ is in a bijection with $\ker\left[ H^1({\mathbb{R}},H)\to H^1({\mathbb{R}}, G)\right]$ and therefore, the cardinality of $\Xi_0$ answers Questions \ref{q:2} and \ref{q:1}.
Note that in order to answer Question \ref{q:2}, we compute in Sections \ref{sec:An}--\ref{sec:G2} the sets ${\rm Cl}(D,{\boldsymbol{t}})$ for inner forms of a compact group and certain sets ${\rm Cl}(D,\tau,{\boldsymbol{t}})$ for outer forms. Since each of these sets is in a bijection with the corresponding Galois cohomology set, we in particular compute the cardinalities of the Galois cohomology sets $H^1({\mathbb{R}},H)$ for all absolutely simple simply connected ${\mathbb{R}}$-groups $H$. These cardinalities have been known. The Galois cohomology of classical groups and adjoint groups is well known. S.~Garibaldi and N.~Semenov \cite[Example 5.1]{GS} computed $H^1({\mathbb{R}},H)$ for a certain nonsplit simply connected group $H$ of type ${\mathbf{E}}_7$. B.~Conrad \cite[Proof of Lemma 4.9]{Conrad} computed $H^1({\mathbb{R}},H)$ for the split simply connected groups $H$ of types of ${\mathbf{E}}_6$ and ${\mathbf{E}}_7$. The cardinalities of the Galois cohomology sets for ``most'' of simple ${\mathbb{R}}$-groups, in particular, for all absolutely simple simply connected ${\mathbb{R}}$-groups, were recently computed by J.~Adams \cite{A} by a method different from ours. Our results agree with the previous results, in particular with the tables of Adams \cite{A}. Later, after the first version of the present paper appeared in arXiv, Borovoi and Timashev \cite{BoT} proposed a combinatorial method based on the notion of a Kac diagram, permitting one to compute easily the cardinality of $H^1({\mathbb{R}},H)$ when $H$ is an inner form of any compact semisimple ${\mathbb{R}}$-group, not necessarily simply connected. However, it seems that neither of these alternative approaches permits one to answer Question \ref{q:1} about $(G/H)({\mathbb{R}})$, except for the case when $H^1({\mathbb{R}},G)=1$ (which happens only when $G={\bf{SL}}(n)$ or $G={\bf{Sp}}(2n,{\mathbb{R}})$).
The rest of the paper is structured as follows. In Section \ref{sec:1} we recall results of \cite{Bo}. In Sections \ref{sec:compact} and \ref{sec:inner} we compute the moves ${\mathcal{M}}_i$ in the case when $G$ is compact and when it is a noncompact inner form of a compact group, respectively. In particular, in Section \ref{sec:inner} we prove Theorem \ref{thm:inner} describing the pointed set $H^1({\mathbb{R}},G)$ for an {\em inner} form $G$ of a compact simply connected simple group in terms of the corresponding generalized Reeder puzzle. In Section \ref{sec:outer} we prove Theorem \ref{cor:Theorem-3-Bo}, which reduces computing the Galois cohomology of an {\em outer} form of a compact, simply connected, simple ${\mathbb{R}}$-group to computing Galois cohomology of an {\em inner} form of another compact group. In Section \ref{sec:Kac} we describe the generalized Reeder puzzle for $G$ in terms of the Kac diagram of $G$ from \cite[Table 7]{OV}. In Sections \ref{sec:An}--\ref{sec:G2} we solve the generalized Reeder puzzles for all isomorphism classes of simply connected absolutely simple ${\mathbb{R}}$-groups $G$. We state the assertions necessary for our calculations, but omit straightforward proofs for brevity. In the last Section \ref{sec:examples} we describe our method of answering Questions \ref{q:2} and \ref{q:1} for all simply connected $H$ (not necessarily simple), and we give examples of calculations using results of Sections \ref{sec:An}--\ref{sec:G2}.
\section{Galois cohomology of reductive real groups} \label{sec:1}
In this section we state briefly the necessary results of \cite{Bo}. For details see \cite{Bo} or \cite{Borovoi-arXiv}.
Let $G$ be a reductive group over ${\mathbb{R}}$. Let $T$ be a {\em fundamental torus} of $G$, i.e., a maximal torus of $G$ (defined over ${\mathbb{R}}$) containing a maximal compact torus $T_0$ of $G$. Then $T$ is the centralizer of $T_0$ in $G$; see \cite[Section 7]{Borovoi-arXiv}. Let $T_1$ be the largest {\em split} subtorus of $T$. We write $T({\mathbb{R}})_2$ for the group of elements of $T({\mathbb{R}})$ of order dividing 2. \begin{lemma}[{\cite[Lemma 1.1]{Bo}, see also \cite[Lemma 3(a)]{Borovoi-arXiv}}] \label{lem:Bo88} The map $T({\mathbb{R}})_2\to H^1({\mathbb{R}},T)$ induces a canonical isomorphism $T({\mathbb{R}})_2/T_1({\mathbb{R}})_2\isoto H^1({\mathbb{R}},T)$. \end{lemma}
Set $N_0=\mathcal{N}_G(T_0)$, $W_0=N_0/T$. We have $W_0({\mathbb{C}})=W_0({\mathbb{R}})$; see \cite[Section 7]{Borovoi-arXiv}. We define a left action of the group $W_0({\mathbb{R}})$ on the set $H^1({\mathbb{R}},T)$. Let $w\in W_0({\mathbb{R}})$ be represented by $n\in N_0({\mathbb{C}})$ and let $\xi\in H^1({\mathbb{R}},T)$, $\xi=[z]$, where $z\in Z^1({\mathbb{R}},T)$ is a cocycle and $[z]$ denotes the cohomology class of $z$. We set \begin{equation}\label{eq:Bo-action} w* \xi:= [nz{\bar{n}}^{-1}]=[nzn^{-1}\cdot n{\bar{n}}^{-1}], \end{equation} where the bar denotes the complex conjugation in $G({\mathbb{C}})$. This is a well-defined action; see \cite[Construction 8]{Borovoi-arXiv}. (Note that in general the action $*$ does not respect the group structure on $H^1({\mathbb{R}},T)$.\,) It is easy to see that the images of $\xi$ and $w*\xi$ in $H^1({\mathbb{R}},G)$ coincide. Therefore, we obtain a canonical map $$W_0({\mathbb{R}})\backslash H^1({\mathbb{R}},T)\to H^1({\mathbb{R}},G).$$
\begin{proposition}[{\cite[Theorem 1]{Bo}, see also \cite[Theorem 9]{Borovoi-arXiv}}] \label{prop:Bo88} The map $$W_0({\mathbb{R}})\backslash H^1({\mathbb{R}},T)\to H^1({\mathbb{R}},G)$$ induced by the map $H^1({\mathbb{R}},T)\to H^1({\mathbb{R}},G)$ is a bijection. \end{proposition}
\section{Weyl action for compact groups} \label{sec:compact} {\em We change our notation.} In Sections \ref{sec:compact} -- \ref{sec:Kac}, $G$ is a {\em simply connected, simple, {\bfseries compact}} (i.e., anisotropic) linear algebraic group over ${\mathbb{R}}$.
Let $T$ be a maximal torus of $G$. Let $X^*={{\sf X}}^*(T_{\mathbb{C}}):={\rm Hom}(T_{\mathbb{C}}, {\mathbb{G}}_{m,{\mathbb{C}}})$ denote the character group of $T_{\mathbb{C}}$, where ${\mathbb{G}}_{m,{\mathbb{C}}}$ is the multiplicative group over ${\mathbb{C}}$. Let $R=R(G_{\mathbb{C}},T_{\mathbb{C}})\subset X^*$ denote the root system of $G_{\mathbb{C}}$ with respect to $T_{\mathbb{C}}$, then we have a root decomposition \[ {\rm Lie\,} G_{\mathbb{C}}={\rm Lie\,} T_{\mathbb{C}}\oplus\bigoplus_{\beta\in R}{\mathfrak{g}}_\beta\,.\] Let $\Pi\subset R$ be a basis of $R$ (a system of simple roots). Note that $\Pi$ does not have to be a basis of $X^*$. Write $\Pi=\{\alpha_1,\dots,\alpha_n\}$, then a simple root $\alpha_i$ is a homomorphism $\alpha_i\colon T_{\mathbb{C}}\to {\mathbb{G}}_{m,{\mathbb{C}}}$. Let $R_+\subset R$ denote the set of positive roots with respect to the basis $\Pi$, and let $B\subset G_{\mathbb{C}}$ denote the corresponding Borel subgroup of $G_{\mathbb{C}}$ containing $T_{\mathbb{C}}$, then \[ {\rm Lie\,} B={\rm Lie\,} T_{\mathbb{C}}\oplus\bigoplus_{\beta\in R_+}{\mathfrak{g}}_\beta\,.\] Let $D=D(G_{\mathbb{C}},T_{\mathbb{C}},\Pi)=D(G_{\mathbb{C}},T_{\mathbb{C}},B)$ denote the Dynkin diagram of $G_{\mathbb{C}}$ with respect to $T_{\mathbb{C}}$ and $\Pi$, then the set of vertices of $D$ is $\Pi$. Let $W=W(G,T)=N/T$ denote the Weyl group, where $N$ is the normalizer of $T$ in $G$. By abuse of notation we write $W$ also for the group of points $W({\mathbb{R}})=W({\mathbb{C}})$.
Let $X_*={{\sf X}}_*(T_{\mathbb{C}}):={\rm Hom}({\mathbb{G}}_{m,{\mathbb{C}}}, T_{\mathbb{C}})$ denote the cocharacter group of $T$. There is a canonical pairing $$ \langle\ ,\,\rangle\colon X^*\times X_*\to{\mathbb{Z}},\quad (\chi,x)\mapsto \langle \chi,x\rangle\in {\mathbb{Z}},\quad \chi\in X^*,\ x\in X_* $$ defined by $$ \chi\circ x\,=\ ( z\mapsto z^{\langle \chi,x\rangle}\, ) \colon \ {\mathbb{G}}_{m,{\mathbb{C}}}\to {\mathbb{G}}_{m,{\mathbb{C}}}\,. $$ We have a canonical basis $\Pi^\vee= \{\alpha_1^\vee,\dots,\alpha_n^\vee\}$ of the dual root system $R^\vee$, where the simple coroot $\alpha_i^\vee\colon {\mathbb{G}}_{m,{\mathbb{C}}}\to T_{\mathbb{C}}$ is the coroot corresponding to the simple root $\alpha_i$; see \cite[Sections 7.4 and 7.5]{Springer}. Note that $\langle \alpha_i , \alpha_i^\vee \rangle = 2$. Since $G$ is {\em simply connected}, $\Pi^\vee$ is a basis of $X_*$ (this is one of the definitions of a simply connected semisimple algebraic group, cf.~\cite[Section 2.15]{SpringerAMS}).
\begin{lemma}[well-known]\label{lem:repr} Let $G$, $T,\ N$, and $W$ be as above (in particular, $G$ is {\em compact}). Then for any $w\in W({\mathbb{R}})=W({\mathbb{C}})$ there exists a representative $n\in N({\mathbb{R}})$ (and not just in $N({\mathbb{C}})$). \end{lemma}
\begin{proof} The group $W$ is generated by the reflections $r_1,\dots,r_n$, hence, it suffices to find such an $n$ for a reflection $w=r_i$. This reduces to the case where $G={\bf SU}_2$ and $T$ is the diagonal torus, when we can take \[n=\begin{pmatrix}0 &1\\-1 & 0\end{pmatrix}.\] \end{proof}
Since $G$ is {\em compact,} by Borel and Serre \cite[Theorem 6.8, Example (a)]{Borel-Serre}, see also Serre \cite[III.4.5, Example (a)]{Serre} (or by Lemma \ref{lem:Bo88} and Proposition \ref{prop:Bo88} above, where $T$ is compact, $N_0=N$, and $W_0=W$) we have a bijection $W\backslash T({\mathbb{R}})_2\isoto H^1({\mathbb{R}},G)$. Here $W$ acts on $T({\mathbb{R}})_2$ in the standard way. Namely, since $G$ is compact, by Lemma \ref{lem:repr} we can choose a representative $n$ of $w\in W$ in $N({\mathbb{R}})$, and for $a\in T({\mathbb{R}})_2$ we set \[ w*a=na{\bar{n}}^{-1}=nan^{-1}.\] Therefore, we are interested in the standard action of $W$ on $T({\mathbb{R}})_2$. We identify $X_*/2X_*$ with $T({\mathbb{R}})_2$ by $x+ 2 X_*\mapsto x(-1)\in T({\mathbb{R}})_2$ for $x\in X_*$. The canonical ${\mathbb{Z}}$-basis $\alpha_1^\vee,\dots,\alpha_n^\vee$ of $X_*$ gives a ${\mathbb{Z}}/2{\mathbb{Z}}$-basis of $X_*/2X_*$, which we shall again write as $\alpha_1^\vee,\dots,\alpha_n^\vee$.
By a {\em labeling} of the Dynkin diagram $D$ we mean a vector ${\boldsymbol{a}}=(a_i)_{i=1,\dots,n}$, where $a_i\in {\mathbb{Z}}/2{\mathbb{Z}}$, i.e., $a_i=0,1$. In other words, at each vertex $i$ of $D$ we write a label $a_i\in {\mathbb{Z}}/2{\mathbb{Z}}$. We denote the abelian group of labelings of $D$ by $L(D)$. We have a canonical isomorphism \begin{equation}\label{e:gamma} \gamma\colon L(D)\isoto T({\mathbb{R}})_2\subset Z^1({\mathbb{R}},G),\quad {\boldsymbol{a}}\mapsto a= \prod_{i=1}^n \left(\alpha_i^\vee(-1)\right)^{a_i}. \end{equation} By abuse of notation we denote by $\gamma$ both the isomorphism $\gamma\colon L(D)\isoto T({\mathbb{R}})_2$ and the embedding $\gamma\colon L(D)\isoto T({\mathbb{R}})_2\hookrightarrow Z^1({\mathbb{R}},G)$. Thus with ${\boldsymbol{a}}\in L(D)$ we associate $a=\gamma({\boldsymbol{a}})\in T({\mathbb{R}})_2\subset Z^1({\mathbb{R}},G)$. We also associate with ${\boldsymbol{a}}$ the element $\sum_k a_k\alpha^\vee_k\in X_*/2X_*$.
We wish to compute the orbits of $W$ in $T({\mathbb{R}})_2$ with respect to the standard left action. The Weyl group $W$ is generated by the reflections $r_i=r_{\alpha_i}$. We define the {\em moves} ${\mathcal{M}}_i\colon L(D)\to L(D)$ on the set of labelings $L(D)$ by ${\mathcal{M}}_i\kern 1.0pt {\boldsymbol{a}}={\boldsymbol{a}}'$, where \begin{equation}\label{eq:r-i-action} r_i\left(\prod_{j=1}^n \left(\alpha_j^\vee(-1)\right)^{a_j}\right)= \prod_{j=1}^n\left( \alpha_j^\vee(-1)\right)^{a'_j} \ \text{ i.e., }\ r_i \left(\sum_{j=1}^n a_j\,\alpha_j^\vee\right)=\sum_{j=1}^n a'_j\,\alpha_j^\vee\in X_*/2X_*. \end{equation} Note that if ${\boldsymbol{a}}'={\mathcal{M}}_i\kern 1.0pt {\boldsymbol{a}}$, then ${\boldsymbol{a}}={\mathcal{M}}_i\kern 1.0pt{\boldsymbol{a}}'$, because $r_i^2=1$. We say that two labelings ${\boldsymbol{a}},{\boldsymbol{a}}'\in L(D)$ are {\em equivalent} if we can relate them by a series of moves. The set of orbits of $W$ in $T({\mathbb{R}})_2$ is in a canonical bijection with the set of equivalence classes of labelings ${\boldsymbol{a}}\in L(D)$ of the Dynkin diagram $D$ of $(G_{\mathbb{C}},T_{\mathbb{C}},\Pi)$ with respect to the moves.
The following Lemma \ref{prop:non-twisted} says that the moves defined in this section are indeed the moves of the Reeder puzzle on $D$.
\begin{lemma}\label{prop:non-twisted} Let $G$ be a simply connected, simple, {\em compact} ${\mathbb{R}}$-group of absolute rank $n$, and $D$ its Dynkin diagram, as above. Define the moves ${\mathcal{M}}_i\colon L(D)\to L(D)$ by \eqref{eq:r-i-action}. Then we have $a'_j=a_j$ for $j\neq i$, and $a'_i$ is given by \begin{equation}\label{non-twisted-simply-laced}
a'_i = a_i + \Ss a_k \end{equation} (addition in ${\mathbb{Z}}/2{\mathbb{Z}}$), where $\Ss$ means that the sum is taken over all the {\em neighbors} $k\neq i$ of $i$ except for the vertices $k$ connected to $i$ by a double edge such that the root $\alpha_k$ is {\em shorter} than $\alpha_i$. \end{lemma}
\begin{proof} A reflection $r_i$ acts on $X_*$ by \begin{equation}\label{eq:Springer-reflection} r_i(y)=y-\langle\alpha_i,y\rangle \alpha_i^\vee, \end{equation} cf. \cite[Section 7.4.1]{Springer}. If $y=\sum_k a_k\alpha_k^\vee\in X_*$, then $$ r_i (y) =y-\sum_k a_k\langle\alpha_i,\alpha_k^\vee\rangle\alpha_i^\vee, $$ and the same formula holds if $y=\sum_k a_k\alpha_k^\vee\in X_*/2X_*$. If we write $r_i (y)=\sum_k a'_k\alpha_k^\vee$, then clearly $a'_j=a_j$ for $j\neq i$, and \begin{equation}\label{eq:action-sum} a'_i=a_i+\sum_k (-a_k)\langle \alpha_i,\alpha_k^\vee\rangle, \end{equation} so we need only to compute (in ${\mathbb{Z}}/2{\mathbb{Z}}$) the sum in \eqref{eq:action-sum}.
We may assume that our root system $R$ is a root system in a Euclidean space $V$. Then $$ \langle \alpha_i,\alpha_k^\vee\rangle=\frac{2(\alpha_i,\alpha_k)}{(\alpha_k,\alpha_k)}, $$ where $(\alpha_i,\alpha_k)$ is the scalar product in $V$. If $k=i$, then $\langle\alpha_i,\alpha_k^\vee\rangle=\langle\alpha_i,\alpha_i^\vee\rangle=2\equiv 0 \pmod{2}$. If two different vertices $i$ and $k$ are not connected by an edge, then $\langle\alpha_i,\alpha_k^\vee\rangle=0$. Thus the sum in \eqref{eq:action-sum} is taken over vertices $k$ different from $i$ that are connected to $i$ by an edge. Now we consider cases. If vertices $i$ and $k$ are connected by a single edge, then $\langle\alpha_i,\alpha_k^\vee\rangle=-1$ \cite[VI.1.3, possibility (3)\,]{Bourbaki}, hence vertex $k$ gives $a_k$ to the sum in \eqref{eq:action-sum}. If they are connected by a triple edge, then either $\langle\alpha_i,\alpha_k^\vee\rangle=-1$ or $\langle\alpha_i,\alpha_k^\vee\rangle=-3\equiv -1\pmod{2}$ \cite[VI.1.3, possibility (7)\,]{Bourbaki}, and again vertex $k$ gives $a_k$ to the sum. If they are connected by a double edge and the root $\alpha_k$ is {\em longer} than $\alpha_i$, then $\langle\alpha_i,\alpha_k^\vee\rangle=-1$ \cite[VI.1.3, possibility (5)\,]{Bourbaki}, and again vertex $k$ gives $a_k$ to the sum. However, if the vertices $i$ and $k$ are connected by a double edge and the root $\alpha_k$ is {\em shorter} than $\alpha_i$, then $\langle\alpha_i,\alpha_k^\vee\rangle=-2\equiv 0\pmod{2}$ \cite[VI.1.3, possibility (5)\,]{Bourbaki}, hence vertex $k$ gives nothing to the sum in \eqref{eq:action-sum}. We conclude that formula \eqref{eq:action-sum} can be written as \eqref{non-twisted-simply-laced}. \end{proof}
\begin{corollary}\label{cor:non-twisted} If $G$ is as in Lemma \ref{prop:non-twisted}, in particular $G$ is compact, then the map \eqref{e:gamma} induces a bijection ${\rm Cl}(D)\isoto H^1({\mathbb{R}},G)$, where the moves ${\mathcal{M}}_i$ act on $L(D)$ by formula \eqref{non-twisted-simply-laced}. \end{corollary}
\section{Weyl action for inner forms} \label{sec:inner}
In this section $G$, $T$, $R$, $\Pi$, $D$, and $W$ are as in Section \ref{sec:compact}, in particular $G$ is a simply connected, simple, {\em compact} linear algebraic group over ${\mathbb{R}}$.
\subsection{The $t$-twisted action} \label{subsec:t} Write $G^{\rm ad}=G/Z_G,\ T^{\rm ad}=T/Z_G$, where $Z_G$ denotes the center of $G$. Then $T^{\rm ad}$ is a maximal torus in the adjoint group $G^{\rm ad}$. Consider an inner twisted form (inner twist) $_z G$ of $G$,
where $z\in Z^1({\mathbb{R}},G^{\rm ad})$. It is well known that $z$ is cohomologous to some $t\in T^{\rm ad}({\mathbb{R}})_2$ (see e.g., \cite[III.4.5, Example (a)]{Serre}). {\em We fix such an element} $t$. Then $_z G\simeq \kern 0.8pt_t G$. We have $_t G({\mathbb{C}})=G({\mathbb{C}})$, but the complex conjugation in $_t G({\mathbb{C}})$ is given by $$ g\mapsto {}^*{\bar{g}}={\rm Inn}(t)({\bar{g}}) . $$ This means that if we lift $t\in T^{\rm ad}({\mathbb{R}})_2$ to some ${\tilde t}\in T({\mathbb{C}})$, then the complex conjugation in $_t G({\mathbb{C}})$ is given by $$ ^*{\bar{g}}={\tilde t}\,{\bar{g}}\, {\tilde t}^{-1}. $$
Since ${\tilde t}\in T({\mathbb{C}})$, we have $_t T=T$, hence $_t T$ is a compact maximal torus in $_t G$, hence it is a fundamental torus of $_t G$. Thus any inner form of a compact semisimple ${\mathbb{R}}$-group has a compact maximal torus. Let $T_0$ of Section \ref{sec:1} be the maximal compact subtorus of $_t T$, then clearly $T_0=\kern 0.8pt_t T=T$. Let $W_0:=W_0(\kern 0.8pt_t G,\kern 0.8pt_t T)$ be the group $W_0$ of Section \ref{sec:1}, then $W_0=W(G,T)= W$, because $W_0$ was defined in terms of $T_0$.
We consider the $t$-twisted action of $W_0=W$ given by formula \eqref{eq:Bo-action} on $H^1({\mathbb{R}},\kern 0.8pt_t T)=H^1({\mathbb{R}},T)=T({\mathbb{R}})_2$. Let $w\in W({\mathbb{R}})=W({\mathbb{C}})$, $w=nT$, where $n\in N({\mathbb{R}})$. Then \[{\bar{n}}=n,\quad {}^* {\bar{n}}={\tilde t}{\bar{n}}{\tilde t}^{-1}={\tilde t} n{\tilde t}^{-1}. \] For $a\in T({\mathbb{R}})_2=T({\mathbb{C}})_2$ the $t$-twisted action of $w$ is given by \begin{equation}\label{eq:ect-0-gen} w* a= n\, a\, {}^* {\bar{n}}^{-1} =n\, a \, {\tilde t}\, {\bar{n}}^{-1} {\tilde t}^{-1} =n\, a \, {\tilde t}\, n^{-1} {\tilde t}^{-1} = n a n^{-1}\cdot n{\tilde t} n^{-1} {\tilde t} ^{-1}. \end{equation} In particular, let $r_j\in W({\mathbb{R}})=W({\mathbb{C}})$ be the reflection corresponding to a simple root $\alpha_j$. Write $r_j=n_j T$ for some $n_j\in N({\mathbb{R}})$. For $a\in T({\mathbb{R}})_2$ the $t$-twisted action of $r_j$ is given by \begin{equation}\label{eq:ect-0} r_j* a= n_j\, a\, {}^* {\bar{n}}_j^{-1} =n_j\, a \, {\tilde t}\, n_j^{-1} {\tilde t}^{-1}= n_j a n_j^{-1}\cdot n_j{\tilde t} n_j^{-1} {\tilde t} ^{-1}. \end{equation} Note that \begin{equation}\label{eq:twisting} r_j* a=r_j(a) \cdot n_j{\tilde t} n_j^{-1} {\tilde t}^{-1}, \end{equation} where $r_j(a)=n_j a n_j^{-1}$. In particular, we have $r_j * 1= n_j{\tilde t} n_j^{-1} {\tilde t}^{-1}$, so in general $r_j* 1 \neq 1$ and therefore, the $t$-twisted action does not preserve the group structure in $T({\mathbb{R}})_2$.
Define \begin{equation}\label{eq:t-bold} {\boldsymbol{t}}=(t_i)\in({\mathbb{Z}}/2{\mathbb{Z}})^n, \quad\text{where}\quad (-1)^{t_i}=\alpha_i(t). \end{equation} We regard ${\boldsymbol{t}}$ as a {\em coloring} of the diagram $D$. We color a vertex $i$ in black if $t_i=1$, and leave $i$ uncolored (i.e., white) if $t_i=0$. Denote by $_{\boldsymbol{t}} D:=(D,{\boldsymbol{t}})$ the Dynkin diagram $D=D(G_{\mathbb{C}},T_{\mathbb{C}},\Pi)$ together with the coloring ${\boldsymbol{t}}$. The notation $_{\boldsymbol{t}} D$ suggests that we regard $_{\boldsymbol{t}} D=(D,{\boldsymbol{t}})$ as an (inner) twist of $D$ by ${\boldsymbol{t}}$.
We compute the moves corresponding to the $t$-twisted action. For each vertex $i$ of $D$, we define the move ${\mathcal{M}}_i$ by ${\mathcal{M}}_i\kern 1.0pt {\boldsymbol{a}}={\boldsymbol{a}}'$, where \begin{equation*}\label{eq:r-i-action-twisted}
r_i*\left(\prod_{j=1}^n \left(\alpha_j^\vee(-1)\right)^{a_j}\right)= \prod_{j=1}^n \left(\alpha_j^\vee(-1)\right)^{a'_j}\ \text{ i.e., }\ r_i *\left(\sum_{j=1}^n a_j\,\alpha_j^\vee\right)=\sum_{j=1}^n a'_j\,\alpha_j^\vee\in X_*/2X_*. \end{equation*}
\begin{lemma}\label{prop:twisted} For the $t$-twisted action of $W$ and the move ${\mathcal{M}}_i$ just defined, we have, as in Lemma \ref{prop:non-twisted}, $a'_j=a_j$ for $j\neq i$, while in formula \eqref{non-twisted-simply-laced} the term $t_i\in{\mathbb{Z}}/2{\mathbb{Z}}$ defined by $(-1)^{t_i}=\alpha_i(t)$ must be added. Thus we have \begin{equation}\label{twisted-simply-laced}
a'_i = a_i+t_i + \Ss a_k \ , \end{equation} where the meaning of $\Ss$ is the same as in formula \eqref{non-twisted-simply-laced}. \end{lemma}
\begin{proof} By \eqref{e:gamma}, \eqref{eq:twisting} and Lemma \ref{prop:non-twisted} it suffices to show that $n_j{\tilde t} n_j^{-1} {\tilde t}^{-1}=\left(\alpha_j^\vee(-1)\right)^{t_j}$. We are indebted to Dmitry A. Timashev for the idea of the following proof.
Consider the ${\mathbb{C}}$-torus $T_{\mathbb{C}}$. As above, we write $X_*$ for ${{\sf X}}_*(T_{\mathbb{C}})={\rm Hom}({\mathbb{G}}_{m,{\mathbb{C}}},T_{\mathbb{C}})$. We have a canonical isomorphism of abelian complex Lie groups $$ X_*\underset{{\mathbb{Z}}}{\otimes} {\mathbb{C}}^\times\isoto T({\mathbb{C}}),\quad x\otimes u\mapsto x(u),\quad x\in X_*,\ u\in {\mathbb{C}}^\times={\mathbb{G}}_{m,{\mathbb{C}}}({\mathbb{C}}). $$ Thus we obtain an isomorphism of abelian complex Lie algebras (vector spaces over ${\mathbb{C}}$) $$ X_* \underset{{\mathbb{Z}}}{\otimes} {\mathbb{C}} \isoto {\rm Lie\,} T_{\mathbb{C}},\quad x\otimes v\mapsto dx(v),\quad x\in X_*,\ v\in{\mathbb{C}},\ dx:=d_1 x\,\colon {\mathbb{C}}={\rm Lie\,}{\mathbb{G}}_{m,{\mathbb{C}}}\to {\rm Lie\,} T_{\mathbb{C}}\,. $$ In particular, we obtain a canonical embedding \begin{equation}\label{eq:embedding-X*} X_*\hookrightarrow X_* \underset{{\mathbb{Z}}}{\otimes} {\mathbb{C}} \isoto {\rm Lie\,} T_{\mathbb{C}}\qquad x\mapsto x\otimes 1\mapsto dx(1). \end{equation} Now it is an easy exercise to deduce from \eqref{eq:Springer-reflection} and \eqref{eq:embedding-X*} that for $1\le j\le n$ and for any $y\in{\rm Lie\,} T_{\mathbb{C}}$ we have \begin{equation}\label{eq:Springer-reflection-Lie} r_j(y)=y-\langle d \alpha_j,y\rangle d\alpha_j^\vee(1), \end{equation} where we write $d\alpha_j$ for $d_1\alpha_j\, \colon {\rm Lie\,} T_{\mathbb{C}}\to{\rm Lie\,}{\mathbb{G}}_{m,{\mathbb{C}}}= {\mathbb{C}}$, and we write $\langle d \alpha_j,y\rangle$ for $d\alpha_j(y)\in{\mathbb{C}}$.
Let $\omega_k^\vee\in {\rm Lie\,} T_{\mathbb{C}}$ be the element such that
$\langle d\alpha_j,\omega_k^\vee\rangle=\delta_{jk}$\,, where $\delta_{jk}$ is Kronecker's delta symbol. We set \[{\tilde t}=\exp\left(\pi {\mathrm{i}}\,\sum_k\kern 1.0pt t_k \omega_k^\vee \right) \in T({\mathbb{C}}),\quad\text{where }{\mathrm{i}}^2=-1.\] Then \begin{equation*} \alpha_j({\tilde t})=\exp \left\langle d\alpha_j,\,\pi {\mathrm{i}}\,\sum_k t_k \omega_k^\vee\right\rangle= \exp \left(\pi{\mathrm{i}}\,\sum_k t_k\langle d\alpha_j,\omega_k^\vee\rangle\right)= \exp(\pi{\mathrm{i}}\, t_j)=(-1)^{t_j}, \end{equation*} because the exponential map commutes with homomorphisms of Lie groups; see \cite[Section 1.2.7, p.~29, Problem 26]{OV}. It follows that the image of ${\tilde t}$ in $T^{\rm ad}({\mathbb{C}})$ is indeed $t$. By \eqref{eq:Springer-reflection-Lie} we have \begin{align*}\label{eq:Dima} n_j{\tilde t} n_j^{-1} {\tilde t}^{-1}&=r_j({\tilde t}) {\tilde t}^{-1} =\exp\left(\pi {\mathrm{i}}\, \sum_k\, t_k\left(r_j(\omega_k^\vee)-\omega_k^\vee\right)\,\right)\\ &=\exp\left(-\pi{\mathrm{i}}\sum_k t_k\langle d\alpha_j,\omega_k^\vee\rangle d\alpha_j^\vee(1)\right) =\exp\left( t_j\, d\alpha_j^\vee(-\pi{\mathrm{i}})\right) =\left(\alpha_j^\vee(-1)\right)^{t_j}. \end{align*} Thus $n_j{\tilde t} n_j^{-1} {\tilde t}^{-1}=\left(\alpha_j^\vee(-1)\right)^{t_j}$, as required. \end{proof}
According to Lemma \ref{prop:twisted}, the twisted action of ${\mathcal{M}}_i$ on a labeling ${\boldsymbol{a}}=(a_i)\in ({\mathbb{Z}}/2{\mathbb{Z}})^n$ is given by formula \eqref{twisted-simply-laced}. This means that for any vertex $i$ of $D$, the action of ${\mathcal{M}}_i$ is given by formula \eqref{non-twisted-simply-laced} if vertex $i$ is white (i.e., $t_i=0$), and by formula \begin{equation}\label{twisted-action}
a'_i = a_i + 1+ \Ss a_k \ , \end{equation} if vertex $i$ is black (i.e., $t_i=1$). In other words, this is exactly the generalized Reeder puzzle as described in the Introduction. We denote by $L(D,{\boldsymbol{t}})$ (or $L(\kern 0.8pt_{\boldsymbol{t}} D)$) the set of labelings $({\mathbb{Z}}/2{\mathbb{Z}})^n$ with this twisted action of the moves ${\mathcal{M}}_i$. By Lemma \ref{prop:twisted} the action of ${\mathcal{M}}_i$ on $L(D,{\boldsymbol{t}})$ is compatible with the $t$-twisted action of the reflection $r_i\in W$ on $T({\mathbb{R}})_2=\kern 0.8pt_t T({\mathbb{R}})_2$ with respect to the canonical bijection \begin{equation*} \gamma_{\boldsymbol{t}}\colon L(D,{\boldsymbol{t}})\isoto T({\mathbb{R}})_2\subset Z^1({\mathbb{R}},\kern 0.8pt_t G),\quad {\boldsymbol{a}}\mapsto a=\prod_i \left(\alpha_i^\vee(-1)\right)^{a_i}. \end{equation*} By abuse of notation we denote by $\gamma_{\boldsymbol{t}}$ both the isomorphism $\gamma_{\boldsymbol{t}}\colon L(\kern 0.8pt_{\boldsymbol{t}} D)\isoto T({\mathbb{R}})_2$ and the embedding $\gamma_{\boldsymbol{t}}\colon L(\kern 0.8pt_{\boldsymbol{t}} D)\isoto T({\mathbb{R}})_2\hookrightarrow Z^1({\mathbb{R}},G)$. We regard the twisted diagram $_{\boldsymbol{t}} D=(D,{\boldsymbol{t}})$ as the {\em colored Dynkin diagram of the twisted group $\kern 0.8pt_t G$} (with respect to $T$ and $\Pi$). We denote by ${\rm Orb}(\kern 0.8pt_{\boldsymbol{t}} D)$ the set of equivalence classes (orbits) in $L(\kern 0.8pt_{\boldsymbol{t}} D)$ with respect to the equivalence relation given by the moves of Lemma \ref{prop:twisted} (in the Introduction we denoted this set of equivalence classes by ${\rm Cl}(D,{\boldsymbol{t}})$\kern 0.8pt).
The following theorem describes the Galois cohomology of an {\em inner} form $_t G$ of a compact, simply connected, simple ${\mathbb{R}}$-group $G$ in terms of labelings of the corresponding colored Dynkin diagram $_{\boldsymbol{t}} D$.
\begin{theorem}\label{thm:inner} Let $G$, $T$, $R$, $\Pi$, $D$, and $W$ be as in Section \ref{sec:compact}. Let $t\in T^{\rm ad}({\mathbb{R}})_2$ and let ${\boldsymbol{t}}\in({\mathbb{Z}}/2{\mathbb{Z}})^n$ be defined by \eqref{eq:t-bold}. Let $L(_{\boldsymbol{t}} D)$ be the set of labelings of the colored Dynkin diagram $_{\boldsymbol{t}} D$ with the moves given by formula \eqref{twisted-simply-laced}. Then the canonical map $$ \gamma_{\boldsymbol{t}}\colon L(\kern 0.8pt_{\boldsymbol{t}} D)\isoto T({\mathbb{R}})_2 \hookrightarrow Z^1({\mathbb{R}},\kern 0.8pt_t G) $$ induces a canonical bijection $$ \lambda_{\boldsymbol{t}}\colon {\rm Orb}(\kern 0.8pt_{\boldsymbol{t}} D)\isoto H^1({\mathbb{R}},\kern 0.8pt_t G). $$ \end{theorem} The theorem follows immediately from Proposition \ref{prop:Bo88} and Lemma \ref{prop:twisted}. We specify that $\lambda_{\boldsymbol{t}}$ takes the orbit (class) of a labeling ${\boldsymbol{a}}=(a_j)$ to the cohomology class of the cocycle \[\prod_{j=1}^n(\alpha_j^\vee(-1))^{a_j}\in T({\mathbb{R}})_2\subset Z^1({\mathbb{R}},\kern 0.8pt_t G).\]
\section{Weyl action for outer forms} \label{sec:outer}
In this section again $G$, $T$, $R$, $\Pi$, $B$, $D$, $N$ and $W$ are as in Section \ref{sec:compact}, in particular $G$ is a simply connected, simple, {\em compact} linear algebraic group over ${\mathbb{R}}$, and $B$ is
the Borel subgroup of $G_{\mathbb{C}}$ containing $T_{\mathbb{C}}$, corresponding to the basis $\Pi$ of $R$.
Let $\rho\in{\rm Gal}({\mathbb{C}}/{\mathbb{R}})$ denote the complex conjugation. Since $G$ is defined over ${\mathbb{R}}$, the Galois group ${\rm Gal}({\mathbb{C}}/{\mathbb{R}})=\{1,\rho\}$ acts on ${\rm Aut}\,G_{\mathbb{C}}$. The {\em group of semi-automorphisms} $\mathrm{SAut}\,G_{\mathbb{C}}:=({\rm Aut}\,G_{\mathbb{C}})\rtimes {\rm Gal}({\mathbb{C}}/{\mathbb{R}})$ acts on $D$, see \cite[Proposition 3.1]{BKLR}. We describe this action here.
We construct a homomorphism \begin{equation*} \psi_S\colon \mathrm{SAut}\, G_{\mathbb{C}}\to{\rm Aut}\,D. \end{equation*} Let $s\in\mathrm{SAut}\,G_{\mathbb{C}}$. We have $T_{\mathbb{C}}\subset B\subset G_{\mathbb{C}}$. Consider the pair $(s(T_{\mathbb{C}}),s(B))$. There exists $g\in G({\mathbb{C}})$ such that \[g\cdot s(T_{\mathbb{C}})\cdot g^{-1}=T_{\mathbb{C}},\quad g\cdot s(B)\cdot g^{-1}=B,\] and if $g'\in G({\mathbb{C}})$ is another such element, then $g'=t'g$ for some $t'\in T({\mathbb{C}})$. We obtain a semi-automorphism \[{\rm Inn}(g)\circ s \in\mathrm{SAut}(G_{\mathbb{C}},T_{\mathbb{C}},B),\] which induces a well-defined automorphism \[\psi_S(s)\in{\rm Aut}\, D.\]
The restriction of $\psi_S$ to the subgroup \ ${\rm Gal}({\mathbb{C}}/{\mathbb{R}})\subset \mathrm{SAut}\, G$ \ gives the {\em $^*$-action} of ${\rm Gal}({\mathbb{C}}/{\mathbb{R}})$ on $D$, see \cite[Section 2.3]{Tits}. Let \begin{equation}\label{e:psi} \psi\colon {\rm Aut}\, G_{\mathbb{C}}\to{\rm Aut}\,D \end{equation} denote the restriction of $\psi_S$ to the subgroup ${\rm Aut}\, G_{\mathbb{C}}\subset \mathrm{SAut}\, G_{\mathbb{C}}$, then $\psi$ is clearly ${\rm Gal}({\mathbb{C}}/{\mathbb{R}})$-equivariant with respect to the $^*$-action of ${\rm Gal}({\mathbb{C}}/{\mathbb{R}})$ on $D$. The homomorphism $\psi$ fits into the exact sequence \begin{equation}\label{split-Springer} 1\to G^{\rm ad}({\mathbb{C}})\to {\rm Aut}\,G_{\mathbb{C}}\labelto{\psi} {\rm Aut}\,D\to 1 \end{equation} which admits a {\em splitting,} that is, a homomorphism $\phi\colon{\rm Aut}\,D\to{\rm Aut}\,G_{\mathbb{C}}$ such that $\psi\circ\phi={{\rm id}}_{{\rm Aut} D}$\,, see \cite[Expos\'e XXIV, Theorem 1.3]{SGA3} or \cite[Corollary 2.14]{SpringerAMS}, or \cite[Proposition 1.5.5]{Conrad-RGS}. We construct a splitting of \eqref{split-Springer} of a special kind in the next lemma.
\begin{lemma}\label{lem:phi} Let $G$ be as above, in particular compact and simply connected. Then there exists a homomorphism \[\phi\colon{\rm Aut}\,D\to{\rm Aut}\, G_{\mathbb{C}},\quad \theta\mapsto\phi_\theta\] such that $\psi\circ\phi={{\rm id}}_{{\rm Aut} D}$ and for any $\theta\in{\rm Aut}\,D$ the automorphism $\phi_\theta\in{\rm Aut}\,G_{\mathbb{C}}$ is defined over ${\mathbb{R}}$. \end{lemma}
\begin{proof} Consider the complexification ${\mathfrak{g}}_{\mathbb{C}}$ of ${\mathfrak{g}}={\rm Lie\,} G$ and the root decomposition \[ {\mathfrak{g}}_{\mathbb{C}}={\rm Lie\,} T_{\mathbb{C}} \oplus\bigoplus_{\beta\in R}{\mathfrak{g}}_\beta\,. \] Consider a ``canonical system of generators'' $h_i\,,e_i\,,f_i\ (i=1,\dots,n)$ of ${\mathfrak{g}}_{\mathbb{C}}$ satisfying \begin{align*} &[h_i\,,h_j]=0,\quad [e_i\,,f_i]=h_i\,,\quad [e_i\,,f_j]=0\text{ for }i\neq j\\ &[h_i\,,e_j]=a_{ji}e_j\,,\quad [h_i\,, f_j]=-a_{ji} f_j\,, \end{align*} see \cite[Section 4.3.2]{OV}. Here $(a_{ij})$ is the Cartan matrix, \[e_i\in {\mathfrak{g}}_{\alpha_i},\quad f_i\in {\mathfrak{g}}_{-\alpha_i},\quad h_i\in {\rm Lie\,} T_{\mathbb{C}},\quad \Pi=\{\alpha_1,\dots,\alpha_n\}.\] Since $G$ is compact, one can choose the generators $h_i\,,e_i\,,f_i$ such that \[^\rho h_i=-h_i\,, \quad ^\rho e_i=-f_i\,, \quad ^\rho f_i=-e_i\,, \] see \cite[Section 5.1.3, Problem 19]{OV}.
Now let $\theta\in {\rm Aut}\,D$. We define an automorphism $\phi_\theta$ of ${\mathfrak{g}}_{\mathbb{C}}$ on the generators by \[\phi_\theta(h_i)=h_{\theta(i)}\,,\quad \phi_\theta(e_i)=e_{\theta(i)}\,,\quad \phi_\theta(f_i)=f_{\theta(i)}\,.\] Clearly $\phi_\theta$ commutes with $\rho$, hence the automorphism $\phi_\theta$ of ${\mathfrak{g}}_{\mathbb{C}}$ is defined over ${\mathbb{R}}$. The ${\mathbb{R}}$-automorphism $\phi_\theta$ of ${\mathfrak{g}}$ induces a unique automorphism of the connected simply connected algebraic ${\mathbb{R}}$-group $G$; by abuse of notation we denote this automorphism again by $\phi_\theta$. We have $\phi_\theta(T)=T,\ \phi_\theta(B)=B$, and it is clear from our construction of $\psi$ that $\psi(\phi_\theta)=\theta$, hence $\psi\circ\phi={{\rm id}}_{{\rm Aut} D}$\,. Clearly \[\phi\colon{\rm Aut}\,D\to{\rm Aut}_{\mathbb{R}}\, G,\quad \theta\mapsto \phi_\theta\] is a homomorphism. \end{proof}
\begin{corollary} The complex conjugation $\rho$, when acting on $D$ via the $^*$-action, acts on ${\rm Aut}\,D$ trivially. \end{corollary} \begin{proof} Indeed, if $\theta\in{\rm Aut}\,D$, then \[^\rho\theta=\kern 0.8pt^\rho(\psi(\phi_\theta))=\psi(\kern 0.8pt^\rho(\phi_\theta))=\psi(\phi_\theta)=\theta,\] because $\phi_\theta\in{\rm Aut}_{\mathbb{R}}\,G$. \end{proof}
Let $_z G$ be an outer twisted form (outer twist) of $G$, where $z\in Z^1({\mathbb{R}},{\rm Aut}\, G)$ and $z\notin Z^1({\mathbb{R}},{\rm Inn}\, G)$. The homomorphism $\psi$ of \eqref{e:psi} induces a map \[ Z^1({\mathbb{R}},{\rm Aut}\,G)\to Z^1({\mathbb{R}},{\rm Aut}\,D)=({\rm Aut}\,D)_2\,.\] We obtain an element $\tau=\psi(z)\in ({\rm Aut}\,D)_2$, then $\tau$ is a nontrivial involutive automorphism of $D$. It acts on the set of vertices $\Pi$ of $D$; we write $\alpha_j\mapsto \alpha_{\tau(j)}$, $j=1,\dots,n$. We write $\Pi^\tau$ for the set of fixed points of $\tau$ in $\Pi$, and $D^\tau$ for the corresponding Dynkin subdiagram. Furthermore, $\tau$ acts on $\Pi^\vee$ by $\alpha_j^\vee\mapsto\alpha_{\tau(j)}^\vee$ and on $W=\langle r_j\rangle_{j=1,\dots,n}$ by $\tau(r_j)=r_{\tau(j)}$. We write $W^\tau$ for the algebraic subgroup of fixed points of $\tau$ in $W$.
The homomorphism $\phi$ of Lemma \ref{lem:phi} gives an involutive automorphism $\phi_\tau$ of $(G,T,B)$. By abuse of notation, we shall denote this ``diagrammatic'' automorphism $\phi_\tau$ again by $\tau$. Then $\tau\in{\rm Aut}_{\mathbb{R}}(G,T)_2$ and $\tau$ acts on $W$. We write $_\tau T$, $_\tau T^{\rm ad}$, $_\tau G$, $_\tau G^{\rm ad}$, and $_\tau W$ for the corresponding twisted algebraic groups.
We consider the action of $\tau$ on $\Pi$ and on $\Pi^\vee$. The decomposition $$ \Pi=\Pi^\tau\cup (\Pi\smallsetminus \Pi^\tau) $$ of the basis $\Pi$ of the character group ${{\sf X}}^*(T^{\rm ad})$ of the adjoint torus $T^{\rm ad}$ induces a $\tau$-invariant decomposition into a direct product \begin{equation}\label{e:T-product} T^{\rm ad}=T^{\rm ad}(D^\tau)\times_{\mathbb{R}} T^{\rm ad}(D\smallsetminus D^\tau) \end{equation} with ${{\sf X}}^*(T^{\rm ad}(D^\tau))=\langle\Pi^\tau\rangle$ and ${{\sf X}}^*(T^{\rm ad}(D\smallsetminus D^\tau))=\langle\,\Pi\smallsetminus \Pi^\tau\rangle$. Here for a subset $S\subset {{\sf X}}^*(T^{\rm ad})$, we denote by $\langle S\rangle$ the subgroup generated by $S$. Concerning the corresponding $\tau$-twisted tori, we see that $_\tau T^{\rm ad}(D^\tau)=T^{\rm ad}(D^\tau)$ is a compact torus, while the ${\mathbb{R}}$-torus $_\tau T^{\rm ad}(D\smallsetminus D^\tau)$ is isomorphic to the Weil restriction of scalars $R_{{\mathbb{C}}/{\mathbb{R}}} T'$ of some ${\mathbb{C}}$-torus $T'$. It follows that $$ H^1({\mathbb{R}},\kern 0.8pt_\tau T^{\rm ad}(D^\tau))=T^{\rm ad}(D^\tau)({\mathbb{R}})_2,\quad \text{while}\quad H^1({\mathbb{R}},\kern 0.8pt_\tau T^{\rm ad}(D\smallsetminus D^\tau))=1, $$ and therefore, the embedding ${}_\tau T^{\rm ad}(D^\tau)\hookrightarrow\kern 0.8pt_\tau T^{\rm ad}$ induces a canonical isomorphism \begin{equation}\label{eq:isomorphism-ad} T^{\rm ad}(D^\tau)({\mathbb{R}})_2=H^1({\mathbb{R}},\kern 0.8pt_\tau T^{\rm ad}(D^\tau))\isoto H^1({\mathbb{R}},\kern 0.8pt_\tau T^{\rm ad}). \end{equation}
Similarly, we have a decomposition $$ \Pi^\vee=\Pi^{\vee\,\tau}\,\cup\, (\Pi^\vee\smallsetminus \Pi^{\vee\,\tau}), $$ where we write $\Pi^{\vee\,\tau}$ for $(\Pi^\vee)^\tau$. This decomposition of the basis $\Pi^\vee$ of the cocharacter group ${{\sf X}}_*(T)$ induces a $\tau$-invariant decomposition into a direct product $$ T=T(D^\tau)\times T(D\smallsetminus D^\tau) $$ with ${{\sf X}}_*(T(D^\tau))=\langle\,\Pi^{\vee\,\tau} \rangle$ and ${{\sf X}}_*(T(D\smallsetminus\, D^\tau))=\langle\,\Pi^\vee\smallsetminus\, \Pi^{\vee\,\tau} \rangle$. As above, we have $_\tau T(D^\tau)=T(D^\tau)$, hence $$ H^1({\mathbb{R}},\kern 0.8pt_\tau T(D^\tau))=T(D^\tau)({\mathbb{R}})_2,\quad \text{while}\quad H^1({\mathbb{R}},\kern 0.8pt_\tau T(D\smallsetminus D^\tau))=1. $$
The involutive automorphism $\tau$ of $D$ acts on the set of labelings $L(D)$, and we denote by $L(D)^\tau$ the subset of invariants. The homomorphism $\gamma\colon L(D)\to T({\mathbb{C}})_2$ given by formula \eqref{e:gamma} induces an isomorphism $L(D)^\tau\to\kern 0.8pt_\tau T({\mathbb{R}})_2$ (because the complex conjugation acts on $_\tau T({\mathbb{C}})_2$ as $\tau$). We obtain a commutative diagram \begin{equation}\label{eq:isomorphism} \xymatrix{ L(D^\tau)\ar[r]^-\sim \ar@/_1pc/[d] &T(D^\tau)({\mathbb{R}})_2\ar[r]^-\sim\ar@/_1pc/[d] &H^1({\mathbb{R}}, T(D^\tau))\ar[d]^\sim \\ L(D)^\tau\ar[r]^-\sim\ar[u] &_\tau T({\mathbb{R}})_2\ar[r]\ar[u] &H^1({\mathbb{R}},\kern 0.8pt_\tau T) } \end{equation} with obvious maps.
For our $z\in Z^1({\mathbb{R}},{\rm Aut}\,G_{\mathbb{C}})$ and $\tau=\phi_{\psi(z)}$ we have $\psi(z)=\psi(\tau)$. It follows from the exact sequence \eqref{split-Springer} and \cite[I.5.5, Corollary 2 of Proposition 39]{Serre} that our outer form $_z G$ of $G$ is an {\em inner} twist of $_\tau G$, i.e. $_z G\simeq\kern 0.8pt_{z'} (\kern 0.8pt_\tau G)$ for some $z'\in Z^1({\mathbb{R}},\kern 0.8pt_\tau G^{\rm ad})$. By Proposition \ref{prop:Bo88} the cocycle $z'$ is cohomologous to some $t\in Z^1({\mathbb{R}},\kern 0.8pt_\tau T^{\rm ad})\subset Z^1({\mathbb{R}},\kern 0.8pt_\tau G^{\rm ad})$, and by \eqref{eq:isomorphism-ad} we may assume that $t\in T^{\rm ad}(D^\tau)({\mathbb{R}})_2 \subset \kern 0.8pt_\tau T^{\rm ad}({\mathbb{R}})_2$. We denote by ${\rm Inn}(t)$ the corresponding inner automorphism of $_\tau G$ of order dividing 2. We set $\sigma={\rm Inn}(t)\circ\tau$. Note that ${\rm Inn}(t)$ and $\tau$ commute, hence $\sigma$ is an outer automorphism of order 2 of $G$. We write $_{\sigma} G=\, _{{\rm Inn}(t)}(_\tau G)$ for the corresponding twisted form of $G$, then $\sigma\sim z$ and $_\sigma G\simeq\kern 0.8pt_z G$. For simplicity we also write $_{\sigma} G=\kern 0.8pt_{t\tau} G$. We have $_\sigma G({\mathbb{C}})= G({\mathbb{C}})$, but the complex conjugation in $_\sigma G({\mathbb{C}})$ is given by $$ ^*{\bar{g}}=\sigma({\bar{g}})={\rm Inn}(t)(\tau({\bar{g}})) . $$ Note that ${\rm Inn}(t)$ acts trivially on $_\tau T$, hence also on $_\tau W$, because $_\tau W\subset{\rm Aut}(_\tau T)$. We see that $_{t\tau} T=\kern 0.8pt_\tau T$ and $_{t\tau} W=\kern 0.8pt_\tau W$.
We consider the group $W_0:=W_0({}_{t\tau} G)=W_0({}_\tau G)$; see Section \ref{sec:1}. We have $W_0({\mathbb{C}})=W_0({\mathbb{R}})={}_\tau W({\mathbb{R}})$; see \cite[Section 7]{Borovoi-arXiv}. Clearly ${}_\tau W({\mathbb{R}})=W^\tau({\mathbb{C}})$, hence $W_0({\mathbb{R}})=W^\tau({\mathbb{C}})$. The group $W_0({\mathbb{R}})$ acts on $H^1({\mathbb{R}},{}_{t\tau} T)=H^1({\mathbb{R}},{}_\tau T)$ as in formula \eqref{eq:Bo-action}, and it acts on the set of labelings $L(D^\tau)$ via \eqref{eq:isomorphism}. We wish to describe this action explicitly.
Note that if $D$ is of type ${\mathbf{A}}_{2n}$, then $D^\tau=\emptyset$, $T(D^\tau)=1$, $H^1({\mathbb{R}},{}_\tau T)=1$, $H^1({\mathbb{R}},{}_\tau G)=1$ (in this case $_\tau G\simeq {\bf{SL}}_{2n+1}$). From now till the end of this section we shall assume that {\em $D$ is not of type ${\mathbf{A}}_{2n}$.} Then from the classification of Dynkin diagrams we know that for any $j\in D\smallsetminus D^\tau$, the vertices $j$ and $\tau(j)$ are not connected by an edge, and therefore, the reflections $r_j$ and $r_{\tau(j)}$ commute.
\begin{lemma}\label{lem-pairs} Assume that $D$ is not of type ${\mathbf{A}}_{2n}$. Then the group $W_0({\mathbb{R}})$ is generated by the reflections $r_i$ for $i\in D^\tau$ and by the products $r_j\cdot r_{\tau(j)}$ for $j\in D\smallsetminus D^\tau$. \end{lemma}
\begin{proof} We have $$
W_0({\mathbb{R}})={}_\tau W({\mathbb{R}})=W^\tau({\mathbb{C}}). $$ Now the lemma follows from \cite[Proposition 13.1.2]{Ca}. \end{proof}
\begin{lemma}\label{lem:prod-pairs} Assume that $D$ is not of type ${\mathbf{A}}_{2n}$. Let $j\in D\smallsetminus D^\tau$. Then the product $r_j\cdot r_{\tau(j)}$ acts trivially on $H^1({\mathbb{R}},\kern 0.8pt_\sigma T)=H^1({\mathbb{R}},\kern 0.8pt_\tau T)$, where $\sigma={\rm Inn}(t)\circ\tau$. \end{lemma}
\begin{proof} Let $b\in Z^1({\mathbb{R}},{}_\tau T)$. By diagram \eqref{eq:isomorphism} we may assume that $b\in T(D^\tau)({\mathbb{R}})_2\subset T({\mathbb{C}})_2$. Let ${\boldsymbol{b}}=(b_k)\in ({\mathbb{Z}}/2{\mathbb{Z}})^D$ be the corresponding labeling of $D$ such that $b=\prod_k (\alpha_k^\vee(-1))^{b_k}$.
We set $w_{j,\tau(j)}=r_j r_{\tau(j)}$. Let $G_j=G_{\alpha_j}$ denote the simple 3-dimensional subgroup of $G$ corresponding to the simple root $\alpha_j$. Choose a representative $n_j\in G_j({\mathbb{R}})\cap \mathcal{N}_G(T)({\mathbb{R}})$ of $r_j\in W({\mathbb{R}})$. Set $n_{\tau(j)}=\tau(n_j)\in G_{\tau(j)}({\mathbb{R}})$, then $\tau(n_{\tau(j)})=n_j$, because $\tau^2=1$. Since the vertices $j$ and $\tau(j)$ are not connected by an edge, the subgroups $G_j$ and $G_{\tau(j)}$ of $G$ commute, hence $n_j$ and $n_{\tau(j)}$ commute. Set $n_{j,\tau(j)}:=n_j n_{\tau(j)}$, then \[\tau(n_{j,\tau(j)})=\tau(n_j n_{\tau(j)})=n_{\tau(j)} n_j= n_j n_{\tau(j)}=n_{j,\tau(j)}\, ,\] hence $n_{j,\tau(j)}\in \mathcal{N}_G(T)({\mathbb{R}})^\tau$ and $n_{j,\tau(j)}$ represents $w_{j,\tau(j)}$.
We consider the action \eqref{eq:Bo-action} of $w_{j,\tau(j)}$ on $H^1({\mathbb{R}},\,_{t\tau}T)$. We write $w$ for $w_{j,\tau(j)}$ and $n$ for $n_{j,\tau(j)}$. Recall that $\sigma={\rm Inn}(t)\circ\tau$, where $t\in T^{\rm ad}(D^\tau)({\mathbb{R}})_2\subset T^{\rm ad}({\mathbb{C}})_2$. We lift $t$ to some ${\tilde t}\in T({\mathbb{C}})$. Then we have \begin{equation*} w* [b]:=[nbn^{-1}\cdot n\,{\rm Inn}(t)(\tau({\bar{n}})^{-1})]= [nbn^{-1}\cdot n{\tilde t}\tau({\bar{n}})^{-1}{\tilde t}^{-1}]=[nbn^{-1}\cdot n{\tilde t} n^{-1}{\tilde t}^{-1}], \end{equation*} because $\tau({\bar{n}})=n$. Thus the action \eqref{eq:Bo-action} of $w\in W_0({\mathbb{R}})$ on $Z^1({\mathbb{R}},\,_{t\tau}T)$ is compatible with the action \eqref{eq:ect-0-gen} of $w\in W({\mathbb{C}})$ on $T({\mathbb{C}})_2$.
We consider the $t$-twisted action \eqref{eq:ect-0-gen} of $W({\mathbb{C}})$ on $T({\mathbb{C}})_2$. Then Lemma \ref{prop:twisted} is applicable, and it implies that the move ${\mathcal{M}}_j$ corresponding to the reflection $r_j\in W({\mathbb{C}})$ can change only the $j$-coordinate $b_j$ of ${\boldsymbol{b}}$. Now consider $w_{j,\tau(j)}=r_{\tau(j)}r_j\in W_0({\mathbb{R}})\subset W({\mathbb{C}})$ for $j\in D\smallsetminus D^\tau$, then we see that ${\mathcal{M}}_{\tau(j)}{\mathcal{M}}_j$ can change only the $j$- and the $\tau(j)$-coordinates of ${\boldsymbol{b}}$. In particular, if we write ${\boldsymbol{b}}'= ({\mathcal{M}}_j {\mathcal{M}}_{\tau(j)}) {\boldsymbol{b}}$, then $b'_i=b_i$ for any $i\in D^\tau$.
Since $w_{j,\tau(j)}\in W_0({\mathbb{R}})$ and $b\in Z^1({\mathbb{R}},\kern 0.8pt_\sigma T)$, we see that $b':=w_{j,\tau(j)}(b)$ is contained in $Z^1({\mathbb{R}},\kern 0.8pt_\sigma T)$. Since $b'_i=b_i$ for any $i\in D^\tau$, by diagram \eqref{eq:isomorphism} $b'\sim b$ in $Z^1({\mathbb{R}},\kern 0.8pt_\sigma T)$. Thus $w_{j,\tau(j)}=r_{\tau(j)}\,r_j$ acts trivially on $H^1({\mathbb{R}},\kern 0.8pt_\sigma T)$. \end{proof}
\begin{lemma}\label{lem:r-i-t-tau} Let $a\in T(D^\tau)({\mathbb{R}})_2\subset Z^1({\mathbb{R}},\kern 0.8pt_\sigma T)$. Let $i\in D^\tau$, and write $[a']= r_i[a]$, where $a'\in T(D^\tau)({\mathbb{R}})_2\subset Z^1({\mathbb{R}},\kern 0.8pt_\sigma T)$, and $[a]\mapsto r_i[a]$ refers to the action \eqref{eq:Bo-action} of $r_i\in W_0({\mathbb{R}})$ on $H^1({\mathbb{R}},\,_\sigma T)$. Write \begin{equation*} a=\prod_{j\in D^\tau} \left(\alpha_j^\vee(-1)\right)^{a_j},\qquad a'=\prod_{j\in D^\tau} \left(\alpha_j^\vee(-1)\right)^{a'_j}. \end{equation*} Then $a'_j=a_j$ for $j\neq i$ and \begin{equation}\label{eq:twisted-outer} a'_i=a_i+t_i+\Ssd a_k\,, \end{equation} where $(-1)^{t_i}=\alpha_i(t)$ and the sum is taken over the neighbors $k$ of $i$ {\em lying in $D^\tau$}. \end{lemma}
\begin{proof} Write $a=\prod_{j\in D} \left(\alpha_j^\vee(-1)\right)^{a_j}$, then $a_j=0$ for $j\in {D\smallsetminus D^\tau}$. Now let $i\in D^\tau$, then arguing as in the proof of Lemma \ref{lem:prod-pairs}, we see that the action \eqref{eq:Bo-action} of $r_i\in W_0({\mathbb{R}})$ is compatible with the action \eqref{eq:ect-0-gen}, where $n=n_i\in G_i({\mathbb{R}})\cap \mathcal{N}_G(T)({\mathbb{R}})$. By Lemma \ref{prop:twisted} this action is given by formula \eqref{twisted-simply-laced}, i.e., by formula \eqref{eq:twisted-outer}, where the sum is taken over {\em all} neighbors $k$ of $i$ in $D$. However, if $k\in D\smallsetminus D^\tau$, then $a_k=0$. We see that in formula \eqref{eq:twisted-outer} we may take the sum only over neighbors $k$ of $i$ contained in $D^\tau$, as required. \end{proof}
The following theorem was announced in \cite{Bo}. It reduces computing the Galois cohomology of an outer form of a simply connected compact group $G$ to computing the Galois cohomology of an inner form of some other simply connected compact group (of type ${\mathbf{A}}_l$ for some $l$).
\begin{theorem}[{\cite[Theorem 3]{Bo}}] \label{cor:Theorem-3-Bo} Let $G$ be a simply connected, simple, compact linear algebraic group over ${\mathbb{R}}$. Let\, $T$, $R$, $\Pi$ and $D$ be as in Section \ref{sec:compact}. Let $\tau$ be an automorphism of order $2$ of the Dynkin diagram $D$ of $G_{\mathbb{C}}$ (then $D$ is simply-laced). Let $l=\# D^\tau$. Let $G(D^\tau)\subset G$ be the ${\mathbb{R}}$-subgroup of type ${\mathbf{A}}_l$ corresponding to the Dynkin subdiagram $D^\tau$ of $D$. Let $t\in T^{\rm ad}(D^\tau)({\mathbb{R}})_2\subset \kern 0.8pt_\tau T^{\rm ad}({\mathbb{R}})_2$. Then the natural embedding $\kern 0.8pt_t G(D^\tau)\hookrightarrow {}_{t\,\tau} G,$ obtained by twisting by $t$ from the embedding $G(D^\tau)\hookrightarrow\kern 0.8pt_\tau G$, induces a bijection $$ H^1({\mathbb{R}},\kern 0.8pt_t G(D^\tau))\isoto H^1({\mathbb{R}},{}_{t\,\tau} G). $$ \end{theorem}
\begin{proof} The embedding $T(D^\tau)\hookrightarrow {}_\tau T$ induces an isomorphism \begin{equation}\label{eq:isom-2} H^1({\mathbb{R}},T(D^\tau))\isoto H^1({\mathbb{R}}, {}_\tau T). \end{equation} The group $W(D^\tau)({\mathbb{R}})$ acts on the left-hand side of \eqref{eq:isom-2}; this group is generated by $r_i$ for $i\in D^\tau$. The group $W_0({\mathbb{R}})$ acts on the right-hand side; by Lemma \ref{lem-pairs} this group is generated by $r_i$ for $i\in D^\tau$ and by $r_{\tau(j)} r_j$ for $j\in {D\smallsetminus D^\tau}$. By Lemma \ref{lem:prod-pairs} the products $r_{\tau(j)} r_j$ for $j\in {D\smallsetminus D^\tau}$ act trivially on the right-hand side of \eqref{eq:isom-2}. Comparing formulas \eqref{twisted-simply-laced} and \eqref{eq:twisted-outer}, we see that the actions of a reflection $r_i$ for $i\in D^\tau$ on the left-hand side and the right-hand side of \eqref{eq:isom-2} are compatible. Thus we obtain a bijection of the quotients: $$ H^1({\mathbb{R}},\kern 0.8pt_t G(D^\tau))=W(D^\tau)({\mathbb{R}})\backslash T(D^\tau)({\mathbb{R}})_2 \isoto W_0({\mathbb{R}})\backslash H^1({\mathbb{R}},{}_\tau T)= H^1({\mathbb{R}},{}_{t\,\tau} G), $$ where the left-hand and right-hand equalities are bijections of Proposition \ref{prop:Bo88}. \end{proof}
From diagram \eqref{eq:isomorphism} and Theorem \ref{cor:Theorem-3-Bo} we obtain a commutative diagram \begin{equation}\label{eq:bijections} \xymatrix{ L(D^\tau)\ar[r]^-\sim \ar@/_1pc/[d] &T(D^\tau)({\mathbb{R}})_2\ar[r]^-\sim\ar@/_1pc/[d] &H^1({\mathbb{R}}, T(D^\tau))\ar[r]\ar[d]^\sim &H^1({\mathbb{R}},\kern 0.8pt_t G(D^\tau))\ar[d]^\sim \\ L(D)^\tau\ar[r]^-\sim\ar[u] &_\tau T({\mathbb{R}})_2\ar[r]\ar[u] &H^1({\mathbb{R}},\kern 0.8pt_\tau T)\ar[r] &H^1({\mathbb{R}},\kern 0.8pt_{t\kern 0.8pt\tau} G) } \end{equation}
Recall that $t\in T^{\rm ad}(D^\tau)({\mathbb{R}})_2$. We define a coloring ${\boldsymbol{t}}=(t_j)_{j\in D^\tau}\ (t_j\in {\mathbb{Z}}/2{\mathbb{Z}})$ of $D^\tau$ as in \eqref{eq:t-bold}, i.e., by $(-1)^{t_j}=\alpha_j(t)$.
\begin{proposition}\label{c:restriction} \begin{enumerate} \item[(i)] In diagram \eqref{eq:bijections}, the map $L(D)^\tau\to H^1({\mathbb{R}},\kern 0.8pt_{t\kern 0.8pt\tau} G)$ of the bottom row of the diagram is surjective. \item[(ii)] Two labelings ${\boldsymbol{a}},{\boldsymbol{a}}'\in L(D)^\tau$ have the same image in $H^1({\mathbb{R}},\kern 0.8pt_{t\kern 0.8pt\tau} G)$ if and only if their images in $L(D^\tau)$ (i.e., their restrictions to $D^\tau$) lie in the same equivalence class in $L(D^\tau,{\boldsymbol{t}})$. In particular, a labeling ${\boldsymbol{a}}\in L(D)^\tau$ maps to $[1]\in H^1({\mathbb{R}},\kern 0.8pt_{t\kern 0.8pt\tau} G)$ if and only if its restriction to $D^\tau$ lies in the equivalence class of 0 in $L(D^\tau,{\boldsymbol{t}})$. \end{enumerate} \end{proposition}
\begin{proof} (i) This follows from Lemma \ref{lem:Bo88} and Proposition \ref{prop:Bo88}.
(ii) Indeed, by Theorem \ref{thm:inner} two labelings ${\boldsymbol{b}},{\boldsymbol{b}}'\in L(D^\tau)$ have the same image in $H^1({\mathbb{R}},\kern 0.8pt_t G(D^\tau))$ if and only if they lie in the same equivalence class in $L(D^\tau,{\boldsymbol{t}})$. \end{proof}
We say that two labelings ${\boldsymbol{a}},{\boldsymbol{a}}'\in L(D)^\tau$ are {\em ${\boldsymbol{t}}$-equivalent} if their restrictions to $D^\tau$ lie in the same equivalence class in $L(D^\tau,{\boldsymbol{t}})$. We denote by ${\rm Cl}(D,\tau,{\boldsymbol{t}})$ the set of ${\boldsymbol{t}}$-equivalence classes in $L(D)^\tau$, then the restriction map $L(D)^\tau\to L(D^\tau)$ induces a bijection \begin{equation}\label{e:res-bij} {\rm Cl}(D,\tau,{\boldsymbol{t}})\isoto {\rm Orb}(D^\tau,{\boldsymbol{t}}). \end{equation} By Proposition \ref{c:restriction} we have a bijection \begin{equation}\label{e:general-bijection}
{\rm Cl}(D,\tau,{\boldsymbol{t}})\isoto H^1({\mathbb{R}},\kern 0.8pt_{t\kern 0.8pt\tau} G). \end{equation} We obtain a bijection ${\rm Orb}(D^\tau,{\boldsymbol{t}})\isoto H^1({\mathbb{R}},\kern 0.8pt_{t\kern 0.8pt\tau} G)$. We specify that this bijection takes the class of a labeling ${\boldsymbol{b}}=(b_\alpha)_{\alpha\in \Pi^\tau}$ of $D^\tau$ to the cohomology class of the cocycle \[\prod_{\alpha\in \Pi^\tau}(\alpha^\vee(-1))^{b_{\alpha}}\in T(D^\tau)({\mathbb{R}})_2\subset Z^1({\mathbb{R}}, \kern 0.8pt_{t\tau} G).\]
In the case when $_z G$ is an {\em inner} form of the compact group $G$, we again set $\tau=\psi(z)\in{\rm Aut}(D)$, then $\tau=1$, and we set ${\rm Cl}(D,\tau,{\boldsymbol{t}})={\rm Orb}(D,{\boldsymbol{t}})$ in this case. In particular, when $_z G=G$ is {\em compact}, we have $\tau=1, t=1,{\boldsymbol{t}}=0$, and we set ${\rm Cl}(D,\tau,{\boldsymbol{t}})={\rm Orb}(D)$ in this case. Then we have bijection \eqref{e:general-bijection} in all the cases.
\section{Reeder puzzles from Kac diagrams} \label{sec:Kac}
In this section $G$, $T$, $R$, $\Pi$, $B$, $D$, and $W$ are as in Section \ref{sec:compact}, in particular $G$ is a simply connected, simple, {\em compact} linear algebraic group over ${\mathbb{R}}$.
\subsection{Inner forms} \label{ss:Kac}
Let $_zG$ be a noncompact inner twisted form (inner twist) of $G$, where $z\in Z^1({\mathbb{R}}, G^{\rm ad})$. By \cite[III.4.5, Example (a)]{Serre}, $z$ is cohomologous to some $t\in T^{\rm ad}({\mathbb{R}})_2\subset Z^1({\mathbb{R}}, G^{\rm ad})$. We regard $t\in T^{\rm ad}({\mathbb{R}})_2\subset ({\rm Aut}\, G)_2$ as an involutive inner automorphism of $G$. Since $_zG$ is noncompact, we have $t\neq 1$. By Kac \cite{Kac}, see also Helgason \cite[Ch.~X, \S\,5]{Helgason}, Onishchik and Vinberg \cite[Ch.~4, \S\,4 and Ch.~5, \S\,1]{OV}, and Gorbatsevich, Onishchik, and Vinberg \cite[Section 3.3.7]{OV2}, involutive {\em inner} automorphisms of $G$ can be described using Kac diagrams of types I and II in \cite[Table 7]{OV}. (The Kac diagrams of type III in \cite[Table 7]{OV} correspond to involutive {\em outer} automorphisms.)
The relation between Kac diagrams and involutive inner automorphisms is as follows, see \cite[Problem 5.1.38]{OV}. Consider the extended Dynkin diagram ${\widetilde{D}}$ of $G$. Its vertices correspond to the roots $\alpha_0,\alpha_1,\dots,\alpha_n$, where $\alpha_1,\dots,\alpha_n$ are the simple roots and $\alpha_0$ is the {\em lowest} root. There is a unique linear dependence \[ m_0\alpha_0+m_1\alpha_1+\dots+m_n\alpha_n=0 \] normalized so that $m_0=1$, and then $m_j$ are positive integers tabulated in \cite[Table 6]{OV}. A {\em Kac $2$-marking of} ${\widetilde{D}}$ is a family of nonnegative integral numerical marks ${\boldsymbol{q}}=(q_j)_{j=0,1,\dots,n}\in{\mathbb{Z}}^{n+1}_{\ge 0}$ at the vertices ${j=0,1,\dots,n}$ of ${\widetilde{D}}$, satisfying \begin{equation}\label{eq:2markings} m_0 q_0+m_1 q_1+\dots+m_n q_n=2. \end{equation} A Kac 2-marking ${\boldsymbol{q}}$ determines a unique element $t\in T^{\rm ad}({\mathbb{R}})_2\subset G^{\rm ad}({\mathbb{R}})_2$ such that \begin{equation}\label{e:qj} \alpha_j(t)=(-1)^{q_j},\quad j=1,\dots,n. \end{equation} Involutive inner automorphisms of $G$ are classified, up to conjugacy, by Kac 2-markings of ${\widetilde{D}}$. Two Kac 2-markings ${\boldsymbol{p}}$ and ${\boldsymbol{q}}$ give conjugate inner automorphisms if and only if ${\boldsymbol{p}}$ can be obtained from ${\boldsymbol{q}}$ by an automorphism of ${\widetilde{D}}$, see \cite[3.3.6, Theorem 3.11]{OV2}.
\begin{lemma}\label{l:special-transitive}
The group ${\rm Aut}\,{\widetilde{D}}$ acts transitively on the set $\{j\in{\widetilde{D}}\ |\ m_j=1\}$. \end{lemma} \begin{proof} By \cite[Section VI.2.3, Corollary of Proposition 6]{Bourbaki}, the center $Z(G)$ acts simply transitively on this set when acting on ${\widetilde{D}}$. \end{proof}
Let ${\boldsymbol{q}}$ be a Kac 2-marking of ${\widetilde{D}}$. It follows from \eqref{eq:2markings} that there are three possibilities: \begin{enumerate} \item[(0)] $q_i=2$ for some $i\in {\widetilde{D}}$ with $m_i=1$, and $q_j=0$ for all $j\neq i$. \item[(1)] (Type I) $q_i=1$ for some $i$ with $m_i=2$, and $q_j=0$ for all $j\neq i$. \item[(2)] (Type II) $q_{i_1}=1,\ q_{i_2}=1$ for some $i_1\neq i_2$
with $m_{i_1}=1,\ m_{i_2}=1$, and $q_j=0$ for all $j\neq i_1,i_2$. \end{enumerate}
In case (0) clearly $t=1$ and hence $_tG=G$ is compact, so we do not consider this case.
In case (1) we have $i\neq 0$, because $m_i=2$, while $m_0=1$. We color vertex $i$ of ${\widetilde{D}}$ in black and leave the other vertices white. We say that $({\widetilde{D}},{\boldsymbol{q}})$ is a Kac diagram of type I.
In case (2) by Lemma \ref{l:special-transitive} we may and shall assume that $i_1=0$. We write $i$ for $i_2$, then $i\neq 0$. We color vertices $i_1=0$ and $i_2=i$ in black and leave all the other vertices white. We say that $({\widetilde{D}},{\boldsymbol{q}})$ is a Kac diagram of type II.
The Kac diagrams of types I and II up to isomorphism are tabulated in \cite[Table 7]{OV}. From now on, when we consider Kac diagrams of types I and II, we assume that they are ones from that table. Then in both cases I and II, ${\widetilde{D}}$ has exactly one nonzero black vertex $i$.
We compute the coloring ${\boldsymbol{t}}$ of $D$, induced by $t$, in terms of ${\boldsymbol{q}}$. Comparing \eqref{e:qj} and \eqref{eq:t-bold}, we obtain that \[(-1)^{t_j}=(-1)^{q_j}\quad\text{for } j=1,\dots,n. \] Since $t_j=0,1$ and $q_j=0,1$, we see that $t_j=q_j$ for $j=1,\dots,n$. Thus the map ${\boldsymbol{t}}\colon D\to \{0,1\}$ is the restriction of ${\boldsymbol{q}}\colon {\widetilde{D}}\to{\mathbb{Z}}_{\ge0}$ to the subset $D$ of ${\widetilde{D}}$.
We conclude that if a noncompact inner form $_tG$ of $G$ is given by a Kac diagram $({\widetilde{D}},{\boldsymbol{q}})$ of type I or II from \cite[Table 7]{OV}, then we can obtain a colored Dynkin diagram $(D,{\boldsymbol{t}})$ of the twisted group $_t G$ by removing vertex 0 from $({\widetilde{D}},{\boldsymbol{q}})$. We say that $(D,{\boldsymbol{t}})$ is a {\em twisting diagram} for $_t G$. It has exactly one black vertex $i$. We call $i$ the {\em twisting vertex.} The coloring ${\boldsymbol{t}}$ of $D$ defines the action of Lemma \ref{prop:twisted} of the Weyl group $W$ on $L(D)$; we say that this action of $W$ is {\em twisted at} $i$. The next proposition follows immediately from Lemma \ref{prop:twisted}.
\begin{proposition}\label{prop:twisted-i} Let $_z G$ be a noncompact inner form of $G$. Then $_z G\simeq \kern 0.8pt_t G$, where $t\in T^{\rm ad}({\mathbb{R}})_2$, $t\neq 1$, and $t$ comes from a Kac diagram $({\widetilde{D}},{\boldsymbol{q}})$ of type I or II in \cite[Table 7]{OV}. The corresponding colored Dynkin diagram $(D,{\boldsymbol{t}})$ is obtained from $({\widetilde{D}},{\boldsymbol{q}})$ by removing vertex 0. It has a unique black vertex $i$. For the (twisted at $i$) action of $W$ on $L(D,{\boldsymbol{t}})$, we have the same formula \eqref{non-twisted-simply-laced} for ${\mathcal{M}}_j$ for $j\neq i$ as in Lemma \ref{prop:non-twisted}. For ${\mathcal{M}}_i$ we have, as in Lemma \ref{prop:non-twisted}, $a'_j=a_j$ for $j\neq i$, while in formula \eqref{non-twisted-simply-laced} for $a'_i$ we must add $1$. Namely, we have \begin{equation}\label{twisted-simply-laced-new}
a'_i = a_i+1 + \Ss a_k\, , \end{equation} where the meaning of $\Ss$ is the same as in formula \eqref{non-twisted-simply-laced}. \end{proposition}
\begin{construction} \label{const:augmented-diag} Assume we have a twisting diagram with a black vertex $i$: \begin{equation*} \sxymatrix{ \cdots \ar@{-}[r] &\bc{} \ar@{-}[r] & \bcb{i} \ar@{-}[r] &\bc{} \ar@{-}[r] & \cdots } \end{equation*} We have formula \eqref{twisted-simply-laced-new} for ${\mathcal{M}}_i$. In order to get formula \eqref{non-twisted-simply-laced} instead, we uncolor the vertex $i$ and {\em augment} our diagram by formally adding a new vertex which we call the {\em boxed 1}, connected by a simple edge to vertex $i$: \begin{equation*} \sxymatrix{ \cdots \ar@{-}[r] &\bc{} \ar@{-}[r] \ar@{-}[r] & \bc{i} \ar@{-}[r] \ar@{-}[d] &\bc{} \ar@{-}[r] & \cdots \\ & & *+[F]{1} & & } \end{equation*} Here 1 in the box means that we put 1 as the label at this new vertex. Now the formula for ${\mathcal{M}}_i$ becomes \eqref{non-twisted-simply-laced} where the boxed 1 is included in the sum. Thus this boxed 1 accounts for twisting at $i$. Note that we do not add a move corresponding to the boxed 1, so the label 1 at the boxed 1 cannot be changed by moves. We call the obtained diagram the {\em augmented diagram} corresponding to the twisting vertex $i$. \end{construction}
\begin{example} These are the Kac diagram, the twisting diagram and the augmented diagram for the group $EIII$ of type ${\mathbf{E}}_6$ (see Subsection \ref{subsec:E6(1)} below): \begin{equation*} \mxymatrix { \bcb{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} &&& \bcb{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} &&& *+[F]{1} \ar@{-}[r] &\bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} \\ & & {\phantom{\hbox{\SMALL\kern 0.8pt 6}}}{ \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \hbox{\SMALL\kern 0.8pt 6} \ar@{-}[d] & & &&&& &\bcu{6} & & &&& & & &\bcu{6} & & \\ & & \bcbu{0} & & } \end{equation*} \end{example}
\subsection{Outer forms} \label{ss:Kac-outer}
Let $_z G$ be an outer twisted form (outer twist) of $G$, where $z\in Z^1({\mathbb{R}},{\rm Aut}\, G)$. Using the homomorphism $\psi\colon{\rm Aut}\,G_{\mathbb{C}}\to{\rm Aut}\,D$ of \eqref{e:psi}, we obtain an element $\tau=\psi(z)\in Z^1({\mathbb{R}},{\rm Aut}\,D)=({\rm Aut}\,D)_2$; then $\tau^2=1$. Since $_z G$ is an {\em outer} form, $\tau\neq 1$. Thus $\tau$ is a nontrivial involutive automorphism of $D$. We write $\Pi^\tau$ for the set of fixed points of $\tau$ in $\Pi$, and $D^\tau$ for the corresponding Dynkin subdiagram. As in Section \ref{sec:outer}, we denote the ``diagrammatic'' automorphism $\phi_\tau$ of $(G,T,B)$ again by $\tau$. There exists $t\in T^{\rm ad}(D^\tau)({\mathbb{R}})_2$ such that $_z G\simeq\kern 0.8pt_{t\tau}G$, see Section \ref{sec:outer}. (Here $T^{\rm ad}= T^{\rm ad}(D^\tau)\times_{\mathbb{R}} T^{\rm ad}(D\smallsetminus D^\tau)$, see \eqref{e:T-product}.)
By \cite{Kac}, see also \cite[Ch.~X, \S\,5]{Helgason},
\cite[Ch.~4, \S\,4 and Ch.~5, \S\,1]{OV}, and \cite[Section 3.3.11]{OV2}, involutive {\em outer} automorphisms $\sigma=t\tau\in ({\rm Aut}\, G)_2$ can be described using Kac diagrams of type III in \cite[Table 7]{OV} as follows.
Set ${\mathfrak{g}}={\rm Lie\,} G$. The diagrammatic automorphism $\tau$ acts on $T^{\rm ad}$, and we consider the torus $(T^{\rm ad})^\tau$ of dimension $n'=\#\Pi^\tau+\half\#(\Pi-\Pi^\tau)$. The nonzero weights $\alpha'$ of $(T^{\rm ad})^\tau_{\mathbb{C}}$ in ${\mathfrak{g}}_{\mathbb{C}}$ are the restrictions of the roots $\alpha$ of ${\mathfrak{g}}_{\mathbb{C}}$ with respect to $T^{\rm ad}_{\mathbb{C}}$. The restricted roots form a (possibly nonreduced) root system, see \cite[Section 3.3.9, Theorem 3.14]{OV2}. In particular, for any restricted root $\alpha'$ the coroot $(\alpha')^\vee$ is defined. Write ${\mathfrak{g}}^\tau$ and ${\mathfrak{g}}^{-\tau}$ for the $+1$ and $-1$ eigenspaces of $\tau$, respectively. It is known (see \cite[Section 3.3.9]{OV2}) that ${\mathfrak{g}}^\tau_{\mathbb{C}}$ is a simple Lie algebra and the representation of $G^\tau$ in ${\mathfrak{g}}^{-\tau}_{\mathbb{C}}$ is irreducible.
The set ${\Pi'}=\{\alpha'_1,\dots,\alpha'_{n'}\}$ of {\em distinct} restrictions
of simple roots $\{\alpha_1,\dots,\alpha_n\}$ to $(T^{\rm ad})^\tau$ is in a bijection with the set of orbits of $\tau$ in $\Pi=\{\alpha_1,\dots,\alpha_n\}$. This set ${\Pi'}$ is a set of simple roots of ${\mathfrak{g}}^\tau_{\mathbb{C}}$ with respect to $(T^{\rm ad})_{\mathbb{C}}^\tau$ (see \cite[Section 3.3.9]{OV2}). Let $\alpha'_0$ denote the {\em lowest weight} of $(T^{\rm ad})^\tau_{\mathbb{C}}$ in ${\mathfrak{g}}^{-\tau}_{\mathbb{C}}$. Then $\{\alpha'_0,\alpha'_1,\dots,\alpha'_{n'}\}$ is an admissible system of roots in the sense that the Cartan numbers $\langle \alpha'_{i'},(\alpha_{j'}^\prime)^\vee\rangle$ are non-positive for ${i'}\neq {j'}$ (see \cite[Section 3.3.9]{OV2}). The Cartan matrix is encoded by a twisted affine Dynkin diagram ${\widetilde{D'}}$, see \cite[Section 3.1.7]{OV2}. There is a unique linear dependence \[ m'_0\alpha'_0+ m'_1\alpha'_1+\dots +m'_{n'}\alpha'_{n'}=0,\] normalized so that $m'_0=1$, then $m'_{j'}$ are positive integers.
Set
\[{\mathfrak{t}}={\rm Lie\,} T={\rm Lie\,} T^{\rm ad},\quad {\mathfrak{t}}_0={\mathfrak{t}}^\tau={\rm Lie\,}(T^{\rm ad})^\tau,\quad {\mathfrak{t}}_1=\{x\in{\mathfrak{t}}\ |\ \tau(x)=-x\}.\] Set $V={\mathrm{i}}{\mathfrak{t}}\subset{\mathfrak{t}}_{\mathbb{C}},\quad V_0={\mathrm{i}}{\mathfrak{t}}_0,\ V_1={\mathrm{i}}{\mathfrak{t}}_1,$ where ${\mathrm{i}}^2=-1$. Let $V^*$, $V_0^*$, and $V_1^*$ denote the dual spaces to the vector ${\mathbb{R}}$-spaces $V$, $V_0$, and $V_1$, resp. Let $Q={{\sf X}}^*(T^{\rm ad}_{\mathbb{C}})$ and $Q_0={{\sf X}}^*(\kern 0.8pt (T^{\rm ad})_{\mathbb{C}}^\tau)$ be the corresponding character groups, then $Q$ embeds into $V^*$ and $Q_0$ embeds into $V_0^*$. The Killing form is positive definite on $V\subset {\mathfrak{t}}_{\mathbb{C}}\subset {\rm Lie\,} G_{\mathbb{C}}$. The orthogonal decomposition $V=V_0\oplus V_1$ with respect to the Killing form induces an identification $V^*=V_0^*\oplus V_1^*$, and the restriction map $Q\to Q_0$ is compatible with the orthogonal projection \[ V^*=V_0^*\oplus V_1^*\to V_0^*\] with respect to this identification. Note that all the simple roots $\alpha\in\Pi$ have the same length (because $D$ admits a nontrivial automorphism). We see that the restrictions (projections onto $V_0^*=(V^*)^\tau\subset V^*$\kern 0.8pt) of the $\tau$-fixed roots $\alpha\in\Pi^\tau$ are longer than the restrictions of the nonfixed roots $\alpha\in \Pi\smallsetminus\Pi^\tau$.
The involutive {\em outer} automorphisms $\sigma=t\tau\in ({\rm Aut}\, G)_2$ are classified, up to conjugacy, by Kac 1-markings of ${\widetilde{D'}}$. A {\em Kac $1$-marking of} ${\widetilde{D'}}$ is a family of nonnegative integer numerical marks ${\boldsymbol{q}}=(q_{j'})_{{j'}=0,1,\dots,{n'}}\in{\mathbb{Z}}_{\ge 0}^{{n'}+1}$ at the vertices of ${\widetilde{D'}}$, satisfying \begin{equation}\label{e:1-marking}
m'_0 q_0+ m'_1 q_1+\dots +m'_{n'} q_{n'}=1.
\end{equation} A Kac 1-marking of ${\widetilde{D'}}$ determines a unique element $t\in(T^{\rm ad})({\mathbb{R}})^\tau_2$ such that \begin{equation}\label{e:aut-1-marking} \alpha'_{j'}(t)=(-1)^{\varkappa}\quad \text{with } \varkappa=q_{j'},\ {j'}=1,\dots,{n'}. \end{equation} With ${\boldsymbol{q}}$ one associates the outer involutive automorphism $\sigma=t\tau$ of $G$. Two Kac 1-markings ${\boldsymbol{p}}$ and ${\boldsymbol{q}}$ of ${\widetilde{D'}}$ give conjugate automorphisms of $G$ if and only if ${\boldsymbol{p}}$ can be obtained from ${\boldsymbol{q}}$ by an automorphism of ${\widetilde{D'}}$, see \cite[3.3.10, Theorem 3.16]{OV2}.
For a 1-marking ${\boldsymbol{q}}$ of ${\widetilde{D'}}$, it follows from \eqref{e:1-marking} that there exists a unique vertex ${i'}$ with $m'_{i'} =1$ such that $q_{i'}=1$; for all ${j'}\neq {i'}$ we have $q_{j'}=0$. We color vertex ${i'}$ of ${\widetilde{D'}}$ in black and leave all the other vertices white. We obtain a Kac diagram $({\widetilde{D'}},{\boldsymbol{q}})$ of type III. The Kac diagrams of type III up to isomorphism are tabulated in \cite[Table 7]{OV}. From now on, when we consider a Kac diagram of type III, we assume that it is from \cite[Table 7]{OV}.
Let ${i'}\in {\widetilde{D'}}$ be the black vertex. We see from \eqref{e:aut-1-marking} that \begin{equation}\label{e:Dynkin-twisted} \alpha'_{{i'}}(t)=-1,\quad \alpha'_{{j'}}(t)=1\text{ for }1\le{j'}\le{n'},\ {j'}\neq {i'}. \end{equation} Clearly, if ${i'}=0$, then $\alpha'_{{j'}}(t)=1$ for all ${j'}\neq 0$, hence $t=1$. We use the English (not Russian) version of \cite{OV}. In the English version of \cite[Table 7]{OV} the vertices of Kac diagrams are numbered, and one can easily see from the table that when ${i'}\neq 0$, the restricted root $\alpha'_{{i'}}$ is long, hence $\alpha'_{{i'}}$ is the restriction to $T^{\rm ad}_{\mathbb{C}}$ of some {\em $\tau$-fixed} root $\alpha_i\in D^\tau$. The element $t\in (T^{\rm ad})^\tau({\mathbb{R}})_2$ is determined by \eqref{e:Dynkin-twisted}. For any $\alpha_j\in \Pi\smallsetminus\Pi^\tau$, let $\alpha'_{{j'}}$ denote the restriction of $\alpha_j$ to $(T^{\rm ad})^\tau_{\mathbb{C}}$, then the restricted root $\alpha'_{j'}$ is short, hence ${j'}\neq {i'}$ and therefore, $\alpha_j(t)=\alpha'_{{j'}}(t)=1$. We see that $t\in T^{\rm ad}(D^\tau)({\mathbb{R}})_2$ and \begin{equation}\label{e:Dynkin-non-twisted} \alpha_{i}(t)=-1,\quad \alpha_{j}(t)=1\text{ for all }j\in D^\tau,\ j\neq i. \end{equation}
Thus we can compute the Galois cohomology of $_z G$ as follows. We take the Kac diagram of $_z G$ from \cite[Table 7]{OV} (the English version). We remove vertex 0 and all vertices corresponding to the short roots. What remains is a simply-laced colored Dynkin diagram $(D^\tau,{\boldsymbol{t}})$ with one black vertex or without black vertices; this is a colored Dynkin diagram for $_t G(D^\tau)$, where $_{t\tau} G\simeq\kern 0.8pt_z G$. The map ${\boldsymbol{t}}\colon D^\tau\to\{0,1\}$ is the restriction of the map ${\boldsymbol{q}}\colon {\widetilde{D'}}\to{\mathbb{Z}}_{\ge0}$ to the subset $D^\tau$ of ${\widetilde{D'}}$. If ${\boldsymbol{t}}=0$ (no black vertices), then the moves ${\mathcal{M}}_j$ of the Reeder puzzle for $(D^\tau,{\boldsymbol{t}})$ are given by formula \eqref{non-twisted-simply-laced} for $D^\tau$. If ${\boldsymbol{t}}\neq 0$, i.e., there is one black vertex $i$ in $D^\tau$, then the moves ${\mathcal{M}}_j$ for $j\in D^\tau,\ j\neq i$ are given by formula \eqref{non-twisted-simply-laced}, while the move ${\mathcal{M}}_i$ is given by formula \eqref{twisted-simply-laced-new}. In these formulas the sum is taken over the neighbors $k$ {\em lying in $D^\tau$}. By solving the Reeder puzzle for $(D^\tau,{\boldsymbol{t}})$, we compute \[{\rm Orb}(D^\tau,{\boldsymbol{t}})\cong H^1({\mathbb{R}},\kern 0.8pt_t G(D^\tau))\cong H^1({\mathbb{R}},\kern 0.8pt_{t\tau} G)\simeq H^1({\mathbb{R}},\kern 0.8pt_z G).\]
\begin{example} These are the Kac diagram for the group $_{t\tau} G=EIV$ of type ${\mathbf{E}}_6$ (see Subsection \ref{subsec:EIV} below) and the twisting diagram for $_t G(D^\tau)=G(D^\tau)$ with trivial twisting, i.e., the uncolored Dynkin diagram ${\mathbf{A}}_2$: \begin{equation*} \mxymatrix { \bcb{0} \ar@{-}[r] & \bc{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] &\bc{4} &&&
\bc{3} \ar@{-}[r] & \bc{4} &&&
{\phantom{\hbox{AAAAA}}} } \end{equation*} \end{example}
\begin{example} These are the Kac diagram for the group $_{t\tau} G=EI$ of type ${\mathbf{E}}_6$ (see Subsection \ref{subsec:EI} below),
the twisting diagram for $_t G(D^\tau)$, and the augmented diagram for $_t G(D^\tau)$: \begin{equation*} \mxymatrix { \bc{0} \ar@{-}[r] & \bc{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] &\bcb{4} &&&
\bc{3} \ar@{-}[r] & \bcb{4} &&& \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & *+[F]{1} } \end{equation*} \end{example}
\section{Orbits: definitions and terminology} \label{sec:term}
Starting with the next section, we solve the Reeder puzzles case by case, i.e., describe the sets of equivalence classes ${\rm Cl}(D,\tau,{\boldsymbol{t}})$. Proposition \ref{c:restriction} reduces the case of an outer form of a compact group to the case of an inner form of another compact group. In the case of an inner form we determine the set ${\rm Orb}(D,{\boldsymbol{t}})$ of the orbits of the group $W$ generated by the moves ${\mathcal{M}}_i$ (i.e., reflections $r_{\alpha_i}$) acting on the set $L(\kern 0.8pt_{\boldsymbol{t}} D)$. Here $L(\kern 0.8pt_{\boldsymbol{t}} D)$ is the set of labelings ${\boldsymbol{a}}=(a_1,\dots,a_n)$ corresponding to a twisting diagram $\kern 0.8pt_{\boldsymbol{t}} D$ with vertices $i=1,\dots,n$, where $a_i\in {\mathbb{Z}}/2{\mathbb{Z}}$ and each $i$ corresponds to the simple root $\alpha_i$. We number the vertices of $D$ as in Onishchik and Vinberg \cite[Table 1]{OV}.
By {\em (connected) components} of a labeling of a Dynkin diagram we mean the connected components of the graph obtained by removing the vertices with zeros and the corresponding edges. For example, the following labeling of ${\mathbf{A}}_9$ has $3$ connected components: \[ \sxymatrix{ 1 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 1 \ar@{-}[r] & 1} \qquad\longmapsto\qquad \sxymatrix{ 1 \ar@{-}[r] & 1 & & 1 & & 1 \ar@{-}[r] & 1 \ar@{-}[r] & 1.} \] For some diagrams $D$ the number of components of a labeling is an invariant of the action of $W$. For some others, the parity of the number of components is an invariant.
By a {\em fixed labeling} we mean a fixed point of the action of $W$, that is, a labeling
which is fixed under all moves ${\mathcal{M}}_i$\,. For example, for the action of Lemma \ref{prop:non-twisted} on ${\mathbf{A}}_5$, the labelings \[ \sxymatrix{ 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0} \quad \text{and}\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1} \] are fixed.
We say that two vertices $i,j$ of a Dynkin diagram $D$ are {\em neighbors} if they are connected by an edge (single or multiple). We say that $i$ is a vertex {\em of degree $d$} if it has exactly $d$ neighbors. We are especially interested in vertices of degree 3. The Dynkin diagrams ${\mathbf{D}}_n$ $(n\ge4)$, ${\mathbf{E}}_6$, ${\mathbf{E}}_7$ and ${\mathbf{E}}_8$ have vertices of degree 3. Now let $D$ be a Dynkin diagram with a vertex $i$ of degree 3, and let ${\boldsymbol{a}}$ be a labeling of $D$ that looks near $i$ like \begin{eqnarray} \sxymatrix{\dots \ar@{-}[r] &1 \ar@{-}[r] & 1 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] &\dots
\\& & 1 & } \end{eqnarray} The move ${\mathcal{M}}_i$ of Lemma \ref{prop:non-twisted} splits the component of $i$ into three components (because $D$ has no cycles): \begin{eqnarray} \sxymatrix{\dots \ar@{-}[r] &1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] &\dots
\\& & 1 & } \end{eqnarray} and therefore increases the number of components by 2. We call this process {\em splitting at $i$}. The reverse process is called {\em unsplitting}.
Let $G$ be a group with twisting diagram $_{\boldsymbol{t}} D$ and the set of labelings $L(\kern 0.8pt_{\boldsymbol{t}} D)$. We denote by ${\rm Orb}(\kern 0.8pt_{\boldsymbol{t}} D)$ the set of orbits in $L(\kern 0.8pt_{\boldsymbol{t}} D)$ under the action of the Weyl group, i.e., the set of equivalence classes of labelings with respect to the moves. We denote the number of orbits by $\Orbs{\kern 0.8pt_{\boldsymbol{t}} D}$.
In Sections \ref{sec:An} -- \ref{sec:G2} below we describe the pointed sets ${\rm Cl}(D,\tau,{\boldsymbol{t}})$ for simply connected groups of types ${\mathbf{A}}_n$ -- ${\mathbf{G}}_2$. Since these sections may be regarded as parts of a table, most proofs are omitted. In Section \ref{sec:An} we introduce notation which will be used in subsequent sections.
\section{Groups of type ${\mathbf{A}}_n$} \label{sec:An}
\subsection{The compact group ${\bf SU}(n+1)$ of type ${\mathbf{A}}_n^{(0)}$} \label{sect:An}\label{subsec:An}
Here $n\ge 1$. The Dynkin diagram is \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n} } \end{equation*} The Weyl group acts by the moves that are described in Lemma \ref{prop:non-twisted}. We denote the compact form of the complex group of type ${\mathbf{A}}_n$ by ${\mathbf{A}}_n^{(0)}$. The superscript 0 shows that the group is compact and the diagram is uncolored.
\begin{lemma} \label{lem:basic-An} For ${\mathbf{A}}_n^{(0)}$: \begin{enumerate} \item[(a)] The moves do not change the number of components. \item[(b)] Every component can be reduced to length 1, e.g. \[ 0 \!-\! 1 \!-\! 1 \!-\! 1 \!-\! 0\quad \mapsto\quad 0 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0 \ . \] \item[(c)] Components may be pushed so that the space between components is of length 1, e.g. \[ 1 \!-\! 0 \!-\! 0 \!-\! 1 \!-\! 0\quad \mapsto\quad 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0 \ .\] \end{enumerate} \end{lemma}
\begin{notation} \label{def:xi-form} By $\xi_r^n$ (or just $\xi_r$) we mean the labeling of ${\mathbf{A}}_n^{(0)}$ of the form \begin{equation*} \xi_r\quad = \quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & \overset{1-0}{\cdots\cdots} & \ar@{-}[l] \underset{r}{1} \ar@{-}[r] & 0 \ar@{-}[r] & \cdots } \end{equation*} which has $r$ components packed maximally to the left, namely, \[ (\xi^n_r)_i = \begin{cases} 1 & \mbox{ if } i=1,3,\dots,2r-1 \ , \\ 0 & \mbox{ otherwise}\ . \end{cases} \] By $\eta_r^n$ (or just $\eta_r$) we mean the labeling of ${\mathbf{A}}_n^{(0)}$ which has $r$ components packed maximally to the right, namely \[ (\eta^n_r)_i = \begin{cases} 1 & \mbox{ if } i=n,n-2,\dots,n-2(r-1) \ , \\ 0 & \mbox{ otherwise}\ . \end{cases} \] \end{notation}
\begin{example}\
$\xi_3^7 = \ 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0 $,\quad\ $\eta_2^7= \ 0 \!-\! 0 \!-\! 0 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! 1$. \end{example}
\begin{lemma} \label{cor:basic-An} Two labelings of ${\mathbf{A}}_n^{(0)}$ are equivalent if and only if they have the same number of components. In particular, any labeling of ${\mathbf{A}}_n^{(0)}$ with $r$ components is equivalent to $\xi_r^n$ and to $\eta_r^n$. \end{lemma}
Thus, in ${\mathbf{A}}_n^{(0)}$ the number of components is an invariant which fully characterizes orbits.
\begin{corollary}\label{cor:An-reps} {\bfseries The orbit of zero} in $L({\mathbf{A}}_n^{(0)})$ consists of one labeling $\xi_0$. As {\bfseries representatives of orbits} we can take $\xi_0,\xi_1,\dots,\xi_r$, where $r=\lceil n/2\rceil$. We have \begin{equation} \label{eq:An-num-of-orbits} \Orbs{{\mathbf{A}}_n^{(0)}}=r+1=\lceil n/2\rceil+1 = \begin{cases} k+1 & \mbox{ if }n=2k \ , \\ k+2 & \mbox{ if }n=2k+1 \ . \end{cases} \end{equation} \end{corollary}
\subsection{The group ${\bf SU}(m,\,n+1-m)$ \ $(\,1 \le m \le\lceil n/2\rceil\,)$ with twisting diagram ${\mathbf{A}}_n^{(m)}$} \label{sect:AnTwistm}\label{subsec:An^m}
The group $G$ is the special unitary group ${\bf SU}(m,n+1-m)$ of the diagonal Hermitian form with $m$ times $-1$ and $n+1-m$ times $+1$ on the diagonal. Our results are valid for all $1\le m\le n$, though ${\bf SU}(m,n+1-m)\simeq {\bf SU}(n+1-m,m)$ and therefore, it suffices to consider only the case $1 \le m \le\lceil n/2\rceil$.
The Kac diagram of $G$ is \begin{equation*} \sxymatrix{ & & \ar@{-}[lld] \bcb{0} \ar@{-}[rrd] & & & \\ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bcb{m} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n} } \end{equation*}
see \cite[Table 7]{OV}. We obtain the {\em twisting diagram} ${\mathbf{A}}_n^{(m)}$ by removing vertex 0 from the Kac diagram: \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bcb{m} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n} } \end{equation*} The superscript in ${\mathbf{A}}_n^{(m)}$ refers to the twisting (black) vertex $m$ in the twisting diagram, with respect to the numbering of Onishchik and Vinberg \cite{OV}. We construct the augmented diagram \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{m} \ar@{-}[r] \ar@{-}[d] & \cdots \ar@{-}[r] & \bc{n} \\
& & *+[F]{1} & & } \end{equation*} as in Construction \ref{const:augmented-diag}, i.e. by formally adding a new vertex with constant label 1, called boxed 1, connected to the twisting vertex by a simple edge. This retains the set of labelings and the set of orbits.
\begin{notation}\label{n:l,r} Let ${\boldsymbol{a}}=(a_i)\in L({\mathbf{A}}_n^{(m)})$. We have a schematic diagram: \begin{equation} \label{schematic} \sxymatrix{ \mathrm{LHS} \ar@{-}[r] &a_m \ar@{-}[r] \ar@{-}[d] & \mathrm{RHS} \\ & *+[F]{1} & } \end{equation} where LHS denotes the left-hand side and RHS denotes the right-hand side. We denote by $l({\boldsymbol{a}})$ the number of components of ${\boldsymbol{a}}$ in LHS (to the left of the twisting vertex $m$), and by $r({\boldsymbol{a}})$ the number of components of ${\boldsymbol{a}}$ in RHS (to the right of the twisting vertex $m$), in both cases not taking into account the component of the boxed 1. \end{notation}
\begin{remark} For ${\mathbf{A}}_n^{(m)}$: \begin{enumerate} \item[(i)] Any labeling ${\boldsymbol{a}}$ is equivalent to a labeling ${\boldsymbol{a}}'$ with $a'_m=0$.
\item[(ii)] For the schematic diagram \eqref{schematic}, if $l({\boldsymbol{a}})\ge 1$ and $r({\boldsymbol{a}})\ge 1$, then the rightmost component in LHS and the leftmost component in RHS
can be made to cancel each other out by unsplitting at vertex $m$ (which is a vertex of degree 3 for $1 < m < n$). In other words, if $l({\boldsymbol{a}}),r({\boldsymbol{a}}) \ge 1$, then ${\boldsymbol{a}}$ is equivalent to some labeling ${\boldsymbol{a}}'$ with $l({\boldsymbol{a}}')=l({\boldsymbol{a}})-1$ and $r({\boldsymbol{a}}')=r({\boldsymbol{a}})-1$.
\item[(iii)] A component cannot pass from one side of the twisting vertex to the opposite side.
In other words, if ${\boldsymbol{a}}\sim{\boldsymbol{a}}'$, then $r({\boldsymbol{a}})-l({\boldsymbol{a}})=r({\boldsymbol{a}}')-l({\boldsymbol{a}}')$. \end{enumerate} \end{remark}
\begin{proposition}\label{prop:An^(m)-invariant} Two labelings ${\boldsymbol{a}},{\boldsymbol{a}}'\in L({\mathbf{A}}_n^{(m)})$ are equivalent if and only if
$r({\boldsymbol{a}})-l({\boldsymbol{a}})=r({\boldsymbol{a}}')-l({\boldsymbol{a}}')$, and this invariant $r({\boldsymbol{a}})-l({\boldsymbol{a}})$ can take values between $-\lceil ({m-1})/{2}\rceil$ and $\lceil ({n-m})/{2}\rceil$. \end{proposition}
\begin{notation}\label{not:pq} Let $p,q$ be integers, $0 \le p \le \lceil ({m-1})/{2}\rceil$ and $0 \le q \le \lceil ({n-m})/{2}\rceil$.
We write
\begin{equation}\label{eq:(p|q)}
(p|q) \ := \quad \sxymatrix{ \eta_p^{m-1} \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & \xi_q^{n-m} \\ & *+[F]{1} & } \end{equation}
We have $l(p|q)=p$ and $r(p|q)=q$. \end{notation}
\begin{corollary}\label{cor:An^(m)-reps} For ${\mathbf{A}}_n^{(m)}$: \begin{enumerate} \item[(i)] {\bfseries The orbit of zero} is the set of the labelings ${\boldsymbol{a}}$ such that $l({\boldsymbol{a}})=r({\boldsymbol{a}})$.
\item[(ii)] As {\bfseries representatives of orbits} we can take \[
(0|0)\ = \quad \sxymatrix{ \eta_0 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & \xi_0 \\ & *+[F]{1} & } ,
\qquad (p|0)\ = \quad \sxymatrix{ {\eta_p} \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & {\xi_0} \\ & *+[F]{1} & },
\qquad (0|q)\ = \quad \sxymatrix{ {\eta_0} \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & {\xi_q} \\ & *+[F]{1} & } \] with $1 \le p \le \lceil ({m-1})/{2}\rceil$ and $1 \le q \le \lceil ({n-m})/{2}\rceil$.
\item[(iii)] The number of orbits is \begin{eqnarray} \label{eq:An^m-num-of-orbits}
\Orbs{{\mathbf{A}}_n^{(m)}} & = & \left\lceil ({m-1})/{2}\right\rceil + 1 + \left\lceil ({n-m})/{2}\right\rceil \\ & = & \begin{cases} k+1 & \mbox{ if }n=2k \ , \\ k+1 & \mbox{ if }n=2k+1 \mbox{ and } m \mbox{ is odd}, \\ k+2 & \mbox{ if }n=2k+1 \mbox{ and } m \mbox{ is even}. \end{cases} \nonumber \end{eqnarray} \end{enumerate} \end{corollary}
\subsection{Outer forms of ${\bf SU}(n+1)$} \label{ssec:An-outer} Here $\tau$ is the nontrivial involutive automorphism of the Dynkin diagram $D={\mathbf{A}}_n$, where $n\ge 2$.
Case $G={\bf{SL}}(n+1)$, $n=2k$. Then $D^\tau=\emptyset$, and it is well known that $H^1({\mathbb{R}},{\bf{SL}}(n+1))=1$ (this follows, for example, from \cite[III.1.1, Proposition 1]{Serre}).
Case $G={\bf{SL}}(n+1)$, $n=2k+1$. Then $\#D^\tau=1$, $D^\tau=\bcb{}$\,, and again it is well known that $H^1({\mathbb{R}} ,{\bf{SL}}(n+1))=1$.
Case $G={\bf{SL}}(k+1,{\mathbb{H}})$, where ${\mathbb{H}}$ denotes the Hamilton quaternions. Then $n=2k+1$, $\#D^\tau=1$, $D^\tau=\bc{}$, $\Orbs{{\mathbf{A}}_1}=2$. The orbit of zero in $L({\mathbf{A}}_1)$ consists of 0. {\bfseries The class of zero} in $L(D)^\tau$ consists of the labelings whose restriction to $D^\tau$ is 0, namely with $a_{k+1}=0$. The other class consists of the labelings with $a_{k+1}=1$.
\section{Groups of type ${\mathbf{B}}_n$} \label{sec:Bn}
\subsection{The compact group ${{\bf Spin}}(2n+1)$ of type ${\mathbf{B}}_n^{(0)}$} \label{subsec:Bn}
The Dynkin diagram is \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} \ar@{=>}[r] & \bc{n} }\, , \end{equation*} where $n\ge 2$.
We write a labeling ${\boldsymbol{b}}\in L({\mathbf{B}}_n^{(0)})$ as ${\boldsymbol{b}} = ({\boldsymbol{a}} {\Rightarrow} {\varkappa})$, where ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-1}^{(0)})$ and ${\varkappa} \in \{0,1\}$.
Note that the labeling \begin{equation*} \ell_1^{(0)} = \ (\xi_0 {\Rightarrow} 1) \ = \quad (0 \!-\! ... \!-\! 0 {\Rightarrow} 1) \end{equation*} is a fixed labeling. We denote by $[\ell_1^{(0)}] \in {\rm Orb}({\mathbf{B}}_n^{(0)})$ the orbit of $\ell_1^{(0)}$ (consisting of one labeling), and also, by slight abuse of notation, the subset $\{\,[\ell_1^{(0)}]\,\}\subset {\rm Orb}({\mathbf{B}}_n^{(0)})$ consisting of this orbit.
We also note that if ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-1}^{(0)})$, ${\boldsymbol{a}}\neq 0$, then $({\boldsymbol{a}}{\Rightarrow} 1)$ is equivalent to $({\boldsymbol{a}}{\Rightarrow} 0)$ in $L({\mathbf{B}}_n^{(0)})$.
\begin{proposition} The map $\varphi \colon L({\mathbf{A}}_{n-1}^{(0)}) \to L({\mathbf{B}}_n^{(0)}) $ defined by \[
{\boldsymbol{a}} \,{\longmapsto}\, ({\boldsymbol{a}} {\Rightarrow} 0) \]
induces a bijection $\varphi_* \colon {\rm Orb}({\mathbf{A}}_{n-1}^{(0)}) \isoto {\rm Orb}({\mathbf{B}}_n^{(0)}) \smallsetminus [\ell_1^{(0)}]$ on the orbits. \end{proposition}
\begin{corollary} \label{cor:Bn-reps} For ${\mathbf{B}}_n^{(0)}$: \begin{enumerate} \item[(i)] {\bfseries The orbit of zero} consists of the fixed labeling $\xi_0{\Rightarrow} 0$. \item[(ii)] As {\bfseries representatives of orbits} we can take $$\xi_0{\Rightarrow} 1, \quad \xi_0 {\Rightarrow} 0, \quad \xi_1 {\Rightarrow} 0, \quad \xi_2 {\Rightarrow} 0 \ , \quad ... \ ,\quad \xi_r {\Rightarrow} 0 $$ with $r = \lceil ({n-1})/{2} \rceil$. \item[(iii)] \begin{equation*} \Orbs{{\mathbf{B}}_n^{(0)}} = \Orbs{ {\mathbf{A}}_{n-1}^{(0)} } + 1 = \begin{cases} k+2 & \mbox{ if } n=2k \ , \\ k+2 & \mbox{ if } n=2k+1 \ . \end{cases} \end{equation*} \end{enumerate} \end{corollary}
\subsection{The group ${{\bf Spin}}(2m,\,2n+1-2m)$ \ $(\,1 \le m < n\,)$ with twisting diagram ${\mathbf{B}}_n^{(m)}$} \label{subsec:Bn^m} The group $G$ is the universal covering ${{\bf Spin}}(2m,\,2n+1-2m)$ of the special orthogonal group ${{\bf SO}}(2m,\,2n+1-2m)$ of the diagonal quadratic form with $2m$ times $-1$ and $2n+1-2m$ times $+1$ on the diagonal. The twisting diagram and the augmented diagram are: \begin{equation*} \sxymatrix{
\bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bcb{m} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} \ar@{=>}[r] & \bc{n} & \qquad & \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{m} \ar@{-}[d] \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} \ar@{=>}[r] & \bc{n} \\
& & & & \qquad & & & & & & *+[F]{1} & & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
Note that if $m$ is even, the labeling \[ \ell_1^{(m)} \ = \quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & \cdots & \ar@{-}[l] 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{=>}[r] & 1 \\ & & & & *+[F]{1} & & } \] is a fixed labeling. Note also that if ${\boldsymbol{b}}=({\boldsymbol{a}}{\Rightarrow} 1)\in L({\mathbf{B}}_n^{(m)})$ and ${\boldsymbol{b}}\neq\ell_1^{(m)}$, then ${\boldsymbol{b}}\sim ({\boldsymbol{a}}{\Rightarrow} 0)$.
\begin{proposition} \label{lem:Bn^m-bijection} The map $\varphi\colon L({\mathbf{A}}_{n-1}^{(m)})\to L({\mathbf{B}}_n^{(m)})$ defined by $$ {\boldsymbol{a}}\longmapsto (\sxymatrix{{\boldsymbol{a}} \ar@{=>}[r] &0}) $$ induces an injection $$ {\rm Orb}({\mathbf{A}}_{n-1}^{(m)})\to {\rm Orb}({\mathbf{B}}_n^{(m)}) $$ which is bijective when $m$ is odd, and whose image is ${\rm Orb}({\mathbf{B}}_n^{(m)})\smallsetminus[\ell_1^{(m)}]$ when $m$ is even. \end{proposition}
We write \begin{equation}\label{eq:(>)}
(p|q\!\! > \!\!{\varkappa}):=\,{\boldsymbol{a}}{\Rightarrow}{\varkappa},\quad\text{where}\quad{\boldsymbol{a}}=(p|q)\in L({\mathbf{A}}_{n-1}^{(m)}),\ {\varkappa} \in \{0,1\}. \end{equation} It follows from Proposition \ref{lem:Bn^m-bijection} that as a set of representatives for the orbits in $L({\mathbf{B}}_n^{(m)})$ we can take the labelings of the form $\sxymatrix{{\boldsymbol{a}} \ar@{=>}[r] &0}$, where ${\boldsymbol{a}}$ runs over the set of representatives of orbits in $L({\mathbf{A}}_{n-1}^{(m)})$ from Corollary \ref{cor:An^(m)-reps}, and when $m$ is even we should add the fixed labeling $\ell_1^{(m)}\in L({\mathbf{B}}_n^{(m)})$. Explicitly, we obtain:
\begin{corollary} \label{prop:Bn^(m)-reps} {\bfseries The orbit of zero} in $L({\mathbf{B}}_n^{(m)})$ is the set of the labelings ${\boldsymbol{b}}=(\sxymatrix{{\boldsymbol{a}} \ar@{=>}[r] &{\varkappa}})$ with $r({\boldsymbol{a}})=l({\boldsymbol{a}})$. As {\bfseries representatives of orbits} in $L({\mathbf{B}}_n^{(m)})$ we can take
$(p|0\! > \! 0)$ for $0\le p\le \lceil({m-1})/{2}\rceil$,\ \ $(0|q\! > \! 0)$ for $0<q\le \lceil({n-1-m})/{2}\rceil$, and $\ell_1^{(m)}$ when $m$ is even. \end{corollary}
\begin{corollary} \label{cor:Bn^(m)-number} We have: \[ \Orbs{{\mathbf{B}}_n^{(m)}} = \begin{cases} \Orbs{{\mathbf{A}}_{n-1}^{(m)}} & \mbox{ if }m\mbox{ is odd,} \\ \Orbs{{\mathbf{A}}_{n-1}^{(m)}} + 1 & \mbox{ if }m\mbox{ is even.} \end{cases} \] Using Corollary \ref{cor:An^(m)-reps}(iii), we obtain \begin{equation*} \label{eq:Bn^m-number-of-orbits} \Orbs{ {\mathbf{B}}_n^{(m)} } = \begin{cases} k & \mbox{ if } n=2k \mbox{ and } m \mbox{ is odd,} \\
k+2 & \mbox{ if } n=2k \mbox{ and } m \mbox{ is even,} \\
k+1 & \mbox{ if } n=2k+1 \mbox{ and } m \mbox{ is odd,} \\
k+2 & \mbox{ if } n=2k+1 \mbox{ and } m \mbox{ is even.}
\end{cases} \end{equation*} \end{corollary}
\subsection{The group ${{\bf Spin}}(2n,1)$ with twisting diagram ${\mathbf{B}}_n^{(n)}$} \label{subsec:Bn^n} The twisting diagram and the augmented diagram are: \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} \ar@{=>}[r] & \bcb{n} } \qquad\qquad \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} \ar@{=>}[r] & \bc{n} \ar@{-}[r] & *+[F]{1} } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
If $n=2k$, we have a fixed labeling in $L({\mathbf{B}}_n^{(n)})$ \begin{equation*} \ell_1^{(n)} \ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & \overset{0-1}{\cdots} & \ar@{-}[l] 0 \ar@{-}[r] & 1 \ar@{=>}[r] & 1 \ar@{-}[r] & *+[F]{1} } \quad = \quad \xi_k {\Rightarrow} 1\!-\!{\sxymatrix{\boxone}}\quad \in L({\mathbf{B}}_n^{(n)}) \ . \end{equation*}
\begin{proposition} Define a map $\varphi\colon L({\mathbf{A}}_{n-1}^{(0)}) \to L({\mathbf{B}}_n^{(n)})$ by \[ \varphi({\boldsymbol{a}}) = (\, {\boldsymbol{a}} {\Rightarrow} 0 \!-\! {\sxymatrix{\boxone}} \, ) \, , \] then the induced map $$ \varphi_*\colon {\rm Orb}({\mathbf{A}}_{n-1}^{(0)})\to {\rm Orb}({\mathbf{B}}_n^{(n)}) $$ is injective. If $n$ is odd, then $\varphi_*$ is bijective;
if $n$ is even, the image of $\varphi_*$ is ${\rm Orb}({\mathbf{B}}_n^{(n)})\smallsetminus[\ell_1^{(n)}]$. \end{proposition}
\begin{proposition} {\bfseries The orbit of zero} in $L({\mathbf{B}}_n^{(n)})$ consists of two labelings: \[ \sxymatrix{ 0 \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{=>}[r] & 0 \ar@{-}[r] & *+[F]{1} } \text{ \quad and \quad } \sxymatrix{ 0 \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{=>}[r] & 1 \ar@{-}[r] & *+[F]{1} } \ . \] \end{proposition}
\begin{corollary}\label{cor:Bnn-reps} As {\bfseries representatives of orbits} in $ L({\mathbf{B}}_n^{(n)})$ we can take \[\sxymatrix{ \xi_0 \ar@{=>}[r] & 0 \ar@{-}[r] & *+[F]{1} } \ ,\qquad \sxymatrix{ \xi_1 \ar@{=>}[r] & 0 \ar@{-}[r] & *+[F]{1} } \ ,\qquad \dots\ ,\qquad \sxymatrix{ \xi_r \ar@{=>}[r] & 0 \ar@{-}[r] & *+[F]{1} } \] where $r = \lceil ({n-1})/{2} \rceil$, together with $\ell_1^{(n)}$ when $n$ is even. \end{corollary}
\begin{corollary}\label{Bnn-orbits} \begin{equation*} \Orbs{ {\mathbf{B}}_n^{(n)} } = \begin{cases} \Orbs{ {\mathbf{A}}_{n-1}^{(0)} } +1=k+2 & \mbox{ if }\ n=2k \ , \\ \Orbs{ {\mathbf{A}}_{n-1}^{(0)} }=k+1 & \mbox{ if }\ n=2k+1 \ . \end{cases} \end{equation*} \end{corollary}
\section{Groups of type ${\mathbf{C}}_n$} \label{sec:Cn}
\subsection{The compact group ${\bf{Sp}}(n)$ with diagram ${\mathbf{C}}_n^{(0)}$} \label{subsec:Cn}
The group $G$ is the compact ``quaternionic'' group ${\bf{Sp}}(n)$ of type ${\mathbf{C}}_n$ ($n\ge 3$) with Dynkin diagram \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} & \ar@{=>}[l] \bc{n} }. \end{equation*}
\begin{construction}\label{con:Cn} Let $L({\mathbf{A}}_{n-1}^{(0)}) \sqcup L({\mathbf{A}}_{n-1}^{(n-1)})$ denote the disjoint union of the sets of labelings $L({\mathbf{A}}_{n-1}^{(0)})$ and $L({\mathbf{A}}_{n-1}^{(n-1)})$. We define a map $$ \varphi\colon L({\mathbf{A}}_{n-1}^{(0)}) \sqcup L({\mathbf{A}}_{n-1}^{(n-1)})\to L({\mathbf{C}}_n^{(0)}) $$ sending ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-1}^{(0)})$ to ${\boldsymbol{a}}\!\Leftarrow\! 0$ and sending ${\boldsymbol{a}}' \! \!-\! {\sxymatrix{\boxone}} \ \in L({\mathbf{A}}_{n-1}^{(n-1)})$ to ${\boldsymbol{a}}'\!\Leftarrow\! 1$. Clearly $\varphi$ is a bijection. \end{construction}
Note that for any ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-1}^{(0)})$ and ${\boldsymbol{a}}'\! \!-\! {\sxymatrix{\boxone}}\ \in L({\mathbf{A}}_{n-1}^{(n-1)})$, the labelings ${\boldsymbol{a}}\!\Leftarrow\! 0$ and ${\boldsymbol{a}}'\!\Leftarrow\! 1$ are not equivalent in $L({\mathbf{C}}_n^{(0)})$.
\begin{proposition} The bijection $\varphi$ of Construction \ref{con:Cn} induces a bijection on orbits $$ \varphi_*\colon {\rm Orb}({\mathbf{A}}_{n-1}^{(0)}) \sqcup {\rm Orb}({\mathbf{A}}_{n-1}^{(n-1)})\isoto {\rm Orb}({\mathbf{C}}_n^{(0)}). $$ \end{proposition}
\begin{corollary} \label{cor:Cn} For ${\mathbf{C}}_n^{(0)}$: \begin{itemize}
\item[(i)] {\bfseries The orbit of zero} is just 0.
\item[(ii)] As {\bfseries representatives for orbits} we can take \[ \xi_0 \!\Leftarrow\! 0 , \quad \xi_1 \!\Leftarrow\! 0 , \quad \cdots , \quad \xi_r \!\Leftarrow\! 0, \] where $r = \lceil ({n-1})/{2} \rceil$, and \[ \xi_0 \!\Leftarrow\! 1 , \quad \xi_1 \!\Leftarrow\! 1 , \quad \cdots , \quad \xi_s \!\Leftarrow\! 1, \] where $s = \lceil {n}/{2} \rceil - 1$.
\item[(iii)] $\Orbs{ {\mathbf{C}}_n^{(0)} } = \Orbs{ {\mathbf{A}}_{n-1}^{(0)} } + \Orbs{ {\mathbf{A}}_{n-1}^{(n-1)} }= n+1.$ \end{itemize} \end{corollary}
(Of course, it is well known that $\# H^1({\mathbb{R}},G)=n+1$ in this case, this follows, for example, from \cite[III.1.1, Proposition 1]{Serre}.)
\subsection{The diagram ${\mathbf{A}}_n^{(m,n)}$}
(We shall need this diagram in Subsection \ref{subsec:Cm,n-m}.) Denote by ${\mathbf{A}}_n^{(m,n)}$ the Dynkin diagram ${\mathbf{A}}_n$ with {\em two} black vertices $m$ and $n$, where $1\le m<n$: $$ \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{m-1} \ar@{-}[r] & \bcb{m} \ar@{-}[r] & \bc{m+1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-1} \ar@{-}[r] & \bcb{n} \,. } $$ We denote by $L({\mathbf{A}}_n^{(m,n)})$ the set of labelings ${\boldsymbol{a}}=(a_i)$ of ${\mathbf{A}}_n^{(m,n)}$. We consider the moves ${\mathcal{M}}_i$ given by formula \eqref{non-twisted-simply-laced} for white vertices and by formula \eqref{twisted-simply-laced-new} for black vertices. We construct the augmented diagram \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{m} \ar@{-}[r] \ar@{-}[d] & \cdots \ar@{-}[r] & \bc{n} \ar@{-}[r] & *+[F]{1} \\
& & *+[F]{1} & & }\ , \end{equation*} by adding $\sxymatrix{ *+[F]{1} }$ two times, and now the moves ${\mathcal{M}}_i$ are given by formula \eqref{non-twisted-simply-laced} for all $i=1,\dots,n$.
We consider the orbits (equivalence classes) in $L({\mathbf{A}}_n^{(m,n)})$. Note that when $m$ is odd, the labeling \begin{equation*} \ell_1^{(m,n)}\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & \stackrel{1-0}{\cdots} & \ar@{-}[l] 1 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] & \stackrel{1}{\cdots} & \ar@{-}[l] 1 \ar@{-}[r] & *+[F]{1} \\ & & & *+[F]{1} & & & & } \end{equation*} is a fixed labeling.
\begin{lemma} For ${\mathbf{A}}_n^{(m,n)}$, we can take the following labelings as representatives of orbits:
$(0|0)$, $(p|0)$ for $p=1,...,\lceil (m-1)/2 \rceil$ and $(0|q)$ for $q=1,...,\lceil (n-1-m)/2 \rceil$, and when $m$ is odd, also the fixed labeling $\ell_1^{(m,n)}$. \end{lemma}
\subsection{The group ${\bf{Sp}}(m,\,n-m)$ \ $(\, 1\le m\le \lfloor n/2\rfloor\,)$ with twisting diagram ${\mathbf{C}}_n^{(m)}$}\label{subsec:Cm,n-m} The group $G$ is the ``quaternionic'' group ${\bf{Sp}}(m,\,n-m)$, the unitary group of the diagonal quaternionic Hermitian form with $m$ times $-1$ and $n-m$ times $+1$ on the diagonal.
The twisting diagram and the augmented diagram are: \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bcb{m} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} & \ar@{=>}[l] \bc{n} } \qquad\qquad \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{m} \ar@{-}[d] \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} & \ar@{=>}[l] \bc{n} \\
& & *+[F]{1} & & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition}\label{prop0:Cn^(m)} The bijection $$ \varphi\colon L({\mathbf{A}}_{n-1}^{(m)}) \sqcup L({\mathbf{A}}_{n-1}^{(m,n-1)})\to L({\mathbf{C}}_n^{(m)}) $$ sending ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-1}^{(m)})$ to ${\boldsymbol{a}}\!\Leftarrow\! 0$ and sending ${\boldsymbol{a}}'\! \!-\!{\sxymatrix{\boxone}} \in L({\mathbf{A}}_{n-1}^{(m,n-1)})$ to ${\boldsymbol{a}}'\!\Leftarrow\! 1$, induces a bijection $$ \varphi_*\colon {\rm Orb}({\mathbf{A}}_{n-1}^{(m)}) \sqcup {\rm Orb}({\mathbf{A}}_{n-1}^{(m,n-1)})\isoto {\rm Orb}({\mathbf{C}}_n^{(m)}). $$ \end{proposition}
Denote $(p|q \!\! < \!\! {\varkappa})\,=\,{\boldsymbol{a}}\!\!\Leftarrow\!\! {\varkappa}$, where ${\boldsymbol{a}}=(p|q)\in L({\mathbf{A}}_{n-1}^{(m)})$ and ${\varkappa} \in \{0,1\}$. For example, for ${\mathbf{C}}_5^{(3)}$ we have \[
(1|0 \!\! < \!\! 1)\ =\quad \sxymatrix{ 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 & \ar@{=>}[l] 1 \\ & & *+[F]{1} & & } \ . \]
\begin{corollary} \label{prop:Cn^(m)} For ${\mathbf{C}}_n^{(m)}$: \begin{enumerate} \item[(i)] {\bfseries The orbit of zero} is $$
\{\,({\boldsymbol{a}}\!\Leftarrow\! 0)\ |\ {\boldsymbol{a}}\in L({\mathbf{A}}_{n-1}^{(m)}),\, l({\boldsymbol{a}})=r({\boldsymbol{a}})\,\}. $$
\item[(ii)] As {\bfseries representatives of orbits} we can take $(p|0 \!\! < \!\! 0)$ with $p=0,...,\lceil ({m-1})/{2}
\rceil$, $(0|q \!\! < \!\! 0)$ with $q=1,...,\lceil ({n-1-m})/{2} \rceil$,
$(p|0 \!\! < \!\! 1)$ with $p = 0,...,\lceil ({m-1})/{2} \rceil$,
$(0|q \!\! < \!\! 1)$ with $q = 1,..., \lfloor ({n-1-m})/{2} \rfloor = \lceil ({n-2-m})/{2} \rceil$, and when $m$ is odd, the fixed labeling \[
\ell_1^{(m,n)}\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & \overset{1-0}{\cdots} & \ar@{-}[l] 1 \ar@{-}[d] \ar@{-}[r] & \overset{1}{\cdots} & \ar@{-}[l] 1 & \ar@{=>}[l] 1 \\ & & & *+[F]{1} & & }\ . \] \item[(iii)] $\Orbs{ {\mathbf{C}}_n^{(m)} } =\Orbs{{\mathbf{A}}_{n-1}^{(m)}} + \Orbs{{\mathbf{A}}_{n-1}^{(m,n-1)}}= n+1$. \end{enumerate} \end{corollary}
(Of course, it is well known that $\# H^1({\mathbb{R}},G) = n+1$ in this case, this follows, for example, from \cite[III.1.1, Proposition 1]{Serre}.)
\subsection{The split group ${\bf{Sp}}(2n,{\mathbb{R}})$ with twisting diagram ${\mathbf{C}}_n^{(n)}$}
The twisting diagram and the augmented diagram are \begin{equation*} \sxymatrix{
\bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} & \ar@{=>}[l] \bcb{n} & \qquad\qquad & \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-1} & \ar@{=>}[l] \bc{n} \ar@{-}[r] & *+[F]{1}
} \end{equation*} In this case there is only one orbit, $\Orbs{ {\mathbf{C}}_n^{(n)} } = 1$ (it is well known that $H^1({\mathbb{R}},G)=1$ in this case, see for example \cite[III.1.2, Proposition 3]{Serre}).
\section{Groups of type ${\mathbf{D}}_n$} \label{sec:Dn}
\subsection{The compact group ${{\bf Spin}}(2n)$ of type ${\mathbf{D}}_n^{(0)}$} \label{subsec:Dn}
The group $G$ is the spin group ${{\bf Spin}}(2n)$, the universal covering of the special orthogonal group ${{\bf SO}}(2n)$, where $n\ge 4$. The Dynkin diagram of $G$ is \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-3} \ar@{-}[r] & \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1} \\ & & & \bcu{n} & } \end{equation*} This diagram has a vertex of degree 3, the vertex $n-2$. For brevity we introduce the following notation: if ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-2}^{(0)})$, ${\boldsymbol{a}}=(a_i)_{i=1}^{n-2}$, ${\varkappa},\lambda\in\{0,1\}$, we write \begin{equation}\label{eq:frac-Dn} {\boldsymbol{a}} \frac{{\varkappa}}{\lambda} \quad:= \quad \xymatrix@1@R=15pt@C=9pt {a_1 \ar@{-}[r] &\cdots &\ a_{n-2} \ar@{-}[l] \ar@{-}[d] \ar@{-}[r] &\ {\varkappa}\ \\ & &\overset{\ }{\lambda} } \ . \end{equation}
Note that for ${\mathbf{D}}_n^{(0)}$ the labelings $$\ell_2^{(0)}=0=\xi_0^{n-2} \dfrac{0}{0}=0...0\frac{0}{0} \quad \text{and}\quad \ell_4^{(0)}= \xi_0^{n-2} \dfrac{1}{1}= 0...0\frac{1}{1}$$
are fixed labelings. If $n$ is even, $n=2k$, then the labelings $$ \ell_1^{(0)}=\xi_{k-1}^{n-2}\dfrac{1}{0}= 10..10\frac{1}{0}\quad \text{and}\quad \ell_3^{(0)}=\xi_{k-1}^{n-2}\dfrac{0}{1}= 10..10\frac{0}{1} $$ are fixed labelings.
\begin{proposition} Define a map \[ \varphi\colon L({\mathbf{A}}_{n-2}^{(0)}) \longrightarrow L({\mathbf{D}}_n^{(0)}), \quad {\boldsymbol{a}} \longmapsto {\boldsymbol{a}} \frac{0}{0}\, . \] Then the induced map $\varphi_*\colon \operatorname{Orb}({\mathbf{A}}_{n-2}^{(0)}) \to \operatorname{Orb}({\mathbf{D}}_n^{(0)})$ is injective. If $n$ is even, $n=2k$, then the image of $\varphi_*$ is $$\operatorname{Orb}({\mathbf{D}}_n^{(0)}) \smallsetminus \left\{ [\ell_4^{(0)}] , \ [\ell_1^{(0)}], \ [\ell_3^{(0)}] \right\}.$$
If $n$ is odd, $n=2k+1$, then the image of $\varphi_*$ is $$\operatorname{Orb}({\mathbf{D}}_n^{(0)}) \smallsetminus [\ell_4^{(0)}].$$ \end{proposition}
\begin{corollary} \label{cor:Dn-reps} For ${\mathbf{D}}_n^{(0)}$: \begin{itemize}
\item[(i)] {\bfseries The orbit of zero} is just the labeling $\ell_2^{(0)}=0$. \item[(ii)] {\bfseries Representatives of orbits} are: \begin{itemize} \item For $n=2k+1$ we can take the following representatives coming from $L({\mathbf{A}}_{n-2}^{(0)})$: \[ \xi_0^{n-2} \frac{0}{0} = 0...0\frac{0}{0} \ , \quad \xi_1^{n-2} \frac{0}{0} = 10...0\frac{0}{0} \ , \quad ... , \quad \xi_{k}^{n-2}\frac{0}{0} = 101..01\frac{0}{0} \] and the fixed labeling $\ell_4^{(0)}$. \item For $n=2k$ we can take the following representatives coming from $L({\mathbf{A}}_{n-2}^{(0)})$: \[ \xi_0^{n-2} \frac{0}{0} = 0...0\frac{0}{0} \ , \quad \xi_1^{n-2} \frac{0}{0} = 10..0\frac{0}{0} \ , \quad ... , \quad \xi_{k-1}^{n-2} \frac{0}{0} = 10..10\frac{0}{0} \ , \] and the fixed labelings $\ell_4^{(0)}$, $\ell_1^{(0)}$, and $\ell_3^{(0)}$. \end{itemize} \item[(iii)] We have \begin{equation*} \Orbs{{\mathbf{D}}_n^{(0)}} = \begin{cases} \Orbs{{\mathbf{A}}_{n-2}^{(0)}} + 3=k+3 & \mbox{ if } \ n = 2k \ , \\ \Orbs{{\mathbf{A}}_{n-2}^{(0)}} + 1=k+2 & \mbox{ if } \ n = 2k+1 \ . \end{cases} \end{equation*} \end{itemize} \end{corollary}
\begin{example} For ${\mathbf{D}}_5^{(0)}$ we have representatives of orbits \[ 000\frac{0}{0} \ , \quad 100\frac{0}{0} \ , \quad 101\frac{0}{0} , \quad 000\frac{1}{1} \ . \] For ${\mathbf{D}}_6^{(0)}$ we have representatives of orbits \[ 0000\frac{0}{0} \ , \quad 1000\frac{0}{0} \ , \quad 1010\frac{0}{0} \ , \quad 1010\frac{1}{0} \ , \quad 1010\frac{0}{1} \ , \quad 0000\frac{1}{1} \ . \] \end{example}
\subsection{The group ${{\bf Spin}}(2m,\,2n-2m)$ \ $(\,1 \le m \le \lfloor n/2 \rfloor\,)$ with twisting diagram ${\mathbf{D}}_n^{(m)}$} \label{subsec:Dn^(m)} The group $G$ is ${{\bf Spin}}(2m,\,2n-2m)$, the universal covering of the special orthogonal group ${{\bf SO}}(2m,\,2n-2m)$ of the diagonal quadratic form with $2m$ times $-1$ and $2n-2m$ times $+1$ on the diagonal. The twisting diagram and the augmented diagram are: \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bcb{m} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1} &\qquad\qquad& \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{m} \ar@{-}[d] \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1} \\
& & & & \bcu{n} & &\qquad\qquad& & & *+[F]{1} & & \bcu{n} & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{remark}\label{rem:when-they-occur} For ${\mathbf{D}}_n^{(m)}$: \begin{enumerate} \item[(a)] When $m$ is even, we have fixed labelings \[ \ \ell_2^{(m)}\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & \overset{1-0}{\cdots} & \ar@{-}[l] 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{-}[d] \ar@{-}[r] & 0 \\ & & & & *+[F]{1} & & & 0 & } \] and \[ \ \ell_4^{(m)}\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & \overset{1-0}{\cdots} & \ar@{-}[l] 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{-}[d] \ar@{-}[r] & 1 \\ & & & & *+[F]{1} & & & 1 & } \ . \]
\item[(b)] When $n-m$ is even, we have fixed labelings \[ \ell_1^{(m)}\ = \quad \sxymatrix{ \xi_0 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & \overset{1-0}{\cdots} & \ar@{-}[l] 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 1 \\ & *+[F]{1} & & & & & 0 & } \] and \[ \ell_3^{(m)}\ =\quad \sxymatrix{ \xi_0 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & \overset{1-0}{\cdots} & \ar@{-}[l] 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 \\ & *+[F]{1} & & & & & 1 & } \ . \] \end{enumerate} (Cases (a) and (b) can occur together.) \end{remark}
Note that $[\ell_2^{(m)}]$ is in the image of the map $\varphi_*$ of Theorem \ref{lem:Dn(m)} below, while $[\ell_1^{(m)}], [\ell_3^{(m)}]$ and $[\ell_4^{(m)}]$ are not.
\begin{theorem}\label{lem:Dn(m)} Consider the map $\varphi \colon L({\mathbf{A}}_{n-2}^{(m)}) \to L({\mathbf{D}}_n^{(m)})$ defined by ${\boldsymbol{a}} \mapsto {\boldsymbol{a}} \frac{0}{0}$\,. Then the induced map on orbits $\varphi_* \colon {\rm Orb}({\mathbf{A}}_{n-2}^{(m)}) \to {\rm Orb}({\mathbf{D}}_n^{(m)})$ is injective, and its image is the whole set ${\rm Orb}({\mathbf{D}}_n^{(m)})$ except for the fixed labelings $\ell_1^{(m)}$, $\ell_3^{(m)}$, and $\ell_4^{(m)}$ when they occur; see Remark \ref{rem:when-they-occur}. \end{theorem}
\begin{proof} We prove the injectivity. Let ${\boldsymbol{d}}={\boldsymbol{a}}\frac{{\varkappa}}{\lambda}\in L({\mathbf{D}}_n^{(m)})$, where ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-2}^{(m)})$\,. Set $$ \delta({\boldsymbol{d}})=({\varkappa}+\lambda\ {\rm mod}\ 2)(1-d_{n-2}) +r({\boldsymbol{a}})-l({\boldsymbol{a}}), $$ where ${\varkappa}+\lambda\ {\rm mod}\ 2\in \{0,1\}\subset {\mathbb{Z}}$, $d_{n-2}=a_{n-2} \in \{0,1\}\subset {\mathbb{Z}}$. It is easy to check that $\delta({\boldsymbol{d}})$ does not change under the moves in $L({\mathbf{D}}_n^{(m)})$. Clearly we have $\delta({\boldsymbol{a}}\frac{0}{0})=r({\boldsymbol{a}})-l({\boldsymbol{a}})$. Now if ${\boldsymbol{a}},{\boldsymbol{a}}'\in L({\mathbf{A}}_{n-2}^{(m)})$ and ${\boldsymbol{a}}\not\sim {\boldsymbol{a}}'$ in $L({\mathbf{A}}_{n-2}^{(m)})$, then by Proposition \ref{prop:An^(m)-invariant} $r({\boldsymbol{a}})-l({\boldsymbol{a}})\neq r({\boldsymbol{a}}')-l({\boldsymbol{a}}')$, hence $\delta({\boldsymbol{a}}\frac{0}{0})\neq \delta({\boldsymbol{a}}'\frac{0}{0})$, and therefore, $({\boldsymbol{a}}\frac{0}{0})\not\sim ({\boldsymbol{a}}'\frac{0}{0})$ in $L({\mathbf{D}}_n^{(m)})$.
We prove the assertion about the image. There are two cases: (1) $n-m$ is odd, and (2) $n-m$ is even.
Case (1): $n-m$ is odd. Let ${\boldsymbol{d}}\in L({\mathbf{D}}_n^{(m)})$. We prove that either ${\boldsymbol{d}}\sim(\dots\frac{0}{0})$ or ${\boldsymbol{d}}=\ell_4^{(m)}$. Up to equivalence, we may assume that \begin{equation}\label{eq:schematic-Dn(m)} {\boldsymbol{d}}={\boldsymbol{a}}\frac{{\varkappa}}{\lambda}=\ \sxymatrix{ {\boldsymbol{a}}^l \ar@{-}[r] &0 \ar@{-}[r] \ar@{-}[d] & {\boldsymbol{a}}^r\dfrac{{\varkappa}}{\lambda} \\ & *+[F]{1} & }\ , \end{equation} where ${\boldsymbol{a}}^l\in L({\mathbf{A}}_{m-1}^{(0)})$ is the left-hand side of ${\boldsymbol{a}}$, ${\boldsymbol{a}}^r\in L({\mathbf{A}}_{n-2-m}^{(0)})$ is the right-hand side of ${\boldsymbol{a}}$, and ${\varkappa},\lambda\in\{0,1\}.$ If ${\varkappa}=\lambda=0$, then ${\boldsymbol{d}}={\boldsymbol{a}}\frac{0}{0}$, as required. If ${\varkappa}=1$, $\lambda=0$, then ${\boldsymbol{a}}^r{\varkappa}={\boldsymbol{a}}^r 1\sim(\dots 0)$ in $L({\mathbf{A}}_{n-m-1}^{(0)})$, because $n-m-1$ is even. Thus ${\boldsymbol{d}}\sim(\dots\frac{0}{0})$, as required. The case ${\varkappa}=0$, $\lambda=1$ is similar to the case ${\varkappa}=1$, $\lambda=0$.
Now assume that ${\varkappa}=\lambda=1$. If ${\boldsymbol{a}}^r\neq 0$, then ${\boldsymbol{a}}^r\sim(\dots 1)$. Thus ${\boldsymbol{d}}\sim(\dots 1\frac{1}{1})\sim(\dots 1\frac{0}{0})$, as required. If ${\boldsymbol{a}}^r=0$ and either $m$ is odd or $m$ is even and ${\boldsymbol{a}}^l\neq\xi_{m/2}$, then we may assume that $d_{m-1}=({\boldsymbol{a}}^l)_{m-1}=0$. Then, applying moves, we can change $d_m$ to 1, then change $d_{m+1}$ to 1, \dots then change $d_{n-2}$ to 1, and finally we obtain that ${\boldsymbol{d}}\sim(\dots 1\frac{1}{1})\sim (\dots 1\frac{0}{0})$, as required. If ${\boldsymbol{a}}^r=0$, $m$ is even and ${\boldsymbol{a}}^l=\xi_{m/2}$, then ${\boldsymbol{d}}=\ell_4^{(m)}$, which completes the proof in Case (1).
Case (2): $n-m$ is even. Let ${\boldsymbol{d}}\in L({\mathbf{D}}_n^{(m)})$. Up to equivalence, we may assume that ${\boldsymbol{d}}$ is as in \eqref{eq:schematic-Dn(m)}. If ${\varkappa}=\lambda=0$, we have nothing to prove. If ${\varkappa}=\lambda=1$ and ${\boldsymbol{d}}\neq\ell_4^{(m)}$, then the argument in Case (1) shows that ${\boldsymbol{d}}\sim(\dots\frac{0}{0})$, as required. Two cases remain: ${\varkappa}=1$, $\lambda=0$, and ${\varkappa}=0$, $\lambda=1$. They are similar; we treat only the case ${\varkappa}=1$, $\lambda=0$.
Consider ${\boldsymbol{a}}{\varkappa}={\boldsymbol{a}} 1\in L({\mathbf{A}}_{n-1}^{(m)})$. Using moves in $L({\mathbf{A}}_{n-1}^{(m)})$, we can reduce ${\boldsymbol{a}}1$ to a labeling which has either no components to the right of the vertex $m$, or no components to the left of $m$. In the former case ${\boldsymbol{d}}\sim(\dots\frac{0}{0})$, as required. In the latter case, if ${\boldsymbol{d}}$ is as in \eqref{eq:schematic-Dn(m)} and ${\boldsymbol{a}}^r 1$ has fewer than $k:=(n-m)/2$ components, then ${\boldsymbol{a}}^r 1\sim(\dots 0)$ and ${\boldsymbol{d}}\sim(\dots \frac{0}{0})$, as required. If ${\boldsymbol{a}}^r 1$ has $k$ components, then ${\boldsymbol{a}}^r1=\xi_k$. Since ${\boldsymbol{a}}^l=0$, we see that ${\boldsymbol{d}}=\ell_1^{(m)}$. This completes the proof in Case (2). \end{proof}
\begin{corollary} \label{cor:Dn^(m)-zero}
Set $A_0=\{{\boldsymbol{a}} \in L({\mathbf{A}}_{n-2}^{(m)})\ |\ l({\boldsymbol{a}})=r({\boldsymbol{a}})\}$ (this is the orbit of zero in $L({\mathbf{A}}_{n-2}^{(m)})$). We write ${\boldsymbol{a}}=(a_i)$. Then {\bfseries the orbit of zero} in $L({\mathbf{D}}_n^{(m)})$ is $$
\left\{ {\boldsymbol{a}}\frac{0}{0},\, {\boldsymbol{a}}\frac{1}{1}\ |\ {\boldsymbol{a}}\in A_0\right\}\,\cup\,
\left\{ {\boldsymbol{a}}\frac{1}{0},\, {\boldsymbol{a}}\frac{0}{1}\ |\ {\boldsymbol{a}}\in A_0,\, a_{n-2}=1\right\}. $$ \end{corollary}
Set
\begin{equation}\label{eq:(p|q)frac}
(p|q)\frac{{\varkappa}}{\lambda}:={\boldsymbol{a}}\frac{{\varkappa}}{\lambda}\in L({\mathbf{D}}_n^{(m)}),\quad
\text{where}\quad {\boldsymbol{a}}=(p|q)\in L({\mathbf{A}}_{n-2}^{(m)}),\ \ {\varkappa},\lambda \in \{0,1\}, \end{equation}
see formulas \eqref{eq:(p|q)} and \eqref{eq:frac-Dn}.
\begin{corollary} \label{cor:Dn^(m)-reps}
For $L({\mathbf{D}}_n^{(m)})$, as {\bfseries representatives of orbits} we can take the labelings $(p|0)\frac{0}{0}$ \ for \ $0 \le p \le \lfloor m/2 \rfloor = \lceil (m-1)/2 \rceil$, the labelings $(0|q)\frac{0}{0}$ \ for \ $1 \le q \le \lceil ((n-2)-m)/2 \rceil$, and the fixed labelings $\ell_4^{(m)}$ and $\ell_1^{(m)}$, $\ell_3^{(m)}$ when they occur; see Remark \ref{rem:when-they-occur}. \end{corollary}
\begin{corollary} \label{prop:Dn^(m)-number} \begin{equation*} \Orbs{ {\mathbf{D}}_n^{(m)} } = \begin{cases} k+2 & \mbox{ if } n=2k+1 \ , \\
k+3 & \mbox{ if } n=2k \mbox{ and } m \mbox{ is even} , \\
k & \mbox{ if } n=2k \mbox{ and } m \mbox{ is odd}. \end{cases} \end{equation*} \end{corollary}
\subsection{The group ${{\bf Spin}}^*(2n)$ with twisting diagram ${\mathbf{D}}_n^{(n)}$} \label{sec:Dn(n)} \label{subsec:Dn^(n)}
The group $G$ is the ``quaternionic'' spin group ${{\bf Spin}}^*(2n)$, the universal covering of ${{\bf SO}}^*(2n)$, the special unitary group of the diagonal quaternionic skew-Hermitian form in $n$ variables $$ {\mathrm{i}} x_1 {\bar{x}} _1+\dots+{\mathrm{i}} x_n {\bar{x}}_n. $$ The twisting diagram and augmented diagram are: \begin{equation*} \xymatrix @1@R=1pt@C=9pt { \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-3} \ar@{-}[r] & \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1}&& && \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-3} \ar@{-}[r] & \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1} \\
& & & \bcbu{n} & &&&& & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[d] & \\ &&&&&&& &&& & *+[F]{1} & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
We consider the following labelings of ${\mathbf{D}}_n^{(n)}$: \begin{equation*} \label{diag:Dn(n)-odd-rep}
m_1\ =\quad \sxymatrix{ 0 \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 \\ & & & 0 \ar@{-}[d] & \\ & & & *+[F]{1} & } \qquad\text{and}\qquad m_2\ =\quad\sxymatrix{ 1 \ar@{-}[r] & \cdots & \ar@{-}[l] 0 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 \\ & & & 0 \ar@{-}[d] & \\ & & & *+[F]{1} & } \end{equation*}
\begin{proposition} For ${\mathbf{D}}_n^{(n)}$ there are exactly two orbits: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} which consists of the labelings with odd number of components (including the boxed 1) and we can take $m_1$ as a representative. \item[2.] The other orbit that consists of the labelings with even number of components (including the boxed 1) and we can take $m_2$ as a representative. \end{enumerate} \end{proposition}
\subsection{The group ${{\bf Spin}}(2m+1,\, 2(n-m)-1)$} \label{ssec:Dn-outer} Here $0\le m \le \lfloor (n-1)/2\rfloor$. The group $G$ is an outer form of the compact group ${{\bf Spin}}(2n)$ of type ${\mathbf{D}}_{n}$. Here for $n>4$ we consider the nontrivial involutive automorphism $\tau$ of the Dynkin diagram ${\mathbf{D}}_n$, while for $n=4$ $\tau$ is {\em a} nontrivial involutive automorphism of ${\mathbf{D}}_4$. The Kac diagram is: \[ \sxymatrix{ \bc{0} \ar@{<=}[r] & \bc{1} \ar@{-}[r] & \cdots & \ar@{-}[l] \bcb{m} \ar@{-}[r] & \cdots & \ar@{-}[l] \bc{n-2} \ar@{=>}[r] & \bc{n-1} }\, \] see \cite[Table 7]{OV}. We erase vertex 0 and also the ``short'' vertex $n-1$ (which comes from $D\smallsetminus D^\tau$).
If $m=0$, we obtain $D^\tau={\mathbf{A}}_{n-2}^{(0)}$ (non-twisted). By formula \eqref{eq:An-num-of-orbits} $$ \Orbs{D^\tau}=\lceil (n-2)/2\rceil +1. $$ The orbit of zero in $L(D^\tau)$ is 0. {\bfseries The class of 0} in $L(D)^\tau$ consists of the labelings with zero restriction to $D^\tau$. As {\bfseries representatives of equivalence classes} we can take $\xi_0$, $\xi_1$, \dots, $\xi_r$, where $r=\lceil (n-2)/2\rceil$. These representatives lie in $L(D^\tau)$ and hence in $L(D)^\tau$.
If $m \neq 0$, then after erasing the vertices $0$ and $n-1$ of the Kac diagram we obtain the twisted diagram ${\mathbf{A}}_{n-2}^{(m)}$. We add boxed 1 as a neighbor to vertex $m$ and obtain the augmented diagram \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{m} \ar@{-}[r] \ar@{-}[d] & \cdots \ar@{-}[r] & \bc{n-2} \\ & & *+[F]{1} & & } \end{equation*} By formula \eqref{eq:An^m-num-of-orbits} \begin{eqnarray*} \label{eq:outer-num-of-orbits}
\Orbs{D^\tau} & = & \left\lceil ({m-1})/{2}\right\rceil + 1 + \left\lceil ({n-2-m})/{2}\right\rceil \\ & = & \begin{cases} k & \mbox{ if }n=2k \ , \\ k & \mbox{ if }n=2k+1 \mbox{ and } m \mbox{ is odd}, \\ k+1 & \mbox{ if }n=2k+1 \mbox{ and } m \mbox{ is even}. \end{cases} \end{eqnarray*} The orbit of zero in $L(D,{\boldsymbol{t}})$ consists of the labelings ${\boldsymbol{a}}\in L({\mathbf{A}}_{n-2}^{(m)})$ such that $l({\boldsymbol{a}})=r({\boldsymbol{a}})$, see Notation \ref{n:l,r}. {\bfseries The class of zero in $L(D)^\tau$ } consists of the labelings ${\boldsymbol{d}}$ whose restriction ${\boldsymbol{a}}={\rm res}_{D^\tau}({\boldsymbol{d}})$ to $D^\tau$ satisfies $l({\boldsymbol{a}})=r({\boldsymbol{a}})$.
As {\bfseries representatives of equivalence classes} we can take
$(p|0)$ where $0\le p\le\lceil (m-1)/2\rceil$, and $(0|q)$ where $1\le q\le\lceil (n-2-m)/2\rceil $. Again, these representatives lie in $L(D^\tau)$ and hence in $L(D)^\tau$.
\section{Groups of type ${\mathbf{E}}_6$} \label{sec:E6}
\subsection{The compact group of type ${\mathbf{E}}_6^{(0)}$}
The Dynkin diagram of $G$ is \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} \\ & & \bcu{6} & & } \end{equation*}
\begin{proposition} [Reeder \cite{Reeder}]\label{prop:E6} The diagram ${\mathbf{E}}_6^{(0)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of $0$, which is a fixed labeling. \item[2.] The orbit consisting of all the labelings with $1$ or $3$ components with representative \[ \ell_1\ = \quad \sxymatrix{
1 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & 0 & & }
\] \item[3.] The orbit consisting of all the labelings with $2$ components with representative \[ \ell_2\ = \quad \sxymatrix{ 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & 1 & & } \] \end{enumerate} \end{proposition}
\begin{remark} The moves in $L({\mathbf{E}}_n)$ for $n=6,7,8$ preserve the parity of the number of components. \end{remark}
\begin{remark} By \cite[Example 4.4]{Reeder} each of the graphs $D={\mathbf{E}}_6$ and $D={\mathbf{E}}_8$ is nonsingular (namely, a certain quadratic form introduced by Reeder is nonsingular). By \cite[Theorem 7.3 and Lemma 2.2(2)]{Reeder} in both cases we have exactly $3$ orbits in $L(D)$: $\{0\}$, the orbit consisting of all nonzero labelings with even number of components, and the orbit consisting of all labelings with odd number of components. \end{remark}
\subsection{The group $EII$ with twisting diagram ${\mathbf{E}}_6^{(2)}$}\label{subsec:E6(2)}
A maximal compact subgroup is of type ${\mathbf{A}}_1 {\mathbf{A}}_5$. The twisting diagram and the augmented diagram are: \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \bcb{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} \\ & & \bcu{6} & &
} \qquad\qquad \mxymatrix{
\bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] \ar@{-}[d] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} \\ & *+[F]{1} & \bcu{6} & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:E6^(2)} The diagram ${\mathbf{E}}_6^{(2)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of all the labelings with $1$ or $3$ components (including the boxed 1). \item[2.] The orbit consisting of the labelings with $2$ components excluding the fixed labeling $\ell'_1$, with representative \[ \ell_3\ =\quad \sxymatrix{
1 \ar@{-}[r] & 1 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & *+[F]{1} & 1 & & } \] \item[3.] The fixed labeling \[ \ell'_1\ = \quad \sxymatrix{
1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & *+[F]{1} & 0 & & }
\] \end{enumerate} \end{proposition}
\subsection{The group $EIII$ of Hermitian type with twisting diagram ${\mathbf{E}}_6^{(1)}$}\label{subsec:E6(1)}
A maximal compact subgroup of $G$ is of type ${\mathbf{D}}_5 T^1$. The twisting diagram and the augmented diagram are: \begin{equation*} \mxymatrix{ \bcb{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] \ar@{-}[d] & \bc{4} \ar@{-}[r] & \bc{5} \\ & & \bcu{6} & & } \qquad\qquad \mxymatrix{ *+[F]{1} \ar@{-}[r] & \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[d] \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \\ & & & \bcu{6} & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:E6^(1)} The diagram ${\mathbf{E}}_6^{(1)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of the labelings with $1$ or $3$ components excluding the fixed labeling $\ell'_2$. \item[2.] The orbit consisting of all the labelings with $2$ components with representative \[ \ell'_3\ = \quad \sxymatrix{ *+[F]{1} \ar@{-}[r] & 1 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & & 1 & & } \] \item[3.] The fixed labeling \[ \ell'_2\ = \quad \sxymatrix{ *+[F]{1} \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & & 1 & & } \] \end{enumerate} \end{proposition}
\subsection{The group $EIV$ of type ${\mathbf{E}}_6$}\label{subsec:EIV}
This is an outer form of the compact group of type ${\mathbf{E}}_6$ with maximal compact subgroup of type ${\mathbf{F}}_4$. The Kac diagram is $$ \sxymatrix{ \bcb{0} \ar@{-}[r] & \bc{} & \bc{} \ar@{=>}[l] \ar@{-}[r] & \bc{} } $$ We denote by $\tau$ the nontrivial automorphism of the Dynkin diagram $D={\mathbf{E}}_6$. We erase vertex 0 and the other ``short'' vertex of the Kac diagram. We obtain $D^\tau=\sxymatrix{ \bc{3} \ar@{-}[r] & \bc{6}}$ and $\Orbs{D^\tau}=2$. The orbit of zero in $L(D^\tau)$ consists of one labeling 0 of $D^\tau$. {\bfseries The equivalence class of zero} in $L(D)^\tau$ consists of the labelings whose restriction to $D^\tau$ is 0.
\subsection{The split group $EI$ of type ${\mathbf{E}}_6$}\label{subsec:EI}
This is an outer form of the compact group of type ${\mathbf{E}}_6$ with maximal compact subgroup of type ${\mathbf{C}}_4$. The Kac diagram is $$\sxymatrix{ \bc{0} \ar@{-}[r] & \bc{} & \bc{} \ar@{=>}[l] \ar@{-}[r] & \bcb{} }$$ We erase vertex 0 and the other ``short'' vertex of the Kac diagram. We obtain $(D^\tau,{\boldsymbol{t}})=\sxymatrix{ \bc{3} \ar@{-}[r] & \bcb{6}}$. The augmented diagram is \[ \sxymatrix{ \bc{3} \ar@{-}[r] & \bc{6} \ar@{-}[r] & *+[F]{1} } \] We have $\Orbs{D^\tau,{\boldsymbol{t}}}=2$.
The orbit of zero in $L(D^\tau,{\boldsymbol{t}})$ consists of the labelings with one component (including the boxed 1). {\bfseries The equivalence class of zero} in $L(D)^\tau$ consists of the labelings ${\boldsymbol{a}}$ such that either $a_3=0$ or $a_6=1$.
Note that $H^1({\mathbb{R}},EI)$ was earlier computed by B.~Conrad \cite[Proof of Lemma 4.9]{Conrad}.
\section{Groups of type ${\mathbf{E}}_7$} \label{sec:E7}
\subsection{The compact group of type ${\mathbf{E}}_7^{(0)}$}
The Dynkin diagram is \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & \bcu{7} & & } \end{equation*}
\begin{proposition}[Weng \cite{Weng}] \label{prop:E7} The diagram ${\mathbf{E}}_7^{(0)}$ has $4$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of the fixed labeling 0. \item[2.] The fixed labeling \[ \ell_3\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & & 1 & & } \] \item[3.] The orbit consisting of the labelings with $1$ or $3$ components excluding the fixed labeling $\ell_3$, with representative \[ \ell_1\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & & 0 & & } \] \item[4.] The orbit consisting of all the labelings with $2$ or $4$ components, with representative \[ \ell_2\ =\quad \sxymatrix{ 0 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & & 1 & & } \] \end{enumerate} \end{proposition}
\begin{remark} By a lemma of Chih-wen Weng \cite{Weng}, for any (uncolored) simply-laced tree (not necessarily a Dynkin diagram) containing ${\mathbf{E}}_6$ as a subgraph, any {\em movable} (non-fixed) labeling is equivalent either to a labeling with one component or to a labeling with two components. \end{remark}
\subsection{The split group $EV$ with twisting diagram ${\mathbf{E}}_7^{(7)}$} \label{sec:E7(7)}
A maximal compact subgroup is of type ${\mathbf{A}}_7$. The twisting diagram and the augmented diagram are: \begin{equation*} \xymatrix@1@R=0pt@C=9pt { \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & \bcbu{7} & & &} \qquad\qquad \sxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[d] & & \\ & & & *+[F]{1} & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:E7^(7)} The diagram ${\mathbf{E}}_7^{(7)}$ has $2$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} is the orbit consisting of all the labelings with $1$ or $3$ components (including the boxed 1).
\item[2.] The orbit consisting of all the labelings with $2$ or $4$ components (including the boxed 1), with representative \[ m_3\ =\quad \sxymatrix{ 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 1 \\ & & & 0 \ar@{-}[d] & & \\ & & & *+[F]{1} & & } \ . \] \end{enumerate} \end{proposition}
Note that $H^1({\mathbb{R}},EV)$ was earlier computed by B.~Conrad \cite[Proof of Lemma 4.9]{Conrad}.
\subsection{The group $EVI$ with twisting diagram ${\mathbf{E}}_7^{(2)}$}
A maximal compact subgroup is of type ${\mathbf{A}}_1 {\mathbf{D}}_6$. The twisting diagram and augmented diagram are: \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \bcb{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & \bcu{7} & & & } \qquad\qquad \mxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] \ar@{-}[d] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & *+[F]{1} & & \bcu{7} & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:E7^(2)} The diagram ${\mathbf{E}}_7^{(2)}$ has $4$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of the labelings with $1$ or $3$ or $5$ components (including the boxed 1), excluding the fixed labeling $\ell'_2$ (see below). \item[2.] The fixed labeling \[\ell'_1\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & *+[F]{1} & & 0 & & } \] \item[3.] The fixed labeling \[ \ell'_2\ =\quad \sxymatrix{ 0 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & *+[F]{1} & & 1 & & } \] \item[4.] The orbit consisting of the labelings with $2$ or $4$ components (including the boxed 1) excluding the fixed labeling $\ell'_1$, with representative \[ \ell'_3\ =\quad \sxymatrix{ 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & *+[F]{1} & & 1 & & } \] \end{enumerate} \end{proposition}
Note that $H^1({\mathbb{R}},EVI)$ was earlier computed by Garibaldi and Semenov \cite[Example 5.1]{GS} by a different method.
\subsection{The group $EVII$ of Hermitian type with twisting diagram ${\mathbf{E}}_7^{(1)}$}
A maximal compact subgroup is of type ${\mathbf{E}}_6 T^1$. The twisting diagram and augmented diagram are: \begin{equation*} \mxymatrix{ \bcb{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & \bcu{7} & & & } \qquad\qquad \mxymatrix{ *+[F]{1} \ar@{-}[r] & \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & & \bcu{7} & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition}\label{prop:E7^(1)} The diagram ${\mathbf{E}}_7^{(1)}$ has $2$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of all the labelings with $1$ or $3$ components (including the boxed 1). \item[2.] The orbit consisting of all the labelings with $2$ or $4$ components (including the boxed 1), with representative \[ m'_3\ =\quad \sxymatrix{ *+[F]{1} \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 1 \\ & & & & 0 & & } \] \end{enumerate} \end{proposition}
\section{Groups of type ${\mathbf{E}}_8$} \label{sec:E8}
\subsection{The compact group of type ${\mathbf{E}}_8^{(0)}$}
The Dynkin diagram is \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bc{7} \\ & & & & \bcu{8} & & } \end{equation*}
\begin{proposition} [Reeder \cite{Reeder}] \label{prop:E8} The diagram ${\mathbf{E}}_8^{(0)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} which contains only $0$. \item[2.] The orbit consisting of all the labelings with odd number of components, with representative \[\ell_3\ =\quad \sxymatrix{ 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 0 \ar@{-}[r] & 0 \\ & & & & 1 & & } \ . \] \item[3.] The orbit consisting of all the labelings with nonzero even number of components, with representative \[\ell_2\ =\quad \sxymatrix{ 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[d] \ar@{-}[r] & 1 \ar@{-}[r] & 0 \\ & & & & 1 & & } \ . \] \end{enumerate} \end{proposition}
\subsection{The split group $EVIII$ with twisting diagram ${\mathbf{E}}_8^{(7)}$} \label{subsec:EE8(7)}
A maximal compact subgroup is of type ${\mathbf{D}}_8$. The twisting diagram and the augmented diagram are: \begin{equation*} \mxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bcb{7} \\ & & & & \bcu{8} & & } \qquad\qquad \mxymatrix{ \bc{1} \ar@{-}[r] &\bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bc{7} \ar@{-}[r] & *+[F]{1} \\ & & & & \bcu{8} & & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:E8^(7)} The diagram ${\mathbf{E}}_8^{(7)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of the labelings with odd number of components (including the boxed 1), excluding the fixed labeling $\ell'_2$. \item[2.] The fixed labeling \[ \ell'_2\ = \quad \sxymatrix{ 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & *+[F]{1} \\
& & & & 1 & & &} \] \item[3.] The orbit consisting of all the labelings with even number of components, with representative \[ m_3\ = \quad \sxymatrix{ 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & *+[F]{1} \\
& & & & 0 & & &} \] \end{enumerate} \end{proposition}
\subsection{The group $EIX$ with twisting diagram ${\mathbf{E}}_8^{(1)}$}
A maximal compact subgroup is of type ${\mathbf{A}}_1 {\mathbf{E}}_7$. The twisting diagram and the augmented diagram are: \begin{equation*} \mxymatrix{ \bcb{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bc{7} \\ & & & & \bcu{8} & & } \qquad\qquad \mxymatrix{ *+[F]{1} \ar@{-}[r] & \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bc{7} \\ & & & & & \bcu{8} & & } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:E8^(1)} The diagram ${\mathbf{E}}_8^{(1)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of all the labelings with odd number of components (including the boxed 1). \item[2.] The fixed labeling \[ \ell'_3\ =\quad \sxymatrix{ *+[F]{1} \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 0 \ar@{-}[r] & 0 \\ & & & & & 1 & & } \] \item[3.] The orbit consisting of the labelings with even number of components, excluding the fixed labeling $\ell'_3$, with representative \[ m'_3 \ =\quad \sxymatrix{ *+[F]{1} \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] & 1 \ar@{-}[r] & 0 \ar@{-}[r] \ar@{-}[d] & 1 \ar@{-}[r] & 0 \\
& & & & & 0 & & } \] \end{enumerate} \end{proposition}
\section{Groups of type ${\mathbf{F}}_4$} \label{sec:F4}
\subsection{The compact group of type ${\mathbf{F}}_4^{(0)}$}
The Dynkin diagram is \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] & \bc{4} }\,. \end{equation*}
\begin{proposition} \label{prop:F4} The diagram ${\mathbf{F}}_4^{(0)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} which contains only \ \ $0 \!-\! 0 \!\Leftarrow\! 0 \!-\! 0$\,. \item[2.] The orbit \[ \left\{\ 1\!-\! 0 \!\Leftarrow\! 0\!-\! 0\, , \quad 1 \!-\! 1 \!\Leftarrow\! 0\!-\! 0\, , \quad 0 \!-\! 1 \!\Leftarrow\! 0 \!-\! 0 \ \right\} \,. \] \item[3.] The orbit that contains the rest, with representative \ $\ell_2=\ 1 \!-\! 0 \!\Leftarrow\! 1 \!-\! 0 $\,. \end{enumerate} \end{proposition}
\subsection{The split group $FI$ with twisting diagram ${\mathbf{F}}_4^{(4)}$} A maximal compact subgroup is of type ${\mathbf{C}}_3 {\mathbf{A}}_1$. The twisting diagram and the augmented diagram are: \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] & \bcb{4} } \qquad\qquad \sxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & *+[F]{1} } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:F4^(4)} The diagram ${\mathbf{F}}_4^{(4)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} which consists of the labelings of the form ${\boldsymbol{a}}\Leftarrow {\boldsymbol{a}}'$, where $$ {\boldsymbol{a}}\in L(\sxymatrix{ \bc{1} \ar@{-}[r] & \bc{2} }), \quad {\boldsymbol{a}}'\in L( \sxymatrix{ \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & *+[F]{1} }\ )\, , $$ and ${\boldsymbol{a}}'$ has only one component. \item[2.] The fixed labeling $\ell'_2\ =\quad 1 \!-\! 0 \!\Leftarrow\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}} $\ . \item[3.] The orbit \[ \left\{\ 0 \!-\! 0 \!\Leftarrow\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\ , \quad 0 \!-\! 1 \!\Leftarrow\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\ , \quad 1 \!-\! 1 \!\Leftarrow\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}} \ \right\} \, . \] \end{enumerate} \end{proposition}
\subsection{The group $FII$ with twisting diagram ${\mathbf{F}}_4^{(1)}$} A maximal compact subgroup is of type ${\mathbf{B}}_4$. The twisting diagram and the augmented diagram are: \begin{equation*} \sxymatrix{ \bcb{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] & \bc{4} } \qquad\qquad \sxymatrix{ *+[F]{1} \ar@{-}[r] & \bc{1} \ar@{-}[r] & \bc{2} & \ar@{=>}[l] \bc{3} \ar@{-}[r] & \bc{4} } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}).
\begin{proposition} \label{prop:F4^(1)} The diagram ${\mathbf{F}}_4^{(1)}$ has $3$ orbits. The orbits are: \begin{enumerate} \item[1.] {\bfseries The orbit of zero} consisting of \[ \left\{\ {\sxymatrix{\boxone}}\!-\! 0 \!-\! 0 \!\Leftarrow\! 0 \!-\! 0 \, , \quad {\sxymatrix{\boxone}}\!-\! 1 \!-\! 0 \!\Leftarrow\! 0 \!-\! 0 \, , \quad {\sxymatrix{\boxone}}\!-\! 1 \!-\! 1 \!\Leftarrow\! 0 \!-\! 0 \ \right\} \, . \] \item[2.] The fixed labeling \ $\ell'_1\ =\quad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!\Leftarrow\! 0 \!-\! 0 $\,. \item[3.] The orbit that contains the rest, with representative \ $\ell'_3=\ \ {\sxymatrix{\boxone}}\!-\! 1 \!-\! 1 \!\Leftarrow\! 1 \!-\! 0 $\,. \end{enumerate} \end{proposition}
\section{Groups of type ${\mathbf{G}}_2$} \label{sec:G2}
\subsection{The compact group of type ${\mathbf{G}}_2^{(0)}$}
The Dynkin diagram is \begin{equation*} \sxymatrix{ \bc{1} & \ar@3{->}[l] \bc{2} }\, . \end{equation*} The description of orbits is similar to the case ${\mathbf{A}}_2^{(0)}$, because $3 \equiv 1 \pmod{2}$. We have $\Orbs{{\mathbf{G}}_2^{(0)}}=2$.
The two orbits are \[ \{\, 0 \!-\! 0\, \} \quad \mbox{ and } \quad \{\, 1 \!-\! 0\,,\quad 1 \!-\! 1\, , \quad 0 \!-\! 1\, \} \, . \]
\subsection{The split group with twisting diagram ${\mathbf{G}}_2^{(2)}$} A maximal compact subgroup is of type ${\mathbf{A}}_1 {\mathbf{A}}_1$. The twisting diagram and the augmented diagram are: \begin{equation*} \sxymatrix{ \bc{1} & \ar@3{->}[l] \bcb{2} } \qquad\qquad \sxymatrix{ \bc{1} & \ar@3{->}[l] \bc{2} \ar@{-}[r] & *+[F]{1} } \end{equation*} (see \cite[Table 7]{OV} and Construction \ref{const:augmented-diag}). The description of orbits is similar to the case ${\mathbf{A}}_2^{(2)}$. We have $\Orbs{{\mathbf{G}}_2^{(2)}}=2$. The two orbits are \[ \{\, 0 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\ , \quad 0 \!-\! 1 \!-\! {\sxymatrix{\boxone}}\ , \quad 1 \!-\! 1 \!-\! {\sxymatrix{\boxone}}\ \} \quad \mbox{ and } \quad \{\, 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\ \} \,. \]
\section{Connected components in real homogeneous spaces} \label{sec:examples}
Let $G$ be a simply connected absolutely simple algebraic group over ${\mathbb{R}}$. Let $H\subset G$ be a simply connected semisimple ${\mathbb{R}}$-subgroup. Set $X=G/H$. In this section we describe our method of calculation of the number of connected components $\#\pi_0(X({\mathbb{R}}))$, and give examples.
\subsection{Triple $(D,\tau,{\boldsymbol{t}})$} \label{ss:triple} Let $G$ be a simply connected absolutely simple ${\mathbb{R}}$-group.
If $G$ is an {\em outer} form of a compact group $G_0$, we can write $G=\kern 0.8pt_{t\kern 0.8pt \tau}G_0$ as in Section \ref{sec:outer}, where $\tau$ is an automorphism of order 2 of the Dynkin diagram $D$ of $G_{\mathbb{C}}$. The element $t\in T^{\rm ad}({\mathbb{R}})_2$ defines a coloring ${\boldsymbol{t}}$ of $D^\tau$, and we may assume that the coloring comes from a Kac diagram. We obtain a triple $(D,\tau,{\boldsymbol{t}})$.
If $G$ is an {\em inner} form of a compact group $G_0$, we can write $G=\kern 0.8pt_{t}G_0$ as in Section \ref{sec:inner}, and the element $t\in T^{\rm ad}({\mathbb{R}})_2$ defines a coloring ${\boldsymbol{t}}$ of $D$. In this case we set $\tau=1$, then again ${\boldsymbol{t}}$ is a coloring of $D^\tau$, and again we may assume that the coloring comes from a Kac diagram. Again we obtain a triple $(D,\tau,{\boldsymbol{t}})$.
In both cases we have the bijection \eqref{e:general-bijection} ${\rm Cl}(D,\tau,{\boldsymbol{t}})\isoto H^1({\mathbb{R}},G)$.
\subsection{Describing the connected components}
Let $H$ be a simply connected semisimple ${\mathbb{R}}$-subgroup of a simply connected absolutely simple ${\mathbb{R}}$-group $G$. We do not assume that $H$ is simple, or that $H$ and $G$ are inner forms of compact groups.
Let $H=H_1\times\dots\times H_r$ be the decomposition of $H$ into the product of simple ${\mathbb{R}}$-groups. We may and shall assume that each $H_i$ is absolutely simple. Let $T_H$ be a fundamental torus of $H$, i.e., a maximal torus containing a maximal compact torus. Then $T_H=\prod_i T_{i}$ where each $T_{i}\subset H_i$ is a fundamental torus of $H_i$. We present $H_i$ as a twisted form of a compact group as in Subsection \ref{ss:triple} and obtain a triple $(D_i,\tau_i,{\boldsymbol{t}}_i)$, where $D_i$ is the Dynkin diagram of $H_i$, $\tau_i$ is an automorphism of $D_i$ with $\tau_i^2=1$, and ${\boldsymbol{t}}_i$ is a coloring of $D_i^{\tau_i}$. Then we have an isomorphism $L(D_i)^{\tau_i}\isoto T_i({\mathbb{R}})_2$. We set $D_H=\sqcup_i D_i$ (disjoint union), $\tau_H=\prod_i \tau_i\in{\rm Aut}(D)$ (direct product of automorphisms), $L(D_H)=\bigoplus_i L(D_i)$, then $L(D_H)^{\tau_H}=\bigoplus_i L(D_i)^{\tau_i}$, and we have an isomorphism $L(D_H)^{\tau_H}\isoto T_H({\mathbb{R}})_2$. We have a coloring ${\boldsymbol{t}}_H$ of $D_H^{\tau_H}$: a vertex $v\in D_i\subset D_H$ is black in $D_H$ if and only if it is black in $D_i$. We write also $L(D_H,\tau_H,{\boldsymbol{t}}_H)$ for $L(D_H)$. We define ${\rm Cl}(D_H,\tau_H,{\boldsymbol{t}}_H)$ to be $\prod_i{\rm Cl}(D_i,\tau_i,{\boldsymbol{t}}_i)$, then we have a bijection ${\rm Cl}(D_H,\tau_H,{\boldsymbol{t}}_H)\isoto H^1({\mathbb{R}},H)$. Using results of Sections \ref{sec:An}--\ref{sec:G2}, for each $i$ we find a set of representatives $\Xi_i\subset L(D_i,\tau_i,{\boldsymbol{t}}_i)^{\tau_i}$ of all equivalence classes in ${\rm Cl}(D_i,\tau_i,{\boldsymbol{t}}_i)$. We set $\Xi=\prod_i\Xi_i\subset L(D_H,\tau_H,{\boldsymbol{t}}_H)^{\tau_H}$, then $\Xi$ is a set of representatives of all equivalence classes in ${\rm Cl}(D_H,\tau_H,{\boldsymbol{t}}_H)$, i.e., the composite map $\Xi\hookrightarrow L(D_H,\tau_H,{\boldsymbol{t}}_H)^{\tau_H}\to {\rm Cl}(D_H,\tau_H,{\boldsymbol{t}}_H)$ is bijective.
Let $T_G$ be a fundamental torus of $G$. We may and shall assume that $T_H\subset T_G$. We present $G$ as a twisted form of a compact ${\mathbb{R}}$-group, then we have a triple $(D_G,\tau_G,{\boldsymbol{t}}_G)$. Using results of Sections \ref{sec:An}--\ref{sec:G2}, we compute {\em the class of zero} $[0]_G\subset L(D_G,\tau_G,{\boldsymbol{t}}_G)^{\tau_G}$.
The embedding $T_H({\mathbb{R}})_2\hookrightarrow T_G({\mathbb{R}})_2$ induces an injective homomorphism \[\iota\colon L(D_H)^{\tau_H}\to L(D_G)^{\tau_G},\] which can be computed explicitly. Let $\Xi_0$ denote the preimage in $\Xi$ of $[0]_G\subset L(D_G,\tau_G,{\boldsymbol{t}}_G)^{\tau_G}$ under the map $\Xi\hookrightarrow L(D_H,\tau_H,{\boldsymbol{t}}_H)^{\tau_H}\to L(D_G,\tau_G,{\boldsymbol{t}}_G)^{\tau_G}$, see the commutative diagram: \begin{equation*} \xymatrix{ \Xi\ar[r] &L(D_H,\tau_H,{\boldsymbol{t}}_H)^{\tau_H}\ar[r] \ar[d]^\iota &{\rm Cl}(D_H,\tau_H,{\boldsymbol{t}}_H)\ar[r]^-\sim\ar[d] &H^1({\mathbb{R}},H) \ar[d] \\
&L(D_G, \tau_G,{\boldsymbol{t}}_G)^{\tau_G}\ar[r] &{\rm Cl}(D_G,\tau_G,{\boldsymbol{t}}_G)\ar[r]^-\sim\ &H^1({\mathbb{R}},G) } \end{equation*} We see that $\Xi_0$ is in a bijection with $\ker\left[ H^1({\mathbb{R}},H)\to H^1({\mathbb{R}}, G)\right]$, and therefore, the cardinality of $\Xi_0$ answers Questions \ref{q:2} and \ref{q:1}.
\subsection{Generalities on reductive groups and Galois cohomology}
Let $G$ be a simply connected semisimple algebraic ${\mathbb{R}}$-group, $H\subset G$ be an ${\mathbb{R}}$-subgroup. The group $G({\mathbb{R}})$ of ${\mathbb{R}}$-points acts on the left on $(G/H)({\mathbb{R}})$.
\begin{lemma}\label{lem:orbits-components} Any orbit of $G({\mathbb{R}})$ in $(G/H)({\mathbb{R}})$ is a connected component of $(G/H)({\mathbb{R}})$. \end{lemma}
\begin{proof} Write $X=G/H$. Let $x\in X({\mathbb{R}})$, then we have a map $$ \phi_x\colon G({\mathbb{R}})\to X({\mathbb{R}}),\quad g\mapsto g\cdot x. $$ The differential of $\phi_x$ at any point $g\in G({\mathbb{R}})$ is surjective, hence by the implicit function theorem the map $\phi_x$ is open, hence the orbits of $G({\mathbb{R}})$ in $X({\mathbb{R}})$ are open, hence they are open and closed. Since $G$ is semisimple and simply connected, by \cite[Corollary 4.7]{Borel-Tits} or \cite[Proposition 7.6]{PR} the group $G({\mathbb{R}})$ is connected, hence the orbits of $G({\mathbb{R}})$ in $X({\mathbb{R}})$ are connected, hence they are the connected components of $X({\mathbb{R}})$. \end{proof}
\begin{lemma}\label{lem:pi1} Let $\varphi\colon S\to T$ be a homomorphism of $k$-tori over an algebraically closed field $k$ (of arbitrary characteristic), and let $\varphi_*\colon {{\sf X}}_*(S)\to {{\sf X}}_*(T)$ denote the induced homomorphism of the cocharacter groups. Then \begin{enumerate} \item[(i)] There is a canonical isomorphism ${\rm Hom}(\,({{\sf X}}^*(\ker\varphi))_{\rm tors},\,{\mathbb{Q}}/{\mathbb{Z}})\isoto({\rm coker\,}\varphi_*)_{\rm tors}$\,, where by $A_{\rm tors}$ we denote the torsion subgroup of an abelian group $A$. \item[(ii)]$\#\,({\rm coker\,}\varphi_*)_{\rm tors}=\#\, ({{\sf X}}^*(\ker\varphi))_{\rm tors}$. \end{enumerate} \end{lemma}
\begin{proof} Set $T_1={\rm im\,}\varphi$, then $T_1$ is a subtorus of $T$, and there exists a subtorus $T_2\subset T$ such that $T=T_1\times_k T_2$. Let $\varphi_1\colon S\to T_1$ be the canonical surjective homomorphism, then ${\rm coker\,}\varphi_*={\rm coker\,}\varphi_{1,*}\oplus{{\sf X}}_*(T_2)$, whence \[({\rm coker\,}\varphi_*)_{\rm tors}\cong({\rm coker\,}\varphi_{1,*})_{\rm tors}\,.\] Therefore, we may assume that $\varphi$ is surjective. Write $K=\ker\varphi$. From the short exact sequence $$ 1\to K\to S\labelto{\varphi} T\to 1 $$ we obtain a short exact sequence $$
0\to{{\sf X}}^*(T)\labelto{\varphi^*}{{\sf X}}^*(S)\to{{\sf X}}^*(K)\to 0, $$ whence, by taking ${\rm Hom}(\cdot , {\mathbb{Z}})$, we obtain an exact sequence for the functor ${\rm Ext}_{\mathbb{Z}}$ (see e.g. \cite[Theorem III.3.2]{ML}) $$ {\rm Hom}({{\sf X}}^*(S),{\mathbb{Z}})\labelto{\varphi_*} {\rm Hom}({{\sf X}}^*(T),{\mathbb{Z}})\to{\rm Ext}^1_{\mathbb{Z}}({{\sf X}}^*(K),{\mathbb{Z}})\to{\rm Ext}^1_{\mathbb{Z}}({{\sf X}}^*(S),{\mathbb{Z}})=0, $$ where the last equality follows from the fact that $X^*(S)$ is a free abelian group. We have ${\rm Hom}({{\sf X}}^*(S),{\mathbb{Z}})=X_*(S)$ and ${\rm Hom}({{\sf X}}^*(T),{\mathbb{Z}})=X_*(T)$. For a finitely generated abelian group $A$ we have ${\rm Ext}^1_{\mathbb{Z}}(A,{\mathbb{Z}})={\rm Hom}(A_{\rm tors},{\mathbb{Q}}/{\mathbb{Z}})$ \cite[Exercise 4 in Section III.9]{ML}, whence $$ ({\rm coker\,}\varphi_*)_{\rm tors}={\rm coker\,}\varphi_*={\rm Ext}^1_{\mathbb{Z}}(X^*(K),{\mathbb{Z}})={\rm Hom}({{\sf X}}^*(K)_{\rm tors},{\mathbb{Q}}/{\mathbb{Z}}), $$ which proves (i), and (ii) follows immediately. \end{proof}
\begin{corollary}\label{cor:pi1} Let $\varphi\colon H\to G$ be a homomorphism of reductive ${\mathbb{C}}$-groups with finite kernel. Let $T_H\subset H$ and $T\subset G$ be maximal tori such that $\varphi(T_H)\subset T$. Let $\varphi_*\colon {{\sf X}}_*(T_H)\to {{\sf X}}_*(T)$ denote the induced homomorphism of the cocharacter groups. Then $\#\ker[\varphi\colon H\to G]=\#\,({\rm coker\,}\varphi_*)_{\rm tors}$\,. \end{corollary}
\begin{proof} Write $K=\ker\varphi=\ker[T_H\to T]$, then
$$ \#\ker[\varphi\colon H\to G]=\#K=\#{{\sf X}}^*(K)=\#\,({\rm coker\,}\varphi_*)_{\rm tors}\,, $$ where the last equality follows from Lemma \ref{lem:pi1}(ii). \end{proof}
\begin{construction}\label{lem:sc-subgroup} Let $G$ be a simply connected absolutely simple ${\mathbb{R}}$-group. Let $T\subset G$ be a fundamental torus, $R=R(G_{\mathbb{C}},T_{\mathbb{C}})$ be the root system, $\Pi\subset R$ be a basis, $D=D(R,\Pi)$ be the Dynkin diagram. We may and shall assume that $G=\kern 0.8pt_{t\tau}G_0$, where $G_0$ is a compact group, $\tau\in({\rm Aut}\,D)_2$, and $t\in T^{\rm ad}({\mathbb{R}})_2$, see Section \ref{sec:outer}. Let $\Pi_H\subset \Pi$ be a $\tau$-invariant subset, and let $R_H\subset R$ denote the subset consisting of integer linear combinations of simple roots $\alpha\in\Pi_H$ (then $R_H$ is a root system with basis $\Pi_H$). Let $H_1$ denote the algebraic subgroup of $G_{\mathbb{C}}$ generated by $T_{\mathbb{C}}$ and the unipotent ``root'' subgroups $U_\beta$ for all roots $\beta\in R_H$. Let $H$ denote the derived subgroup of $H_1$. Then by \cite[Proposition 12.6]{MT} $H$ is a semisimple group with root system $R_H$. Since $G$ is simply connected, by \cite[Proposition 12.14]{MT} $H$ is simply connected as well. Since the complex conjugation $\rho$ acts on $R$ by $\rho(\beta)=-\tau(\beta)$ for $\beta\in R$, the subset $R_H$ of $R$ is $\rho$-invariant, and hence, the subgroups $H_1$ and $H$ are defined over ${\mathbb{R}}$. \end{construction}
\begin{lemma}\label{lem:odd} Let $$ 1\to A\to B\labelto{\psi} C\to 1 $$ be a short exact sequence of algebraic ${\mathbb{R}}$-groups, where $A$ is finite and central in $B$. If the order $\# A({\mathbb{C}})$ of $A({\mathbb{C}})$ is odd, then the induced map $$ \psi_*\colon H^1({\mathbb{R}},B)\to H^1({\mathbb{R}},C) $$ is bijective. \end{lemma}
\begin{proof} Since $A$ is central, we have a cohomology exact sequence $$ C({\mathbb{R}})\to H^1({\mathbb{R}},A)\to H^1({\mathbb{R}},B)\labelto{\psi_*} H^1({\mathbb{R}},C)\to H^2({\mathbb{R}},A); $$ see \cite[I.5.7, Proposition 43]{Serre}. Since $\#{\rm Gal}({\mathbb{C}}/{\mathbb{R}})=2$ and $\# A({\mathbb{C}})$ is odd, by \cite[Section 6, Corollary 1 of Proposition 8]{AW} we have $H^1({\mathbb{R}},A)=1$ and $H^2({\mathbb{R}},A)=1$. It follows that the map $\psi_*$ is surjective and that $\ker\psi_*=1$. We show that any fiber of $\psi_*$ contains only one element. Indeed, let $\beta\in H^1({\mathbb{R}},B)$ and let $b\in Z^1({\mathbb{R}},B)$ be a cocycle representing $\beta$. By \cite[I.5.5, Corollary 2 of Proposition 39]{Serre}, the fiber $\psi_*^{-1}(\psi_*(\beta))$ is in a bijection with the quotient of $H^1({\mathbb{R}}, A)$ by an action of the group $_b C({\mathbb{R}})$. Since $H^1({\mathbb{R}},A)=1$, our fiber $\psi_*^{-1}(\psi_*(\beta))$ indeed contains only one element. Thus $\psi_*$ is bijective. \end{proof}
In Subsections \ref{ex:E7}\,--\,\ref{ex-E8} we give examples of calculations of $\#\pi_0(\, (G/H)({\mathbb{R}})\,)$ using results of Sections \ref{sec:An}--\ref{sec:G2}.
\subsection{Example with ${\mathbf{E}}_7$} \label{ex:E7} Let $G=EV$, the split simply connected simple ${\mathbb{R}}$-group with compact maximal torus $T$, of type ${\mathbf{E}}_7^{(7)}$ with twisting diagram and augmented diagram \begin{equation*} \mxymatrix { \bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] \ar@{-}[d] & \bc{5} \ar@{-}[r] & \bc{6} \\ & & & \bcbu{7} & & } \qquad\qquad \xymatrix @1@R=9pt@C=9pt { { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] \ar@{-}[d] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \\ & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[d] & & \\ & & & *+[F]{\text{\small1}} & & } \end{equation*} see Subsection \ref{sec:E7(7)}. Let $\Pi_G=\{ \alpha_1, \dots, \alpha_7\}$ be the simple roots (numbered as on the twisting diagram above). We remove vertex $3$. Set $\Pi_H=\Pi_G\smallsetminus \{\alpha_3\}$, and let $H$ be the corresponding semisimple ${\mathbb{R}}$-subgroup, see Construction \ref{lem:sc-subgroup}, with maximal torus $T_H$ (contained in $T$) and with twisting diagram of type ${\mathbf{A}}_2^{(0)}\sqcup {\mathbf{A}}_4^{(1)}$ \begin{equation*} \xymatrix@1@R=9pt@C=9pt { { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} &\ \
& { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] \ar@{-}[d] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \\ & & &{\lower0.20ex\hbox{\text{\Large$\bullet$}}} & & } \end{equation*} and augmented diagram: \begin{equation*} \xymatrix@1@R=9pt@C=9pt { { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} &\ \ & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] \ar@{-}[d] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \\ & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[d] & & \\ & & & *+[F]{\text{\small1}} & & } \end{equation*}
Then the semisimple group $H$ is simply connected, see Construction \ref{lem:sc-subgroup}.
We have $H=H_1\times H_2$, where $H_1$ is a compact groups of type ${\mathbf{A}}_2^{(0)}$ and $H_2$ is a twisted (noncompact) group of type ${\mathbf{A}}_4^{(1)}$. By Subsection \ref{sect:An}, for $H_1$ we have $\# {\rm Orb}({\mathbf{A}}_2^{(0)})=2$ with a set of representatives $$ \Xi_1=\ \{\ 0 \!-\! 0,\quad 1 \!-\! 0\ \}. $$ By Subsection \ref{subsec:An^m}, for $H_2$ we have $\# {\rm Orb}({\mathbf{A}}_4^{(1)})=3$ with a set of representatives $$ \Xi_2=\ \{\ {\sxymatrix{\boxone}} \!-\! 0 \!-\! 0 \!-\! 0 \!-\! 0,\quad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0, \quad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 1\ \}. $$ We set $\Xi=\Xi_1\times\Xi_2\subset L({\mathbf{A}}_2^{(0)}\sqcup{\mathbf{A}}_4^{(1)})=L({\mathbf{A}}_2^{(0)})\times L({\mathbf{A}}_4^{(1)})$, hence $\#\Xi=2\cdot 3=6$. We write down $\Xi$: \begin{align*} & 0 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 0 \!-\! 0 \!-\! 0\\ & 0 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0\\ & 0 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 1\\ & 1 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 0 \!-\! 0 \!-\! 0\\ & 1 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0\\ & 1 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 1\\ \end{align*} We must compute the subset $\Xi_0$ of $\Xi$ consisting of the labelings whose images in $L({\mathbf{E}}_7^{(7)})$ are contained in the orbit of zero $[0]$. The homomorphism $L({\mathbf{A}}_2^{(0)}\sqcup{\mathbf{A}}_4^{(1)})\to L({\mathbf{E}}_7^{(7)})$ is induced by the embedding ${\mathbf{A}}_2^{(0)}\sqcup{\mathbf{A}}_4^{(1)}\hookrightarrow {\mathbf{E}}_7^{(7)}$. By Subsection \ref{sec:E7(7)} the labelings of ${\mathbf{E}}_7^{(7)}$ in the orbit of zero are those with 1 or 3 components (including the boxed 1). Thus $\Xi_0$ consists of the following labelings of ${\mathbf{A}}_2^{(0)}\times {\mathbf{A}}_4^{(1)}$: \begin{align*} & 0 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 0 \!-\! 
0 \!-\! 0\\ & 0 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 1\\ & 1 \!-\! 0\qquad {\sxymatrix{\boxone}}\!-\! 0 \!-\! 1 \!-\! 0 \!-\! 0 \end{align*} We conclude that $$\# \pi_0(\,(G/H)({\mathbb{R}})\,)\ =\ \#\ker \left[ H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G) \right] =\#\Xi_0=3.$$
Similar calculations show that if we remove vertex 2 instead of vertex 3, then $\# \pi_0(\,(G/H)({\mathbb{R}})\,)=2$, and if we remove vertex 1 instead of vertex 3, then $\# \pi_0(\,(G/H)({\mathbb{R}})\,)=1$, i.e. $(G/H)({\mathbb{R}})$ will be connected.
\subsection{Examples with ${{\bf Spin}}^*(2n)$}
Let $G={{\bf Spin}}^*(2n)\ (n\ge 4)$, the simply connected ``quaternionic'' ${\mathbb{R}}$-group of type ${\mathbf{D}}_n^{(n)}$ with twisting diagram and augmented diagram \begin{equation*} \xymatrix@1@R=0pt@C=9pt { \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-3} \ar@{-}[r] & \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1} \\ & & & \bcbu{n} & } \qquad\qquad \sxymatrix{ { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & \cdots \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[d] \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \\ & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[d] & \\ & & & *+[F]{1} & } \end{equation*} see Subsection \ref{subsec:Dn^(n)}. Let $\Pi_G=\{ \alpha_1, \dots, \alpha_n\}$ be the simple roots (numbered as on the twisting diagram above). We remove vertex $n-1$. Set $\Pi_H=\Pi_G\smallsetminus \{\alpha_{n-1}\}$, and let $H$ be the corresponding semisimple ${\mathbb{R}}$-subgroup, see Construction \ref{lem:sc-subgroup}, with twisting diagram of type ${\mathbf{A}}_{n-1}^{(1)}$ \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-2} \ar@{-}[r] &\bcb{n}} \end{equation*} and augmented diagram \begin{equation*} \sxymatrix{ \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] & \bc{n-2} \ar@{-}[r] & \bc{n} \ar@{-}[r] & *+[F]{1} }\ . \end{equation*} Then the semisimple ${\mathbb{R}}$-subgroup $H$ is simply connected, see Construction \ref{lem:sc-subgroup}. By Subsection \ref{subsec:An^m} we can take for representatives of orbits in $L({\mathbf{A}}_{n-1}^{(1)})$ the set
\[ \Xi=\left\{\eta_i\ |\ 1\le i\le\left\lceil n/2\right\rceil\right\},\]
where $\eta_i$ denotes the labeling with $i$ components (including the boxed 1) maximally packed to the right. By Subsection \ref{sec:Dn(n)}, the orbit of zero in $L({\mathbf{D}}_n^{(n)})$ is the set of labelings with {\em odd} number of components (including the boxed 1). Thus
\[ \Xi_0=\left\{\eta_i\ |\ 1\le i\le\left\lceil n/2\right\rceil,\ i \text{ is odd}\right\}.\] We see that $\#\Xi_0$ is the number of odd numbers $i$ between 1 and $\lceil n/2\rceil$, i.e., $\#\Xi_0=\lceil n/4\rceil$.
We conclude that $$\# \pi_0(\,(G/H)({\mathbb{R}})\,)=\#\ker[H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G)]=\#\Xi_0= \left\lceil n/4\right\rceil.$$
Now, instead of removing vertex $n-1$, let us remove vertex $m$ with $1\le m\le n-2$: \begin{equation*} \xymatrix@1@R=0pt@C=9pt { \bc{1} \ar@{-}[r] & \cdots \ar@{-}[r] &\bc{m-1} & &\bc{m+1} \ar@{-}[r] &\cdots \ar@{-}[r] & \bc{n-3} \ar@{-}[r] & \bc{n-2} \ar@{-}[d] \ar@{-}[r] & \bc{n-1} \\ & & &&&&& \bcbu{n} & &} \end{equation*} We obtain a subgroup $H=H_1\times H_2$, where $H_1$ is of type ${\mathbf{A}}_{m-1}^{(0)}$ (where $m-1=0$ is possible) and $H_2$ is of type ${\mathbf{D}}_{n-m}^{(n-m)}$ (where $n-m=2$ is possible). On the left of the removed vertex we can take
\[\Xi_1=\{\xi_k\ |\ 0\le k\le \lceil (m-1)/2\rceil\} \] for representatives of orbits in $L({\mathbf{A}}_{m-1}^{(0)})$, see Subsection \ref{subsec:An}. On the right of the removed vertex we can take \[\Xi_2=\{\ell_1,\ell_2\} \] for representatives of orbits in $L({\mathbf{D}}_{n-m}^{(n-m)})$ (where the labeling $\ell_1$ has one component and $\ell_2$ has two components, including the boxed 1), see Subsection \ref{subsec:Dn^(n)}. By Subsection \ref{subsec:Dn^(n)} applied to $G$, the orbit of zero in $L({\mathbf{D}}_n^{(n)})$ is the set of labelings with {\em odd} number of components (including the boxed 1). Now with any $\xi_k\in\Xi_1$ we associate the pair $(\xi_k,\ell)\in \Xi_1\times\Xi_2$, where $\ell$ is either $\ell_1$ or $\ell_2$ such that the total number of components in $\xi_k$ and $\ell$ is odd. We obtain a bijection $\Xi_1\isoto\Xi_0$. Thus in this case \begin{align*} \# \pi_0(\,(G/H)({\mathbb{R}})\,)=\ &\#\ker[H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G)]\\ =\ &\#\Xi_0=\, \#\Xi_1=\lceil (m-1)/2\rceil+1=\lceil (m+1)/2\rceil. \end{align*} In particular, if $m=n-2$, we obtain $\# \pi_0(\,(G/H)({\mathbb{R}})\,)=\lceil (n-1)/2\rceil$.
\subsection{Example with ${{\bf Spin}}(2m+1,2n+1)$}
Let $G={{\bf Spin}}(2m+1,2n+1)\ (m\ge 2,\ n\ge 3)$, which is an outer form of a compact group. The Kac diagram of $G$ is \[ \sxymatrix{\bc{0} &\bc{1}\ar@{=>}[l] \ar@{-}[r] & \cdots & \ar@{-}[l] \bcb{m} \ar@{-}[r] &\cdots & \ar@{-}[l] \bc{\ell-1} \ar@{=>}[r] & \bc{\ell} }\, , \] see Subsection \ref{ssec:Dn-outer} where $\ell=m+n$, see \cite[Table 7]{OV}. We write $G=\kern 0.8pt_{t\tau}G_0$ as in Construction \ref{lem:sc-subgroup}. We remove the $\tau$-stable vertex $m+n-k$ $(2\le k<n)$ of the Dynkin diagram and denote the obtained semisimple ${\mathbb{C}}$-subgroup by $H$, then by Construction \ref{lem:sc-subgroup} the subgroup $H$ is simply connected and defined over ${\mathbb{R}}$, and we have $H={\bf SU}(m,n-k)\times{{\bf Spin}}(2k+2)$. We are interested in $\pi_0(\, (G/H)({\mathbb{R}})\,)$. By Theorem \ref{cor:Theorem-3-Bo} applied to $G$ and $H$ we have a bijection $\pi_0(\, (G'/H')({\mathbb{R}})\,)\isoto\pi_0(\, (G/H)({\mathbb{R}})\,)$, where $G'={\bf SU}(m,n)$ of type ${\mathbf{A}}_{m+n-1}^{(m)}$ and $H'=H_1\times H_2$
with $H_1={\bf SU}(m,n-k)$ of type ${\mathbf{A}}_{m+n-k-1}^{(m)}$ and $H_2={\bf SU}(k)$ of type ${\mathbf{A}}_{k-1}^{(0)}$. Although probably one can compute $\#\pi_0(\, (G'/H')({\mathbb{R}})\,)$ using real algebraic geometry, we compute this number using Galois cohomology. Namely, for $H_1$ of type ${\mathbf{A}}_{m+n-k-1}^{(m)}$ we can take
\[ \Xi_1=\{(p|0)\ |\ 0\le p\le \lceil (m-1)/2\rceil\}\ \cup\
\{(0|q)\ |\ 1\le q\le \lceil (n-k-1)/2\rceil\} \] for representatives of orbits in $L({\mathbf{A}}_{m+n-k-1}^{(m)})$, see Subsection \ref{subsec:An^m}. For $H_2$ of type ${\mathbf{A}}_{k-1}^{(0)}$ we can take
\[\Xi_2=\{\xi_i\ |\ 0\le i\le \lceil (k-1)/2\rceil\}\] for representatives of orbits in $L({\mathbf{A}}_{k-1}^{(0)})$, see Subsection \ref{subsec:An}. For $G'$, the orbit of zero in $L({\mathbf{A}}_{m+n-1}^{(m)})$ is the set of labelings with the same number of components on the left and on the right of $m$, see Subsection \ref{subsec:An^m}. Thus
\[ \Xi_0=\{(\kern 0.8pt(p|0),\xi_p\kern 0.8pt)\in\Xi_1\times\Xi_2\}.\] Here $0\le p\le \lceil (m-1)/2\rceil$, $0\le p\le \lceil (k-1)/2\rceil$, hence \[\#\Xi_0=1+\min(\lceil (m-1)/2\rceil,\lceil (k-1)/2\rceil)=\min(\lceil (m+1)/2\rceil,\lceil (k+1)/2\rceil).\] We conclude that \[\#\pi_0(\, (G/H)({\mathbb{R}})\,)=\#\pi_0(\, (G'/H')({\mathbb{R}})\,)=\#\Xi_0= \min(\lceil (m+1)/2\rceil,\lceil (k+1)/2\rceil).\]
\subsection{Example with ${\mathbf{E}}_8$} \label{ex-E8} Let $G=EVIII$, the split form ${\mathbf{E}}_8^{(7)}$ of ${\mathbf{E}}_8$ with compact maximal torus $T$, with Kac diagram and augmented diagram \begin{equation*} \xymatrix@1@R=0pt@C=9pt {\bc{0} \ar@{-}[r] &\bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} \ar@{-}[r] & \bc{4} \ar@{-}[r] & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bcb{7} \\ & & & & & \bcu{8} & & } \qquad\qquad \xymatrix@1@R=9pt@C=9pt { { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] \ar@{-}[d] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & *+[F]{1} \\ & & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} & & & } \end{equation*} see Subsection \ref{subsec:EE8(7)}. In this example, in contrast to the two previous examples, we construct an ${\mathbb{R}}$-subgroup $H$ of $G$ of the same rank, and not of smaller rank. We remove vertex 4 from the Kac diagram (the extended Dynkin diagram), and we do not erase vertex 0. This means that we consider the semisimple ${\mathbb{C}}$-subgroup $H$ of $G$, generated by $T_{\mathbb{C}}$ and the unipotent ``root" subgroups $U_\beta$ with $\beta\in R_H$, where $R_H$ is the set of $\beta\in R$ that are integer linear combinations of the roots $\alpha_i$, $0\le i\le 8,\ i\neq 4$, where $\alpha_1,\dots,\alpha_8$ are the simple roots and $\alpha_0$ is the lowest root. Since $-R_H=R_H$, the ${\mathbb{C}}$-subgroup $H$ is defined over ${\mathbb{R}}$. 
We obtain a maximal connected algebraic subgroup $H$ of $G$ \cite[Table 5]{OV2} with twisting diagram \begin{equation*} \xymatrix@1@R=0pt@C=9pt {\bc{0} \ar@{-}[r] &\bc{1} \ar@{-}[r] & \bc{2} \ar@{-}[r] & \bc{3} & & \bc{5} \ar@{-}[r] \ar@{-}[d] & \bc{6} \ar@{-}[r] & \bcb{7} \\ & & & & & \bcu{8} & & } \end{equation*} and augmented diagram \begin{equation} \label{eq:Htil} \xymatrix@1@R=9pt@C=9pt { { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] \ar@{-}[d] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} \ar@{-}[r] & *+[F]{1} \\ & & & & & { \lower0.20ex\hbox{{\text{\Large$\circ$}}}} & & & } \end{equation}
We compute the fundamental group $\pi_1(H_{\mathbb{C}})$ of the semisimple group $H$. Let ${\widetilde{H}}$ denote the universal covering of $H$. Consider the composite morphism $$ \varphi\colon {\widetilde{H}}\to H\to G, $$ and let ${\widetilde{T}}_H$ denote the maximal torus of ${\widetilde{H}}$ such that $\varphi({\widetilde{T}}_H)=T$. We denote by $\varphi_*\colon{{\sf X}}_*({\widetilde{T}}_H)\to{{\sf X}}_*(T)$ the induced homomorphism of the cocharacter groups. The cocharacter group ${{\sf X}}_*({\widetilde{T}}_H)$ has a basis \begin{equation}\label{eq:basis} \alpha_0^\vee,\ \alpha_1^\vee,\ \alpha_2^\vee,\ \alpha_3^\vee,\ \widehat{\alpha_4^\vee}, \ \alpha_5^\vee,\ \alpha_6^\vee,\ \alpha_7^\vee,\ \alpha_8^\vee, \end{equation} where $\widehat{\alpha_4^\vee}$ means that $\alpha_4^\vee$ is removed from the list. The cocharacter group ${{\sf X}}_*(T)$ has a basis $\alpha_1^\vee, \dots, \alpha_8^\vee$, while the subgroup ${\rm im\,}\varphi_*\subset{{\sf X}}_*(T)$ is generated by the cocharacters \eqref{eq:basis}. There is a linear relation between $\alpha^\vee_0,\ \alpha^\vee_1,\dots,\alpha^\vee_8$, in which the removed simple coroot $\alpha^\vee_4$ appears with coefficient 5, while $\alpha^\vee_0$ appears with coefficient 1; see \cite[Table 6]{OV} or \cite[Planche VII, (IV)]{Bourbaki}. We see that ${\rm im\,}\varphi_*\subset {{\sf X}}_*(T)$ contains $\alpha_i^\vee$ for $i\neq 4$, and it contains $5\alpha_4^\vee$, but not $\alpha_4^\vee$. Thus ${\rm im\,}\varphi_*$ is a subgroup of index 5 in ${{\sf X}}_*(T)$. By Corollary \ref{cor:pi1} the kernel of the canonical epimorphism ${\widetilde{H}}\to H$ is of order 5, hence $\pi_1(H_{\mathbb{C}})$ is of order 5.
Since the order 5 of $\ker\varphi$ is odd, by Lemma \ref{lem:odd} the induced map $H^1({\mathbb{R}},{\widetilde{H}})\to H^1({\mathbb{R}},H)$ is bijective, whence $$ \#\ker[H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G)]\ =\ \#\ker[H^1({\mathbb{R}},{\widetilde{H}})\to H^1({\mathbb{R}},G)]. $$ We compute $\#\ker[H^1({\mathbb{R}},{\widetilde{H}})\to H^1({\mathbb{R}},G)]$. We have ${\widetilde{H}}={\widetilde{H}}_1\times {\widetilde{H}}_2$, where ${\widetilde{H}}_1$ is compact of type ${\mathbf{A}}_4^{(0)}$ and ${\widetilde{H}}_2$ is of type ${\mathbf{A}}_4^{(4)}$. By Subsection \ref{sect:An} we can take $$ \Xi_1=\ \{\ 0 \!-\! 0 \!-\! 0 \!-\! 0, \quad 1 \!-\! 0 \!-\! 0 \!-\! 0, \quad 1 \!-\! 0 \!-\! 1 \!-\! 0\ \} $$ as a set of representatives of orbits in $L({\mathbf{A}}_4^{(0)})$. By Subsection \ref{subsec:An^m} we can take $$ \Xi_2=\ \{\ 0 \!-\! 0 \!-\! 0 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\,, \quad 0 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\,, \quad 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\ \} $$ as a set of representatives of orbits in $L({\mathbf{A}}_4^{(4)})$. Set $\Xi=\Xi_1\times\Xi_2$. We denote $\Xi_0$ the preimage in $\Xi$ of the orbit of zero in $L({\mathbf{E}}_8^{(7)})$.
By Subsection \ref{subsec:EE8(7)} the orbit of zero $[0]\subset L({\mathbf{E}}_8^{(7)})$ consists of the labelings with an odd number of components (including the boxed 1), excluding the fixed labeling $\ell'_2$. The subset of $\Xi$ consisting of labelings with an odd number of components has the following 5 labelings: \begin{align*} & 0 \!-\! 0 \!-\! 0 \!-\! 0 \qquad 0 \!-\! 0 \!-\! 0 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\\ & 0 \!-\! 0 \!-\! 0 \!-\! 0 \qquad 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\\ & 0 \!-\! 0 \!-\! 0 \!-\! 1 \qquad 0 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\\ & 0 \!-\! 1 \!-\! 0 \!-\! 1 \qquad 0 \!-\! 0 \!-\! 0 \!-\! 0 \!-\! {\sxymatrix{\boxone}}\\ & 0 \!-\! 1 \!-\! 0 \!-\! 1 \qquad 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}} \end{align*} and one of them (namely $0 \!-\! 0 \!-\! 0 \!-\! 0 \quad 1 \!-\! 0 \!-\! 1 \!-\! 0 \!-\! {\sxymatrix{\boxone}}$) is the preimage of $\ell'_2$. We see that $\#\Xi_0=4$. We conclude that \begin{align*} \# \pi_0(\,(G/H)({\mathbb{R}})\,)\ =\ &\#\ker[H^1({\mathbb{R}},H)\to H^1({\mathbb{R}},G)]\\ =\ &\#\ker[H^1({\mathbb{R}},{\widetilde{H}})\to H^1({\mathbb{R}},G)]=\#\Xi_0=4. \end{align*}
\noindent\textsc{Acknowledgements.} The authors are very grateful to Dmitry A.~Timashev for his help in proving Lemma \ref{prop:twisted} and also for reading Subsection \ref{ss:Kac-outer} and correcting an inaccuracy. We thank the anonymous referee for a careful reading of the paper and for his/her comments, which helped to improve the exposition. We note that Erwann Rozier computed in 2009 the cardinalities $\#{\rm Orb}(\kern 0.8pt_{\boldsymbol{t}} D)$ for some colored graphs $\kern 0.8pt_{\boldsymbol{t}} D$ (in particular, for all Dynkin diagrams and twisting diagrams) under the guidance of the first-named author.
\end{document} |
\begin{document}
\title{On certain definite integrals and infinite series} \author{Ernst Eduard Kummer, Dr. of Mathematics} \date{} \maketitle \begin{abstract} We provide a translation of E.~E. Kummer's paper ``De integralibus quibusdam definitis et seriebus infinitis'' \cite{1}\footnote{This paper was translated from Kummer's Latin original ``De integralibus quibusdam definitis et seriebus infinitis'' by Alexander Aycock}. \end{abstract}
The definite integrals, that I undertake to treat now, are very closely connected to the infinite series, that I treated in a commentary of this journal on the hypergeometric series, volume $XV$ page 138 \cite{2} and the following, which, so that they can be represented in a simpler way, I will denote by these functions:\\
\begin{alignat*}{9}
&1. && 1 && + \frac{\alpha \cdot x}{\beta \cdot 1} && + \frac{\alpha(\alpha+1) \cdot x^2}{\beta(\beta+1) \cdot 1 \cdot 2} && +\frac{\alpha(\alpha+1)(\alpha+2) \cdot x^3}{\beta(\beta+1)(\beta+2) \cdot 1 \cdot 2 \cdot 3} && +\cdots \cdot && =\varphi{(\alpha, \beta, x)}, && && \\
&2. && 1 && + \frac{x}{\alpha \cdot 1} && + \frac{x^2}{\alpha(\alpha +1) \cdot 1 \cdot 2} && + \frac{x^3}{\alpha(\alpha +1)(\alpha +2) \cdot 1 \cdot 2 \cdot 3} && + \cdots \cdot && =\psi{(\alpha,x)}, && && \\
&3.~~ && 1 && - \frac{\alpha \cdot \beta}{1 \cdot x} && + \frac{\alpha(\alpha+1) \beta(\beta+1)}{1 \cdot 2 \cdot x^2} && - \frac{\alpha(\alpha+1)(\alpha+2) \beta(\beta+1)(\beta+2)}{1 \cdot 2 \cdot 3 \cdot x^3}&& +\cdots \cdot && = \chi{(\alpha,\beta,x)}. && && \\ \end{alignat*} From this the transformations, found at the cited place, of the series can also be exhibited in this way:\\
\begin{alignat*}{9}
&4.~~~~~~ && \varphi{(\alpha, \beta, x)} && = e^{x} \cdot \varphi{(\beta-\alpha, \beta, -x)},&& && && && && \\
&5. && \psi{(\alpha,x)} && = e^{\pm2\sqrt{x}}\varphi{(\alpha-\frac{1}{2},2\alpha-1, \pm4\sqrt{x})}, && && && && \\ \end{alignat*} which is the same formula as \[ ~~~~~~~~~~~~~~~~6.~~~ \varphi{(\alpha,2\alpha,x)}= e^{\frac{x}{2}}\psi({\alpha+\frac{1}{2},\frac{x^2}{16}}) \] and \[ 7. ~~~~ \chi{(\alpha, \beta, x)}= \frac{x^{\alpha} \Pi(\beta-\alpha-1)}{\Pi(\beta-1)}\varphi{(\alpha, \alpha-\beta+1,x)}+\frac{x^{\beta} \Pi(\alpha-\beta-1)}{\Pi(\alpha-1)}\varphi{(\beta, \beta-\alpha+1,x)} \] After having prepared these things, I want to settle the question about this integral at first \[ ~~~~~~~~~~~~~~~~8. ~~~ y= \int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u, \]
from where it follows \[ \frac{\diff{d}y}{\diff{d}x}= -\int_0^{\infty} u^{\alpha-2} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u, ~~\frac{\diff{d}^2y}{\diff{d}x^2}=\int_0^{\infty} u^{\alpha-3} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u, \] it is by differentiating of the quantity $u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}}$: \[ ~~~~~~~~~~~~~~~~~~~~~~~~~\diff{d}(u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}}) \] \[ = - u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u+(\alpha-1) u^{\alpha-2} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u+x \cdot u^{\alpha-3} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u, \] and by integration between the boundaries $0$ and $\infty$:
\[ 0= -\int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u+(\alpha-1)\int_0^{\infty} u^{\alpha-2} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u \] \[ ~~~~~~~~~~~~~~~~~~~~~~~~~+x \int_0^{\infty} u^{\alpha-3} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u, \] or, what is the same, \[ ~~~~~~~~~~~~~~~~~~~9. ~~~ 0= y+(\alpha-1)\frac{\diff{d}y}{\diff{d}x}-x\frac{\diff{d}^2y}{\diff{d}x^2}. \] The complete integral of this equation is easily found by means of series, that we denoted by the function $\psi$, \[ ~~~~~~~~~~~~~~~~10. ~~~~ y= A \cdot \psi(1-\alpha, x)+B \cdot x^{\alpha} \psi(1+\alpha,x), \] where $A$ and $B$ are arbitrary constants. From there this expression for the presented integral follows \[ ~~~~~\int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u = A \cdot \psi(1-\alpha, x)+B \cdot x^{\alpha} \psi(1+\alpha,x). \] The determination of the constant $A$ is easy; for, if we suppose the quantity $\alpha$ to be positive, and put $x=0$, we have \[ ~~~~~~~~~~~~~~~~~~~~~~~\int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \diff{d}u=A \] or \[ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~A=\Pi(\alpha-1). \] To determine the constant $B$ in the same way, the integral $y$ has to be transformed by the substitution $u=\frac{x}{v}$, whence it is \[ ~~~~~~~~\int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u =x^{\alpha}\int_0^{\infty} v^{-\alpha-1} \cdot e^{-v} \cdot e^{-\frac{x}{v}} \diff{d}v, \] after having used this integral transformation, equation (11.) 
is converted into this one: \[ ~~~\int_0^{\infty} v^{-\alpha-1} \cdot e^{-v} \cdot e^{-\frac{x}{v}} \diff{d}v= A \cdot x^{-\alpha} \psi(1-\alpha,x)+B \cdot \psi(1+\alpha,x), \] hence, if we suppose the quantity $\alpha$ to be negative and put $x=0$, we have \[ ~~~~~~~~~~~~~~~~~~~~~~~\int_0^{\infty} v^{-\alpha-1} \cdot e^{-v} \diff{d}v=B \] or \[ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~B=\Pi(-\alpha-1), \] after having finally substituted which values of the constants, it is: \[ ~~~~~~~~12. ~~~ \int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \diff{d}u = \Pi(\alpha-1) \psi(1-\alpha, x)+\Pi(-\alpha-1) x^{\alpha} \psi(1+\alpha,x). \] From this determination of the constants certain doubts are to be removed, which can arise from the fact, that the one constant was found, after having put $\alpha>0$, the determination of the other constant on the other hand requires the opposite assumption. But it is nevertheless clear, that these conditions would have been superfluous, if, while determining the constants, we had not used the value $x=0$, but any other positive values, and the values of the constants would not have been other ones. Moreover it is to be borne in mind, that formula (12.) is only valid, if $x$ is a positive quantity, otherwise the integral would become infinite; but if $x$ is positive, this integral has a finite value, whatever the quantity $\alpha$ is, positive or negative.\\
From this formula (12.) one can deduce another integral, which is expressed by two series of the kind $\varphi(\alpha, \beta, x)$. By putting $xv$ in the place of $x$, by multiplying by $e^{-v} \cdot v^{\beta-1} \cdot \diff{d}v$ and integrating between the boundaries $0$ and $\infty$, it is \[ \int_0^{\infty} \int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot v^{\beta-1} \cdot e^{-v} \cdot e^{-\frac{xv}{u}} \diff{d}u \diff{d}v= \Pi(\alpha-1) \int_0^{\infty} v^{\beta-1} \cdot e^{-v} \cdot \psi(1-\alpha,xv) \diff{d}v \] \[ ~~~~~~~~~~~~~~~+\Pi(-\alpha-1)x^{\alpha}\int_0^{\infty} v^{\alpha+\beta-1} \cdot e^{-v} \cdot \psi(1+\alpha,xv) \diff{d}v, \] the integrations with respect to the variable $v$ are easily executed; or it is \begin{alignat*}{9}
&&& \int_0^{\infty} v^{\beta-1} e^{-v} \psi(1-\alpha,xv)\diff{d}v && = \Pi(\beta-1)\varphi(\beta,1-\alpha,x),&& && && && && \\
& && \int_0^{\infty} v^{\alpha+\beta-1} e^{-v} \psi(1+\alpha,xv)\diff{d}v && = \Pi(\alpha+\beta-1)\varphi{(\alpha+\beta,1+\alpha, x)}, && && && && \\
& && \int_0^{\infty} v^{\beta-1} e^{-v} \cdot e^{-\frac{xv}{u}}\diff{d}v && = \frac{\Pi(\beta-1)}{(1+\frac{x}{u})^{\beta}}, && && && && \\ \end{alignat*} whence \[ \int_0^{\infty} \int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot v^{\beta-1} \cdot e^{-v} \cdot e^{-\frac{xv}{u}} \diff{d}u \diff{d}v= \Pi(\beta-1)\int_0^{\infty} \frac{u^{\alpha-1} \cdot e^{-u} \diff{d}u}{(1+\frac{x}{u})^{\beta}}, \] which integral, by putting $ux$ in the place of $u$, is changed into \[ ~~~~~~~~~~~~~~~~~~~~~~~~\Pi(\beta-1)x^{\alpha} \int_0^{\infty} \frac{u^{\alpha+\beta-1}\cdot e^{-ux} \diff{d}u}{(1+u)^{\beta}} \] after having substituted which values, we finally have \[ ~~~~~~~~~~~~~~~~~~~~~~~~\Pi(\beta-1)x^{\alpha} \int_0^{\infty}\frac{u^{\alpha+\beta-1} \cdot e^{-ux} \diff{d}u}{(1+u)^{\beta}} \] \[ =\Pi(\alpha-1)\Pi(\beta-1)\varphi(\beta,1-\alpha,x)+\Pi(-\alpha-1)\Pi(\alpha+\beta-1)x^{\alpha} \varphi(\alpha+\beta, 1+\alpha,x), \] which formula, by changing $\alpha$ into $\alpha-\beta$, obtains this more convenient form \[ ~~~~~~~~~~~~~~~~~~13. ~~~~ \frac{x^{\alpha}}{\Pi(\alpha-1)} \int_0^{\infty} \frac{u^{\alpha-1}\cdot e^{-ux} \diff{d}u}{(1+u)^{\beta}} \] \[ =\frac{\Pi(\alpha-\beta-1)}{\Pi(\alpha-1)}x^{\beta} \cdot \varphi(\beta, \beta-\alpha+1,x)+\frac{\Pi(\beta-\alpha-1)}{\Pi(\beta-1)}x^{\alpha} \cdot \varphi(\alpha, \alpha-\beta+1,x). \] Because the one part of this equation, after having interchanged the quantities $\alpha$ and $\beta$, remains the same, it has to be \[ 14. ~~~~\frac{x^{\alpha}}{\Pi(\alpha-1)} \int_0^{\infty} \frac{u^{\alpha-1}\cdot e^{-ux}\cdot \diff{d}u}{(1+u)^{\beta}}=\frac{x^{\beta}}{\Pi(\beta-1)} \int_0^{\infty} \frac{u^{\beta-1}\cdot e^{-ux} \cdot \diff{d}u}{(1+u)^{\alpha}}. \] If the transformation, that equation (7.) contains, is applied to formula (13.), it is \[ ~~~~~~~~~~~~~~~15. ~~~\frac{x^{\alpha}}{\Pi(\alpha-1)} \int_0^{\infty} \frac{u^{\alpha-1}\cdot e^{-ux}\cdot \diff{d}u}{(1+u)^{\beta}}=\chi(\alpha, \beta, x). 
\] Because the series $\chi(\alpha, \beta, x)$ belongs to the class of semiconvergent series, it seems to be necessary, that formula (15.) is reinforced by a proof, from which it becomes clear at the same time, that by computation of a certain number of the first terms of this series, the proximate value of this integral is found. For this purpose I use the known equation \[ 1-\frac{\beta}{1}z+\frac{\beta(\beta+1)}{1 \cdot 2}z^2-\cdots \cdot (-1)^{k-1}\frac{\beta(\beta+1)\cdots \cdot (\beta+k-2)}{1 \cdot 2 \cdots \cdot (k-1)}z^{k-1} \] \[ =\frac{1}{(1+z)^{\beta}}-\frac{(-1)^{k}\beta(\beta+1)\cdots \cdot (\beta+k-1)}{1 \cdot 2 \cdots \cdot (k-1)}z^{k} \int_0^1 \frac{(1-u)^{k-1} \diff{d}u}{(1+zu)^{\beta+k}}, \] it is by putting $z=\frac{v}{x}$, by multiplying by $v^{\alpha-1} \cdot e^{-v} \cdot \diff{d}v$, then by integrating from $v=0$ to $v=\infty$ and dividing by $\Pi(\alpha-1)$ \[ 16. ~~~ 1-\dfrac{\alpha \cdot \beta}{1 \cdot x}+\dfrac{\alpha(\alpha+1)\beta(\beta+1)}{1 \cdot 2 \cdot x^2}- \cdots \cdot (-1)^{k-1}\dfrac{\alpha(\alpha+1) \cdots \cdot (\alpha+k-2)\beta(\beta+1) \cdots \cdot (\beta+k-2)}{1 \cdot 2 \cdot 3 \cdots \cdot (k-1) \cdot x^{k-1}} \] \[ =\frac{1}{\Pi(\alpha-1)}\int_0^{\infty} \frac{v^{\alpha-1} \cdot e^{-v} \cdot \diff{d}v}{(1+\frac{v}{x})^{\beta}}-\tfrac{(-1)^k\beta(\beta+1) \cdots \cdot (\beta+k-1)}{\Pi(\alpha-1)1 \cdot 2 \cdot 3 \cdots \cdot (k-1) \cdot x^{k}} \int_0^1 \int_0^{\infty} \frac{(1-u)^{k-1} \cdot v^{\alpha+k-1} \cdot e^{-v} \cdot \diff{d}v \cdot \diff{d}u}{(1+\frac{uv}{x})^{\beta+k}}, \] this double integral along with its coefficients indicates the error, that is committed, if the integral \[ \frac{1}{\Pi(\alpha-1)}\int_0^{\infty} \frac{v^{\alpha-1} \cdot e^{-v} \cdot \diff{d}v}{(1+\frac{v}{x})^{\beta}}, ~~~~ \text{or, what is the same} ~~~~ \frac{x^{\alpha}}{\Pi(\alpha-1)}\int_0^{\infty} \frac{v^{\alpha-1} \cdot e^{-vx} \cdot \diff{d}v}{(1+v)^{\beta}} \] is computed by the first terms of that series, whose number is $k$, if $k$ 
is so large, that $\beta+k$ is positive, that quantity, we called the error, changes the sign at the same time as $k$ is converted into $k+1$, or, if a certain number of terms of that series is computed, this sum is either larger or smaller than the desired integral, but if the subsequent term of the series is added, this new sum is smaller than the desired integral, if the sum was larger, if that sum was smaller. Therefore the sums, which that series gives, are alternately too large and too small, and it becomes clear, that the proximate value is found, if the computation is extended to the smallest terms of the semiconvergent series. The same thing can be demonstrated from equation (16.) in this way.\\ Of course it is for positive $\beta+k$: \[ \int_0^1 \int_0^{\infty} \frac{(1-u)^{k-1} \cdot v^{\alpha+k-1} \cdot e^{-v} \cdot \diff{d}v \diff{d}u}{(1+\frac{uv}{x})^{\beta+k}}<\int_0^1 \int_0^{\infty} (1-u)^{k-1} \cdot v^{\alpha+k-1} \cdot e^{-v} \cdot \diff{d}v \diff{d}u \] and \[ ~~~~~~\int_0^1 \int_0^{\infty}(1-u)^{k-1} \cdot v^{\alpha+k-1} \cdot e^{-v} \cdot \diff{d}v \diff{d}u= \frac{\Pi(\alpha+k-1)}{k}, \]
so the error, which is expressed by that double integral, is always smaller than \[ ~~~~~~~~~~~~~~~~~~~~\frac{\beta(\beta+1) \cdots \cdot (\beta+k-1)\Pi(\alpha+k-1)}{1 \cdot 2 \cdot 3 \cdots \cdot (k-1) \cdot \Pi(\alpha-1) x^{k}}, \] because which is the first term neglected, it follows, that the error is always smaller than that term of the series, to which the summation is extended.\\[2mm]
After having put $\beta=1- \alpha$, equation (15.) is converted into this one: \[ ~~~~~~~~~~~~~~~~~~~~~~~~~\frac{x^{\alpha}}{\Pi(\alpha-1)} \int_0^{\infty} (u+u^2)^{\alpha-1} \cdot e^{-ux} \cdot \diff{d}u \] \[ ~~~~~~~=\frac{\Pi(2\alpha-2)}{\Pi(\alpha-1)}x^{1-\alpha} \cdot e^{\frac{x}{2}}\cdot \varphi(1-\alpha,2-2\alpha,x)+ \frac{\Pi(-2\alpha)}{\Pi(-\alpha)}x^{\alpha} \cdot \varphi(\alpha, 2\alpha,x), \] after having transformed which series by means of formula (6.), it is \[ ~~~~~~~~~~~~~~~~~~~~~~~~~\frac{x^{\alpha}}{\Pi(\alpha-1)} \int_0^{\infty} (u+u^2)^{\alpha-1} \cdot e^{-ux} \cdot \diff{d}u \] \[ ~~~~~~~=\frac{\Pi(2\alpha-2)}{\Pi(\alpha-1)}x^{1-\alpha} \cdot e^{\frac{x}{2}}\cdot \psi(\frac{3}{2}-\alpha,\frac{x^2}{16})+ \frac{\Pi(-2\alpha)}{\Pi(-\alpha)}x^{\alpha} \cdot \psi(\frac{1}{2}+\alpha,\frac{x^2}{16}), \] if further $x$ is changed into $4\sqrt{x}$ and $\alpha$ into $\alpha+\frac{1}{2}$, we have by a few reductions \[ ~~~~~~~~17. ~~~~~ \frac{2^{2\alpha+1} \cdot \sqrt{\pi} \cdot x^{\alpha} \cdot e^{-2\sqrt{x}}}{\Pi(\alpha-\frac{1}{2})} \int_0^{\infty} (u+u^2)^{\alpha-\frac{1}{2}} e^{-4u\sqrt{x}} \diff{d}u \] \[ ~~~~~~~~~~~=\Pi(\alpha-1)\psi(1-\alpha,x)+\Pi(-\alpha-1)x^{\alpha}\psi(1+\alpha,x), \] from there it follows by comparision to formula (12.) \[ \int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \cdot \diff{d}u=\frac{2^{2\alpha+1} \cdot \sqrt{\pi} \cdot x^{\alpha} \cdot e^{-2\sqrt{x}}}{\Pi(\alpha-\frac{1}{2})} \int_0^{\infty} (u+u^2)^{\alpha-\frac{1}{2}} e^{-4u\sqrt{x}} \diff{d}u, \] from this formula, or if you like it better, from formula (12.), after having put $\alpha=\frac{1}{2}$, this very simple value of the integral is easily deduced \[ ~~~~~~~~~~~~18.~~~~ \int_0^{\infty} e^{-u^2} \cdot e^{-\frac{x}{u^2}} \cdot \diff{d}u = \frac{\sqrt{\pi}}{2} \cdot e^{-2\sqrt{x}}. 
\] The integrals, that we just found, have multiple applications in Analysis, for the sake of an example in the integration of the Riccati equation, that, by means of easy substitutions can be changed into the form of equation (9.); I will not spend more time on these things, but will rather also settle the question about other similar integrals, whose first I chose to be this one: \[ ~~~~~~~~~~~~19.~~~ z= \int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v\cdot \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v. \] I always suppose the quantity $x$ to be positive, because its negative sign can be transferred to the quantity $\beta$. By differentiation of the quantity \[ ~~~~~~~~~~~~~~~~~~~~~~~\cos^{\alpha-1} v\cdot \sin(\frac{1}{2}x \tan{v}+\beta v) \] it is \[
\diff{d} (\cos^{\alpha-1} v\cdot \sin(\frac{1}{2}x \tan{v}+\beta v))= \cos^{\alpha-2} v\cdot \sin v \cdot \sin(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v \] \[ ~~~~~~~~~~~~~~~+(\frac{x}{2\cos^2 v}+\beta) \cos^{\alpha-1} v \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v, \] and by integrating between the boundaries $v=0$ and $v=\frac{\pi}{2}$ \begin{alignat*}{9}
&20.~~~~&& 0 && = -(\alpha-1)\int_0^{\frac{\pi}{2}}\cos^{\alpha-2} v\cdot \sin v \cdot \sin(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v&& && && && && \\
& && && +\frac{x}{2}\int_0^{\frac{\pi}{2}}\cos^{\alpha-3} v\cdot \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v && && && && \\
& && && +\beta\int_0^{\frac{\pi}{2}}\cos^{\alpha-1} v\cdot \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v, && && && && \\ \end{alignat*} it is further \begin{alignat*}{9} & \frac{\diff{d}z}{\diff{d}x}&&~~~~= \frac{1}{2} \int_0^{\frac{\pi}{2}}\cos^{\alpha-2} v\cdot \sin v \cdot \sin(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v,&&\\ & \frac{\diff{d}^2z}{\diff{d}x^2}&&~~~~= -\frac{1}{4} \int_0^{\frac{\pi}{2}}\cos^{\alpha-3} v\cdot \sin v \cdot \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v,&&\\ \end{alignat*} therefore \[ ~~~~~~~~~~~z-\frac{\diff{d}^2z}{\diff{d}x^2}= \int_0^{\frac{\pi}{2}}\cos^{\alpha-3} \cdot \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v, \] after having substituted which values, equation (20.) is converted into this one \[ ~~~~~~~~~~~21. ~~~~ 0= (x+2 \beta)z+4(\alpha-1)\frac{\diff{d}z}{\diff{d}x}-4x \frac{\diff{d}^2z}{\diff{d}x^2}, \] whose complete integral is: \[ ~~~~~~~~y= A \psi(\frac{\beta-\alpha+1}{2}, 1-\alpha, x)+Bx^{\alpha} \psi(\frac{\beta+\alpha+1}{2}, 1+\alpha, x), \] and because $z= e^{-\frac{x}{2}} \cdot y$, it is \[ ~~~~~~~~~~~22. ~~~~~ \int_0^{\frac{\pi}{2}}\cos^{\alpha-1} \cdot \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v \] \[ ~~~~= A \cdot \psi(\frac{\beta-\alpha+1}{2}, 1-\alpha, x)+Bx^{\alpha} \psi(\frac{\beta+\alpha+1}{2}, 1+\alpha, x). \] The determination of the constant $A$ is easily obtained by putting $x= \infty$, if $a$ is a positive quantity, the determination of the other constant on the other hand requires peculiar artifices; we will obtain both constants by this method: Let us multiply equation (22.) by $x^{\lambda-1} e^{-\frac{x}{2}} \diff{d}x$ and integrate between the boundaries $x=0$ and $x= \infty$, whereafter it is \begin{alignat*}{9}
&&& && \int_0^{\infty} \int_0^{\frac{\pi}{2}}\cos^{\alpha-1} v\cdot x^{\lambda-1} e^{-\frac{x}{2}} \cos(\frac{1}{2}x \tan{v}+\beta v)\diff{d}v \diff{d}x&& && && && && \\
& && && =A \int_0^{\infty} x^{\lambda-1} \cdot e^{-x} \psi(\frac{\beta-\alpha+1}{2}, 1-\alpha,x)\diff{d}x && && && && \\
& && && =B \int_0^{\infty} x^{\lambda+\alpha-1} \cdot e^{-x} \psi(\frac{\beta+\alpha+1}{2}, 1+\alpha,x)\diff{d}x. && && && && \\ \end{alignat*} The values of all of these integrals can be expressed by known functions, for it is \[ ~~~~~~~~~\int_0^{\infty} x^{c-1} \cdot e^{-x} \cdot \psi(a,b,x)\diff{d}x = \Pi(c-1)F(c,a,b,1), \] where $F$ denotes the known hypergeometric series, by which, after having expressed it by the the function $\Pi$, it is \[ \int_0^{\infty} x^{c-1} \cdot e^{-x} \cdot \psi(a,b,x)\diff{d}x = \frac{\Pi(c-1)\Pi(b-1)\Pi(b-a-c-1)}{\Pi(b-a-1)\Pi(b-c-1)}, \] further it is \[ \int_0^{\infty} x^{\lambda-1} \cdot e^{-\frac{x}{2}} \cos{(\frac{1}{2}x \tan{v}+\beta v)}\diff{d}x= 2^{\lambda} \Pi(\lambda-1) \cos^{\lambda} v \cdot \cos{(\lambda+\beta)v}, \] whose value is expressed by means of the function $\Pi$ in this way \[ ~~~~~~~~~~~~~~~~~~~~~~~~\frac{\pi \cdot \Pi(\lambda-1)\Pi(\alpha+\lambda-1)}{2^{\alpha} \Pi(\frac{\alpha-\beta-1}{2})\Pi(\frac{\alpha+\beta-1}{2}+\lambda)}, \] after having substituted all which values, equation (23.) is converted into this one: \[ ~~~~~~~~~~~~~~~~~~~~~~~~\frac{\pi \cdot \Pi(\lambda-1)\Pi(\alpha+\lambda-1)}{2^{\alpha} \Pi(\frac{\alpha-\beta-1}{2})\Pi(\frac{\alpha+\beta-1}{2}+\lambda)} \] \[ =A \frac{\Pi(\lambda-1)\Pi(-\alpha)\Pi(-\frac{\alpha+\beta+1}{2}-\lambda)}{\Pi(-\frac{\alpha+\beta+1}{2})\Pi(-\alpha-\lambda)}+B\frac{\Pi(\alpha+\lambda-1)\Pi(\alpha)\Pi(-\frac{\alpha+\beta+1}{2}-\lambda)}{\Pi(\frac{\alpha-\beta-1}{2})\Pi(-\lambda)}, \] this equation is easily reduced to this mre convenient form \[ ~~~~~~~~~\frac{\pi \cdot \cos(\frac{\alpha+\beta}{2})\pi}{2^{\alpha} \Pi(\frac{\alpha-\beta-1}{2})}= \frac{A \cdot \Pi(-\alpha)\sin(\alpha+\lambda)\pi}{\Pi(-\frac{\alpha+\beta+1}{2})}+\frac{B \cdot \Pi(\alpha)\sin \lambda \pi}{\Pi(-\frac{\alpha-\beta-1}{2})}, \] which, because it holds for any arbitrary value of the quantity $\lambda$, is converted into these two \begin{alignat*}{9}
&& \frac{\pi \cos{\frac{\alpha+\beta}{2}\pi}}{2^{\alpha} \Pi(\frac{\alpha-\beta-1}{2})}&&=& \frac{A \cdot \sin {\alpha \pi} \Pi(-\alpha)}{\Pi(-\frac{\alpha+\beta+1}{2})},&&\\
&& -\frac{\pi \sin{\frac{\alpha+\beta}{2}\pi}}{2^{\alpha} \Pi(\frac{\alpha-\beta-1}{2})}&&=& \frac{A \cdot \cos {\alpha \pi} \Pi(-\alpha)}{\Pi(-\frac{\alpha+\beta+1}{2})}&&+\frac{B \cdot \Pi(\alpha)}{\Pi(\frac{\alpha-\beta-1}{2})},\\ \end{alignat*} from which the values of the constants $A$ and $B$ are easily found \[ ~~~~~A= \frac{\pi \cdot \Pi(\alpha-1)}{2^{\alpha} \Pi(\frac{\alpha-\beta-1}{2})\Pi(\frac{\alpha+\beta-1}{2})}, ~~~~~~~ B= -\frac{\pi \cdot \cos(\frac{\alpha-\beta}{2})\pi}{2^{\alpha} \cdot \sin{\alpha \pi} \Pi(\alpha)}, \] after having finally substituted which values of the constants in equation (22.), it is \[ ~~~~~~~~~~24. ~~~~ \int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v \cdot \cos(\frac{1}{2}x \tan{v} +\beta v)\diff{d}v \] \[ =\frac{\pi \cdot \Pi(\alpha-1)e^{-\frac{x}{2}} \cdot \psi(\frac{\beta-\alpha+1}{2},1-\alpha,x)}{2^{\alpha} \cdot \Pi(\frac{\alpha-\beta-1}{2})\Pi(\frac{\alpha+\beta-1}{2})}-\frac{\pi \cdot \cos{\frac{\alpha-\beta}{2}\pi} \cdot x^{\alpha} \cdot e^{-\frac{x}{2}} \psi(\frac{\beta+\alpha+1}{2},1+\alpha,x)}{2^{\alpha} \sin{\alpha \pi} \Pi(\alpha)}. \] Very simple special cases of this formula are: \begin{alignat*}{9} &&27. ~~~~ &&\int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v \cdot \cos(x \tan{v} -(\alpha+1)v)\diff{d}v &=\frac{\pi \cdot x^{\alpha} \cdot e^{-x}}{\Pi(\alpha)},&&\\ &&28. ~~~~ &&\int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v \cdot \cos(x \tan{v} +(\alpha+1)v)\diff{d}v &=0, \\ \end{alignat*} of which the one is obtained, after having put $\beta=-\alpha-1$, the other, after having put $\beta=\alpha+1$. From the connected formulas (25.) and (26.) also these ones follow: \begin{alignat*}{9} &&27. ~~~~ &&\int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v \cdot \cos(x \tan{v})\cos(\alpha+1)v\cdot \diff{d}v &=\frac{\pi \cdot x^{\alpha} \cdot e^{-x}}{2\Pi(\alpha)},&&\\ &&28. ~~~~ &&\int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v \cdot \sin(x \tan{v})\sin(\alpha+1)v\cdot \diff{d}v &=\frac{\pi \cdot x^{\alpha} \cdot e^{-x}}{2\Pi(\alpha)}. \\ \end{alignat*}\\
The formulas (25.) and (26.) agree with the formula, found by the Ill. Laplace, which others later demonstrated in other ways, confer this journal's volume $XIII$, p. 231, where the Cl. Liouville \cite{3}, by the method of differentiation, found for arbitrary parameters \[ ~~~~~~~~~~~~~~~~~~~~~~\int_{-\infty}^{\infty} \frac{e^{\alpha\sqrt{-1}} \cdot \diff{d}\alpha}{(x+\alpha \sqrt{-1})^{\mu}}= \frac{2\pi \cdot e^{-x}}{\Gamma(\mu)}. \] Formula (24.) gives another very simple integral, after having put $\beta=\alpha-1$ \[ ~~~~~~~~29. ~~~ \int_0^{\frac{\pi}{2}} \cos^{\alpha-1} v \cdot \cos{(x\tan{v}+(\alpha-1)v)}\diff{d}v= \frac{\pi e^{-x}}{2}. \] The two series, that are contained in the one part of equation (24.), after having put $\beta$, become $\varphi(\frac{1-\alpha}{2}, 1-\alpha,x)$ and $\varphi(\frac{1+\alpha}{2}, 1+\alpha,x)$, and they can be, by means of formula (6.), transformed into series of the kind $\psi$. After having done the transformations, if one changes $\alpha$ into $2\alpha$ and $x$ into $4\sqrt{x}$, this formula emerges \[ ~~~~~~~~30. ~~~~~ \frac{2\Pi(\alpha-\frac{1}{2})}{\sqrt{\pi}} \int_0^{\frac{\pi}{2}} \cos^{2\alpha-1} v \cdot \cos(2\sqrt{x} \tan{v})\diff{d}v \] \[ ~~~~~~~= \Pi(\alpha-1)\psi(1-\alpha,x)+\Pi(-\alpha-1) \cdot x^{\alpha} \cdot \psi(1+\alpha,x), \] from this, by comparison to formula (12.), it is \[ 31. ~~~~ \frac{2\Pi(\alpha-\frac{1}{2})}{\sqrt{\pi}} \int_0^{\frac{\pi}{2}} \cos^{2\alpha-1} v \cdot \cos(2\sqrt{x} \tan{v})\diff{d}v= \int_0^{\infty} u^{\alpha-1} \cdot e^{-u} \cdot e^{-\frac{x}{u}} \cdot \diff{d}u. \]\\
Likewise the connection of the two integrals can be demonstrated, that are contained in the equations (13.) and (24.); for this formula (24.), if $\alpha-\beta$ is put in the place of $\alpha$ and $\alpha+\beta-1$ in the place of $\beta$ and multiplied by $\frac{1}{\pi}\Pi(-\beta) \cdot 2^{\alpha} \cdot e^{\frac{x}{2}} \cdot x^{\beta}$, obtains this form \[ 32. ~~~\frac{2\Pi(-\beta)\cdot e^{\frac{x}{2}}\cdot x^{\beta}}{\pi}\int_0^{\frac{\pi}{2}} (2\cos v)^{\alpha-\beta-1} \cdot \cos(\frac{1}{2}x \tan{v} +(\alpha+\beta-1)v)\diff{d}v \] \[ =\frac{\Pi(\alpha-\beta-1)}{\Pi(\alpha-1)}x^{\beta} \psi(\beta, \beta-\alpha+1,x)+\frac{\Pi(\beta-\alpha-1)}{\Pi(\beta-1)}x^{\alpha} \psi(\alpha, \alpha-\beta+1,x), \] after having compared which to formula (13.), it is seen to be \[ ~~~~~~~~~~~~~~~~~~~~33. ~~~~ \int_0^{\infty} \frac{u^{\beta} \cdot e^{-ux} \cdot \diff{d}u}{(1+u)^{\alpha}} \] \[ ~~~~~~=\frac{2 \cdot e^{x}{2}}{\sin{\beta \pi}}\int_0^{\frac{\pi}{2}} (2\cos v)^{\alpha-\beta-1} \cdot \cos(\frac{1}{2}x \tan{v} +(\alpha+\beta-1)v)\diff{d}v, \] moreover, since the one part of equation (32.) can be transformed by means of formula (7.), it is \[ 34. ~~~~ \frac{2\Pi(-\beta)e^{\frac{x}{2}}\cdot x^{\beta}}{\pi}=\int_0^{\frac{\pi}{2}} (2\cos v)^{\alpha-\beta-1} \cdot \cos(\frac{1}{2}x \tan{v} +(\alpha+\beta-1)v)\diff{d}v= \chi(\alpha, \beta,x). \] We will also treat this more general integral in the same way \[ ~~~~~~~~~~~~~~y= \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} \cdot \cos(x\tan{v}+\gamma v)\diff{d}v \] and we will chose the cases, in which it can be expressed by the series mentioned above. We also suppose the quantity $x$ always to be positive in this integral, because it is possible, to transfer its negative sign to the quantity $\gamma$. By differentiating the formula $\sin^{\alpha} v \cdot \cos^{\beta} \cdot \cos(x\tan{v}+\gamma v)$, and integrating from $u=0$ to $u=\frac{\pi}{2}$ thereafter, it is \begin{alignat*}{9}
& && 0 && = \alpha \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta+1} \cdot \cos(x\tan{v}+\gamma v)\diff{d}v&& && && && && \\
& && && -\beta \int_0^{\frac{\pi}{2}} \sin^{\alpha+1} v \cdot \cos^{\beta-1} \cdot \cos(x\tan{v}+\gamma v)\diff{d}v && && && && \\
& && && -x \int_0^{\frac{\pi}{2}} \sin^{\alpha} v \cdot \cos^{\beta-2} \cdot \sin(x\tan{v}+\gamma v)\diff{d}v && && && && \\
& && && -\gamma \int_0^{\frac{\pi}{2}} \sin^{\alpha} v \cdot \cos^{\beta} \cdot \sin(x\tan{v}+\gamma v)\diff{d}v, && && && && \\ \end{alignat*} from this equation, if the integrals are expressed by $y$ and its differentials, this differential equation of third order is easily deduced \[ ~~~~~~~~~~ 35. ~~~ 0= \alpha y +(\gamma+x)\frac{\diff{d}y}{\diff{d}x}+(\beta-2)\frac{\diff{d}^2y}{\diff{d}x^2}-x\frac{\diff{d}^3y}{\diff{d}x^3}, \] if one puts \[ ~~~~~~~~~~~~~~~~36. ~~~~ A_0+A_1x+A_2x^2+A_3x^3+\cdots, \] the conditional equations are easily found, that have to hold between the coefficients of this series, that this series satisfies the differential equation \begin{alignat*}{9}
& ~~~~~~~&& \alpha &&A_0 &&+\gamma \cdot 1 \cdot A_1 && -1 \cdot 2 \cdot (2-\beta)A_2, &&\\
& && (\alpha +1)&& A_1 &&+\gamma \cdot 2 \cdot A_2 && -2 \cdot 3 \cdot (3-\beta)A_3, &&\\ \end{alignat*} and in general \[ ~~~ 37. ~~~~~ (\alpha+k)A_k+\gamma \cdot (k+1) A_{k+1}- (k+1)(k+2)(k+2-\beta)A_{k+2}. \] If one puts in the same way \[ ~~~~~~~~~~~~~~ 38. ~~~~ y= x^{\beta}(B_0+B_1x+B_2x^2+B_3x^3+\cdots), \] one finds these relations for the coefficients \begin{alignat*}{9}
& ~~~~~~~&& &&+\gamma \cdot \beta \cdot B_0 &&-\beta(\beta+1) \cdot 1 \cdot B_1, &&\\
& && (\alpha +\beta)B_0&& +\gamma(\beta+1)B_1 &&+(\beta+1)(\beta+2) \cdot 2 \cdot B_2, &&\\ \end{alignat*} and in general \[ 39. ~~~ (\alpha+\beta+k)B_k+\gamma(\beta+k+1)B_{k+1}-(\beta+k+1)(\beta+k+2)(k+2)B_{k+2}, \] from this it is clear, that the complete integral of equation (35.) is \[ ~~~~~ 40. ~~~ y= A_0+A_1x+A_2x^2+\cdots+x^{\beta}(B_0+B_1x+B_2x^2+\cdots), \] for, by the equations (37.), two of the quantities $A_0$,$A_1$,$A_2$ etc, and by the equations (39.) one of the quantities $B_0$,$B_1$,$B_2$ etc. remain arbitrary, so that this integral contains three arbitrary constants. Therefore, if the integral mentioned above is resubstituted, it is \[ ~~~~~~~~~~~~~~~~ \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} \cdot \cos(x\tan{v}+\gamma v)\diff{d}v \] \[ ~~~~~~~~~~~=A_0+A_1x+A_2x^2+\cdots+x^{\beta}(B_0+B_1x+B_2x^2+\cdots). \] From the relations of the coefficients it is easily seen, that these series and this general integral are higher transcendentals than those, that we undertake to treat here; but they nevertheless coincide with those in certain special cases. At first, if we suppose $\gamma= \alpha+\beta$, it follows from the equations (39.) \begin{alignat*}{9}
&~~~~~~~~~&& B_1 &&=\frac{\alpha+\beta}{1(1+\beta)}B_0, &&\\
& && B_2 &&=\frac{(\alpha+\beta)(\alpha+\beta+1)}{1\cdot 2(1+\beta)(2+\beta)}B_0, &&\\
& && B_3 &&=\frac{(\alpha+\beta)(\alpha+\beta+1)(\alpha+\beta+2)}{1\cdot 2 \cdot 3(1+\beta)(2+\beta)(3+\beta)}B_0, &&\\
& &&\text{etc.}&& ~~~~~~~~~~~~\text{etc.}&&\\ \end{alignat*} Further, if $\beta$ is positive, after having put $x=0$, it follows from equation (41.) \[ A_0= \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \cos((\alpha+\beta)v \cdot \diff{d}v= \frac{\cos{\frac{\alpha \pi}{2}} \Pi(\alpha-1)\Pi(\beta-1)}{\Pi(\alpha-\beta-1)}, \] if equation (41.) is differentiated with respect to $x$ in the same way and one puts $x=0$ afterwards, it is \[ A_1= -\int_0^{\frac{\pi}{2}} \sin^{\alpha} v \cdot \cos^{\beta-2} v \cdot \sin((\alpha+\beta)v \cdot \diff{d}v= -\frac{\cos{\frac{\alpha \pi}{2}} \Pi(\alpha)\Pi(\beta-2)}{\Pi(\alpha-\beta-1)}, \] therefore it is \[ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~A_1=\frac{\alpha}{1(1-\beta)}A_0, \] from there it easily follows from the equations (37.) \begin{alignat*}{9}
&~~~~~~~~~~~~~~~~~~~~~~~~~&& A_2 &&=\frac{\alpha(\alpha+1)A_0}{1 \cdot 2(1-\beta)(2-\beta)}, &&\\
& && A_1 &&=\frac{\alpha(\alpha+1)[\alpha+2)A_0}{1 \cdot 2 \cdot 3(1-\beta)(2-\beta)(3-\beta)}, &&\\
& &&\text{etc.}&& ~~~~~~~~~~~~\text{etc.}&&\\ \end{alignat*} Therefore these two series, by which we expressed our integral, belong to the class of series, that we denoted by $\varphi$ above, in this case $\gamma=\alpha+\beta$, and formula (41.) is converted into this one: \[ ~~~~~~~~~~~~~~~~ \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} \cdot \cos(x\tan{v}+(\alpha+\beta) v)\diff{d}v \] \[ ~~~=\frac{\cos{\frac{\alpha \pi}{2}}\Pi(\alpha-1)\Pi(\beta-1)}{\Pi(\alpha+\beta-1)} \varphi(\alpha, 1-\beta,x)+B_0x^{\beta}\varphi(\alpha+\beta,1+\beta,x). \] In the determination of the constant $B_0$ we will use the same method as above in the determination of the constants of equation (22.). By multiplying by $x^{\lambda-1} \cdot e^{-x} \diff{d}x$ and integrating between the boundaries $0$ and $\infty$, it is \[ ~~~~~~~~~~\Pi(\lambda-1) \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta+\lambda-1} \cdot \cos(\alpha+\beta+\lambda) v\diff{d}v \] \[ =\frac{\cos{\frac{\alpha \pi}{2}}\Pi(\alpha-1)\Pi(\beta-1)\Pi(\lambda-1)}{\Pi(\alpha+\beta-1)}F(\lambda, \alpha, 1-\beta,1)+ B_0 \Pi(\beta+\lambda-1)F(\lambda+\beta, \alpha+\beta, 1+\beta,1), \] and after having expressed the hypergeometric series along with the integral by the function $\Pi$, it is \[ ~~~~~~~~~~~~~~~~~~~\frac{\cos{\frac{\alpha \pi}{2}}\Pi(\lambda-1)\Pi(\alpha-1)\Pi(\beta+\lambda-1)}{\Pi(\alpha+\beta+\lambda-1)} \] \[ = \frac{\cos{\frac{\alpha \pi}{2}}\Pi(\lambda-1)\Pi(\alpha-1)\Pi(\beta+\lambda-1)\Pi(-\beta)\Pi(-\beta-\alpha-\alpha)}{\Pi(\alpha+\beta+\lambda-1)\Pi(-\alpha-\beta)\Pi(-\beta-\lambda)} \] \[ ~~~~~~~~~~~~~~~~+B_0\frac{\Pi(\beta+\lambda-1)\Pi(\beta)\Pi(-\beta-\alpha-\lambda)}{\Pi(-\alpha)\Pi(-\beta)}, \] after some reductions the quantity $\lambda$, because it has to, vanishes completely, and this very simple value of the constant $B_0$ emerges \[ ~~~~~~~~~~~~~~~~~~~~~~~~~~~B_0 = \cos{\frac{\alpha \pi}{2}}\Pi(-\beta-1), \] after having finally substituted which value, we have \[ 
~~~~~~ 42. ~~~ \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \cos(x\tan{v}+(\alpha+\beta)v)\diff{d}v \] \[ =\frac{\cos{\frac{\alpha \pi}{2}}\Pi(\alpha)\Pi(\beta-1)}{\Pi(\alpha+\beta-1)}\varphi(\alpha,1-\beta,x)+x^{\beta}\cos{\frac{\alpha \pi}{2}}\Pi(-\beta-1)\varphi(\alpha+\beta,1+\beta,x). \] and after having compared these formulas to each other, one sees the connection of the two integrals \begin{alignat*}{9} & 43. ~~~~ && ~&& \cos{\frac{\alpha \pi}{2}}\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \sin(x\tan{v}+(\alpha+\beta)v)\diff{d}v\\ & &&=&& \sin{\frac{\alpha \pi}{2}}\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \cos(x\tan{v}+(\alpha+\beta)v)\diff{d}v,\\ \end{alignat*} which formula can also be exhibited in this way \[ ~~~ 44. ~~~ \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \sin(x\tan{v}+(\alpha+\beta)v-\frac{\alpha \pi}{2})\diff{d}v=0 \] The special case of formula (42.), in which $\alpha=0$, is worth to be noted \[ ~~~~~~~~~~~~~~ 45. ~~~~ \int_0^{\frac{\pi}{2}} \frac{\cos^{\beta-1} \cdot \sin(x \tan{v}+\beta v}{\sin{v}}\diff{d}v= \frac{\pi}{2}, \] of which the Cl. Liouville found the special case, corresponding to the value $x=0$, in this journal, volume $XIII$, page 232 \cite{3}. Moreover, having compared the formulas (42.) and (13.), one sees the connection of this integral to those, that we treated above, without any difficulty \[ ~~~~~~~~~46. ~~~~~\frac{\cos{\frac{\alpha \pi}{2}} \Pi(\alpha-1)}{\Pi(\alpha+\beta-1)}x^{\beta} \int_0^{\infty} \frac{u^{\alpha+\beta-1}\cdot e^{-ux}\diff{d}u}{(1+u)^{\alpha}} \] \[ ~~~~~=\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \cos(x\tan{v}+(\alpha+\beta)v)\diff{d}v. \] Another case, in which the series of formula (41.) are converted into series denoted by the character $\psi$, is $\gamma= -\alpha-\beta$, for in this case formula (41.) 
is easily found by the same method as above, to be converted into this one: \[ ~~~~~~~~~=\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \cos(x\tan{v}-(\alpha+\beta)v)\diff{d}v \] \[ = \frac{\cos{\frac{\alpha \pi}{2}}\Pi(\alpha-1) \Pi(\beta-1)}{\Pi(\alpha+\beta-1)}\varphi(\alpha, 1-\beta, -x)+B_0x^{\beta} \varphi(\alpha+\beta, 1+\beta, -x), \] but in this case the constant $B_0$ obtains another value, that we find by muliplying by $x^{\alpha+\beta} \cdot e^{-x} \diff{d}x$ and by integrating between the boundaries $x=0$ and $x=\infty$, after having done those integrations, it is \[ ~~~~~~~~~~~\Pi(\alpha+\beta-1) \int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\alpha+2\beta-1}v \cdot \diff{d} v \] \[ ~~~~~~~=\cos{\frac{\alpha \pi}{2}} \Pi(\alpha-2) \Pi(\beta-1)F(\alpha+\beta, \alpha, 1-\beta, -1) \] \[ ~~~~~~~~~~+B_0 \Pi(\alpha+2\beta-1)F(\alpha+2\beta, \alpha+\beta, 1+\beta, -1), \] these hypergeoemtric series, whose forth element is $=-1$, can also be expressed by the function $\Pi$ by means of the formula \[ ~~~~~~~~~~~~~~~F(\alpha, \beta, \alpha-\beta+1, -1) = \frac{2^{-\alpha} \sqrt{\pi} \Pi(\alpha-\beta)}{\Pi(\frac{\alpha}{2}-\beta)\Pi(\frac{\alpha-1}{2})}, \] that I proved in the commentary about the hypergeometric series in this journal volume $XV$ page 135 \cite{2}. From this, if that integral and the hypergeometric series are expressed by the function $\Pi$, it emerges after certain easy reductions: \[ ~~~~~~~~~~~~~~~~~~~~~~~B_0= \cos(\frac{\alpha}{2}+\beta)\pi \cdot \Pi(-\beta-1), \] and after having substituted the value of the constant, it is: \[ ~~~ 47. ~~~~\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \cos(x\tan{v}-(\alpha+\beta)v)\diff{d}v \] \[ = \frac{\cos{\frac{\alpha \pi}{2}}\Pi(\alpha-1) \Pi(\beta-1)}{\Pi(\alpha+\beta-1)}\varphi(\alpha, 1-\beta, -x)+x^{\beta}\cos(\frac{\alpha}{2}+\beta)\pi \cdot \Pi(-\beta-1) \varphi(\alpha+\beta, 1+\beta, -x). 
\] A similar formula is deduced from this one, by changing $\alpha$ into $\alpha-1$, $\beta$ into $\beta+1$ and differentiating with respect to the variable $x$ \[ ~~~ 48. ~~~~\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \sin(x\tan{v}-(\alpha+\beta)v)\diff{d}v \] \[ = -\frac{\sin{\frac{\alpha \pi}{2}}\Pi(\alpha-1) \Pi(\beta-1)}{\Pi(\alpha+\beta-1)}\varphi(\alpha, 1-\beta, -x)-x^{\beta}\sin(\frac{\alpha}{2}+\beta)\pi \cdot \Pi(-\beta-1) \varphi(\alpha+\beta, 1+\beta, -x). \] These formulas (47.) and (48.) can easily be combined in two ways like this, that they obtain these simpler forms: \[ ~~~ 49. ~~~~\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \sin(x\tan{v}-(\alpha+\beta)v+(\frac{\alpha}{2}+\beta)\pi)\diff{d}v \] \[ ~~~~~~~~~~~~~~~~~~~~~~= \frac{\pi \Pi(\alpha-1)\varphi(\alpha, 1-\beta,-x)}{\Pi(-\beta)\Pi(\alpha+\beta-1)}, \] \[ ~~~ 50. ~~~~\int_0^{\frac{\pi}{2}} \sin^{\alpha-1} v \cdot \cos^{\beta-1} v \cdot \sin(x\tan{v}-(\alpha+\beta)v+\frac{\alpha \pi}{2})\diff{d}v \] \[ ~~~~~~~~~~~~~~~~~~~~~~= \frac{\pi x^{\beta}}{\Pi(\beta)}\varphi(\alpha+\beta, 1+\beta, -x). \] In all these integrals, that were treated here, as we already mentioned earlier, $x$ has to be a positive quantity, but if $x$ is supposed to be negative, all sums found would be wrong; in this the integral of equation (50.) is worth to be noted, that, for positive $x$, is equal to that series, but vanishes for negative $x$, confer equation (44.).\\[5mm]
Legnica, in the month of April, 1837
\end{document} |
\begin{document}
\title{Kazhdan-Lusztig parameters and extended quotients} \author{Anne-Marie Aubert, Paul Baum and Roger Plymen} \date{} \maketitle
\begin{abstract}The Kazhdan-Lusztig parameters are important parameters in the representation theory of $p$-adic groups and affine Hecke algebras. We show that the Kazhdan-Lusztig parameters have a definite geometric structure, namely that of the extended quotient $T{/\!/} W$ of a complex torus $T$ by a finite Weyl group $W$. More generally, we show that the corresponding parameters, in the principal series of a reductive $p$-adic group with connected centre, admit such a geometric structure. This confirms, in a special case, a recent geometric conjecture in \cite{ABP}.
In the course of this study, we provide a unified framework for Kazhdan-Lusztig parameters on the one hand, and Springer parameters on the other hand. Our framework contains a complex parameter $s$, and allows us to \emph{interpolate} between $s = 1$ and $s = \sqrt q$. When $s = 1$, we recover the parameters which occur in the Springer correspondence; when $s = \sqrt q$, we recover the Kazhdan-Lusztig parameters. \end{abstract}
\section{Introduction} The Kazhdan-Lusztig parameters are important parameters in the representation theory of $p$-adic groups and affine Hecke algebras. We show that the Kazhdan-Lusztig parameters have a definite geometric structure, namely that of the extended quotient $T{/\!/} W$ of a complex torus $T$ by a finite Weyl group $W$. More generally, we show that the corresponding parameters, in the principal series of a reductive $p$-adic group with connected centre, admit such a geometric structure. This confirms, in a special case, a recent geometric conjecture in \cite{ABP}.
In the course of this study, we provide a unified framework for Kazhdan-Lusztig parameters on the one hand, and Springer parameters on the other hand. Our framework contains a complex parameter $s$, and allows us to \emph{interpolate} between $s = 1$ and $s = \sqrt q$. When $s = 1$, we recover the parameters which occur in the Springer correspondence; when $s = \sqrt q$, we recover the Kazhdan-Lusztig parameters, see \S5. Here, $q = q_F$ is the cardinality of the residue field of the underlying local field $F$.
Let $\mathcal{G}$ denote a reductive split $p$-adic group with connected centre, maximal split torus $\mathcal{T}$. Let $G$, $T$ denote the Langlands dual of $\mathcal{G}$, $\mathcal{T}$. Then the quotient variety $T/W$ plays a central role. For example, we have the Satake isomorphism \[ \mathcal{H}(\mathcal{G}, \mathcal{K}) \simeq \mathcal{O}(T/W) \] where $\mathcal{O}(T/W)$ denotes the coordinate algebra of $T/W$, see \cite[2.2.1]{Sh}, and ${\mathcal H}(\mathcal{G}, \mathcal{K})$ denotes the algebra (under convolution) of $\mathcal{K}$-bi-invariant functions of compact support on $\mathcal{G}$, where $\mathcal{K} = \mathcal{G}(\mathfrak{o}_F)$ . In this article, we will show that the \emph{extended quotient} plays a central role in the context of the Kazhdan-Lusztig parameters.
We will prove that the extended quotient $T{/\!/} W$ is a model for the Kazhdan-Lusztig parameters, see \S4. More generally, let \[ {\mathfrak s} = [\mathcal{T}, \chi]_{\mathcal{G}} \] be a point in the Bernstein spectrum of $\mathcal{G}$. We prove that the extended quotient $T{/\!/} W^{{\mathfrak s}}$ attached to ${\mathfrak s}$ is a model of the corresponding parameters attached to ${\mathfrak s}$. This is our main result, Theorem 4.1. \emph{The principal series of a reductive $p$-adic group with connected centre has a definite geometric structure. The principal series is a disjoint union: each component is the extended quotient of the dual torus $T$ by the finite Weyl group $W^{{\mathfrak s}}$ attached to ${\mathfrak s}$.} This confirms, in a special case, a recent geometric conjecture in \cite{ABP}.
We also show in \S4 that our bijection is compatible with base change, in the special case of the irreducible smooth representations of ${\rm GL}(n)$ which admit nonzero Iwahori fixed vectors.
The details of our interpolation between Springer parameters and Kazhdan-Lusztig parameters will be given in \S5. Our formulation creates a projection \[ \pi_{\sqrt q} : T{/\!/} W \to T/W \] which provides a model of the \emph{infinitesimal character}.
We conclude in \S6 with some carefully chosen examples.
Since the crossed product algebra ${\mathcal O}(T)\rtimes W$ is isomorphic to \[\mathbb{C}[X(T)]\rtimes W\,\simeq\,\mathbb{C}[X(T)\rtimes W],\]
we obtain a bijection \[{\rm Prim}\,\mathbb{C}[X(T)\rtimes W]\to T{/\!/} W\] where ${\rm Prim}$ denotes primitive ideals. By composing this bijection with the bijection $\mu$ in Theorem 4.1, we finally get a bijection \[{\rm Prim}\,\mathbb{C}[X(T)\rtimes W]\to{\mathfrak P}(G)\] where ${\mathfrak P}(G)$ denotes the Kazhdan-Lusztig parameters. Let ${\mathcal I}$ be a standard Iwahori subgroup in ${\mathcal G}$ and let ${\mathcal H}({\mathcal G},{\mathcal I})$ denote the corresponding Iwahori-Hecke algebra, {\it i.e.,\,} the algebra (for the convolution product) of compactly supported ${\mathcal I}$-biinvariant functions on ${\mathcal G}$. The algebra is isomorphic to \[ {\mathcal H}(X(T)\rtimes W,q) \] the Hecke algebra of the extended affine Weyl group $X(T)\rtimes W$, with parameter $q$. The simple modules of ${\mathcal H}({\mathcal G},{\mathcal I})$ are parametrized by ${\mathfrak P}(G)$ \cite{KL}.
Hence ${\mathfrak P}(G)$ provides a parametrization of the simple modules of both the Iwahori-Hecke algebra ${\mathcal H}(X(T)\rtimes W,q)$ and of the group algebra of $X(T)\rtimes W$ (that is, the algebra ${\mathcal H}(X(T)\rtimes W,1)$).
Note that the existence of a bijection between these sets of simple modules was already proved by Lusztig (see for instance \cite[p.~81, assertion~(a)]{LuAst}). Lusztig's construction needs to pass through the asymptotic Hecke algebra $J$, while
we have replaced the use of $J$ by the use of the extended quotient $T{/\!/} W$ (which is much simpler to construct).
\section{Extended quotients} Let $\mathcal{O}(T)$ denote the coordinate algebra of the complex torus $T$. In noncommutative geometry, one of the elementary, yet fundamental, concepts is that of \emph{noncommutative quotient} \cite[Example 2.5.3]{K}. The \emph{noncommutative quotient} of $T$ by $W$ is the crossed product algebra \[ \mathcal{O}(T) \rtimes W. \] This is a noncommutative unital $\mathbb{C}$-algebra. We need to filter this idea through periodic cyclic homology. We have an isomorphism \[ {\rm HP}_*(\mathcal{O}(T) \rtimes W) \simeq H^*(T{/\!/} W ; \mathbb{C}) \] where ${\rm HP}_*$ denotes periodic cyclic homology, $H^*$ denotes cohomology, and $T{/\!/} W$ is the extended quotient of $T$ by $W$, see \cite{B}. We recall the definition of the extended quotient $T{/\!/} W$. \begin{defn} Let \[ \widetilde{T} = \{(t,w) \in T \times W : w \cdot t = t\}. \] The extended quotient is the quotient \[ T{/\!/} W : = \widetilde{T}/W \] where $W$ acts via $\alpha(t,w) = (\alpha \cdot t, \alpha w \alpha^{-1})$ with $\alpha \in W$. \end{defn}
Let $W(t)$ denote the isotropy subgroup of $t$. Let ${\rm conj} (W(t))$ denote the set of conjugacy classes in $W(t)$, and let $[w]$ denote
the conjugacy class of $w$ in $W(t)$. The map \[ \{(t,w) : t \in T, w \in W(t)\} \to \{(t,c) : t \in T, c \in {\rm conj} (W(t))\} \] \[
(t,w) \mapsto (t,[w]) \] induces a canonical bijection \[ \{(t,w) : t \in T, w \in W(t)\}/ W \to \{(t,c) : t \in T, c \in {\rm conj}(W(t))\}/ W \] where $W$ acts via $\alpha (t,c) = ( \alpha \cdot t, [ \alpha x \alpha^{-1}])$ with $x \in c$.
Let ${\rm Irr}(W(t))$ denote the set of equivalence classes of irreducible representations of $W(t)$. A choice of bijection between ${\rm conj}(W(t))$ and ${\rm Irr}(W(t))$ then creates a bijection \[ T{/\!/} W \simeq \{( t, \tau) : t \in T, \tau \in {\rm Irr}(W(t))\}/ W
\] where $W$ acts via $\alpha (t, \tau) = (\alpha \cdot t, \alpha_*(\tau))$. Here, $\alpha_*(\tau)$ is the push-forward of $\tau$ to an irreducible representation of $W(\alpha \cdot t)$.
This leads us to
\begin{defn}
The extended quotient of the second kind is
\[ (T{/\!/} W)_2: = \{( t, \tau) : t \in T, \tau \in {\rm Irr}(W(t))\}/ W
\]
\end{defn}
We then have a non-canonical bijection
\[ T{/\!/} W \simeq (T{/\!/} W)_2. \]
Let $T^w$ denote the fixed set $\{t \in T : w \cdot t = t\}$, and let $Z(w)$ denote the centralizer of $w$ in $W$. We have \begin{align} \label{eqn:(1)} T{/\!/} W = \bigsqcup T^w /Z(w) \end{align}
where one $w$ is chosen in each conjugacy class in $W$. Therefore $T{/\!/} W$ is a complex affine algebraic variety. The number of irreducible components in $T{/\!/} W$ is bounded below by $|{\rm conj}(W)|$.
The Jacobson topology on the primitive ideal spectrum of $\mathcal{O}(T) \rtimes W$ induces a topology on $(T{/\!/} W)_2$ such that the identity map \[ T{/\!/} W \to (T{/\!/} W)_2 \] is continuous. From the point of view of noncommutative geometry \cite{K}, the extended quotient of the second kind is a \emph{noncommutative complex affine algebraic variety}.
The transformation groupoid $ T \rtimes W$ is naturally an \'etale groupoid, see \cite[p. 45]{K}. Its groupoid algebra $\mathbb{C} [T \rtimes W]$ is the crossed product algebra \[ \mathcal{O}(T) \rtimes W . \] In the groupoid $T \rtimes W$, we have \[ \text{source}(t,w) = t, \quad \text{target}(t,w) = w \cdot t \] so that the set \[ \{(t,w) \in T \times W : w \cdot t = t\} \] comprises all the arrows which are \emph{loops}.
The decomposition of the groupoid $T \rtimes W$ into transitive groupoids leads naturally to Eqn.~(\ref{eqn:(1)}). The groupoid $T \rtimes W$ seems to be a bridge between $T{/\!/} W$ and $(T{/\!/} W)_2$.
In the context of algebraic geometry, the extended quotient is known as the inertia stack \cite{M}, in which case the notation is \[ I(T): = \widetilde{T}, \quad \quad [I(T)/W]: = T{/\!/} W. \]
\section{The parameters for the principal series} \label{sec:unram}
Let ${\mathcal W}_F$ denote the Weil group of $F$, let $I_F$ be the inertia subgroup of ${\mathcal W}_F$. Let ${\rm Frob} \in {\mathcal W}_F$ denote a geometric Frobenius (a generator of ${\mathcal W}_F/I_F \simeq \mathbb{Z}$). We have ${\mathcal W}_F/I_F = \langle{\rm Frob}\rangle$. We will think of this as a multiplicative group, with identity element $1$.
Let ${\mathfrak P}(G)$ denote the set of conjugacy classes in $G$ of pairs $(\Phi,\rho)$ such that $\Phi$ is a morphism \[ \Phi\colon {\mathcal W}_F/I_F \times {\rm SL}(2,\mathbb{C}) \to G\] which is \emph{admissible}, {\it i.e.,\,} $\Phi(1, - )$ is a morphism of complex algebraic groups, $\Phi({\rm Frob},1)$ is a semisimple element in $G$, and $\rho$ is defined in the following way.
We will adopt the formulation of Reeder \cite{R}. Choose a Borel subgroup $B_2$ in ${\rm SL}(2,\mathbb{C})$ and let
$S_{\Phi} = \Phi({\mathcal W}_F \times B_2)$, a solvable subgroup of $G$.
Let $\mathbf{B}^{\Phi}$ denote the variety of Borel subgroups of $G$ containing $S_{\Phi}$.
Let $G_{\Phi}$ be the centralizer
in $G$ of the image of $\Phi$. Then $G_{\Phi}$ acts naturally on $\mathbf{B}^{\Phi}$, and hence on the
singular homology $H_*(\mathbf{B}^{\Phi},\mathbb{C})$. Then $\rho$ is an irreducible representation of $G_{\Phi}$ which appears in the action
of $G_{\Phi}$ on $H_*(\mathbf{B}^{\Phi},\mathbb{C})$.
A Reeder parameter $(\Phi, \rho)$ determines a Kazhdan-Lusztig parameter $(\sigma, u, \rho)$ in the following way. Let \[ u_0 = \left(
\begin{array}{cc} 1 & 1 \\ 0 & 1 \end{array} \right) , \quad T_x = \left( \begin{array}{cc} x & 0\\ 0 & x^{-1} \end{array} \right) \] and set \[ u = \Phi(1,u_0), \quad \sigma = \Phi({\rm Frob}, T_{\sqrt q}) \] where $q$ is the cardinality of the residue field $k_F$. Then the triple $(\sigma, u, \rho)$ is a Kazhdan-Lusztig parameter. Since $\Phi$ is a homomorphism and \[ T_{\sqrt q} \, u_0 \, T_{\sqrt q}^{-1} = \left( \begin{array}{cc} 1 & q\\ 0 & 1 \end{array} \right) = u_0^q \] it follows that \[ \sigma u \sigma^{-1} = u^q. \]
It is worth noting that the set ${\mathfrak P}(G)$ is $q$-independent.
We now move on to the rest of the principal series. We recall that
$\mathcal{G}$ denotes a reductive split $p$-adic group \emph{with connected centre}, maximal split torus $\mathcal{T}$, and $G$, $T$ denote the Langlands dual of $\mathcal{G}$, $\mathcal{T}$. We assume in addition that the residual characteristic of $F$ is not a torsion prime for $G$.
Let ${\mathfrak Q}(G)$ denote the set of conjugacy classes in $G$ of pairs $(\Phi,\rho)$ such that $\Phi$ is a continuous morphism \[ \Phi\colon {\mathcal W}_F\times {\rm SL}(2,\mathbb{C}) \to G\] which is rational on ${\rm SL}(2,\mathbb{C})$ and such that $\Phi({\mathcal W}_F)$ consists of semisimple elements in $G$, and $\rho$ is defined in the following way.
Choose a Borel subgroup $B_2$ in ${\rm SL}(2,\mathbb{C})$ and let $S_{\Phi} = \Phi({\mathcal W}_F \times B_2)$. Let $\mathbf{B}^{\Phi}$ denote the variety of Borel subgroups of $G$ containing $S_{\Phi}$. The variety $\mathbf{B}^{\Phi}$ is non-empty if and only if $\Phi$ factors through the topological abelianization ${\mathcal W}_F^{{\rm ab}}:={\mathcal W}_F/\overline{[{\mathcal W}_F,{\mathcal W}_F]}$ of ${\mathcal W}_F$ (see \cite[\S~4.2]{R}). We will assume that $\mathbf{B}^{\Phi}$ is non-empty, and we will still denote by $\Phi$ the homomorphism \[ \Phi\colon {\mathcal W}_F^{{\rm ab}}\times {\rm SL}(2,\mathbb{C}) \to G.\] Let $I_F^{{\rm ab}}$ denote the image of $I_F$ in ${\mathcal W}_F^{{\rm ab}}$. The choice of Frobenius ${\rm Frob}$ determines a splitting \begin{equation} \label{eqn:splitting} {\mathcal W}_F^{{\rm ab}}=I_F^{{\rm ab}}\times\langle{\rm Frob}\rangle.\end{equation} Let $G_{\Phi}$ be the centralizer in $G$ of the image of $\Phi$. Then $G_{\Phi}$ acts naturally on $\mathbf{B}^{\Phi}$, and hence on the singular homology $H_*(\mathbf{B}^{\Phi},\mathbb{C})$. Then $\rho$ is an irreducible representation of $G_{\Phi}$ which appears in the action
of $G_{\Phi}$ on $H_*(\mathbf{B}^{\Phi},\mathbb{C})$.
Let $\chi$ be a smooth quasicharacter of ${\mathcal T}$ and let ${\mathfrak s} = [{\mathcal T},\chi]_{{\mathcal G}}$ be the point in the Bernstein spectrum ${\mathfrak B}({\mathcal G})$ determined by $\chi$. Let \begin{equation} \label{eqn:Ws} W^{{\mathfrak s}} = \{w \in W : w\cdot {\mathfrak s} = {\mathfrak s}\}. \end{equation} Let $X$ denote the rational co-character group of ${\mathcal T}$, identified with the rational character group of $T$. Let ${\mathcal T}_0$ be the maximal compact subgroup of ${\mathcal T}$. By choosing a uniformizer in $F$, we obtain a splitting $${\mathcal T}={\mathcal T}_0\times X,$$ according to which \[\chi = \lambda\otimes t,\] where $\lambda$ is a character of ${\mathcal T}_0$, and $t\in T$. Let $r_F\colon {\mathcal W}_F^{{\rm ab}}\to F^\times$ denote the reciprocity isomorphism of abelian class field theory, and let \begin{equation} \label{eqn:hl} {{\widehat\lambda}}\colon I_F^{{\rm ab}}\to T\end{equation} be the unique homomorphism satisfying \begin{equation} \label{eqn:dd} \eta\circ{{\widehat\lambda}}=\lambda\circ\eta\circ r_F,\quad \text{for all $\eta\in X$},\end{equation} where $\eta$ is viewed as a character of $T$ on the left side and as a co-character of ${\mathcal T}$ on the right side of~(\ref{eqn:dd}).
Let $H$ denote the centralizer in $G$ of the image of ${{\widehat\lambda}}$: \begin{equation} \label{eqn:H} H=G_{{{\widehat\lambda}}}.\end{equation} The assumption that $G$ has simply-connected derived group implies that the group $H$ is connected (see \cite[p.~396]{Roc}). Note that $H$ itself does not have simply-connected derived group in general (for instance, if $G$ is the exceptional group of type ${{\rm G}}_2$, and $\sigma$ is the tensor square of a ramified quadratic character of $F^\times$ then $H={\rm SO}(4,\mathbb{C})$).
Let ${\mathfrak Q}(G)_{{{\widehat\lambda}}}$ be the subset of ${\mathfrak Q}(G)$ consisting of the $G$-conjugacy classes of all the pairs $(\Phi,\rho)$ such that $\Phi$ factors through ${\mathcal W}_F^{{\rm ab}}$ and
\[\Phi|_{I_F^{{\rm ab}}}={{\widehat\lambda}}.\] The group $W^{\mathfrak s}$ defined in~(\ref{eqn:Ws}) is a Weyl group: it is the Weyl group of $H$ (indeed, in the decomposition of \cite[Lemma~8.1~(i)]{Roc} the group $C_\chi$ is trivial as proven on \cite[p.~396]{Roc}): \[W^{\mathfrak s}=W_H.\]
\section{Main result}
\begin{thm} \label{thm:ps} There is a canonical bijection of the extended quotient of the second kind $(T{/\!/} W^{\mathfrak s})_2$ onto the set ${\mathfrak Q}(G)_{{{\widehat\lambda}}}$ of conjugacy classes of Reeder parameters attached to the point ${\mathfrak s}$ in the Bernstein spectrum of $\mathcal{G}$. It follows that there is a bijection \[ \mu^{{\mathfrak s}} : T{/\!/} W^{{\mathfrak s}} \simeq {\mathfrak Q}(G)_{{{\widehat\lambda}}} \] so that the extended quotient $T{/\!/} W^{{\mathfrak s}}$ is a model for the Reeder parameters attached to the point ${\mathfrak s}$. \end{thm}
The proof of this theorem requires a series of Lemmas. We recall that \[ W^{{\mathfrak s}} = W_H. \] The plan of our proof is to begin with an element in the extended quotient of the second kind $(T{/\!/} W_H)_2$.
Lemmas 4.2 and 4.3 allow us to infer that $W_H(t)$ is a semidirect product $W_{{\mathfrak G}(t)}\rtimes A_H(t)$. We now combine the Springer correspondence for $W_{{\mathfrak G}(t)}$ with Clifford theory for semidirect products (Clifford theory is a noncommutative version of the Mackey machine). This creates $4$ parameters $(t,x,\varrho, \psi)$. With this data, and the character $\lambda$ determined by the point $ {\mathfrak s}$, we construct a Reeder parameter $(\Phi, \rho)$ such that $\Phi({\rm Frob},1)=t$, $\Phi(1,u_0)=\exp x$ and the restriction of $\rho$ contains $\varrho$.
\begin{lem} \label{lem:disconnected} Let $M$ be a reductive algebraic group. Let $M^0$ denote the connected component of the identity in $M$. Let $T$ be a maximal torus of $M^0$ and let $B$ be a Borel subgroup of $M^0$ containing $T$. Let \[W_{M^0}(T):={\rm N}_{M^0}(T)/T\] denote the Weyl group of $M^0$ with respect to $T$. We set \[ W_M(T):={\rm N}_M(T)/T.\] \begin{enumerate} \item[{\rm (1)}] The group $W_M(T)$ has the semidirect product decomposition: \[W_M(T)=W_{M^0}(T)\rtimes ({\rm N}_M(T,B)/T),\] where ${\rm N}_M(T,B)$ denotes the normalizer in $M$ of the pair $(T,B)$. \item[{\rm (2)}] We have \[{\rm N}_M(T,B)/T\simeq M/M^0=\pi_0(M).\] \end{enumerate} \end{lem} \begin{proof} The group $W_{M^0}(T)$ is a normal subgroup of $W_M(T)$. Indeed, let $n\in{\rm N}_{M^0}(T)$ and let $n'\in{\rm N}_M(T)$, then $n'nn^{\prime-1}$ belongs to $M^0$ (since the latter is normal in $M$) and normalizes $T$, that is, $n'nn^{\prime-1}\in{\rm N}_{M^0}(T)$. On the other hand, $n'(nT)n^{\prime-1}=n'nn^{\prime-1}(n'Tn^{\prime-1})=n'nn^{\prime-1}T$.
Let $w\in W_M(T)$. Then $wBw^{-1}$ is a Borel subgroup of $M^0$ (since, by definition, the Borel subgroups of an algebraic group are the maximal closed connected solvable subgroups). Moreover, $wBw^{-1}$ contains $T$. In a connected reductive algebraic group, the intersection of two Borel subgroups always contains a maximal torus and the two Borel subgroups are conjugate by a element of the normalizer of that torus. Hence $B$ and $wBw^{-1}$ are conjugate by an element $w_1$ of $W_{M^0}(T)$. It follows that $w_1^{-1}w$ normalises $B$. Hence \[w_1^{-1}w\in W_M(T)\cap {\rm N}_{M}(B)={\rm N}_{M}(T,B)/T,\] that is, \[W_M(T)=W_{M^0}(T)\cdot({\rm N}_M(T,B)/T).\] Finally, we have \[W_{M^0}(T)\cap({\rm N}_M(T,B)/T)={\rm N}_{M^0}(T,B)/T=\{1\},\] since ${\rm N}_{M^0}(B)=B$ and $B\cap {\rm N}_{M^0}(T)=T$. This proves (1).
We will now prove (2). We consider the following map: \[{\rm N}_{M}(T,B)/T\to M/M^0\quad\quad mT\mapsto mM^0.\leqno{(*)}\] It is injective. Indeed, let $m,m'\in{\rm N}_{M}(T,B)$ such that $mM^0=m'M^0$. Then $m^{-1}m'\in M^0\cap{\rm N}_{M}(T,B)={\rm N}_{M^0}(T,B)=T$ (as we have seen above). Hence $mT=m'T$.
On the other hand, let $m$ be an element in $M$. Then $m^{-1}Bm$ is a Borel subgroup of $M^0$, hence there exists $m_1\in M^0$ such that $m^{-1}Bm=m_1^{-1}Bm_1$. It follows that $m_1m^{-1}\in{\rm N}_M(B)$. Also $m_1m^{-1}Tmm_1^{-1}$ is a torus of $M^0$ which is contained in $m_1m^{-1}Bmm_1^{-1}=B$. Hence $T$ and $m_1m^{-1}Tmm_1^{-1}$ are conjugate in $B$: there is $b\in B$ such that $m_1m^{-1}Tmm_1^{-1}=b^{-1}Tb$. Then $n:=bm_1m^{-1}\in{\rm N}_M(T,B)$. It gives $m=n^{-1}bm_1$. Since $bm_1\in M^0$, we obtain $mM^0=n^{-1}M^0$. Hence the map $(*)$ is surjective. \end{proof}
In order to approach the notation in \cite[p.471]{CG}, we let ${\mathfrak G}(t)$ denote the identity component of the centralizer $C_H(t)$: \[ {\mathfrak G}(t): = C_H^0(t). \] Let $W_{{\mathfrak G}(t)}$ denote the Weyl group of ${\mathfrak G}(t)$.
\begin{lem} \label{lem:centrals} Let $t \in T$. The isotropy subgroup $W_H(t)$ is the group of ${\rm N}_{C_H(t)}(T)/T$, and we have \[W_H(t) = W_{{\mathfrak G}(t)}\rtimes A_H(t)\quad\text{with $A_H(t):=\pi_0(C_H(t))$.}\] In the case when $H$ has simply-connected derived group, the group $C_H(t)$ is connected and $W_H(t)$ is then the Weyl group of $C_H(t)={\mathfrak G}(t)$. \end{lem} \begin{proof} Let $t \in T$. Note that
\begin{align*}
W_H(t) & = \{w \in W_H : w\cdot t = t\}\\
& = \{w \in W_H : wtw^{-1} = t\}\\
& = \{w \in W_H : wt = tw\}\\
& = W_H \cap C_H(t). \end{align*} Note that $H$ and $C_H(t)$ have a common maximal torus $T$. Now \begin{align*} W_H \cap C_H(t) & = {\rm N}_H(T)/T \cap C_H(t)\\ & = {\rm N}_{C_H(t)}(T)/T\\ & = W_{C_H(t)}(T). \end{align*} The result follows by applying Lemma~\ref{lem:disconnected} with $M=C_H(t)$.
If $H$ has simply-connected derived group, then the
centralizer $C_H(t)$ is connected by Steinberg's theorem \cite[\S 8.8.7]{CG}. \end{proof}
Let $\tau$ be an irreducible representation of $W_{{\mathfrak G}(t)}$. Now we apply the Springer correspondence to $\tau$. Note: the Springer correspondence that we are considering here coincides with that constructed by Springer for a reductive group over a field of positive characteristic and is obtained from the correspondence constructed by Lusztig by tensoring the latter by the sign representation of $W_{{\mathfrak G}(t)}$ (see \cite{Hot}).
Let $\mathfrak{c}(t)$ denote the Lie algebra of ${\mathfrak G}(t)$, for $x\in{\mathfrak c}(t)$, let $Z_{{\mathfrak G}(t)}(x)$ denote the centralizer of $x$ in ${\mathfrak G}(t)$, via the adjoint representation of ${\mathfrak G}(t)$ on $\mathfrak{c}(t)$, and let \begin{align} A_x = \pi_0 (Z_{{\mathfrak G}(t)}(x)) \end{align} Let $\mathbf{B}_x$ denote the variety of Borel subalgebras of $\mathfrak{c}(t)$ that contain $x$.
All the irreducible components of $\mathbf{B}_x$ have the same dimension $d(x)$ over $\mathbb{R}$, see \cite[Corollary 3.3.24]{CG}. The finite group $A_x$ acts on the set of irreducible components of $\mathbf{B}_x$ \cite[p. 161]{CG}.
\begin{defn} If a group $A$ acts on the variety $\mathbf{X}$, let ${\mathcal R}(A,\mathbf{X})$ denote the set of irreducible representations of $A$ appearing in the homology $H_*(\mathbf{X})$, as in \cite[p.118]{R}. Let ${\mathcal R}_{top}(A, \mathbf{X})$ denote the set of irreducible representations of $A$ appearing in the top homology of $\mathbf{X}$. \end{defn}
The Springer correspondence yields a one-to-one correspondence \begin{equation} \label{eqn:Springercor} (x,\varrho)\mapsto \tau(x,\varrho)\end{equation} between the set of ${\mathfrak G}(t)$-conjugacy classes of pairs $(x,\varrho)$ formed by a nilpotent element $x \in \mathfrak{c}(t)$ and an irreducible representation $\varrho$ of $A=A_x$ which occurs in $H_{d(x)}(\mathbf{B}_x, \mathbb{C})$ (that is, $\varrho\in{\mathcal R}_{top}(A_x,\mathbf{B}_x)$) and the set of isomorphism classes of irreducible representations of the Weyl group $W_{{\mathfrak G}(t)}$.
We now work with the Jacobson-Morozov theorem \cite[p. 183]{CG}. Let $e_0$ be the standard nilpotent matrix in $\mathfrak{sl}(2,\mathbb{C})$: \[e_0 = \left( \begin{array}{cc} 0 & 1 \\ 0 & 0 \end{array}\right) \] There exists a rational homomorphism $\gamma : {\rm SL}(2, \mathbb{C}) \to {\mathfrak G}(t)$ such that its differential $\mathfrak{sl}(2,\mathbb{C}) \to \mathfrak{c}(t)$ sends $e_0$ to $x$, see \cite[\S 3.7.4]{CG}.
Define \begin{eqnarray} \label{eqn:Phi} \Phi \colon {\mathcal W}_F^{{\rm ab}}\times {\rm SL}(2,\mathbb{C}) \to G, \quad\quad (w,{\rm Frob},Y) \mapsto {{\widehat\lambda}}(w)\cdot t \cdot \gamma(Y) \end{eqnarray} \begin{eqnarray} \label{eqn:Upsilon} \Upsilon \colon {\mathcal W}_F^{{\rm ab}}\times {\rm SL}(2,\mathbb{C}) \to H, \quad\quad (w,{\rm Frob},Y) \mapsto {{\widehat\lambda}}(w)\cdot t \cdot \gamma(Y) \end{eqnarray} \begin{eqnarray} \label{eqn:Psi} \Psi \colon {\mathcal W}_F^{{\rm ab}} \times {\rm SL}(2,\mathbb{C}) \to {\mathfrak G}(t), \quad\quad (w,{\rm Frob},Y) \mapsto {{\widehat\lambda}}(w)\cdot t \cdot \gamma(Y) \end{eqnarray} \begin{eqnarray} \label{eqn:Xi} \Xi \colon {\mathcal W}_F^{{\rm ab}} \times {\rm SL}(2,\mathbb{C}) \to {\mathfrak G}(t), \quad \quad (w,{\rm Frob},Y) \mapsto {{\widehat\lambda}}(w)\cdot \gamma(Y). \end{eqnarray} where $w$ is any element in $I_F^{{\rm ab}}$.
Note that $im\,\Phi\subset H$ (see \cite[\S~4.2]{R}) and that $C(im \, \Psi) = C(im \, \Upsilon)$, for any element in $C(im \, \Upsilon)$ must commute with $\Upsilon({\rm Frob}) = t$. We also have $C(im \, \Xi) = C(im \, \Psi) \subset {\mathfrak G}(t)$. Let \[ A_{\Psi} = \pi_0(C(im \, \Psi)), \quad \quad A_{\Xi} = \pi_0(C(im \, \Xi)). \]
\begin{lem} \label{lem:AAA} We have \[ A_x = A_{\Xi} = A_{\Psi}. \] \end{lem}
\begin{proof} According to \cite[\S 3.7.23]{CG}, we have \[ Z_{{\mathfrak G}(t)}(x) = C(im \, \Xi)\cdot U \] with $U$ the unipotent radical of $Z_{{\mathfrak G}(t)}(x)$. Now $U$ is contractible via the map \[ [0,1] \times U \to U, \quad \quad (\lambda, \exp Y) \mapsto \exp( \lambda Y) \] for all $Y \in {\mathfrak n}$ with $\exp {\mathfrak n} = U$. \end{proof}
Lemma~\ref{lem:AAA} allows us to define \[ A: = A_x = A_{\Psi}= A_{\Xi}. \]
Let ${\mathcal C}(t)$ denote a {\it predual} of ${\mathfrak G}(t)$, {\it i.e.,\,} ${\mathfrak G}(t)$ is the Langlands dual of ${\mathcal C}(t)$. Let $\mathbf{B}^{\Psi}$ (resp. $\mathbf{B}^{\Xi}$) denote the variety of the Borel subgroups of ${\mathfrak G}(t)$ which contain $S_{\Psi}: = \Psi({\mathcal W}_F\times B_2)$ (resp. $S_{\Xi}: = \Xi({\mathcal W}_F \times B_2) = \gamma(B_2)$).
\begin{lem} \label{lem:bije} We have \[ {\mathcal R}_{top}(A, \mathbf{B}_x) = {\mathcal R}(A, \mathbf{B}^{\Xi}). \] \end{lem} \begin{proof} Let, as before, $\tau$ be an irreducible representation of $W_{{\mathfrak G}(t)}$. Let $(x,\varrho)$ be the Springer parameter attached to $\tau$ by the inverse bijection of (\ref{eqn:Springercor}). Define $\Xi$ as in Eqn.~(\ref{eqn:Xi}). Note that $\Xi$ depends on the morphism $\gamma$, which in turn depends on the nilpotent element $x \in \mathfrak{c}(t)$.
Then $\Xi$ is a real tempered $L$-parameter for the $p$-adic group ${\mathcal C}(t)$, see \cite[3.18]{BM}. According to several sources, see \cite[\S 10.13]{Lu}, \cite{BM}, there is a bijection between
Springer parameters and Reeder parameters: \begin{equation} \label{eqnarray:bij} (d \gamma(e_0),\varrho) \mapsto (\Xi, \varrho). \end{equation} Now $\varrho$ is an irreducible representation of $A$ which appears simultaneously in $H_{d(x)}(\mathbf{B}_x, \mathbb{C})$ and $H_*(\mathbf{B}^{\Xi}, \mathbb{C})$. \end{proof}
We will recall below a result of Ram and Ramagge, which is based on Clifford theoretic results developed by MacDonald and Green.
Let ${\mathcal H}$ be a finite dimensional $\mathbb{C}$-algebra and let ${\mathcal A}$ be a finite group acting by automorphisms on ${\mathcal H}$. If $V$ is a finite dimensional module for ${\mathcal H}$ and $a\in {\mathcal A}$, let ${}^aV$ denote the ${\mathcal H}$-module with the action $f\cdot v:=a^{-1}(f)v$, $f\in{\mathcal H}$ and $v\in V$. Then $V$ is simple if and only if ${}^aV$ is. Let $V$ be a simple ${\mathcal H}$-module. Define the inertia subgroup of $V$ to be \[{\mathcal A}_V:=\left\{a\in {\mathcal A}\;:\;V\simeq {}^aV\right\}.\] Let $a\in {\mathcal A}_V$. Since both $V$ and ${}^a V$ are simple, Schur's lemma implies that the isomorphism $V\to{}^aV$ is unique up to a scalar multiple. For each $a\in{\mathcal A}_V$ we fix an isomorphism \[{\phi}_a\colon V\to{}^{a^{-1}}V.\] Then, as operators on $V$, \[{\phi}_a f=a(f){\phi}_a \quad \text{for $f\in{\mathcal H}$,} \quad \text{and} \quad {\phi}_a{\phi}_{a'}=\eta_V(a,a')^{-1}{\phi}_{aa'},\] where $\eta_V(a,a')\in\mathbb{C}^\times$. The resulting function \[\eta_V\colon {\mathcal A}_V\times {\mathcal A}_V\to \mathbb{C}^\times,\] is a cocycle. The isomorphism class of $\eta_V$ is independent of the choice of the isomorphism ${\phi}_a$.
Let $\mathbb{C}[{\mathcal A}_V]_{\eta_V}$ be the algebra with basis $\left\{c_a\,:\,a\in {\mathcal A}_V\right\}$ and multiplication given by \[c_a\cdot c_{a'}=\eta_V(a,a')c_{aa'},\quad\text{for $a,a'\in{\mathcal A}_V$.}\]
Let $\psi$ be a simple $\mathbb{C}[{\mathcal A}_V]_{\eta_V}$-module. Then putting \[(fa)\cdot(v\otimes z)=f{\phi}_av\otimes c_az,\quad\text{for $f\in{\mathcal H}$, $a\in {\mathcal A}_V$, $v\in V$, $z\in\psi$,}\] defines an action of ${\mathcal H}\rtimes {\mathcal A}_V$ on $V\otimes\psi$. Define the induced module \[V\rtimes\psi:={\rm Ind}_{{\mathcal H}\rtimes {\mathcal A}_V}^{{\mathcal H}\rtimes {\mathcal A}}(V\otimes\psi).\] \begin{thm} \label{thm:RaRa} {\rm (Ram-Ramagge, \cite[Theorem~A.6]{RamRam}, Reeder, \cite[(1.5.1)]{R})} The induced module $V\rtimes\psi$ is a simple ${\mathcal H}\rtimes {\mathcal A}$-module, every simple ${\mathcal H}\rtimes {\mathcal A}$-module occurs in this way, and if $V\rtimes\psi\simeq V'\rtimes\psi'$, then $V$, $V'$ are ${\mathcal A}$-conjugate, and $\psi\simeq\psi'$ as $\mathbb{C}[{\mathcal A}_V]_{\eta_V}$-modules. \end{thm}
On the other hand, it follows from Lemma~\ref{lem:centrals} that the isotropy group of $t$ in $W_H$ admits the following semidirect product decomposition: \[W_H(t)=W_{{\mathfrak G}(t)}\rtimes A_H(t)\quad\text{ with $A_H(t):=\pi_0(C_H(t))$.}\] Hence the group algebra $\mathbb{C}[W_H(t)]$ is a crossed-product algebra \[\mathbb{C}[W_H(t)]=\mathbb{C}[W_{{\mathfrak G}(t)}]\rtimes A_H(t).\] By applying Theorem~\ref{thm:RaRa} with ${\mathcal H}=\mathbb{C}[W_{{\mathfrak G}(t)}]$ and ${\mathcal A}= A_H(t)$, we see that the irreducible representations of $W_H(t)$ are the \[\tau(x,\varrho)\rtimes\psi,\] with $\psi$ any simple $\mathbb{C}[A_{\tau}]_{\eta_{\tau}}$-module and $\tau=\tau(x,\varrho)$.
Let ${\mathcal I}$ be a standard Iwahori subgroup in ${\mathcal C}(t)$, and let ${\mathcal H}({\mathcal C}(t),{\mathcal I})$ denote the corresponding Iwahori-Hecke algebra. Recall that $x=d \gamma(e_0)$. We will denote by $V=V(x,\varrho)$ the real tempered simple module of ${\mathcal H}({\mathcal C}(t),{\mathcal I})$ which corresponds to $(x,\varrho)$. Here ``real'' means that the central character of $V$ is real.
By applying Theorem~\ref{thm:RaRa} with ${\mathcal H}={\mathcal H}({\mathcal C}(t),{\mathcal I})$ and ${\mathcal A}=A_H(t)$, we obtain the following subset of simple modules for ${\mathcal H}({\mathcal C}(t),{\mathcal I})\rtimes A_H(t)$: \[V(x,\varrho)\rtimes\psi,\] with $\psi$ any simple $\mathbb{C}[A_{V}]_{\eta_V}$-module and $V=V(x,\varrho)$.
\begin{lem} \label{lem:cocycles} We have \[A_{\tau(x,\varrho)}=A_{V(x,\varrho)}.\] Moreover, the cocycles $\eta_{\tau(x,\varrho)}$ and $\eta_{V(x,\varrho)}$ can be chosen to be equal. \end{lem} \begin{proof} Recall that the \emph{closure order on nilpotent adjoint orbits} is defined as follows: \[ {\mathcal O}_1\le{\mathcal O}_2\quad\text{when ${\mathcal O}_1\subset\overline{{\mathcal O}_2}$.}\] For $x$ a nilpotent element of ${\mathfrak c}(t)$, we will denote by ${\mathcal O}_{x}$ the nilpotent adjoint orbit which contains $x$. Then as in \cite[(6.5)]{BM}, we define a \emph{partial order on the representations of $W_{{\mathfrak G}(t)}$} by \begin{equation} \label{eqn:ordering} \tau(x_1,\varrho_1)\le\tau(x_2,\varrho_2)\quad\text{when ${\mathcal O}_{x_1}\le{{\mathcal O}}_{x_2}$}.\end{equation} In this partial order, the trivial representation of $W(t)$ is a minimal element and the sign representation of $W(t)$ is a maximal element.
The $W_{{\mathfrak G}(t)}$-structure of $V(x,\varrho)$ is \begin{equation} \label{eqn:Wstruct}
V(x,\varrho)|_{W_{{\mathfrak G}(t)}}\,=\,\tau(x,\varrho)\,\oplus\, \bigoplus_{(x_1,\varrho_1)\atop\tau(x,\varrho)<\tau(x_1,\varrho_1)} m_{(x_1,\varrho_1)}\,\tau(x_1,\varrho_1),\end{equation} where the $m_{(x_1,\varrho_1)}$ are non-negative integers. (In case ${\mathcal C}(t)$ has connected centre, (\ref{eqn:Wstruct}) is implied by \cite[Theorem~6.3~(1)]{BM}; the proof in the general case follows the same lines.) In particular, it follows from (\ref{eqn:Wstruct}) that \begin{equation} \label{eqn:dim} \dim_{\mathbb{C}}{\rm Hom}_{W_{{\mathfrak G}(t)}}\left(\tau(x,\varrho),V(x,\varrho)\right)=1. \end{equation}
Let $a\in A_H(t)$. Since the action of $A_H(t)$ on $W_{{\mathfrak G}(t)}$ comes from its action on the root datum, we have (see \cite[2.6.1, 2.7.3]{R}): \[{}^a\tau(x,\varrho)=\tau(a \cdot x,{}^a\varrho).\] Then
\[{}^a V(x,\varrho)|_{W_{{\mathfrak G}(t)}}\,=\,\tau(a \cdot x,{}^a\varrho)\,\oplus\, \bigoplus_{(x_1,\varrho_1)\atop\tau(x,\varrho)<\tau(x_1,\varrho_1)} m_{(x_1,\varrho_1)}\,\tau(a\cdot x_1,{}^a\varrho_1).\] Since $\tau(x,\varrho)\le\tau(x_1,\varrho_1)$ if and only if $\tau(a\cdot x,{}^a\varrho)\le\tau(a\cdot x_1,{}^a\varrho_1)$, it follows that ${}^a V(x,\varrho)$ corresponds to the ${\mathfrak G}(t)$-conjugacy class of $(a\cdot x,{}^a\varrho)$ via the bijection induced by~(\ref{eqnarray:bij}).
Hence \[{}^a V(x,\varrho)\simeq V(x,\varrho)\quad\text{ if and only if }\quad {}^a\tau(x,\varrho)\simeq\tau(x,\varrho).\] The equality of the inertia subgroups \[A_H(t)_{V(x,\varrho)}=A_H(t)_{\tau(x,\varrho)}=:A_H(t)_{x,\varrho}\] follows.
Let $\left\{{\phi}_a^V\,:\,a \in A_H(t)_{x,\varrho}\right\}$ (resp. $\left\{{\phi}_a^\tau\,:\,a \in A_H(t)_{x,\varrho}\right\}$) be a family of isomorphisms for $V=V(x,\varrho)$ (resp. $\tau=\tau(x,\varrho)$) which determines the cocycle $\eta_V$ (resp. $\eta_\tau$). We have \[{\rm Hom}_{W_{{\mathfrak G}(t)}}(\tau,V)\overset{{\phi}_a^V}\to {\rm Hom}_{W_{{\mathfrak G}(t)}}(\tau,{}^{a^{-1}}V)\overset{{\phi}_a^\tau}\to {\rm Hom}_{W_{{\mathfrak G}(t)}}({}^{{a}^{-1}}\tau,{}^{a^{-1}}V).\] The composed map is given by a scalar, since by Eqn.~(\ref{eqn:dim}) these spaces are one-dimensional. We normalize ${\phi}_a^V$ so that this scalar equals one. This forces $\eta_V$ and $\eta_\tau$ to be equal. \end{proof}
\begin{lem} There is a bijection between Springer parameters and Reeder parameters for the group $C_H(t)$: \[(x,\varrho,\psi)\mapsto (\Xi,\varrho,\psi).\] \end{lem} \begin{proof} Lemma~\ref{lem:cocycles} allows us to extend the bijection (\ref{eqnarray:bij}) from ${\mathfrak G}(t)$ to $C_H(t)$. \end{proof} \begin{lem} We have \[ \mathbf{B}^{\Psi} = \mathbf{B}^{\Xi}. \] \end{lem} \begin{proof} We note that \[ S_{\Psi} = < t > \gamma(B_2), \quad \quad S_{\Xi} = \gamma(B_2) \]
Let ${\mathfrak b}$ denote a Borel subgroup of the reductive group $C_H(t)$. Since ${\mathfrak b}$ is maximal among the connected solvable subgroups of $C_H(t)$, we have ${\mathfrak b} \subset {\mathfrak G}(t)$. Then we have ${\mathfrak b} = T_{{\mathfrak b}}U_{{\mathfrak b}}$ with $T_{{\mathfrak b}}$ a maximal torus in ${\mathfrak G}(t)$, and $U_{{\mathfrak b}}$ the unipotent radical of ${\mathfrak b}$. Note that $T_{{\mathfrak b}} \subset {\mathfrak G}(t)$. Therefore $yt = ty$ for all $y \in T_{{\mathfrak b}}$. This means that $t$ centralizes $T_{{\mathfrak b}}$, i.e. $t \in Z(T_{{\mathfrak b}})$. In a connected Lie group such as ${\mathfrak G}(t)$, we have \[ Z(T_{{\mathfrak b}}) = T_{{\mathfrak b}}\]
so that $t \in T_{{\mathfrak b}}$. Since $T_{{\mathfrak b}}$ is a group, it follows that $< t > \, \subset T_{{\mathfrak b}}$.
As a consequence, we have \[ {\mathfrak b} \supset \, < t > \gamma(B_2) \iff {\mathfrak b} \supset \gamma(B_2). \] \end{proof}
Let $S_{\Upsilon} = \Upsilon({\mathcal W}_F \times B_2)$, a solvable subgroup of $H$. Let $\mathbf{B}^{\Upsilon}$ denote the variety of Borel subgroups of $H$ containing $S_{\Upsilon}$.
\begin{lem} We have \[ \mathcal{R}(A, \mathbf{B}^{\Upsilon}) = \mathcal{R}(A, \mathbf{B}^{\Psi}) \] \end{lem}
\begin{proof} We denote the Lie algebra of ${\mathfrak G}(t)$ by ${\mathfrak g}(t)$, and the Lie algebra of $C_H(t)$ by ${\mathfrak c}_H(t)$ so that \[ {\mathfrak g}(t) = {\mathfrak c}_H(t). \] We note that the codomain of $\Psi$ is ${\mathfrak G}(t)$.
Let $\mathbf{B}^t$ denote the variety of all Borel subgroups of $G$ which contain $t$. Let $B \in \mathbf{B}^t$. Then $B \cap {\mathfrak G}(t)$ is a Borel subgroup of ${\mathfrak G}(t)$.
The proof in \cite[p.471]{CG} depends on the fact that ${\mathfrak G}(t)$ is connected, and also on a triangular decomposition of ${\rm Lie}({\mathfrak G}(t))$: \[ {\rm Lie}\,{\mathfrak G}(t) = {\mathfrak n}^t \oplus {\mathfrak t} \oplus {\mathfrak n}_{-}^t \] from which it follows that ${\rm Lie}\, B \cap {\rm Lie}\, {\mathfrak G}(t) = {\mathfrak n}^t \oplus {\mathfrak t}$ is a Borel subalgebra in ${\rm Lie} \,{\mathfrak G}(t)$. The superscript ``$t$'' stands for the centralizer of $t$.
There is a canonical map \begin{align} \label{eqn:(7)} \mathbf{B}^t \to {\rm Flag} \, {\mathfrak G}(t), \quad B \mapsto B \cap {\mathfrak G}(t) \end{align} Now ${\mathfrak G}(t)$ acts by conjugation on $\mathbf{B}^t$. We have \begin{align} \mathbf{B}^t = \mathbf{B}_1 \sqcup \mathbf{B}_2 \sqcup \cdots \sqcup \mathbf{B}_m \end{align} a disjoint union of ${\mathfrak G}(t)$-orbits, see \cite[Prop. 8.8.7]{CG}. These orbits are the connected components of $\mathbf{B}^t$, and the irreducible components of the projective variety $\mathbf{B}^t$. The above map~(\ref{eqn:(7)}), restricted to any one of these orbits, is a bijection from the ${\mathfrak G}(t)$-orbit onto ${\rm Flag} \, {\mathfrak G}(t)$ and is ${\mathfrak G}(t)$-equivariant. It is then clear that \[ \mathbf{B}_j^{\Upsilon} \simeq {\rm Flag} \, {\mathfrak G}(t)^{\Psi} \] for each $1 \leq j \leq m$. We also have $t \in S_\Upsilon = S_{\Psi}$. Now \[ \mathbf{B}^{\Upsilon} = (\mathbf{B}^t)^{\Upsilon} = (\mathbf{B}^t)^{\Psi} \] and then \[ H_*(\mathbf{B}^{\Upsilon}, \mathbb{C}) = H_*(\mathbf{B}_1^{\Psi}, \mathbb{C}) \oplus \cdots \oplus H_*(\mathbf{B}_m^{\Psi}, \mathbb{C}) \] a direct sum of \emph{equivalent} $A$-modules.
Hence $\varrho$ occurs in $H_*( \mathbf{B}^{\Upsilon},\mathbb{C})$ if and only if it occurs in $H_*(\mathbf{B}^{\Psi}, \mathbb{C})$. \end{proof}
Recall that $x$ is a nilpotent element in ${\mathfrak c}(t)$ (the Lie algebra of ${\mathfrak G}(t)$). Define \[A^+:=\pi_0(Z_{C_H(t)}(x)).\]
\begin{lem} We have \[{\mathcal R}(A,\mathbf{B}^\Upsilon)={\mathcal R}(A^+,\mathbf{B}^\Upsilon).\] \end{lem} \begin{proof} Choose an isogeny $\iota\colon{\widetilde H}\to H$ with ${\widetilde H}_{\rm der}$ simply connected (as in \cite[Theorem~3.5.4]{R}) such that $H={\widetilde H}/Z$ where $Z$ is a finite subgroup of the centre of ${\widetilde H}$ (see \cite[\S~3]{R}). Let $\tilde t$ be a lift of $t$ in ${\widetilde H}$, that is, $\iota(\tilde t)=t$. Then we have (see \cite[\S~3.1]{R}): \begin{equation} \label{eqn:iotacent} \iota(C_{{\widetilde H}}(\tilde t))=C_H^0(t)={\mathfrak G}(t).\end{equation} Let $u:=\exp(x)$, a unipotent element in ${\mathfrak G}(t)$. It follows from Eqn.~(\ref{eqn:iotacent}) that there exists ${\tilde u}\in C_{{\widetilde H}}(\tilde t)$ such that $u=\iota({\tilde u})$. Recall that $A=\pi_0(Z_{{\mathfrak G}(t)}(x))$. Then \[A\simeq\pi_0(Z_{{\mathfrak G}(t)}(u))=\pi_0(Z_{\iota(C_{{\widetilde H}}(\tilde t))}(\iota({\tilde u})))\simeq \pi_0(Z_{C_{{\widetilde H}}}(\tilde t,{\tilde u})),\] and $A$ is a subgroup of $\pi_0(Z_{C_H(t)}(u))\simeq A^+$ (see \cite[\S~3.2--3.3]{R}).
Recall from \cite[Lemma~3.5.3]{R} that \[(\tilde t,{\tilde u},\varrho,\psi) \mapsto (t, u, \rho)\] induces a bijection between $G$-conjugacy classes of quadruples $(\tilde t,{\tilde u},\varrho,\psi)$ and $G$-conjugacy classes of triples $(t,u,\rho)$, where $\rho\in{\mathcal R}(A^+,\mathbf{B}^\Upsilon)$ is such that the restriction of $\rho$ to $A$ contains $\varrho$. \end{proof}
\begin{lem} We have \[{\mathcal R}(A^+,\mathbf{B}^\Upsilon)={\mathcal R}(A^+,\mathbf{B}^\Phi).\] \end{lem} \begin{proof} It follows from \cite[Lemma~4.4.1]{R}. \end{proof}
The proof can be reversed. Here is the reason for this claim: Lemmas 4.5, 4.6, 4.8, 4.10--4.13 are all equalities, and Lemma 4.9 is a bijection.
This creates a canonical bijection between the extended quotient of the second kind $(T{/\!/} W^{{\mathfrak s}})_2$ and ${\mathfrak Q}(G)_{{{\widehat\lambda}}}$:
\begin{align} \mu \colon (T{/\!/} W^{{\mathfrak s}})_2 \longrightarrow {\mathfrak Q}(G)_{{{\widehat\lambda}}}, \quad \quad (t, x, \varrho, \psi) \mapsto (\Phi, \rho). \end{align} This in turn creates a bijection
\begin{align} T{/\!/} W^{{\mathfrak s}} \longrightarrow {\mathfrak Q}(G)_{{{\widehat\lambda}}}. \end{align} This bijection is not canonical in general, depending as it does on a choice of bijection between the set of conjugacy classes in $W_H(t)$ and the set of irreducible characters of $W_H(t)$. When $G = {\rm GL}(n)$, the finite group $W_H(t)$ is a product of symmetric groups: in this case there is a canonical bijection between the set of conjugacy classes in $W_H(t)$ and the set of irreducible characters of $W_H(t)$, by the classical theory of Young tableaux.
To close this section, we will consider the case of ${\rm GL}(n,F)$, and the Iwahori point ${\mathfrak i}$ in the Bernstein spectrum of ${\rm GL}(n,F)$. The Langlands dual of ${\rm GL}(n,F)$ is ${\rm GL}(n,\mathbb{C})$, and we will take $T$ to be the standard maximal torus in ${\rm GL}(n,\mathbb{C})$. The Weyl group is the symmetric group $S_n$. We will denote our bijection, in this case canonical, as follows: \[ \mu_{F}^{{\mathfrak i}} : T{/\!/} W \to {\mathfrak P}({\rm GL}(n,F)) \] Let $E/F$ be a finite Galois extension of the local field $F$. According to \cite[Theorem 4.3]{MP}, we have a commutative diagram \[ \begin{CD} T{/\!/} W @> \mu_{F}^{{\mathfrak i}} >> {\mathfrak P}({\rm GL}(n,F))\\ @ V VV @VV {\rm BC}_{E/F} V\\ T{/\!/} W @> \mu_{E}^{{\mathfrak i}} >> {\mathfrak P}({\rm GL}(n,E)) \end{CD} \] In this diagram, the right vertical map ${\rm BC}_{E/F}$ is the standard base change map sending one Reeder parameter to another as follows: \[
(\Phi,1) \mapsto (\Phi_{|W_E},1). \]
Let \[f = f(E,F)\] denote the residue degree of the extension $E/F$. We proceed to describe the left vertical map. We note that the action of $W$ on $T$ is as automorphisms of the algebraic group $T$. Since $T$ is a group, the map \[T \to T, \quad t \mapsto t^f\] is well-defined for any positive integer $f$. The map \[ \widetilde{T} \to \widetilde{T}, \quad (t,w) \mapsto (t^f,w) \] is also well-defined, since \[ w\cdot t^f = wt^fw^{-1} = wtw^{-1}wtw^{-1} \cdots wtw^{-1} = (w\cdot t)^f = t^f, \] using that $w\cdot t = t$ for $(t,w)\in\widetilde{T}$. Since \[ \alpha\cdot(t^f) = (\alpha\cdot t)^f \] for all $\alpha \in W$, this induces a map \[ T{/\!/} W \to T{/\!/} W \] which is an endomorphism (as algebraic variety) of the extended quotient $T{/\!/} W$. We shall refer to this endomorphism as the \emph{base change endomorphism of degree $f$.} The left vertical map is the base change endomorphism of degree $f$, according to \cite[Theorem 4.3]{MP}. That is, our bijection $\mu^{{\mathfrak i}}$ is compatible with base change for ${\rm GL}(n)$.
When we restrict our base change endomorphism from the extended quotient $T{/\!/} W$ to the ordinary quotient $T/W$, we see that the commutative diagram containing ${\rm BC}_{E/F}$ is consistent with \cite[Lemma 4.2.1]{Haines}.
\section{Interpolation}
We will now provide details for the interpolation procedure described in \S1. We will focus on the Iwahori point ${\mathfrak i} \in {\mathfrak B}(\mathcal{G})$, {\it i.e.,\,} on the smooth irreducible representations of $\mathcal{G}$ which admit nonzero Iwahori fixed vectors. To simplify notation, we will write $\mu = \mu^{{\mathfrak i}}$. Let ${\mathfrak P}(G)$ denote the set of conjugacy classes in $G$ of Kazhdan-Lusztig parameters. For each $s \in \mathbb{C}^{\times}$, we construct a commutative diagram: \[ \begin{CD} T{/\!/} W @> \mu >> {\mathfrak P}(G)\\ @ V \pi_s VV @VV i_s V\\ T/W @= T/W \end{CD} \] in which the map $\mu$ is bijective. In the top row of this diagram, the set $T{/\!/} W$, the set ${\mathfrak P}(G)$, and the map $\mu$ are independent of the parameter $s$.
We start by defining the vertical maps $i_s$, $\pi_s$ in the diagram. Let $s \in \mathbb{C}^{\times}$. We will define \begin{align} i_s: {\mathfrak P}(G) \to T/W, \quad (\Phi, \rho) \mapsto \Phi ({\rm Frob}, T_s)\end{align} \begin{align} \pi_s : T{/\!/} W \to T/W, \quad (t,w) \mapsto t \cdot \gamma (T_s) \end{align} where $(\Phi, \rho)$ is a Reeder parameter, and $(t,w) \in T{/\!/} W$. We note that \[
\Phi ({\rm Frob}, T_s) = t \cdot \gamma(T_s) \] so that the diagram is commutative.
$\bullet$ Let $s = 1$, and assume, for the moment, that $C_H(t)$ is connected. The map $\mu$ in Theorem 4.1 sends $(t, \tau) $ to $(\Phi ,\rho)$. We note that \[ t = \Phi({\rm Frob}, T_1) = \Phi({\rm Frob},1).\] The map $\mu$ determines the map \[ (t, \tau) \mapsto (t, \Phi(1,u_0), \rho) \] which, in turn, determines the map \[ \tau \mapsto (\exp(x), \rho) \] which is the Springer correspondence for the Weyl group $W_H(t)$.
$\bullet$ Now let $s = \sqrt q$ where $q$ is the cardinality of the residue field $k_F$ of $F$. We now link our result to the representation theory of the $p$-adic group $\mathcal{G}$ as follows. As in \S 3, let
\[ \sigma: = \Phi ({\rm Frob}, T_{\sqrt q}), \quad \quad u: = \Phi(1,u_0). \] Then we have \[ \sigma u \sigma^{-1} = u^q \] and the triple $(\sigma, u, \rho)$ is a Kazhdan-Lusztig triple.
The correspondence $\sigma \mapsto \chi_{\sigma}$ between points in $T$ and unramified quasicharacters of $\mathcal{T}$ can be fixed by the relation \[ \chi_{\sigma}(\lambda(\varpi_F)) = \lambda(\sigma) \] where $\varpi_F$ is a uniformizer in $F$, and $\lambda \in X_*(\mathcal{T}) = X^*(T)$. The Kazhdan-Lusztig triples $(\sigma, u, \rho)$ parametrize the irreducible constituents of the (unitarily) induced representation \[ {\rm Ind}_{\mathcal{B}}^{\mathcal{G}}(\chi_{\sigma}\otimes 1). \]
Note that \[ i_{\sqrt q}: (\Phi,\rho) \mapsto \sigma \] so that $i_{\sqrt q}$ is the \emph{infinitesimal character}. The infinitesimal character is denoted $\mathbf{Sc}$ in \cite[VI.7.1.1]{Renard} ($\mathbf{Sc}$ for \emph{support cuspidal}).
Since $\mu$ is bijective and the diagram is commutative, the number of points in the fibre of the $q$-projection $\pi_{\sqrt q}$ equals the number of inequivalent irreducible constituents of ${\rm Ind}_{\mathcal{B}}^{\mathcal{G}}(\chi_{\sigma}\otimes 1)$: \begin{align}
|\pi^{-1}_{\sqrt q}(\sigma)| = |{\rm Ind}_{\mathcal{B}}^{\mathcal{G}}(\chi_{\sigma}\otimes 1) | \end{align} The $q$-projection $\pi_{\sqrt q}$ is a model of the infinitesimal character $\mathbf{Sc}$.
Our formulation leads to Eqn.(24), which appears to have some predictive power. Note that the definition of the $q$-projection $\pi_{\sqrt q}$ depends only on the $L$-parameter $\Phi$. An $L$-parameter determines an $L$-packet, and does not determine the number of irreducible constituents of the $L$-packet.
\section{Examples}
\textsc{Example~1.} \emph{Realization of the ordinary quotient} $T/W$. Consider an $L$-parameter $\Phi$ for which $\Phi | _{{\rm SL}(2,\mathbb{C})} = 1$. Let $t = \Phi({\rm Frob})$. Then \[ G_{\Phi} : = C({\rm im} \, \Phi) = C(t) \] so that $G_{\Phi}$ is connected and acts trivially in homology. Therefore $\rho$ is the unit representation $1$.
Now $t$ is a semisimple element in $G$, and all such semisimple elements arise. Modulo conjugacy in $G$, the set of such $L$-parameters $\Phi$ is parametrized by the quotient $T/W$. Explicitly, let \[
{\mathfrak P}_1(G): = \{\Phi \in {\mathfrak P}(G): \Phi |_{ {\rm SL}(2,\mathbb{C})} = 1 \}. \]
Then we have a canonical bijection \[ {\mathfrak P}_1(G) \to T/W, \quad \quad (\Phi,1) \mapsto \Phi({\rm Frob},1) \] which fits into the commutative diagram \[ \begin{CD} {\mathfrak P}_1(G) @>>>T/W \\ @VVV @VVV \\ {\mathfrak P}(G) @>>> T {/\!/} W \end{CD} \] where the vertical maps are inclusions.
\textsc{Example~2.} \emph{The general linear group}. Let ${\mathcal G} = {\rm GL}(n), G = {\rm GL}(n,\mathbb{C})$. Let \[ \Phi = \chi \otimes \tau(n) \] where $\chi$ is an unramified quasicharacter of ${\mathcal W}_F$ and $\tau(n)$ is the irreducible $n$-dimensional representation of ${\rm SL}(2,\mathbb{C})$. By local classfield theory, the quasicharacter $\chi$ factors through $F^{\times}$. In the local Langlands correspondence for ${\rm GL}(n)$, the image of $\Phi$ is the unramified twist $\chi \circ \det$ of the Steinberg representation ${\rm St}(n)$.
The sign representation $sgn$ of the Weyl group $W$ has Springer parameters $(\mathcal{O}_{prin},1)$, where $\mathcal{O}_{prin}$ is the principal orbit in $\mathfrak{gl}(n,\mathbb{C})$. In the \emph{canonical} correspondence between irreducible representations of $S_n$ and conjugacy classes in $S_n$, the trivial representation of $W$ corresponds to the conjugacy class containing the $n$-cycle $w_0 = (123 \cdots n)$.
Now $G_{\Phi} = C({\rm im} \, \Phi)$ is connected \cite[\S3.6.3]{CG}, and so acts trivially in homology.
Therefore $\rho$ is the unit representation $1$. The image $\Phi(1,u_0)$ is a regular nilpotent, i.e. a nilpotent with one Jordan block (given by the partition of $n$ with one part). The corresponding conjugacy class in $W$ is $\{w_0\}$. The corresponding irreducible component of the extended quotient is $$T^{w_0}/Z(w_0) = \{(z,z, \ldots,z): z \in \mathbb{C}^{\times}\} \simeq \mathbb{C}^{\times}.$$ This is our model, in the extended quotient picture, of the complex $1$-torus of all unramified twists of the Steinberg representation ${\rm St}(n)$. The map from $L$-parameters to pairs $(w,t) \in T {/\!/} W$ is given by \[ \chi \otimes \tau(n) \mapsto (w_0, \chi({\rm Frob}), \dots, \chi({\rm Frob})). \] Among these representations, there is one real tempered representation, namely ${\rm St}(n)$, with $L$-parameter $1 \otimes \tau(n)$, attached to the principal orbit ${\mathcal O}_{prin} \subset G$.
More generally, let \[ \Phi = \chi_1 \otimes \tau(n_1) \oplus \cdots \oplus \chi_k \otimes \tau(n_k) \] where $n_1 + \cdots + n_k = n$ is a partition of $n$. This determines the unipotent orbit ${\mathcal O}(n_1, \ldots, n_k) \subset G$. There is a conjugacy class in $W$ attached canonically to this orbit: it contains the product of disjoint cycles of lengths $n_1, \ldots, n_k$. The fixed set is a complex torus, and the component in $T{/\!/} W$ is a product of symmetric products of complex $1$-tori.
\textsc{Example 3}. \emph{The exceptional group of type ${{\rm G}}_2$}. This example contains a Reeder parameter $(\Phi,\rho)$ with $\rho \neq 1$. The torus ${\mathcal T}$ is identified with $F^\times\times F^\times$. We take $\lambda=\chi\otimes\chi$ where $\chi$ is a nontrivial quadratic character of ${\mathfrak o}_F^\times$.
Here we have $H={\rm SO}(4,\mathbb{C})\simeq{\rm SL}(2,\mathbb{C})\times{\rm SL}(2,\mathbb{C})/\{\pm I\}$. This complex reductive Lie group is neither simply-connected nor of adjoint type. We have $W^{\mathfrak s}=W_H=\mathbb{Z}/2\mathbb{Z}\times \mathbb{Z}/2\mathbb{Z}$. We will write \[{\rm SL}(2, \mathbb{C}) \times {\rm SL}(2, \mathbb{C}) \longrightarrow H^{\mathfrak s}, \quad (x,y) \mapsto [x,y],\] \[T_{s,s'}=[T_s,T_{s'}],\quad s,s'\in \mathbb{C}^\times.\]
We have \[{\mathfrak Q}(G)_{\hat\lambda}\to T{/\!/} W_H\simeq \mathbb{A}^1\sqcup \mathbb{A}^1\sqcup pt_1\sqcup pt_2\sqcup pt_*\sqcup T/W_H,\] where \begin{itemize} \item one $\mathbb{A}^1$ corresponds to $(\Phi,1)$ with $\Phi({\rm Frob},1)=[I,T_s]$ and $\Phi(1,u_0)=[u_0,I]$, \item the other $\mathbb{A}^1$ corresponds to $(\Phi,1)$ with $\Phi({\rm Frob},1)=[T_s,I]$ and $\Phi(1,u_0)=[I,u_0]$, \item $pt_1$ corresponds to $(\Phi,1)$ with $\Phi({\rm Frob},1)= T_{1,1}$ and $\Phi(1,u_0)=[u_0,u_0]$, \item $pt_2$ corresponds to $(\Phi,1)$ with $\Phi({\rm Frob},1)= T_{1,-1}$ and $\Phi(1,u_0)=[u_0,u_0]$, \item $T/W_H$ corresponds to $(\Phi,1)$ with $\Phi({\rm Frob},1)= T_{s,s'}$ $s,s'\in \mathbb{C}^\times$, and $\Phi(1,u_0)=[I,I]$, \item $pt_*$ corresponds to $(\Phi,{\rm sgn})$ with $\Phi({\rm Frob},1)= T_{i,i}$, $i=\sqrt{ -1}$ and $\Phi(1,u_0)=[I,I]$. \end{itemize}
\emph{Acknowledgement}. We would like to thank A. Premet for drawing our attention to reference \cite{CG}.
Anne-Marie Aubert, Institut de Math\'ematiques de Jussieu, U.M.R. 7586 du C.N.R.S., Paris, France\\ Email: aubert@math.jussieu.fr\\ Paul Baum, Pennsylvania State University, Mathematics Department, University Park, PA 16802, USA\\ Email: baum@math.psu.edu\\ Roger Plymen, School of Mathematics, Alan Turing building, Manchester University, Manchester M13 9PL, England\\ Email: plymen@manchester.ac.uk
\end{document} |
\begin{document}
\title{Time dilation in the oscillating decay laws of moving two-mass unstable quantum states
}
\def\bbm[#1]{\mbox{\boldmath$#1$}}
PACS: 03.65.-w, 03.30.+p
\begin{abstract} The decay of a moving system is studied in case the system is initially prepared in a two-mass unstable quantum state. The survival probability $\mathcal{P}_p(t)$ is evaluated over short and long times in the reference frame where the unstable system moves with constant linear momentum $p$. The mass distribution densities of the two mass states are tailored as power laws with powers $\alpha_1$ and $\alpha_2$ near the non-vanishing lower bounds $\mu_{0,1}$ and $\mu_{0,2}$ of the mass spectra, respectively. If the powers $\alpha_1$ and $\alpha_2$ differ, the long-time survival probability $\mathcal{P}_p(t)$ exhibits a dominant inverse-power-law decay and is approximately related to the survival probability at rest $\mathcal{P}_0(t)$ by a time dilation. The corresponding scaling factor $\chi_{p,k}$ reads $\sqrt{1+p^2/\mu_{0,k}^2}$, the power $\alpha_k$ being the lower of the powers $\alpha_1$ and $\alpha_2$. If the two powers coincide and the lower bounds $\mu_{0,1}$ and $\mu_{0,2}$ differ, the scaling relation is lost and damped oscillations of the survival probability $\mathcal{P}_p(t)$ appear over long times. By changing reference frame, the period $T_0$ of the oscillations at rest transforms into the longer period $T_p$ according to a factor which is the weighted mean of the scaling factors of each mass, with non-normalized weights $\mu_{0,1}$ and $\mu_{0,2}$. \end{abstract}
\title{Time dilation in the oscillating decay laws of moving two-mass unstable quantum states
}
\section{Introduction}\label{1}
In many experiments of high-energy physics or astrophysical phenomena, the involved unstable particles move in the laboratory frame of the observer at relativistic or ultrarelativistic velocities. For this reason, a theoretical description of the relativistic transformations of the decay laws of moving unstable quantum systems is fundamental. The literature which concerns this subject is vast. See Refs. \cite{Khalfin,BakamjianPR1961RQT,ExnerPRD1983,HEP_Stef1996,TD_StefanovichXivHep2006,GiacosaFP2012,UrbPLB2014,UrbAPB2017,GiacosaRelDecayXiv2018}, to name but a few. In the rest reference frame of the moving unstable system the nondecay or survival probability is expressed in terms of the mass distribution density (MDD). The low-mass, integral and analytic properties of the MDD determine the long-time decay of the survival probability \cite{FondaGirardiRiminiRPP1978,UrbanowskiEPJD2009,UrbanowskiCEJP2009,GEPJD2015}. In the reference frame where the unstable system moves with constant linear momentum, the survival probability has been evaluated in various ways by adopting quantum theory and special relativity \cite{HEP_Stef1996,HEP_Shir2004,HEP_Shir2006,TD_StefanovichXivHep2006,GiacosaAPPB2016,GiacosaAPPB2017,UrbAPB2017,GiacosaRelDecayXiv2018}. The transformations of the decay laws of a moving unstable quantum system, which are induced by the change of reference frame, raise questions on the appearance of the relativistic dilation of times in quantum decays. As a matter of fact, relativistic time dilation is considered to appear over the short-time exponential decay of the survival probability. The literature which concerns this subject is vast. See Refs. \cite{HEP_Stef1996,HEP_Shir2004,TD_StefanovichXivHep2006,HEP_Shir2006,GiacosaAPPB2016,GiacosaAPPB2017,UrbAPB2017}, to name but a few.
Recently, the long-time behavior of the survival probability has been analyzed for arbitrary values of the constant linear momentum of a moving unstable quantum system and for MDDs which are tailored as power laws near the (non-vanishing) lower bound of the mass spectrum \cite{Gxiv2018}. The decay laws which are detected in the rest reference frame of the unstable system are reproduced by the condition of vanishing linear momentum, while non-vanishing values of the linear momentum refer to the laboratory frame of an observer where the unstable particle is moving. Over long times, the survival probability at rest transforms in the reference frame where the unstable system moves with constant linear momentum, according to a time dilation. The scaling factor of the time dilation is determined by the lower bound of the mass spectrum and by the linear momentum. Also, the scaling factor turns out to be the ratio of the asymptotic value of the instantaneous mass \cite{UrbPLB2014,UrbAPB2017,UrbanowskiEPJD2009,UrbanowskiCEJP2009,UrbanowskiPRA1994} and of the instantaneous mass at rest of the unstable system. By considering the asymptotic value of the instantaneous mass as the effective mass of the unstable system over long times, the scaling factor coincides with the relativistic Lorentz factor of the moving system. This property allows one to interpret the time dilation which appears in the long-time decay laws, in terms of the relativistic time dilation. See Ref. \cite{Gxiv2018} for details.
Oscillating decay laws have always attracted a great deal of attention in quantum theory. Oscillating behaviors are obtained in the exponential regime of the decay of an unstable state in the referential $N$-level Friedrichs model \cite{OscFM}. Oscillatory behaviors of the decay laws are obtained if the energy distribution density of the quantum system deviates from the Breit-Wigner form \cite{GPoscillatingDecaysQM2012}. Referential examples of oscillating decays are found in neutrino and unstable meson systems. The literature which concerns this subject is vast. See Refs. \cite{Perkins,HEP_Shir2004,HEP_Shir2006,ShirokovNaumov2006} to name but a few. Oscillating decay laws are obtained in theoretical models which involve unstable two-mass states \cite{HEP_Shir2004,HEP_Shir2006,ShirokovNaumov2006}.
In Ref. \cite{HEP_Shir2006} the initial two-mass state is considered to be a superposition of two eigenstates of the Hamiltonian with different eigenvalues. In this condition the survival probability exhibits regular undamped oscillations. In the reference frame where the unstable system moves with constant linear momentum, the survival probability is obtained from the survival probability at rest via a time dilation which is not Einsteinian if the two mass eigenstates differ. See Ref. \cite{HEP_Shir2006} for details. In Ref. \cite{HEP_Shir2004}, the initial two-mass state is the superposition of two unstable states which are described by MDDs of Breit-Wigner form. The survival probability exhibits oscillations which are enveloped in an exponential decay. The relativistic dilatation of times holds if the two rest masses are approximately equal. See Ref. \cite{HEP_Shir2004} for details.
As a continuation of the scenario described above, here, we consider a moving system which is initially prepared in a two-mass unstable quantum state. The MDDs which describe the two mass states are tailored as power laws near the lower bound of the corresponding mass spectrum. In light of the study which is performed in Ref. \cite{HEP_Shir2004}, we expect damped oscillations of the survival probability in case the two MDDs are somehow different from each other. We intend to study the decay laws in a reference frame where the two-mass unstable quantum state is an eigenstate of the linear momentum with a nonvanishing eigenvalue. We aim to compare the corresponding decay laws with the decay laws which are detected in the rest reference frame of the moving unstable system. We intend to study the transformations of the survival probability and search for possible time dilation. We also aim to describe how the eventual oscillations of the survival probability transform due to the change of reference frame.
The paper is organized as follows. Section \ref{2} is devoted to the description of the decay of general moving single-mass unstable quantum systems in terms of the MDD. In Section \ref{3}, the decay laws of moving two-mass unstable quantum states are evaluated over short and long times. Section \ref{4} is devoted to the transformations of the decay laws which are due to the change of reference frame, and to the appearance of time dilation. Summary and conclusion are reported in Section \ref{5}. Demonstrations of the results are provided in the Appendix.
\section{Moving single-mass unstable quantum systems}\label{2}
For the sake of clarity and convenience, we report below some details on the transformations of the survival probability, which are due to the change of reference frame, by following Ref. \cite{UrbAPB2017}. Let the quantum state of the unstable particle belong to the Hilbert space $\mathcal{H}$, and let the state kets $|m,p\rangle$ represent the common eigenstates of the linear momentum $P$ operator with eigenvalue $p$, and of the Hamiltonian $H$ self-adjoint operator, with eigenvalue $m$. These assumptions mean that the following equalities, $P|m,p\rangle =p |m,p\rangle$ and $H|m,p\rangle =E(m,p) |m,p\rangle$, hold over the mass spectrum, and for every value of the linear momentum $p$. The spectrum of the Hamiltonian is assumed to be continuous with lower bound $\mu_0$.
Let the state ket $|\phi\rangle$ belong to the Hilbert space $\mathcal{H}$ and represent the initial unstable state of the quantum system. This state can be expressed via the eigenstates $|m,0\rangle$ of the Hamiltonian as $|\phi\rangle=\int_{\mu_0}^{\infty} \langle 0,m||\phi\rangle |m,0\rangle dm$. In the present notation, $\langle 0,m|$ and $\langle p,m|$ are the bras of the state kets $|m,0\rangle$ and $|m,p\rangle$, respectively. In the rest reference frame of the unstable system the survival amplitude reads $A_0(t)=\langle \phi| e^{-\imath H t} |\phi\rangle$, where $\imath$ is the imaginary unit, and is given by the following integral expression \cite{UrbAPB2017,UrbPLB2014,UrbanowskiEPJD2009,UrbanowskiCEJP2009,FondaGirardiRiminiRPP1978}, \begin{eqnarray} A_0(t)=\int_{\mu_0}^{\infty} \omega\left(m\right) e^{-\imath m t} dm. \label{A0Int} \end{eqnarray}
The function $\omega\left(m\right)$ is the MDD of the unstable system and reads $\omega\left(m\right)=\left|\langle 0,m||\phi\rangle\right|^2$. The MDD is determined by the initial state and by the Hamiltonian of the system via the eigenstates $|m,0\rangle$. In the rest reference frame of the moving unstable system, the probability $\mathcal{P}_0(t)$ that the decaying system remains in the initial state at the time $t$, is referred to as the survival probability at rest and reads $\mathcal{P}_0(t)=\left|A_0(t)\right|^2$.
In the reference frame where the system moves with constant linear momentum $p$, the transformed survival amplitude $A_p(t)$ is given by the expression $\langle \phi_p|e^{-\imath H t}|\phi_p\rangle$. The state $|\phi_p\rangle$ is an eigenstate of the linear momentum $P$, with eigenvalue $p$, and represents the state $|\phi \rangle$ in the reference frame where the system moves with linear momentum $p$. By adopting this notation, the state $|\phi_{0} \rangle$ represents the initial unstable state $|\phi\rangle$ of the system in the rest reference frame. The transformed survival amplitude $A_p(t)$ is obtained in various ways \cite{HEP_Stef1996,HEP_Shir2004,HEP_Shir2006,GiacosaAPPB2016,GiacosaAPPB2017,UrbAPB2017,GiacosaRelDecayXiv2018}, and results in the following fundamental integral form, \begin{eqnarray} A_p(t)=\int_{\mu_0}^{\infty} \omega\left(m \right) e^{-\imath \sqrt{ p^2+m^2}t} d m. \label{Aptdef} \end{eqnarray}
The survival probability $\mathcal{P}_p(t)$ is given by the square modulus of the above expression, $\mathcal{P}_p(t)=\left|A_p(t)\right|^2$. See Refs. \cite{HEP_Stef1996,HEP_Shir2004,HEP_Shir2006,GiacosaAPPB2016,GiacosaAPPB2017,UrbAPB2017,GiacosaRelDecayXiv2018} for details.
\subsection{Mass distribution densities}\label{21}
Unstable quantum states are usually described by MDDs which consist in a Breit-Wigner form plus, eventually, a form factor and an additional term which provides a low-mass power-law profile \cite{FondaGirardiRiminiRPP1978,UrbPLB2014,UrbAPB2017,UrbanowskiEPJD2009,UrbanowskiCEJP2009,HEP_Shir2004,GiacosaFP2012}. In Ref. \cite{Gxiv2018}, the survival amplitude $A_p(t)$, the survival probability $\mathcal{P}_p(t)$, the instantaneous mass $M_p(t)$ and the instantaneous decay rate $\Gamma_p(t)$ are evaluated, over short and long times, for an arbitrary value $p$ of the constant linear momentum of the moving unstable quantum system, in case the MDD is tailored as a nonnegative power law near the lower bound $\mu_0$ of the mass spectrum. The resulting survival probability $\mathcal{P}_p(t)$ decays as an inverse power law over long times and is related to the survival probability at rest $\mathcal{P}_0(t)$ by a scaling law. For the sake of clarity and convenience, the MDDs under study and the main results of Ref. \cite{Gxiv2018} are reported below.
The MDD of the system is described by the auxiliary function $\Omega\left(\xi\right)$, over the infinite support $\left[\right.\mu_0,\infty\left.\right)$, via the following scaling law, $\omega\left(m_s \xi \right)= \Omega\left(\xi\right)/m_s $. The scaling relation holds for every $\xi\geq \xi_0$. The parameter $m_s$ represents a general scale mass. The dimensionless variable $\xi$ is defined as $\xi=m/m_s$, and the parameter $\xi_0$ reads $\xi_0=\mu_0/m_s$. The condition of non-vanishing lower bound of the mass spectrum is equivalent to the constraint $\xi_0>0$. The power-law behavior of the MDD is given by the relation \begin{eqnarray} \Omega\left(\xi \right)= \left(\xi-\xi_0\right)^{\alpha} \Omega_0\left(\xi \right), \label{Omegaalpha} \end{eqnarray} with $\alpha\geq 0$ and $\Omega_0 \left(\xi_0 \right)>0$.
The function $\Omega_0\left(\xi \right)$ and the derivatives $\Omega^{(j)}_0\left(\xi \right)$ must be summable, for every $j=1, \ldots, \lfloor \alpha \rfloor +4$, and continuously differentiable in $\left[\mu_0,+\infty\right.\left.\right)$, for every $j=1, \ldots, \lfloor \alpha \rfloor +3$. Consequently, the limits $\lim_{\xi\to\xi_0^+}\Omega^{(j)}_0\left(\xi \right)$ exist finite and coincide with $\Omega^{(j)}_0\left(\xi_0 \right)$ for every $j=0, \ldots, \lfloor \alpha \rfloor+4$. The functions $\Omega_0^{(j)}\left(\xi \right)$ are required to decay sufficiently fast as $\xi\to+\infty$, so that the auxiliary function $\Omega\left(\xi\right)$ and the derivatives $\Omega^{(j)}\left(\xi\right)$ vanish as $\xi\to+\infty$, for every $j=0, \ldots, \lfloor \alpha \rfloor$.
\subsection{Time dilation in the decay laws of moving single-mass unstable quantum states }\label{22} The survival amplitude $A_p(t)$, given by the integral expression (\ref{Aptdef}), is determined by the auxiliary function $\Omega\left(\xi \right)$ via the following form \cite{Gxiv2018}, \begin{eqnarray} A_p(t)=\int_{\xi_0}^{\infty} \Omega\left(\xi \right) e^{-\imath \eta \tau} d \xi, \label{Aptxi} \end{eqnarray} where $\tau=m_s t$, $\eta=\sqrt{\rho^2+\xi^2}$ and $\rho=p/m_s$. The short-time behavior of the survival amplitude is obtained from the asymptotic analysis of the above integral form and leads to an approximate algebraic decay of the survival probability $\mathcal{P}_p(t)$ over short times, \begin{eqnarray} \mathcal{P}_p(t)\sim 1 - \pi_0 t^2, \label{Pptshort} \end{eqnarray} for $t \ll 1/m_s$. The coefficient $\pi_0$ is given in the Appendix. The long-time behavior of the survival probability is approximated by the form below, \begin{eqnarray} \mathcal{P}_p(t)\sim c^2_0 \left(\frac{\chi_p}{m_s t}\right)^{2\left( 1+\alpha\right)}, \label{Pplongt} \end{eqnarray} for $t \gg 1/m_s$. The constant $c_0$ is defined in the Appendix. The factor $\chi_p$ is determined by the linear momentum $p$ and by the lower bound $\mu_0$ of the mass spectrum, \begin{eqnarray} \chi_p = \sqrt{1+\frac{p^2}{\mu_0^2}}. \label{Chip} \end{eqnarray} The long-time survival probability at rest $\mathcal{P}_0(t)$ is obtained from Eq. (\ref{Pplongt}) for vanishing value of the linear momentum $p$, and reads \begin{eqnarray} \mathcal{P}_0(t)\sim c^2_0 \left(m_s t\right)^{-2\left( 1+\alpha\right)}, \label{P0longt} \end{eqnarray} for $t \gg 1/ m_s$.
For the MDDs under study, the survival probability $\mathcal{P}_p\left(t\right)$ is related to the survival probability at rest $\mathcal{P}_0\left(t\right)$, approximately over long times, by the following scaling law, \begin{eqnarray} \mathcal{P}_p\left(t\right) \sim \mathcal{P}_0\left(\frac{t}{ \chi_p}\right), \label{PpP0L} \end{eqnarray} for $t \gg 1/m_s$. The above approximate long-time scaling relation is obtained by comparing Eqs. (\ref{Pplongt}) and (\ref{P0longt}). This scaling relation describes the transformation of the survival probability at rest, over long times, in the reference frame where the system moves with constant linear momentum $p$. The transformation consists in an approximate time dilation which is determined by the scaling factor $\chi_p$. This factor coincides with the ratio of the asymptotic value of the instantaneous mass $M_p\left(\infty\right)$ and of the instantaneous mass at rest $M_0\left(\infty\right)$ of the moving unstable system, $\chi_p=M_p\left(\infty\right)/M_0\left(\infty\right)$, where $M_p\left(\infty\right)=\sqrt{\mu_0^2+p^2}$, and $M_0\left(\infty\right)=\mu_0$. The equality $\chi_p=\sqrt{1+p^2/\mu_0^2}$, suggests that the scaling factor coincides with the relativistic Lorentz factor of a mass at rest $\mu_0$ which moves with linear momentum $p$ or, equivalently, with constant velocity $1\Big/\sqrt{1+\mu_0^2\big/p^2}$. If the asymptotic value of the instantaneous mass is considered as the effective long-time mass of the unstable system, the time dilation of the survival probability coincides with the relativistic time dilation of the instantaneous mass at rest $M_0\left(\infty\right)$ which moves with constant linear momentum $p$. In this way, the time dilation which is found in the transformation of the decay laws, can be interpreted via the theory of special relativity. See Ref. \cite{Gxiv2018} for details.
\section{Moving two-mass unstable quantum states}\label{3}
At this stage, we study the decay laws of moving two-mass unstable quantum states
\cite{HEP_Shir2004}.
Let the unstable system be initially prepared in a pure state $|\phi\rangle$ which is the superposition of two unstable mass states $|\phi_1\rangle$ and $|\phi_2\rangle$, \begin{eqnarray}
|\phi\rangle= l_1|\phi_1\rangle+l_2|\phi_2\rangle. \label{2Minitial} \end{eqnarray}
The two states $|\phi_1\rangle$ and $|\phi_2\rangle$ are considered to be approximately orthogonal, $\langle \phi_1\left|\right| \phi_2\rangle\simeq 0$. Consequently, the normalization condition, $\langle \phi||\phi\rangle=1$, requires that the coefficients $l_1$ and $l_2$ fulfill the following constraint, $\left|l_1\right|^{2}+\left|l_2\right|^{2}=1$.
Let the function $\omega_j\left(m\right)$ be the MDD which describes the $j$th unstable mass state $|\phi_j\rangle$, for every $j=1,2$. This means that the following equality, $\omega_j\left(m\right)=\left|\langle 0,m| | \phi_j\rangle\right|^2$, holds over the support $\big[\mu_{0,j}, \infty\big)$. Let $\Omega_j\left(\xi\right)$ be the auxiliary function of the MDD $\omega_j\left(m\right)$ for every $j=1,2$. This means that the following relation, $\omega_j\left(m_s \xi \right)=\Omega_j\left(\xi\right)/m_s$, holds for every $\xi\geq \xi_{0,j}$, and for every $j=1,2$, where $\xi_{0,j}=\mu_{0,j}/m_s>0$. The MDDs under study belong to the class which is defined in Section \ref{21}. Consequently, the low-mass power-law behavior is given by the relation \begin{eqnarray} \Omega_j\left(\xi \right)= \left(\xi-\xi_{0,j}\right)^{\alpha_j} \Omega_{0,j}\left(\xi \right), \label{Omegaalphaj} \end{eqnarray} where $\alpha_j\geq 0$, and $\Omega_{0,j} \left(\xi_{0,j} \right)>0$, for every $j=1,2$.
In addition to the approximate orthogonality, it is also required that the time evolution of the mass states $|\phi_1\rangle$ or $|\phi_2\rangle$ is approximately orthogonal to the states $|\phi_2\rangle$ or $|\phi_1\rangle$, respectively. This means that the relation \begin{eqnarray}
\langle\phi_{p,j}\left|e^{-\imath H t}\right|\phi_{p,k} \rangle\simeq 0, \label{ort1} \end{eqnarray}
holds for every $t\geq 0$, for every $j,k=1,2$, such that $j \neq k$, and for every nonnegative real value of the linear momentum $p$. The state $|\phi_{p,l} \rangle$ is an eigenstate of the linear momentum $P$, with eigenvalue $p$, and represents the state $|\phi_{l} \rangle$ in the reference frame where the system moves with linear momentum $p$. In this notation, the state $|\phi_{0,l} \rangle$ represents the mass state $|\phi_l\rangle$, for every $l=1,2$. The assumption (\ref{ort1}) is motivated by the almost exact conservation of the CP-parity in the decay of unstable meson systems \cite{HEP_Shir2004}. In fact, the condition (\ref{ort1}) holds if the unstable states $|\phi_{1} \rangle$ and $|\phi_{2} \rangle$ have different CP-parities. The condition (\ref{ort1}) can be expressed in terms of the MDDs $\omega_{1}\left(m\right)$ and $\omega_{2}\left(m\right)$ and results in the relation below, \begin{eqnarray} \int_{\mu_{\rm M}}^{\infty} \sqrt{\omega_1\left(m\right)\omega_2\left(m\right)}e^{-\imath\left( \varphi_k\left(m\right)-\varphi_j\left(m\right)+\sqrt{m^2+p^2}t\right)}dm \simeq 0, \label{ort2} \end{eqnarray}
for every $t\geq 0$, for every $j,k=1,2$, such that $j \neq k$, and for every nonnegative real value of the linear momentum $p$. The lower extremum $\mu_{\rm M}$ of the integration is defined as the maximum between the lower bounds of the two mass spectra, $\mu_{\rm M}=\max \left\{\mu_{0,1},\mu_{0,2}\right\}$. The function $\varphi_l\left(m\right)$ is defined over the support $\big[\mu_{0,l}, \infty\big)$ as the phase of the amplitude $\langle 0,m||\phi_l\rangle$, for every $l=1,2$. This means that the following relation, $\langle 0,m||\phi_l\rangle= \sqrt{\omega_l\left(m\right)}e^{- \imath \varphi_l\left(m\right)}$, holds over the support $\big[\mu_{0,l}, \infty\big)$, for every $l=1,2$. Qualitatively, the MDDs $\omega_1\left(m\right)$ and $\omega_2\left(m\right)$ are appreciably nonvanishing over different intervals, or the function $e^{-\imath\left( \varphi_1\left(m\right)-\varphi_2\left(m\right)\right)}$ oscillates fast enough over the open interval $\big[\mu_{\rm M}, \infty\big)$, that the integral (\ref{ort2}) is approximately vanishing for every $t \geq 0$, and for every nonvanishing value of the linear momentum $p$.
We are now equipped to evaluate the survival amplitude $A_p(t)$ in case the moving unstable system is initially prepared in the two-mass state (\ref{2Minitial}). Let the MDDs $\omega_1\left(m\right)$ and $\omega_2\left(m\right)$ belong to the class which is defined in Section \ref{21}. Let the constraint (\ref{ort2}) or, equivalently, (\ref{ort1}), be fulfilled for every $j,k=1,2$, such that $j\neq k$, for every $t \geq 0$ and for every nonvanishing value of the linear momentum $p$. Under these conditions, the terms $\langle\phi_{p,1}\left|e^{-\imath H t}\right|\phi_{p,2} \rangle$ and $\langle\phi_{p,2}\left|e^{-\imath H t}\right|\phi_{p,1} \rangle$ are negligible with respect to the terms $\langle\phi_{p,1}\left|e^{-\imath H t}\right|\phi_{p,1} \rangle$ and \\$\langle\phi_{p,2}\left|e^{-\imath H t}\right|\phi_{p,2} \rangle$. Consequently, the survival amplitude $A_p(t)$, which is given by the expression $\langle\phi_{p}\left|e^{-\imath H t}\right|\phi_{p} \rangle$, results in the following form, \begin{eqnarray}
A_p(t)=\sum_{j=1}^2 \left|l_j\right|^2 A_{p,j}(t), \label{Ap12} \end{eqnarray}
where $A_{p,j}(t)=\langle\phi_{p,j}|e^{-\imath H t}
|\phi_{p,j}\rangle$, for every $j=1,2$.
\begin{figure*}
\caption{(Color online) Survival probability $\mathcal{P}_p(t)$ versus $m_s t$, for $ 0 \leq m_s t \leq 10$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq.
(\ref{Ofigs}) with $\xi_{0,1}=1$ and $\xi_{0,2}=2$, and different values of the powers $\alpha_1$ and $\alpha_2$. Curve $(a)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 0 m_s$. Curve $(b)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= m_s$. Curve $(c)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 2 m_s$. Curve $(d)$ corresponds to $\alpha_1=1$, $\alpha_2=2$ and $p= 2 m_s$. }
\label{fig1}
\end{figure*}
\subsection{Survival probability}\label{31}
At this stage, we evaluate the survival probability of the moving two-mass unstable quantum system for MDDs which are defined in Section \ref{21} and fulfill the constraint (\ref{ort2}), or, equivalently, (\ref{ort1}). We expect long-time decay laws which depart from the single-mass decays \cite{Gxiv2018} if the MDDs are ``properly'' different from each other. The ``proper'' difference means that, at least, either the power-law profiles are different, $\alpha_1 \neq\alpha_2$, or the lower bounds of the mass spectra differ, $\mu_{0,1} \neq \mu_{0,2}$. We study the case $\alpha_1 \leq\alpha_2$, below. The decay laws which correspond to the condition $\alpha_1>\alpha_2$, are obtained from the case $\alpha_1<\alpha_2$, by exchanging the indexes $1$ and $2$.
\begin{figure*}
\caption{(Color online) The survival probability $\mathcal{P}_p(t)$ versus $m_s t$, for $ 0 \leq m_s t \leq 8$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq.
(\ref{Ofigs}) for common values of the parameters $\xi_{0,1}$ and $\xi_{0,2}$ and different values of the powers, $\alpha_1=1$ and $\alpha_2=2$. Curve $(a)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$ and $p= 0 m_s$. Curve $(b)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$ and $p= m_s$. Curve $(c)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$ and $p= 0 m_s$. Curve $(d)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$ and $p= 2 m_s$. }
\label{fig2}
\end{figure*}
The short-time behavior of the survival probability is described by the following form, \begin{eqnarray} \mathcal{P}_p(t)\sim 1 - \bar{\pi}_0 t^2, \label{Pptshort2M} \end{eqnarray} for $t \ll 1/m_s$. The constant $\bar{\pi}_0$ is defined as $\bar{\pi}_0=2 \bar{a}_1-\bar{a}_0^2$. The constants $\bar{a}_0$ and $\bar{a}_1$ are given by the expressions below, \begin{eqnarray} &&\hspace{-3em}\bar{a}_0=\int_{ \mu_{\rm m}}^{\infty}\omega_{1,2}\left(m\right)\sqrt{m^2+p^2} dm, \hspace{1em}
\bar{a}_1=\frac{1}{2}\int_{ \mu_{\rm m}}^{\infty}\omega_{1,2}\left(m\right) \left(m^2+p^2\right) dm. \nonumber \end{eqnarray} The lower extremum of integration $\mu_{\rm m}$ is defined as the minimum between the lower bounds of the two mass spectra, $\mu_{\rm m}=\min \left\{\mu_{0,1},\mu_{0,2}\right\}$.
The function $\omega_{1,2}\left(m\right)$ is defined over the interval $\big[\mu_{\rm m}, \infty\big)$ as follows, $\omega_{1,2}\left(m\right)=\sum_{j=1}^2 \left|l_j\right|^2\omega_j\left(m\right)$. Each MDD vanishes outside the corresponding support.
\begin{figure*}
\caption{(Color online) Quantity $\left(1-\mathcal{P}_p(t)\right)$ versus $\left(m_s t\right)^2$, for $ 0 \leq m_s t \leq 2$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq.
(\ref{Ofigs}) with $\xi_{0,1}=1$ and $\xi_{0,2}=2$, and different values of the powers $\alpha_1$ and $\alpha_2$. Curve $(a)$ corresponds to $\alpha_1=1$, $\alpha_2=2$ and $p= 2 m_s$. Curve $(b)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 2 m_s$. Curve $(c)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= m_s$. Curve $(d)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 0 m_s$. The lines of different slope are in agreement with the short-time behavior of Eq. (\ref{Pptshort2M}). }
\label{fig3}
\end{figure*}
As far as the long-time behavior is concerned, the survival probability turns out to be the sum of a dominant inverse-power-law and a dominant oscillating decay, \begin{eqnarray} \mathcal{P}_p(t)\sim \mathcal{P}^{({\rm p.l})}_p(t)+ \mathcal{P}^{({\rm osc})}_p(t), \label{P2MLt} \end{eqnarray} for $t \gg 1/m_s$. The script $({\rm p.l})$ stands for ``power law'', while the script $({\rm osc})$ means ``oscillating''. If $\alpha_1<\alpha_2$, the dominant part of the long-time inverse-power-law decay $\mathcal{P}^{({\rm p.l})}_p(t)$ is \begin{eqnarray} \mathcal{P}^{({\rm p.l.})}_p(t)\sim \mathfrak{P}^{({\rm p.l.})}_{p,1}\left(m_s t\right)^{-2\left(1+\alpha_1\right)}, \label{P2MLt1} \end{eqnarray} for $t \gg 1/m_s$. Instead, if $\alpha_1=\alpha_2$, the dominant part of the long-time inverse-power-law decay reads \begin{eqnarray} \mathcal{P}^{({\rm p.l.})}_p(t)\sim \mathfrak{P}^{({\rm p.l.})}_{p,1,2} \left(m_s t\right)^{-2\left(1+\alpha_1\right)}, \label{P2MLt12} \end{eqnarray} for $t \gg 1/m_s$. The coefficients $\mathfrak{P}^{({\rm p.l.})}_{p,1}$ and $\mathfrak{P}^{({\rm p.l.})}_{p,1,2}$ are given by the expressions below, \begin{eqnarray} &&\hspace{-3em}\mathfrak{P}^{({\rm p.l.})}_{p,1}=
\left|l_1 \right|^4 c_{0,1}^2 \chi_{p,1}^{2\left(1+\alpha_1\right)}, \hspace{1em}\mathfrak{P}^{({\rm p.l.})}_{p,1,2}=\sum_{j=1}^2
\left|l_j \right|^4 c_{0,j}^2 \chi_{p,j}^{2 \left(1+\alpha_j\right)}, \nonumber \end{eqnarray} where $c_{0,j}=\Gamma\left(1+\alpha_j\right)\Omega_{0,j}\left(\xi_{0,j}\right)$, for every $j=1,2$. The coefficients $\chi_{p,1}$ and $\chi_{p,2}$ are defined as follows, \begin{eqnarray} \chi_{p,j} = \sqrt{1+\frac{p^2}{\mu_{0,j}^2}}, \label{Chipj} \end{eqnarray} for every $j=1,2$. This notation refers to the scaling factor $\chi_p$ which is given by Eq. (\ref{Chip}). In Section \ref{22}, we have described how the scaling factor $\chi_p$ determines the time dilation in the transformation of the survival probability for a moving single-mass unstable quantum system \cite{Gxiv2018}. Similarly, we show below how the parameters $\chi_{p,1}$ and $\chi_{p,2}$ influence the transformations of the terms $\mathcal{P}^{({\rm p.l.})}_p(t)$ and $\mathcal{P}^{({\rm osc})}_p(t)$ which are induced by the change of reference frame.
\begin{figure*}
\caption{(Color online) Quantity $\left(1-\mathcal{P}_p(t)\right)$ versus $\left(m_s t\right)^2$, for $ 0 \leq m_s t \leq 2$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq.
(\ref{Ofigs}) for common values of the parameters $\xi_{0,1}$ and $\xi_{0,2}$ and different values of the powers, $\alpha_1=1$ and $\alpha_2=2$. Curve $(a)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$ and $p= 2 m_s$. Curve $(b)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$ and $p= 0 m_s$. Curve $(c)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$ and $p= m_s$. Curve $(d)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$ and $p= 0 m_s$. }
\label{fig4}
\end{figure*}
Damped oscillations of the survival probability appear over long times if the lower bounds of the two MDDs differ, $\mu_{0,1}\neq\mu_{0,2}$. Under this condition, the dominant term of the oscillatory decay is given by the form below, \begin{eqnarray} \hspace{-1em}\mathcal{P}^{({\rm osc})}_p(t)\sim 2\mathfrak{P}^{({\rm osc})}_{p,1,2} \left(m_s t\right)^{-2-\alpha_1-\alpha_2} \cos \left(\frac{\pi}{2}\left(\alpha_2-\alpha_1\right) + \varpi_p t\right), \label{P2MLtosc} \end{eqnarray} for $t\gg 1/m_s$. The parameters $\mathfrak{P}^{({\rm osc})}_{p,1,2}$ and $\varpi_p$ are given by the following expressions, \begin{eqnarray}
&&\hspace{-1em}\mathfrak{P}^{({\rm osc})}_{p,1,2}= \Pi_{j=1}^2 \left|l_j\right|^2 c_{0,j} \chi_{p,j}^{1+\alpha_j}, \nonumber \end{eqnarray} and \begin{eqnarray} &&\hspace{-1em}\varpi_p=\mu_{0,2} \chi_{p,2}-\mu_{0,1} \chi_{p,1}. \label{o21m} \end{eqnarray}
The frequency of the damped oscillations is the absolute value $\left|\varpi_p\right|$, and coincides with the expression (\ref{o21m}), if $\mu_{0,1}<\mu_{0,2}$, or is the opposite, if $\mu_{0,2}<\mu_{0,1}$. Even if the evolution is not periodic, we naturally define the period $T_p$ of the damped oscillations in terms of the frequency of the oscillations as below, \begin{eqnarray}
T_p=\frac{2 \pi}{\left|\varpi_p\right|}. \label{Tp} \end{eqnarray} A similar expression for the frequency of the damped oscillations of the survival probability is obtained in Refs. \cite{HEP_Shir2004,HEP_Shir2006}. In fact, damped oscillations of the survival probability are found in Ref. \cite{HEP_Shir2006} for unstable meson systems in case the two mass states are described by MDDs of different Breit-Wigner forms. The frequency of the oscillations is obtained from the expression (\ref{o21m}) by substituting the lower bounds of the MDDs with the corresponding rest masses. Due to the regular oscillations, the two-mass unstable system is proposed as a quantum clock. See Ref. \cite{HEP_Shir2004} for details.
If the lower bounds of the two mass spectra coincide, $\mu_{0,1}=\mu_{0,2}$, the damped long-time oscillations of the survival probability disappear, since the dominant part of the term $\mathcal{P}^{({\rm osc})}_p(t)$ becomes an inverse power law over long times, \begin{eqnarray} \hspace{-1em}\mathcal{P}^{({\rm osc})}_p(t)\sim 2\cos \left(\frac{\pi}{2}\left(\alpha_2-\alpha_1\right) \right)\mathfrak{P}^{({\rm osc})}_{p,1,2} \left(m_s t\right)^{-2-\alpha_1-\alpha_2} , \label{P2MLtoscNnpl} \end{eqnarray} for $t \gg 1/m_s$. The above form is obtained from Eq. (\ref{P2MLtosc}) in case $\mu_{0,1}=\mu_{0,2}$.
For the sake of completeness, we consider also the case where the MDDs exhibit the same low-mass power-law profile, $\alpha_1=\alpha_2$, and the same lower bound of the mass spectrum, $\mu_{0,1}=\mu_{0,2}$. In this condition, the long-time survival probability exhibits no oscillations and results in the following dominant inverse-power-law decay, \begin{eqnarray} \hspace{0em}\mathcal{P}_p(t)\sim \mathfrak{P}_{p,0} \left(m_s t\right)^{-2 \left(1+\alpha_1\right)}, \label{Pp0} \end{eqnarray} for $t \gg 1/m_s$. The parameter $\mathfrak{P}_{p,0}$ reads \begin{eqnarray} \hspace{-2em}\mathfrak{P}_{p,0}=\left(\sum_{j=1,2}
\left|l_j\right|^2 c_{0,j}\right)^2 \chi_{p,1}^{2 \left(1+\alpha_1\right)}. \nonumber \end{eqnarray}
In summary, if the low-mass power-law profiles of the MDDs differ, $\alpha_1\neq\alpha_2$, the survival probability $\mathcal{P}_p(t)$ exhibits the long-time inverse-power-law decay $\tau^{-2\left(1+\alpha_k\right)}$, where $\alpha_k=\min\left\{\alpha_1,\alpha_2\right\}$. The decay is described by Eq. (\ref{P2MLt1}), if $\alpha_1<\alpha_2$, while, if $\alpha_2<\alpha_1$, the decay is obtained from Eq. (\ref{P2MLt1}) by exchanging the indexes $1$ and $2$. If the low-mass power-law profiles of the two MDDs coincide, $\alpha_1=\alpha_2=\alpha$, and the two mass spectra have the same lower bounds, $\mu_{0,1}=\mu_{0,2}$, the survival probability $\mathcal{P}_p(t)$ exhibits the long-time inverse power-law decay $\tau^{-2\left(1+\alpha\right)}$ which is described by Eq. (\ref{Pp0}). If the low-mass power-law profiles of the two MDDs coincide, $\alpha_1=\alpha_2=\alpha$, and the lower bounds of the mass spectra differ, $\mu_{0,1}\neq\mu_{0,2}$, the long-time survival probability $\mathcal{P}_p(t)$ is described by Eqs. (\ref{P2MLt}), (\ref{P2MLt12}) and (\ref{P2MLtosc}), and exhibits damped oscillations which are enveloped in an inverse-power-law profile, $\tau^{-2\left(1+\alpha\right)}\left(\mathfrak{P}^{({\rm p.l.})}_{p,1,2}+2\mathfrak{P}^{({\rm osc})}_{p,1,2}\cos \left( \varpi_p t\right)\right)$.
Numerical analysis of the survival probability $\mathcal{P}_p(t)$ is displayed in Figures \ref{fig1}, \ref{fig2}, \ref{fig3}, \ref{fig4}, \ref{fig5}, \ref{fig6}, \ref{fig7} and \ref{fig8}. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=2^{-1/2}$, and by toy MDDs which are given by the following auxiliary functions, \begin{eqnarray} \Omega_j\left(\xi\right)= w_{j} \xi \left(\xi^2-\xi^2_{0,j}\right)^{\alpha_j} e^{- \xi^2},\label{OmegaFigj}
\label{Ofigs} \end{eqnarray} for every $j=1,2$. The parameters $w_{1}$ and $w_2$ are normalization factors. The computation has been performed by considering various values of the parameters $\xi_{0,1}$ and $\xi_{0,2}$, of the non-negative powers $\alpha_1$ and $\alpha_2$, and of the linear momentum $p$. The presence of long-time oscillations in Figures \ref{fig1} and \ref{fig5}, which are computed for $\mu_{0,1}\neq\mu_{0,2}$, and the lack of long-time oscillations in Figure \ref{fig2}, which are computed for $\mu_{0,1}=\mu_{0,2}$, agree with the asymptotic forms of the survival probability which are described by Eqs. (\ref{P2MLt})-(\ref{Tp}). The short-time linear growth of the curves which are displayed in Figures \ref{fig3} and \ref{fig4}, is in accordance with the short-time algebraic decay of the survival probability, which is given by Eq. (\ref{Pptshort2M}). The oscillating behavior and the asymptotic lines appearing in the log-log plot of Figure \ref{fig6}, agree with the oscillatory asymptotic decay of the survival probability which is given by Eqs. (\ref{P2MLt}) and (\ref{P2MLt12})-(\ref{Tp}). The asymptotic lines which appear in the log-log plot of Figure \ref{fig7}, are in accordance with the inverse-power-law decay of the survival probability which is given by Eqs. (\ref{P2MLt}) and (\ref{P2MLt1}). The undamped oscillations which are displayed in Figure \ref{fig8}, agree with the long-time oscillatory decay of the survival probability which is given by Eqs. (\ref{P2MLt}) and (\ref{P2MLt12})-(\ref{Tp}). The oscillations are in accordance with the expression (\ref{Tp}) of the period.
\subsection{Nonrelativistic and ultrarelativistic limits}\label{32}
The long-time behavior of the survival probability in the nonrelativistic limit or, equivalently, in the rest reference frame of the moving unstable system, is obtained from Eqs. (\ref{P2MLt})-(\ref{Pp0}), for $p=0$. The survival probability $\mathcal{P}_0(t)$ is approximated over long times, $t \gg 1/m_s$, by the sum of the inverse-power-law term $\mathcal{P}^{({\rm p.l})}_0(t)$ and by the damped oscillating term $\mathcal{P}^{({\rm osc})}_0(t)$, \begin{eqnarray} \mathcal{P}_0(t)\sim \mathcal{P}^{({\rm p.l})}_0(t)+ \mathcal{P}^{({\rm osc})}_0(t), \label{P2MLtp0} \end{eqnarray} for $t \gg 1/m_s$. If the low-mass power-law profiles differ and $\alpha_1<\alpha_2$, we find \begin{eqnarray} \mathcal{P}^{({\rm p.l.})}_0(t)\sim \mathfrak{P}^{({\rm p.l.})}_{0,1}\left(m_s t\right)^{-2\left(1+\alpha_1\right)}, \label{P2MLt10} \end{eqnarray} for $t \gg 1/m_s$; while, if $\alpha_1=\alpha_2$, we obtain \begin{eqnarray} \mathcal{P}^{({\rm p.l.})}_0(t)\sim \mathfrak{P}^{({\rm p.l.})}_{0,1,2} \left(m_s t\right)^{-2\left(1+\alpha_1\right)}, \label{P2MLt12p0} \end{eqnarray} for $t \gg 1/m_s$. The coefficients $\mathfrak{P}^{({\rm p.l.})}_{0,1}$ and $\mathfrak{P}^{({\rm p.l.})}_{0,1,2}$ are given by the expressions below, \begin{eqnarray} &&\hspace{-3em}\mathfrak{P}^{({\rm p.l.})}_{0,1}=
\left|l_1 \right|^4 c_{0,1}^2, \hspace{1em}\mathfrak{P}^{({\rm p.l.})}_{0,1,2}=\sum_{j=1}^2
\left|l_j \right|^4 c_{0,j}^2 . \nonumber \end{eqnarray} The oscillating term $\mathcal{P}^{({\rm osc})}_0(t)$ reads \begin{eqnarray} \hspace{-1em}\mathcal{P}^{({\rm osc})}_0(t)\sim 2\mathfrak{P}^{({\rm osc})}_{0,1,2} \left(m_s t\right)^{-2-\alpha_1-\alpha_2} \cos \left(\frac{\pi}{2}\left(\alpha_2-\alpha_1\right) + \varpi_0 t\right), \label{P2MLtoscNnplp0} \end{eqnarray} for $t\gg 1/m_s$, if $\alpha_1\leq\alpha_2$. The parameters $\mathfrak{P}^{({\rm osc})}_{0,1,2}$ and $\varpi_0$ are given by the expressions below, \begin{eqnarray}
&&\hspace{-1em}\mathfrak{P}^{({\rm osc})}_{0,1,2}= \Pi_{j=1}^2 \left|l_j\right|^2 c_{0,j}, \nonumber \end{eqnarray} and \begin{eqnarray} &&\hspace{-1em}\varpi_0=\mu_{0,2}-\mu_{0,1} . \label{o21mp0} \end{eqnarray}
The frequency of the damped oscillations is $\left|\mu_{0,2}-\mu_{0,1}\right|$, and the period $T_0$ of the damped oscillations is \begin{eqnarray}
T_0=\frac{2 \pi}{\left|\mu_{0,2}-\mu_{0,1}\right|}. \label{T0} \end{eqnarray} If the lower bounds of the two mass spectra coincide, $\mu_{0,1}=\mu_{0,2}$, the term $\mathcal{P}^{({\rm osc})}_0(t)$ results in a dominant inverse power law over long times, \begin{eqnarray} \hspace{-1em}\mathcal{P}^{({\rm osc})}_0(t)\sim 2\cos \left(\frac{\pi}{2}\left(\alpha_2-\alpha_1\right) \right)\mathfrak{P}^{({\rm osc})}_{0,1,2} \left(m_s t\right)^{-2-\alpha_1-\alpha_2} , \label{P2MLtoscNn0} \end{eqnarray} for $t \gg 1/m_s$. If the MDDs exhibit the same low-mass power-law profile, $\alpha_1=\alpha_2$, and the same lower bound of the mass spectrum, $\mu_{0,1}=\mu_{0,2}$, the long-time survival probability reads \begin{eqnarray} \hspace{-1em}\mathcal{P}_0(t)\sim \mathfrak{P}_{0,0} \left(m_s t\right)^{-2 \left(1+\alpha_1\right)} \label{P00} \end{eqnarray} for $t \gg 1/m_s$, where $$\mathfrak{P}_{0,0}=\left(\sum_{j=1,2}
\left|l_j\right|^2 c_{0,j}\right)^2 .$$
\begin{figure*}
\caption{(Color online) Survival probability $\mathcal{P}_p(t)$ versus $m_s t$, for $ 10 \leq m_s t \leq 40$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq.
(\ref{Ofigs}) with $\xi_{0,1}=1$ and $\xi_{0,2}=2$, for different values of the powers $\alpha_1$ and $\alpha_2$. Curve $(a)$ corresponds to $\alpha_1=1$, $\alpha_2=2$ and $p= 2 m_s$. Curve $(b)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 0 m_s$. Curve $(c)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= m_s$. Curve $(d)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 2 m_s$. }
\label{fig5}
\end{figure*}
In the ultrarelativistic limit, $p \gg \max\left\{\mu_{0,1},\mu_{0,2}\right\}$, the long-time survival probability is still given by Eqs. (\ref{P2MLt})-(\ref{Pp0}), but the involved factors are approximated by the following forms, \begin{eqnarray} &&\hspace{-3em}\mathfrak{P}^{({\rm p.l.})}_{p,1}\simeq
\left|l_1 \right|^4 c_{0,1}^2 \left(\frac{p}{\mu_{0,1}}\right)^{2\left(1+\alpha_1\right)}, \hspace{1em}\mathfrak{P}^{({\rm p.l.})}_{p,1,2}\simeq \sum_{j=1}^2
\left|l_j \right|^4 c_{0,j}^2 \left(\frac{p}{\mu_{0,j}}\right)^{2 \left(1+\alpha_j\right)}, \nonumber \\
&&\hspace{-3em}\mathfrak{P}^{({\rm osc})}_{p,1,2}\simeq p^{2+\alpha_1+\alpha_2}\prod_{j=1}^{2} \left|l_j\right|^2 c_{0,j} \mu_{0,j}^{-\left(1+\alpha_j\right)}, \hspace{1em} \mathfrak{P}_{p,0}\simeq\left(\sum_{j=1,2}
\left|l_j\right|^2 c_{0,j}\right)^2 \left(\frac{p}{\mu_{0,1}}\right)^{2 \left(1+\alpha_1\right)}. \nonumber \end{eqnarray}
The frequency of the oscillations $\left|\varpi_p\right|$ vanishes and, consequently, the period $T_p$ diverges in the ultrarelativistic limit.
\begin{figure*}
\caption{(Color online) Quantity $\left|\log\left(\mathcal{P}_p(t)\right)\right|$ versus $\log\left(m_s t\right)$, for $ e^{-1} \leq m_s t \leq e^{5}$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq. (\ref{Ofigs}) with $\xi_{0,1}=1$ and $\xi_{0,2}=2$, for different values of the powers $\alpha_1$ and $\alpha_2$. Curve $(a)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 2 m_s$. Curve $(b)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= m_s$. Curve $(c)$ corresponds to $\alpha_1=0$, $\alpha_2=2$ and $p= 0 m_s$. Curve $(d)$ corresponds to $\alpha_1=1$, $\alpha_2=2$ and $p= 2 m_s$. }
\label{fig6}
\end{figure*}
\begin{figure*}
\caption{(Color online) Quantity $\left|\log\left(\mathcal{P}_p(t)\right)\right|$ versus $\log\left(m_s t\right)$, for $ e^{-1} \leq m_s t \leq e^{3}$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq. (\ref{Ofigs}) for common values of the parameters $\xi_{0,1}$ and $\xi_{0,2}$ and different values of the powers, $\alpha_1=1$ and $\alpha_2=2$. Curve $(a)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$ and $p= 2 m_s$. Curve $(b)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$ and $p= 0 m_s$. Curve $(c)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$ and $p= m_s$. Curve $(d)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$ and $p= 0 m_s$. }
\label{fig7}
\end{figure*}
\section{Time dilation in the decay laws}\label{4}
For a moving single-mass unstable quantum state the survival probability at rest $\mathcal{P}_0(t)$ transforms into the survival probability $\mathcal{P}_p(t)$, approximately, according to a scaling law, over long times \cite{Gxiv2018}. A similar scaling relation is found, under certain conditions, in the present transformation of the decay laws of moving two-mass unstable quantum states. In fact, if the low-mass power-law profiles of the MDDs $\omega_1\left(m\right)$ and $\omega_2\left(m\right)$ differ, $\alpha_1\neq\alpha_2$, the survival probability $\mathcal{P}_p(t)$, which is given by Eq. (\ref{P2MLt}), is described by the term $\mathcal{P}^{({\rm p.l.})}_p(t)$, over long times. In this case, the survival probability at rest $\mathcal{P}_0(t)$ transforms into the survival probability $\mathcal{P}_p(t)$ according to the following scaling law over long times, \begin{eqnarray} \mathcal{P}_p(t)\sim \mathcal{P}_0\left(\frac{t}{\chi_{p,k}}\right), \label{Pdilation} \end{eqnarray} for $t \gg 1/m_s$. The scaling factor $\chi_{p,k}$ is given by the expression below, \begin{eqnarray} \chi_{p,k}=\sqrt{1+\frac{p^2}{\mu_{0,k}^2}}. \label{chipk} \end{eqnarray} The index $k$ takes the value $1$ or $2$ and is selected by the lower of the powers $\alpha_1$ and $\alpha_2$.
The long-time scaling transformation (\ref{Pdilation}), which is induced by the change of reference frame, represents a dilation of times with scaling factor $\chi_{p,k}$. This scaling factor is determined by the linear momentum $p$ of the moving unstable system and by the lower bound $\mu_{0,k}$ of the MDD.
Similarly to the case of a moving single-mass unstable quantum system \cite{Gxiv2018}, the scaling factor $\chi_{p,k}$ coincides with the relativistic Lorentz factor of a mass at rest $\mu_{0,k}$ which moves with linear momentum $p$. Consequently, also for the moving two-mass unstable quantum states under study, the scaling transformation of the survival probability can be interpreted as the effect of the relativistic time dilation. This interpretation holds if the lower bound $\mu_{0,k}$ is considered to be the effective mass at rest of the unstable system over long times, since the unstable system moves with linear momentum $p$. This interpretation suggests the value $1/\sqrt{1+\mu_{0,k}^2/p^2}$ as the constant asymptotic velocity of the moving two-mass system.
For the sake of completeness, we also consider a further situation. Let $\mathcal{P}_{p^{\prime}}(t)$ be the survival probability which is detected in the reference frame where the two-mass unstable system moves with constant linear momentum $p^{\prime}$. Relation (\ref{Pdilation}) suggests that the survival probability $\mathcal{P}_{p}(t)$ is linked to the survival probability $\mathcal{P}_{p^{\prime}}(t)$, over long times, by the following scaling transformation, \begin{eqnarray} \mathcal{P}_p(t)\sim \mathcal{P}_{p^{\prime}}\left(\frac{\chi_{p^{\prime},k}}{\chi_{p,k}}t\right), \label{PpPpprime} \end{eqnarray} for $t \gg 1/m_s$. The corresponding scaling factor is the ratio $\chi_{p,k}/\chi_{p^{\prime},k}$. If the linear momentum $p$ is greater than $p^{\prime}$, i.e., $0\leq p^{\prime}<p$, the survival probability $\mathcal{P}_{p^{\prime}}\left(t\right)$ transforms into the survival probability $\mathcal{P}_p(t)$ according to a time dilation, since the scaling factor $\chi_{p,k}/\chi_{p^{\prime},k}$ is greater than unity.
\begin{figure*}
\caption{(Color online) Quantity $\left(m_s t\right)^{2+\alpha_1+\alpha_2}\mathcal{P}_p(t)$ versus $m_s t$, for $ 10 \leq m_s t \leq 50$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq. (\ref{Ofigs}) with $\xi_{0,1}=1$ and $\xi_{0,2}=2$, for common values of the powers $\alpha_1$ and $\alpha_2$. Curve $(a)$ corresponds to $\alpha_1=\alpha_2=1$ and $p= 0 m_s$. Curve $(b)$ corresponds to $\alpha_1=\alpha_2=1$ and $p= m_s$. Curve $(c)$ corresponds to $\alpha_1=\alpha_2=1$ and $p= 2 m_s$. Curve $(d)$ corresponds to $\alpha_1=\alpha_2=2$ and $p= 0 m_s$. }
\label{fig8}
\end{figure*}
In Figure \ref{fig9}, each curve tends to the same asymptotic horizontal line, at ordinate $1$, with either oscillatory or monotone behavior. This asymptotic condition confirms the scaling transformation (\ref{Pdilation}) of the survival probability and, consequently, the dilation of times which occurs by changing the reference frame. Notice that the scaling law holds if the low-mass power-law profiles of the MDDs differ, $\alpha_1 \neq \alpha_2$, both in the presence, $\mu_{0,1}\neq\mu_{0,2}$, and in the absence, $\mu_{0,1}=\mu_{0,2}$, of damped oscillations of the survival probability.
The long-time scaling transformation (\ref{Pdilation}) of the survival probability is lost if the MDDs exhibit the same power-law profiles, $\alpha_1=\alpha_2$, and the lower bounds of the mass spectra differ, $\mu_{0,1}\neq\mu_{0,2}$. Under this condition the long-time survival probability $\mathcal{P}_p(t)$ is described by Eqs. (\ref{P2MLt}), (\ref{P2MLt12}) and (\ref{P2MLtosc}), and exhibits the damped oscillations which are given by Eq. (\ref{P2MLtosc}). The period $T_p$ of the long-time damped oscillations is given by Eq. $(\ref{Tp})$ and holds for every nonnegative value $p$ of the linear momentum. Consequently, the ratio between the period $T_p$ and the period at rest $T_0$ shows how the oscillations transform, \begin{eqnarray} \frac{T_p}{T_0}=\frac{\varpi_0}{\varpi_p}=\sum_{j=1}^{2}\frac{\mu_{0,j}}{\mu_{0,1}+\mu_{0,2}} \chi_{p,j}. \label{TpT0} \end{eqnarray} The above relation suggests that the long-time oscillations of the survival probability are dilated by the change of the reference frame. In fact, the period $T_0$ of the oscillations at rest transforms in the reference frame where the unstable system moves with linear momentum $p$, according to a factor which is larger than unity. This factor is the weighted mean of the scaling factors $\chi_{p,1}$ and $\chi_{p,2}$. The non-normalized weights are the lower bounds of the mass spectra. A similar transformation of the frequency of the damped oscillations is obtained from the detailed analysis which is performed in Ref. \cite{HEP_Shir2004}. The period of those oscillations transforms according to the relativistic dilation of times if the rest masses, appearing in the Breit-Wigner forms of the MDDs, are approximately equal. See Ref. \cite{HEP_Shir2004} for details.
For the sake of completeness, consider the decay which is observed in the reference frame where the unstable system moves with constant linear momentum $p^{\prime}$. If $p> p^{\prime}\geq 0$, relation (\ref{TpT0}) suggests that the period $T_p$ of the oscillations is enlarged with respect to the period $T_{p^{\prime}}$, according to the following factor, \begin{eqnarray} \frac{T_p}{T_{p^{\prime}}}=\frac{\varpi_{p^{\prime}}}{\varpi_p}=\frac{\sum_{j=1}^{2}\mu_{0,j} \chi_{p,j}}{\sum_{j=1}^{2} \mu_{0,j} \chi_{p^{\prime},j}}. \label{TpTpprime} \end{eqnarray}
The periodic oscillations appearing in Figure \ref{fig8}, show a dilation in the period of the damped oscillations of the survival probability which occurs by changing the reference frame. The magnitudes of the dilated periods are in accordance with the factor which is given by Eq. (\ref{TpT0}).
\begin{figure*}
\caption{ \small (Color online) Ratio $\mathcal{P}_p\left(t\right)/\mathcal{P}_0\left(t/\chi_{p,1}\right)$ for $5\leq m_s t \leq 35$, and different values of the linear momentum $p$. The computed initial two-mass states are described by Eq. (\ref{2Minitial}) with $l_1=l_2=1/\sqrt{2}$. The corresponding auxiliary functions are given by Eq. (\ref{Ofigs}) with $\alpha_1=1$ and $\alpha_2=2$, for various values of the parameters $\xi_{0,1}$ and $\xi_{0,2}$. Curve $(a)$ corresponds to $\xi_{0,1}=\xi_{0,2}=1$, and $p= m_s$. Curve $(b)$ corresponds to $\xi_{0,1}=\xi_{0,2}=2$, and $p= 2 m_s$. Curve $(c)$ corresponds to $\xi_{0,1}=1$, $\xi_{0,2}=2$, and $p=m_s$. Curve $(d)$ corresponds to $\xi_{0,1}=1$, $\xi_{0,2}=2$, and $p=m_s$.
}
\label{fig9}
\end{figure*}
\section{Summary and conclusions}\label{5}
We have considered an unstable quantum state which is initially prepared in a superposition of two mass states. The decay is studied in a laboratory reference frame where the unstable system is moving with constant linear momentum $p$. Each mass state is an eigenstate of the linear momentum to the common eigenvalue $p$. The evolution of each mass state is assumed to be approximately orthogonal to the other mass state. This assumption is based upon the almost exact CP-conservation in the decay of unstable meson systems \cite{HEP_Shir2004}. The two mass states are described by MDDs which are different from each other and are tailored as power laws, with powers $\alpha_1$ and $\alpha_2$, near the non-vanishing lower bounds, $\mu_{0,1}$ and $\mu_{0,2}$, respectively, of the mass spectra. The MDDs are arbitrarily tailored over higher values of the mass variable, except for some conditions which involve the orthogonality of the two mass states, and the differentiability and integrability of the MDDs.
The survival probability $\mathcal{P}_p(t)$ has been analyzed over short and long times in the reference frame where the unstable system moves with constant linear momentum $p$. Due to the generality of the linear momentum, the ultrarelativistic and non-relativistic limits have been evaluated as particular cases. Over short times, the survival probability $\mathcal{P}_p(t)$ decays algebraically. Over long times, the survival probability exhibits a dominant inverse-power-law decay or damped oscillations which are enveloped in an inverse-power-law profile. The appearance of each regime is determined by the low-mass properties of the two MDDs.
If the powers $\alpha_1$ and $\alpha_2$ differ, the dominant inverse-power-law decay $\tau^{-2(1+\alpha_k)}$ manifests over long times. The power $\alpha_k$ is the minimum between the powers $\alpha_1$ and $\alpha_2$. In this case the long-time survival probability $\mathcal{P}_p(t)$ is approximately related to the survival probability at rest $\mathcal{P}_0(t)$ by the following scaling law, $\mathcal{P}_p(t)\sim \mathcal{P}_0 \left(t/\chi_{p,k}\right)$. This scaling property suggests that the long-time survival probability at rest transforms in the laboratory frame where the unstable system moves with linear momentum $p$, according to a dilation of times. The corresponding scaling factor $\chi_{p,k}$ is determined by the MDD with the lower power-law profile and reads $\sqrt{1+p^2/\mu_{0,k}^2}$. Consequently, the transformation of the long-time survival probability consists in a time dilation if the two MDDs are tailored with different power laws near the lower bound of the corresponding mass spectrum. The time dilation appears independently of the (non-vanishing) values of the lower bounds.
The scaling factor $\chi_{p,k}$ of the time dilation coincides with the relativistic Lorentz factor of a mass at rest $\mu_{0,k}$ which moves with linear momentum $p$. This observation suggests the following interpretation of the transformation of the decay laws in terms of the theory of special relativity. The present time dilation reproduces the relativistic dilation of times if the lower bound $\mu_{0,k}$ of the mass spectrum is accounted as the mass at rest of the unstable system, which moves with constant linear momentum $p$.
If the powers $\alpha_1$ and $\alpha_2$ coincide and the lower bounds $\mu_{0,1}$ and $\mu_{0,2}$ of the mass spectra differ, the survival probability $\mathcal{P}_p(t)$ decays over long times according to damped oscillations. The oscillations are enveloped in the inverse-power-law profile $\tau^{-2(1+\alpha)}$, where $\alpha$ is the common value of the two powers. The frequency of the oscillations is determined by the lower bounds $\mu_{0,1}$ and $\mu_{0,2}$ and by the linear momentum $p$, and reads $\left|\mu_{0,2} \chi_{p,2}-\mu_{0,1} \chi_{p,1}\right|$. If the powers $\alpha_1$ and $\alpha_2$ coincide, the long-time scaling relation which links the survival probability $\mathcal{P}_p$ and the survival probability at rest $\mathcal{P}_0(t)$, is lost. The period $T_p$ of the oscillations dilates with respect to the period $T_0$ of the oscillations at rest. Equivalently, the frequency $\left|\varpi_p\right|$ diminishes with respect to the frequency at rest $\left|\varpi_0\right|$. The factor $T_p/T_0$, or, equivalently, $\left|\varpi_0/\varpi_p\right|$, consists in the weighted average of the scaling factors $\chi_{p,1}$ and $\chi_{p,2}$. The non-normalized weights are the lower bounds $\mu_{0,1}$ and $\mu_{0,2}$ of the mass spectra.
In conclusion, we have found decay laws of a moving two-mass unstable quantum state which consist in dominant inverse power laws or in damped oscillations, over long times. The appearance of each regime is determined by the low-mass differences between the MDDs. In the long-time inverse-power-law regime the transformation of the decay laws, which is induced by the change of the reference frame, consists in a time dilation. In the long-time damped oscillatory regime the time dilation is lost. Still, the period of the oscillations transforms regularly. In both these regimes the properties of the transformed long-time decays are determined by the (constant) linear momentum of the moving unstable system and by the (non-vanishing) lower bounds of the mass spectra.
\appendix \section{Details}\label{A}
The short- and long-time behavior of the survival probability of a moving two-mass unstable quantum system are evaluated from the asymptotic forms of the survival amplitude of a single-mass unstable quantum system which moves with constant linear momentum $p$ in the laboratory frame of an observer. These asymptotic forms are reported below, for the sake of clarity, by following Ref. \cite{Gxiv2018}.
If the auxiliary function of the MDD of a single-mass unstable quantum system vanishes as $\Omega\left(\xi \right)= \mathcal{O}\left(\xi^{-1-l_0}\right)$ for $\xi\to+\infty$, with $l_0>5$, the survival amplitude decays algebraically, approximately, over short times, \begin{eqnarray} A_p(t)\sim 1-\imath a_0 t-a_1 t^2+ \imath a_2 t^3, \label{Aptshort} \end{eqnarray} for $t \ll 1/ m_s$. The constants $a_0$, $a_1$ and $a_2$ read \begin{eqnarray}
&&\hspace{-3em}a_0=\int_{\mu_0}^{\infty}\omega\left(m\right)\sqrt{p^2+m^2} dm, \hspace{1em}a_1=\frac{1}{2}\int_{\mu_0}^{\infty}\omega\left(m\right) \left(p^2+m^2\right) dm, \nonumber
\\&&\hspace{-3em}a_2=\frac{1}{6}\int_{\mu_0}^{\infty}\omega\left(m\right)\left(p^2+m^2\right)^{3/2} dm. \nonumber \end{eqnarray} The above asymptotic form of the survival amplitude provides the short-time behavior of the survival probability, which is given by Eq. (\ref{Pptshort}), with $\pi_0=2 a_1-a_0^2$.
Over long times the survival amplitude of the moving single-mass unstable quantum system results in the following expression \cite{Gxiv2018}, \begin{eqnarray} A_p(t)\sim c_0 e^{-\imath \left(\left(\pi/2\right) \left(1+\alpha\right)+\sqrt{\mu_0^2+p^2} t\right)} \left(\frac{\chi_p}{m_s t}\right)^{1+\alpha}, \label{Aplongt} \end{eqnarray} for $t \gg 1/ m_s$, where $c_0=\Gamma\left(1+\alpha\right) \Omega_0\left(\xi_0\right)$. The factor $\chi_p$ is given by Eq. (\ref{Chip}). The asymptotic form (\ref{Aplongt}) provides the long-time expressions of the survival probability $\mathcal{P}_p(t)$ and of the survival probability at rest $\mathcal{P}_0(t)$, which are given by the asymptotic forms (\ref{Pplongt}) and (\ref{P0longt}), respectively. The comparison of these expressions provides the long-time scaling property (\ref{PpP0L}).
In case the system is initially prepared in the two-mass unstable quantum state (\ref{2Minitial}), the survival amplitude is given by Eq. (\ref{Ap12}), due to the approximate vanishing of the cross terms
$\langle\phi_1\left|e^{-\imath H t}\right|\phi_2 \rangle$ and $\langle\phi_2\left|e^{-\imath H t}\right|\phi_1 \rangle$. If the MDDs which describe the mass states $|\phi_1\rangle$ and $|\phi_2\rangle$, belong to the class which is introduced in Section \ref{21}, the short-time behavior of the terms $A_{p,1}(t)$ and $A_{p,2}(t)$ is evaluated via Eq. (\ref{Aptshort}). The survival probability of the moving two-mass unstable state is obtained as the square modulus of the right hand side of Eq. (\ref{Ap12}), \begin{eqnarray}
\mathcal{P}_p(t)=\sum_{j=1}^{2} \left|l_j\right|^4 \left|A_{p,j}(t)\right|^2+2
\left|l_1\right|^2\left|l_2\right|^2 \mathrm{ Re}\left\{A_{p,1}(t)A^{\ast}_{p,2}(t)\right\}. \label{PpAp} \end{eqnarray} The short-time expression of the survival probability is found from Eqs. (\ref{Aptshort}) and (\ref{PpAp}), and results in the asymptotic form (\ref{Pptshort2M}).
Over long times, the survival probability is obtained from Eq. (\ref{PpAp}) and from the long-time forms of the terms $A_{p,1}(t)$ and $A_{p,2}(t)$, which are evaluated via the asymptotic expression (\ref{Aplongt}). In this way, we find that the long-time behavior of the survival probability is given by
the term $\left|A_{p,1}(t)\right|^2$, if $\alpha_1<\alpha_2$, and by the term $\left|A_{p,2}(t)\right|^2$, if $\alpha_2<\alpha_1$, and Eqs. (\ref{P2MLt1}) and (\ref{Chipj}) are obtained. If $\alpha_1=\alpha_2$, every term of the right-hand side of Eq. (\ref{PpAp}) contributes to the dominant part of the asymptotic expansion of the survival probability, over long times. The damped oscillations are generated by the last term which appears in the right-hand side of Eq. (\ref{PpAp}), if $\mu_{0,1}\neq\mu_{0,2}$. This term and the asymptotic form (\ref{Aplongt}) provide Eqs. (\ref{P2MLtosc})-(\ref{Tp}).
The long-time behavior of the survival probability in the non-relativistic regime, or, equivalently, at rest, is given by Eqs. (\ref{P2MLtp0})-(\ref{P00}), and is obtained from Eqs. (\ref{P2MLt})-(\ref{Pp0}), via the relation $\chi_{0,j}=1$, which holds for every $j=1,2$. The relation is obtained from Eq. (\ref{Chipj}) in case $p=0$. In the ultrarelativistic limit, $p\gg \max \left\{\mu_{0,1}, \mu_{0,2}\right\}$, the survival probability is described over long times by Eqs. (\ref{P2MLt})-(\ref{Pp0}). The involved factors, $\mathfrak{P}^{({\rm p.l.})}_{p,1}$, $\mathfrak{P}^{({\rm p.l.})}_{p,1,2}$, $\mathfrak{P}^{({\rm osc})}_{p,1,2}$, and $\mathfrak{P}_{p,0}$, are reported in Section \ref{32}. These factors are obtained from the exact forms, which are given in Section \ref{31}, via the following approximations, $\chi_{p,1}\simeq p/\mu_{0,1}$, and $\chi_{p,2}\simeq p/\mu_{0,2}$. These approximations hold for $p\gg \max \left\{\mu_{0,1}, \mu_{0,2}\right\}$.
The long-time scaling relation (\ref{Pdilation}) is found by comparing Eq. (\ref{P2MLt1}) with the form which is obtained from Eq. (\ref{P2MLt1}) for $p=0$. The long-time relation (\ref{PpPpprime}) is obtained from the scaling property (\ref{Pdilation}) by considering the values $p$ and $p^{\prime}$ of the linear momentum. If $\alpha_1=\alpha_2$, the scaling relation (\ref{Pdilation}) fails due to the presence of the oscillatory term and of the coefficients $\mathfrak{P}^{({\rm p.l.})}_{p,1,2}$ and $\mathfrak{P}^{({\rm osc})}_{p,1,2}$. The ratio (\ref{TpT0}) is obtained from Eq. (\ref{Tp}). This concludes the demonstration of the present results.
\end{document} |
\begin{document}
\title[Estimates for parabolic measures]{On the fine properties of parabolic measures\\ associated to strongly degenerate parabolic\\ operators of Kolmogorov type}
\address{Malte Litsg{\aa}rd \\Department of Mathematics, Uppsala University\\ S-751 06 Uppsala, Sweden} \email{malte.litsgard@math.uu.se}
\address{Kaj Nystr\"{o}m\\Department of Mathematics, Uppsala University\\ S-751 06 Uppsala, Sweden} \email{kaj.nystrom@math.uu.se}
\thanks{K. N was partially supported by grant 2017-03805 from the Swedish research council (VR)}
\author{Malte Litsg{\aa}rd and Kaj Nystr{\"o}m} \maketitle \begin{abstract} \noindent
We consider strongly degenerate parabolic operators of the form
\begin{eqnarray*}
\mathcal L:=\nabla_X\cdot(A(X,Y,t)\nabla_X)+X\cdot\nabla_Y-\partial_t
\end{eqnarray*}
in unbounded domains
\begin{eqnarray*}
\Omega=\{(X,Y,t)=(x,x_{m},y,y_{m},t)\in\mathbb R^{m-1}\times\mathbb R\times\mathbb R^{m-1}\times\mathbb R\times\mathbb R\mid x_m>\psi(x,y,t)\}.
\end{eqnarray*}
We assume that $A=A(X,Y,t)$ is bounded, measurable and uniformly elliptic (as a matrix in $\mathbb R^{m}$) and concerning $\psi$ and $\Omega$ we assume that $\Omega$ is what we call an (unbounded) Lipschitz domain: $\psi$ satisfies a
uniform Lipschitz condition adapted to the dilation structure and the (non-Euclidean) Lie group underlying
the operator $\mathcal L$. We prove, assuming in addition that $\psi$ is independent of the variable $y_m$, that $\psi$ satisfies an additional regularity condition formulated in terms of a Carleson measure, and additional conditions on $A$, that the associated parabolic measure is absolutely continuous with respect to a surface measure and that
the associated Radon-Nikodym derivative defines an $A_\infty$-weight with respect to the surface measure.\\
\noindent 2000 \emph{Mathematics Subject Classification.} 35K65, 35K70, 35H20, 35R03.

\noindent \textit{Keywords and phrases: Kolmogorov equation, parabolic, ultraparabolic, hypoelliptic, operators in divergence form, Lipschitz domain, doubling measure, parabolic measure, Carleson measure, $A_\infty$, Lie group.} \end{abstract}
\setcounter{equation}{0} \setcounter{theorem}{0}
\section{Background and motivation}
In this paper we are concerned with the fine properties of parabolic measures, defined with respect to appropriate domains $\Omega$, and associated to the operator
\begin{eqnarray}\label{e-kolm-nd}
\mathcal L=\mathcal L_A:=\nabla_X\cdot(A(X,Y,t)\nabla_X)+X\cdot\nabla_Y-\partial_t,
\end{eqnarray}
in $\mathbb R^{N+1}$, $N=2m$, $m\geq 1$, equipped with coordinates $(X,Y,t):=(x_1,...,x_{m},y_1,...,y_{m},t)\in \mathbb R^{m}\times\mathbb R^{m}\times\mathbb R$. We assume that $A=A(X,Y,t)=\{a_{i,j}(X,Y,t)\}_{i,j=1}^{m}$ is a real-valued $m\times m$-dimensional symmetric matrix
satisfying
\begin{eqnarray}\label{eq2}
\kappa^{-1}|\xi|^2\leq \sum_{i,j=1}^{m}a_{i,j}(X,Y,t)\xi_i\xi_j,\quad \ \ |A(X,Y,t)\xi\cdot\zeta|\leq \kappa|\xi||\zeta|,
\end{eqnarray}
for some $\kappa\in [1,\infty)$, and for all $\xi,\zeta\in \mathbb R^{m}$, $(X,Y,t)\in\mathbb R^{N+1}$. We refer to $\kappa$ as the constant of $A$. Throughout the paper we will also assume that
\begin{eqnarray}\label{eq2+}
a_{i,j}\in C^\infty(\mathbb R^{N+1})
\end{eqnarray}
for all $i,j\in\{1,...,m\}$. While the assumption in \eqref{eq2+} will only be used in a qualitative fashion, the constants of our quantitative estimates will depend on $m$ and $\kappa$.
The starting point for our analysis is the recent results concerning the local regularity of weak solutions to the equation $\mathcal L u=0$ established in \cite{Ietal}. In \cite{Ietal} the authors extended the De Giorgi-Nash-Moser (DGNM) theory, which in its original form only considers elliptic or parabolic equations in divergence form, to hypoelliptic equations with rough coefficients including the ones in \eqref{e-kolm-nd} assuming \eqref{eq2} and, implicitly, also \eqref{eq2+}. Their result is the correct scale- and translation-invariant estimates for local H{\"o}lder continuity and the Harnack inequality for weak solutions.
We recall that the prototype for the operators in \eqref{e-kolm-nd}, i.e. $A\equiv 1_m$ and the operator
$$\mathcal K:=\nabla_X\cdot \nabla_X+X\cdot\nabla_Y-\partial_t,$$
was originally introduced and studied by Kolmogorov in a famous note published in 1934 in Annals of Mathematics, see \cite{K}. Kolmogorov noted that $\mathcal K$ is an example of a degenerate parabolic operator having strong regularity properties and he proved that $\mathcal K$ has a fundamental solution which is smooth off its diagonal. As a consequence, \begin{eqnarray}\label{uu3}
\mathcal K u = f \in C^\infty \quad \Rightarrow \quad u \in C^\infty, \end{eqnarray} for every distributional solution of $\mathcal K u=f$. These days, using the terminology introduced by H{\"o}rmander, see \cite{Hm}, the property in \eqref{uu3} is stated \begin{eqnarray}\label{uu2} \mbox{$\mathcal K$ is hypoelliptic}. \end{eqnarray} Naturally, for operators as in \eqref{e-kolm-nd}, assuming only measurable coefficients and \eqref{eq2}, the methods of Kolmogorov and H{\"o}rmander can not be directly applied to establish the DGNM theory and related estimates.
The results in \cite{Ietal} represent an important achievement which paves the way for developments concerning operators as in \eqref{e-kolm-nd} in several fields of analysis and in the theory of PDEs. In this paper we contribute to the understanding of the fine properties of the Dirichlet problems for operators of the form stated in \eqref{e-kolm-nd}
in appropriate domains $\Omega\subset\mathbb R^{N+1}$ and we note that in general there is a rich interplay between the operators considered, applications and geometry. Indeed, today the Kolmogorov operator, and the more general operators of Kolmogorov-Fokker-Planck type with variable coefficients considered in this paper, play central roles in many applications in analysis, physics and finance and, depending on the application, different model cases for the local geometry of $\Omega$ may be relevant:
\begin{eqnarray}\label{dom-mod}
(i)&&\{(X,Y,t)=(x,x_{m},y,y_{m},t)\in\mathbb R^{N+1}\mid x_m>\psi_1(x,Y,t)\},\notag\\
(ii)&&\{(X,Y,t)=(x,x_{m},y,y_{m},t)\in\mathbb R^{N+1}\mid y_m>\psi_2(X,y,t)\},\\
(iii)&&\{(X,Y,t)=(x,x_{m},y,y_{m},t)\in\mathbb R^{N+1}\mid t>\psi_3(X,Y)\}.\notag \end{eqnarray} In particular, in finance and in the context of option pricing and associated free boundary problems, case $(i)$ is relevant. In kinetic theory it is relevant to restrict the particles to a container making case $(ii)$ relevant. Case $(iii)$ captures, as a special case, the initial value or Cauchy problem.
In this paper we consider solutions to $\mathcal L u=0$ in $\Omega$ assuming \eqref{eq2} and \eqref{eq2+}. Concerning $\Omega$ we restrict ourselves to case $(i)$ and unbounded domains $\Omega\subset\mathbb R^{N+1}$ of the form \begin{eqnarray}\label{dom-}
\Omega=\{(X,Y,t)=(x,x_{m},y,y_{m},t)\in\mathbb R^{N+1}\mid x_m>\psi(x,y,t)\}.
\end{eqnarray}
We impose restrictions on $\psi$ of Lipschitz character and the importance of the additional assumption that $\psi$ is independent of $y_m$ will be explained.
Assuming that $\Omega\subset\mathbb R^{N+1}$ is a (unbounded) Lipschitz domain in the sense of
Definition \ref{car} below, it follows that given $\varphi\in C_0(\partial\Omega)$, there exists a unique weak solution $u=u_\varphi$, $u\in C(\bar \Omega)$, to the Dirichlet problem \begin{equation} \label{e-bvpuu} \begin{cases}
\mathcal L u = 0 &\text{in} \ \Omega, \\
u = \varphi & \text{on} \ \partial \Omega. \end{cases} \end{equation} Furthermore, there exists, for every $(Z, t):=(X,Y,t)\in \Omega$, a unique probability measure $\omega(Z,t,\cdot)$ on $\partial\Omega$ such that \begin{eqnarray} \label{1.1xxuu} u(Z,t)=\iint_{\partial\Omega}\varphi(\tilde Z,\tilde t)\, \mathrm{d} \omega(Z,t,\tilde Z,\tilde t). \end{eqnarray} The measure $\omega(Z,t,E)$ is referred to as the parabolic measure, associated to $\mathcal L$ in $\Omega$ and evaluated at $(Z, t)\in \Omega$, of the set $E\subset\partial\Omega$. Properties of $\omega(Z,t,\cdot)$ govern the Dirichlet problem in \eqref{e-bvpuu}.
If $\Omega=\Omega_\psi\subset\mathbb R^{N+1}$ is an unbounded ($y_m$-independent) Lipschitz domain we introduce the (physical) measure
$\sigma$ on $\partial\Omega$ as
\begin{eqnarray}\label{surfac+}d\sigma(X,Y,t):=\sqrt{1+|\nabla_{x}\psi(x,y,t)|^2}\, \mathrm{d} x\, \mathrm{d} Y\, \mathrm{d} t,\ (X,Y,t)\in\partial\Omega.
\end{eqnarray} We will refer to $\sigma$ as the surface measure on $\partial\Omega$.
Two fundamental questions concerning $\omega(Z,t,\cdot)$ can be stated as follows. Under what assumptions on $A$ and $\psi$, $\Omega$ as in \eqref{dom-}, is it true that \begin{align} \label{problems} (i)&\mbox{ $\omega(Z,t,\cdot)$ is a doubling measure ?}\notag\\ (ii)&\mbox{ $\omega(Z,t,\cdot)$ satisfies scale-invariant absolute continuity estimates with respect}\\ &\mbox{ to the physical (surface) measure $\sigma$ on $\partial\Omega$ ?}\notag \end{align}
In \cite{LN} we developed a potential theory for operators $\mathcal L$ as in \eqref{e-kolm-nd}, assuming only \eqref{eq2} and \eqref{eq2+}, in unbounded ($y_m$-independent) Lipschitz domains in the sense of Definition \ref{car} below. As part of this theory we proved that $\omega(Z,t,\cdot)$ is a doubling measure, hence establishing \eqref{problems} $(i)$. The additional assumption that the function $\psi$ defining the domain is independent of $y_m$ was crucial in this part of \cite{LN}. In this paper we refine the result of \cite{LN} considerably by proving, under additional assumptions on $\psi$ (i.e., on $\Omega$) and the coefficients $A$, that $\omega(Z,t,\cdot)$ defines an $A_\infty$ weight with respect to the surface measure $\sigma$ in \eqref{surfac+}, giving a quantitative answer to \eqref{problems} $(ii)$. In the prototype case $A\equiv 1_m$, i.e. in the case of the operator $\mathcal K$, the corresponding results were established in \cite{NP} and \cite{N1}, respectively, and these seem to be the only previous results of their kind for operators of Kolmogorov type.
To put the results of \cite{LN} and this paper into perspective it is relevant to outline the progress on the corresponding problems in the case of uniformly parabolic equations in $\mathbb R^{m+1}$, i.e. in the case when all dependence on the variable $Y$ is removed in \eqref{e-kolm-nd} leaving us with the operator
\begin{eqnarray} \nabla_X\cdot(A(X,t)\nabla_X)-\partial_t.
\end{eqnarray}
In this setting the questions in \eqref{problems} have in recent times been discussed and resolved in a number of fundamental papers and we here highlight the main contributions to the field.
First, for uniformly parabolic equations with bounded measurable coefficients in Lipschitz type domains, scale and translation invariant boundary comparison principles, boundary Harnack inequalities and doubling properties of associated parabolic measures were settled in a number of fundamental papers including \cite{FS}, \cite{FSY}, \cite{SY}, \cite{FGS} and \cite{N}. This type of results find their applications in many fields of analysis including the analysis of free boundary problems, see \cite{C1}, \cite{C2} and \cite{ACS} for instance.
Second, in \cite{LS}, \cite{LM}, \cite{HL}, \cite{H}, see also \cite{HL1}, the correct notion
of time-dependent Lipschitz type cylinders, correct from the perspective of parabolic measure, parabolic singular integral operators, parabolic
layer potentials, as well as from the perspective of the Dirichlet, Neumann and Regularity problems with data in $L^p$ for the heat operator, was found. In particular, in \cite{LS}, \cite{LM} the mutual absolute continuity of the parabolic measure with respect to surface measure, and the $A_\infty$-property, was studied/established and in \cite{HL} the authors solved the Dirichlet, Neumann and Regularity problems with data in $L^2$. For further related results concerning the fine properties of parabolic measures we refer to the impressive and influential work \cite{HL2}. In \cite{HL2} the authors consider equations modeled on certain refined pull-backs of the heat operator to the parabolic upper half space $\mathbb R^{m+1}_+=\{(x,x_m,t)\mid x_m>0\}$. These pull-back operators take the form \begin{eqnarray}\label{e-kolm-ndfl+a}
\nabla_X\cdot(A\nabla_Xu)+B\nabla_Xu-\partial_tu=0,
\end{eqnarray}
where the coefficient $B$ now gives rise to a singular drift term and the regularity of $A$ and $B$ is measured using certain Carleson measures. The singular drift term complicates matters considerably as there seems to be no positive answer to \eqref{problems} $(i)$ in this case. It should be mentioned that in
\cite{NR}, \cite{DPP}, parts of \cite{HL2} have been simplified.
Third, very recently there has been significant progress in the theory of boundary value problems for second order parabolic equations (and systems) of the form
\begin{eqnarray}\label{eq1} \nabla_X\cdot(A(x,t)\nabla_Xu)-\partial_tu=0,
\end{eqnarray}
in the parabolic upper half space $\mathbb R_+^{m+1}$ with boundary determined by $x_m=0$, assuming only bounded, measurable, uniformly elliptic and complex coefficients. In~\cite{N2, CNS, N3}, the solvability for Dirichlet, Regularity and Neumann problems with data in $L^2$ were established for the class of parabolic equations \eqref{eq1} under the additional assumptions that the elliptic part is also independent of the time variable $t$ and that it has either constant (complex) coefficients, real symmetric coefficients, or small perturbations thereof. Focusing on parabolic measure, a particular consequence of Theorem 1.3 in~\cite{CNS} is the generalization of~\cite{FSa} to equations of the form \eqref{eq1} but with $A$ real, symmetric and time-independent. This analysis in \cite{N2, CNS, N3} was advanced further in~\cite{AEN}, where a first order strategy to study boundary value problems of parabolic systems with second order elliptic part in the upper half-space was developed. The outcome of~\cite{AEN} was the possibility to address arbitrary parabolic equations (and systems) as in \eqref{eq1} with coefficients depending also on time and on the transverse variable with additional transversal regularity. Finally, in \cite{AEN1} the authors consider parabolic equations as in \eqref{eq1}, assuming that the coefficients are real, bounded, measurable, uniformly elliptic, but not necessarily symmetric. They prove that the associated parabolic measure is absolutely continuous with respect to the surface measure on
$\mathbb R^{m+1}$ (i.e. $\mathrm{d} x\, \mathrm{d} t$) in the sense defined by the Muckenhoupt class $A_\infty(\mathrm{d} x\, \mathrm{d} t)$.
In light of the above outline concerning the progress on uniformly parabolic equations, \cite {LN} and the main result of this paper, Theorem \ref{Ainfty} stated below, represent important steps towards a corresponding theory concerning the Dirichlet problem for operators of Kolmogorov type with bounded and measurable coefficients in Lipschitz type domains adapted to the (non-Euclidean) group structure.
The rest of the paper is organized as follows. Section \ref{sec2} is of preliminary nature. In Section \ref{sec3} we state our main result, Theorem \ref{Ainfty}, the proof of which we start in
Section \ref{sec4}. In Section \ref{sec4} we show how Theorem \ref{Ainfty} can be reduced to three lemmas: Lemmas \ref{existcover}-\ref{lemmacruc+}. We consider the proof of Lemma \ref{lemmacruc+} a rather difficult part in the proof of Theorem \ref{Ainfty} and in Section \ref{sec4} we show that this lemma can be reduced to one key lemma: Lemma \ref{Carleson}. Section \ref{sec5} is devoted to the proof of Lemma \ref{Carleson}. Finally, in Section \ref{sec6} we prove Lemma \ref{existcover} and Lemma \ref{lemmacruc} by partially relying on a number of estimates for non-negative solutions recently established in \cite{LN}.
\setcounter{equation}{0} \setcounter{theorem}{0}
\section{Preliminaries}\label{sec2}
\subsection{Group law and metric} The natural family of dilations for $\mathcal L$, $(\delta_r)_{r>0}$, on $\mathbb R^{N+1}$, is defined by \begin{equation}\label{dil.alpha.i}
\delta_r (X,Y,t) =(r X, r^3 Y,r^2 t),
\end{equation} for $(X,Y,t) \in \mathbb R^{N +1}$, $r>0$. Our class of operators is closed under the group law \begin{equation}\label{e70}
(\tilde Z,\tilde t)\circ (Z,t)=(\tilde X,\tilde Y,\tilde t)\circ (X, Y,t)=(\tilde X+X,\tilde Y+Y-t\tilde X,\tilde t+t), \end{equation} where $(Z,t),\ (\tilde Z,\tilde t)\in \mathbb R^{N+1}$. Note that \begin{equation}\label{e70+} (Z,t)^{-1}=(X,Y,t)^{-1}=(-X,-Y-tX,-t), \end{equation} and hence \begin{equation}\label{e70++}
(\tilde Z,\tilde t)^{-1}\circ (Z,t)=(\tilde X,\tilde Y,\tilde t)^{-1}\circ (X,Y,t)=(X-\tilde X,Y-\tilde Y+( t-\tilde t)\tilde X,t-\tilde t), \end{equation} whenever $(Z,t),\ (\tilde Z,\tilde t)\in \mathbb R^{N+1}$. Given $(Z,t)=(X,Y,t)\in \mathbb R^{N+1}$ we let \begin{equation}\label{kolnormint}
\|(Z, t)\|= \|(X,Y, t)\|:=|(X,Y)|\!+|t|^{\frac{1}{2}},\ |(X,Y)|=\big|X\big|+\big|Y\big|^{1/3}. \end{equation} We recall the following pseudo-triangular inequality: there exists a positive constant ${c}$ such that \begin{eqnarray}\label{e-ps.tr.in}
\|(Z,t)^{-1}\|\le {c} \| (Z,t) \|,\quad \|(Z,t)\circ (\tilde Z,\tilde t)\| \le {c} (\| (Z,t) \| + \| (\tilde Z,\tilde t)
\|), \end{eqnarray} whenever $(Z,t),(\tilde Z,\tilde t)\in \mathbb R^{N+1}$. Using \eqref{e-ps.tr.in} it follows directly that \begin{equation} \label{e-triangularap}
\|(\tilde Z,\tilde t)^{-1}\circ (Z,t)\|\le c \, \|(Z,t)^{-1}\circ (\tilde Z,\tilde t)\|, \end{equation} whenever $(Z,t),(\tilde Z,\tilde t)\in \mathbb R^{N+1}$. Let \begin{equation}\label{e-ps.distint}
d((Z,t),(\tilde Z,\tilde t)):=\frac 1 2\bigl( \|(\tilde Z,\tilde t)^{-1}\circ (Z,t)\|+\|(Z,t)^{-1}\circ (\tilde Z,\tilde t)\|). \end{equation} Using \eqref{e-triangularap} it follows that \begin{equation}\label{e-ps.dist}
\|(\tilde Z,\tilde t)^{-1}\circ (Z,t)\|\approx d((Z,t),(\tilde Z,\tilde t))\approx \|(Z,t)^{-1}\circ (\tilde Z,\tilde t)\| \end{equation} for all $(Z,t),(\tilde Z,\tilde t)\in \mathbb R^{N+1}$ and with uniform constants. Again using \eqref{e-ps.tr.in} we also see that \begin{equation} \label{e-triangular}
d((Z,t),(\tilde Z,\tilde t))\le {c} \bigl(d((Z,t),(\hat Z,\hat t))+d((\hat Z,\hat t),(\tilde Z,\tilde t))\bigr ), \end{equation} whenever $(Z,t),(\hat Z,\hat t),(\tilde Z,\tilde t)\in \mathbb R^{N+1}$, and hence $d$ is a symmetric quasi-distance. Based on $d$ we introduce the balls \begin{equation}\label{e-BKint}
\mathcal{B}_r(Z,t):= \{ (\tilde Z,\tilde t) \in\mathbb R^{N+1} \mid d((\tilde Z,\tilde t),(Z,t)) < r\}, \end{equation}
for $(Z,t)\in \mathbb R^{N+1}$ and $r>0$. The measure of the ball $\mathcal{B}_r(Z,t)$ is $|\mathcal{B}_r(Z,t)|\approx r^{{\bf q}}$, independent of $(Z,t)$, and where $${\bf q}:=4m+2.$$ Similarly, given $(z,t)=(x,y,t)\in \mathbb R^{N-1}=\mathbb R^{m-1}\times\mathbb R^{m-1}\times\mathbb R$ we let \begin{equation}\label{e-BKint+}
\mathcal{B}_r(z,t):= \{ (\tilde z,\tilde t) \in\mathbb R^{N-1} \mid d((\tilde x,0,\tilde y, 0,\tilde t),(x,0,y,0,t)) < r\}. \end{equation}
The measure of the ball $\mathcal{B}_r(z,t)$ is $|\mathcal{B}_r(z,t)|\approx r^{{\bf q}-4}$, independent of $(z,t)$. We will by $\mathcal{B}_r(Z,t)$ always denote a ball in $\mathbb R^{N+1}$, with capital $Z$, and by $\mathcal{B}_r(z,t)$ we will always denote a ball in $\mathbb R^{N-1}$, with lowercase $z$.
\subsection{Geometry} We consider domains of the form stated in \eqref{dom-} and we here outline the assumptions we impose on the defining function $\psi$. Let
$\mathcal P\in C_0^\infty(\mathcal{B}_1(0,0))$, $\mathcal{B}_1(0,0)\subset\mathbb R^{N-1}$, be a standard approximation of the identity. Let
$$\mathcal P_\lambda(x,y,t)=\lambda^{-({\bf q}-4)}\mathcal P(\lambda^{-1}x,\lambda^{-3}y,\lambda^{-2}t),$$ for $\lambda>0$. Given a function $f$ defined on $\mathbb R^{N-1}$ we let \begin{eqnarray}\label{eq1vi} \mathcal P_{\lambda}f(x,y, t)&:=&\iint_{\mathbb R^{N-1}}f(\bar x,\bar y,\bar t)\mathcal P_\lambda((\bar x,\bar y, \bar t)^{-1}\circ (x,y, t))\, \, \mathrm{d}\bar x \, \mathrm{d}\bar y \, \mathrm{d}\bar t\notag\\ &=&\iint_{\mathbb R^{N-1}}f(\bar x,\bar y,\bar t)\mathcal P_\lambda(x-\bar x,y-\bar y+(t-\bar t)\bar x,t-\bar t)\, \, \mathrm{d}\bar x \, \mathrm{d}\bar y \, \mathrm{d}\bar t. \end{eqnarray} $\mathcal P_{\lambda}f$ represents a regularization of $f$. Given $(\tilde z, \tilde t)\in \mathbb R^{N-1}$, $\lambda>0$, we let $\gamma_\psi(\tilde z, \tilde t,\lambda)$ denote the number \begin{eqnarray*}\label{eq1apa}
\biggl (\lambda^{-{({\bf q}-4)}}\iint_{\mathcal{B}_\lambda(\tilde z, \tilde t)}\biggl |\frac {\psi(\bar x,\bar y,\bar t)-\psi(\tilde x,\tilde y,\tilde t)-\mathcal P_\lambda(\nabla_x\psi)(\tilde x,\tilde y,\tilde t)(\bar x-\tilde x)}{\lambda}\biggr |^2\, \, \mathrm{d}\bar x\, \mathrm{d}\bar y\, \mathrm{d}\bar t\biggr )^{1/2}. \end{eqnarray*}
\begin{definition}\label{car} Assume that there exist constants $0<M_1,M_2<\infty$, such that
\begin{eqnarray}\label{Lip-++a1}
|\psi(z,t)-\psi(\tilde z,\tilde t)|\leq M_1||(\tilde z,\tilde t)^{-1}\circ(z,t)||, \end{eqnarray} whenever $(z,t),\ (\tilde z,\tilde t)\in\mathbb R^{N-1}$ and such that
\begin{eqnarray}\label{Lip-++a2} \sup_{(z,t)\in\mathbb R^{N-1},\ r>0}\quad r^{-{({\bf q}-4)}}\int_0^r\iint_{\mathcal{B}_\lambda(z,t)}\gamma_\psi^2(\tilde z, \tilde t,\lambda)\, \frac {\, \mathrm{d}\tilde z\, \mathrm{d}\tilde t\, \mathrm{d}\lambda}\lambda\leq M_2. \end{eqnarray} Let $\Omega=\Omega_\psi$ be defined as in \eqref{dom-}. We say that $\Omega$, defined by a function $\psi$ satisfying \eqref{Lip-++a1}, is
a ($y_m$-independent) Lipschitz domain with constant $M_1$. We say that $\Omega$, defined by a function $\psi$ satisfying \eqref{Lip-++a1} and \eqref{Lip-++a2}, is an admissible ($y_m$-independent) Lipschitz domain with constants $(M_1,M_2)$. \end{definition}
Given $\rho>0$ and $\Lambda>0$ we introduce points of reference in $\mathbb R^{m-1}\times\mathbb R\times\mathbb R^{m-1}\times\mathbb R\times\mathbb R$, \begin{align}\label{pointsref2} A_{\rho,\Lambda}^\pm:= \left(0,\Lambda\rho,0,\mp\tfrac 2 3\Lambda\rho^3,\pm\rho^2\right),\ A_{\rho,\Lambda}&:=\left(0,\Lambda\rho,0,0,0\right). \end{align} Given $(Z_0,t_0)\in\mathbb R^{N+1}$ we let $$A_{\rho,\Lambda}^\pm(Z_0,t_0):=(Z_0,t_0)\circ A_{\rho,\Lambda}^\pm,\ A_{\rho,\Lambda}(Z_0,t_0):=(Z_0,t_0)\circ A_{\rho,\Lambda}.$$
\subsection{Dyadic grids, Whitney cubes and Carleson boxes}\label{dya} Assuming that $\Omega=\Omega_\psi\subset\mathbb R^{N+1}$ is a Lipschitz domain, with constant $M_1$, in the sense of Definition \ref{car}, we let $$\Sigma:=\partial \Omega=\{(x,x_{m},y,y_{m},t)\in\mathbb R^{N+1} \mid x_m=\psi(x,y,t)\}.$$ Then $(\Sigma,d,d\sigma)$, where the symmetric quasi-distance $d$ was introduced in \eqref{e-ps.distint}, is a space of homogeneous type in the sense of \cite{CW} with homogeneous dimension ${\bf q}-1$. Furthermore, $(\mathbb R^{N+1},d,dZdt)$ is also a space of homogeneous type in the sense of \cite{CW}, but with homogeneous dimension ${\bf q}$. By the results in \cite{Ch} there exists what we here will refer to as a dyadic grid on
$\Sigma$ having a number of important properties in relation to $d$. To formulate this we introduce, for any $(Z,t)=(X,Y,t)\in\Sigma$ and $E\subset \Sigma$, \begin{equation}
{\rm dist} ((Z,t),E):=\inf \{ d((Z,t),(\tilde Z,\tilde t)) \mid (\tilde Z,\tilde t)\in E\}, \end{equation} and we let \begin{equation}
\operatorname{diam}(E):=\sup \{ d((Z,t),(\tilde Z,\tilde t)) \mid (Z,t),\ (\tilde Z,\tilde t)\in E\}. \end{equation} Using \cite{Ch} we can conclude that there exist constants $ \alpha>0,\, \beta>0$ and $c_*<\infty$, such that for each $k \in \mathbb{Z}$ there exists a collection of Borel sets, $\mathbb{D}_k$, which we will call cubes, such that $$ \mathbb{D}_k:=\{Q_{j}^k\subset\Sigma \mid j\in \mathfrak{I}_k\},$$ where $\mathfrak{I}_k$ denotes some index set depending on $k$, satisfying \begin{eqnarray}\label{cubes} (i)&&\mbox{$\Sigma=\cup_{j}Q_{j}^k\,\,$ for each $k\in{\mathbb Z}$.}\notag\\ (ii)&&\mbox{If $m\geq k$ then either $Q_{i}^{m}\subset Q_{j}^{k}$ or $Q_{i}^{m}\cap Q_{j}^{k}=\emptyset$.}\notag\\ (iii)&&\mbox{For each $(j,k)$ and each $m<k$, there is a unique $i$ such that $Q_{j}^k\subset Q_{i}^m$.}\notag\\ (iv)&&\mbox{$\operatorname{diam}\big(Q_{j}^k\big)\leq c_* 2^{-k}$.}\notag\\ (v)&&\mbox{Each $Q_{j}^k$ contains $\Sigma\cap \mathcal{B}_{\alpha2^{-k}}(Z^k_{j},t^k_{j})$ for some $(Z^k_{j},t^k_j)\in\Sigma$.}\notag\\ (vi)&&\mbox{$\sigma(\{(Z,t)\in Q^k_j\mid{\rm dist}((Z,t),\Sigma\setminus Q^k_j)\leq \rho \,2^{-k}\big\})\leq c_*\,\rho^\beta\,\sigma(Q^k_j),$}\notag\\ &&\mbox{for all $k,j$ and for all $\rho\in (0,\alpha)$.} \end{eqnarray} In the setting of a general space of homogeneous type, this result is due to Christ \cite{Ch}, with the dyadic parameter $1/2$ replaced by some constant $\delta \in (0,1)$. In fact, one may always take $\delta = 1/2$, see \cite[Proof of Proposition 2.12]{HMMM}. We shall denote by $\mathbb{D}=\mathbb{D}(\Sigma)$ the collection of all $Q^k_j$, i.e. 
$$\mathbb{D} := \cup_{k} \mathbb{D}_k.$$ Note that \eqref{cubes} $(iv)$ and $(v)$ imply that for each cube $Q\in\mathbb{D}_k$, there is a point $(Z_Q,t_Q)=(X_Q,Y_Q,t_Q)\in \Sigma$, and a ball $\mathcal{B}_{r}(Z_Q,t_Q)$ such that $r\approx 2^{-k} \approx {\rm diam}(Q)$ and \begin{equation}\label{cube-ball} \Sigma\cap\mathcal{B}_{r}(Z_Q,t_Q)\subset Q \subset \Sigma\cap \mathcal{B}_{cr}(Z_Q,t_Q),\end{equation} for some uniform constant $c$. We will denote the associated surface ball by \begin{equation}\label{cube-ball2} \Delta_Q:= \Sigma\cap \mathcal{B}_{r}(Z_Q,t_Q)\end{equation} and we shall refer to the point $(Z_Q,t_Q)$ as the center of $Q$. Given a dyadic cube $Q\subset\Sigma$, we define its $\gamma$ dilate by \begin{equation}\label{dilatecube} \gamma Q:= \Sigma\cap \mathcal{B}_{\gamma \operatorname{diam}(Q)}(Z_Q,t_Q). \end{equation} For a dyadic cube $Q\in \mathbb{D}_k$, we let $\ell(Q) = 2^{-k}$, and we shall refer to this quantity as the length of $Q$. Clearly, $\ell(Q)\approx \operatorname{diam}(Q).$ For a dyadic cube $Q \in \mathbb{D}$, we let $k(Q)$ denote the dyadic generation to which $Q$ belongs, i.e. we set $k = k(Q)$ if $Q\in \mathbb{D}_k$, thus, $\ell(Q) =2^{-k(Q)}$. For any $Q\in \mathbb D(\Sigma)$, we set $\mathbb D_Q:= \{Q'\in\mathbb D \mid Q'\subset Q\}\,.$
Using that also $(\mathbb R^{N+1},d,dZdt)$ is a space of homogeneous type we see that we can partition $\Omega$ into a collection of (closed) dyadic Whitney cubes $\{I\}$, in the following denoted $\mathcal{W}=\mathcal W(\Omega)$, such that the cubes in $\mathcal{W}$ form a covering of $\Omega$ with non-overlapping interiors, and \begin{equation}\label{eqWh1} 4\, {\rm{diam}}\,(I)\leq \operatorname{dist}(4 I,\Sigma) \leq \operatorname{dist}(I,\Sigma) \leq 40 \, {\rm{diam}}\,(I)\end{equation} and \begin{equation}\label{eqWh2}\operatorname{diam}(I_1)\approx \operatorname{diam}(I_2), \mbox{ whenever $I_1$ and $I_2$ touch.} \end{equation} Given $I\in \mathcal{W}$ we let $\ell(I)$ denote its size. Given $Q\in \mathbb D(\Sigma)$ we set \begin{equation}\label{eq2.1} \mathcal W_Q:= \left\{I\in \mathcal W\mid \,100^{-1} \ell(Q)\leq \ell(I) \leq 100\,\ell(Q),\, {\rm and}\, \operatorname{dist}(I,Q)\leq 100\, \ell(Q)\right\}. \end{equation} We fix a small, positive parameter $\tau$, and given $I\in\mathcal W$, we let \begin{equation}\label{eq2.3*}I^* =I^*(\tau) := (1+\tau)I \end{equation} denote the corresponding ``fattened" Whitney cube. Choosing $\tau$ small we see that the cubes $I^*$ will retain the usual properties of Whitney cubes; in particular, that $$\operatorname{diam}(I) \approx \operatorname{diam}(I^*) \approx \operatorname{dist}(I^*,\Sigma) \approx \operatorname{dist}(I,\Sigma)\,.$$ We then define a Whitney region with respect to $Q$ by setting \begin{equation}\label{eq2.3} U_Q:= \bigcup_{I\in \mathcal W_Q}I^*\,. \end{equation} Given $Q\in \mathbb D(\Sigma)$ we let \begin{equation}\label{eq2.box-} T_Q:={\rm int}\left( \bigcup_{Q'\in \mathbb D_Q} U_{Q'}\right), \end{equation} denote the Carleson box associated to $Q$. Furthermore, given $\gamma\geq 1$ we let \begin{equation}\label{eq2.box} T_{\gamma Q}:={\rm int}\left( \bigcup_{Q':\ Q'\cap (\gamma Q)\neq \emptyset} U_{Q'}\right), \end{equation} denote the Carleson set associated to the $\gamma$ dilate of $Q$. 
Finally, given $Q\in\mathbb{D}$ and $\Lambda>0$, we let \begin{equation}\label{pointsref2apa} \begin{split} A_{Q,\Lambda}^\pm&:=(Z_Q,t_Q)\circ (0,\Lambda l(Q),0,\mp\frac 2 3\Lambda l(Q)^3,\pm l(Q)^2),\\ A_{Q,\Lambda}&:=(Z_Q,t_Q)\circ (0,\Lambda l(Q),0,0,0). \end{split} \end{equation}
\subsection{Weak solutions} Consider $U_X\times U_Y\times J\subset\mathbb R^{N+1}$ with $U_X\subset\mathbb R^{m}$, $U_Y\subset\mathbb R^{m}$ being bounded domains, i.e., open, connected and bounded sets, and $J=(a,b)$ with $-\infty<a<b<\infty$. Then
$u$ is said to be a weak solution to the equation
\begin{eqnarray}\label{e-kolm-nd-}
\mathcal L u=\nabla_X\cdot(A(X,Y,t)\nabla_Xu)+X\cdot\nabla_Yu-\partial_tu=0,
\end{eqnarray}
in $U_X\times U_Y\times J\subset\mathbb R^{N+1}$ if
\begin{eqnarray}\label{weak1}
u\in L_{Y,t}^2(U_Y\times J,H_X^1(U_X)),
\end{eqnarray}
and
\begin{eqnarray}\label{weak2}
-X\cdot\nabla_Yu+\partial_tu\in L_{Y,t}^2(U_Y\times J,H_X^{-1}(U_X)),
\end{eqnarray}
and if $\mathcal L u=0$ in the sense of distributions, i.e.,
\begin{eqnarray}\label{weak3}
\iiint_{}\ \bigl(A(X,Y,t)\nabla_Xu\cdot \nabla_X\phi+(X\cdot \nabla_Y\phi)u-u\partial_t\phi\bigr )\, \, \mathrm{d} X \, \mathrm{d} Y \, \mathrm{d} t=0,
\end{eqnarray}
whenever $\phi\in C_0^\infty(U_X\times U_Y\times J)$.
We say that $u$ is a weak solution to the equation $\mathcal L u=0$ in $\Omega$ if $u$ is a weak solution to $\mathcal L u=0$ in $U_X\times U_Y\times J\subset\mathbb R^{N+1}$, where $U_X\subset\mathbb R^{m}$, $U_Y\subset\mathbb R^{m}$ are bounded domains, and $J=(a,b)$ with $-\infty<a<b<\infty$, whenever $U_X\times U_Y\times J$ is compactly contained in $\Omega$.
\setcounter{equation}{0} \setcounter{theorem}{0}
\section{Statement of the main result}\label{sec3} Assume that $\Omega=\Omega_\psi\subset\mathbb R^{N+1}$ is a Lipschitz domain, with constant $M_1$, in the sense of Definition \ref{car}, and recall that $\mathbb{D}$ is the set of dyadic cubes on $\partial\Omega$. Given $Q\in\mathbb{D}$, recall the definitions of $l(Q)$, $(Z_Q,t_Q)$, $\gamma Q$, $T_Q$, $A_{Q,\Lambda}^\pm$, introduced in Subsection \ref{dya}.
Using this notation a version of one of the main results (namely, Theorem 3.6) proved in \cite{LN} can be stated as follows.
\begin{theorem}\label{dub} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Assume that $A$ satisfies \eqref{eq2} with constant $\kappa$, \eqref{eq2+} and that \begin{eqnarray}\label{struct} A(X,Y,t)=A(x,x_m,y,y_m,t)=A(x,x_m,y,t) \end{eqnarray} whenever $(x,x_{m},y,y_{m},t)\in\mathbb R^{N+1}$, i.e. also $A$ is assumed to be independent of the variable $y_m$. Then there exist
$\Lambda=\Lambda(m,M_1)$, $1\leq \Lambda<\infty$, $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, such that the following is true. Consider $Q_0\in\mathbb{D}$ and let $\omega(\cdot):=\omega\bigl (A_{cQ_0,\Lambda}^+,\cdot\bigr )$. Then \begin{eqnarray*} \omega\bigl (2Q\bigr )\leq c\omega\bigl (Q\bigr ) \end{eqnarray*} for all $Q\in\mathbb{D}$ such that $4Q\subset Q_0$. \end{theorem}
Given an unbounded ($y_m$-independent) Lipschitz domain $\Omega=\Omega_\psi\subset\mathbb R^{N+1}$ we let $\delta=\delta(X,Y,t)$ denote the distance from $(X,Y,t)\in\Omega$ to $\partial\Omega$, i.e. \begin{equation}\label{deltadist}
\delta(X,Y,t)=\min\{d((X,Y,t),(\tilde X,\tilde Y,\tilde t)) \mid (\tilde X,\tilde Y,\tilde t)\in\partial\Omega\}. \end{equation} Consider the following measures $\mu_1$ and $\mu_2$ defined on $\Omega$: \begin{equation}\label{measure1} \begin{split}
\, \mathrm{d}\mu_1(X,Y,t)&:=|\nabla_XA(X,Y,t)|^2\delta(X,Y,t)\ \, \mathrm{d} X\, \mathrm{d} Y\, \mathrm{d} t,\\
\, \mathrm{d}\mu_2(X,Y,t)&:=|(X\cdot\nabla_Y-\partial_t)A(X,Y,t)|^2\delta^3(X,Y,t)\ \, \mathrm{d} X\, \mathrm{d} Y\, \mathrm{d} t. \end{split} \end{equation} We say that $\mu_1$ and $\mu_2$ are Carleson measures on $\Omega$ with constant $\Gamma$ if \begin{equation}\label{measure2} \begin{split} \sup_{Q\in\mathbb{D}}\quad l(Q)^{-{({\bf q}-1)}}\iiint_{T_Q}\, \mathrm{d}\mu_1(\tilde X,\tilde Y,\tilde t)&\leq\Gamma,\\ \sup_{Q\in\mathbb{D}}\quad l(Q)^{-{({\bf q}-1)}}\iiint_{T_Q}\, \mathrm{d}\mu_2(\tilde X,\tilde Y,\tilde t)&\leq\Gamma. \end{split} \end{equation}
The following is the main result proved in this paper. \begin{theorem}\label{Ainfty} Assume that $\Omega\subset\mathbb R^{N+1}$ is an (unbounded) admissible ($y_m$-independent) Lipschitz domain with constants $(M_1,M_2)$ in the sense of Definition \ref{car}. Assume that $A$ satisfies \eqref{eq2} with constant $\kappa$, \eqref{eq2+} and \eqref{struct}, i.e. also $A$ is independent of $y_m$. Assume that the measures $\mu_1$ and $\mu_2$ defined in \eqref{measure1} are Carleson measures on $\Omega$ with constant $\Gamma$ in the sense of \eqref{measure2}. Then there exist
$\Lambda=\Lambda(m,M_1)$, $1\leq \Lambda<\infty$, $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, $\tilde c=\tilde c(m,\kappa, M_1,M_2,\Gamma)$, $1\leq \tilde c<\infty$,
$\eta=\eta(m,\kappa,M_1,M_2,\Gamma)$, $0<\eta<1$, such that the following is true. Consider $Q_0\in\mathbb{D}$ and let $\omega(\cdot):=\omega\bigl (A_{cQ_0,\Lambda}^+,\cdot\bigr )$. Then
\begin{eqnarray*}
\quad\tilde c^{-1}\biggl (\frac{ \sigma ( E ) }{ \sigma(Q)}\biggr )^{1/\eta}\leq \frac {\omega\bigl (E\bigr )}{\omega\bigl ( Q\bigr )}\leq \tilde c\biggl (\frac{ \sigma ( E ) }{ \sigma(Q)}\biggr )^\eta \end{eqnarray*} whenever $E\subset Q$ for some $Q\in\mathbb{D}$ such that $Q\subseteq Q_0$. \end{theorem}
As mentioned before, in the prototype case $A\equiv 1_m$, i.e. in the case of the operator $\mathcal K$, Theorem \ref{Ainfty} is proved in \cite{N1} and this seems to be the only previous result of its kind for operators of Kolmogorov type.
\section{Proof of Theorem \ref{Ainfty}: preliminary reductions}\label{sec4}
Using Lemma \ref{lemmacruc-} and Lemma \ref{T:doubling} below it follows that it suffices to prove Theorem \ref{Ainfty} with $Q=Q_0$. In the following we let $Q_0\in \mathbb{D}$ and we let $\omega(\cdot)$ be as in the statement of Theorem \ref{Ainfty}. Our proof of Theorem \ref{Ainfty} is based on ideas introduced in \cite{KKPT} in the context of elliptic measures and we will use the notion of good $\epsilon_0$ covers.
\begin{definition}\label{deff1} Let $E\subset {Q_0}$ be given, let $\epsilon_0\in (0,1)$ and let $k$ be an integer. A good $\epsilon_0$ cover of $E$, of length $k$, is a collection $\{\mathcal{O}_l\}_{l=1}^k$ of nested (relatively) open subsets of ${Q_0}$, together with collections $\mathcal F_l=\{\Delta_i^l\}_i\subset Q_0$, $\Delta_i^l\in \mathbb D$, such that \begin{eqnarray}\label{cover1} E\subset \mathcal{O}_k\subset\mathcal{O}_{k-1}\subset....\subset\mathcal{O}_1\subset Q_0, \end{eqnarray} \begin{eqnarray}\label{cover2} \mathcal{O}_l=\bigcup_{\mathcal F_l}\Delta_i^l, \end{eqnarray} and \begin{eqnarray}\label{cover3}\omega(\mathcal{O}_l\cap \Delta_i^{l-1})\leq \epsilon_0\omega(\Delta_i^{l-1}),\mbox{ for all }\Delta_i^{l-1}\in\mathcal F_{l-1}. \end{eqnarray} \end{definition}
Using the notion of good $\epsilon_0$ covers we can reduce the proof of Theorem \ref{Ainfty} to the proof of the following three lemmas.
\begin{lemma}\label{existcover} Let $E\subset {Q_0}$ be given, consider $\epsilon_0\in (0,1)$ and let $k$ be a positive integer. There exist $\gamma=\gamma(m,\kappa,M_1)$, $0<\gamma\ll 1$, and $\Upsilon=\Upsilon(m,\kappa,M_1)$, $1\ll\Upsilon$, such that if we let $\delta_0=\gamma(\epsilon_0/\Upsilon)^k$, and if $\omega(E)\leq\delta_0$, then $E$ has a good $\epsilon_0$ cover of length $k$. \end{lemma}
\begin{lemma}\label{lemmacruc} Let $\Upsilon\gg 1$ be given and consider $\delta_0\in (0,1)$. Assume that $E\subset {Q}_0$ with $\omega(E)\leq\delta_0$. If $\delta_0=\delta_0(m,\kappa,M_1,\Upsilon)$ is chosen sufficiently small, then there exists a Borel set $S\subset\partial\Omega$, and a constant $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, such that if we let $u(Z,t):=\omega(Z,t,S)$, then
$$\Upsilon^2\sigma(E)\leq c \iiint_{T_{cQ_0}}|\nabla_Xu|^2\delta\, \, \mathrm{d} Z\, \mathrm{d} t.$$ Here $\delta=\delta(Z,t)$ is as in \eqref{deltadist}, i.e. the distance from $(Z,t)\in \Omega$ to $\Sigma$, and $T_{cQ_0}$ is the Carleson set associated to $cQ_0$ as defined in \eqref{eq2.box}. \end{lemma}
\begin{lemma}\label{lemmacruc+} Let $u(Z,t):=\omega(Z,t,S)$ and $c$ be as stated in Lemma \ref{lemmacruc}. Then there exists $\tilde c=\tilde c(m,\kappa, M_1,M_2,\Gamma)$, $1\leq \tilde c<\infty$, such that
$$\iiint_{ T_{cQ_0}}|\nabla_Xu|^2\delta\, \, \mathrm{d} Z\, \mathrm{d} t\leq \tilde c\sigma(Q_0).$$ \end{lemma}
The proofs of Lemmas \ref{existcover}-\ref{lemmacruc+} are given in the forthcoming sections of the paper. To prove Theorem \ref{Ainfty} using these auxiliary lemmas, we note that first using Lemma \ref{lemmacruc} and Lemma \ref{lemmacruc+} we can, for $\Upsilon\gg 1$ given, choose $\delta_0=\delta_0(m,M_1,\Upsilon)$, so that if $E\subset {Q_0}$ with $\omega(E)\leq\delta_0$, then \begin{eqnarray} \Upsilon^2\sigma(E)\leq \hat c\sigma(Q_0), \end{eqnarray} for some $\hat c=\hat c(m,\kappa, M_1,M_2,\Gamma)$, $1\leq \hat c<\infty$. In particular, we can conclude that there exists, for every $\varepsilon>0$, a positive $\delta_0=\delta_0(m,\kappa,M_1,M_2,\Gamma,\varepsilon)$ such that \begin{eqnarray}\omega(E)\leq\delta_0\leq c\delta_0\omega({Q_0})\implies\sigma(E)\leq \varepsilon\sigma({Q_0}), \end{eqnarray} where we have also applied Lemma \ref{bourg} stated below. Theorem \ref{Ainfty} now follows from the doubling property of $\omega$, see Lemma \ref{T:doubling}, and the classical result in \cite{CF}.
The rest of the paper is devoted to the proofs of Lemmas \ref{existcover}-\ref{lemmacruc+} and we consider the proof of Lemma \ref{lemmacruc+} a rather difficult part in the proof of Theorem \ref{Ainfty}. We here show how to reduce Lemma \ref{lemmacruc+} to a core technical estimate. To prove Lemma \ref{lemmacruc+} we can without loss of generality assume that $(Z_{Q_0},t_{Q_0})=(0,0)$ and we let $\rho_0:=l(Q_0)$. Throughout the rest of the paper we let $\mathcal P$ denote a parabolic approximation of the identity: $\mathcal P\in C_0^\infty(\mathcal B_1(0,0))$, $\mathcal B_1(0,0)\subset\mathbb R^{N-1}$, $\mathcal P\geq 0$ is real-valued, and $\iint \mathcal P\, \, \mathrm{d} z \, \mathrm{d} t=1$. We will assume, as we may by imposing a product structure on $\mathcal P$, that $\mathcal P$ is even in the sense that \begin{eqnarray}\label{even} \iint x_i\mathcal P(z,t)\, \, \mathrm{d} z\, \mathrm{d} t=\iint y_i\mathcal P(z,t)\, \, \mathrm{d} z\, \mathrm{d} t=\iint t\mathcal P(z,t)\, \, \mathrm{d} z\, \mathrm{d} t=0 \end{eqnarray} for $i\in\{1,...,m-1\}$. We set $\mathcal P_\lambda(z,t)=\mathcal P_\lambda(x,y,t)=\lambda^{-{({\bf q}-4)}}\mathcal P(\lambda^{-1}x,\lambda^{-3}y,\lambda^{-2}t)$ whenever $\lambda>0$. Given $\mathcal P$ we let $\mathcal P_\lambda$ define a convolution operator as introduced in \eqref{eq1vi}. To prove Lemma \ref{lemmacruc+} we need to enable partial integration and we therefore use the mapping,
\begin{eqnarray}\label{dom+ggaint}
U \owns (w,w_m,y,y_m,t) \mapsto (w,w_m+\mathcal P_{\gamma w_m}\psi(w,y,t),y,y_m,t),
\end{eqnarray} where
\begin{eqnarray}\label{dom+gint}
U&=&\{(W,Y,t)=(w, w_m,y,y_m, t)\in\mathbb R^{m-1}\times\mathbb R\times\mathbb R^{m-1}\times\mathbb R\times\mathbb R \mid w_m>0\}.
\end{eqnarray} We will need the following two lemmas proved in \cite{N1}. Lemma \ref{carlemma-} and Lemma \ref{carlemma} correspond to Lemma 2.1 and Lemma 2.2 in \cite{N1}, respectively.
\begin{lemma}\label{carlemma-} Let $\psi$ be a function satisfying \eqref{Lip-++a1} for some constant $0<M_1<\infty$, let
$\gamma\in (0,1)$ and let $\mathcal P_{\gamma w_m}\psi$ be defined as above for $w_m>0$. Let
$\theta,\tilde\theta\geq 0$ be integers and let $(\phi_1,..,\phi_{m-1})$ and $(\tilde\phi_1,..,\tilde\phi_{m-1})$ denote multi-indices.
Let $\ell:=(\theta+|\phi|+3|\tilde\phi|+2\tilde\theta)$. Then
\begin{eqnarray}\label{con1}
\biggl |\frac {\partial^{\theta+|\phi|+|\tilde\phi|}}{\partial w_m^{\theta}\partial w^{\phi}\partial y^{\tilde\phi}} \biggl ((w\cdot\nabla_y-\partial_t)^{\tilde \theta}(\mathcal P_{\gamma w_m}\psi(w,y,t)) \biggr )\biggr |\leq c(m,\ell)\gamma^{1-(\ell-\theta)}w_m^{1-\ell}M_1,
\end{eqnarray}
whenever $(W,Y,t)\in U$.
\end{lemma}
\begin{lemma}\label{carlemma} Let $\psi$ be a function satisfying \eqref{Lip-++a1} and \eqref{Lip-++a2} for some constants $0<M_1,M_2<\infty$, let
$\gamma\in (0,1)$ and let $\mathcal P_{\gamma w_m}\psi$ be defined as above for $w_m>0$. Let
$\theta,\tilde\theta\geq 0$ be integers and let $(\phi_1,..,\phi_{m-1})$ and $(\tilde\phi_1,..,\tilde\phi_{m-1})$ denote multi-indices. Let $\ell:=(\theta+|\phi|+3|\tilde\phi|+2\tilde\theta)$. Let
\begin{eqnarray*}
\, \mathrm{d}\mu=\, \mathrm{d}\mu(W,Y,t):=\biggl |\frac {\partial^{\theta+|\phi|+|\tilde\phi|}}{\partial w_m^{\theta}\partial w^{\phi}\partial y^{\tilde\phi}} \biggl (( w\cdot\nabla_y-\partial_t)^{\tilde \theta}(\mathcal P_{\gamma w_m}\psi( w,y,t)) \biggr )\biggr |^2 w_m^{2\ell-3} \, \mathrm{d} W\, \mathrm{d} Y\, \mathrm{d} t,
\end{eqnarray*}
be defined on $U$. Then
\begin{eqnarray*}\label{con2}
\mu(U\cap \mathcal{B}_r)\leq c(m,\ell,M_1,M_2)\gamma^{2-2(\ell-\theta)}r^{{\bf q}-1}, \end{eqnarray*} for all balls $\mathcal{B}_r=\mathcal{B}_r(Z_0,t_0)\subset\mathbb R^{N+1}$ centered on $\partial U$, $r>0$.
\end{lemma}
Using Lemma \ref{carlemma-} we see that there exists $\hat\gamma=\hat\gamma(m,M_1)\in (0,1)$ such that if $\gamma\in (0,\hat\gamma)$ then
\begin{eqnarray}\label{1-1}\frac 1 2\leq 1+\frac {\partial}{\partial w_m}(\mathcal P_{\gamma w_m}\psi)(w,y,t)\leq \frac 32,
\end{eqnarray}
whenever $(w,w_m,y,y_m,t)\in U$. This implies, in particular, that the map in \eqref{dom+ggaint} is one-to-one.
Defining $v$ as the pull-back of $u(Z,t):=\omega(Z,t,S)$ under the map in \eqref{dom+ggaint}, i.e.
\begin{eqnarray}v(w,w_m,y,y_m,t):=u(w,w_m+\mathcal P_{\gamma w_m}\psi(w,y,t),y,y_m,t),
\end{eqnarray}
we see that to prove Lemma \ref{lemmacruc+} it suffices to prove that
\begin{eqnarray}\label{keyestalla}
I_\epsilon :=\iiint_{\mathbb R^{N+1}_+}|\nabla_{W}v|^2\Psi_\epsilon^2w_m\, \, \mathrm{d} W\, \mathrm{d} Y\, \mathrm{d} t \leq c(m,\kappa, M_1, M_2,\Gamma)\rho_0^{{\bf q}-1},
\end{eqnarray}
for $\epsilon>0$ small, where $$\mathbb R^{N+1}_+=\mathbb R^{m-1}\times\lbrace w_m>0 \rbrace\times \mathbb R^{m}\times \mathbb R,$$
and where $\Psi_\epsilon$ is a smooth cut-off function such that $\Psi_\epsilon\equiv 1$ on $$([-c,c]^m\times [-c,c]^m\times [-c,c])\cap \{w_m\geq 2\epsilon\},$$ and $\Psi_\epsilon\equiv 0$ on $$\bigl (([-2c,2c]^m\times [-2c,2c]^m\times [-2c,2c])\setminus ([-c,c]^m\times [-c,c]^m\times [-c,c])\bigr )\cap \{w_m<\epsilon\},$$ where $c=c(m,\kappa,M_1)\gg 1$.
Furthermore, the pull-back $v$ is a (weak) solution to
\begin{eqnarray}\label{e-kolm-ndggha-int}
\tilde{\mathcal L} v=\nabla_{W}\cdot (\tilde A\nabla_{W} v)+\tilde B\cdot\nabla_{W} v+ D\cdot\nabla_{Y,t} v=0
\end{eqnarray}
in $U$, where $\tilde A=(\tilde a_{i,j})$ and $\tilde B=(\tilde b_i)$ depend on $\mathcal L$ and the pull-back map in \eqref{dom+ggaint}. Here and in the following $\nabla_{Y,t}=(\nabla_Y,\partial_t)$ and $D$ is the vector-valued function
\begin{eqnarray}\label{e-kolm-ndggha-intD}
D:=(w,w_m+\mathcal P_{\gamma w_m}\psi(w,y,t),-1).
\end{eqnarray}
Using that $\Omega\subset\mathbb R^{N+1}$ is an (unbounded) admissible ($y_m$-independent) Lipschitz domain with constants $(M_1,M_2)$ in the sense of Definition \ref{car}, that $A$ satisfies \eqref{eq2} with constant $\kappa$, \eqref{struct}, and Lemma \ref{carlemma-}, it follows that
$\tilde A$ and $\tilde B$ are measurable and locally bounded satisfying
\begin{eqnarray}\label{eq2++}
\tilde\kappa^{-1}|\xi|^2\leq \sum_{i,j=1}^{m}\tilde a_{i,j}(W,Y,t)\xi_i\xi_j,\quad \ \ |\tilde A(W,Y,t)\xi\cdot\zeta|\leq \tilde\kappa|\xi||\zeta|,
\end{eqnarray}
for some $\tilde\kappa\in [1,\infty)$, and for all $\xi,\zeta\in \mathbb R^{m}$, $(W,Y,t)\in\mathbb R^{N+1}$, and
\begin{eqnarray}\label{eq2++a}
w_m|\nabla_W \tilde A(W,Y,t)|+w_m|\tilde B(W,Y,t)|\leq c.
\end{eqnarray}
Here $\tilde \kappa$ and $c$ depend on $m$, $\kappa$ and $M_1$ only. In addition, it is important to note that $\tilde A$ is symmetric. Furthermore, using Lemma \ref{carlemma}, and that the measures $\mu_1$ and $\mu_2$ defined in \eqref{measure1} are Carleson measures on $\Omega$ with constant $\Gamma$, we see that if we introduce $\, \mathrm{d}\tilde \mu_i=\, \mathrm{d}\tilde\mu_i(W,Y,t)$, $i\in\{1,2,3\}$,
\begin{equation}
\begin{split}
\, \mathrm{d}\tilde\mu_1&:= |\nabla_W\tilde A|^2w_m\, \, \mathrm{d} W\, \mathrm{d} Y\, \mathrm{d} t,\\
\, \mathrm{d}\tilde\mu_2&:=|\tilde B|^2w_m\, \, \mathrm{d} W\, \mathrm{d} Y\, \mathrm{d} t,\\
\, \mathrm{d}\tilde\mu_3&:=|D\cdot\nabla_{Y,t}\tilde A|^2w_m^3\, \, \mathrm{d} W\, \mathrm{d} Y\, \mathrm{d} t,
\end{split}
\end{equation} as measures on $U$, then
\begin{eqnarray}\label{con2+}
\tilde\mu_i(U\cap \mathcal{B}_\rho(w_0,0,Y_0,t_0))\leq c(m,\kappa, M_1, M_2,\Gamma)\rho^{{\bf q}-1}, \end{eqnarray} whenever $(w_0,0,Y_0,t_0)\in\partial U$, $\rho>0$, and $\mathcal{B}_\rho(w_0,0,Y_0,t_0)\subset\mathbb R^{N+1}$, and for $i\in\{1,2,3\}$. In particular, all measures in $\{\tilde\mu_i\}$ define Carleson measures on $U$. Furthermore, we emphasize that by our assumptions
\begin{eqnarray}\label{e-kolm-ndggha-lla+jja}
\mbox{$\tilde A$ and $\tilde B$ are independent of $y_m$}.
\end{eqnarray}
To prove \eqref{keyestalla} it suffices to prove the following lemma.
\begin{lemma}\label{Carleson} Let $\sigma\in (0,1)$ be a given degree of freedom. Then there exists a finite constant $c=c(m,\kappa, M_1, M_2,\Gamma,\sigma)$, such that
\begin{eqnarray*}
I_{\epsilon}\leq \sigma I_{\epsilon}+c\rho_0^{{\bf q}-1}.
\end{eqnarray*} \end{lemma}
Note that by construction $I_{\epsilon}$ is finite. The proof of Lemma \ref{Carleson} is given in the next section and from here on, and hence also in the proof, we will not indicate the dependence on $\epsilon$ and simply write $I$ for $I_\epsilon$ and we note -- and this is a consequence of the introduction of $\epsilon$ -- that no boundary terms will survive when we perform partial integration. In addition we will also here, with a slight abuse of notation, let $Z:=(W,Y)$ and $\, \mathrm{d} Z\, \mathrm{d} t:=\, \mathrm{d} W\, \mathrm{d} Y\, \mathrm{d} t$.
In the proof of Lemma \ref{Carleson} we will also use the quantities
\begin{eqnarray}\label{e-kolm-ndggha-lla+gg}
J&:=&\iiint_{\mathbb R^{N+1}_+}|D\cdot\nabla_{Y,t}v|^2\Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
K&:=&\sum_{i=1}^m\iiint_{\mathbb R^{N+1}_+}\ |\nabla_{W}(\partial_{w_i}v)|^2 \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
L&:=&\iiint_{\mathbb R^{N+1}_+}\ |\nabla_{Y}v|^2 \Psi^6w_m^5\, \, \mathrm{d} Z\, \mathrm{d} t,\\
L_{i}&:=&
\iiint_{\mathbb R^{N+1}_+}|\partial_{y_i} v|^2\Psi^6w_m^5\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
M&:=&\sum_{i=1}^m \iiint_{\mathbb R^{N+1}_+} |\nabla_W(\partial_{y_i}v)|^2\Psi^8w_m^7\, \, \mathrm{d} Z\, \mathrm{d} t.\notag
\end{eqnarray}
In the rather technical proof to follow, the crucial estimate in the proof of Lemma \ref{Carleson} is stated in \eqref{sv} below and states that
\begin{eqnarray*} L_{m}\lesssim M^{1/2}J^{1/2}+I+J,
\end{eqnarray*}
where $\lesssim$ means that we can control the constants. This estimate uses, in a crucial way it seems, that $\psi$ and $A$, and hence $\tilde A$, do not depend on $y_m$. It seems that this additional degree of freedom is crucial for us to be able to complete the argument.
\section{Proof of Lemma \ref{Carleson}}\label{sec5} We will first prove that
\begin{eqnarray}\label{auxest1}
I\leq c\rho_0^{{\bf q}-1}+\sigma I+\tilde\sigma J
\end{eqnarray}
where $\sigma,\tilde\sigma\in (0,1)$ are degrees of freedom and $c$ is a positive constant which, unless otherwise stated, only depends on $(m,\kappa, M_1, M_2,\Gamma)$ and $\sigma, \tilde\sigma$. In general, in the following $c$ will denote a generic such constant, not necessarily the same at each instance. We often write $c_1\lesssim c_2$ and this means that $c_1/c_2$ is bounded by a constant depending only on $(m,\kappa, M_1, M_2,\Gamma)$, $\sigma$ and $\tilde\sigma$.
To start the proof of \eqref{auxest1} we note, using ellipticity, that
\begin{eqnarray*}\label{est4}
I\lesssim \sum_{i,j=1}^m I_{i,j},
\end{eqnarray*}
where
\begin{align*}
I_{i,j}:=2\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}\tilde a_{i,j}(\partial_{w_i}v)(\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{align*}
Assume first that $i\neq m$. Then, integrating by parts in $I_{i,j}$ with respect to $w_i$ we see that
\begin{eqnarray*}
I_{i,j}&=&-2\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v\partial_{w_i}(\tilde a_{i,j}\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-2\iiint_{\mathbb R^{N+1}_+}\partial_{w_i}\tilde a_{m,m}^{-1}\tilde a_{i,j}v(\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-4\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1} {\tilde a_{i,j}}v(\partial_{w_j}v)w_m\Psi\partial_{w_i}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{eqnarray*}
Similarly we see that
\begin{eqnarray*}
I_{m,j}&=&-2\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v\partial_{w_m}(\tilde a_{m,j}\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-2\iiint_{\mathbb R^{N+1}_+}\partial_{w_m}\tilde a_{m,m}^{-1}\tilde a_{m,j} v(\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-2\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}}{\tilde a_{m,j}}v(\partial_{w_j}v)\Psi^2\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-4\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}}{\tilde a_{m,j}}v(\partial_{w_j}v)w_m\Psi\partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{eqnarray*}
Put together
\begin{eqnarray*}
I\leq I_1+I_2+I_3+I_4,
\end{eqnarray*}
where
\begin{eqnarray*}
I_1&:=&-2\sum_{i,j}\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v\partial_{w_i}(\tilde a_{i,j}\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_2&:=&-2\sum_{i,j}\iiint_{\mathbb R^{N+1}_+}\partial_{w_i}\tilde a_{m,m}^{-1}\tilde a_{i,j}v(\partial_{w_j}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_3&:=&-4\sum_{i,j}\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1} {\tilde a_{i,j}}v(\partial_{w_j}v)w_m\Psi\partial_{w_i}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_4&:=&-2\sum_{j}\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}}{\tilde a_{m,j}}v(\partial_{w_j}v)\Psi^2\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{eqnarray*}
We first analyze $I_1$. Using the equation, i.e. \eqref{e-kolm-ndggha-int}, we obtain
\begin{eqnarray*}
I_1&=&2\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v (D\cdot\nabla_{Y,t}v)\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&+2\sum_{i}\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v\tilde b_i\partial_{w_i}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=:&I_{11}+I_{12},
\end{eqnarray*}
and \begin{eqnarray*}
I_{12}&\leq&c\biggl (\iiint_{\mathbb R^{N+1}_+}v^2|\tilde B|^2\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\biggr )^{1/2}
\biggl (\iiint_{\mathbb R^{N+1}_+}|\nabla_W v|^2\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\biggr )^{1/2}\notag\\
&\leq& c\rho_0^{{\bf q}-1}+\sigma I,
\end{eqnarray*}
by \eqref{con2+} applied to $\tilde\mu_2$. Furthermore, integrating by parts with respect to $w_m$ we see that
\begin{eqnarray*}
I_{11}=I_{111}+I_{112}+I_{113}+I_{114},
\end{eqnarray*}
where
\begin{eqnarray*}
I_{111}&=&-\iiint_{\mathbb R^{N+1}_+}\partial_{w_m}\tilde a_{m,m}^{-1}v(D\cdot\nabla_{Y,t}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_{112}&=&-\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}\partial_{w_m}v(D\cdot\nabla_{Y,t}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_{113}&=&-\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v\partial_{w_m}(D\cdot\nabla_{Y,t}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_{114}&=&-2\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v(D\cdot\nabla_{Y,t}v) w_m^2\Psi \partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{eqnarray*}
Focusing on $I_{111}$ we see that
\begin{eqnarray*}
I_{111}&=&\iiint_{\mathbb R^{N+1}_+}\biggl (\frac {\partial_{w_m} \tilde a_{m,m}}{\tilde a_{m,m}^2}\biggr )v(D\cdot\nabla_{Y,t}v)\Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&\leq&c\biggl (\iiint_{\mathbb R^{N+1}_+}|\partial_{w_m}\tilde a_{m,m}|^2v^2 w_m\, \, \mathrm{d} Z\, \mathrm{d} t\biggr)^{1/2}J^{1/2}\notag\\
&\leq & c\rho_0^{{\bf q}-1}+\tilde\sigma J,
\end{eqnarray*}
by \eqref{con2+} applied to $\tilde\mu_1$. To continue we see that
\begin{eqnarray*}
I_{113}&=&-\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v(D\cdot\nabla_{Y,t}\partial_{w_m}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
&&-\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v(1+\partial_{w_m}\mathcal P_{\gamma w_m}\psi(w,y,t))(\partial_{y_m}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=:&I_{1131}+I_{1132}.
\end{eqnarray*}
To estimate $I_{1132}$ we write
\begin{eqnarray*}
I_{1132}&=&-\frac 12\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}(1+\partial_{w_m}\mathcal P_{\gamma w_m}\psi(w,y,t))(\partial_{y_m}v^2) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=&\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}(1+\partial_{w_m}\mathcal P_{\gamma w_m}\psi(w,y,t)) v^2 w_m^2\Psi\partial_{y_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t,
\end{eqnarray*}
where we have used that $\tilde a_{m,m}$ and $\psi$ are independent of $y_m$. In particular, $|I_{1132}|\leq c\rho_0^{{\bf q}-1}$. Focusing on
$I_{1131}$,
\begin{eqnarray*}
I_{1131}&=&\iiint_{\mathbb R^{N+1}_+}((D\cdot\nabla_{Y,t})\tilde a_{m,m}^{-1})v(\partial_{w_m}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
&&+\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}(D\cdot\nabla_{Y,t}v)(\partial_{w_m}v) \Psi^2w_m^2\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&+2\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v(\partial_{w_m}v) w_m^2\Psi(D\cdot\nabla_{Y,t})\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=:&I_{11311}+I_{11312}+I_{11313}.
\end{eqnarray*}
Again using \eqref{con2+} applied to $\tilde\mu_3$ and elementary estimates we see that
\begin{eqnarray*}
|I_{11311}|+|I_{11313}|\leq c\rho_0^{{\bf q}-1}+\sigma I.
\end{eqnarray*}
Furthermore, \begin{eqnarray*}
I_{11312}=-I_{112}.
\end{eqnarray*}
In particular, we have proved that
\begin{eqnarray*}
I_1\leq c\rho_0^{{\bf q}-1}+\sigma I+\tilde\sigma J+|I_{114}|.
\end{eqnarray*}
To estimate $I_{114}$ we write
\begin{eqnarray*}
I_{114}&=&-\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}(D\cdot\nabla_{Y,t}v^2)w_m^2\Psi \partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=&\iiint_{\mathbb R^{N+1}_+}((D\cdot\nabla_{Y,t})\tilde a_{m,m}^{-1})v^2w_m^2\Psi \partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&+\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v^2w_m^2\Psi((D\cdot\nabla_{Y,t}) \partial_{w_m}\Psi)\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&+\iiint_{\mathbb R^{N+1}_+}\tilde a_{m,m}^{-1}v^2w_m^2 ((D\cdot\nabla_{Y,t})\Psi)\partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=:&I_{1141}+I_{1142}+I_{1143}.
\end{eqnarray*}
Using \eqref{con2+} applied to $\tilde\mu_3$, and by now familiar arguments, we see that $|I_{114}|\leq c\rho_0^{{\bf q}-1}$. Put together we can conclude that
\begin{eqnarray*}
I_1\leq c\rho_0^{{\bf q}-1}+\sigma I+\tilde\sigma J.
\end{eqnarray*}
It is straightforward to see that
\begin{eqnarray*}
|I_2|+|I_3|\leq c\rho_0^{{\bf q}-1}+ \sigma I. \end{eqnarray*}
To estimate $I_4$ we write
\begin{eqnarray*}
I_4=-2\sum_{j}\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}v(\partial_{w_j}v)\Psi^2\, \, \mathrm{d} Z\, \mathrm{d} t.\notag\\
\end{eqnarray*}
We first consider the term in the definition of $I_4$ which corresponds to $j=m$ and we note that
\begin{eqnarray*}
\biggl|\iiint_{\mathbb R^{N+1}_+}\partial_{w_m}(v^2)\Psi^2\, \, \mathrm{d} Z\, \mathrm{d} t\biggr |=2\biggl|\iiint_{\mathbb R^{N+1}_+}v^2\Psi\partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\biggr|\leq c\rho_0^{{\bf q}-1}.
\end{eqnarray*}
Next we consider the terms
in the definition of $I_4$ which correspond to $j\neq m$. By integration by parts we see that
\begin{equation*}
\begin{split}
-2&\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}v(\partial_{w_j}v)\partial_{w_m}(w_m)\Psi^2\, \, \mathrm{d} Z\, \mathrm{d} t\\
&= 2\iiint_{\mathbb R^{N+1}_+}\partial_{w_m}({\tilde a_{m,m}^{-1}} {\tilde a_{m,j}})v\partial_{w_j}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\\
&\quad +2\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}\partial_{w_m} v\partial_{w_j}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\\
&\quad +2\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}v\partial_{w_mw_j}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&\quad +4\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}v\partial_{w_j}vw_m\Psi\partial_{w_m}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{split}
\end{equation*}
Let
\begin{eqnarray*}
I_{41}&:=&2\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}\partial_{w_m}v\partial_{w_j}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\
I_{42}&:=&2\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}v\partial_{w_mw_j}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t.
\end{eqnarray*}
By the above deductions, and using by now familiar arguments, we can conclude that \begin{eqnarray*}
|I_4-I_{41}-I_{42}|\leq c\rho_0^{{\bf q}-1}+\sigma I.
\end{eqnarray*}
To estimate $I_{42}$ we use that $j\neq m$. Integrating by parts
\begin{eqnarray*}
I_{42}&=&-2\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\partial_{w_j}({\tilde a_{m,m}^{-1}} {\tilde a_{m,j}})v\partial_{w_m}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-2\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}\partial_{w_j}v\partial_{w_m}v\Psi^2w_m\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-4\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}{\tilde a_{m,m}^{-1}} {\tilde a_{m,j}}v\partial_{w_m}vw_m\Psi\partial_{w_j}\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=:&I_{421}+I_{422}+I_{423}. \end{eqnarray*}
Note that
\begin{eqnarray*} I_{422}=-I_{41},
\end{eqnarray*}
and that
\begin{eqnarray*}
|I_{421}|+|I_{423}|\leq c\rho_0^{{\bf q}-1}+\sigma I,
\end{eqnarray*}
by familiar arguments. Summarizing we can conclude that
\begin{eqnarray*}
I&\lesssim&|I_1|+|I_2|+|I_3|+|I_4|\leq c\rho_0^{{\bf q}-1}+\sigma I+\tilde\sigma J,
\end{eqnarray*}
where $\sigma$, $\tilde\sigma$, are degrees of freedom. This completes the proof of the estimate in \eqref{auxest1}.
The next step is to estimate $J$ in a similar fashion. \subsection{Estimating the term $J$}
To estimate $J$ we write
\begin{eqnarray*}\label{e-kolm-ndggha-lla+ggco}
J=-\iiint_{\mathbb R^{N+1}_+}(D\cdot\nabla_{Y,t}v)
\bigl (\nabla_{W}\cdot (\tilde A\nabla_{W} v)+\tilde B\cdot\nabla_{W}v\bigr)\Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,
\end{eqnarray*}
and
\begin{eqnarray} J=J_{1}+J_{2}+J_{3}+J_{4}, \end{eqnarray} where \begin{eqnarray*} J_{1}&:=&-\sum_{j}\iiint_{\mathbb R^{N+1}_+}\ (D\cdot\nabla_{Y,t}v)\partial_{w_m}(\tilde a_{m,j}\partial_{w_j}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\ J_{2}&:=&-\sum_{i\neq m}\iiint_{\mathbb R^{N+1}_+}\ (D\cdot\nabla_{Y,t}v)\partial_{w_i}(\tilde a_{i,m}\partial_{w_m}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\ J_{3}&:=&-\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (D\cdot\nabla_{Y,t}v) \partial_{w_i}(\tilde a_{i,j}\partial_{w_j}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\ J_{4}&:=&-\sum_{i}\iiint_{\mathbb R^{N+1}_+}\ (D\cdot\nabla_{Y,t}v){\tilde b_i}\partial_{w_i}v \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t. \end{eqnarray*} Using \eqref{eq2++a} we immediately see that \begin{eqnarray}\label{bound}
|J_{1}|+|J_{2}|+|J_{4}|\lesssim I^{1/2}J^{1/2}+\biggl (\iiint_{\mathbb R^{N+1}_+}\ |\nabla_W (\partial_{w_m}v)|^2 \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t\biggr )^{1/2}J^{1/2}. \end{eqnarray}
Focusing on $J_{3}$, and integrating by parts with respect to $w_i$, we see that \begin{eqnarray} J_{3}&=&\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ \partial_{w_i}(D\cdot\nabla_{Y,t}v)(\tilde a_{i,j}\partial_{w_j}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&+4\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (D\cdot\nabla_{Y,t}v)(\tilde a_{i,j}\partial_{w_j}v) \partial_{w_i}(\Psi)w_m^3\Psi^3 \, \, \mathrm{d} Z\, \mathrm{d} t\\ &=:&J_{31}+J_{32},\notag \end{eqnarray}
and that $|J_{32}|\leq cI^{1/2}J^{1/2}$. Furthermore, \begin{eqnarray*} J_{31}&=&\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (\partial_{y_i}v)(\tilde a_{i,j}\partial_{w_j}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&+\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (D\cdot\nabla_{Y,t}(\partial_{w_i}v))(\tilde a_{i,j}\partial_{w_j}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&+\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (\partial_{w_i}\mathcal P_{\gamma w_m}\psi(w,y,t))(\partial_{y_m}v)(\tilde a_{i,j}\partial_{w_j}v) \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &=:&J_{311}+J_{312}+J_{313}. \end{eqnarray*} Then \begin{eqnarray}
|J_{311}|+|J_{313}|\lesssim \biggl (\iiint_{\mathbb R^{N+1}_+}\ |\nabla_{Y}v|^2 \Psi^6w_m^5\, \, \mathrm{d} Z\, \mathrm{d} t\biggr )^{1/2} I^{1/2}. \end{eqnarray} To estimate $J_{312}$ we lift the vector field $D\cdot\nabla_{Y,t}$ through partial integration and use the symmetry of the matrix $\{\tilde a_{i,j}\}$ to see that \begin{eqnarray*} 2J_{312}&=&-\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (\partial_{w_i}v)(D\cdot\nabla_{Y,t}(\tilde a_{i,j}))\partial_{w_j}v \Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&-4\sum_{i\neq m}\sum_{j\neq m}\iiint_{\mathbb R^{N+1}_+}\ (\partial_{w_i}v)(\tilde a_{i,j}\partial_{w_j}v) (D\cdot\nabla_{Y,t}(\Psi))w_m^3\Psi^3\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &=:&J_{3121}+J_{3122}. \end{eqnarray*} Then, by familiar arguments, \begin{eqnarray}
|J_{3121}|+|J_{3122}|\leq cI. \end{eqnarray}
Let $K$ and $L$ be as introduced in \eqref{e-kolm-ndggha-lla+gg}. Then, putting all estimates together we can conclude that \begin{equation} \begin{split}
J&\leq |J_{1}|+|J_{2}|+|J_{3}|+|J_{4}|\\ &\lesssim I+I^{1/2}J^{1/2}+J^{1/2}K^{1/2}+I^{1/2}L^{1/2}. \end{split} \end{equation} Hence \begin{eqnarray} J&\lesssim& I+K+I^{1/2}L^{1/2}. \end{eqnarray} To proceed we have to estimate $K$ and $L$.
\subsection{Estimating the term $K$} To start the argument for $K$ we introduce $\tilde v=\partial_{w_i} v$ and we use \eqref{e-kolm-ndggha-int} to conclude that
$\tilde v$ solves \begin{equation}\label{e-kolm-nd+a} \begin{split}
&\nabla_{W}\cdot (\tilde A\nabla_{W} \tilde v)+\tilde B\cdot\nabla_{W} \tilde v+(D\cdot\nabla_{Y,t})\tilde v\\
&=-\nabla_{W}\cdot (\partial_{w_i} \tilde A\nabla_{W} v)-\partial_{w_i} \tilde B\cdot\nabla_{W} v-\partial_{y_i}v-\partial_{w_i}\mathcal P_{\gamma w_m}\psi(w,y,t)\partial_{y_m}v
\end{split}
\end{equation}
in $U$.
Multiplying the equation in \eqref{e-kolm-nd+a} with $\tilde v\Psi^4w_m^3$, integrating and using Cauchy-Schwarz we see that
\begin{eqnarray}\label{Lintbparts}
K\lesssim I+|K_1|+|K_2|+|K_3|+|K_4|, \end{eqnarray} where \begin{eqnarray*} K_1&:=&\iiint_{\mathbb R^{N+1}_+} \bigl ((D\cdot\nabla_{Y,t})\tilde v\bigr )\tilde v\Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\ K_2&:=& \iiint_{\mathbb R^{N+1}_+} \bigl (\nabla_{W}\cdot ((\partial_{w_i} \tilde A)\nabla_{W}v)\bigr )\tilde v\Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\ K_3&:=& \iiint_{\mathbb R^{N+1}_+} \bigl (\partial_{w_i}\tilde B\cdot \nabla_{W}v\bigr )\tilde v\Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t,\notag\\ K_4&:=&\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i}v+\partial_{w_i}\mathcal P_{\gamma w_m}\psi(w,y,t)\partial_{y_m}v)\tilde v\Psi^4w_m^3\, \, \mathrm{d} Z\, \mathrm{d} t. \end{eqnarray*} Using \eqref{eq2++a} we immediately see that
\begin{eqnarray}
|K_2|+|K_3|+|K_4|\lesssim I+I^{1/2}K^{1/2}+I^{1/2}L^{1/2}. \end{eqnarray} Furthermore,
\begin{equation}\label{acom}
\begin{split}
2|K_1| &\leq 4\biggl |\iiint_{\mathbb R^{N+1}_+} \tilde v^2\bigl ((w,w_m+\mathcal P_{\gamma w_m}\psi(w,y,t))
\cdot \nabla_Y-\partial_t\bigr)(\Psi)w_m^3\Psi^3\, \, \mathrm{d} Z\, \mathrm{d} t\biggr |\\
&\lesssim I,
\end{split} \end{equation} and we can conclude that
\begin{eqnarray}
K\lesssim I+|K_1|+|K_2|+|K_3|+|K_4|\lesssim I+I^{1/2}K^{1/2}+I^{1/2}L^{1/2}. \end{eqnarray} Hence
\begin{eqnarray} K\lesssim I+I^{1/2}L^{1/2}. \end{eqnarray}
\subsection{Estimating the term $L$ ($L_i$)} Focusing on $L$ we write
\begin{eqnarray}
L=\sum_{i=1}^m L_{i} \end{eqnarray} where $L_i$ is defined in \eqref{e-kolm-ndggha-lla+gg}. Note that
\begin{eqnarray}\label{dyivrel}
\partial_{y_i} v=-(D\cdot\nabla_{Y,t})(\partial_{w_i}v)+\partial_{w_i}(D\cdot\nabla_{Y,t})(v)-(\partial_{w_i}\mathcal P_{\gamma w_m}\psi(w,y,t))\partial_{y_m}v. \end{eqnarray} Hence,
\begin{eqnarray*}
L_{i}&=&-\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} v)(D\cdot\nabla_{Y,t})(\partial_{w_i}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&+\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} v)\partial_{w_i}(D\cdot\nabla_{Y,t})(v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&&-\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} v)((\partial_{w_i}\mathcal P_{\gamma w_m}\psi(w,y,t))\partial_{y_m}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\
&=:&L_{i,1}+L_{i,2}+L_{i,3}. \end{eqnarray*} Using partial integration we immediately see that
\begin{eqnarray}
|L_{i,2}|\lesssim M^{1/2}J^{1/2}+L_{i}^{1/2}J^{1/2}
\end{eqnarray} where also $M$ was defined in \eqref{e-kolm-ndggha-lla+gg}. Furthermore, \begin{eqnarray*} L_{i,1}&=&\iiint_{\mathbb R^{N+1}_+} (D\cdot\nabla_{Y,t})(\partial_{y_i} v)(\partial_{w_i}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&+6\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} v)(\partial_{w_i}v)w_m^5\Psi^5(D\cdot\nabla_{Y,t})\Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &=&\iiint_{\mathbb R^{N+1}_+} \partial_{y_i} (D\cdot\nabla_{Y,t})(v)(\partial_{w_i}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&-\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} \mathcal P_{\gamma w_m}\psi(w,y,t))(\partial_{y_m} v)(\partial_{w_i}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&+6\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} v)(\partial_{w_i}v)w_m^5\Psi^5(D\cdot\nabla_{Y,t})\Psi\, \, \mathrm{d} Z\, \mathrm{d} t. \end{eqnarray*} Integrating by parts we have
\begin{eqnarray*} L_{i,1} &=&-\iiint_{\mathbb R^{N+1}_+} (D\cdot\nabla_{Y,t})(v)(\partial_{w_iy_i}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&-6\iiint_{\mathbb R^{N+1}_+} (D\cdot\nabla_{Y,t})(v)(\partial_{w_i}v)w_m^5\Psi^5\partial_{y_i} \Psi\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&-\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} \mathcal P_{\gamma w_m}\psi(w,y,t))(\partial_{y_m} v)(\partial_{w_i}v)w_m^5\Psi^6\, \, \mathrm{d} Z\, \mathrm{d} t\notag\\ &&+6\iiint_{\mathbb R^{N+1}_+} (\partial_{y_i} v)(\partial_{w_i}v)w_m^5\Psi^5(D\cdot\nabla_{Y,t})\Psi\, \, \mathrm{d} Z\, \mathrm{d} t. \end{eqnarray*} Hence we can first conclude that
\begin{eqnarray}
|L_{i,1}|\lesssim J^{1/2}M^{1/2}+I^{1/2}J^{1/2}+I^{1/2}(L_{i}^{1/2}+L_{m}^{1/2}) \end{eqnarray} and then by collecting the estimates
\begin{eqnarray}\label{hata}
L_{i}\lesssim I^{1/2}J^{1/2}+I^{1/2}(L_{i}^{1/2}+L_{m}^{1/2})+M^{1/2}J^{1/2}+L_{i}^{1/2}J^{1/2}+|L_{i,3}|. \end{eqnarray}
We now first consider the case $i=m$. Using \eqref{1-1} and the above we immediately see that
\begin{eqnarray}\label{sv} L_{m}\lesssim M^{1/2}J^{1/2}+I+J.
\end{eqnarray}
Consider now $i\neq m$. Then, using \eqref{hata} we have
\begin{eqnarray}\label{hata+} L_{i}\lesssim I^{1/2}J^{1/2}+I^{1/2}(L_{i}^{1/2}+L_{m}^{1/2})+M^{1/2}J^{1/2}+L_{i}^{1/2}J^{1/2}+L_{i}^{1/2}L_{m}^{1/2}. \end{eqnarray} Hence
\begin{eqnarray} L_{i}\lesssim I+I^{1/2}J^{1/2}+I^{1/2}L_{m}^{1/2}+M^{1/2}J^{1/2}+J+L_{m}
\end{eqnarray}
for $i\neq m$. In particular, using \eqref{sv}
\begin{eqnarray} L_{i}\lesssim I^{1/2}J^{1/2}+M^{1/2}J^{1/2}+I+J\mbox{ for all }i\in\{1,...,m\}.
\end{eqnarray}
Still the auxiliary term $M$ has to be estimated.
\subsection{Estimating the term $M$} To estimate $M$ we introduce $\tilde v=\partial_{y_i} v$ and using the equation we see that
$\tilde v$ solves \begin{equation}\label{e-kolm-nd+auu} \begin{split}
&\nabla_{W}\cdot (\tilde A\nabla_{W} \tilde v)+\tilde B\cdot\nabla_{W} \tilde v+(D\cdot\nabla_{Y,t})\tilde v\\
&=-\nabla_{W}\cdot (\partial_{y_i}\tilde A\nabla_{W} v)-\partial_{y_i}\tilde B\cdot\nabla_{W} v+\partial_{y_i}\mathcal P_{\gamma w_m}\psi(w,y,t)\partial_{y_m}v,
\end{split} \end{equation}
in $U$. Multiplying this equation with $\tilde v w_m^7\Psi^8$ and arguing similarly to the estimates for the term $K$, we derive
\begin{eqnarray}\label{acomuu} M\lesssim L+I^{1/2}L^{1/2}+M^{1/2}L^{1/2}+K^{1/2}L^{1/2}. \end{eqnarray} Hence,
\begin{eqnarray}\label{acomuu+} M\lesssim L+I^{1/2}L^{1/2}+K^{1/2}L^{1/2}. \end{eqnarray}
\subsection{Completing the proof of Lemma \ref{Carleson}} We are now ready to complete the proof of Lemma \ref{Carleson} by collecting all the terms and estimates developed above. To summarize we have proved that \begin{equation}\label{acomuu++}
\begin{split}
I&\leq c\rho_0^{{\bf q}-1}+\sigma I+\tilde\sigma J,\\
J&\lesssim I+K+I^{1/2}L^{1/2},\\
K&\lesssim I+I^{1/2}L^{1/2},\\
L&\lesssim I^{1/2}J^{1/2}+M^{1/2}J^{1/2}+I+J,\\
M&\lesssim L+I^{1/2}L^{1/2}+K^{1/2}L^{1/2}.
\end{split} \end{equation} We again note that by construction of the test function $\Psi$ we can ensure that $I,\dots,M$ are finite. Using \eqref{acomuu++} we first see that
\begin{eqnarray*}\label{acomuu++a}
J+K&\lesssim& I+\epsilon_1L,\notag\\
L&\lesssim& I +J+\epsilon_2M,\notag\\ M&\lesssim& L+I+\epsilon_3K, \end{eqnarray*} where $\epsilon_1,\epsilon_2$ and $\epsilon_3$ are positive degrees of freedom. Using the estimates for $L$ and $M$ we have
\begin{eqnarray*}\label{acomuu++b}
L&\lesssim& I +J+\epsilon_4K. \end{eqnarray*} Hence
\begin{eqnarray*}\label{acomuu++c}
J+K\leq c(I+c\epsilon_1( I +J+\epsilon_4K)), \end{eqnarray*} and we can conclude that
\begin{eqnarray*}\label{acomuu++d}
J+K\lesssim I. \end{eqnarray*} In particular, \begin{eqnarray*}
I\leq c\rho_0^{{\bf q}-1}+\sigma I+\tilde \sigma I \end{eqnarray*} and the proof is complete. \qed
\section{Proof of auxiliary lemmas}\label{sec6}
In this section we prove Lemma \ref{existcover} and Lemma \ref{lemmacruc} and in the proof we need a number of estimates for non-negative solutions recently established in \cite{LN}. Lemma \ref{lem4.7bol} and Lemma \ref{lem4.5-Kyoto1} below are Lemma 4.15 and Lemma 5.2 in \cite{LN}, respectively.
\begin{lemma}\label{lem4.7bol} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Then there exist
$\Lambda=\Lambda(m,M_1)$, $1\leq \Lambda<\infty$, $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, and $\gamma=\gamma(m,\kappa,M_1)$, $0<\gamma<\infty$, such that the following is true. Let $(Z_0,t_0)\in\partial\Omega$ and $r>0$. Assume that $u$ is a non-negative (weak) solution to $\mathcal L u=0$ in $\Omega\cap \mathcal{B}_{2r}(Z_0,t_0)$ and consider $\rho$, $\tilde\rho$, $0<\tilde\rho\leq\rho<r/c$. Then \begin{equation}\label{coneset-lem39} \begin{split} &u(A_{\tilde\rho,\Lambda}^+(Z_0,t_0))\leq c(\rho/\tilde\rho)^\gamma u(A_{\rho,\Lambda}^+(Z_0,t_0)),\\ &u(A_{\tilde\rho,\Lambda}^-(Z_0,t_0))\geq c^{-1} (\tilde\rho/\rho)^\gamma u(A_{\rho,\Lambda}^-(Z_0,t_0)). \end{split} \end{equation} \end{lemma}
\begin{lemma}\label{lem4.5-Kyoto1} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Let $(Z_0,t_0)\in\partial\Omega$ and $r>0$. Let $\theta\in (0,1)$ be given. Then there exists $c=c(m,\kappa,M_1,\theta)$, $1 \leq c < \infty$, such that the following holds. Assume that $u$ is a non-negative (weak) solution to $\mathcal L u=0$ in $\Omega\cap \mathcal{B}_{2r}(Z_0,t_0)$, vanishing continuously on $\partial\Omega\cap \mathcal{B}_{2r}(Z_0,t_0)$. Then \begin{eqnarray} \sup_{\Omega\cap \mathcal{B}_{r/c}(Z_0,t_0)}u\leq \theta\sup_{\Omega\cap \mathcal{B}_{2r}(Z_0,t_0)}u. \end{eqnarray} \end{lemma}
\begin{remark}\label{remnot} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. The constants $\Lambda=\Lambda(m,M_1)$, $1\leq \Lambda<\infty$, $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, referred to in Lemma \ref{lem4.7bol} are fixed in \cite{LN}. In the following we also let $\Lambda$ and $c$ be determined accordingly. \end{remark}
\begin{lemma}\label{bourg} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Let $\Lambda=\Lambda(m,M_1)$ be in accordance with Remark \ref{remnot}. Let $(Z_0,t_0)\in\partial\Omega$ and $r>0$. Then
\begin{eqnarray*} &&\omega( A_{r/c,\Lambda}^+(Z_0,t_0),\partial\Omega\cap \mathcal{B}_{r}(Z_0,t_0))\geq c^{-1}. \end{eqnarray*} \end{lemma}
\begin{proof} This follows immediately from Lemma \ref{lem4.5-Kyoto1}. \end{proof}
Lemmas \ref{T:doubling}-\ref{lemmacruc-} below are Theorem 3.6, Lemma 12.2 and Lemma 12.3 in \cite{LN}, respectively.
\begin{lemma}\label{T:doubling} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Let $\Lambda=\Lambda(m,M_1)$ be in accordance with Remark \ref{remnot}. Let $(Z_0,t_0)\in\partial\Omega$ and $r>0$. Then there exists $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, such that
\begin{eqnarray*} &&\omega( A_{r,\Lambda}^+(Z_0,t_0),\partial\Omega\cap \mathcal{B}_{2\tilde r}(\tilde Z_0,\tilde t_0))\leq c \omega( A_{r,\Lambda}^+(Z_0,t_0),\partial\Omega\cap \mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)) \end{eqnarray*} whenever $(\tilde Z_0,\tilde t_0)\in\partial\Omega$, $\mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)\subset \mathcal{B}_{r/c}(Z_0,t_0)$. \end{lemma}
\begin{lemma}\label{lem4.5-Kyoto1ha}Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Let $\Lambda=\Lambda(m,M_1)$ be in accordance with Remark \ref{remnot}. Let $(Z_0,t_0)\in\partial\Omega$ and $r>0$. Let $(\tilde Z_0,\tilde t_0)\in\partial\Omega$ and $\tilde r>0$ be such that $\mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)\subset \mathcal{B}_{r}(Z_0,t_0)$. Then there exists $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, such that
\begin{eqnarray}\label{ad} K(A_{c\tilde r,\Lambda}^+(\tilde Z_0,\tilde t_0),\bar Z,\bar t):=\lim_{\bar r\to 0}\frac{\omega(A_{c\tilde r,\Lambda}^+(\tilde Z_0,\tilde t_0),\partial\Omega\cap\mathcal{B}_{\bar r}(\bar Z,\bar t))}{\omega(A_{cr,\Lambda}^+(Z_0,t_0),\partial\Omega\cap\mathcal{B}_{\bar r}(\bar Z,\bar t))} \end{eqnarray} exists for $\omega(A_{cr,\Lambda}^+(Z_0,t_0),\cdot)$-a.e. $(\bar Z,\bar t)\in \partial\Omega\cap\mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)$, and \begin{eqnarray}\label{ad1} c^{-1}\leq {\omega(A_{cr,\Lambda}^+(Z_0,t_0), \partial\Omega\cap\mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0))}K(A_{c\tilde r,\Lambda}^+(\tilde Z_0,\tilde t_0),\bar Z,\bar t)\leq c \end{eqnarray} whenever $(\bar Z,\bar t)\in \partial\Omega\cap\mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)$. \end{lemma}
\begin{lemma}\label{lemmacruc-} Let $\Omega\subset\mathbb R^{N+1}$ be an unbounded ($y_m$-independent) Lipschitz domain with constant $M_1$ in the sense of Definition \ref{car}. Let $\Lambda=\Lambda(m,M_1)$ be in accordance with Remark \ref{remnot}. Let $(Z_0,t_0)\in\partial\Omega$ and $r>0$. Let $(\tilde Z_0,\tilde t_0)\in\partial\Omega$ and $\tilde r>0$ be such that $\mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)\subset \mathcal{B}_{r}(Z_0,t_0)$. Then there exist $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, and $\tilde c=\tilde c(m,\kappa,M_1)$, $1\leq \tilde c<\infty$, such that
\begin{eqnarray*}
\quad \tilde c^{-1}\omega(A_{c\tilde r,\Lambda}^+(\tilde Z_0,\tilde t_0),E)\leq \frac {\omega(A_{cr,\Lambda}^+(Z_0,t_0),E)}{\omega(A_{cr,\Lambda}^+(Z_0,t_0),\partial\Omega\cap \mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0))}\leq \tilde c \omega(A_{c\tilde r,\Lambda}^+(\tilde Z_0,\tilde t_0),E), \end{eqnarray*} whenever $E\subset \mathcal{B}_{\tilde r}(\tilde Z_0,\tilde t_0)$. \end{lemma}
\subsection{Proof of Lemma \ref{existcover}} Let in the following $Q_0\in \mathbb{D}$, $\epsilon_0\in (0,1)$, and let $\omega(\cdot)$ be as in the statement of Theorem \ref{Ainfty}. Let $k\in \mathbb Z_+$ be given. Let $\gamma$, $0<\gamma\ll 1$, and $\Upsilon$, $1\ll\Upsilon$, be degrees of freedom to be chosen depending only on $m$, $\kappa$ and $M_1$. Let $\delta_0=\gamma(\epsilon_0/\Upsilon)^k$. Suppose that $\omega(E)\leq\delta_0$. Using that $\omega$ is a regular Borel measure, we see that there exists a (relatively) open subset of ${Q}_0$, containing $E$, which we denote by $\mathcal{O}_{k+1}$, satisfying $\omega(\mathcal{O}_{k+1})\leq 2\omega(E)$. Using Lemma \ref{bourg} and the Harnack inequality, see Lemma \ref{lem4.7bol}, we see that there exists $c=c(m,\kappa,M_1)$, $1\leq c<\infty$, such that \begin{eqnarray}\label{yy1} \omega(\mathcal{O}_{k+1})\leq 2\delta_0\leq c\delta_0\omega({Q}_0)\leq \frac 1 2 \biggl (\frac {\epsilon_0}{\Upsilon}\biggr )^k\omega({Q}_0) \end{eqnarray} if we let $\gamma:=1/(2c)$. Let $f\in L^1_{\mbox{loc}}(\Sigma,\, \mathrm{d}\omega)$, and let
$$M_{\omega}(f)(Z,t):=\sup_{\{\mathcal{B}_r(\tilde Z,\tilde t):\ (\tilde Z,\tilde t)\in\Sigma,\ (Z,t)\in\mathcal{B}_r(\tilde Z,\tilde t)\}}\frac 1{\omega(\mathcal{B}_r(\tilde Z,\tilde t))}\iint_{\mathcal{B}_r(\tilde Z,\tilde t)} |f|\, \, \mathrm{d}\omega,$$ denote the Hardy-Littlewood maximal function of $f$, with respect to $\omega$, and where the supremum is taken over all balls $\mathcal{B}_r(\tilde Z,\tilde t)$, $(\tilde Z,\tilde t)\in\Sigma$, containing $(Z,t)$. Set \begin{equation}\label{lem:coverexists_Okdef}
\mathcal{O}_k:=\{(Z,t)\in {Q}_0\mid M_{\omega}(\chi_{\mathcal{O}_{k+1}})(Z,t)\geq {\epsilon_0}/{\bar c}\}, \end{equation} where $\chi_{\mathcal{O}_{k+1}}$ denotes the indicator function for the set $\mathcal{O}_{k+1}$, and where we let $\bar c=\bar c(m,\kappa,M_1)$, $1\leq \bar c<\infty$, denote the constant appearing in Lemma \ref{T:doubling}. Then, by construction, $\mathcal{O}_{k+1}\subset\mathcal{O}_k$, $\mathcal{O}_k$ is relatively open in ${Q}_0$ and $\mathcal{O}_k$ is properly contained in ${Q}_0$. As $\omega$ is doubling, see Lemma \ref{T:doubling}, we have that $(2Q_0,d,\omega)$ is a space of homogeneous type and weak $L^1$ estimates for the Hardy-Littlewood maximal function apply. Hence \begin{eqnarray}\label{yy2}\omega(\mathcal{O}_k)\leq \tilde c\frac {\bar c}{\epsilon_0}\omega(\mathcal{O}_{k+1})\leq \frac 1 2 \biggl (\frac {\epsilon_0}{\bar c}\biggr )^{k-1}\omega({Q}_0), \end{eqnarray} if we let $\Upsilon:=\tilde c\bar c$ and where $\tilde c=\tilde c(m,\kappa,M_1)$, $1\leq \tilde c<\infty$. By definition and by the construction, see \eqref{cubes} $(i)$-$(iii)$, ${Q}_0$ can be dyadically subdivided, and we can select a collection $\mathcal F_k=\{\Delta_i^k\}_i\subset {{Q}_0}$, comprised of the cubes that are maximal with respect to containment in $\mathcal{O}_k$, and thus $\mathcal{O}_k = \cup_i \Delta^k_i$. The cubes in $\mathcal F_k$ are maximal in the sense that \begin{equation}\label{Fkmaximal}
\Delta_i^k \in \mathcal F_k \:\iff\:\Delta_i^k\subset\mathcal{O}_k \:\text{and}\: Q \subset \Delta_i^k, \:\forall Q\in\mathbb{D}_{Q_0}\:\text{such that}\:\Delta_i^k\subset Q\subset\mathcal{O}_k. \end{equation} Using \eqref{Fkmaximal}, \eqref{lem:coverexists_Okdef}, and Lemma \ref{T:doubling}, we see that \begin{equation}\label{yy3} \begin{split}
\omega(\mathcal{O}_{k+1}\cap\Delta_i^k) &\leq \omega(\mathcal{O}_{k+1}\cap 2\Delta_i^k)\\
&\leq \omega(2\Delta_i^k)\frac{1}{\omega(2\Delta_i^k)}\iint_{2\Delta_i^k}\chi_{\mathcal{O}_{k+1}}\, \mathrm{d}\omega\\&\leq \epsilon_0\omega(\Delta_i^k), \end{split} \end{equation} for all $\Delta_i^k\in\mathcal F_k$. We now iterate this argument, to construct $\mathcal{O}_{j-1}$ from $\mathcal{O}_j$, for $2\leq j\leq k$, just as we constructed $\mathcal{O}_k$ from $\mathcal{O}_{k+1}$. It is then a routine matter to verify that the sets $\mathcal{O}_1,\dots,\mathcal{O}_k$ form a good $\epsilon_0$ cover of $E$. We omit further details. \qed
\subsection{Additional notation}
\begin{remark}\label{gc} In the following we let $\Pi(Z,t)$ denote the projection of $(Z,t)\in\mathbb R^{N+1}$ along $x_m$ onto $\partial\Omega$. Furthermore, from now on we fix two small dyadic numbers $\eta_1=2^{-k_1}$ and $\eta_2=2^{-k_2}$ where $1\leq k_1\ll k_2$ are to be chosen depending at most on $m$, $\kappa$ and $M_1$. Given $Q\in \mathbb D$, we let $A_{\eta_1 Q}^+:=A_{c\eta_1 l(Q),\Lambda}^+(Z_Q,t_Q)$, we consider the point $A_{c\eta_1^2 l(Q),\Lambda}^-(Z_Q,t_Q)$ and we let $\tilde Q\in \mathbb D$ be such that $l(\tilde Q)=\eta_1^2 l(Q)$ and such that $\tilde Q$ contains the point $\Pi(A_{c\eta_1^2 l(Q),\Lambda}^-(Z_Q,t_Q))$. We can and will choose $\eta_1$ so small that $$\Pi(A_{\eta_1 Q}^+)\subset \frac 1 4 Q$$ and such that $$\tilde Q\subset Q.$$ \end{remark}
\begin{remark}\label{gc+} Given $Q\in \mathbb D$ and $A_{\eta_1 Q}^+=A_{c\eta_1l(Q),\Lambda}^+(Z_Q,t_Q)$ as in Remark \ref{gc}, consider the point $\Pi(A_{\eta_1 Q}^+)\in\partial\Omega$. We let
$\hat Q\in \mathbb D$ be such that $l(\hat Q)=\eta_2 l(Q)$ and such that $\hat Q$ contains the point $\Pi(A_{\eta_1 Q}^+)$. We let $d_Q:=|A_{\eta_1 Q}^+-\Pi(A_{\eta_1 Q}^+)|$. Furthermore, we let $\bar Q\in \mathbb D$ be such that $l(\bar Q)=\eta_2^2 l(\hat Q)$ and $\bar Q$ contains the point $(Z_{\hat Q},t_{\hat Q})$. Note that by construction, if we choose $\eta_1$ (and hence $\eta_2$) small enough, $$\bar Q\subset\hat Q\subset Q.$$ Given $\bar Q$ we let \begin{equation}\label{yy4-} \begin{split} &S_{\bar Q}^+:=\{(x,\psi(x,y,t)+d_Q,y,y_m,t)\mid\ (x,\psi(x,y,t),y,y_m,t)\in \bar Q\},\\ &S_{\bar Q}^-:=\{(x,\psi(x,y,t)+l(\bar Q),y,y_m,t)\mid\ (x,\psi(x,y,t),y,y_m,t)\in \bar Q\}. \end{split} \end{equation} Then $S_{\bar Q}^-$ and $S_{\bar Q}^+$ are two pieces of surfaces above $\partial\Omega$ in the direction of $x_m$, $S_{\bar Q}^+$ is above $S_{\bar Q}^-$, and each point on $S_{\bar Q}^-$ can be connected to a point on $S_{\bar Q}^+$ by a straight line in the direction of $x_m$. \end{remark}
\begin{remark} Note that Remark \ref{gc} and Remark \ref{gc+} are generic constructions for dyadic cubes. Consider now the special case $\Delta := \Delta_i^l\in\mathcal F_l$, i.e. $\Delta$ is a cube arising in some good $\epsilon_0$ cover. We then set $\tilde \Delta_i^l:=\tilde \Delta$, where $\tilde \Delta$ is defined as in Remark \ref{gc}, and we define \begin{eqnarray}\label{yy4}\tilde{\mathcal{O}}_l:=\bigcup_{\Delta_i^l\in\mathcal F_l}\tilde \Delta_i^l. \end{eqnarray} Furthermore, let $E\subset {Q}_0$ and consider the set up of Lemma \ref{existcover}. We note that for every $(Z_0,t_0)\in E$ we have $(Z_0,t_0)\in \mathcal{O}_l$, for all $l=1,2,...,k$, and that therefore there exists, for each $l$, a cube $\Delta_i^l=\Delta_i^l(Z_0,t_0)\in\mathcal{F}_l$ containing $(Z_0,t_0)$. \end{remark}
\subsection{Proof of Lemma \ref{lemmacruc}} Let in the following $Q_0\in \mathbb{D}$ and let $\omega(\cdot)$ be as in the statement of Theorem \ref{Ainfty}. To prove Lemma \ref{lemmacruc}, let $\epsilon_0>0$ be a degree of freedom to be specified below and depending only on $m,\kappa,M_1$, let
$\delta_0=\gamma(\epsilon_0/\Upsilon)^k$ be as specified in Lemma \ref{existcover} where $k$ is to be chosen depending only on $m,\kappa,M_1$ and $\Upsilon$. Consider $E\subset {Q_0}$ with $\omega(E)\leq\delta_0$. Using Lemma \ref{existcover} we see that $E$ has a good $\epsilon_0$ cover of length $k$, $\{\mathcal{O}_l\}_{l=1}^k$ with corresponding collections $\mathcal F_l=\{\Delta_i^l\}_i\subset Q_0$. Let $\{\tilde{\mathcal{O}}_l\}_{l=1}^k$ be defined as in \eqref{yy4}. Using this good $\epsilon_0$ cover of $E$ we let $$F(Z,t):=\sum_{j=2}^k \chi_{\tilde\mathcal O_{j-1}\setminus\mathcal O_j}(Z,t),$$ where $\chi_{\tilde\mathcal O_{j-1}\setminus\mathcal O_j}$ is the indicator function for the set $\tilde\mathcal O_{j-1}\setminus\mathcal O_j$. Then $F$ equals the indicator function of some Borel set $S\subset\Sigma$ and we let $u(Z,t):=\omega(Z,t, S)$. Consider \mbox{$(Z_0,t_0)\in E$} and an index \mbox{$l\in \{1,...,k\}$}. Let in the following $$\mbox{$\Delta_i^l\in \mathcal F_l$ be a cube in the collection $\mathcal F_l$ which contains $(Z_0,t_0)$.}$$ Given $k_0\in\mathbb Z_+$ and $A_{\eta\Delta_i^l}^+=A_{c\eta l(\Delta_i^l),\Lambda}^+(Z_{\Delta_i^l},t_{\Delta_i^l})$ we let $$\mbox{$\hat\Delta_i^l$, $\bar\Delta_i^l$, $S_{\bar\Delta_i^l}^-$ and $S_{\bar\Delta_i^l}^+$}$$ be as defined as in Remark \ref{gc+} relative to $\Delta_i^l$ and using $\eta_j:=2^{-k_j}$. 
Hence, based on $(Z_0,t_0)\in E$ and an index $l\in \{1,...,k\}$, we have specified $\Delta_i^l$, $\hat \Delta_i^l$, $\bar \Delta_i^l$ and the surfaces $S_{\bar\Delta_i^l}^-$ and $S_{\bar\Delta_i^l}^+$, and, by construction, $$ \bar \Delta_i^l\subset\hat \Delta_i^l\subset \Delta_i^l,\ \tilde \Delta_i^l\subset \Delta_i^l.$$ We first intend to prove that there exists $\beta>0$, depending only on $m,\kappa,M_1$, such that if $\epsilon_0$ and $\eta_j=2^{-k_j}$ are chosen sufficiently small, then \begin{eqnarray}\label{yy6}u(P+d_{\Delta_i^l}e_m)-u( P+l(\bar\Delta_i^l)e_m)\geq \beta,\ \forall P\in\{(x,\psi(x,y,t),y,y_m,t)\in \bar\Delta_i^l\}, \end{eqnarray} where $e_m$ denotes the unit vector in $\mathbb R^{N+1}$ which points in the direction of $x_m$. Given $P\in\{(x,\psi(x,y,t),y,y_m,t)\in \bar\Delta_i^l\}$ we let $$P^+:=P+d_{\Delta_i^l}e_m,\ P^-:=P+l(\bar\Delta_i^l)e_m,$$ and we want to estimate $u(P^+)-u(P^-)$. To start the proof we note that, by construction, $\tilde \Delta_i^l\subset\Delta_i^l$ and by using Lemma \ref{bourg} and Lemma \ref{lem4.7bol} we see that there exists $ c_{\eta_1}=c_{\eta_1}(m,\kappa,M_1,\eta_1)$, $1\leq c_{\eta_1}<\infty$, for $\eta_1$ small enough, such that \begin{eqnarray}\label{boundbelow}\omega(P^+,\tilde \Delta_i^l)\geq c_{\eta_1}^{-1}. \end{eqnarray} To estimate $u( P^+)$ we first note that \begin{equation} \begin{split} u(P^+)&\geq\iint_{\tilde \Delta_i^l}\chi_{\tilde{\mathcal{O}}_{l}\setminus\mathcal{O}_{l+1}}\, \, \mathrm{d}\omega(P^+,\bar Z,\bar t)\\
&=\omega(P^+,\tilde \Delta_i^l)-\omega( P^+,\tilde\Delta_i^l\cap \mathcal{O}_{l+1}),
\end{split} \end{equation} as all terms in the definition of $F$ are non-negative. Consider $(\bar Z,\bar t)\in \Delta_i^l$. Then, using Lemma \ref{lem4.5-Kyoto1ha} we have that \begin{eqnarray} K( P^+,\bar Z,\bar t):=\lim_{\rho\to 0}\frac{\omega( P^+,\partial\Omega\cap\mathcal{B}_\rho(\bar Z,\bar t))}{\omega(\partial\Omega\cap\mathcal{B}_\rho(\bar Z,\bar t))}, \end{eqnarray} exists for $\omega$-a.e. $(\bar Z,\bar t)\in \Delta_i^l$, and \begin{eqnarray} K(P^+,\bar Z,\bar t)\leq \frac c{\omega(\Delta_i^l)}\mbox{ whenever }(\bar Z,\bar t)\in \Delta_i^l. \end{eqnarray} In the last conclusion we have also used Lemma \ref{T:doubling}. Using this estimate, and the fact that by construction $\tilde \Delta_i^l\subset\Delta_i^l$, we see that \begin{eqnarray} \omega( P^+,\tilde\Delta_i^l\cap \mathcal{O}_{l+1})\leq \frac {c_{\eta_1}}{\omega(\Delta_i^l)}\omega(\tilde\Delta_i^l\cap \mathcal{O}_{l+1})\leq C_{\eta_1}\epsilon_0, \end{eqnarray} by the construction. In particular, using \eqref{boundbelow} we deduce \begin{eqnarray}\label{yy10} u(P^+)\geq c_{\eta_1}^{-1}-C_{\eta_1}\epsilon_0. \end{eqnarray} To estimate $u( P^-)$ we write \begin{eqnarray*} u( P^-)&=&\iint_{Q_0\setminus \hat\Delta_i^l}F(\bar Z,\bar t)\, \, \mathrm{d}\omega( P^-,\bar Z,\bar t)+\iint_{\hat\Delta_i^l}F(\bar Z,\bar t)\, \, \mathrm{d}\omega( P^-,\bar Z,\bar t)\notag\\ &=:&I+{II}. \end{eqnarray*} Using Lemma \ref{lem4.5-Kyoto1} and the definition of $ P^-$ we see that \begin{eqnarray}
|I|\leq \omega(P^-,Q_0\setminus \hat\Delta_i^l)\leq c\eta_2^\sigma, \end{eqnarray} for some $c=c(m,\kappa,M_1)$, $\sigma=\sigma(m,\kappa,M_1)\in (0,1)$. We split ${II}$ as \begin{eqnarray} {II}= {II}_1+{II}_2+{II}_3, \end{eqnarray} where \begin{eqnarray} {II}_1&:=& \sum_{j=2}^l\iint_{\hat\Delta_i^l}1_{\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j}\, \, \mathrm{d}\omega( P^-,\bar Z,\bar t),\notag\\ {II}_2&:=&\sum_{j=l+2}^k\iint_{\hat\Delta_i^l}1_{\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j}\, \, \mathrm{d}\omega( P^-,\bar Z,\bar t),\\ {II}_3&:=&\iint_{\hat\Delta_i^l}1_{\tilde{\mathcal{O}}_{l}\setminus\mathcal{O}_{l+1}}\, \, \mathrm{d}\omega( P^-,\bar Z,\bar t).\notag \end{eqnarray} Note that if $j\leq l$, then $\hat\Delta_i^l\subset\Delta_i^l\subset\mathcal{O}_l\subset \mathcal{O}_j$ and $(\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j)\cap \Delta_i^l=\emptyset$. Hence $II_1 = 0$. Furthermore, \begin{equation} \begin{split}
|II_2|&\leq\sum_{j=l+2}^k\omega( P^-,(\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j)\cap \Delta_i^l)\\ &\leq c_{\eta_2}\sum_{j=l+2}^k\omega( A_{\hat\Delta_i^l}^+(\Pi(A_{\eta\Delta_i^l}^+)),(\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j)\cap \Delta_i^l), \end{split} \end{equation} by the Harnack inequality, see Lemma \ref{lem4.7bol}. Consider $(\bar Z,\bar t)\in (\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j)\cap \Delta_i^l$. Then, again using Lemma \ref{lem4.5-Kyoto1ha} we have that \begin{eqnarray} K( A_{\hat\Delta_i^l}^+(\Pi(A_{\eta\Delta_i^l}^+)),\bar Z,\bar t):=\lim_{\rho\to 0}\frac{\omega( A_{\hat\Delta_i^l}^+(\Pi(A_{\eta\Delta_i^l}^+)),\partial\Omega\cap\mathcal{B}_\rho(\bar Z,\bar t))}{\omega(\partial\Omega\cap\mathcal{B}_\rho(\bar Z,\bar t))}, \end{eqnarray} exists for $\omega$-a.e. $(\bar Z,\bar t)\in \Delta_i^l$, and \begin{eqnarray} K(A_{\hat\Delta_i^l}^+(\Pi(A_{\eta\Delta_i^l}^+)),\bar Z,\bar t)\leq \frac c{\omega(\Delta_i^l)}\mbox{ whenever }(\bar Z,\bar t)\in \Delta_i^l. \end{eqnarray} In the last conclusion we have also used Lemma \ref{T:doubling}. Using these facts, and using the definition of the good $\epsilon_0$ cover, we see that \begin{equation}\label{yy8} \begin{split}
|II_2|&\leq\frac {c_{\eta_2}}{\omega(\Delta_i^l)}\sum_{j=l+2}^k\omega((\tilde{\mathcal{O}}_{j-1}\setminus\mathcal{O}_j)\cap \Delta_i^l)\\ &\leq\frac {c_{\eta_2}}{\omega(\Delta_i^l)}\sum_{j=l+2}^k\omega(\mathcal{O}_{j-1}\cap \Delta_i^l)\leq \frac {c_{\eta_2}}{ \omega(\Delta_i^l)}\sum_{j=l+2}^k\epsilon_0^{j-1-l}\omega(\Delta_i^l)\leq C_{\eta_2}\epsilon_0. \end{split} \end{equation} To estimate the term $II_3$ we first observe that $\hat \Delta_i^l\cap \tilde{\mathcal{O}}_{l}=\emptyset$ by the definition of $\tilde{\mathcal{O}}_{l}$. Hence, \begin{eqnarray} II_3&=&\omega( P^-,\hat\Delta_i^l\cap (\tilde{\mathcal{O}}_{l}\setminus\mathcal{O}_{l+1}))=0 \end{eqnarray} and we can conclude that \begin{eqnarray}\label{yy11} u( P^-)\leq c\eta_2^\sigma+C_{\eta_2}\epsilon_0. \end{eqnarray} Combining \eqref{yy10} and \eqref{yy11} we can conclude, in either case, that \begin{eqnarray} u( P^+)-u( P^-)\geq c_{\eta_1}^{-1}-C_{\eta_1}\epsilon_0-c\eta_2^\sigma-C_{\eta_2}\epsilon_0. \end{eqnarray} We now first choose $\eta_1=\eta_1(m,\kappa,M_1)$ small. We then choose $\eta_2=\eta_2(m,\kappa,M_1)$ so that $c_{\eta_1}^{-1}=2c\eta_2^\sigma$. Having fixed $\eta_1$ and $\eta_2$ we choose $\epsilon_0=\epsilon_0(m,\kappa,M_1)$ so that $c\eta_2^\sigma=2(C_{\eta_1}+C_{\eta_2})\epsilon_0$. By these choices we can conclude that there exists $0<\beta=\beta(m,\kappa,M_1)\ll 1$ such that \begin{eqnarray}\label{yy12} u(P+d_{\Delta_i^l}e_m)-u( P+l(\bar\Delta_i^l)e_m)\geq \beta,\ \forall P\in\{(x,\psi(x,y,t),y,y_m,t)\in \bar\Delta_i^l\}. \end{eqnarray} In particular, fix $P\in\{(x,\psi(x,y,t),y,y_m,t)\in \bar\Delta_i^l\}$. Then \eqref{yy12} implies \begin{eqnarray}
\beta^2\leq cl(\Delta_i^l)\int_{P^-}^{P^+}|\partial_{x_m}u(x,x_m,y,y_m,t)|^2\, \, \mathrm{d} x_m. \end{eqnarray} Integrating with respect to $P\in \bar\Delta_i^l$ we see that \begin{eqnarray}\label{yy14}
\beta^2\sigma(\bar\Delta_i^l)\leq cl(\Delta_i^l)\iiint_{R_{\bar\Delta_i^l}}|\nabla_Xu(Z,t)|^2\, \, \mathrm{d} Z\, \mathrm{d} t, \end{eqnarray} where $R_{\bar\Delta_i^l}$ is a naturally defined Whitney type region. Recall that $\sigma(\bar\Delta_i^l)\approx \sigma(\Delta_i^l)$. In particular, by an elementary connectivity/covering argument we see that \begin{eqnarray*}
\quad c^{-1}\beta^2\leq \iiint_{\tilde W_{\Delta_i^l}} |\nabla_Xu|^2\delta^{2-{\bf q}}\, dZdt, \end{eqnarray*} where $\tilde W_{\Delta_i^l}$ is a natural Whitney type region associated to $\Delta_i^l$, $\delta=\delta(Z,t)$ is the distance from $(Z,t)$ to $\Sigma$, and $c=c(m,M_1,\kappa)$, $1\leq c<\infty$. Consequently, for
$(Z_0,t_0)\in E$ fixed we find, by summing over all indices $i$, $l$, such that $(Z_0,t_0)\in \Delta_i^l$, that \begin{eqnarray}
\quad\quad c^{-1}\beta^2k\leq\sum_{i,l: (Z_0,t_0)\in \Delta_i^l} \biggl (\iiint_{\tilde W_{\Delta_i^l}} |\nabla_Xu|^2\delta^{2-{\bf q}}\, \, \mathrm{d} Z\, \mathrm{d} t\biggr ). \end{eqnarray} The construction can be made so that the Whitney type regions $\{\tilde W_{\Delta_i^l}\}$ have bounded overlaps measured by a constant depending only on $m$, $M_1$, and such that $W_{\Delta_i^l}\subset T_{cQ_0}$ for some $c=c(m,M_1)$, $1\leq c<\infty$, where $T_{cQ_0}$ is defined in \eqref{eq2.box}. Hence, integrating with respect to $\, \mathrm{d} \sigma$, we deduce that \begin{eqnarray}\label{yy13+}
\quad c^{-1}\beta^2k\sigma(E)\leq \biggl (\iiint_{ T_{cQ_0}} |\nabla_Xu|^2\delta^{2-{\bf q}}\, \, \mathrm{d} Z\, \mathrm{d} t\biggr ) \end{eqnarray} where, resolving the dependencies, $c=c(m,\kappa,M_1)$, $1\leq c<\infty$. Furthermore, $$k\approx \frac {\log(\delta_0)}{\log (\epsilon_0)},$$ where $\eta$ and $\epsilon_0$ now have been fixed, and $\delta_0$ is at our disposal. Given $\Upsilon$ we obtain the conclusion of the lemma by specifying $\delta_0=\delta_0(m,\kappa,M_1,\Upsilon)$ sufficiently small. This completes the proof of Lemma \ref{lemmacruc}. \qed
\end{document} |
\begin{document}
\title{
Markov bases for
two-way change-point models of\\
ladder determinantal tables
}
\begin{abstract} To evaluate the goodness-of-fit of a statistical model to given data, calculating a conditional $p$ value by a Markov chain Monte Carlo method
is one of the effective approaches. For this purpose, a Markov basis
plays an important role because it
guarantees the connectivity of the chain, which is needed for
unbiasedness of the
estimation, and therefore is investigated in various settings such as
incomplete tables or subtable sum constraints. In this paper, we
consider the two-way change-point model for the ladder determinantal
table, which is an extension of these two previous works, i.e., works on incomplete tables by Aoki and Takemura (2005, {\it
J. Stat. Comput. Simulat.}) and subtable
sum constraints by Hara, Takemura and Yoshida (2010, {\it J. Pure
Appl. Algebra}). Our main
result is based on the theory of Gr\"obner basis for the distributive
lattice. We give a numerical example for actual data. \end{abstract}
\section{Introduction} In the analysis of contingency tables, computing conditional $p$ values by a Markov chain Monte Carlo method is one of the common approaches to evaluate a fitting of a statistical model to given data. In this method, a key notion is a {\it Markov basis} that guarantees the connectivity of the chain for unbiasedness of the estimation. In Diaconis and Sturmfels (\cite{Diaconis-Sturmfels-1998}), a notion of a Markov basis is presented with algebraic algorithms to compute it. This first work is based on a discovery of the relation between a Markov basis and a set of binomial generators of a toric ideal of a polynomial ring, which is the first connection between commutative algebra and statistics. After this first paper, Markov bases are studied intensively by many researchers both in the fields of commutative algebra and statistics, which yields an attractive new field called {\it computational algebraic statistics}. See \cite{Pistone-Riccomagno-Wynn-2001} for the first textbook of this field, and \cite{Aoki-Hara-Takemura-2012} for various theoretical results and examples on Markov bases.
The first result on the Markov bases in the setting of two-way contingency tables
is a Markov basis for the independence model. For two-way contingency tables with fixed row sums and column sums, which is the minimal sufficient statistics under the independence model, the set of square-free moves of degree $2$ forms a Markov basis. This result is generalized to the decomposable models of higher dimensional contingency tables by \cite{Dobra-2003}. The reader can find various results on the structure of Markov bases of decomposable models in Chapter 8 of \cite{Aoki-Hara-Takemura-2012}.
On the other hand, it is known that the structure of a Markov basis becomes complicated under various additional constraints to the two-way setting. One of such cases is the {\it incomplete two-way contingency table}, i.e., a contingency table with {\it structural zeros}, considered in \cite{Aoki-Takemura-2005}. Another case is the {\it subtable sum problem} considered in \cite{Hara-Takemura-Yoshida-2010} and \cite{TWOWAY}. In these works, it is shown that moves of higher degrees are needed for Markov bases. The problem we consider in this paper is two-way contingency tables with both structural zeros and subtable sum constraints.
We consider the two-way contingency tables with specific types of structural zeros called {\it ladder determinantal tables}, with specific types of subtable sums called {\it two-way change-point model}. The two-way change-point model is considered in \cite{Hirotsu-1997} for exponential families, including the Poisson distribution for complete two-way contingency tables. We also consider the Poisson distribution and two-way change-point model for incomplete cases in this paper. The purpose of this paper is to show that a Markov basis for this setting is constructed as the set of square-free degree $2$ moves.
This paper is organized as follows. In Section 2, we illustrate the Markov chain Monte Carlo methods for the subtable sum problem of incomplete two-way contingency tables and the two-way change-point models of ladder determinantal tables. In Section 3, we give the structure of the minimal Markov bases for our problems, which is the main result of this paper. The arguments and the proof of our main theorem are based on the theory of Gr\"obner bases for distributive lattices, which is summarized in Section 3. A numerical example for actual data is given in Section 4.
\section{Preliminaries} \subsection{Markov chain Monte Carlo methods for subtable sum problem of
incomplete contingency tables} \label{subsec:MCMC-incomplete-table} First we illustrate the Markov chain Monte Carlo methods for the subtable sum problem of incomplete two-way contingency tables. Though we only consider the two-way change-point model in this paper, we describe the methods in the setting of general subtable sum problems considered in \cite{Hara-Takemura-Yoshida-2010}. Note that a specification of the subtable reduces to the two-way change-point model.
Let $\mathbb{N} = \{0,1,2,\ldots\}$ be the set of nonnegative integers. To consider $I\times J$ contingency tables with structural zeros, let $S \subset \{(i,j)\, :\, 1 \leq i \leq I, 1 \leq j \leq J\}$ be the set of cells that are not structural zeros. Let $q = |S|$ be the number of the cells. Let ${\bf x} = \{x_{ij}\} \in \mathbb{N}^q$ be an incomplete contingency table with the set of cells $S$, where $x_{ij} \in \mathbb{N}$ is an entry of the cell $(i,j) \in S$. Similarly to the ordinary (i.e., complete) two-way contingency tables, denote the row sums and column sums of ${\bf x}$ by \[
x_{i+} = \sum_{\{j\, :\, (i,j) \in S\}}x_{ij},\ \ i = 1,\ldots,I, \] \[
x_{+j} = \sum_{\{i\, :\, (i,j) \in S\}}x_{ij},\ \ j = 1,\ldots,J. \] We assume that there is at least one $(i,j) \in S$ in each row and each column. Let $B$ be a subset of $S$. We also define the subtable sum $x_B$ by \[
x_B = \sum_{(i,j) \in B}x_{ij}. \] Denote the set of the row sums, column sums and the subtable sum $x_B$ by an $(I+J+1)$-dimensional column vector \begin{equation}
{\bf t} = (x_{1+},\ldots,x_{I+},x_{+1},\ldots,x_{+J},x_B)' \in
\mathbb{N}^{I+J+1}, \label{eqn:minimal-sufficient-t} \end{equation} where ${}'$ is the transpose. We also treat ${\bf x}$ as a $q$-dimensional column vector as ${\bf x} = (x_{11},x_{12},\ldots,x_{IJ})'$, by lexicographic ordering of the cells in $S$. Then the relation between ${\bf x}$ and ${\bf t}$ is written by \begin{equation}
A{\bf x} = {\bf t}, \label{eqn:Ax=t} \end{equation} where $A$ is an $(I+J+1) \times q$ matrix consisting of $0$'s and $1$'s. We call $A$ a {\it configuration matrix}. Though we specify $S$ and $B$ in Section \ref{subsec:tables-and-models}, we show an example here. \begin{Example} Consider a $4\times 4$ incomplete contingency table with $6$ structural
zeros as follows. \[
\begin{array}{|c|c|c|c|}\hline x_{11} & x_{12} & x_{13} & [0]\\ \hline [0] & x_{22} & x_{23} & x_{24}\\ \hline [0] & [0] & x_{33} & x_{34}\\ \hline [0] & [0] & x_{43} & x_{44}\\ \hline \end{array} \] In this paper, we denote a structural zero as $[0]$ to distinguish it
from a sample zero described as $0$. Then the set $S$ is \[
S = \{(1,1), (1,2), (1,3), (2,2), (2,3), (2,4), (3,3), (3,4), (4,3), (4,4)\} \] and $q = 10$. Suppose a subset $B \subset S$ is given by \[
B = \{(1,1), (1,2), (1,3), (2,2), (2,3)\}. \] Then the configuration matrix is the following $9\times 10$ matrix. \[ A = \left(
\begin{array}{cccccccccc} 1 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 1 & 1 & 1 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1 & 0 & 0\\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1\\ 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ 0 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0\\ 0 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0\\ 0 & 0 & 0 & 0 & 0 & 1 & 0 & 1 & 0 & 1\\ 1 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 0 & 0 \end{array} \right) \] As we see in Section \ref{subsec:tables-and-models}, the
configuration matrix considered in this
paper satisfies the homogeneity assumption, i.e., the row vector
$(1,\ldots,1)$ is in the real vector space spanned by the rows of
$A$. This is a natural assumption for statistical models. See Lemma 4.14 of \cite{Sturmfels-1996} for the algebraic
aspect of the homogeneity. \hspace*{\fill}$\Box$ \end{Example}
To clarify the statistical meaning of the configuration matrix $A$ and the relation (\ref{eqn:Ax=t}), consider the cell probability ${\bf p} = \{p_{ij}\} \in \Delta_{q-1}$, where \[
\Delta_{q-1} = \left\{ \{p_{ij}\} \in \mathbb{R}^q_{\geq 0}\, :\, \sum_{(i,j) \in S}p_{ij} = 1 \right\} \] is called a $(q-1)$-dimensional probability simplex, and $\mathbb{R}_{\geq 0}$ is the set of nonnegative real numbers. The probability simplex $\Delta_{q-1}$ is a statistical model called a saturated model. In statistical data analysis, our interest is in a statistical model that is a subset of $\Delta_{q-1}$. The two-way change-point model we consider in this paper is written in general form by \begin{equation}
{\cal M} = \{{\bf p} = (p_{ij})\in \Delta_{q-1}\, :\, \log p_{ij} =
\alpha_i + \beta_j + \gamma{\bf 1}_B(i,j)\ \mbox{for some}\
(\alpha_i), (\beta_j), \gamma\}, \label{eqn:null-model-M} \end{equation} where ${\bf 1}_B(i,j)$ is an indicator function given by \[
{\bf 1}_B(i,j) = \left\{\begin{array}{ll} 1, & (i,j) \in B\\ 0, & (i,j) \in S \setminus B. \end{array} \right. \] Here the term $\gamma{\bf 1}_B(i,j)$ represents a departure from the independence structure of the log-linear model. The model ${\cal M}$ becomes a quasi-independence model for the cells $S$ by $\gamma = 0$. The quasi-independence model is a fundamental statistical model for the incomplete contingency tables (see Chapter 5 of \cite{Bishop-Fienberg-Holland-1975} for detail). Sometimes, the term ``quasi-independence'' is also used for the model of independence except for the diagonal cells. In this paper, we use the term ``quasi-independence'' for a larger class of models. Markov bases for the quasi-independence model are considered in \cite{Aoki-Takemura-2005}. Also, the model ${\cal M}$ for the case that there are no structural zeros, i.e., $S = \{1,\ldots,I\} \times \{1,\ldots,J\}$, corresponds to the setting considered in \cite{Hara-Takemura-Yoshida-2010}. The two-way change-point model we consider corresponds to the case \begin{equation}
B = \{(i,j) \in S\, :\, i \leq i^*, j \leq j^*\} \label{eqn:B-two-way-change-point} \end{equation} for a fixed $(i^*, j^*) \in S$.
In this paper, we consider the fitting of the model ${\cal M}$ by the statistical hypothesis test \begin{equation}
\begin{array}{ll} \mbox{H}_0:\ {\bf p} \in {\cal M},\\ \mbox{H}_1:\ {\bf p} \in \Delta_{q-1}. \end{array} \label{eqn:test-problem} \end{equation} Under the null hypothesis H$_0$, $(\alpha_i), (\beta_j), \gamma$ in (\ref{eqn:null-model-M}) are nuisance parameters. For testing a null hypothesis in the presence of nuisance parameters, a common approach is to base the inference on the conditional distribution given a minimal sufficient statistics for the nuisance parameters. This approach is also known as the Rao-Blackwellization of the test statistics. Using this conditional distribution, the conditional $p$ value is defined. See \cite{Agresti-1992} or Chapter 1 of \cite{Aoki-Hara-Takemura-2012} for detail. For our case, the minimal sufficient statistics under the null model (\ref{eqn:null-model-M}) is ${\bf t} = A{\bf x}$ in (\ref{eqn:minimal-sufficient-t}), that is the statistical meaning of the configuration matrix $A$. Therefore the conditional distribution under $\mbox{H}_0$, called a {\it null distribution}, is written by \[
f({\bf x}\ |\ A{\bf x} = {\bf t}) = C^{-1}\prod_{(i,j) \in S}\frac{1}{x_{ij}!}, \] where $C$ is the normalizing constant written by \[
C = \sum_{{\bf y} \in {\cal F}_{{\bf t}}}\left(\prod_{(i,j) \in
S}\frac{1}{y_{ij}!}\right), \] where \[
{\cal F}_{{\bf t}} = \left\{{\bf y} \in \mathbb{N}^q\, :\, A{\bf y} = {\bf t} \right\}. \] ${\cal F}_{{\bf t}}$, called a ${\bf t}$-{\it fiber}, is the set of contingency tables with given values of row sums, column sums and subtable sum. For the observed contingency table ${\bf x}^o$, the conditional $p$ value for the test (\ref{eqn:test-problem}) based on a test statistic $T({\bf x})$ is defined by \[
p = \sum_{{\bf x} \in {\cal F}_{A{\bf x}^o}}\phi({\bf x})f({\bf x}\ |\ A{\bf x} = A{\bf x}^o), \] where $\phi({\bf x})$ is the test function of $T({\bf x})$ given by \[
\phi({\bf x}) = \left\{\begin{array}{ll} 1, & T({\bf x}) \geq T({\bf x}^o),\\ 0, & \mbox{otherwise}. \end{array} \right. \] To evaluate the conditional $p$ value, a Monte Carlo approach is to generate samples from the null distribution
$f({\bf x}\ |\ A{\bf x} = A{\bf x}^o)$ and calculate the null distribution of the test statistics. In particular, if a connected Markov chain over ${\cal F}_{A{\bf x}^o}$ is constructed, the chain can be modified to give a connected and aperiodic Markov chain with stationary distribution
$f({\bf x}\ |\ A{\bf x} = A{\bf x}^o)$ by a Metropolis procedure, and we can use the transitions ${\bf x}^{(M+1)}, {\bf x}^{(M+2)}, \ldots \in {\cal F}_{A{\bf x}^o}$ of the chain after a large number of steps $M$, called burn-in steps, as samples from the null distribution. This is a {\it Markov chain Monte Carlo method}. See Chapter 2 of \cite{Aoki-Hara-Takemura-2012} or \cite{Hastings-1970} for detail.
To construct a connected Markov chain over ${\cal F}_{A{\bf x}^o}$, one of the common approaches is to use a {\it Markov basis} introduced in \cite{Diaconis-Sturmfels-1998}. An integer array ${\bf z} \in \mathbb{Z}^q$ satisfying $A{\bf z} = {\bf 0}$ is called a {\it move} for the configuration $A$, where $\mathbb{Z}$ is the set of integers. Let \[
{\cal F}_0(A) = \{{\bf z} \in \mathbb{Z}^q\, :\, A{\bf z} = {\bf 0}\} \] denote the set of moves for $A$. \begin{Definition}[\cite{Diaconis-Sturmfels-1998}] A Markov basis for $A$ is a finite set of moves ${\cal B} =
\{{\bf z}_1,\ldots,{\bf z}_L\} \subset {\cal
F}_0(A)$ such that, for any ${\bf t} \in \mathbb{N}^{I+J+1}$ and ${\bf x}, {\bf y}
\in {\cal F}_{{\bf t}}$ , there exist $N > 0, (\varepsilon_1,
{\bf z}_{\ell_1}),\ldots,(\varepsilon_N, {\bf z}_{\ell_N}) \in {\cal
B}$ with $\varepsilon_n \in \{-1,1\}$ such that \[
{\bf y} = {\bf x} + \sum_{s = 1}^N\varepsilon_s{\bf z}_{\ell_s}\ \ \mbox{and}\ \
{\bf x} + \sum_{s = 1}^n\varepsilon_s{\bf z}_{\ell_s} \in {\cal F}_{{\bf t}}\
\mbox{for}\ 1\leq n\leq N. \] \end{Definition} We also define the minimality and uniqueness of the Markov basis. \begin{Definition} A Markov basis ${\cal B}$ is minimal if no proper subset of ${\cal B}$
is a Markov basis. A minimal Markov basis is unique if all minimal
Markov bases differ only by sign changes of the elements. \end{Definition} The fundamental results on
uniqueness and minimality of Markov bases are given in Chapter 5 of \cite{Aoki-Hara-Takemura-2012}. For the independence model of the complete $I\times J$ contingency tables, where the minimal sufficient statistics $A{\bf x}$ is the row sums and column sums, it is known that the set of square-free moves of degree $2$, \[
{\cal B} = \{{\bf z}(i_1,i_2;j_1,j_2),\ \ 1\leq i_1 < i_2 \leq I, 1 \leq j_1
< j_2\leq J\}, \] where ${\bf z}(i_1,i_2;j_1,j_2) = \{z_{ij}\} \in {\cal F}_0(A)$ is given by \begin{equation}
z_{ij} = \left\{\begin{array}{rl} 1, & (i,j) = (i_1,j_1), (i_2,j_2),\\ -1, & (i,j) = (i_1,j_2), (i_2,j_1),\\ 0, & \mbox{otherwise} \end{array} \right. \label{eqn:basic-move-elements} \end{equation} is a unique minimal Markov basis. The square-free moves of degree $2$ above, displayed as \[
\begin{array}{r|r|r|} \multicolumn{1}{c}{} & \multicolumn{1}{c}{j_1} &
\multicolumn{1}{c}{j_2}\\ \cline{2-3} i_1 & 1 & -1\\ \cline{2-3} i_2 & -1 & 1\\ \cline{2-3} \end{array}\ , \] is called a {\it basic move}. In the presence of the structural zeros, the set of the basic moves is not a Markov basis in general. For example, as shown in \cite{Aoki-Takemura-2005}, for incomplete tables with structural zeros in the diagonal cells, moves of degree $3$ displayed as \[
\begin{array}{|c|c|c|}\hline [0] & +1 & -1\\ \hline -1 & [0] & +1\\ \hline +1 & -1 & [0]\\ \hline \end{array} \] are needed for Markov bases. Also, as shown in \cite{Hara-Takemura-Yoshida-2010}, if the subtable sum $x_B$ is fixed for the patterns such as \[
(i_1,j_1), (i_2,j_2) \in B,\ \ (i_1,j_2), (i_1,j_3), (i_2,j_1),
(i_2,j_3) \not\in B, \] moves such as \[
\begin{array}{c|c|c|c|} \multicolumn{1}{c}{} & \multicolumn{1}{c}{j_1} & \multicolumn{1}{c}{j_2}
& \multicolumn{1}{c}{j_3} \\ \cline{2-4} i_1 & +1 & +1 & -2\\ \cline{2-4} i_2 & -1 & -1 & +2\\ \cline{2-4} \end{array} \] are needed for Markov bases. In this paper, we consider a pattern of structural zeros $S$, called a ladder determinantal table, and a subtable pattern (\ref{eqn:B-two-way-change-point}) corresponding to a two-way change-point model and show that the set of basic moves forms a unique minimal Markov basis for this setting.
\subsection {Two-way change-point models of ladder determinantal tables} \label{subsec:tables-and-models} Now we specify $S$ considered in this paper. \begin{Definition}\label{def:ladder-determinantal} A ladder determinantal table is an incomplete contingency table with the
set of cells $S \subset
\{1,\ldots,I\} \times \{1,\ldots,J\}$ satisfying \[
(1,1), (I,J) \in S \] and has the form \begin{equation}
S = \displaystyle\bigcup_{i = 1}^I\{(i,j),\ \ell_i \leq j \leq u_i\}, \label{eqn:def-ladder-determinantal} \end{equation} where $\ell_i \leq \ell_{i+1}, u_i \leq u_{i+1}$ and $u_i \geq \ell_{i+1}$
hold for $i = 1,\ldots,I-1$. \end{Definition} Clearly the condition (\ref{eqn:def-ladder-determinantal}) is also written by \[
S = \displaystyle\bigcup_{j = 1}^J\{(i,j),\ \ell'_j \leq i \leq u'_j\}, \] where $\ell'_j \leq \ell'_{j+1}, u'_j \leq u'_{j+1}$ and $u'_j \geq \ell'_{j+1}$
hold for $j = 1,\ldots,J-1$. Figure \ref{fig:example-incomplete-tables} illustrates examples of incomplete contingency tables. Figure \ref{fig:example-incomplete-tables}($a$) and
($b$) are examples of the ladder determinantal tables, whereas ($c$) is
not. Figure \ref{fig:example-incomplete-tables}($c$) does not satisfy
the condition $u_3 \geq \ell_4$ of Definition \ref{def:ladder-determinantal} because $u_3 = 3 < 4 = \ell_4$. \begin{Remark} The ladder determinantal table above is a special case of a {\it block-stairway incomplete table}. As we see in Chapter 5 of \cite{Bishop-Fienberg-Holland-1975}, an
incomplete table
is called a block-stairway table if it is a ladder determinantal table after permutation of rows and columns. In this paper, we do not consider permutations of rows and columns because we consider ordered categorical tables. The terminology ``ladder determinantal'' is
used in algebraic fields. We see the relation between ladder
determinantal tables and distributive lattices in Section 3. \end{Remark} \begin{Remark} The condition $u_i \geq \ell_{i+1}$ for $i = 1,\ldots,I-1$ in Definition \ref{def:ladder-determinantal} corresponds to the inseparability of
incomplete tables. See Chapter 5 of
\cite{Bishop-Fienberg-Holland-1975}. We leave this condition because the inseparability is also a natural
condition in our change-point models. However, it is not essential condition in our result, i.e., Theorem \ref{thm:Markov-bases}
also holds for separable incomplete tables. \end{Remark}
\begin{figure}
\caption{Examples of incomplete contingency tables. (a) and (b) are
ladder determinantal tables, whereas (c) is not.}
\label{fig:example-incomplete-tables}
\end{figure}
For the ladder determinantal tables ${\bf x}$, we consider a two-way change-point model, i.e., the model (\ref{eqn:null-model-M}) with a subtable $B$ of the form (\ref{eqn:B-two-way-change-point}). Though the two-way change-point model is considered in \cite{Hirotsu-1997} for complete contingency tables, it can be also considered for incomplete cases. We see an example in Section \ref{sec:example}.
\section{Markov bases of two-way change-point models for ladder
determinantal tables} In this section we show the minimal Markov basis for two-way change-point models for ladder determinantal tables and its uniqueness. Note that the set of the basic moves, i.e, square-free moves of degree $2$, is written by \[
{\cal B}^* = \left\{{\bf z}(i_1,i_2; j_1,j_2)\ \left|\ \begin{array}{cl} & (i_1,j_1), (i_1,j_2), (i_2,j_1), (i_2,j_2) \in B\\ \mbox{or} & (i_1,j_1), (i_1,j_2) \in B, (i_2,j_1), (i_2,j_2) \in
S\setminus B\\ \mbox{or} & (i_1,j_1), (i_2,j_1) \in B, (i_1,j_2), (i_2,j_2) \in
S\setminus B\\ \mbox{or} & (i_1,j_1), (i_1,j_2), (i_2,j_1), (i_2,j_2) \in
S\setminus B\\ \end{array} \right.\right\} , \] where ${\bf z}(i_1,i_2; j_1,j_2) \in {\cal F}_0(A)$ is given by (\ref{eqn:basic-move-elements}). To show the set ${\cal B}^*$ constitutes a Markov basis, we use the arguments of distributive lattice.
Recall that a {\em partial order} on a set $P$ is a binary relation $\leq$ on $P$ such that, for all $a, b, c \in P$, one has \begin{itemize} \item $a \leq a$ (reflexivity); \item $a \leq b$ and $b \leq a$ $\Rightarrow$ $a = b$ (antisymmetry); \item $a \leq b$ and $b \leq c$ $\Rightarrow$ $a \leq c$ (transitivity). \end{itemize} A partially ordered set (``poset'' for short) is a set $P$ with a partial order $\leq$. When $P$ is a finite set, we call $P$ a finite poset. A {\em lattice} is a poset $L$ for which any two elements $a$ and $b$ belonging to $L$ possess a greatest lower bound (``meet'') $a \wedge b$ and a least upper bound (``join'') $a \vee b$.
\begin{Example} Let $B_n$ denote the set of subsets of $[n] = \{1,2,\ldots,n\}$ and define the partial order $\leq$ on $B_n$ by setting $X \leq Y$ if $X \subset Y (\subset [n])$. Then, in $B_n$, one has $X \cap Y = X \wedge Y$ and $X \cup Y = X \vee Y$. Thus $B_n$ is a finite lattice, which is called the {\em boolean lattice} of rank $n$. \end{Example}
A lattice $L$ is called {\em distributive} if, for all $a, b, c \in L$, one has \[ a \wedge (b \vee c) = (a \wedge b) \vee (a \wedge c), \, \, \, \, \, a \vee (b \wedge c) = (a \vee b) \wedge (a \vee c). \] For example, the boolean lattice of rank $n$ is a distributive lattice.
Let $P$ be a finite poset. A {\em poset ideal} of $P$ is a subset $\alpha \subset P$ such that \[ a \in \alpha, \, \, b \in P, \, \, b \leq a \, \, \Rightarrow \, \, b \in \alpha. \] In particular $P$ itself as well as the empty set $\emptyset$ is a poset ideal of $P$. Furthermore, if $\alpha$ and $\beta$ are poset ideals of $P$, then both $\alpha \cap \beta$ and $\alpha \cup \beta$ are poset ideals of $P$.
Given a finite poset $P$, we write $L = {\mathcal J}(P)$ for the set of all poset ideals of $P$. We then define a partial order $\leq$ on $L$ by setting $\alpha \leq \beta$ if $\alpha \subset \beta$, where $\alpha$ and $\beta$ are poset ideals of $P$. It follows that $L = {\mathcal J}(P)$ is a finite distributive lattice.
A {\em totally ordered subset} of a finite poset $P$ is a subset $C$ of $P$ such that, for $a, b \in C$, one has either $a \leq b$ or $b \leq a$. A totally ordered subset of $P$ is also called a {\em chain} of $P$.
Now, a finite distributive lattice $L = {\mathcal J}(P)$ is called {\em planar} if \begin{enumerate} \item[(i)] $P$ itself is {\em not} a chain of $P$; \item[(ii)] $P$ can be decomposed into the disjoint union of two chains of $P$. \end{enumerate}
\begin{Example} Let $P = \{a,b,c,d\}$ be a finite poset with $a < c, b < c, b < d$. Then $P$ is a disjoint union of chains $C = \{a, c\}$ and $D = \{b, d\}$. The finite planar distributive lattice $L = {\mathcal J}(P)$ is shown in Figure \ref{fig:distributive-lattice-L}. \begin{figure}
\caption{Distributive lattice $L = {\mathcal J}(P)$}
\label{fig:distributive-lattice-L}
\end{figure}
\end{Example}
Suppose that $L = {\mathcal J}(P)$ is a planar distributive lattice for which $P$ is the disjoint union of chains $C = \{a_{1}, \ldots, a_{n}\}$ and $D = \{b_{1}, \ldots, b_{m}\}$ of $P$ with $a_{1} < \cdots < a_{n}$ and $b_{1} < \cdots < b_{m}$, where $n \geq 1$ and $m \geq 1$. Let \[ K[{\bf x}, {\bf y}, s, t] = K[x_{1}, \ldots, x_{n}, y_{1}, \ldots, y_{m}, s, t] \] denote the polynomial ring in $n + m + 2$ variables over a field $K$. We fix a poset ideal $S$ of $L$ with $S \neq \emptyset$ and $S \neq L$. Given $\alpha \in L$ with
$i_{0} = \max\{ i \, : \, a_{i} \in \alpha \}$ and
$j_{0} = \max\{ j \, : \, b_{j} \in \alpha \}$,
one can associate the monomial $u_{\alpha} \in K[{\bf x}, {\bf y}, s, t]$ with \begin{eqnarray*} u_{\alpha} = \left\{ \begin{array}{ll} x_{i_{0}}y_{j_{0}}s & \text{if} \, \, \, \alpha \in S, \\ x_{i_{0}}y_{j_{0}}t & \text{if} \, \, \, \alpha \in L \setminus S. \end{array} \right. \end{eqnarray*} We write ${\mathcal R}_{K}[L;S]$ $( \subset K[{\bf x}, {\bf y}, s, t] )$ for the toric ring generated by those monomials $u_{\alpha}$ with $\alpha \in L$.
Let $K[L] = K[z_{\alpha} \, : \, \alpha \in L]$ denote the polynomial ring in $|L|$ variables over $K$ and fix the reverse lexicographic order $<_{\rm rev}$ on $K[L]$ induced by an ordering of the variables of $K[L]$ with the property that $z_{\alpha} < z_{\beta}$ if $\alpha < \beta$ in $L$. We define the surjective ring homomorphism $\pi : K[L] \to {\mathcal R}_{K}[L;S]$ by setting $\pi(z_{\alpha}) = u_{\alpha}$ with $\alpha \in L$. Let $I_{(L;S)}$ $( \subset K[L] )$ denote the kernel of $\pi$, which will be called the {\em toric ideal} of ${\mathcal R}_{K}[L;S]$. We refer the reader to, e.g., \cite{dojoEN} for the foundation of Gr\"obner bases and toric ideals.
Let ${\mathcal A}$ be the set of those $2$-element subsets $\{\alpha, \beta\}$ of $L$, where $\alpha$ and $\beta$ are incomparable in $L$, satisfying one of the following: \begin{itemize} \item $\{\alpha, \beta, \alpha \vee \beta\} \subset S$; \item $\{\alpha, \beta, \alpha \wedge \beta\} \subset L \setminus S$; \item $\alpha \in S$ and $\beta \in L \setminus S$. \end{itemize} It then follows that, for each $\{\alpha, \beta\} \in {\mathcal A}$, the binomial \begin{equation} f_{\alpha, \beta} = z_{\alpha}z_{\beta} - z_{\alpha \wedge
\beta}z_{\alpha \vee \beta} \label{eqn:binom-f-a-b} \end{equation} belongs to $I_{(L;S)}$.
\begin{Example} Consider the distributive lattice for Table \ref{tbl:hydra-data} we will consider in Section 4. The set
of the cells of Table \ref{tbl:hydra-data} is displayed as follows. \[
\begin{array}{c|r|r|r|r|r|r|r|}
\multicolumn{1}{c}{} & \multicolumn{1}{c}{1} & \multicolumn{1}{c}{2} &
\multicolumn{1}{c}{3} & \multicolumn{1}{c}{4} & \multicolumn{1}{c}{5} &
\multicolumn{1}{c}{6} & \multicolumn{1}{c}{7}\\ \cline{2-2}
1 & (1,1) & \multicolumn{6}{|c}{}\\ \cline{2-3}
2 & (2,1) & (2,2) & \multicolumn{5}{|c}{}\\ \cline{2-4}
3 & (3,1) & (3,2) & (3,3) & \multicolumn{4}{|c}{}\\ \cline{2-5}
4 & (4,1) & (4,2) & (4,3) & (4,4) & \multicolumn{3}{|c}{}\\ \cline{2-6}
\multicolumn{1}{c}{5} & \multicolumn{1}{c|}{} & (5,2) & (5,3) & (5,4) & (5,5) &
\multicolumn{2}{|c}{}\\ \cline{3-7}
\multicolumn{1}{c}{6} & \multicolumn{2}{c|}{} & (6,3) & (6,4) & (6,5) & (6,6) &
\multicolumn{1}{|c}{}\\
\cline{4-8}
\multicolumn{1}{c}{7} & \multicolumn{3}{c|}{} & (7,4) & (7,5) & (7,6) & (7,7) \\
\cline{5-8} \end{array} \] Hereafter we ignore the cells $(1,1)$ and $(7,7)$ because the frequencies
$x_{11}$ and $x_{77}$ are fixed
under the model. Then the corresponding planar distributive lattice $L$ is displayed in Figure
\ref{fig:appendix-example}(a). In Figure
\ref{fig:appendix-example}(a), the set of black vertices $\bullet$
represents a
corresponding poset $P$ where $L = {\mathcal J}(P)$, which is also displayed in Figure
\ref{fig:appendix-example}(b). Note that each vertex $\circ$ or $\bullet$ in Figure
\ref{fig:appendix-example}(a) represents a poset ideal of the poset
consisting of all $\bullet$'s under or equal to it. For example, the vertex $\circ$ at $(5,4)$ in Figure \ref{fig:appendix-example}(a) represents a poset ideal \[ \{(2,2), (3,1), (3,3), (4,1), (4,4), (5,2)\}, \] of $P$. The poset ideal $S \subset L$ of Figure \ref{fig:appendix-example}(c) corresponds to the two-way change-point model we have considered in Section
\ref{sec:example}.
\begin{figure}
\caption{The planar distributive lattice for Table
\ref{tbl:hydra-data} (a), the corresponding poset (b) and the poset
ideal for the two-way change-point model (c).}
\label{fig:appendix-example}
\end{figure} The poset $P$ is written by the disjoint union of chains \[
C = \{(3,1), (4,1), (5,2), (6,3), (7,4)\} = \{a_3, a_4, a_5, a_6, a_7 \} \] and \[
D = \{(2,2), (3,3), (4,4), (5,5), (6,6) \}
= \{b_2, b_3, b_4, b_5, b_6\}. \] Note here that we are shifting the indices of $\{a_i\}, \{b_j\}$, so as
to correspond $a_i$ to $i$-th row, and $b_j$ to $j$-th column, respectively. Then for $(i,j) \in L$, we see that $i_0 = i$ and $j_0 = j$, and the ring homomorphism $\pi$ is
written by $\pi(z_{ij}) = x_iy_js$ for $(i,j) \in S$ and $\pi(z_{ij}) = x_iy_jt$ for $(i,j) \in L\setminus S$, respectively.
For the planar distributive lattice $L$ displayed in Figure \ref{fig:appendix-example}(a) and for the poset ideal $S \subset L$ displayed in Figure \ref{fig:appendix-example}(c), there are $14$ incomparable $2$-element subsets in the set ${\mathcal A}$ as follows. \begin{itemize} \item $\{\alpha, \beta, \alpha \vee \beta\} \subset S$; \[ \begin{array}{l}
\{(2,2),(3,1)\}, \{(2,2),(4,1)\}, \{(3,2),(4,1)\}, \{(3,3),(4,1)\},
\{(3,3),(4,2)\},\\
\{(3,3),(5,2)\}, \{(4,3),(5,2)\}, \end{array} \] \item $\{\alpha, \beta, \alpha \wedge \beta\} \subset L \setminus S$; \[ \begin{array}{l} \{(5,5),(6,4)\}, \{(5,5),(7,4)\}, \{(6,5),(7,4)\}, \{(6,6),(7,4)\},
\{(6,6),(7,5)\}, \end{array}\] \item $\alpha \in S$ and $\beta \in L \setminus S$: \[
\{(4,4),(5,2)\}, \{(4,4),(5,3)\}. \] \end{itemize} The set of the corresponding binomials (\ref{eqn:binom-f-a-b}) for these $14$ pairs coincides with the set of $14$ square-free degree $2$
moves of (\ref{eqn:minimal-MB-example}).\hspace*{\fill}$\Box$ \end{Example}
\begin{Theorem} \label{GBtheorem} Let ${\mathcal G}$ be the set of those binomials $f_{\alpha, \beta}$ with $\{\alpha, \beta\} \in {\mathcal A}$. Then ${\mathcal G}$ is the reduced Gr\"obner basis of $I_{(L;S)}$ with respect to $<_{\rm rev}$. \end{Theorem}
The proof of this theorem is in Appendix. From this theorem, we have the following result on the Markov basis for our problem.
\begin{Theorem}\label{thm:Markov-bases} ${\cal B}^*$ is the unique minimal Markov basis for $A$ of two-way
change-point models
for ladder determinantal tables. \end{Theorem}
The uniqueness of the minimal Markov basis is from the following known result. \begin{Lemma}[Corollary 5.2 of
\cite{Aoki-Hara-Takemura-2012}]\label{lem:unique-minimal} The unique minimal Markov basis exists if and only if the set of
indispensable moves forms a Markov basis. In this case, the set of
indispensable moves is the unique minimal Markov basis. \end{Lemma}
\noindent ({\it Proof of Theorem \ref{thm:Markov-bases}}.)\ We show ${\cal B}^*$ corresponds to the reduced Gr\"obner basis of the
corresponding toric ideal, and therefore a Markov basis, in Theorem
\ref{GBtheorem}. Because each element of ${\cal B}^*$ is an indispensable move, i.e., the difference of the two elements of a
$2$-element fiber, ${\cal B}^*$ is the unique minimal Markov basis from Lemma \ref{lem:unique-minimal}. \hspace*{\fill}$\Box$
\section{Example}\label{sec:example} Table \ref{tbl:hydra-data} is an example of the ladder determinantal tables from Table 4.4-13 of \cite{Bishop-Fienberg-Holland-1975}.
In this experiment, annuli from donor hydra was grafted to host hydra and observed for foot formation. The object of this experiment is to evaluate the influence of donor and grafted annulus positions on foot generation. The frequencies are the cases of foot formation out of $25$ trials, and the row and column indicate the positions $1,\ldots,7$ from foot (position $1$) to head (position $7$) of hydra. \begin{table}[htbp] \begin{center} \caption{Basal disc regeneration in hydra from Table 4.4-13 of
\cite{Bishop-Fienberg-Holland-1975}} \label{tbl:hydra-data} \[
\begin{array}{lc|c|c|c|c|c|c|c|} & \multicolumn{7}{c}{\mbox{Donor annulus position}}\\ & \multicolumn{1}{c}{} & \multicolumn{1}{c}{1} & \multicolumn{1}{c}{2} &
\multicolumn{1}{c}{3} & \multicolumn{1}{c}{4} & \multicolumn{1}{c}{5} &
\multicolumn{1}{c}{6} & \multicolumn{1}{c}{7}\\ \cline{3-3}
& 1 & 4 & \multicolumn{6}{|c}{}\\ \cline{3-4}
& 2 & 4 & 0 & \multicolumn{5}{|c}{}\\ \cline{3-5}
\mbox{Position of graft} & 3 & 19 & 5 & 1 & \multicolumn{4}{|c}{}\\ \cline{3-6}
\mbox{in host} & 4 & 24 & 15 & 4 & 5 & \multicolumn{3}{|c}{}\\ \cline{3-7}
& \multicolumn{1}{c}{5} & \multicolumn{1}{c|}{} & 19 & 18 & 18 & 8 &
\multicolumn{2}{|c}{}\\ \cline{4-8}
& \multicolumn{1}{c}{6} & \multicolumn{2}{c|}{} & 24 & 21 & 16 & 5 &
\multicolumn{1}{|c}{}\\
\cline{5-9}
& \multicolumn{1}{c}{7} & \multicolumn{3}{c|}{} & 23 & 22 & 8 & 1 \\
\cline{6-9} \end{array} \] \end{center} \end{table} For this data, though it is more natural to consider binomial sampling model, we assume Poisson sampling model here to illustrate our method. Then we consider the fitting of the two-way change-point model of \[
B = \{(1,1),(2,1),(2,2),(3,1),(3,2),(4,1),(4,2)\}. \] The configuration matrix $A$ is a $15\times 22$ matrix written by \[
A = \left(\begin{array}{c} 1000000000000000000000\\ 0110000000000000000000\\ 0001110000000000000000\\ 0000001111000000000000\\ 0000000000111100000000\\ 0000000000000011110000\\ 0000000000000000001111\\ 1101001000000000000000\\ 0010100100100000000000\\ 0000010010010010000000\\ 0000000001001001001000\\ 0000000000000100100100\\ 0000000000000000010010\\ 0000000000000000000001\\ 1111101100000000000000 \end{array} \right). \] The fitted value of the two-way change-point model is displayed in Table \ref{tbl:fitted-value}. \begin{table}[htbp] \begin{center} \caption{Fitted value of the two-way change-point model $(i^*,j^*) =
(4,2)$ for Table \ref{tbl:hydra-data}} \label{tbl:fitted-value} \[
\begin{array}{lc|r|r|r|r|r|r|r|} & \multicolumn{7}{c}{\mbox{Donor annulus position}}\\ & \multicolumn{1}{c}{} & \multicolumn{1}{c}{1} & \multicolumn{1}{c}{2} &
\multicolumn{1}{c}{3} & \multicolumn{1}{c}{4} & \multicolumn{1}{c}{5} &
\multicolumn{1}{c}{6} & \multicolumn{1}{c}{7}\\ \cline{3-3}
& 1 & 4.00 & \multicolumn{6}{|c}{}\\ \cline{3-4}
& 2 & 2.81 & 1.19 & \multicolumn{5}{|c}{}\\ \cline{3-5}
\mbox{Position of graft} & 3 & 15.94 & 6.78 & 2.28 & \multicolumn{4}{|c}{}\\ \cline{3-6}
\mbox{in host} & 4 & 28.26 & 12.03 & 4.05 & 3.67 & \multicolumn{3}{|c}{}\\ \cline{3-7}
& \multicolumn{1}{c}{5} & \multicolumn{1}{c|}{} & 19.00 & 17.17 & 15.54 & 11.29 &
\multicolumn{2}{|c}{}\\ \cline{4-8}
& \multicolumn{1}{c}{6} & \multicolumn{2}{c|}{} & 23.50 & 21.27 & 15.45 & 5.79 &
\multicolumn{1}{|c}{}\\
\cline{5-9}
& \multicolumn{1}{c}{7} & \multicolumn{3}{c|}{} & 26.52 & 19.26 & 7.21 & 1.00 \\
\cline{6-9} \end{array} \] \end{center} \end{table}
As a test statistic, we use Pearson's goodness-of-fit $\chi^2$ \[
\chi^2 = \sum_{(i,j) \in S}\frac{(x_{ij} - m_{ij})^2}{m_{ij}}, \] where ${\bf m} = (m_{ij})$ is the fitted value in Table \ref{tbl:fitted-value}. We have $\chi^2 = 7.814$ with $8$ degrees of freedom. From Theorem \ref{thm:Markov-bases}, a unique minimal Markov basis is the set of $14$ square-free degree $2$ moves below, \begin{equation} \begin{array}{ccccc} {\bf z}(2,3; 1,2), & {\bf z}(2,4; 1,2), & {\bf z}(3,4; 1,2), & {\bf z}(3,4; 1,3), & {\bf z}(3,4; 2,3),\\ {\bf z}(4,5; 3,4), & {\bf z}(4,6; 3,4), & {\bf z}(5,6; 3,4), & {\bf z}(5,6; 3,5), & {\bf z}(5,6; 4,5), \\ {\bf z}(5,7; 4,5), & {\bf z}(6,7; 4,5), & {\bf z}(6,7; 4,6), & {\bf z}(6,7; 5,6), & \end{array} \label{eqn:minimal-MB-example} \end{equation} where ${\bf z}(i_1,i_2; j_1,j_2)$ is given by (\ref{eqn:basic-move-elements}). Using the above Markov basis, we calculate the conditional $p$ value by the Markov chain Monte Carlo method. Starting from the observed data, after discarding $50000$ burn-in samples, we generate $100000$ samples from the Markov chain and have the estimate $\hat{p} = 0.46$. Note that the asymptotic $p$ value based on the asymptotic $\chi_8^2$ distribution of the test statistics is $0.452$, which means good fitting of the asymptotic distribution for Table \ref{tbl:hydra-data}. Figure \ref{fig:hydra-histgram} is a histogram of Pearson's goodness-of-fit $\chi^2$ generated by the Markov chain, which also shows the good fitting of the asymptotic distribution. \begin{figure}
\caption{A histogram of Pearson's goodness-of-fit $\chi^2$ generated by
the Markov
chain. Dotted line is the asymptotic $\chi^2_8$ distribution.}
\label{fig:hydra-histgram}
\end{figure}
Similarly, we check the goodness-of-fit of all the two-way change-point models for each $(i^*, j^*)$, and find that the model with $(i^*, j^*) = (4,2)$ is the best two-way change-point model for Table \ref{tbl:hydra-data}, i.e., the model with the maximal estimated $p$ value.
\section{Discussion} In this paper, we give a unique minimal Markov basis for two-way change-point models of ladder determinantal tables. Our setting is an extension of two papers, \cite{Aoki-Takemura-2005} and \cite{Hara-Takemura-Yoshida-2010}. The two-way change-point model is an example of subtable sum problems considered in \cite{Hara-Takemura-Yoshida-2010}, and the ladder determinantal table is an example of incomplete contingency tables considered in \cite{Aoki-Takemura-2005}. We consider both constraints at once in this paper.
Our main result is based on the theory of Gr\"obner bases for the distributive lattice. As we see in Section 3, the ladder determinantal table is treated as the distributive lattice. One important point is that we can consider any poset ideal as the two-way change-point models, even if it is not a rectangular shape as (\ref{eqn:B-two-way-change-point}). Therefore our method is also used for any $B$ as long as it corresponds to a poset ideal of the distributive lattice.
In the analysis of two-way contingency tables, several extensions of the independence model
are considered from the viewpoint of algebraic statistics. For example, a weakened independence model by \cite{Carlini-Rapallo} is constructed from the set of $2\times 2$ adjacent minors.
\appendix
\section{Proof of Theorem \ref{GBtheorem}} \begin{proof} Once we know that ${\mathcal G}$ is a Gr\"obner basis of $I_{(L;S)}$ with respect to $<_{\rm rev}$, it follows immediately that ${\mathcal G}$ is reduced. The initial monomial ${\rm in}_{<_{\rm rev}}(f_{\alpha, \beta})$ of $f_{\alpha, \beta}$ is ${\rm in}_{<_{\rm rev}}(f_{\alpha, \beta}) = z_{\alpha}z_{\beta}$. Let ${\rm in}_{<_{\rm rev}}({\mathcal G})$ denote the ideal of $K[L]$ generated by those monomials ${\rm in}_{<_{\rm rev}}(f_{\alpha, \beta})$ with $f_{\alpha, \beta} \in {\mathcal G}$. Clearly ${\rm in}_{<_{\rm rev}}({\mathcal G}) \subset {\rm in}_{<_{\rm rev}}(I_{(L;S)})$, where ${\rm in}_{<_{\rm rev}}(I_{(L;S)})$ is the initial ideal of $I_{(L;S)}$ with respect to $<_{\rm rev}$. In order to show that ${\mathcal G}$ is a Gr\"obner basis of $I_{(L;S)}$ with respect to $<_{\rm rev}$, by virtue of the technique \cite[Lemma 1.1]{AHH}, what we must prove is that, for monomials $u$ and $v$, where $u \neq v$, belonging to $K[L]$ with $u \not\in {\rm in}_{<_{\rm rev}}({\mathcal G})$ and $v \not\in {\rm in}_{<_{\rm rev}}({\mathcal G})$, one has $\pi(u) \neq \pi(v)$. One can assume that $u$ and $v$ are relatively prime and, furthermore, \begin{eqnarray*} u = z_{\alpha_{1}} \cdots z_{\alpha_{p}} z_{\beta_{1}} \cdots z_{\beta_{q}}, \, \, \, \, \, v = z_{\alpha'_{1}} \cdots z_{\alpha'_{p}} z_{\beta'_{1}} \cdots z_{\beta'_{q}}, \end{eqnarray*} where each $\alpha_{i} \in S$, each $\alpha'_{i} \in S$, each $\beta_{j} \in L \setminus S$ and each $\beta'_{j} \in L \setminus S$. Since $z_{\alpha}z_{\beta} \in {\rm in}_{<_{\rm rev}}({\mathcal G})$ if $\alpha$ and $\beta$ are incomparable in $L$ with $\alpha \in S$ and $\beta \in L \setminus S$, the condition that
\noindent \hspace{0.5cm} $(\sharp)$ \,for each $i$ and for each $j$, one has $\alpha_{i} < \beta_{j}$ and $\alpha'_{i} < \beta'_{j}$
\noindent is satisfied.
If $\alpha_{i} \vee \alpha_{i'} \in S$, then $\alpha_{i}$ and $\alpha_{i'}$ must be comparable in $L$. Thus in particular, if $\alpha_{i} \vee \alpha_{i'} \in S$ for each $i$ and for each $i'$ with $1 \leq i < i' \leq p$, then $\{\alpha_{1}, \ldots, \alpha_{p}\}$ is a multichain of $L$. On the other hand, suppose that there exist $1 \leq i < i' \leq p$ with $\alpha_{i} \vee \alpha_{i'} \in L \setminus S$. Then, by $(\sharp)$, for each $j$ and for each $j'$ with $1 \leq j < j' \leq q$, one has $\beta_{j} \wedge \beta_{j'} \in L \setminus S$, so that $\beta_{j}$ and $\beta_{j'}$ must be comparable in $L$. Hence $\{\beta_{1}, \ldots, \beta_{q}\}$ is a multichain of $L$.
Now, suppose that \begin{enumerate} \item[(i)] for each $i$ and for each $i'$ with $1 \leq i < i' \leq p$, one has $\alpha_{i} \vee \alpha_{i'} \in S$; \item[(ii)] there exist $1 \leq k < k' \leq p$ for which $\alpha'_{k} \vee \alpha'_{k'} \in L \setminus S$. \end{enumerate}
Then each of $\{\alpha_{1}, \ldots, \alpha_{p}\}$ and $\{\beta'_{1}, \ldots, \beta'_{q}\}$ is a multichain of $L$. Ignoring the variables $s$ and $t$, the toric ring ${\mathcal R}_{K}[L]$ introduced in \cite{Hibiring} arises. Working in the frame of \cite{Hibiring}, if $u^{*} = z_{\gamma_{1}} \cdots z_{\gamma_{p+q}}$ is the standard monomial expression of $u$ and $v^{*} = z_{\gamma'_{1}} \cdots z_{\gamma'_{p+q}}$ is
that of $v$, then again by $(\sharp)$ one has $|\{ i \, : \, \gamma_{i} \in S\}| \geq p$ and
$|\{ j \, : \, \gamma'_{j} \in S\}| < p$. Hence $u^{*} \neq v^{*}$. Thus $\pi(u) \neq \pi(v)$.
The same argument as above shows that if we suppose \begin{enumerate} \item[(i')] for each $j$ and for each $j'$ with $1 \leq j < j' \leq q$, one has $\beta_{j} \wedge \beta_{j'} \in L \setminus S$; \item[(ii')] there exist $1 \leq \ell < \ell' \leq q$ for which $\beta'_{\ell} \wedge \beta'_{\ell'} \in S$, \end{enumerate} then $\pi(u) \neq \pi(v)$.
Let $\pi(u) = \pi(v)$. Then one can assume one of the following conditions: \begin{enumerate} \item[$(\clubsuit)$] for each $i$ and for each $i'$ with $1 \leq i < i' \leq p$, one has $\alpha_{i} \vee \alpha_{i'} \in S$ and $\alpha'_{i} \vee \alpha'_{i'} \in S$; \item[$(\spadesuit)$] for each $j$ and for each $j'$ with $1 \leq j < j' \leq q$, one has $\beta_{j} \wedge \beta_{j'} \in L \setminus S$ and $\beta'_{j} \wedge \beta'_{j'} \in L \setminus S$. \end{enumerate}
Suppose $(\clubsuit)$. Then each of $\{\alpha_{1}, \ldots, \alpha_{p}\}$ and $\{\alpha'_{1}, \ldots, \alpha'_{p}\}$ is a multichain of $L$. Hence, by $(\sharp)$ together with \cite{Hibiring}, one has $\{\alpha_{1}, \ldots, \alpha_{p}\} = \{\alpha'_{1}, \ldots, \alpha'_{p}\}$ as multichains of $L$. Since $u$ and $v$ are relatively prime, one has $p = 0$.
Let $p = 0$ and $q \geq 2$. Let $\pi(z_{\beta_{j}}) = x_{\xi_{j}}y_{\zeta_{j}}t$ for $1 \leq j \leq q$. Set $\xi = \min\{ \xi_{j} \, : \, 1 \leq j \leq q \}$ and write $\zeta$ for the smallest integer for which there is $1 \leq j_{0} \leq q$ with $\pi(z_{\beta_{j_{0}}}) = x_{\xi}y_{\zeta}t$. Then there exist $\beta'_{j_{1}}$ and $\beta'_{j_{2}}$ such that $\pi(z_{\beta'_{j_{1}}}) = x_{\xi}y_{j_{*}}t$ and $\pi(z_{\beta'_{j_{2}}}) = x_{i_{*}}y_{\zeta}t$. One has $i_{*} > \xi$ and $j_{*} > \zeta$. Hence $\beta'_{j_{1}} \wedge \beta'_{j_{2}} = \beta_{j_{0}}$. Since $\beta_{j_{0}} \in L \setminus S$ and since $\beta'_{j_{1}}$ and $\beta'_{j_{2}}$ are incomparable in $L$, one has $z_{\beta'_{j_{1}}}z_{\beta'_{j_{2}}} \in {\rm in}_{<_{\rm rev}}({\mathcal G})$, which contradicts $v \not\in {\rm in}_{<_{\rm rev}}({\mathcal G})$.
Finally, the same argument as above is also valid if we suppose $(\spadesuit)$.
This completes proving that ${\mathcal G}$ is the reduced Gr\"obner basis of $I_{(L;S)}$ with respect to $<_{\rm rev}$. \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \, \end{proof}
\end{document} |
\begin{document}
\title[The structure of finitely generated modules]{On the structure of finitely generated modules over quotients of Cohen-Macaulay local rings}
\author[N.T. Cuong]{Nguyen Tu Cuong} \address{Institute of Mathematics, 18 Hoang Quoc Viet Road, 10307 Hanoi, Vietnam} \email{ntcuong@math.ac.vn}
\author[P.H. Quy]{Pham Hung Quy} \address{Department of Mathematics FPT University, Hanoi, Vietnam} \email{quyph@fe.edu.vn} \thanks{2010 {\em Mathematics Subject Classification\/}: 13H10, 13D45, 13H15.\\ This work is partially supported by funds of Vietnam National Foundation for Science and Technology Development (NAFOSTED)}
\keywords{Cohen-Macaulay module; local cohomology; system of parameters; unmixed component; Cohen-Macaulay deviated sequence; extended degree; unmixed degree.}
\begin{abstract} Let $(R, \frak m)$ be a homomorphic image of a Cohen-Macaulay local ring and $M$ a finitely generated $R$-module. We use the splitting of local cohomology to shed new light on the structure of non-Cohen-Macaulay modules. Namely, we show that every finitely generated $R$-module $M$ is associated with a sequence of invariant modules. This sequence of modules measures the deviation of $M$ from the Cohen-Macaulay property. Our result generalizes the unmixed theorem of Cohen-Macaulayness for any finitely generated $R$-module. As an application we construct a new extended degree in the sense of Vasconcelos. \end{abstract} \maketitle
\tableofcontents \section{Introduction} Throughout this paper, let $(R, \frak m)$ be a Noetherian local ring and $M$ a finitely generated $R$-module of dimension $d$. Let $x_1, \ldots, x_d$ be a system of parameters of $M$.\\
\noindent {\bf Standard setting.} We always assume that $R$ is a homomorphic image of a Cohen-Macaulay local ring.\\
Cohen-Macaulay rings and modules are the central objects of commutative algebra. The unmixed theorem says that $M$ is Cohen-Macaulay if and only if for every $i < d$ all associated prime ideals of $M/(x_1, \ldots, x_i)M$ have the same height $i$ (or dimension $d-i$), that is, $M/(x_1, \ldots, x_i)M$ is an unmixed module for all $i < d$ and for every system of parameters $x_1, \ldots, x_d$. If $\cap_{\frak p \in \mathrm{Ass}M}N(\frak p) = 0$ is a reduced primary decomposition of the zero submodule of $M$, then the {\it unmixed component} of $M$ is defined by $$U_M(0) = \bigcap_{\frak p \in \mathrm{Ass}M, \dim R/\frak p = d}N(\frak p).$$ Then $U_M(0)$ is just the largest submodule of $M$ of dimension strictly less than $d$. The following is the unmixed component version of the unmixed theorem.\\
\noindent {\bf The unmixed theorem.} {\it A finitely generated $R$-module $M$ is Cohen-Macaulay if and only if for some (and hence for all) system of parameters $x_1, \ldots, x_d$ of $M$ all the unmixed components $$U_M(0), U_{M/x_1M}(0), \ldots, U_{M/(x_1, \ldots, x_{d-1})M}(0)$$ vanish.}\\
The unmixed theorem can be expressed in another form as follows. A finitely generated $R$-module $M$ is Cohen-Macaulay if and only if every system of parameters $x_1, \ldots, x_d$ of $M$ is an $M$-regular sequence. Recall that $x_1, \ldots, x_d$ is an $M$-regular sequence if for all $i \le d$ all relations $$x_1 a_1 + \cdots + x_i a_i = 0$$
are trivial, that is, $a_i \in (x_1, \ldots, x_{i-1})M$ for all $i \le d$. In general we have $a_i \in (x_1, \ldots, x_{i-1})M:x_i$, so $x_1, \ldots, x_d$ is an $M$-regular sequence if the sub-quotient module $$\frac{(x_1, \ldots, x_{i-1})M:x_i}{(x_1, \ldots, x_{i-1})M} = 0$$ for all $i = 1, \ldots, d$. Since $$((x_1, \ldots, x_{i-1})M:x_i)/(x_1, \ldots, x_{i-1})M = (0:x_i)_{M/(x_1,\ldots ,x_{i-1})M}$$ is a submodule of $M/(x_1, \ldots, x_{i-1})M$ of dimension less than or equal to $d-i = \dim M/(x_1, \ldots, x_{i-1})M -1$, we have $$((x_1, \ldots, x_{i-1})M:x_i)/(x_1, \ldots, x_{i-1})M \subseteq U_{M/(x_1, \ldots, x_{i-1})M}(0)$$ for all $i < d$. Set $$\frak b(M) = \bigcap_{\underline{x}, i \le d} \mathrm{Ann} \frac{(x_1, \ldots, x_{i-1})M:x_i}{(x_1, \ldots, x_{i-1})M},$$ where $\underline{x} = x_1, \ldots, x_d$ runs over all systems of parameters of $M$. It is clear that the ideal $\frak b(M)$ kills all non-trivial relations of systems of parameters of $M$.\\ The Cohen-Macaulayness of $M$ can be characterized by local cohomology: $M$ is Cohen-Macaulay if and only if the local cohomology $H^i_{\frak m}(M) = 0$ for all $i<d = \dim M$. Thus if $M$ is not Cohen-Macaulay, then $H^i_{\frak m}(M) \neq 0$ for some $i < d$. Notice that $H^i_{\frak m}(M)$ is always Artinian but it is rarely Noetherian. So $H^i_{\frak m}(M)$ may not be annihilated by $\frak m$-primary ideals. The ideals $\frak a_i(M) = \mathrm{Ann}H^i_{\frak m}(M)$, $i = 0, \ldots, d$, play an important role in many areas of commutative algebra such as the homological conjectures, the tight closure theory, etc. Set $\frak a(M) = \frak a_0(M) \ldots \frak a_{d-1}(M)$. Schenzel proved the following inclusions \cite[Satz 2.4.5]{Sch82} $$\mathfrak{a}(M) \subseteq \mathfrak{b}(M) \subseteq \mathfrak{a}_0(M) \cap \cdots \cap \mathfrak{a}_{d-1}(M).$$ Notice that our ring is always a homomorphic image of a Cohen-Macaulay local ring. 
This condition gives us the critical fact that $\dim R/\frak a(M) < \dim M$ for all finitely generated $R$-modules $M$. Therefore we can choose a parameter element $x$ contained in $\frak a(M)$ (and hence in $\frak b(M)$). Furthermore, we have a special system of parameters satisfying that $$x_d \in \frak a(M), x_{d-1} \in \frak a(M/x_dM), \ldots, x_1 \in \frak a(M/(x_2, \ldots, x_d)M).$$ Such a system of parameters is called a {\it $p$-standard system of parameters} \cite{C95}. The $p$-standard systems of parameters are a key ingredient in Kawasaki's proof of the Macaulayfication problem \cite{K00}. By \cite[Theorem 1.2]{CC17}, $R$ is a homomorphic image of a Cohen-Macaulay local ring if and only if every finitely generated $R$-module admits a $p$-standard system of parameters.\\
In this paper, we will use a kind of $p$-standard system of parameters to study the splitting of local cohomology modules. As mentioned above we know that $0:x \subseteq U_M(0)$ for every parameter element $x$ of $M$. Moreover, if $x \in \frak b(M)$ then we have $0:x = U_M(0)$, so we get the following short exact sequence $$0 \to M/U_M(0) \overset{x}{\to} M \to M/xM \to 0.$$ Furthermore if $x \in \frak b(M)^2$ then the above short exact sequence induces the short exact sequence of local cohomology for any ideal $I$ (see Lemma \ref{B3.2.3}) $$0 \rightarrow H^i_I(M) \rightarrow H^i_I(M/xM) \rightarrow H^{i+1}_I(M/U_M(0)) \rightarrow 0$$ for all $i < d - \dim R/I - 1$. Using \cite{CQ11} we can study the splitting of these local cohomology exact sequences. Namely, the following is the first main result of this paper.
\begin{theorem}\label{T1.1} Let $I$ be an ideal of $R$ and $x$ a parameter element of $M$ contained in $\frak b(M)^3$. Then for all $i < d - \dim R/I - 1$ we have
$$H^i_I(M/xM) \cong H^i_I(M) \oplus H^{i+1}_I(M/U_M(0)).$$ \end{theorem} In the case $I = \frak m$ we have the following consequence. \begin{corollary}\label{C1.2} Let $x$ be a parameter element of $M$ contained in $\frak b(M)^3$. Then $$H^i_{\mathfrak{m}}(M/xM) \cong H^i_{\mathfrak{m}}(M) \oplus H^{i+1}_{\mathfrak{m}}(M/U_M(0))$$ for all $i<d-1$, and $$0:_{H^{d-1}_{\mathfrak{m}}(M/xM)}\mathfrak{b}(M) \cong H^{d-1}_{\mathfrak{m}}(M) \oplus 0:_{H^{d}_{\mathfrak{m}}(M)}\mathfrak{b}(M).$$ \end{corollary} These splitting results lead to a new kind of system of parameters $x_1, \ldots, x_d$ satisfying that $$x_d \in \frak b(M)^3, x_{d-1} \in \frak b(M/x_dM)^3, \ldots, x_1 \in \frak b(M/(x_2, \ldots, x_d)M)^3.$$ We call such a system of parameters a {\it $C$-system of parameters} of $M$. Similarly to $p$-standard systems of parameters, every finitely generated $R$-module admits $C$-systems of parameters if and only if $R$ is a quotient of a Cohen-Macaulay local ring. It should be noted that the right hand sides of the above isomorphisms do not depend on the choice of $C$-parameter element $x \in \frak b(M)^3$. Thus the local cohomology modules $H^i_I(M/xM)$, $i < d - \dim R/I-1$, are invariants (up to an isomorphism). As consequences, we can expect several invariant properties of quotient modules $M/(x_i, \ldots, x_d)M$ regarding $C$-systems of parameters. For example, by using the fact $U_M(0) = H^0_{\frak b(M)}(M)$, as the second main result of this paper, we generalize the unmixed theorem for any finitely generated $R$-module.
\begin{theorem}\label{T1.3} Let $M$ be a finitely generated $R$-module of dimension $d$ and $\underline{x} = x_1, \ldots, x_d$ a $C$-system of parameters of $M$. Then the unmixed component $U_{M/(x_{i+1}, \ldots,x_d)M}(0)$ is independent of the choice of $\underline{x}$ for all $1 \leq i \leq d$ (up to an isomorphism). \end{theorem}
The above theorem assigns to any finitely generated $R$-module $M$ of dimension $d$ a sequence of modules $U_0(M), \ldots, U_{d-1}(M)$, which satisfies that $U_i(M) \cong U_{M/(x_{i+2}, \ldots,x_d)M}(0)$ for every $C$-system of parameters $x_1, \ldots, x_d$ of $M$. Notice that $M$ is Cohen-Macaulay if and only if $U_i(M) = 0$ for all $i = 0, \ldots, d-1$ by the unmixed theorem. This sequence of modules gives information about the distance between $M$ and Cohen-Macaulayness. We call $U_0(M), \ldots, U_{d-1}(M)$ the {\it Cohen-Macaulay deviated sequence} of $M$. The name Cohen-Macaulay deviated sequence comes from the notion of {\it Cohen-Macaulay deviation} of Vasconcelos in his theory of extended degrees.\\
Let $I$ be an $\frak m$-primary ideal. We denote by $\mathrm{deg}(I, M)$ the ordinary multiplicity of $M$ with respect to $I$, and call it the {\it degree} of $M$ with respect to $I$. The degree, $\mathrm{deg}(I, M)$, is a basic invariant that measures the complexity of $M$ with respect to $I$. Vasconcelos et al. \cite{DGV98, V98-1, V98-2} introduced the notion of {\it extended degree} in order to capture the size of a module along with some of the complexity of its structure. It is a numerical function on the category of finitely generated modules over local or graded rings which generalizes the ordinary degree. Let $\mathcal{M}(R)$ be the category of finitely generated $R$-modules. An {\it extended degree} on $\mathcal{M}(R)$ with respect to $I$ is a numerical function $$ \mathrm{Deg}(I, \bullet) : \mathcal{M}(R) \to \mathbb{R} $$ satisfying the following conditions \begin{enumerate}[{(i)}] \item $\mathrm{Deg} (I, M) = \mathrm{Deg}(I, \overline{ M}) + \ell(H^0_{\frak m}(M))$, where $\overline{M} = M/H^0_{\frak m}(M)$. \item (Bertini's rule) $\mathrm{Deg}(I, M) \geq \mathrm{Deg}(I, M/xM)$ for every generic element $x \in I\setminus \frak mI$ of $M$. \item If $M$ is Cohen-Macaulay then $\mathrm{Deg}(I, M) = \mathrm{deg}(I, M)$. \end{enumerate} The difference $\mathrm{Deg} (I, M) - \mathrm{deg} (I, M)$ is called the {\it Cohen-Macaulay deviation} of $M$ with respect to $I$. The prototype of an extended degree is the {\it homological degree}, $\mathrm{hdeg}(I, M)$, which was introduced and studied by Vasconcelos in \cite{V98-1} (see Definition \ref{D3.3.4}). To date, the homological degree is the only extended degree that can be described by an explicit formula. Using the Cohen-Macaulay deviated sequence we introduce a new degree of $M$, which we call the {\it unmixed degree} of $M$ with respect to $I$, and denote it by $\mathrm{udeg}(I, M)$. 
We define $$\mathrm{udeg}(I, M) = \mathrm{deg}(I, M) + \sum_{i=0}^{d-1}\delta_{i, \dim U_i(M)}\mathrm{deg}(I, U_i(M)),$$ where $\delta_{i, \dim U_i(M)}$ is Kronecker's symbol. The unmixed degree is a natural generalization of the ordinary degree as well as the {\it arithmetic degree} (for the definition of arithmetic degree, $\mathrm{adeg}(I,M)$, we refer to Definition \ref{adeg}). We prove the last main result of this paper as follows.
\begin{theorem}\label{T1.4}
The unmixed degree $\mathrm{udeg}(I, \bullet)$ is an extended degree on the category of finitely generated $R$-modules $\mathcal{M}(R)$. \end{theorem}
Let us talk about the structure of this paper.
In the next section we collect useful results about the annihilator of local cohomology, the unmixed component and some special systems of parameters. We also mention the method of \cite{CQ11} to study the splitting of local cohomology.
Section 3 is devoted to the splitting of local cohomology, Theorem \ref{T1.1} and Corollary \ref{C1.2} (see Theorem \ref{D3.2.4} and Corollary \ref{H3.2.5}). Then we introduce the notion of $C$-system of parameters, which plays a key role in this paper.
Theorem \ref{T1.3} is proved in Section 4. We also prove the invariance of local cohomology of quotient modules regarding $C$-systems of parameters (cf. Theorem \ref{D3.2.7}). As an application of the Cohen-Macaulay deviated sequence $U_0(M), \ldots, U_{d-1}(M)$, we compute the length function $\ell(M/(x_1^{n_1}, \ldots, x_d^{n_d})M)$ when $x_1, \ldots, x_d$ is a $C$-system of parameters (cf. Proposition \ref{M3.2.13}). Other applications for sequentially Cohen-Macaulay modules and the Serre condition $(S_2)$ are also given.
The unmixed degree will be introduced in Section 5. Theorem \ref{T1.4} follows from Proposition \ref{M3.3.9}, Theorems \ref{D3.3.8} and \ref{D3.3.17}. The main difficulty is to prove the Bertini rule for the unmixed degree. For that we show that for a certain {\it superficial element} $x$ of $M$ with respect to $I$ we have $\mathrm{udeg}(I, M/xM) \le \mathrm{udeg}(I, M)$. We also compare the unmixed degree with the ordinary degree, the arithmetic degree and the homological degree.
\section{Preliminaries}
We start with the notion of annihilator of local cohomology which will be used frequently in this paper. \begin{notation} \rm Let $(R, \frak m)$ be a Noetherian local ring and $M$ a finitely generated $R$-module of dimension $d>0$. \begin{enumerate}[{(i)}] \item For all $i < d$ we set $\frak a_i(M) = \mathrm{Ann}H^{i}_\mathfrak{m}(M)$, and set $\frak a(M) = \frak a_0(M) \ldots \frak a_{d-1}(M)$. \item Put $\mathfrak{b}(M) = \bigcap_{\underline{x};i=1}^d \mathrm{Ann}(0:x_i)_{M/(x_1,\ldots ,x_{i-1})M}$ where $\underline{x} = x_1, \ldots, x_d$ runs over all systems of parameters of $M$. \end{enumerate} \end{notation} \begin{remark}\label{C3.1.2}\rm \begin{enumerate}[{(i)}] \item Schenzel \cite[Satz 2.4.5]{Sch82} proved that $$\mathfrak{a}(M) \subseteq \mathfrak{b}(M) \subseteq \mathfrak{a}_0(M) \cap \cdots \cap \mathfrak{a}_{d-1}(M).$$ \item If $R$ is a homomorphic image of a Cohen-Macaulay local ring, then $\dim R/\mathfrak{a}_i(M) \leq i$ for all $i< d$ \cite[Theorem 1.2]{CC17}. Furthermore, $\dim R/\mathfrak{a}_i(M) = i$ if and only if there exists $\frak p \in \mathrm{Ass}M$ such that $\dim R/\frak p = i$ (see \cite[Theorem 8.1.1]{BH98}). \item If $R$ is a homomorphic image of a Cohen-Macaulay local ring, then Faltings' annihilator theorem claims that $\frak p \in \mathrm{Supp}(M)$ and $\frak p \notin V(\frak a(M))$ if and only if $M_{\frak p}$ is Cohen-Macaulay and $\dim M_{\frak p} + \dim R/\frak p = d$ (see \cite[9.6.6]{BS98}, \cite{CC17}). \item The condition that $R$ is a homomorphic image of a Cohen-Macaulay local ring cannot be removed in (ii) and (iii) by Nagata's example \cite[Example 2, pp. 203$-$205]{N62}. \end{enumerate} \end{remark} Since we always assume that $(R, \frak m)$ is a homomorphic image of a Cohen-Macaulay local ring, Remark \ref{C3.1.2} (ii) ensures that $\dim R/\frak a(M) < d$. Therefore we can choose a parameter element $x \in \frak a(M)$. Following \cite{C95} such a parameter element is called {\it $p$-standard}. 
\begin{definition}\rm A system of parameters $x_1,\ldots,x_d$ of $M$ is called {\it $p$-standard} if $x_d \in \frak a(M)$ and $x_i \in \frak a(M/(x_{i+1},\ldots,x_d)M)$ for all $i = d-1,\ldots,1$. \end{definition} We recall a property of $p$-standard systems of parameters which will be used in the sequel. Let $\underline{x} = x_1,\ldots,x_d$ be a system of parameters of $M$. Let $\underline{n} = (n_1,\ldots,n_d)$ be a $d$-tuple of positive integers and $\underline{x}^{\underline{n}} = x_1^{n_1},\ldots,x_d^{n_d}$. We consider the difference $$I_{M,\underline{x}}(\underline{n}) = \ell(M/(\underline{x}^{\underline{n}})M) - e(\underline{x}^{\underline{n}};M)$$ as a function in $\underline{n}$, where $e(\underline{x};M)$ is the Serre multiplicity of $M$ with respect to the sequence $\underline{x}$. Although $I_{M,\underline{x}}(\underline{n})$ may not be a polynomial for $n_1,\ldots,n_d$ large enough, it is bounded above by polynomials. Moreover, the first author in \cite{C91} proved that the least degree of all polynomials in $\underline{n}$ bounding above $I_{M,\underline{x}}(\underline{n})$ is independent of the choice of $\underline{x}$, and it is denoted by $p(M)$. The invariant $p(M)$ is called the {\it polynomial type} of $M$. If $(R, \frak m)$ is a homomorphic image of a Cohen-Macaulay local ring, then $p(M) = \dim R/\frak a(M)$ (see \cite{C92}). In addition, if $\underline{x} = x_1,\ldots,x_d$ is $p$-standard then we have the following. \begin{proposition}[\cite{C95}, Theorem 2.6 (ii)]\label{M3.1.4} Let $x_1,\ldots,x_d$ be a $p$-standard system of parameters of $M$. Then for all $n_1,\ldots,n_d>0$ we have $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{p(M)} n_1\ldots n_i e_i,$$ where $e_i = e(x_1,\ldots,x_i; 0:_{M/(x_{i+2},\ldots,x_d)M}x_{i+1})$ and $e_0 = \ell(0:_{M/(x_{2},\ldots,x_d)M}x_{1})$. 
\end{proposition} Recently, Cuong and the first author introduced the notion of {\it $dd$-sequence} which is a special case of the notion of {\it $d$-sequences} of Huneke. \begin{definition}[\cite{Hu82,GY86}]\rm A sequence of elements $\underline{x} = x_1,\ldots,x_s$ is called a {\it $d$-sequence} of $M$ if $(x_1,\ldots,x_{i-1})M:x_j = (x_1,\ldots,x_{i-1})M:x_ix_j$ for all $i \leq j \leq s$. A sequence $\underline{x} = x_1,\ldots,x_s$ is called a {\it strong $d$-sequence} if $\underline{x}^{\underline{n}} = x_1^{n_1},\ldots,x_s^{n_s}$ is a $d$-sequence for all $\underline{n} = (n_1,\ldots,n_s) \in \mathbb{N}^s$. \end{definition}
For important properties of $d$-sequence we refer to \cite{Hu82,Tr83}.
\begin{definition}[\cite{CC07-1}]\rm
A sequence of elements $\underline{x} = x_1,\ldots,x_s$ is called a {\it $dd$-sequence} of $M$ if $\underline{x}$ is a strong $d$-sequence of $M$ and the following conditions are satisfied \begin{enumerate}[{(i)}] \item $s=1$ or, \item $s>1$ and $\underline{x}' = x_1,\ldots,x_{s-1}$ is a $dd$-sequence of $M/x_s^nM$ for all $n \geq 1$. \end{enumerate} \end{definition} The following is a characterization of $dd$-sequences in terms of $I_{M,\underline{x}}(\underline{n})$ (\cite[Theorem 1.2]{CC07-1}). \begin{proposition}\label{M3.1.7} A system of parameters $\underline{x} = x_1,\ldots,x_d$ of $M$ is a $dd$-sequence if and only if for all $n_1,\ldots,n_d>0$ we have $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{p(M)} n_1\ldots n_i e_i,$$ where $e_i = e(x_1,\ldots,x_i; 0:_{M/(x_{i+2},\ldots,x_d)M}x_{i+1})$ and $e_0 = \ell(0:_{M/(x_{2},\ldots,x_d)M}x_{1})$. \end{proposition}
\begin{remark}\rm \label{R dd seq} \begin{enumerate}[{(i)}] \item By Propositions \ref{M3.1.4} and \ref{M3.1.7}, if a system of parameters $x_1,\ldots,x_d$ of $M$ is $p$-standard, then it is a $dd$-sequence. Conversely, if $x_1,\ldots,x_d$ is a $dd$-sequence then $x_1^{n_1},\ldots,x_d^{n_d}$ with $n_i \geq i, i = 1, \ldots,d$, is $p$-standard (see \cite[Section 3]{CC07-1}). \item An $R$-module $M$ admits a $p$-standard (or $dd$-sequence) system of parameters if and only if $R/\mathrm{Ann}M$ is a homomorphic image of a Cohen-Macaulay local ring \cite[Theorem 1.2]{CC17}. \end{enumerate} \end{remark} We next recall the notion of {\it unmixed component} of $M$ and its relation to the ideal $\frak b(M)$. \begin{definition}\label{Dn3.2.1} \rm The largest submodule of $M$ of dimension less than $d$ is called the {\it unmixed component} of $M$, and is denoted by $U_M(0)$. \end{definition}
\begin{remark}\label{C3.2.2} \rm \begin{enumerate}[{(i)}] \item If $\cap_{\frak p \in \mathrm{Ass}M}N(\frak p) = 0$ is a reduced primary decomposition of the zero submodule of $M$, then $U_M(0) = \cap_{\frak p \in \mathrm{Assh}M}N(\frak p)$, where $\mathrm{Assh}M = \{\frak p \in \mathrm{Ass}M \mid \dim R/\frak p = d\}$. \item Since $\dim U_M(0) < d$, there exists a parameter element $x$ of $M$ contained in $\mathrm{Ann}\, U_M(0)$. Therefore $U_M(0) \subseteq 0:x$. But $x$ is a parameter element, so $\dim (0:x) < d$. Hence $U_M(0) = 0:x$. Following the definition of $\frak b(M)$ we have $\frak b(M) \subseteq \mathrm{Ann}U_M(0)$. Thus if $x \in \frak b(M)$ is a parameter element of $M$ then $U_M(0) = 0:x$. We also have $U_M(0) \cong H^0_{\frak b(M)}(M)$. \item By (ii) we have $\cap_{x} \mathrm{Ann}(0:_Mx) = \mathrm{Ann}U_M(0)$, where $x$ runs over all parameter elements of $M$. Therefore \begin{eqnarray*} \mathfrak{b}(M) &=& \bigcap_{\underline{x};i=1}^d \mathrm{Ann}\,(0:x_i)_{M/(x_1,\ldots,x_{i-1})M}\\ &=& \bigcap_{\underline{x};i=1}^d \mathrm{Ann}\,U_{M/(x_1,\ldots,x_{i-1})M}(0), \end{eqnarray*} where $\underline{x} = x_1,\ldots,x_d$ runs over all systems of parameters of $M$. \end{enumerate} \end{remark}
Problem of the splitting of local cohomology is started in \cite{CQ11}. For convenience we recall some results of \cite{CQ11} (with slight generalizations). Suppose we are given an integer $t$, an ideal $\frak a$ of $R$ and a submodule $U$ of $M$. Set $\overline{M} =M/U$. We say that an element $x \in \mathfrak{a}$ satisfies the condition $(\sharp)$ if $0:_Mx = U$ and the short exact sequence $$0 \longrightarrow \overline{M} \overset{x}{\longrightarrow} M \longrightarrow M/xM \longrightarrow 0$$ induces short exact sequences $$0 \longrightarrow H^{i}_\mathfrak{a}(M) \longrightarrow H^{i}_\mathfrak{a}(M/xM) \longrightarrow H^{i+1}_\mathfrak{a}(\overline{M}) \longrightarrow 0$$ for all $i<t-1$. When this is the case, we consider the above exact sequence as an extension of $H^{i+1}_\mathfrak{a}(\overline{M})$ by $H^{i}_\mathfrak{a}(M)$, therefore as an
element of $\mathrm{Ext}^1_R(H^{i+1}_\mathfrak{a}(\overline{M}), H^{i}_\mathfrak{a}(M))$ (see \cite[Chapter 3]{Mac75}). We denote this element by $E_x^i$. Especially, if $H^{t}_\mathfrak{a}(\overline{M}) \cong H^{t}_\mathfrak{a}(M)$, then we have the short exact sequence $$0 \longrightarrow H^{t-1}_\mathfrak{a}(M) \longrightarrow H^{t-1}_\mathfrak{a}(M/xM)
\longrightarrow 0:_{H^{t}_\mathfrak{a}(\overline{M})}x \longrightarrow 0.$$ Let $\frak b$ be an ideal such that $x\in \frak b$. We denote by $F^{t-1}_x$ the element of $\mathrm{Ext}^1_R(0:_{H^{t}_\mathfrak{a}(\overline{M})}\mathfrak{b}, 0:_{H^{t-1}_\mathfrak{a}(M)}\mathfrak{b})$ which represented by the following short exact sequence $$0 \longrightarrow 0:_{H^{t-1}_\mathfrak{a}(M)}\mathfrak{b} \longrightarrow 0:_{H^{t-1}_\mathfrak{a}(M/xM)}\mathfrak{b}
\longrightarrow 0:_{H^{t}_\mathfrak{a}(\overline{M})}\mathfrak{b} \longrightarrow 0$$ provided the exact sequence is determined by applying the $\mathrm{Hom}(R/\frak b, \bullet)$ functor.
It should be noted here that an extension of $R$-module $A$ by an $R$-module $C$ is split if it is the zero-element of
$\mathrm{Ext}^1_R(C, A)$. The next two theorems can be proven by the same method as used in \cite[Theorem 2.2]{CQ11}. \begin{theorem}\label{T2.13} Let $t$ be a positive integer and $U$ a submodule of $M$. Let $\overline{M} = M/U$. Suppose $x$ and $y$ are elements satisfying the condition $(\sharp)$ and $0:_M (x+y)=U$. Then \begin{enumerate}[{(i)}]\rm \item {\it $x+y$ also satisfies the condition $(\sharp)$ and $E_{x+y}^i = E_x^i + E_y^i$ for all $i<t-1$.} \item {\it If $H^{t}_\mathfrak{a}(\overline{M}) \cong H^{t}_\mathfrak{a}(M)$ and $F^{t-1}_x, F^{t-1}_{y}$ are determined, then $F^{t-1}_{x+y}$ is determined and $F^{t-1}_{x+y} = F^{t-1}_x + F^{t-1}_{y}$.} \end{enumerate}
\end{theorem}
\begin{theorem}\label{T2.14} Let $t$ be a positive integer and $U$ a submodule of $M$. Let $\overline{M} = M/U$. Suppose $x$ and $y$ are elements such that $x$ satisfies the condition $(\sharp)$ and $0:_M xy=U$. Then \begin{enumerate}[{(i)}]\rm \item {\it $xy$ satisfies the condition $(\sharp)$ and $E_{xy}^i = yE_x^i$ for all $i<t-1$. Suppose that $H^{t}_\mathfrak{a}(\overline{M}) \cong H^{t}_\mathfrak{a}(M)$ and $F^{t-1}_x$ is determined. Then $F^{t-1}_{xy}$ is determined and $F^{t-1}_{xy}=yF^{t-1}_x$.} \item {\it Suppose that $H^{t}_\mathfrak{a}(\overline{M}) \cong H^{t}_\mathfrak{a}(M)$ and $yH^{i}_\mathfrak{a}(M)=0$ for all $i<t$. Then $E_{xy}^i =0$ for all $i<t-1$. Moreover, $F^{t-1}_{xy}$ is determined and $F^{t-1}_{xy} = 0$.} \end{enumerate} \end{theorem}
The following is a prime avoidance theorem for a product of ideals. \begin{lemma}[\cite{CQ11} Lemma 3.1] \label{L2.15} Let $(R, \mathfrak{m})$ be a Noetherian local ring, $\mathfrak{a}$, $\mathfrak{b}$ ideals and $\mathfrak{p}_1, \ldots, \mathfrak{p}_n$ prime ideals such that $\mathfrak{ab} \nsubseteq \mathfrak{p}_j$ for all $j \leq n$. Let $x \in \mathfrak{ab}$ with $x \notin \mathfrak{p}_j$ for all $j \leq n$. There are elements $a_1, \ldots, a_r \in \mathfrak{a}$ and $ b_1, \ldots, b_r \in \mathfrak{b}$ such that $x=a_1b_1+ \cdots + a_rb_r$, and that $a_ib_i \notin \mathfrak{p}_j$ and $a_1b_1+ \cdots +a_ib_i \notin \mathfrak{p}_j$ for all $i \leq r$ and all $j \leq n$. \end{lemma}
\begin{corollary}\label{C2.16}
Let $(R, \mathfrak{m})$ be a Noetherian local ring, $M$ a finitely generated $R$-module of dimension $d>0$, $\frak a$ and $\frak b$ two ideals such that $\dim R/\frak a \frak b<d$.
Let $x \in \frak a \frak b$ be a parameter element of $M$. There exist parameter elements $a_1, \ldots, a_r \in \frak a$ and $b_1, \ldots, b_r \in \mathfrak{b}$ of $M$ such that $x=a_1b_1+ \cdots + a_rb_r$, and that $a_1b_1+ \cdots +a_ib_i $ is a parameter element for all $i \leq r$. \end{corollary} \begin{proof} Note that an element $x$ is a parameter element of $M$ if and only if $x \notin \frak p$ for all $\frak{p} \in \mathrm{Assh}M$. The assertion now follows from Lemma \ref{L2.15}. \end{proof} \section{The splitting of local cohomology} In this section we prove splitting theorems for local cohomology in local rings. These results lead to a new kind of systems of parameters. We need the following key ingredient about the annihilator of local cohomology supported at an arbitrary ideal that is of independent interest. \begin{proposition}\label{M3.1.11} Let $M$ be a finitely generated $R$-module of dimension $d$ and $I$ an ideal of $R$. We have $\mathfrak{b}(M) H^i_{I}(M)=0$ for all $i < d - \dim R/I$. \end{proposition} To prove the above result we use the following isomorphism of Nagel and Schenzel (see \cite[Proposition 3.4]{NS94}). Recall that a sequence $x_1,\ldots,x_t$ of elements contained in $I$ is an {\it $I$-filter regular sequence} of $M$ if $$\mathrm{Supp}\, ((x_1,\ldots,x_{i-1})M:x_i)/(x_1,\ldots,x_{i-1})M \subseteq V(I)$$ for all $i = 1,\ldots,t$, where $V(I)$ denotes the set of prime ideals containing $I$. This condition is equivalent to saying that $x_i \notin \frak{p}$ for all $\frak{p} \in \mathrm{Ass}_R M/(x_1, \ldots, x_{i-1})M \setminus V(I)$ for all $i = 1, \ldots, t$. Moreover we can choose an $I$-filter regular sequence on $M$ of any length by the prime avoidance theorem. \begin{lemma}[Nagel-Schenzel's isomorphism]\label{B3.1.12} Let $I$ be an ideal of $R$ and $x_1, \ldots, x_t$ an $I$-filter regular sequence of $M$. 
For each $j \leq t$ we have $$ H^j_{I}(M)\cong \begin{cases} H^j_{(x_1,\ldots,x_t)}(M) \quad \quad \quad\,\, \text{with}\,\, j<t\\ H^{j-t}_I(H^t_{(x_1,\ldots,x_t)}(M))\,\, \text{with}\,\, j\geq t.\\ \end{cases} $$ \end{lemma}
\begin{proof}[Proof of Proposition \ref{M3.1.11}] Set $ t = d - \dim R/I$. Suppose $t<d$, by the prime avoidance theorem we can choose an element $x_1 \in I$ such that $x_1 \notin \frak p$ for all $\frak p \in \mathrm{Assh}\, M \cup (\mathrm{Ass}\, M \setminus V(I) )$. Thus $x_1$ is a parameter element of $M$ that is also an $I$-filter regular element. We continue this progress to obtain a part of a system of parameters $x_1, \ldots, x_t$ of $M$ that is also an $I$-filter regular on $M$. By Lemma \ref{B3.1.12} for $i<t$, we have \begin{eqnarray*} H^i_{I}(M) &\cong& H^{0}_I(H^i_{(x_1,\ldots,x_i)}(M))\\
&\cong& H^{0}_I(\lim_{\longrightarrow}M/{(x_1^n,\ldots,x_i^n)}M)\\
&\cong& \lim_{\longrightarrow} H^{0}_I(M/{(x_1^n,\ldots,x_i^n)}M)\\
&\cong& \lim_{\longrightarrow}
\frac{(x_1^n,\ldots,x_i^n)M : I^\infty}{(x_1^n,\ldots,x_i^n)M}\\
&\cong& \lim_{\longrightarrow}
\frac{(x_1^n,\ldots,x_i^n)M :
x_{i+1}^\infty}{(x_1^n,\ldots,x_i^n)M}, \end{eqnarray*} where $(x_1^n,\ldots,x_i^n)M : I^\infty = \cup_{k \ge 1} (x_1^n,\ldots,x_i^n)M : I^k$. Since $x_1, \ldots, x_t$ is a part of a system of parameters of $M$ and $(x_1^n,\ldots,x_i^n)M : x_{i+1}^\infty = (x_1^n,\ldots,x_i^n)M :
x_{i+1}^k$ for some $k$, we have $$\frak b(M) \frac{(x_1^n,\ldots,x_i^n)M :
x_{i+1}^\infty}{(x_1^n,\ldots,x_i^n)M} = 0$$ for all $n \ge 1$ by the definition of $\frak b(M)$. Hence $\mathfrak{b}(M) H^i_{I}(M)=0$ for all $i < d - \dim R/I$. The proof is complete. \end{proof}
\begin{lemma}\label{B3.2.3} Let $I$ be an ideal of $R$ and $x, y \in \mathfrak{b}(M)$ parameter elements of $M$. Let $U_M(0)$ be the unmixed component of $M$. Put $\overline{M} = M/U_M(0)$ and $t = d -\dim R/I$. Then for all $i<t-1$ we have the following short exact sequence $$0 \rightarrow H^i_I(M) \rightarrow H^i_I(M/xyM) \rightarrow H^{i+1}_I(\overline{M}) \rightarrow 0.$$ Furthermore, if $H^{t}_I(M) \cong H^{t}_I(\overline{M})$ then we have the short exact sequence $$0 \rightarrow H^{t-1}_I(M) \rightarrow H^{t-1}_I(M/xyM) \rightarrow 0:_{H^{t}_I(M)}xy \rightarrow 0.$$ \end{lemma}
\begin{proof} By Remark \ref{C3.2.2} (ii) we have $U_M(0) = 0:_Mx = 0:_Mxy$. Therefore the following diagram commutes \[\divide\dgARROWLENGTH by 2 \begin{diagram} \node{0}\arrow{e}\node{\overline{M}} \arrow{e,t}{x}\arrow{s,l}{\mathrm{id}}\node{M}\arrow{e}\arrow{s,l}{y} \node{M/xM}\arrow{s}\arrow{e} \node{0}\\ \node{0}\arrow{e}\node{\overline{M}} \arrow{e,t}{xy}\node{M}\arrow{e}\node{M/xyM}\arrow{e} \node{0.} \end{diagram} \] Applying the functor $H^{i}_I(\bullet)$ to the above diagram we obtain the following commutative diagram for all $i < t-1$ \[\divide\dgARROWLENGTH by 2 \begin{diagram} \node{\cdots}\arrow{e}\node{H^{i}_I(\overline{M})} \arrow{e,t}{\psi^i}\arrow{s,l}{\mathrm{id}}\node{H^{i}_I(M)}\arrow{e}\arrow{s,l} {y} \node{H^{i}_I(M/xM)}\arrow{s}\arrow{e} \node{\cdots}\\ \node{\cdots}\arrow{e}\node{H^{i}_I(\overline{M})} \arrow{e,t}{\varphi^i}\node{H^{i}_I(M)}\arrow{e}\node{H^{i}_I(M/xyM)}\arrow{e} \node{\cdots,} \end{diagram} \] where $\psi^i$ and $\varphi^i$ are derived from homomorphisms $\overline{M }\overset{x}{\to}M$ and $\overline{M} \overset{xy}{\to}M$, respectively. By Proposition \ref{M3.1.11}, $yH^{i}_I(M)=0$ for all $i \leq t-1$, so $\varphi^i =0$ for all $i \leq t-1$. Thus we have the short exact sequences $$0 \rightarrow H^i_I(M) \rightarrow H^i_I(M/xyM) \rightarrow H^{i+1}_I(\overline{M}) \rightarrow 0$$ for all $i < t-1$. Thus we have the exact sequence $$0 \rightarrow H^{t-1}_I(M) \rightarrow H^{t-1}_I(M/xyM) \rightarrow H^{t}_I(\overline{M}) \overset{xy}{\rightarrow} H^t_I(M).$$ Moreover, if $H^{t}_I(M) \cong H^{t}_I(\overline{M})$ then we have the following short exact sequence $$0 \rightarrow H^{t-1}_I(M) \rightarrow H^{t-1}_I(M/xyM) \rightarrow 0:_{H^{t}_I(M)}xy \rightarrow 0.$$ The proof is complete. \end{proof}
Let $xy$ be a parameter element of $M$ such that $x, y \in \frak b(M)$. Lemma \ref{B3.2.3} says that $xy$ satisfies the condition $(\sharp)$ mentioned in Section 2 with $t = d - \dim R/I$ and $U = U_M(0)$. Let $x \in \mathfrak{b}(M)^2$ be a parameter element of $M$, for all $i < t-1$, we denote by $E^i_x$ the element in $\mathrm{Ext}(H^{i+1}_I(\overline{M}), H^i_I(M))$ represented by the following short exact sequence provided it is determined $$0 \rightarrow H^i_I(M) \rightarrow H^i_I(M/xM) \rightarrow H^{i+1}_I(\overline{M}) \rightarrow 0.$$ In the case $i=t-1$ and assume that $H^{t}_I(M) \cong H^{t}_I(\overline{M})$, we have the short exact sequence $$0 \rightarrow H^{t-1}_I(M) \rightarrow H^{t-1}_I(M/xM) \rightarrow 0:_{H^{t}_I(M)}x \rightarrow 0.$$ Suppose we obtain the following short exact sequence by applying the $\mathrm{Hom}(R/ \frak b(M), \bullet)$ to above short exact sequence $$0 \rightarrow H^{t-1}_I(M) \rightarrow 0:_{H^{t-1}_I(M/xM)}\mathfrak{b}(M)
\rightarrow 0:_{H^{t}_I(M)}\mathfrak{b}(M) \rightarrow 0.$$ Then we denote by $F^{t-1}_{x}$ the element of $\mathrm{Ext}(0:_{H^{t}_I(M)}\mathfrak{b}(M), H^{t-1}_I(M))$ represented by the above short exact sequence. The main result of this section as follows. \begin{theorem}\label{D3.2.4} Let $M$ be a finitely generated $R$-module of dimension $d$, $I$ an ideal of $R$ and $x$ a parameter element of $M$. Let $U_M(0)$ be the unmixed component of $M$ and set $\overline{M} = M/U_M(0)$. Let $t = d -\dim R/I$. Then \begin{enumerate}[{(i)}]\rm \item {\it If $x \in \mathfrak{b}(M)^2$ then $E^i_x$ is determined for all $i<t-1$.} \item {\it If $x \in \mathfrak{b}(M)^3$ then $E^i_x = 0$ for all $i<t-1$. Moreover, if $H^{t}_I(M) \cong H^{t}_I(\overline{M})$ then $F^{t-1}_{x} = 0$.} \end{enumerate} \end{theorem} \begin{proof} (i) Notice that $\frak b(M) \nsubseteq \frak p$ for all $\frak p \in \mathrm{Assh}M$. By Corollary \ref{C2.16} there exist parameter elements $a_1, \ldots, a_r, b_1, \ldots, b_r \in \mathfrak{b}(M)$ of $M$ such that $x=a_1b_1+ \cdots + a_rb_r$, and $a_1b_1+ \cdots +a_jb_j $ are parameter elements for all $j \leq r$. By Lemma \ref{B3.2.3} $E^i_{a_kb_k}$ is determined for all $i < t-1$ and for all $1 \leq k \leq r$. By Theorem \ref{T2.13} we have $$E^i_x = E^i_{a_1b_1} + \cdots + E^i_{a_rb_r}$$ is determined for all $i < t-1$.\\ (ii) Similarly, we choose parameter elements $a_1, \ldots, a_r \in \frak b(M)^2$ and $b_1, \ldots, b_r \in \mathfrak{b}(M)$ of $M$ such that $x=a_1b_1+ \cdots + a_rb_r$, and $a_1b_1+ \cdots +a_jb_j $ are parameter elements for all $j \leq r$. By Theorem \ref{T2.14} (ii) we have $E^i_{a_kb_k} = 0$ for all $i < t-1$ and for all $1 \leq k \leq r$. So $E^i_x = 0$ for all $i<t-1$. For the last assertion, by the same method, it is sufficient to show that $F^{t-1}_{ab} = 0$ for all parameter elements $a \in \frak b(M)^2$ and $b \in \frak b(M)$ provided $H^t_I(M) \cong H^t_I(\overline{M})$. 
Indeed, since $E^i_a$ and $E^i_{ab}$ are determined for all $i<t-1$, the commutative diagram \[\divide\dgARROWLENGTH by 2 \begin{diagram} \node{0}\arrow{e}\node{\overline{M}} \arrow{e,t}{a}\arrow{s,l}{\mathrm{id}}\node{M}\arrow{e}\arrow{s,l}{b} \node{M/aM}\arrow{s}\arrow{e} \node{0}\\ \node{0}\arrow{e}\node{\overline{M}} \arrow{e,t}{ab}\node{M}\arrow{e}\node{M/abM}\arrow{e} \node{0.} \end{diagram} \] deduces the following diagram \[\divide\dgARROWLENGTH by 2 \begin{diagram} \node{0}\arrow{e}\node{H^{t-1}_I(M)} \arrow{e,t}{i}\arrow{s,l}{b}\node{ H^{t-1}_I(M/aM)}\arrow{e}\arrow{s,l}{\beta} \node{0:_{H^{t}_I(M)}a}\arrow{s,l}{\alpha}\arrow{e} \node{0}\\ \node{0}\arrow{e}\node{H^{t-1}_I(M)} \arrow{e,t}{\delta}\node{ H^{t-1}_I(M/abM)}\arrow{e,t}{\pi}\node{0:_{H^{t}_I(M)}ab}\arrow{e} \node{0,} \end{diagram} \] where $\alpha: 0:_{H^{t}_I(M)}a \to 0:_{H^{t}_I(M)}ab$ is injective. By Proposition \ref{M3.1.11} $b H^{t-1}_I(M) = 0$, so $\beta \circ i = 0$. Thus we have a homomorphism $\epsilon: 0:_{H^{t}_I(M)}a \to H^{t-1}_I(M/abM)$ which makes the following diagram \[\divide\dgARROWLENGTH by 2 \begin{diagram} \node{0}\arrow{e}\node{H^{t-1}_I(M)} \arrow{e,t}{i}\arrow{s,l}{b}\node{ H^{t-1}_I(M/aM)}\arrow{e}\arrow{s,l}{\beta} \node{0:_{H^{t}_I(M)}a}\arrow{s,l}{\alpha}\arrow{sw,t}{\epsilon}\arrow{e} \node{0}\\ \node{0}\arrow{e}\node{H^{t-1}_I(M)} \arrow{e,t}{\delta}\node{ H^{t-1}_I(M/abM)}\arrow{e,t}{\pi}\node{0:_{H^{t}_I(M)}ab}\arrow{e} \node{0,} \end{diagram} \] By applying the $\mathrm{Hom}_R(R/\frak b(M), \bullet)$ to the above diagram we have the following diagram \[\divide\dgARROWLENGTH by 2 \begin{diagram} \node{}\node{} \node{} \node{0:_{H^{t}_I(M)} \frak b(M)}\arrow{sw,t}{\epsilon}\arrow{s,l}{\mathrm{id}}\\ \node{0}\arrow{e}\node{H^{t-1}_I(M)} \arrow{e}\node{ 0:_{H^{t-1}_I(M/abM)} \frak b(M)}\arrow{e,t}{\pi}\node{0:_{H^{t}_I(M)}\frak b(M),} \end{diagram} \] where the row is an exact sequence and the vertical map is an identification. 
Since $\pi \circ \epsilon = \mathrm{id}$, the homomorphism $\pi$ is split. Thus $F^{t-1}_{ab} = 0$. The proof is complete. \end{proof} In the case $I = \frak m$, the following is a generalization of \cite[Corollary 4.1]{CQ11} and \cite[Proposition 3.4]{Q12}. \begin{corollary}\label{H3.2.5} Let $x \in \mathfrak{b}(M)^3$ be a parameter element of $M$. Let $U_M(0)$ be the unmixed component of $M$ and set $\overline{M} = M/U_M(0)$.
Then $$H^i_{\mathfrak{m}}(M/xM) \cong H^i_{\mathfrak{m}}(M) \oplus H^{i+1}_{\mathfrak{m}}(\overline{M})$$ for all $i<d-1$, and $$0:_{H^{d-1}_{\mathfrak{m}}(M/xM)}\mathfrak{b}(M) \cong H^{d-1}_{\mathfrak{m}}(M) \oplus 0:_{H^{d}_{\mathfrak{m}}(M)}\mathfrak{b}(M).$$ \end{corollary}
By the above splitting theorems it is natural to consider the following system of parameters. \begin{definition}[\cite{MQ16}, Definition 2.15] \rm A parameter element $x\in \frak b(M)^3$ is called a {\it $C$-parameter element} of $M$. A system of parameters $x_1, ..., x_d$ is called a {\it $C$-system of parameters} of $M$ if $x_d \in \mathfrak b(M)^3$ and $x_i \in \mathfrak b(M/(x_{i+1}, ..., x_d)M)^3$ for all $i = d-1, ..., 1$. A sequence of elements $x_i, \ldots, x_d$ is called {\it a part of a $C$-system of parameters} if we can expand it to a $C$-system of parameters $x_1, \ldots, x_d$. \end{definition}
It is evident that $C$-systems of parameters are closely related to $p$-standard systems of parameters. The lemmas below will be very useful in the sequel.
\begin{lemma} \label{B3.1.9} Let $x$ be a parameter element of $M$. Then $\frak b(M) \subseteq \frak b(M/xM)$. \end{lemma} \begin{proof} It follows from the definition of $\frak b(M)$. \end{proof} \begin{lemma}\label{B3.1.10} Let $x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. Then, for all $j \leq d$ we have $x_1, \ldots, x_{j-1},x_{j+1},\ldots,x_d$ is a $C$-system of parameters of $M/x_jM$. \end{lemma} \begin{proof} The case $j=d$ is clear. For $j \neq d$ by Lemma \ref{B3.1.9} we have $\frak b(M) \subseteq \frak b(M/x_jM)$. Therefore $x_d$ is a $C$-parameter element of $M/x_jM$. Notice that $x_1, \ldots,x_{d-1}$ is a $C$-system of parameters of $M/x_dM$. The claim follows by induction on $d$. \end{proof}
\section{The Cohen-Macaulay deviated sequences}
In this section we use the splitting theorem \ref{D3.2.4} to shed new light on the structure of non-Cohen-Macaulay modules. Let $M$ be a finitely generated $R$-module of dimension $d$. The unmixed characterization of Cohen-Macaulay modules says that $M$ is Cohen-Macaulay if and only if for some (and hence for all) system of parameters $x_1,\ldots,x_d$ we have $U_{M/(x_{i+1},\ldots,x_d)M}(0) = 0$ for all $1 \leq i \leq d$. If $M$ is a generalized Cohen-Macaulay module and $\frak m^{n_0}H^i_{\mathfrak{m}}(M) = 0$ for all $i<d$ and for some positive integer $n_0$, then by \cite[Corollary 4.2]{CQ11} we have $$U_{M/(x_{i+1},\ldots,x_d)M}(0) = H^0_{\mathfrak{m}}(M/(x_{i+1},\ldots,x_d)M) \cong \bigoplus_{j=0}^{d-i} H^j_{\mathfrak{m}}(M)^{\binom{d-i}{j}}$$ for any system of parameters $x_1,\ldots,x_d \in \frak m^{2n_0}$. Thus $U_{M/(x_{i+1},\ldots,x_d)M}(0)$ is independent of the choice of system of parameters $x_1,\ldots,x_d$ contained in $\frak m^{2n_0}$ for all $1 \leq i \leq d$ (up to an isomorphism). The main aim of this section is to generalize this fact for any finitely generated $R$-module. Concretely, we will show that for all $1 \leq i \leq d$ the module $U_{M/(x_{i+1},\ldots,x_d)M}(0)$ is independent (up to an isomorphism) of the choice of a $C$-system of parameters $x_1, \ldots, x_d$. We start with the following result about the invariance of local cohomology of quotient modules regarding $C$-systems of parameters.
\begin{theorem} \label{D3.2.7} Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. Then the local cohomology module $H^j_{\mathfrak{m}}(M/(x_{i+1}, \ldots,x_d)M)$ is independent of the choice of $\underline{x}$ for all $j < i < d$ (up to an isomorphism). \end{theorem} \begin{proof} We set $M_i = M/(x_{i+1}, \ldots,x_d)M$ for all $i < d$. We consider another $C$-system of parameters $\underline{y} = y_1, \ldots, y_d$ of $M$, and put $M_i' = M/(y_{i+1},\ldots,y_d)M$ for all $i < d$. We proceed by induction on $d$ that $H^j_{\mathfrak{m}}(M_i) \cong H^j_{\mathfrak{m}}(M_i')$ for all $j < i < d$. The assertion is trivial if $d=1$. For $d>1$ and $i=d-1$ since $x_d$ and $y_d$ are $C$-parameter elements, Corollary \ref{H3.2.5} implies that $$H^j_{\mathfrak{m}}(M_{d-1}) \cong H^j_{\mathfrak{m}}(M) \oplus H^{j+1}_{\mathfrak{m}}(M/U_M(0)) \cong H^j_{\mathfrak{m}}(M_{d-1}')$$ for all $j<d-1$. Suppose $i< d-1$. Since $\dim R/\frak b(M_{i+1}) < \dim M_{i+1} = i+1$ and $\dim R/\frak b(M_{i+1}') < \dim M'_{i+1} = i+1$ we can choose a $C$-parameter element $z$ of both $M_{i+1}$ and $M_{i+1}'$. By the inductive hypothesis we have $$H^j_{\mathfrak{m}}(M_{i}) = H^j_{\mathfrak{m}}(M_{i+1}/x_{i+1}M_{i+1}) \cong H^j_{\mathfrak{m}}(M/(z, x_{i+2},\ldots,x_d)M), \quad \quad \quad \quad (1)$$ and $$H^j_{\mathfrak{m}}(M_{i}') = H^j_{\mathfrak{m}}(M_{i+1}'/y_{i+1}M_{i+1}') \cong H^j_{\mathfrak{m}}(M/(z, y_{i+2},\ldots,y_d)M)\quad \quad \quad \quad (2)$$ for all $j<i$. Notice that $z, x_{i+2},\ldots,x_d$ and $z, y_{i+2},\ldots,y_d$ are parts of $C$-systems of parameters of $M$. By Lemma \ref{B3.1.10} we have $x_{i+2},\ldots,x_d$ and $y_{i+2},\ldots,y_d$ are parts of $C$-systems of parameters of $M/zM$. Applying the inductive hypothesis for $M/zM$ we have $$H^j_{\mathfrak{m}}(M/(z, x_{i+2},\ldots,x_d)M) \cong H^j_{\mathfrak{m}}(M/(z, y_{i+2},\ldots,y_d)M) \quad \quad \quad \quad (3)$$ for all $j<i$. The assertion follows from the isomorphisms $(1)$, $(2)$ and $(3)$. 
\end{proof}
\begin{corollary}\label{C invar ann}
Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. Then the ideals $\frak a(M/(x_{i+1}, \ldots,x_d)M)$ and $\sqrt{\frak a(M/(x_{i+1}, \ldots,x_d)M)} = \sqrt{\frak b(M/(x_{i+1}, \ldots,x_d)M)}$ are independent of the choice of $\underline{x}$ for all $i < d$. \end{corollary}
We need the following result. \begin{lemma}\label{B3.2.8} Let $x $ be a $C$-parameter element of $M$. Then $U_{M/xM}(0)$ is independent of the choice of $x$ (up to an isomorphism). \end{lemma} \begin{proof} By Corollary \ref{C invar ann} we have the ideal $$\mathfrak{b}' = \sqrt{\mathfrak{a}(M/xM)} = \sqrt{\mathfrak{b}(M/xM)}$$ is independent of the choice of $C$-parameter element $x$. By Remark \ref{C3.2.2} (ii) we have $U_{M/xM}(0) \cong H^0_{\mathfrak{b}'}(M/xM)$. Since $\dim R/\mathfrak{b}' \leq \dim M/xM -1 = d-2$, Theorem \ref{D3.2.4} (ii) implies that $$H^0_{\mathfrak{b}'}(M/xM) \cong H^0_{\mathfrak{b}'}(M) \oplus H^1_{\mathfrak{b}'}(M/U_M(0)),$$ and the right hand side does not depend on $x$. Thus the unmixed component $U_{M/xM}(0)$ is independent of the choice of $C$-parameter element $x$ (up to an isomorphism). \end{proof}
Using Lemma \ref{B3.2.8} and by the same method as used in the proof of Theorem \ref{D3.2.7} we obtain the main result of this section as follows. \begin{theorem}\label{D3.2.9} Let $M$ be a finitely generated $R$-module of dimension $d$ and $\underline{x} = x_1, \ldots, x_d$ a $C$-system of parameters of $M$. Then for all $1 \leq i \leq d$, the unmixed component $U_{M/(x_{i+1}, \ldots,x_d)M}(0)$ is independent of the choice of $\underline{x}$ (up to an isomorphism). \end{theorem} \begin{definition} \rm For all $0 \leq i \leq d-1$ we denote by $U_i(M)$ the module satisfying that $U_i(M) \cong U_{M/(x_{i+2}, \ldots,x_d)M}(0)$ for all $C$-systems of parameters $x_1, \ldots, x_d$ of $M$. Notice that $\dim U_i(M) \leq i$ for all $0 \leq i \leq d-1$, and $U_{d-1}(M) \cong U_M(0)$. We call the modules sequence $U_0(M), \ldots, U_{d-1}(M)$ the {\it Cohen-Macaulay deviated sequence} of $M$. Notice that the Cohen-Macaulay deviated sequence of $M$ vanishes if and only if $M$ is Cohen-Macaulay. \end{definition} We next use the Cohen-Macaulay deviated sequence to prove some properties of $C$-systems of parameters. \begin{corollary}\label{H3.2.11} Let $\underline{x} = x_i, \ldots, x_d, i > 1$, be a part of a $C$-system of parameters of $M$. Then $\mathfrak{b}(M/(x_i, \ldots,x_d)M) = \mathfrak{b}(M/(x_i^{n_i}, \ldots,x_d^{n_d})M)$ for all $n_j \geq 1$ and all $i \leq j \leq d$. \end{corollary} \begin{proof} For $i = d$, notice that $\underline{y} = y_1,\ldots,y_{d-1}$ is a system of parameters of $M/x_dM$ if and only if it is also a system of parameters of $M/x_d^{n_d}M$ for all $n_d \geq 1$. By Lemma \ref{B3.1.9} we have $x_d$ and hence $x_d^{n_d}$ are contained in $ \frak b(M/(y_1,\ldots,y_{j-1})M)^3$ for all $1 \leq j \leq d-1$. So Theorem \ref{D3.2.9} claims that $$U_{M/(y_1,\ldots,y_{j-1},x_d)M}(0) \cong U_{M/(y_1,\ldots,y_{j-1},x_d^{n_d})M}(0)$$ for all $1 \leq j \leq d-1$. 
By Remark \ref{C3.2.2} (iii) we have \begin{eqnarray*} \mathfrak{b}(M/x_dM) &=& \bigcap_{\underline{y}; j=1}^{d-1} \mathrm{Ann}\,U_{M/(y_1,\ldots,y_{j-1},x_d)M}(0)\\ &=& \bigcap_{\underline{y}; j=1}^{d-1} \mathrm{Ann}\,U_{M/(y_1,\ldots,y_{j-1},x_d^{n_d})M}(0)\\
&=&\mathfrak{b}(M/x_d^{n_d}M), \end{eqnarray*} where $\underline{y} = y_1,\ldots,y_{d-1}$ runs over all systems of parameters of $M/x_dM$.\\ We now proceed by induction on $d$. The case $d=2$ follows from the above fact since $i = 2$. Suppose $d \geq 3$ and $i<d$. Applying the inductive hypothesis for $M/(x_{i+1},\ldots,x_d)M$ we have $$\frak b(M/(x_i,x_{i+1},\ldots,x_d)M) = \frak b(M/(x_i^{n_i},x_{i+1},\ldots,x_d)M)$$ for all $n_i \geq 1$. By Lemma \ref{B3.1.10} we have $x_{i+1},\ldots,x_d$ is a part of a $C$-system of parameters of $M/x_i^{n_i}M$. By using the inductive hypothesis for $M/x_i^{n_i}M$ we obtain $$\frak b(M/(x_i^{n_i},x_{i+1},\ldots,x_d)M) = \frak b(M/(x_i^{n_i},x_{i+1}^{n_{i+1}},\ldots,x_d^{n_d})M)$$ for all $n_{i+1},\ldots,n_{d} \geq 1$. The proof is complete. \end{proof} \begin{corollary} \label{H3.2.12} Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. Then for all $d$-tuples of positive integers $\underline{n} =(n_1,\ldots,n_d)$ we have $x_{1}^{n_1}, \ldots,x_d^{n_d}$ is also a $C$-system of parameters. \end{corollary} \begin{proof} The assertion follows immediately from Corollary \ref{H3.2.11} and the definition of $C$-system of parameters. \end{proof} \noindent {\bf An application to $dd$-sequences.} We use the Cohen-Macaulay deviated sequence to compute the function $I_{M,\underline{x}}(\underline{n})$. \begin{proposition} \label{M3.2.13} Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. Let $U_i(M)$, $0 \leq i \leq d-1$, be the Cohen-Macaulay deviated sequence of $M$. Then the difference $$I_{M,\underline{x}}(\underline{n}) = \ell(M/(x_1^{n_1},\ldots,x_d^{n_d})M) - n_1\ldots n_d e(x_1,\ldots,x_d;M)$$ is a polynomial in $\underline{n} = n_1,\ldots,n_d$. More precisely $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{p(M)}n_1\ldots n_i e(x_1,\ldots,x_i;U_i(M))$$ for all $n_i \geq 1$, where $p(M)$ is the polynomial type of $M$. 
In particular, $\underline{x} = x_1, \ldots, x_d$ is a $dd$-sequence system of parameters. \end{proposition} \begin{proof} For all $d$-tuples of positive integers $\underline{n} = (n_1,\ldots,n_d)$ by Corollary \ref{H3.2.12} we have $x_{1}^{n_1}, \ldots,x_d^{n_d}$ is a $C$-system of parameters. By Theorem \ref{D3.2.9} and Remark \ref{C3.2.2} (ii) we have $${(x_{i+2}^{n_{i+2}},\ldots,x_d^{n_{d}})M:_M x_{i+1}^{n_{i+1}}}/{(x_{i+2}^{n_{i+2}},\ldots,x_d^{n_{d}})M} \cong U_i(M)$$ for all $0 \leq i \leq d-1$. By the Auslander-Buchsbaum formula (cf. \cite[Corollary 4.3]{AB58}) we have \begin{eqnarray*} I_{M,\underline{x}}(\underline{n}) &=& \sum_{i=0}^{d-1}e(x_{1}^{n_{1}}, \ldots,x_i^{n_i};{(x_{i+2}^{n_{i+2}},\ldots,x_d^{n_{d}})M:_M x_{i+1}^{n_{i+1}}}/{(x_{i+2}^{n_{i+2}},\ldots,x_d^{n_{d}})M})\\ &=& \sum_{i=0}^{d-1}e(x_{1}^{n_{1}}, \ldots,x_i^{n_i};U_i(M))\\ &=& \sum_{i=0}^{d-1}n_1\ldots n_i e(x_1,\ldots,x_i;U_i(M)) \end{eqnarray*} is a polynomial in $n_1,\ldots,n_d$. By Remark \ref{C3.2.2} (iii) we have $\mathrm{Ann}U_i(M) \supseteq \frak b(M)$ for all $i \leq d-1$. Thus $\dim U_i \leq p(M)$ for all $i \leq d-1$ since $\dim R/\frak b(M) = \dim R/\frak a(M) = p(M)$. Therefore $e(x_1,\ldots,x_i;U_i(M)) = 0$ for all $p(M) < i \leq d-1$. Hence $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{p(M)}n_1\ldots n_i e(x_1,\ldots,x_i;U_i(M)).$$ The last assertion follows from Proposition \ref{M3.1.7}. The proof is complete. \end{proof} The following is in some sense a generalization of Proposition \ref{M3.1.7} (see also \cite[Theorem 3.7]{CN}). \begin{corollary}
Let $\underline{x} = x_1, \ldots, x_d$ be a $dd$-sequence system of parameters of $M$. Let $U_i(M)$, $0 \leq i \leq d-1$, be the Cohen-Macaulay deviated sequence of $M$. Then the difference $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{p(M)}n_1\ldots n_i e(x_1,\ldots,x_i;U_i(M))$$ for all $n_i \geq 1$, where $p(M)$ is the polynomial type of $M$. \end{corollary} \begin{proof} Notice that if $\underline{x} = x_1, \ldots, x_d$ is a $dd$-sequence system of parameters of $M$, then $\underline{x}^k = x_1^k, \ldots, x_d^k$ is a $C$-system of parameters for some $k \ge 1$ (see Remark \ref{R dd seq}). So we have $$I_{M,\underline{x}^k}(\underline{n}) = \sum_{i=0}^{p(M)}k^in_1\ldots n_i e(x_1,\ldots,x_i;U_i(M))$$ for all $n_i \geq 1$. By Proposition \ref{M3.1.7} we have $$I_{M,\underline{x}}(kn_1, \ldots, kn_d) = \sum_{i=0}^{p(M)} k^in_1\ldots n_i e(x_1,\ldots,x_i; 0:_{M/(x_{i+2},\ldots,x_d)M}x_{i+1})$$ for all $n_i \geq 1$. However it is clear that $I_{M,\underline{x}^k}(\underline{n}) = I_{M,\underline{x}}(kn_1, \ldots, kn_d)$. By the above equality we have $$e(x_1,\ldots,x_i;U_i(M)) = e(x_1,\ldots,x_i; 0:_{M/(x_{i+2},\ldots,x_d)M}x_{i+1})$$ for all $i \le p(M)$. Therefore $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{p(M)}n_1\ldots n_i e(x_1,\ldots,x_i;U_i(M))$$ for all $n_i \geq 1$ by Proposition \ref{M3.1.7} again. The proof is complete. \end{proof}
\noindent{\bf Sequentially Cohen-Macaulay modules.} We give an application of the Cohen-Macaulay deviated sequence to characterize {\it sequentially Cohen-Macaulay} modules. This notion was first introduced by Stanley for graded rings \cite{St96}, and for modules over local rings by Schenzel in \cite{Sch98}, and by Nhan and the first author in \cite{CN03}. \begin{remark}[\cite{CC07-2}] \rm \begin{enumerate}[{(i)}] \item The filtration of submodules $\mathcal{D}: D_0 \subset D_1 \subset \cdots \subset D_t =M$ of $M$ is called {\it the dimension filtration} if $D_i = U_{D_{i+1}}(0)$ for all $i \leq t-1$. \item We say that $M$ is a {\it sequentially Cohen-Macaulay} module if $D_{i+1}/D_i$ is Cohen-Macaulay for all $i \leq t-1$. \item A system of parameters $\underline{x} = x_1,\ldots,x_d$ of $M$ is called {\it good} if $D_i \cap (x_{d_i+1},\ldots,x_d)M = 0$ for $i= 0, 1, \ldots, t-1$, where $d_i = \dim D_i$ for all $i \leq t$. Notice that every $dd$-sequence system of parameters is good. \end{enumerate} \end{remark}
\begin{remark}\label{Q3.2.14} \rm Let $M$ be a finitely generated $R$-module of dimension $d$ with the dimension filtration
$$\mathcal{D}: D_0 \subset D_1 \subset \cdots \subset D_t =M,$$ with $d_i = \dim D_i $ for all $i \leq t$. Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. For each $i<t$ and $d_i \leq j \leq d-1$ we have $$D_i \cap (x_{j+2},\ldots,x_d)M = 0.$$ Therefore we can identify $D_i$ with a submodule of $M/(x_{j+2},\ldots,x_d)M$. Moreover, since $\dim D_i = d_i < j+1 = \dim M/(x_{j+2},\ldots,x_d)M$, $D_i$ is isomorphic to a submodule of $U_j(M)$ for all $d_i \leq j \leq d-1$. So, without any risk of confusion, we write $D_i \subseteq U_j(M)$ for all $d_i \leq j \leq d-1$. \end{remark} The following is a characterization of sequentially Cohen-Macaulay modules. \begin{proposition}\label{M3.2.15} Let $M$ be a finitely generated $R$-module of dimension $d$ with the dimension filtration
$$\mathcal{D}: D_0 \subset D_1 \subset \cdots \subset D_t =M,$$ with $d_i = \dim D_i $ for all $i \leq t$. Let $U_i(M)$, $0 \leq i \leq d-1$, be the Cohen-Macaulay deviated sequence of $M$. The following statements are equivalent \begin{enumerate}[{(i)}]\rm \item {\it $M$ is a sequentially Cohen-Macaulay modules.} \item {\it $D_i = U_j(M)$ for all $i<t$ and for all $d_i \leq j <d_{i+1}$.} \end{enumerate} \end{proposition} \begin{proof} $\mathrm{(i)}\Rightarrow \mathrm{(ii)}$ Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. By Proposition \ref{M3.2.13} it is a $dd$-sequence. By \cite[Lemma 6.4]{CC07-1}, $M/(x_{j+2},\ldots,x_d)M$ is a sequentially Cohen-Macaulay module with the dimension filtration \\ $$ D_0 \cong \frac{D_0+(x_{j+2},\ldots,x_d)M}{(x_{j+2},\ldots,x_d)M} \subset \cdots \subset D_i\cong \frac{D_i+(x_{j+2},\ldots,x_d)M}{(x_{j+2},\ldots,x_d)M} \subset M/(x_{j+2},\ldots,x_d)M$$ for all $i<t$ and for all $d_i \leq j <d_{i+1}$. Thus $D_i = U_j(M)$ for all $i<t$ and for all $d_i \leq j <d_{i+1}$.\\ $\mathrm{(ii) }\Rightarrow \mathrm{(i)}$ Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. By Proposition \ref{M3.2.13} we have $$I_{M,\underline{x}}(\underline{n}) = \sum_{j=0}^{d-1}n_1\ldots n_j e(x_1,\ldots,x_j;U_j(M))$$ for all $n_1,..,n_d \geq 1$. Since $D_i = U_j(M)$ for all $i<t$ and for all $d_i \leq j <d_{i+1}$ we have $e(x_1,\ldots,x_j;U_j(M)) = 0$ for all $i<t$ and for all $d_i < j <d_{i+1}$. Therefore $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{t-1}n_1\ldots n_{d_i} e(x_1,\ldots,x_{d_i};D_i)$$ for all $n_1,..,n_d \geq 1$. Hence $M$ is a sequentially Cohen-Macaulay module by \cite[Theorem 4.2]{CC07-2}. The proof is complete. \end{proof}
\noindent{ \bf Relation with the Serre condition ($S_2$).} For each $R$-module $M$ we have a set of invariant modules $U_i(M)$, $0 \leq i \leq d-1$, as Theorem \ref{D3.2.9}. Therefore we have a special set of prime ideals, $\cup_{i=0}^{d-1}\mathrm{Ass}\, U_i(M)$, attached to $M$. If $\frak p \in \mathrm{Ass}\, M$ and $\dim R/\frak p < d$, then $\frak p \in \mathrm{Ass}\, U_M(0) = \mathrm{Ass}\, U_{d-1}(M)$. In the following we consider the relation between $\mathrm{Ass}\, U_{d-2}(M)$ and {\it the Serre condition $(S_2)$}. \begin{definition}\rm For all $n \geq 1$, we say that $M$ satisfies {\it the Serre condition $(S_n)$} at the prime ideal $\frak p \in \mathrm{Supp}(M)$ if $$\mathrm{depth}M_\frak p \geq \min \{\dim M_{\frak p}, n\}.$$ Moreover, $M$ has property $(S_n)$ if it satisfies the Serre condition $(S_n)$ at all $\frak p \in \mathrm{Supp}(M)$. \end{definition}
It is obvious that $R$ satisfies the condition $(S_1)$ if and only if $\mathrm{Ass}\, R = \mathrm{minAss}R$. Furthermore, if $R$ satisfies the condition $(S_2)$ and $R$ is {\it catenary} (this condition is always true if $R$ is a homomorphic image of a Cohen-Macaulay ring), then $\mathrm{Ass}\, R = \mathrm{Assh}R$ (see \cite[Corollary 2.24]{Sch98-1}). Conversely, Goto and Nakamura \cite[Lemma 3.2]{GN01} proved that if $\mathrm{Ass}\, R \subseteq \mathrm{Assh}R \cup \{\frak m\}$, then the set
$$\mathcal{F}(R) = \{\mathfrak{p}\in \mathrm{Spec}(R)\,|\, \dim R_{\mathfrak{p}}> 1=\mathrm{depth}R_{\mathfrak{p}},\, \mathfrak{p} \neq \mathfrak{m} \}$$ is finite, i.e. $R$ does not satisfy the Serre condition $(S_2)$ at only finitely many prime ideals. The set $\mathcal{F}(R)$ can be described as follows. \begin{proposition}\label{M3.2.17} Suppose that $\mathrm{Ass}\, M \subseteq \mathrm{Assh}M \cup \{\frak m\}$. Set
$$\mathcal{F}(M) = \{\mathfrak{p}\in \mathrm{Supp}(M)\,|\, \dim M_{\mathfrak{p}}> 1=\mathrm{depth}M_{\mathfrak{p}},\, \mathfrak{p} \neq \mathfrak{m} \}.$$ Then $\mathcal{F}(M) = \mathrm{Ass}\, U_{d-2}(M) \setminus \{\frak m\}$. \end{proposition} \begin{proof} Let $x$ be a $C$-parameter element of $M$. For all $\frak p \in \mathrm{Ass}\, U_{d-2}(M) \setminus \{\frak m\}$ we have $\frak p \in \mathrm{Ass}\, M/xM$ and $\dim R/\frak p \leq d-2$. Hence $\dim M_{\mathfrak{p}}> 1=\mathrm{depth}M_{\mathfrak{p}}$. So $\mathrm{Ass}\, U_{d-2}(M) \setminus \{\frak m\} \subseteq \mathcal{F}(M)$.\\ Conversely, let $\mathfrak{p}\in \mathcal{F}(M)$. Since $\mathrm{depth}M_{\mathfrak{p}} = 1$, for every parameter element $z \in \mathfrak{p}$ we have $\mathfrak{p}\in \mathrm{Ass}\, M/zM$. Therefore $\mathfrak{p}\in \mathrm{Ass}\, M/(xz)M$. Notice that $xz$ is a $C$-parameter element of $M$ and $\dim R/\frak p \leq d-2$, so $\frak p \in \mathrm{Ass}\, U_{M/(xz)M}(0) \cong \mathrm{Ass}\, U_{d-2}(M)$. The proof is complete. \end{proof}
\begin{remark}\label{C3.2.18}\rm Let $M$ be a finitely generated $R$-module. \begin{enumerate}[{(i)}] \item Suppose that $\mathrm{Ass}\, M \subseteq \mathrm{Assh}M \cup \{\frak m\}$ and $\mathcal{F}(M)$ as the previous proposition. Let $x$ be a parameter element of $M$ such that $x \notin \frak p$ for all $\frak p \in \mathcal{F}(M)$. Then $M$ satisfies the Serre condition $(S_2)$ at all prime ideals $\frak p \in \mathrm{supp}M$ containing $x$ and $\frak p \neq \frak m$. So $M/xM$ satisfies the Serre condition $(S_1)$ at all $\frak p \in \mathrm{Supp}(M/xM)$ and $\frak p \neq \frak m$. Hence $$\mathrm{Ass}\, (M/xM) \subseteq \mathrm{minAss}(M/xM) \cup \{\frak m\} = \mathrm{Assh}(M/xM) \cup \{\frak m\}.$$ \item Set $\overline{M} = M/U_M(0)$. Let $x \in \frak b(M)^3 \cap \frak b(\overline{M})^3$ be a parameter element of $M$ and hence of $\overline{M}$. Put $\frak b' = \frak b(M/xM)$, $\frak b'' = \frak b(\overline{M}/x\overline{M})$ and $\frak b = \frak b' \cap \frak b''$. We have $\dim R/\frak b \leq d-2$. By Remark \ref{C3.2.2} (i) we have $U_{d-2}(M) \cong H^0_{\frak b'}(M/xM) \subseteq H^0_{\frak b}(M/xM)$. However $\dim H^0_{\frak b}(M/xM) < d-1$, so $U_{d-2}(M) \cong H^0_{\frak b}(M/xM)$. Similarly, we have $U_{d-2}(\overline{M}) \cong H^0_{\frak b}(\overline{M}/x\overline{M})$. By the proof of Lemma \ref{B3.2.8} we have $$U_{d-2}(M) \cong H^0_{\frak b}(M) \oplus H^1_{\frak b}(\overline{M}/x\overline{M})$$ and $$U_{d-2}(\overline{M}) \cong H^0_{\frak b}(\overline{M}) \oplus H^1_{\frak b}(\overline{M}/x\overline{M}) = H^1_{\frak b}(\overline{M}/x\overline{M}).$$ Therefore $U_{d-2}(\overline{M})$ is isomorphism to a direct summand of $U_{d-2}(M)$. \end{enumerate} \end{remark}
The following plays an important role in the next section. \begin{proposition}\label{M3.2.19} Let $M$ be a finitely generated $R$-module of dimension $d \geq 2$. Let $x$ be a parameter element of $M$ such that $x \notin \frak p$ for all $\frak p \in \big( \mathrm{Ass}\, U_{M}(0) \cup \mathrm{Ass}\, U_{d-2}(M) \big) \setminus \{\frak m\}$. Then we have the following short exact sequence $$0 \to U_M(0)/xU_M(0) \to U_{M/xM}(0) \to H^0_{\frak m}(\overline{M}/x\overline{M}) \to 0,$$ where $\overline{M} = M/U_M(0)$. \end{proposition} \begin{proof} Since $U_M(0) \cap xM = x(U_M(0) :_Mx) = xU_M(0)$, we have the following short exact sequence $$0 \to U_M(0)/xU_M(0) \overset{\varphi}{\to} M/xM \to \overline{M}/x\overline{M} \to 0.$$ If $\dim U_M(0) = 0$ then $\dim U_M(0)/xU_M(0) < d-1$. If $\dim U_M(0) > 0$ then $x$ is a parameter element of both $M$ and $U_M(0)$ so $\dim U_M(0)/xU_M(0) = \dim U_M(0) - 1 < d-1$. Notice that $\mathrm{Im}(\varphi) = (U_M(0) + xM)/xM$. Thus we always have $(U_M(0) + xM)/xM$ is a submodule of $M/xM$ of dimension less than $d-1$. Hence $\mathrm{Im}(\varphi) = (U_M(0) + xM)/xM \subseteq U_{M/xM}(0)$. So we have the short exact sequence $$0 \to U_M(0)/xU_M(0) \to U_{M/xM}(0) \to U_{\overline{M}/x\overline{M}}(0) \to 0.$$ On the other hand $x \notin \frak p$ for all $\frak p \in \mathrm{Ass}\, U_{d-2}(M) \setminus \{\frak m\}$. So $x \notin \frak p$ for all $\frak p \in \mathrm{Ass}\, U_{d-2}(\overline{M}) \setminus \{\frak m\}$ by Remark \ref{C3.2.18} (ii). By Remark \ref{C3.2.18} (i) we have $$\mathrm{Ass}\, (\overline{M}/x\overline{M}) \subseteq \mathrm{Assh}(\overline{M}/x\overline{M}) \cup \{\frak m\}.$$ Therefore $U_{\overline{M}/x\overline{M}}(0) = H^0_{\frak m}(\overline{M}/x\overline{M})$. Thus we obtain the short exact sequence $$0 \to U_M(0)/xU_M(0) \to U_{M/xM}(0) \to H^0_{\frak m}(\overline{M}/x\overline{M}) \to 0.$$ The proof is complete. \end{proof}
\section{The unmixed degree} In this section let $I$ be an $\frak m$-primary ideal and $M$ a finitely generated $R$-module of dimension $d > 0$. Let $U_i(M)$, $0 \leq i \leq d-1$, be the Cohen-Macaulay deviated sequence of $M$. The purpose of this section is to construct a new degree for $M$ in terms of $U_i(M)$. Firstly, recall that the length function $\ell(M/I^nM)$ becomes a polynomial of degree $d$ when $n \gg 0$ and $$\ell(M/I^{n+1}M) = \sum_{i=0}^d(-1)^i e_i(I,M) \binom{n+d-i}{d-i}.$$ The coefficients $e_i(I, M)$, $i = 0,\ldots,d$ are called the Hilbert coefficients of $M$ with respect to $I$. In particular, the leading coefficient $e_0(I, M)$ is called {\it the Hilbert-Samuel multiplicity} of $M$ with respect to $I$. If $I=\frak m$, the multiplicity is simply written as $e_0(M)$. In the present paper we denote by $\mathrm{deg}(I, M)$ (resp. $\mathrm{deg}(M)$) the multiplicity $e_0(I, M)$ (resp. $e_0(M)$) and call it {\it the degree} of $M$ with respect to $I$ (resp. the degree of $M$). The following associativity formula for degree says that $\mathrm{deg}(I, M)$ depends only on the associated prime ideals of the highest dimension (see \cite[Corollary 4.7.8]{BH98}) $$\mathrm{deg}(I, M) = \sum_{\frak p \in \mathrm{Assh}M}\ell_{R_{\frak p}}(M_{\frak p}) \mathrm{deg}(I, R/\frak p). $$ Notice that if $\frak p \in \mathrm{minAss}M$, then $M_{\frak p}$ has finite length and $M_{\frak p} = H^0_{\frak p R_{\frak p}}(M_{\frak p})$. So we have $$\mathrm{deg}(I, M) = \sum_{\frak p \in \mathrm{Assh}M} \ell_{R_{\frak p}}(H^0_{\frak p R_{\frak p}}(M_{\frak p})) \mathrm{deg}(I, R/\frak p). $$ We next recall some other degrees of $M$ related to $\mathrm{deg}(I, M)$ (see \cite{V98-2}). \begin{definition}\label{adeg}\rm The {\it arithmetic degree} of $M$ with respect to $I$, denoted by $\mathrm{adeg}(I, M)$, is the integer $$\mathrm{adeg}(I, M) = \sum_{\frak p \in \mathrm{Ass}\, M}\ell_{R_{\frak p}}(H^0_{\frak p R_{\frak p}}(M_{\frak p})) \mathrm{deg}(I, R/\frak p). 
$$ \end{definition}
\begin{remark} \label{C3.3.2} \rm \begin{enumerate}[{(i)}] \item Let $\mathcal{D} : D_0 \subseteq D_1 \subseteq \cdots \subseteq D_t = M$ be the dimension filtration of $M$. Then $\mathrm{adeg}(I, M) = \sum_{i=0}^t \mathrm{deg}(I, D_i)$. So $\mathrm{adeg}(I, M) \geq \mathrm{deg}(I, M)$ and equality holds if and only if $U_M(0) = 0$. \item Moreover, if $(R, \frak m)$ is a homomorphic image of a Gorenstein local ring $(S, \frak n)$ of dimension $n$, then $\mathrm{adeg}(I, M)$ can be determined without the knowledge of the primary decomposition as follows $$\mathrm{adeg}(I, M) = \sum_{i} \mathrm{deg}(I, \mathrm{Ext}^{i}_S(\mathrm{Ext}^{i}_S(M, S), S)).$$ \end{enumerate} \end{remark}
Vasconcelos et al. \cite{DGV98, V98-1, V98-2} introduced the notion of {\it extended degree of graded modules} in order to capture the size of a module along with some of the complexity of its structure. The prototype of an extended degree is the {\it homological degree}, which was introduced and studied by Vasconcelos in \cite{V98-1} (see also \cite{V98-2}). The extended degree for local rings was considered by Rossi, Trung and Valla in \cite{RTV03}. This notion was considered with respect to an arbitrary $\frak m$-primary ideal $I$ in \cite{L05}.
\begin{definition}\label{D3.3.3}\rm Let $\mathcal{M}(R)$ be the category of finitely generated $R$-modules. An {\it extended degree} on $\mathcal{M}(R)$ with respect to $I$ is a numerical function $$ \mathrm{Deg}(I, \bullet) : \mathcal{M}(R) \to \mathbb{R} $$ satisfying the following conditions \begin{enumerate}[{(i)}] \item $\mathrm{Deg} (I, M) = \mathrm{Deg}(I, \overline{ M}) + \ell(H^0_{\frak m}(M))$, where $\overline{M} = M/H^0_{\frak m}(M)$; \item (Bertini's rule) $\mathrm{Deg}(I, M) \geq \mathrm{Deg}(I, M/xM)$ for every generic element $x \in I\setminus \frak mI$ of $M$; \item If $M$ is Cohen-Macaulay then $\mathrm{Deg}(I, M) = \mathrm{deg}(I, M)$. \end{enumerate} \end{definition}
The homological degree is a typical extended degree that is defined as follows. \begin{definition}[\cite{V98-1}] \label{D3.3.4} \rm Suppose that $(R, \frak m)$ is a homomorphic image of a Gorenstein local ring $(S, \frak n)$ of dimension $n$, and $M$ a finitely generated $R$-module of dimension $d$. Then the {\it homological degree}, $\mathrm{hdeg}(I, M)$, of $M$ with respect to $I$ is defined by the following recursive formula $$\mathrm{hdeg}(I, M) = \mathrm{deg}(I, M) + \sum_{i=n-d+1}^n \binom{d-1}{i-n+d-1} \mathrm{hdeg}(I, \mathrm{Ext}^i_S(M, S)).$$ \end{definition}
\begin{remark} \label{C3.3.5} \rm \begin{enumerate}[{(i)}] \item The Definition \ref{D3.3.4} is recursive on dimension since $\dim \mathrm{Ext}^i_S(M, S) \le n-i < d$ for all $i = n-d+1,\ldots, n$. \item $\mathrm{hdeg}(I,\bullet)$ is an extended degree on $\mathcal{M}(R)$, and $\mathrm{hdeg}(I,M) = \mathrm{deg}(I, M)$ if and only if $M$ is Cohen-Macaulay. \item If $M$ is a generalized Cohen-Macaulay module, then $\ell(\mathrm{Ext}^{n-i}_S(M, S)) = \ell(H^i_{\frak m}(M))$ for all $i = 0, \ldots, d-1$ by the local duality theorem. We have $$\mathrm{hdeg}(I, M) = \mathrm{deg}(I, M) + \sum_{i=0}^{d-1} \binom{d-1}{i} \ell(H^i_{\frak m}(M)).$$ \item (\cite[Proposition 3.5]{V98-2}) If $\dim M = \dim S =2$ then $$\mathrm{hdeg}(I, M) = \mathrm{adeg}(I, M) + \ell(\mathrm{Ext}^2_S(\mathrm{Ext}^1_S(M, S),S)).$$ \end{enumerate} \end{remark}
Up to now, the homological degree has been the only explicitly constructed extended degree. The purpose of this section is to introduce another extended degree on $\mathcal{M}(R)$ in terms of the Cohen-Macaulay deviated sequence $U_i(M)$, $i = 0,\ldots,d-1$. Notice that $\dim U_i(M) \leq i$ for all $0 \leq i \leq d-1$. \begin{definition}\rm Let $M$ be a finitely generated $R$-module of dimension $d$ and $U_i(M)$, $0 \leq i \leq d-1$, the Cohen-Macaulay deviated sequence of $M$. We define the {\it unmixed degree} of $M$ with respect to $I$, $\mathrm{udeg}(I, M)$, as follows $$\mathrm{udeg}(I, M) = \mathrm{deg}(I, M) + \sum_{i=0}^{d-1}\delta_{i, \dim U_i(M)}\mathrm{deg}(I, U_i(M)),$$ where $\delta_{i, \dim U_i(M)}$ is the Kronecker symbol. \end{definition} It is worth noting that in the above definition and Proposition \ref{M3.2.13} we consider the subsequence of modules of the Cohen-Macaulay deviated sequence consisting of those $U_i(M)$ with $\dim U_i(M) = i$. We call this subsequence the {\it reduced Cohen-Macaulay deviated sequence} of $M$. In the rest of this paper, we shall prove that the unmixed degree is an extended degree. The first condition of Definition \ref{D3.3.3} follows from the following.
\begin{proposition}\label{M3.3.9} Let $N$ be a submodule of finite length of $M$. Then $$\mathrm{udeg}(I,M) = \mathrm{udeg}(I,M/N) + \ell(N).$$ \end{proposition} \begin{proof} Let $x_1,\ldots,x_d$ be a $C$-system of parameters of both $M$ and $M/N$. By Proposition \ref{M3.2.13} $x_1,\ldots,x_d$ is a $dd$-sequence of $M$. So $H^0_{\frak m}(M) \cap (x_1,\ldots,x_d)M = 0$. For all $0 \leq j \leq d-1$, we have the short exact sequence $$0 \to N \to M/(x_{j+2},\ldots,x_d)M \to M/(N+(x_{j+2},\ldots,x_d)M) \to 0.$$ Therefore $U_j(M/N) \cong U_j(M)/N$ for all $0 \leq j \leq d-1$. Thus $$\delta_{j, \dim U_j(M/N)} \mathrm{deg}(I,U_j(M/N)) =
\delta_{j, \dim U_j(M)} \mathrm{deg}(I,U_j(M))$$ for all $1 \leq j \leq d-1$ and $$\delta_{0, \dim U_0(M/N)} \mathrm{deg}(I,U_0(M/N)) = \delta_{0, \dim U_0(M)} \mathrm{deg}(I,U_0(M)) - \ell(N).$$ The claim is now obvious. \end{proof} The next result shows that $\mathrm{udeg}(M)$ agrees with $\mathrm{hdeg}(M)$ for generalized Cohen-Macaulay modules. \begin{proposition} \label{M3.3.10} Let $M$ be a generalized Cohen-Macaulay $R$-module of dimension $d$. Then $$\mathrm{udeg}(I,M) = \mathrm{deg}(I,M) + \sum_{j=0}^{d-1} \binom{d-1}{j}\ell(H^j_\frak{m}(M)).$$ \end{proposition} \begin{proof} Let $x_1,\ldots,x_d$ be a $C$-system of parameters of $M$. By Corollary \ref{H3.2.5} (see also \cite[Corollary 4.2]{CQ11}) we have $$U_i(M) \cong H^0_{\mathfrak{m}}(M/(x_{i+2},\ldots,x_d)M) \cong \bigoplus_{j=0}^{d-i-1} H^j_{\mathfrak{m}}(M)^{\binom{d-i-1}{j}}$$ for all $0 \leq i \leq d-1$. So $\dim U_i(M) = 0$ for all $i \le d-1$. Therefore $\delta_{i, \dim U_i(M)}\mathrm{deg}(I,U_i(M)) = 0$ for all $1 \leq i \leq d-1$ and $$\delta_{0, \dim U_0(M)}\mathrm{deg}(I,U_0(M)) = \sum_{j=0}^{d-1} \binom{d-1}{j}\ell(H^j_\frak{m}(M)).$$ The proof is complete. \end{proof} We next compute the unmixed degree when $\dim M$ is small. \begin{proposition}\label{M3.3.11} The following statements hold true. \begin{enumerate}[{(i)}]\rm \item {\it If $d = 1$ then $\mathrm{udeg}(I,M) = \mathrm{adeg}(I,M)$.} \item {\it If $d = 2$ then $\mathrm{udeg}(I,M) = \mathrm{adeg}(I,M) + \ell(H^1_{\frak m}(M/U_M(0)))$.} \end{enumerate} \end{proposition} \begin{proof} (i) It is clear.\\ (ii) We consider the following two cases.\\ The case $\dim U_M(0) = 0$, we have $M$ is a generalized Cohen-Macaulay modules. Therefore by Proposition \ref{M3.3.10} we have \begin{eqnarray*} \mathrm{udeg}(I,M) &=& \mathrm{deg}(I,M) + \ell(H^0_{\frak m}(M)) + \ell(H^1_{\frak m}(M))\\ &=& \mathrm{adeg}(I,M) + \ell(H^1_{\frak m}(M/H^0_{\frak m}(M))). \end{eqnarray*} The case $\dim U_M(0) = 1$. 
Consider the dimension filtration $H^0_{\frak m}(M) \subset U_M(0) \subset M$ of $M$. By Remark \ref{C3.3.2} (i) we have $$\mathrm{adeg}(I,M) = \mathrm{deg}(I,M) + \mathrm{deg}(I,U_M(0)) + \ell(H^0_{\frak m}(M)).$$ On the other hand $U_1(M) \cong U_M(0)$ so $\delta_{1, \dim U_1(M)}\mathrm{deg}(I,U_1(M)) = \mathrm{deg}(I,U_M(0)) $. Let $x_2$ be a $C$-parameter element of $M$. By Corollary \ref{H3.2.5} we have $$U_0(M) \cong H^0_{\frak m}(M/x_2M) \cong H^0_{\frak m}(M) \oplus H^1_{\frak m}(M/U_M(0)).$$ Thus $\delta_{0, \dim U_0(M)}\mathrm{deg}(I,U_0(M)) = \ell (H^0_{\frak m}(M)) + \ell (H^1_{\frak m}(M/U_M(0)))$. Therefore we also have $$\mathrm{udeg}(I,M) = \mathrm{adeg}(I,M) + \ell(H^1_{\frak m}(M/U_M(0))).$$ The proof is complete. \end{proof} \begin{corollary} Suppose $(R, \frak m)$ is a homomorphic image of a Gorenstein local ring and $\dim M = 2$. Then $\mathrm{udeg}(I,M) = \mathrm{hdeg}(I,M) $. \end{corollary} \begin{proof} Without loss of generality we may assume that $(R, \frak m)$ is a Gorenstein local ring of dimension two. If $U_M(0) = H^0_{\frak m}(M)$ we have $M$ is generalized Cohen-Macaulay, the claim follows from Proposition \ref{M3.3.10} and Remark \ref{C3.3.5} (iii). Suppose $\dim U_M(0) = 1$, by Proposition \ref{M3.3.11} and Remark \ref{C3.3.5} (iv) we need only to show that $$\ell (H^1_{\frak m}(M/U_M(0))) = \ell(\mathrm{Ext}^2_R(\mathrm{Ext}^1_R(M, R),R)).$$ Since $\mathrm{Ass}\, M/U_M(0) = \{\frak p \mid \frak p\in \mathrm{Ass}\, M, \dim R/\frak p = 2 \}$ we have $\mathrm{Ext}^1_R(M/U_M(0),R)$ is a module of finite length, and $\ell(\mathrm{Ext}^1_R(M/U_M(0),R)) = \ell(H^1_{\frak m}(M/U_M(0)))$ by local duality theorem. By local duality theorem again we have $\ell(\mathrm{Ext}^2_R(\mathrm{Ext}^1_R(M, R),R)) = \ell (H^0_{\frak m}(\mathrm{Ext}^1_R(M, R)))$. 
So it is enough to prove that $$\ell(\mathrm{Ext}^1_R(M/U_M(0),R)) = \ell (H^0_{\frak m}(\mathrm{Ext}^1_R(M, R))).$$ Indeed, consider the short exact sequence $$0 \to U_M(0) \to M \to M/U_M(0) \to 0.$$ Since $\dim U_M(0) = 1$ and $\mathrm{depth}M/U_M(0)>0$ we have $\mathrm{Hom}_R(U_M(0),R) = \mathrm{Ext}^2_R(M/U_M(0),R) = 0$. So we have the following short exact sequence $$0 \to \mathrm{Ext}^1_R(M/U_M(0),R) \to \mathrm{Ext}^1_R(M,R) \to \mathrm{Ext}^1_R(U_M(0),R) \to 0.$$ By \cite[Lemma 1.9]{Sch98-1} (v) we have $\mathrm{Ext}^1_R(U_M(0),R)$ is $(S_2)$ and hence it is a Cohen-Macaulay module of dimension one. Thus $H^0_{\frak m}(\mathrm{Ext}^1_R(U_M(0),R)) = 0$. Therefore $$\mathrm{Ext}^1_R(M/U_M(0),R) = H^0_{\frak m}(\mathrm{Ext}^1_R(M/U_M(0),R)) \cong H^0_{\frak m}(\mathrm{Ext}^1_R(M, R)).$$ The proof is complete. \end{proof} In the following we prove the third condition of Definition \ref{D3.3.3}. Moreover we also give a characterization of sequentially Cohen-Macaulay modules in terms of unmixed degrees. \begin{theorem}\label{D3.3.8} Let $M$ be a finitely generated $R$-module of dimension $d$. We have $$\mathrm{deg}(I, M) \leq \mathrm{adeg}(I, M) \leq \mathrm{udeg}(I, M).$$ Furthermore \begin{enumerate}[{(i)}]\rm \item {\it $\mathrm{deg}(I, M) = \mathrm{udeg}(I, M)$ if and only if $M$ is a Cohen-Macaulay module.} \item {\it $\mathrm{adeg}(I, M) = \mathrm{udeg}(I, M)$ if and only if $M$ is a sequentially Cohen-Macaulay module.} \end{enumerate} \end{theorem} \begin{proof} The first inequality is clear. Let
$$\mathcal{D}: D_0 \subset D_1 \subset \cdots \subset D_t =M$$ be the dimension filtration of $M$ with $d_i = \dim D_i $ for all $i \leq t$. Recall that $$\mathrm{adeg}(I,M) = \mathrm{deg}(I,M) + \sum_{i=0}^{t-1}\mathrm{deg}(I,D_i).$$ For all $i<t$ by Remark \ref{Q3.2.14} we have $D_i \subseteq U_{d_i}(M)$. So $\dim U_{d_i}(M) = d_i $ and then $$\mathrm{deg}(I,D_i) \leq \mathrm{deg}(I,U_{d_i}(M)) = \delta_{d_i, \dim U_{d_i}(M)}\mathrm{deg}(I, U_{d_i}(M)).$$ Thus $\mathrm{adeg}(I,M) \leq \mathrm{udeg}(I,M)$. \\ Note that (i) follows from (ii), so it is enough to prove (ii). If $M$ is sequentially Cohen-Macaulay, then by Proposition \ref{M3.2.15} we have $\mathrm{adeg}(I,M) = \mathrm{udeg}(I,M)$.\\ Conversely, suppose $\mathrm{adeg}(I,M) = \mathrm{udeg}(I,M)$. We have $$\mathrm{deg}(I,D_i) = \mathrm{deg}(I,U_{d_i}(M)) \quad \quad \quad (\star)$$ for all $i<t$, and $$\delta_{j, \dim U_j(M)}\mathrm{deg}(I,U_j(M)) = 0 \quad \quad \quad (\star \star)$$ for all $i< t$ and $d_i < j < d_{i+1}$. Let $\underline{x} = x_1, \ldots, x_d$ be a $C$-system of parameters of $M$. By $(\star)$ and the associativity formula we have $$e(x_1, \ldots, x_{d_i}; D_i) = \mathrm{deg}((\underline{x}), D_i) = \mathrm{deg}((\underline{x}), U_{d_i}(M)) = e(x_1, \ldots, x_{d_i}; U_{d_i}(M))$$ for all $i < t$. By $(\star \star)$ we have $\dim U_j(M)<j$ for all $d_i < j < d_{i+1}$ and $i< t$, so $$e(x_1, \ldots, x_j; U_j(M)) = 0$$ for all $d_i < j < d_{i+1}$ and $i< t$. By Proposition \ref{M3.2.13} we have $$I_{M,\underline{x}}(\underline{n}) = \sum_{j=0}^{d-1}n_1\ldots n_j e(x_1,\ldots,x_j;U_j(M))$$ for all $n_1, \ldots, n_d \ge 1$. Thus we have $$I_{M,\underline{x}}(\underline{n}) = \sum_{i=0}^{t-1}n_1\ldots n_{d_i} e(x_1,\ldots,x_{d_i};D_i)$$ for all $n_1, \ldots, n_d \ge 1$. Hence $M$ is a sequentially Cohen-Macaulay module by \cite[Theorem 4.2]{CC07-2}. The proof is complete. 
\end{proof} In order to prove the Bertini rule of Definition \ref{D3.3.3}, we will show that the unmixed degree behaves well when passing to quotient modules with respect to certain {\it superficial} elements. \begin{definition}\rm An element $x \in I \setminus \frak mI$ is called a {\it superficial} element of $M$ with respect to $I$ if there exists a positive integer $c$ such that $$(I^{n+1}M:x) \cap I^cM = I^nM$$ for all $n \geq c$. \end{definition} \begin{remark}\label{C3.3.14}\rm \begin{enumerate}[{(i)}] \item Let $G_I(R) = \oplus_{n \geq 0} I^n/I^{n+1}$ be the associated graded ring of $R$ with respect to $I$ and $G_I(M)= \oplus_{n \geq 0} I^nM/I^{n+1}M$ the graded $G_I(R)$-module. Set $(G_I(R))_+ = \oplus_{n \geq 1} I^n/I^{n+1}$. Then $x$ is a superficial element of $M$ with respect to $I$ if and only if the {\it initial} $x^*$ of $x$ in $G_I(R)$ is a $(G_I(R))_+$-filter regular element of $G_I(M)$, i.e.\ $\ell (0:_{G_I(M)}x^*) < \infty$ (notice that in our context $I$ is $\frak m$-primary). Moreover, if $x$ is a superficial element, then it is an $I$-filter regular element of $M$. \item A superficial element of $M$ with respect to $I$ always exists if the residue field $R/\frak m$ is infinite, a hypothesis which never causes us any problem because we can replace $R$ by the local ring $R[X]_{\frak mR[X]}$, where $X$ is an indeterminate. In the sequel we assume that the residue field is infinite. \item (cf. \cite[22.6]{N62}) Let $x$ be a superficial element of $M$ with respect to $I$. For $n \gg 0$ we have $I^{n+1}M:_Mx = 0:_Mx + I^n M$ so $$\ell(M/(I^{n+1}+(x))M) = \ell(M/I^{n+1}M) - \ell(M/I^nM) + \ell(0:_Mx)$$ for all $n \gg 0$. \item Let $x$ be a superficial element of $M$ with respect to $I$. By (iii) we have $\mathrm{deg}(I,M/xM) = \mathrm{deg}(I,M) $ if $d \geq 2$, and $\ell(M/xM) = \mathrm{deg}(I,M/xM) = \mathrm{deg}(I,M) + \ell(0:_Mx)$ if $d=1$. \end{enumerate} \end{remark}
We need some lemmas before proving the Bertini rule of unmixed degrees. \begin{lemma}\label{B3.3.15} Let $M$ be a finitely generated $R$-module of dimension $d \geq 2$. Let $x$ be a parameter element of $M$ such that $x$ is a superficial element of $U_M(0)$ with respect to $I$ and $x \notin \frak p$ for all $\frak p \in \mathrm{Ass}\, U_{d-2}(M) \setminus \{\frak m\}$. Then $$\delta_{d-2, \dim U_{M/xM}(0)}\mathrm{deg}(I,U_{M/xM}(0)) = \delta_{d-1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0))$$ if $d \geq 3$, and $$\delta_{0, \dim U_{M/xM}(0)}\mathrm{deg}(I,U_{M/xM}(0)) = \delta_{1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)) + \ell(0:_{H^0_{\frak m}(M)}x) + \ell(0:_{H^1_{\frak m}(M/U_{M}(0))}x)$$ if $d=2$. \end{lemma} \begin{proof} Put $\overline{M} = M/U_M(0)$, by Proposition \ref{M3.2.19} we have the short exact sequence $$0 \to U_M(0)/xU_M(0) \to U_{M/xM}(0) \to H^0_{\frak m}(\overline{M}/x\overline{M}) \to 0.$$ The case $d \geq 3$. If $\dim U_M(0) < d-1$ then $\dim U_M(0)/xU_M(0) < d-2$. Therefore $\dim U_{M/xM}(0) < d-2$. Hence $$\delta_{d-2, \dim U_{M/xM}(0)}\mathrm{deg}(I,U_{M/xM}(0)) = 0 = \delta_{d-1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)).$$ If $\dim U_M(0) = d-1$ we have $\dim U_{M/xM}(0) = d-2 > 0$. So $\mathrm{deg}(I,U_{M/xM}(0)) = \mathrm{deg}(I,U_M(0)/xU_M(0))$. By Remark \ref{C3.3.14} (iv) we have $\mathrm{deg}(I,U_{M}(0)) = \mathrm{deg}(I,U_M(0)/xU_M(0))$. Thus we also have $$\delta_{d-2, \dim U_{M/xM}(0)}\mathrm{deg}(I,U_{M/xM}(0)) =\delta_{d-1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)).$$ The case $d=2$, we have $U_{M/xM}(0)$ has finite length. Therefore $$\delta_{0, \dim U_{M/xM}(0)}\mathrm{deg}(I,U_{M/xM}(0)) = \ell(U_{M/xM}(0)) = \ell(U_M(0)/xU_M(0)) + \ell(H^0_{\frak m}(\overline{M}/x\overline{M})).$$ If $\dim U_M(0) = 1$, by Remark \ref{C3.3.14} (iv) we have $$\ell(U_M(0)/xU_M(0)) = \mathrm{deg}(I,U_{M}(0)) + \ell(0:_{U_M(0)}x) = \delta_{1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)) + \ell(0:_{H^0_{\frak m}(M)}x).$$ If $\dim U_M(0) = 0$ then
we have $U_{M}(0) = H^0_{\frak m}(M)$ and hence $\delta_{1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)) = 0 $. Moreover one can check that $\ell(H^0_{\frak m}(M)/xH^0_{\frak m}(M)) = \ell(0:_{H^0_{\frak m}(M)}x)$. Thus we always have $$\ell(U_M(0)/xU_M(0)) = \delta_{1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)) + \ell(0:_{H^0_{\frak m}(M)}x).$$ On the other hand the short exact sequence $$0 \to \overline{M} \overset{x}{\to} \overline{M} \to \overline{M}/x\overline{M} \to 0$$ induces the exact sequence of local cohomology modules $$0 \to H^0_{\frak m}(\overline{M}/x\overline{M}) \to H^1_{\frak m}(\overline{M}) \overset{x}{\to} H^1_{\frak m}(\overline{M}).$$ Therefore $\ell(H^0_{\frak m}(\overline{M}/x\overline{M})) = \ell(0:_{H^1_{\frak m}(\overline{M})}x)$. Hence $$\delta_{0, \dim U_{M/xM}(0)}\mathrm{deg}(I,U_{M/xM}(0)) = \delta_{1, \dim U_M(0)}\mathrm{deg}(I,U_{M}(0)) + \ell(0:_{H^0_{\frak m}(M)}x) + \ell(0:_{H^1_{\frak m}(M/U_{M}(0))}x).$$ The proof is complete. \end{proof} We need one more technical lemma. \begin{lemma}\label{B3.3.16} Let $M$ be a finitely generated $R$-module of dimension $d \geq 2$. Let $x$ be a parameter element of $M$ such that
$x \notin \frak p$ for all $\frak p \in \mathrm{Ass}\, U_M(0) \setminus \{\frak m\}$. Then we can choose a $C$-parameter element $x_d$ of $M$ such that $x$ is a parameter element of $M/x_dM$. \end{lemma} \begin{proof}
If $\dim U_M(0) < d-1$ then $\dim R/\frak b(M) \leq d-2$ by Remark \ref{C3.1.2} (ii). Therefore we can choose a $C$-parameter element $x_d$ such that $x$ and $x_d$ are part of a system of parameters of $M$ by the prime avoidance theorem. Hence $x$ is a parameter element of $M/x_dM$.\\ We now assume that $\dim U_M(0) = d-1$. Set $\overline{M} = M/U_M(0)$. The short exact sequence $$0 \to U_M(0) \to M \to \overline{M} \to 0$$ induces the exact sequence of local cohomology modules $$ \cdots \to H^i_{\frak m}(U_M(0)) \to H^i_{\frak m}(M) \to H^i_{\frak m}(\overline{M}) \to \cdots.$$ Hence $\frak a_i(M) = \mathrm{Ann}\, H^i_{\frak m}(M) \supseteq \mathrm{Ann}\, U_M(0) \, \frak a_i(\overline{M})$ for all $i \geq 0$. So $$\sqrt{\frak b(M)} = \sqrt{\frak a(M)} \supseteq \sqrt{\mathrm{Ann}\, U_M(0)\,\frak a(\overline{M})} = \sqrt{\mathrm{Ann}\, U_M(0)\,\frak b(\overline{M})}.$$ We claim that $\frak b(M) \nsubseteq \frak q$ for all $\frak q \in \mathrm{Assh}\, M/xM$. Indeed, by Remark \ref{C3.1.2} (ii) we have $\dim R/\frak b(\overline{M}) \leq d-2$. Therefore $\frak b(\overline{M}) \nsubseteq \frak q$. Suppose $\mathrm{Ann}\, U_M(0) \subseteq \frak q$. Then $\frak q \in \mathrm{Assh}\, U_M(0)$ since $\dim U_M(0) = \dim R/\frak q = d-1$. This contradicts the assumption that $x \notin \frak p$ for all $\frak p \in \mathrm{Ass}\, U_M(0) \setminus \{\frak m\}$. So $\mathrm{Ann}\, U_M(0) \nsubseteq \frak q$, and hence $\frak b(M) \nsubseteq \frak q$ for all $\frak q \in \mathrm{Assh}\, M/xM$. Thus there exists $x_d \in \frak b(M)^3$ such that $x_d$ is a parameter element of $M/xM$ by the prime avoidance theorem. Such an element $x_d$ satisfies the requirements. The proof is complete. \end{proof} We are now ready to prove that the unmixed degree satisfies the Bertini rule of extended degrees. \begin{theorem}\label{D3.3.17} Let $M$ be a finitely generated $R$-module of dimension $d$. 
Let $x$ be a superficial element of $M$ and of all $U_i(M)$, $1 \leq i \leq d-1$, with respect to $I$. Then $$\mathrm{udeg}(I,M) \ge \mathrm{udeg}(I,M/xM).$$ \end{theorem} \begin{proof} Notice that since $x$ is a superficial element of $U_i(M)$, $1 \leq i \leq d-1$, with respect to $I$ we have $x \notin \frak p$ for all $\frak p \in \mathrm{Ass}\, U_i(M) \setminus \{\frak m\}$, $1 \leq i \leq d-1$ by Remark \ref{C3.3.14} (i). The case $d=1$ is clear since $\mathrm{udeg}(I,M) = \mathrm{deg}(I,M) + \ell(H^0_{\frak m}(M))$ and $\mathrm{udeg}(I,M/xM) = \ell(M/xM) = \mathrm{deg}(I,M) + \ell(0:_Mx)$. Suppose $d \geq 2$, by Lemma \ref{B3.3.16} we can choose a part of a $C$-system of parameters $x_2,\ldots,x_d$ of $M$ such that $x, x_2,\ldots,x_d$ is also a system of parameters of $M$. By Lemma \ref{B3.1.9} we have $x_2, \ldots, x_d$ is a $C$-system of parameters of $M/xM$. Therefore, we have \begin{eqnarray*} \mathrm{udeg}(I,M) &=& \mathrm{deg}(I,M) + \sum_{i=0}^{d-1}\delta_{i, \dim U_i(M)}\mathrm{deg}(I,U_i(M))\\ &=& \mathrm{deg}(I,M) + \sum_{j=2}^{d+1}\delta_{j-2, \dim U_{M/(x_j,\ldots,x_d)M}(0)}\mathrm{deg}(I,U_{M/(x_j,\ldots,x_d)M}(0)), \end{eqnarray*} and \begin{eqnarray*} \mathrm{udeg}(I,M/xM) &=& \mathrm{deg}(I,M/xM) + \sum_{i=0}^{d-2}\delta_{i, \dim U_i(M/xM)}\mathrm{deg}(I,U_i(M/xM))\\ &=& \mathrm{deg}(I,M/xM) + \sum_{j=3}^{d+1}\delta_{j-3, \dim U_{M/(x,x_j,\ldots,x_d)M}(0)}\mathrm{deg}(I,U_{M/(x,x_j,\ldots,x_d)M}(0)). \end{eqnarray*} Since $x$ is a superficial element of $M$ with respect to $I$ we have $\mathrm{deg}(I,M/xM) = \mathrm{deg}(I,M)$. For $j>3$ we have $\dim M/(x_j,\ldots,x_d)M = j-1 \geq 3$. By Lemma \ref{B3.3.15} we obtain $$\delta_{j-2, \dim U_{M/(x_j,\ldots,x_d)M}(0)}\mathrm{deg}(I,U_{M/(x_j,\ldots,x_d)M}(0)) = \delta_{j-3, \dim U_{M/(x,x_j,\ldots,x_d)M}(0)}\mathrm{deg}(I,U_{M/(x,x_j,\ldots,x_d)M}(0))$$ for all $3<j \leq d+1$. For $j=3$, set $M' = M/(x_3,\ldots,x_d)M $ we have $\dim M' = 2$. 
By Lemma \ref{B3.3.15} we have $$\delta_{0, \dim U_{M'/xM'}(0)}\mathrm{deg}(I,U_{M'/xM'}(0)) = \delta_{1, \dim U_{M'}(0)}\mathrm{deg}(I,U_{M'}(0)) + \ell(0:_{H^0_{\frak m}(M')}x) + \ell(0:_{H^1_{\frak m}(M'/U_{M'}(0))}x).$$ By Corollary \ref{H3.2.5} we have $$U_0(M') = H^0_{\frak m}(M'/x_2M') \cong H^0_{\frak m}(M') \oplus H^1_{\frak m}(M'/U_{M'}(0)).$$ So \begin{eqnarray*} \delta_{0, \dim U_0(M')}\mathrm{deg}(I,U_0(M')) &=& \ell(H^0_{\frak m}(M')) + \ell(H^1_{\frak m}(M'/U_{M'}(0)))\\ &\ge& \ell(0:_{H^0_{\frak m}(M')}x) + \ell(0:_{H^1_{\frak m}(M'/U_{M'}(0))}x). \end{eqnarray*} Therefore $$\delta_{0, \dim U_{M'/xM'}(0)}\mathrm{deg}(I,U_{M'/xM'}(0)) \le \delta_{1, \dim U_{M'}(0)}\mathrm{deg}(I,U_{M'}(0)) +\delta_{0, \dim U_0(M')}\mathrm{deg}(I,U_0(M')).$$ More precisely, we have $$\delta_{0, \dim U_{M/(x,x_3,\ldots,x_d)M}(0)}\mathrm{deg}(I,U_{M/(x,x_3,\ldots,x_d)M}(0)) \le \sum_{j=2}^{3}\delta_{j-2, \dim U_{M/(x_j,\ldots,x_d)M}(0)}\mathrm{deg}(I,U_{M/(x_j,\ldots,x_d)M}(0)).$$ In conclusion, $\mathrm{udeg}(I,M) \ge \mathrm{udeg}(I,M/xM)$. The proof is complete. \end{proof} \begin{remark}\rm By the prime avoidance theorem we can always choose $x$ satisfying the condition of Theorem \ref{D3.3.17}. Furthermore, according to the above proof we have $\mathrm{udeg}(I,M/xM) = \mathrm{udeg}(I,M)$ provided $x$ annihilates
$H^0_{\frak m}(M')$ and $H^1_{\frak m}(M'/U_{M'}(0))$, where $M' = M/(x_3,\ldots,x_d)M$. This is the case if $xU_0(M) = 0$ by Corollary \ref{H3.2.5}. \end{remark}
By Proposition \ref{M3.3.9}, Theorems \ref{D3.3.8} and \ref{D3.3.17} we have the main result of this section. \begin{theorem} For every $\frak m$-primary ideal $I$, the unmixed degree $\mathrm{udeg}(I, \bullet)$ is an extended degree on the category of finitely generated $R$-modules $\mathcal{M}(R)$. \end{theorem} We next compare the unmixed degree and the homological degree for sequentially Cohen-Macaulay modules. \begin{remark}\rm Suppose that $(R, \frak m)$ is a homomorphic image of a Gorenstein local ring $S$ of dimension $n$, and $M$ a sequentially Cohen-Macaulay $R$-module. It is easy to see that $\mathrm{Ext}^i_S(M, S)$ is either a Cohen-Macaulay module or the zero module for all $i$. By Theorem \ref{D3.3.8} we have $$\mathrm{udeg}(I,M) = \mathrm{adeg}(I,M) = \mathrm{deg}(I,M) + \sum_{i=0}^{d-1} \mathrm{deg}(\mathrm{Ext}^{n-i}_S(M, S));$$ for the last equation see \cite[Theorem 3.11]{NR06}. Furthermore by \cite[Theorem 3.5]{NR06} we have $$\mathrm{hdeg}(I,M) = \mathrm{deg}(I,M) + \sum_{i=0}^{d-1} \binom{d-1}{i}\mathrm{deg}(\mathrm{Ext}^{n-i}_S(M, S)).$$ Therefore $\mathrm{udeg}(I,M) \leq \mathrm{hdeg}(I,M)$. Equality holds if and only if $\mathrm{Ext}^{n-i}_S(M, S) = 0$ for all $1 \leq i \leq d-2$. In this case the dimension filtration of $M$ is either $H^0_{\frak m}(M) \subseteq M$ or $H^0_{\frak m}(M) \subseteq U_M(0) \subseteq M$ with $\dim U_M(0) = d-1$. \end{remark} We close this paper with some examples and an open question. \begin{example}\rm Let $R = k[[X_1,\ldots,X_4]]/(X_1^2, X_1X_2, X_1X_3)$ where $k$ is a field and $X_i, 1 \leq i \leq 4,$ are indeterminates. We denote by $x_i$ the image of $X_i$ in $R$. We have $R$ is a sequentially Cohen-Macaulay ring of dimension $3$ with the dimension filtration $\mathcal{D}: 0 \subseteq (x_1) \subseteq R$. We have $$\mathrm{deg}(R) = 1 < \mathrm{adeg}(R) = \mathrm{udeg}(R) = 2 < \mathrm{hdeg}(R) = 3.$$ \end{example}
\begin{example}\rm Let $R = k[[X_1,\ldots,X_7]]/(X_1,X_2, X_3) \cap (X_4,X_5,X_6)$ where $k$ is a field and $X_i, 1 \leq i \leq 7,$ are indeterminates. It is easy to see that $\mathrm{deg}(R) = \mathrm{adeg}(R) = 2$. Moreover, we can compute that $\mathrm{hdeg}(R) = 5$ and $\mathrm{udeg}(R) = 4$. \end{example} \begin{question}\rm Is it true that $\mathrm{udeg}(I, M) \leq \mathrm{hdeg}(I, M)$ for all finitely generated $R$-modules $M$ and all $\frak m$-primary ideals $I$? \end{question}
\end{document} |
\begin{document}
\begin{abstract} In 1976, Gallagher showed that the Hardy--Littlewood conjectures on prime $k$-tuples imply that the distribution of primes in log-size intervals is Poissonian. He did so by computing average values of the singular series constants over different sets of a fixed size $k$ contained in an interval $[1,h]$ as $h \to \infty$, and then using this average to compute moments of the distribution of primes. In this paper, we study averages where $k$ is relatively large with respect to $h$. We then apply these averages to the tail of the distribution. For example, we show, assuming appropriate Hardy--Littlewood conjectures and in certain ranges of the parameters, the number of intervals $[n,n +\lambda \log x]$ with $n\le x$ containing at least $k$ primes is $\ll x\exp(-k/(\lambda e)).$ \end{abstract} \maketitle
\section{Introduction}
The Hardy--Littlewood prime $k$-tuple conjectures state that if $\mathcal H = \{h_1, \dots, h_k\}$ is a set of $k$ distinct integers, then as $x \to \infty$, \begin{equation} \sum_{n \le x} \prod_{i=1}^k \Lambda(n+h_i) = (\mathfrak S(\mathcal H) + o(1))x, \end{equation} where $\mathfrak S(\mathcal H)$ is the singular series \begin{equation}\label{eq:bg:singseries} \mathfrak S(\mathcal H) = \prod_{p \text{ prime}} \frac{1-\nu_{\mathcal H}(p)/p}{(1-1/p)^k}, \end{equation} and $\nu_{\mathcal H}(p)$ denotes the number of distinct residue classes modulo $p$ occupied by the elements of $\mathcal H$.
Many aspects of the distribution of primes can be understood through the lens of the Hardy--Littlewood conjectures. For example, in \cite{gallaghershortintervals}, Gallagher showed that the Hardy--Littlewood conjectures imply that the distribution of primes in log-size intervals is Poissonian. He did so by showing that for fixed $k$ and as $h \to \infty$, \begin{equation}\label{eq:bigksums:gallagheraverage} \subsum{h_1, \dots, h_k \le h \\ \text{distinct}} \mathfrak S(h_1, \dots, h_k) \sim \subsum{h_1, \dots, h_k \le h \\ \text{distinct}} 1, \end{equation} so that singular series for sets of size $k$ have value $1$ on average as their elements grow large. In \cite{MontgomerySoundararajanPrimesIntervals}, Montgomery and Soundararajan computed second-order terms of this average in order to show that, assuming a version of the Hardy--Littlewood conjectures with a stronger error term, primes in somewhat longer intervals obey an appropriate Gaussian distribution. For both of these analyses, $k$ is fixed throughout.
What about when $k$ is not fixed? Here we study sums of singular series for sets of size $k$ with elements in $[1,h]$, where $k\to \infty$ as $h \to \infty$. Put another way, we study the rate at which the average value of $\mathfrak S(\mathcal H)$ converges to $1$, in order to extend Gallagher's proof to larger $k$. \begin{theorem}\label{thm:bigksums:extendinggallagher} Fix $\delta > \frac 12$, and let $h, k \in \N$ with $k = O((\log h)^{1-\delta})$. Let $T_k(h)$ be given by \begin{equation}\label{eq:bigksums:defofTkh} T_k(h) := \sum_{\substack{h_1, \dots, h_k \le h \\ \text{distinct}}} \mathfrak S(h_1, \dots, h_k). \end{equation} Then there exists a $\beta > 0$, depending only on $\delta$, with \begin{equation*} T_k(h) = h^k + O(h^{k-\beta}). \end{equation*} \end{theorem} In particular, Theorem \ref{thm:bigksums:extendinggallagher} states that \eqref{eq:bigksums:gallagheraverage} holds whenever $k = O((\log h)^{1-\delta})$ for some $\delta > \frac 12$. One might expect the average value of $1$ to extend to still larger $k$; for example, it is reasonable to conjecture that \eqref{eq:bigksums:gallagheraverage} would hold whenever $k = O((\log h)^2)$. For arbitrarily large $k$, Theorem \ref{thm:bigk:sumboundallk} provides a bound on the average value of $k$-term singular series over sets with elements in $[1,h]$. \begin{theorem}\label{thm:bigk:sumboundallk} Let $k, h \in \N$, with no conditions on their relative growth rates. Define $T_k(h)$ by \eqref{eq:bigksums:defofTkh}. Then \begin{equation}\label{eq:bigk:sumboundallk} T_k(h) \ll h^k \prod_{p \le k^3} \frac 1{(1-1/p)^k} \ll h^k (3\log k)^k. \end{equation} \end{theorem} This upper bound is likely much weaker than the truth, but it has the advantage of bounding the average value only in terms of $k$. Instead of taking $p \le k^3$, in our proof we can take $p \le k^{2 +\varepsilon}$ for any $\varepsilon > 0$, which has the effect of replacing the $3^k$ in the final bound with a $(2+\varepsilon)^k$. 
However, the final bound is in any case $e^{O(k \log\log k)}$. Theorems \ref{thm:bigksums:extendinggallagher} and \ref{thm:bigk:sumboundallk} are proven in Section \ref{sec:bigksums:gallagher}.
In the second half of this paper, we discuss one application of sums of singular series for sets of size $k$ when $k$ varies: namely, the tail of the distribution of primes. The \emph{maximum} number of primes in an interval of size $\lambda \log x$ is closely connected to the study of small gaps between primes, and has been studied in, among other places, \cite{GranvilleLumley}, \cite{MR3272929}, and \cite{MR3171761}. In \cite{GranvilleLumley}, Granville and Lumley conjecture that if $y \le \log x$ and $x,y \to \infty$, the lim sup of the number of primes in intervals $(x, x+y]$ is $\sim \frac{y}{\log y}$, and if $\log x \le y = o((\log x)^2)$, the lim sup should be given by $\frac{\log x}{\log\left(\frac{(\log x)^2}{y}\right)}$. They also formulate conjectures for larger intervals.
The \emph{expected} number of primes in such an interval is much smaller; for a constant $\lambda$, there are on average $\lambda$ primes in an interval $(x,x + \lambda \log x]$, and Gallagher \cite{gallaghershortintervals} showed that for fixed $\lambda > 0$, assuming the Hardy--Littlewood conjectures, \begin{equation*} \lim_{x \to \infty} \frac 1x \#\{n \le x : \pi(n+\lambda \log x) - \pi(n) = k\} = \frac{\lambda^k e^{-\lambda}}{k!}. \end{equation*}
Again, we consider the situation where $k \to \infty$. What bounds can be proven on the tail of this distribution away from the extreme values? For example, one can ask how frequently intervals of size $\lambda \log x$ contain at least $\log \log x$ primes, where the Poisson prediction is that \begin{equation}\label{eq:poissonpredictionforloglogprimes} \frac 1x \#\{n \le x: \pi(n + \lambda \log x) - \pi(n) \ge \log \log x \} \approx \frac{(e\lambda)^{\log \log x} e^{-\lambda}}{(\log \log x)^{\log \log x}}. \end{equation}
Since $k=|\mathcal H|$ grows with $x$ in our setting, our results rely on a version of the Hardy--Littlewood conjectures which admits uniformity in the size of the set $\mathcal H$; we state this version here. \begin{conjecture}[Hardy--Littlewood $k$-tuples conjecture, uniform version]\label{conj:bg:HLuniform} There exist two absolute constants $\varepsilon > 0$ and $C > 0$ such that for all $x$, for all $k \le (\log \log x)^3$, and for all admissible tuples $\mathcal H = \{h_1,\dots, h_k\} \subset [0, (\log x)^2]$, \begin{equation}
\left|\sum_{n \le x} \mathbf 1_{\mathcal P}(n+h_1) \cdots \mathbf 1_{\mathcal P}(n+h_k) - \mathfrak S(\mathcal H) \mathrm{li}_k(x)\right| \le Cx^{1-\varepsilon}. \end{equation}
Equivalently, for possibly different values of $\varepsilon$ and $C$, \begin{equation}
\left|\sum_{n \le x} \Lambda(n+h_1) \cdots \Lambda(n+h_k) - \mathfrak S(\mathcal H)x\right| \le Cx^{1-\varepsilon}. \end{equation} \end{conjecture} Here $\mathrm{li}_k(x)$ is the $k$-th logarithmic integral, given by \[\mathrm{li}_k(x) := \int_2^x \frac{\mathrm{d}y}{(\log y)^k}.\]
When $k = 10$, this conjecture would suggest that for $x=5500$, the error term above is bounded by $Cx^{1-\varepsilon}$ for some $\varepsilon$ and some $C$. For several sets of size $10$, computer tests found that bounds of $(\log x)^6x^{1/2}$ held for all $x \le 5500$; in fact this and other tests for small values of $k$ suggest that the error term is far smaller, and for example may be bounded by $Cx^{1/2}(\log x)^k$ in this range of $k$ and $h$. It is also likely possible to extend the range of $k$ and $h$ in this conjecture; if $k$ is as large as $\log x$, then $\mathrm{li}_k(x)$ is small enough that the conjecture is not so meaningful, but it is difficult to say when Hardy--Littlewood convergence should break down.
Gallagher's proof in \cite{gallaghershortintervals} that the distribution of primes in log-size intervals is (conditionally) Poissonian proceeds by computing moments. One strategy towards understanding the tail of the distribution is to estimate higher moments of the distribution, or equivalently, to understand how quickly the $r$th moment of the distribution of primes converges to the $r$th moment of a Poisson distribution. Using Conjecture \ref{conj:bg:HLuniform} as well as Theorems \ref{thm:bigksums:extendinggallagher} and \ref{thm:bigk:sumboundallk}, we can prove the following result on moments of the distribution of primes. \begin{theorem}\label{thm:distributiontail:momentboundsmallk} Assume Conjecture \ref{conj:bg:HLuniform}. Let $x > 0$, and assume that $h = \lambda \log x$ and that $r \ll (\log h)^{1-\delta}$ for some $\delta > \frac 12$. Define the $r$th moment $m_r(x,h)$ of the distribution of primes in intervals of size $h$ by \begin{equation}\label{eq:rmomentlogdef} m_r(x,h) = \frac 1x \sum_{n \le x} \left(\pi(n + h) - \pi(n) \right)^r. \end{equation} Then \begin{equation*} m_r(x,h) = \left(\sum_{\ell = 1}^r \stirlingii{r}{\ell} \lambda^\ell\right)(1 + o(1)), \end{equation*} where $\stirlingii{r}{\ell}$ denotes the Stirling numbers of the second kind. \end{theorem} \begin{remark} Note that $\lambda$ need not be fixed as $x \to \infty$. \end{remark}
Theorem \ref{thm:distributiontail:momentboundsmallk} then implies bounds on the tail of the distribution of primes, and in particular yields the following two corollaries.
\begin{corollary}\label{cor:distributiontail:smallktailbound} Assume Conjecture \ref{conj:bg:HLuniform}. Let $x > 0$ and set $h = \lambda \log x$, where $\lambda(x)$ is nondecreasing as $x \to \infty$. Let $k \ll (\log h)^{1-\delta}$ for some $\delta > \frac 12$ and assume that $\frac{k}{\lambda + 1} \to \infty$ as $x\to \infty$. Let $I(x;k,h)$ be given by \begin{equation}\label{eq:distributiontail:Ixkhdef} I(x;k,h) := \#\left\{ n \le x : \pi(n+h) - \pi(n) \ge k\right\}. \end{equation} If $\lambda \ge 1$, then as $x \to \infty$, \begin{equation*} I(x;k,h) \ll x\mathrm{exp}\left(-\frac{k}{\lambda e}\right). \end{equation*} Otherwise, \begin{equation*} I(x;k,h) \ll x\mathrm{exp}\left(-\frac{k}{(\lambda+1)e}\right). \end{equation*} \end{corollary}
\begin{corollary}\label{cor:distributiontail:biggerktailbound} Assume Conjecture \ref{conj:bg:HLuniform}. Let $x > 0$, and assume that $h = \lambda \log x$; let $k = k(x)$ be an integer with no growth rate assumptions. Let $I(x;k,h)$ be defined as in \eqref{eq:distributiontail:Ixkhdef}. Then for any $\delta > \frac 12$, as $x \to \infty$, \begin{equation*} I(x;k,h) \ll_\delta x\mathrm{exp}\left((\log h)^{1-\delta}(\log (\lambda+1) + (1-\delta)\log \log h - \log k)\right). \end{equation*} \end{corollary}
For example, taking $k = \log h$, Corollary \ref{cor:distributiontail:biggerktailbound} says that for all $\delta > \frac 12$, assuming Conjecture \ref{conj:bg:HLuniform}, \begin{equation*} I(x;\log h, h) \ll_\delta x\exp\left((\log h)^{1-\delta}(\log\lambda- \delta\log \log h)\right). \end{equation*} In \cite{MR3530450}, Maynard proves lower bounds on the same problem, showing that for any $x,y \ge 1$ there are $\gg x \exp\left(-\sqrt{\log x}\right)$ integers $n \le x$ such that $\pi(n+y)-\pi(n) \gg \log y$, which in this case corresponds to the condition that there are $\gg \log \log x$ primes in intervals of width $\lambda \log x$. Both upper and lower bounds are reasonably far from the Poisson prediction in \eqref{eq:poissonpredictionforloglogprimes}.
It seems reasonable to conjecture that the Poisson prediction should still hold when $k \sim \log h$ or $k \sim (\log h)^2$, and perhaps even larger. At this point, both upper and lower bounds are far from matching this conjecture. \begin{conjecture}\label{conj:distributiontail:poissontailforprimes} Let $x > 1$ and let $h = \lambda \log x$, with $\lambda = o((\log x)^\varepsilon)$ for all $\varepsilon > 0$. Let $k \ll (\log h)^2$. Define \begin{equation}\label{eq:distributiontail:pikxhforpoissontail} \pi_k(x;h) := \#\{n \le x: \pi(n+h)-\pi(n) = k\}. \end{equation} Then $\pi_k(x;h) \sim x \frac{\lambda^k e^{-\lambda}}{k!}$ as $x \to \infty$. \end{conjecture}
In Section \ref{sec:distributiontail:unconditionalbounds}, we prove unconditional bounds on the tail of the distribution of primes. For these arguments we use a Selberg sieve bound instead of applying the Hardy--Littlewood conjectures. The Selberg sieve bound for prime $k$-tuples has an extra factor of $2^k k!$ from the Hardy--Littlewood prediction. This factor is larger than our bound on the average of $k$-term singular series in Theorem \ref{thm:bigk:sumboundallk}, so the following unconditional bound is weaker than the moment bounds in Theorem \ref{thm:distributiontail:momentboundsmallk}. However, this weaker bound applies for much larger moments; in particular, for the $r$th moment when $r = o((\log x)^{1/4})$. \begin{theorem}\label{thm:distributiontail:unconditionalmomentbound} Let $x > 0$, let $h = \lambda \log x = o(x)$ and let $r = o((\log x)^{1/4})$. Define the $r$th moment $m_r(x,h)$ of the distribution of primes in intervals of size $h$ as in \eqref{eq:rmomentlogdef}. Then \begin{equation*} m_r(x,h) \ll (\lambda+1)^r r^{2r} e^{O(r \log \log r)}. \end{equation*} \end{theorem} As before, this bound on moments yields the following corollary on intervals containing many primes. \begin{corollary}\label{cor:distributiontailuncondtionalbound} Let $x> 0$, let $h =\lambda \log x$, where $\lambda$ is a nondecreasing function of $x$. Let $k$ be an integer dependent on $x$ and assume that $k = o((\log x)^{1/6})$ and that $k/\lambda \to \infty$ as $x\to \infty$. Define $I(x;k,h)$ as in \eqref{eq:distributiontail:Ixkhdef}. Then for some constant $C$, \begin{equation*} I(x;k,h) \ll x\exp\left(-\sqrt{\frac{k}{(\lambda+1)e}}2^{C/2}\left(\log \frac{k}{(\lambda+1)e}\right)^{-C/2}\right). \end{equation*} \end{corollary}
It may also be possible to achieve weaker, yet nontrivial, bounds for larger $k$, along the lines of the bounds in Corollary \ref{cor:distributiontail:biggerktailbound}. We predict that the bound in Corollary \ref{cor:distributiontail:smallktailbound} should hold for any $k \ll (\log h)^2$, instead of merely $k \ll (\log h)^{1-\delta}$.
\begin{conjecture}\label{conj:distributiontail:weakertailforprimes} Let $x > 1$ and let $h = \lambda \log x$, with $\lambda = o((\log x)^\varepsilon)$ for all $\varepsilon > 0$. Let $k \ll (\log h)^2$, and define $\pi_k(x;h)$ as in \eqref{eq:distributiontail:pikxhforpoissontail}. Then $\pi_k(x;h) \ll x\mathrm{exp}\left(-\frac{k}{\lambda e}\right)$ as $x \to \infty$. \end{conjecture}
To put these results in perspective, let us consider the case when $\lambda = 1$, i.e. primes in intervals of width $\log x$. In \cite{gallaghershortintervals}, Gallagher shows that for fixed $k$, the number of $n\le x$ such that the interval $(n,n+\log x]$ contains exactly $k$ primes is asymptotic to the Poisson prediction $\frac{x}{ek!}$, assuming the Hardy--Littlewood conjectures. Unconditionally, Gallagher shows in \cite{gallaghershortintervals} that the number of $n \le x$ such that $(n,n+\log x]$ contains exactly $k$ primes is $\lesssim xe^{-Ck}$, for an absolute constant $C$.
We instead consider the probability that an interval $(n,n+\log x]$ contains at least $\log \log x$ primes. In this case, the Poisson prediction for the probability that an interval contains $\log \log x$ primes is $\frac{(\log x) e^{-1}}{(\log \log x)^{\log \log x}}$, which for any $A > 0$ is $\ll \frac 1{(\log x)^A}$. In \cite{MR3530450}, Maynard proves a lower bound; namely, that at least $\gg x\exp(-\sqrt{\log x})$ intervals $(n,n+\log x]$, with $n \le x$, contain $\gg \log\log x$ primes. In Corollary \ref{cor:distributiontail:biggerktailbound}, we show that, assuming the uniform Hardy--Littlewood conjectures, for all $\delta > \frac 12$, the number of $n\le x$ such that $(n,n+\log x]$ contains at least $\log \log x$ primes is $\ll_{\delta} x\exp\left(-\delta\log\log\log x(\log \log x)^{1-\delta}\right)$. Unconditionally, we show in Corollary \ref{cor:distributiontailuncondtionalbound} that there exists $C > 0$ such that the number of $(n,n+\log x]$ containing at least $\log \log x$ primes is $\ll x \exp\left(-\sqrt{2^C/e}(\log \log x)^{1/2}(\log \log \log x -1)^{-C/2}\right)$.
When $k$ is slightly smaller than $\log \log x$, that is, when $k \ll (\log h)^{(1-\delta)}$, Corollary \ref{cor:distributiontail:smallktailbound} achieves the bound of $\frac 1{(\log x)^A}$ with $A = \frac 1{\lambda e}$. Bounding this probability by $\frac 1{(\log x)^A}$ for any $A > 0$ may be within reach even if the Poisson prediction itself is not. On the other hand, many questions concerning the tail of the distribution of primes are quite delicate, especially concerning the maximum and minimum number of primes in an interval of a certain size. For example, in \cite{MR783576}, Maier proved that intervals of size $(\log x)^A$ for $A > 2$ can contain surprisingly few or surprisingly many primes. For more information, see \cite{GranvilleLumley}. The tail of the distribution of primes is also studied in \cite{MR3822615}.
\section{Averages for large sets}\label{sec:bigksums:gallagher}
In this section, we prove Theorems \ref{thm:bigksums:extendinggallagher} and \ref{thm:bigk:sumboundallk}. We begin with Theorem \ref{thm:bigksums:extendinggallagher}, whose proof closely follows Gallagher's original proof in \cite{gallaghershortintervals}.
\begin{proof}[Proof of Theorem \ref{thm:bigksums:extendinggallagher}, following Gallagher.]
Let $\nu_{\mathcal H}(p):= \#\mathcal H \bmod p$. For a set $\mathcal H = \{h_1, \dots, h_k\}$, write $D_{\mathcal H} = \prod_{i < j} (h_i-h_j)$, so that $\nu_{\mathcal H}(p) = k$ unless $p|D_{\mathcal H}$. Define \begin{equation*} a(p,\nu):= \frac{p^k - \nu p^{k-1}-(p-1)^k}{(p-1)^k}, \end{equation*} so that the $p$th factor of $\mathfrak S(\mathcal H)$ is given by $\frac{1-\nu_{\mathcal H}(p)/p}{(1-1/p)^k} = 1 + a(p,\nu_{\mathcal H}(p))$.
For any prime $p > k$, \begin{equation*} a(p,k) = \sum_{j=2}^k (p-1)^{-j} \binom kj (1-j) \ll k^2 (p-1)^{-2}. \end{equation*} This and a similar computation for $\nu < k$ shows that for $p > k$, \begin{equation}\label{eq:bigksums:gallapboundsbigp}
|a(p,\nu)| \ll \begin{cases} k^2 (p-1)^{-2} & \text{ if } \nu = k \\ k^2 (p-1)^{-1} &\text{ if } \nu < k. \end{cases} \end{equation} For $p \le k$, \begin{equation}\label{eq:bigksums:gallapboundssmallp}
|a(p,\nu)| = \left|-1 + \frac{1-\nu/p}{(1-1/p)^{k}}\right| \le \left(1 - \frac 1p\right)^{-k} < e^{2k/p}, \end{equation} since $\left(1-\frac 1p\right)^{-k}=\exp\left(k\sum_{j=1}^\infty \frac 1{jp^j}\right) < \exp\left(\frac{k}{p(1-1/p)}\right) \le \exp(2k/p)$.
For squarefree $q$, write $a_{\mathcal H}(q) := \prod_{p|q} a(p,\nu_{\mathcal H}(p))$, so that \begin{equation*}
\mathfrak S(\mathcal H) = \prod_{p \le k} \frac{1-\nu_{\mathcal H}(p)/p}{(1-1/p)^k} \subsum{q \ge 1 \\ p|q {\Rightarrow} p > k} \mu^2(q)a_{\mathcal H}(q). \end{equation*} Using the bounds on $a(p,\nu)$, for any $x$, \begin{equation*}
\subsum{q > x \\ p|q {\Rightarrow} p>k} |a_{\mathcal H}(q)| \le \subsum{q > x \\ p|q {\Rightarrow} p> k} \frac{\mu^2(q) (Ck^2)^{\omega(q)}}{\phi^2(q)}\phi((q,D_{\mathcal H})), \end{equation*} where $\omega(q)$ is the number of prime factors of $q$ and $C$ is an absolute positive constant.
Writing $q = de$ with $d|D_{\mathcal H}$ and $(e,D_{\mathcal H}) = 1$, this is \begin{equation}\label{eq:bigk:termsbiggerthanx}
\subsum{d|D_{\mathcal H} \\ p|d {\Rightarrow} p > k} \frac{\mu^2(d)(Ck^2)^{\omega(d)}}{\phi(d)} \subsum{e>x/d \\ (e,D_{\mathcal H}) = 1 \\ p|e {\Rightarrow} p > k} \frac{\mu^2(e) (Ck^2)^{\omega(e)}}{\phi^2(e)}. \end{equation} Apply Rankin's trick to bound the inner sum, so that for any choice of fixed $\alpha$ with $0 < \alpha < 1$, \begin{align*}
\subsum{e>x/d \\ (e,D_{\mathcal H}) = 1 \\ p|e {\Rightarrow} p > k} \frac{\mu^2(e) (Ck^2)^{\omega(e)}}{\phi^2(e)} &\le \subsum{e \ge 1 \\ (e,D_{\mathcal H}) = 1 \\ p|e {\Rightarrow} p > k} \left(\frac{e}{x/d}\right)^\alpha \frac{\mu^2(e) (Ck^2)^{\omega(e)}}{\phi^2(e)}. \\ \end{align*} By multiplicativity, this is \begin{align*} &= \left(\frac dx \right)^{\alpha} \prod_{\substack{p > k \\ p\nmid D_{\mathcal H}}} \left( 1 + \frac{Ck^2 p^{\alpha}}{(p-1)^2}\right) \le \left(\frac dx \right)^{\alpha} \mathrm{exp}\left(Ck^2 \sum_{\substack{p > k}} \frac{p^{\alpha}}{(p-1)^2}\right) \ll \left(\frac dx \right)^{\alpha} e^{Ck^{1 + \alpha}}. \end{align*}
Since $\frac 12 < \delta < 1$, we can choose $\alpha > 0$ small enough that $(1-\delta)(2+2\alpha) + \alpha < 1$, which also implies that $(1-\delta)(1+\alpha) < 1$. Plugging the bound for the inner sum into \eqref{eq:bigk:termsbiggerthanx}, we get that \eqref{eq:bigk:termsbiggerthanx} is \begin{align*}
&\ll \sum_{d|D_{\mathcal H}} \frac{\mu^2(d)(Ck^2)^{\omega(d)}}{\phi(d)} \left(\frac dx\right)^{\alpha} e^{Ck^{1 + \alpha}} = \frac{e^{Ck^{1+\alpha}}}{x^\alpha} \sum_{d|D_{\mathcal H}} \frac{\mu^2(d) (Ck^2)^{\omega(d)}d^{\alpha}}{\phi(d)}. \end{align*} For any $d \le D_{\mathcal H}$, we have that $\frac{d}{\phi(d)} \ll \log \log D_{\mathcal H}$, so that this expression becomes \begin{align*}
&\ll \frac{e^{Ck^{1+\alpha}}}{x^\alpha} (\log \log D_{\mathcal H}) \sum_{d|D_{\mathcal H}} \frac{\mu^2(d) (Ck^2)^{\omega(d)}d^{\alpha}}{d} = \frac{e^{Ck^{1+\alpha}}}{x^\alpha}(\log \log D_{\mathcal H}) \prod_{p|D_{\mathcal H}} \left(1 + \frac{Ck^2}{p^{1-\alpha}}\right). \end{align*} Since $(1-\delta)(1+\alpha) < 1$, $\frac{e^{Ck^{1+\alpha}}}{x^\alpha} \ll_\varepsilon h^{\varepsilon}/x^\alpha$. Moreover, the quantity $D_{\mathcal H}$ is at most $h^{\binom{k}{2}}$, since it is a product of $\binom{k}{2}$ quantities $h_i-h_j$, each of which are $<h$. Thus $\log \log D_{\mathcal H} \le \log \log h^{\binom{k}{2}} \ll \log \log h$, so in fact the product of all terms outside the product are $\ll_\varepsilon h^{\varepsilon}/x^\alpha$. It remains to understand the product, which is bounded by \begin{align*}
\prod_{p|D_{\mathcal H}}\left(1 + \frac{Ck^2}{p^{1-\alpha}}\right) &\le \exp\left(\sum_{p|D_{\mathcal H}}\frac{Ck^2}{p^{1-\alpha}}\right) \\ &\le \exp\left(2Ck^2\sum_{p \le \binom{k}{2}\log h} \frac{1}{p^{1-\alpha}}\right). \end{align*} The sum over primes satisfies \begin{equation*} \sum_{p \le \binom k 2 \log h} \frac 1{p^{1-\alpha}} = \frac{\binom{k}{2}^{\alpha} (\log h)^{\alpha}}{\alpha \log(\binom k2 \log h)}(1+o(1)), \end{equation*} for example by applying partial summation and L'H\^opital's rule, so that \begin{align*} \exp\left(2Ck^2\sum_{p \le \binom{k}{2}\log h} \frac{1}{p^{1-\alpha}}\right) &\ll \exp\left(\frac{4Ck^2}{\alpha}\binom{k}{2}^{\alpha} \frac{(\log h)^{\alpha}}{\log \log h}\right) \\ &\ll \exp\left(\frac{4C}{2^{\alpha}\alpha} k^{2+2\alpha}\frac{(\log h)^{\alpha}}{\log\log h}\right) \\ &\ll \exp\left(\frac{4C}{2^{\alpha} \alpha} (\log h)^{(1-\delta)(2+2\alpha)+\alpha}/\log \log h\right). \end{align*} Since $(1-\delta)(2+2\alpha)+\alpha < 1$, this quantity is $\ll_{\varepsilon} h^{\varepsilon}$, so \eqref{eq:bigk:termsbiggerthanx} is $\ll_\varepsilon h^{2\varepsilon}/x^\alpha$, say. Set $x=h^{1/2}$, and choose $\varepsilon > 0$ small enough that $2 \varepsilon < \frac 12 \alpha$.
This is true for any set $\mathcal H = \{h_1, \dots, h_k\}$, so it follows that \begin{equation}\label{eq:bigksums:qrexpansion}
T_k(h)= \subsum{q \le x \\ p|q {\Rightarrow} p > k} \subsum{r \ge 1 \\ p|r {\Rightarrow} p \le k} \subsum{h_1, \dots, h_k \le h \\ \text{distinct}} a_{\mathcal H}(qr) + O\left(\frac{h^{2\varepsilon}}{x^\alpha} \subsum{h_1, \dots, h_k \le h \\ \text{distinct}} \prod_{p \le k} \frac{1-\nu_{\mathcal H}(p)/p}{(1-1/p)^k}\right), \end{equation} where we have additionally expanded the terms of the product with $p \le k$ into the sum over $r$. First consider the error term in \eqref{eq:bigksums:qrexpansion}, which by \eqref{eq:bigksums:gallapboundssmallp} is \begin{align*} &\ll \frac{h^{2\varepsilon}}{x^\alpha} h^k \prod_{p \le k} \left(e^{2k/p}\right) \\ &\ll \frac{h^{2\varepsilon}}{x^\alpha} h^k e^{2k\sum_{p \le k} \tfrac 1p} \\ &\ll \frac{h^{2\varepsilon}}{x^\alpha} h^k e^{O(k\log \log k)} \ll \frac{h^{2\varepsilon}}{x^\alpha} h^k e^{(\log h)^{1-\delta/2}} \ll \frac{h^{2\varepsilon}}{x^\alpha} h^{k + o_{\delta}(1)}. \end{align*}
Now consider the main term. The sum over $h_1,\dots, h_k \le h$ in the main term of \eqref{eq:bigksums:qrexpansion} can also be written as \begin{equation*}
\subsum{\vec{\nu} = (\nu_p)_{p|qr} \\ \nu_p \le \min\{p-1,k\}} \prod_{p|qr} a(p,\nu_p) \left( N(\vec{\nu})+ O(kh^{k-1})\right), \end{equation*}
where $N(\vec{\nu})$ is the number of $k$-tuples of not necessarily distinct integers $h_1, \dots, h_k$ with $1 \le h_1, \dots, h_k \le h$ which occupy exactly $\nu_p$ residue classes mod $p$ for each $p|qr$. We can estimate $N(\vec{\nu})$ by counting for each $p|qr$ the number of $h_1, \dots, h_k \bmod p$ that occupy exactly $\nu_p$ residue classes and applying the Chinese Remainder Theorem. Thus \begin{equation*}
N(\vec{\nu}) = \prod_{p|qr} \binom{p}{\nu_p} \sigma(k,\nu_p) \left( \frac{h}{qr} + O(1)\right)^k, \end{equation*} where $\sigma(k,j)$ denotes the number of surjective maps $[1,k] \twoheadrightarrow [1,j]$; we also have $\sigma(k,j) = j! \stirlingii{k}{j}$, where $\stirlingii{k}{j}$ is the Stirling number of the second kind. Expanding, we get \begin{equation*}
N(\vec{\nu}) = \prod_{p|qr} \binom{p}{\nu_p} \sigma(k,\nu_p)\left(\left(\frac h{qr}\right)^k + O\left( \sum_{j=0}^{k-1} \left(\frac h{qr}\right)^j \binom kj\right)\right). \end{equation*}
Since $x = h^{1/2}$ and $\prod_{p \le k} p = e^{O(k)} = e^{O((\log h)^{1-\delta})} = h^{o(1)}$, for any $q \le x$ and $r$ with all prime factors $\le k$, \[\sum_{j=0}^{k-1} \left(\frac{h}{qr}\right)^j \binom kj \ll k^2 \left(\frac{h}{qr}\right)^{k-1}.\] Thus the inner sum in the main term of \eqref{eq:bigksums:qrexpansion} is \begin{equation}\label{eq:bigksums:ABCdecomposition} \left(\frac h{qr} \right)^k A(qr) + O\left(k^2 \left(\frac{h}{qr}\right)^{k-1} B(qr) \right) + O(kh^{k-1} C(qr)), \end{equation} where \begin{align*}
A(qr) &= \subsum{\vec{\nu} = (\nu_p)_{p|qr} \\ \nu_p \le \min\{p-1,k\}} \prod_{p|qr} a(p,\nu_p) \binom{p}{\nu_p} \sigma(k,\nu_p), \\
B(qr) &= \subsum{\vec{\nu} = (\nu_p)_{p|qr} \\ \nu_p \le \min\{p-1,k\}} \prod_{p|qr} |a(p,\nu_p)| \binom{p}{\nu_p} \sigma(k,\nu_p), \text{ and} \\
C(qr) &= \subsum{\vec{\nu} = (\nu_p)_{p|qr} \\ \nu_p \le \min\{p-1,k\}} \prod_{p|qr} |a(p,\nu_p)|. \end{align*} Just as in \cite{gallaghershortintervals}, $A(q) = 0$ for $q > 1$, and $A(1) = 1$.
Now consider $C(qr)$, which can be estimated using the bounds \eqref{eq:bigksums:gallapboundsbigp} and \eqref{eq:bigksums:gallapboundssmallp} for $a(p,\nu)$. Write \begin{equation*}
C(qr) = \prod_{p|qr} \left(\sum_{\nu = 1}^{\min\{p-1,k\}} |a(p,\nu)|\right). \end{equation*} If $p > k$, the $p$th factor is $\ll \frac{pk^2}{p-1}$, whereas if $p \le k$, this factor is $\ll pe^{2k/p}.$ Thus \begin{equation}\label{eq:bigksums:Cqr}
C(qr) \le (C_1k^2)^{\omega(q)} \frac{q}{\phi(q)} C_1^{\omega(r)}r e^{2k\sum_{p|r} 1/p}, \end{equation} for some absolute constant $C_1$, where without loss of generality $C_1 \ge 1$.
After summing \eqref{eq:bigksums:Cqr} over all $q$ and $r$, the contribution to $T_k(h)$ from the factors coming from $C(qr)$ in \eqref{eq:bigksums:ABCdecomposition} is bounded by \begin{align*}
&\ll kh^{k-1} \subsum{q \le x \\ p|q {\Rightarrow} p > k} \subsum{r \ge 1 \\ p|r {\Rightarrow} p \le k} (C_1k^2)^{\omega(q)} \frac{q}{\phi(q)} C_1^{\omega(r)}r e^{2k\sum_{p|r} 1/p} \\
&\ll kh^{k-1} \prod_{p \le k} \left(1 + C_1pe^{2k/p}\right) \subsum{q \le x \\ p|q {\Rightarrow} p > k} (C_1k^2)^{\omega(q)}\frac{q}{\phi(q)} \\
&\ll kh^{k-1} (2C_1)^{\pi(k)} e^{\sum_{p \le k} \log p} e^{2k\sum_{p \le k} \tfrac 1p}\subsum{q \le x \\ p|q {\Rightarrow} p > k} (C_1k^2)^{\omega(q)}\frac{q}{\phi(q)}. \numberthis \label{eq:Cqr-summed-over-q-r-almost-there} \end{align*} The terms outside the sum are $\ll h^{k-1+o(1)}$ in the range where $k \ll (\log h)^{1-\delta}$. We now examine the inside sum using Rankin's trick. First note that $\frac{q}{\phi(q)} \ll \log \log q \ll \log \log h$ for $q \le x = h^{1/2}$, so we may omit $\frac{q}{\phi(q)}$ from the sum while only losing a factor of $h^{o(1)}$. Thus for any $0 < \gamma < 2$, \begin{align*}
\subsum{q \le x \\ p|q {\Rightarrow} p > k} (C_1k^2)^{\omega(q)}\frac{q}{\phi(q)} &\le h^{o(1)} \subsum{q \\ p|q {\Rightarrow} k < p \le x} (C_1k^2)^{\omega(q)} \left(\frac{x}{q}\right)^{2-\gamma} \\ &\ll h^{o(1)} x^{2-\gamma} \prod_{k < p \le x} \left(1 + \frac{C_1k^2}{p^{2-\gamma}}\right) \\ &\ll h^{o(1)} x^{2-\gamma} \mathrm{exp}\left(\sum_{k < p \le x} \frac{C_1k^2}{p^{2-\gamma}}\right) \\ &\ll h^{o(1)} x^{2-\gamma} \mathrm{exp} \left(\frac{C_2k^{1 + \gamma}}{\log k}\right), \end{align*} for a possibly different positive constant $C_2 > 0$. We can now choose any $\gamma >0$ such that $(1-\delta)(1+\gamma) < 1$; for example, choose $\gamma = \alpha$. Then since $x = h^{1/2}$ and $k \ll (\log h)^{1-\delta}$, \begin{equation}\label{eq:subsum-over-q-for-B-and-C}
\subsum{q \le x \\ p|q {\Rightarrow} p > k} (C_1k^2)^{\omega(q)}\frac{q}{\phi(q)} \ll h^{o(1)}x^{2-\alpha} \mathrm{exp}\left(\frac{C_2k^{1+\alpha}}{\log k}\right) \ll h^{1-\alpha/2 + o_\delta(1)}. \end{equation} Plugging \eqref{eq:subsum-over-q-for-B-and-C} into \eqref{eq:Cqr-summed-over-q-r-almost-there} shows that the contribution to $T_k(h)$ from the factors corresponding to $C(qr)$ is $\ll h^{k-\alpha/2+o_\delta(1)}$.
Finally, consider $B(qr)$. Just as with $C(qr)$, $B(qr)$ is multiplicative, and the $p$th factor of $B(qr)$ is given by
\[\sum_{\nu = 1}^{\min\{p-1,k\}} |a(p,\nu)| \binom p{\nu} \sigma(k,\nu), \] which by \eqref{eq:bigksums:gallapboundssmallp}, \eqref{eq:bigksums:gallapboundsbigp}, and the fact that $\sum_{\nu = 1}^p \binom p{\nu} \sigma(k,\nu) = p^k,$ is \[\ll \begin{cases} \frac{k^2 p^k}{p-1} &\text{ if } p > k \\ e^{2k/p}p^k &\text{ if } p \le k. \end{cases}\] Thus for some absolute constant $C_2 \ge 1$, and after summing over all $q$ and $r$, the contribution to $T_k(h)$ from the $B(qr)$ factors in \eqref{eq:bigksums:ABCdecomposition} is \begin{align*}
&\ll \subsum{q \le x \\ p|q {\Rightarrow} p > k} \subsum{r \ge 1 \\ p|r {\Rightarrow} p \le k} k^2 \left(\frac{h}{qr}\right)^{k-1} \frac{(C_2k^2)^{\omega(q)} q^k}{\phi(q)} C_2^{\omega(r)} r^k e^{2k\sum_{p|r} \tfrac 1p} \\
&\ll k^2 h^{k-1} \subsum{q \le x \\ p|q {\Rightarrow} p > k} \frac{(C_2k^2)^{\omega(q)} q}{\phi(q)} \prod_{p \le k} \left(1 + C_2e^{2k/p} p \right). \end{align*} For $k \ll (\log h)^{1-\delta}$, the product over $p \le k$ is \[\prod_{p \le k} \left(1+C_2e^{2k/p}p\right) \ll (2C_2)^k \prod_{p \le k} e^{2k/p}p = (2C_2)^k \exp\left(\sum_{p \le k} \frac{2k}p + \log p\right) \ll (2C_2)^k e^{3k\log\log k} = h^{o_{\delta}(1)},\] so the overall sum is \begin{align*}
&\ll k^2 h^{k-1 + o_{\delta}(1)} \subsum{q \le x \\ p|q {\Rightarrow} p > k} \frac{(C_2k^2)^{\omega(q)} q}{\phi(q)}. \end{align*} Applying \eqref{eq:subsum-over-q-for-B-and-C} with the same choice of $\gamma$ shows that the contribution to $T_k(h)$ from the $B(qr)$ factors is $\ll h^{k-\alpha/2 + o_\delta(1)}$.
Combining the contributions from $A(1)$, the sums over $q$ and $r$ of $B(qr)$ and $C(qr)$, and the error term in \eqref{eq:bigksums:qrexpansion}, we get that \[T_k(h) = h^k + O\left(h^{k-\alpha/2 + o_{\delta}(1)} + \frac{h^{2\varepsilon}}{x^\alpha} h^{k + o_{\delta}(1)}\right), \] which for $x = h^{1/2}$ is $h^k + O(h^{k-\beta})$ for $\beta < \frac 12 \alpha - 2 \varepsilon$. Our choice of $\alpha$ depends only on $\delta$, so $\beta$ also depends only on $\delta$, as desired. \end{proof}
The techniques relying on the Chinese Remainder Theorem break down for larger $k$, where they do not give a bound with the correct power of $h$. We will now turn to Theorem \ref{thm:bigk:sumboundallk}, which bounds $T_k(h)$ for any $k$ where the dependence on $h$ is $h^k$, which is approximately the number of terms in the sum. In other words, Theorem \ref{thm:bigk:sumboundallk} provides a bound that is uniform in $h$ on the average value of $k$-term singular series for sets with elements that are at most $h$. The proof of Theorem \ref{thm:bigk:sumboundallk} relies on Lemma \ref{lem:amgmbound}, which is a uniform bound on $\mathfrak S(\mathcal H)$ for $\mathcal H \subset [1,h]$ satisfying the conditions of Theorem \ref{thm:bigk:sumboundallk}.
\begin{lemma}\label{lem:amgmbound} Let $\mathcal H=\{h_1, \dots,h_k\}$ be a set of distinct integers. Define $\mathfrak S(\mathcal H)$ as in \eqref{eq:bg:singseries}. Then \begin{equation}
\mathfrak S(\mathcal H) \ll \prod_{p \le k^3} \frac{1}{(1-1/p)^k}\prod_{p > k^3} \frac{1-k/p}{(1-1/p)^k} \binom{k}{2}^{-1}\sum_{1 \le i < j \le k} \exp \Big(2 \binom k2 \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big). \end{equation} \end{lemma} \begin{proof} By definition, \begin{align} \mathfrak S(\mathcal H) &= \prod_{p \text{ prime}} \frac{1-\nu_{\mathcal H}(p)/p}{(1-1/p)^k} \nonumber \\ &\le \prod_{\substack{p \text{ prime} \\ p \le k^3}} \frac 1{(1-1/p)^k} \prod_{\substack{p \text{ prime} \\ p > k^3}} \frac{1-\nu_{\mathcal H}(p)/p}{(1-1/p)^k} \nonumber \\
&= \prod_{p \le k^3} \frac{1}{(1-1/p)^k}\prod_{p > k^3} \frac{1-k/p}{(1-1/p)^k}\prod_{\substack{p|\Delta(\mathcal H) \\ p > k^3}} \frac{p-\nu_{\mathcal H}(p)}{p-k}. \label{eq:splitproductintoprimeregions} \end{align} Rewrite the product inside via \begin{align*}
\prod_{\substack{p|\Delta(\mathcal H) \\ p > k^3}} \frac{p-\nu_{\mathcal H}(p)}{p-k} &= \prod_{\substack{p|\Delta(\mathcal H) \\ p > k^3}} \Big(1 + \frac{k-\nu_{\mathcal H}(p)}{p-k}\Big) \le \exp\Big(\sum_{\substack{p|\Delta(\mathcal H) \\ p > k^3}} \frac{k-\nu_{\mathcal H}(p)}{p-k}\Big) \\
&\ll \exp\Big(2 \sum_{1 \le i < j\le k} \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big) \\
&\le \binom{k}{2}^{-1}\sum_{1 \le i < j \le k} \exp \Big(2 \binom k2 \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big), \end{align*} where the last step comes from Jensen's inequality; plugging this into \eqref{eq:splitproductintoprimeregions} yields the result. \end{proof}
With Lemma \ref{lem:amgmbound} in hand, we now turn to the proof of Theorem \ref{thm:bigk:sumboundallk}.
\begin{proof}[Proof of Theorem \ref{thm:bigk:sumboundallk}] By Lemma \ref{lem:amgmbound}, for each set $\mathcal H = \{h_1, \dots, h_k\}$ with $h_1, \dots, h_k \le h$ distinct,
\[\mathfrak S(\mathcal H) \ll \prod_{p \le k^3} \frac{1}{(1-1/p)^k}\prod_{p > k^3} \frac{1-k/p}{(1-1/p)^k} \binom{k}{2}^{-1}\sum_{1 \le i < j \le k} \exp \Big(2 \binom k2 \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big).\] Sum over $\mathcal H$ to get \begin{equation*}
T_k(h) \ll \prod_{p \le k^3} \frac{1}{(1-1/p)^k}\prod_{p > k^3} \frac{1-k/p}{(1-1/p)^k} \subsum{1\le h_1, \dots, h_k \le h \\ \text{distinct}} \binom{k}{2}^{-1}\sum_{1 \le i < j \le k} \exp \Big(2 \binom k2 \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big). \end{equation*} Let $S_k(h)$ refer to the sums above, so that \begin{equation*}
S_k(h) := \subsum{1\le h_1, \dots, h_k \le h \\ \text{distinct}}\binom{k}{2}^{-1} \sum_{1 \le i < j \le k} \exp \Big(2 \binom k2 \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big). \end{equation*} Then \begin{align*}
S_k(h)&\ll \binom{k}{2}^{-1} \sum_{1 \le i < j \le k} \subsum{1\le h_1, \dots, h_k \le h \\ \text{distinct}} \exp \Big(2\binom k2 \sum_{\substack{p|(h_i-h_j) \\ p>k^3}} \frac 1p \Big) \\
&\ll \binom{k}{2}^{-1} \sum_{1 \le i < j \le k} \sum_{\substack{h_i,h_j \le h \\ \text{distinct}}} \exp\Big(2\binom k2 \sum_{\substack{p|(h_i-h_j) \\ p > k^3}} \frac 1p \Big)h^{k-2} \\
&= h^{k-2} \sum_{\substack{\ell \le h}} (h-\ell)\exp\Big(2\binom k2 \sum_{\substack{p|\ell \\ p > k^3}} \frac 1p \Big)\\
&\le h^{k-1} \sum_{\substack{\ell \le h}} \exp\Big(2\binom k2 \sum_{\substack{p|\ell \\ p > k^3}} \frac 1p \Big). \end{align*}
The sum over $\ell$ is a sum over a multiplicative function $f_k(\ell)$, with $f_k(p^j) = 1$ if $p \le k^3$ and $f_k(p^j) = \exp(k(k-1)/p)$ if $p>k^3$, regardless of $j$. The function $f_k(\ell)$ satisfies \begin{equation*}
f_k(\ell) = \sum_{d|\ell} g_k(d), \end{equation*} where $g_k$ is a multiplicative function given by $g_k(p^j) = 0$ if $p \le k^3 $ or $j \ge 2$ and $g_k(p) = \exp(k(k-1)/p)-1$ for $p > k^3$ prime. Then \begin{equation*}
\sum_{\ell \le h} f_k(\ell) = \sum_{\ell \le h} \sum_{d|\ell} g_k(d) = \sum_{d\le h} g_k(d)\left\lfloor \frac hd\right\rfloor \le h\sum_{d=1}^\infty \frac{g_k(d)}{d}. \end{equation*} The sum over $d$ can be rewritten as \begin{align*} \prod_{p > k^3} \Big(1 + \frac{\exp\left(\tfrac{k(k-1)}{p}\right)-1}{p}\Big) &\le \exp\Big(\sum_{\substack{p \text{ prime} \\ p>k^3}} \sum_{j\ge 1} \frac 1p \left(\frac{k(k-1)}{p}\right)^j\Big) \\ &< \exp\Big(\sum_{\substack{p \text{ prime} \\ p>k^3}} \sum_{j \ge 1} \frac 1{p^{1+j/3}}\Big). \end{align*} The sum in the exponent is bounded by a constant independent of $k$, and thus the sum over $d$ is bounded by a constant independent of $k$, so that $S_k(h) \ll h^k$.
Finally, return to the contribution from the small primes and $T_k(h)$, which is bounded by \begin{equation*} T_k(h) \ll h^k \prod_{p \le k^3} \frac 1{(1-1/p)^k} \ll h^k (3\log k)^k, \end{equation*} as desired. \end{proof}
\section{Proof of Theorem \ref{thm:distributiontail:momentboundsmallk} and its corollaries}\label{sec:distributiontail:HLbounds}
Throughout, consider an interval of size $h = \lambda \log x$. Fix $\delta > \frac 12$ and assume that $r \ll (\log h)^{1-\delta}$. We begin with the proof of Theorem \ref{thm:distributiontail:momentboundsmallk}.
The $r$th moment $m_r(x,h)$, defined in \eqref{eq:rmomentlogdef}, is given by \begin{align*} m_r(x,h) &= \subsum{1 \le h_1, \dots, h_r \le h} \frac 1x \sum_{\substack{n \le x}} \mathbf 1_{\mathcal P}(n+h_1) \cdots \mathbf 1_{\mathcal P}(n+h_r) \\ &= \sum_{\ell=1}^r \frac{\sigma(r,\ell)}{\ell!} \subsum{1 \le h_1, \dots, h_\ell \le h \\ \text{distinct}} \frac 1x \sum_{n \le x} \mathbf 1_{\mathcal P}(n+h_1) \cdots \mathbf 1_{\mathcal P}(n+h_\ell), \end{align*} where $\sigma(r,\ell)$ is the number of surjective maps $[1,r] \twoheadrightarrow [1,\ell]$, and $\frac{\sigma(r,\ell)}{\ell!} = \stirlingii{r}{\ell}$, the Stirling number of the second kind.
Apply Conjecture \ref{conj:bg:HLuniform} to replace the sum over correlations of primes with a sum over singular series, yielding \begin{equation*} m_r(x,h) = \sum_{\ell=1}^r \stirlingii{r}{\ell} \frac 1{(\log x)^\ell} \subsum{1 \le h_1, \cdots, h_\ell \le h \\ \text{distinct}} (\mathfrak S(\{h_1, \dots, h_\ell\}) + o(1)). \end{equation*} Estimating this moment now depends on the average of the singular series constants, and in particular how quickly this average converges to $1$. We apply our results from Section \ref{sec:bigksums:gallagher} bounding sums of singular series for large sets, and in particular Theorem \ref{thm:bigksums:extendinggallagher}, which requires our assumption that $r \ll (\log h)^{1-\delta}$. For larger $r$, one could also apply the weaker result in Theorem \ref{thm:bigk:sumboundallk} to yield a weaker moment bound.
By Theorem \ref{thm:bigksums:extendinggallagher}, for any $\ell \le r \ll (\log h)^{1-\delta}$ and for some $\beta > 0$ dependent only on $\delta > \frac 12$, \begin{equation*} \sum_{\substack{1 \le h_1, \cdots, h_\ell \le h \\ \text{distinct}}} \mathfrak S(h_1, \dots, h_\ell) = h^\ell + O(h^{\ell-\beta}). \end{equation*} Then for any $r \ll (\log h)^{1-\delta}$, \begin{align*} m_r(x,h) &= \sum_{\ell = 1}^r \stirlingii{r}{\ell} \frac 1{(\log x)^\ell}\left(h^\ell + O(h^{\ell-\beta})\right) + o\left(\sum_{\ell = 1}^r \frac 1{(\log x)^\ell}\stirlingii{r}{\ell} h^\ell\right) \\ &= \left(\sum_{\ell = 1}^r \stirlingii{r}{\ell} \lambda^\ell\right)(1 + o(1)), \end{align*} where the error term is uniform in $r$. This completes the proof of Theorem \ref{thm:distributiontail:momentboundsmallk}. We now proceed to prove Corollary \ref{cor:distributiontail:smallktailbound}.
\begin{proof}[Proof of Corollary \ref{cor:distributiontail:smallktailbound}] Let $r \ll (\log h)^{1-\delta}$. Applying a Markov bound to the $r$th moment $m_r(x,h)$, we get that \begin{equation*} I(x;k,h) \le \frac{x}{k^r} m_r(x,h) \ll \frac{x}{k^r} \sum_{\ell = 1}^r \stirlingii{r}{\ell}\lambda^\ell. \end{equation*} As shown in \cite[Theorem 3]{MR241310}, Stirling numbers of the second kind are bounded above by $\stirlingii{r}{\ell} \le \frac 12 \binom r{\ell} \ell^{r-\ell},$ so \begin{equation*} I(x;k,h) \ll \frac{x}{k^r} \sum_{\ell = 1}^r \binom r{\ell} \ell^{r-\ell} \lambda^\ell \ll \frac{x}{k^r} (\lambda + r)^r. \end{equation*} If $\lambda \ge 1$, then as $x \to \infty$ eventually $\frac{k}{\lambda} \ge \frac{\lambda e}{\lambda - 1}$, which in turn implies that we can choose $r = \frac{k}{\lambda e}$ and get $(\lambda + r)^r \le (\lambda r)^r$. With this choice of $r$, we thus get $I(x;k,h)\ll xe^{-k/\lambda e}$, as desired.
Meanwhile if $\lambda < 1$, we can choose $r = \frac{k}{(\lambda + 1)e}$ to get the desired result, since $(\lambda+r)^r \le ((\lambda+1)r)^r$. \end{proof}
Corollary \ref{cor:distributiontail:biggerktailbound} follows via the same argument as the proof of Corollary \ref{cor:distributiontail:smallktailbound}, but where $r$ is taken to be $(\log h)^{1-\delta}$.
\section{Unconditional bounds}\label{sec:distributiontail:unconditionalbounds}
We can also achieve weaker unconditional bounds on the moments $m_r(x, h)$ and the tail of the distribution via replacing the use of the Hardy--Littlewood conjectures by an application of the Selberg sieve. More precisely, we will make use of the following theorem, which is proven in Section \ref{sec:distributiontail:selbergsievethm}. \begin{theorem}\label{thm:distributiontail:selbergsievethm} Let $x \ge 2$, let $k = o((\log x)^{1/4})$, and let $\mathcal H = \{h_1, \dots, h_k\}$ be a set of $k$ distinct natural numbers. For any $\varepsilon > 0$, \begin{align*}
&\left|\left\{n \le x: n + h_i \text{ prime for all } i \right\}\right| \\
&\le (2+\varepsilon)^k k! \mathfrak S(\mathcal H) \frac{x}{\log^k x} \left(1 + O\left(\frac{\log\log(3x) + k^4 + k\log\log(3|D_{\mathcal H}|)}{\log x}\right) \right), \end{align*} where $D_{\mathcal H} := \prod_{i < j} (h_i - h_j)$. \end{theorem} Theorem \ref{thm:distributiontail:selbergsievethm} extends the work of Klimov in \cite{MR0097372}, who shows an analogous bound for $k$ fixed as $x \to \infty$.
We now turn to the proofs of Theorem \ref{thm:distributiontail:unconditionalmomentbound}, as well as that of Corollary \ref{cor:distributiontailuncondtionalbound}. \begin{proof}[Proof of Theorem \ref{thm:distributiontail:unconditionalmomentbound}] As in the proof of Theorem \ref{thm:distributiontail:momentboundsmallk}, we have \begin{equation*} m_r(x,h) = \sum_{\ell=1}^r \frac{\sigma(r,\ell)}{\ell!} \subsum{1 \le h_1, \cdots, h_\ell \le h \\ \text{distinct}} \frac 1x \sum_{n \le x} \mathbf 1_{\mathcal P}(n+h_1) \cdots \mathbf 1_{\mathcal P}(n+h_\ell). \end{equation*} For our choice of $h$ and $r$, the error term in Theorem \ref{thm:distributiontail:selbergsievethm} is $O(1)$. Applying Theorem \ref{thm:distributiontail:selbergsievethm}, the $r$th moment is then bounded by \begin{align*} m_r(x,h) &\ll \sum_{\ell=1}^r \frac{\sigma(r,\ell)}{\ell!(\log x)^\ell} \subsum{1 \le h_1, \cdots, h_\ell \le h \\ \text{distinct}} (2+\varepsilon)^\ell \ell! \mathfrak S(\mathcal H) \\ &\ll \sum_{\ell=1}^r \stirlingii{r}{\ell} \ell! \frac{1}{(\log x)^\ell} (2+\varepsilon)^\ell h^\ell e^{O(\ell \log \log \ell)}, \end{align*} where the last step follows by applying Theorem \ref{thm:bigk:sumboundallk}. Since $h = \lambda\log x$, this sum is then \begin{align*} &\ll r! (2 + \varepsilon)^r e^{O(r\log\log r)} \sum_{\ell = 1}^r \stirlingii{r}{\ell} \lambda^\ell. \end{align*} As seen in the proof of Corollary \ref{cor:distributiontail:smallktailbound}, $\sum_{\ell = 1}^r \stirlingii{r}{\ell} \lambda^\ell \le (\lambda+r)^r\le ((\lambda+1)r)^r,$ so that \begin{align*} m_r(x,h) &\ll r! (2+\varepsilon)^r e^{O(r\log \log r)}(\lambda+1)^r r^r \\ &\ll r^{2r} e^{O(r\log \log r)} (\lambda+1)^r, \end{align*} which gives the result. \end{proof}
\begin{proof}[Proof of Corollary \ref{cor:distributiontailuncondtionalbound}] The proof of this corollary proceeds along the same lines as the proofs of Corollaries \ref{cor:distributiontail:smallktailbound} and \ref{cor:distributiontail:biggerktailbound}. In this case, we know unconditionally from Theorem \ref{thm:distributiontail:unconditionalmomentbound} that \begin{equation*} I(x;k,h) \le \frac{x}{k^r} m_r(x,h) \ll \frac{x}{k^r}(\lambda+1)^r r^{2r} e^{O(r\log\log r)}. \end{equation*} Hence there exists some constant $C > 0$ with \begin{equation*} I(x;k,h) \ll \frac{x}{k^r} (\lambda+1)^r r^{2r} e^{Cr\log\log r} = x\exp(r\log \frac{\lambda+1}{k} + 2r\log r + Cr\log\log r). \end{equation*} Choose $r = \left(\frac{k}{(\lambda+1) e}\right)^{1/2}2^{C/2}\left(\log \frac{k}{(\lambda+1) e}\right)^{-C/2}$, so that \begin{align*} \log r &= \frac 12 \log \frac{k}{(\lambda +1)e} +\frac C2 \log 2- \frac C2 \log \log \frac{k}{(\lambda+1)e}, \text{ and} \\ \log \log r &= \log \frac 12 + \log (\log \frac{k}{(\lambda+1)e} - \frac C2 \log \log \frac{k}{(\lambda +1)e} + C\log 2) \\ &\le \log \frac 12 + \log \log \frac{k}{(\lambda + 1)e}, \end{align*} where the inequality holds for large enough $x$ since $\frac{k}{\lambda} \to \infty$. Plugging in these expressions for $\log r$ and $\log \log r$ gives \begin{align*} r&\log \frac{\lambda +1}{k} + 2r\log r + Cr\log \log r \le -r, \end{align*} so that $I(x;k,h) \ll x e^{-r}$, which completes the proof. \end{proof}
\subsection{Selberg's Sieve: Proof of Theorem \ref{thm:distributiontail:selbergsievethm}} \label{sec:distributiontail:selbergsievethm}
Selberg's sieve has previously been used to bound the frequency of prime $k$-tuples; see for example \cite{MR0424730}, which we will refer to throughout this section, as well as \cite{MR2647984} and \cite{MR0097372}. In \cite{MR0424730}, Halberstam and Richert proceed along a very similar calculation, with the only material difference being that we are not taking $k$ to be a constant in terms of the other parameters, and thus we keep track of the dependence on $k$ throughout. We proceed along the lines of \cite[Theorem 5.7]{MR0424730}. To do so, there are several lemmas that we will want to adapt to this setting.
We begin by defining notation. Let $\mathcal P$ be the set of all primes, and for $z > 0$, let $P(z) := \prod_{p \le z} p$. Let $\mathcal H = \{h_1, \dots, h_k\}$ be a set of $k$ distinct natural numbers, so that $D_{\mathcal H} \ne 0$. Define \begin{equation*} \mathcal A := \left\{ \prod_{i=1}^k (n + h_i) : n \le x \right\}, \end{equation*}
and define $A_p$ to be the number of elements of $\mathcal A$ that are divisible by a prime $p$, with $A_p = \frac{\nu_{\mathcal H}(p)}{p}x + O(\nu_{\mathcal H}(p)).$ Let $A_d$ be the number of elements of $\mathcal A$ that are divisible by $d$, so that $A_d = \frac{\nu_{\mathcal H}(d)}{d} x + O(\nu_{\mathcal H}(d)),$ where $\nu_{\mathcal H}(d) = \prod_{p|d} \nu_{\mathcal H}(p)$. Let $R_d = A_d - \frac{\nu_{\mathcal H}(d)}{d} x$, so that $|R_d| \le \nu_{\mathcal H}(d)$. Our goal is to estimate the quantity \begin{equation}\label{eq:distributiontail:selberg:SAPzdef}
S(\mathcal A;\mathcal P,z):=|\{a:a \in \mathcal A, (a,P(z)) = 1\}|. \end{equation}
Halberstam and Richert define three conditions on a sieve problem in order to apply the Selberg sieve, which they denote ($R$), ($\Omega_1$), and $(\Omega_2(\kappa,L))$, where the parameter $\kappa$ is the dimension of the sieve, which in this case is equal to $k$. The three conditions are: \begin{align}
|R_d| &\le \nu_{\mathcal H}(d) \text{ if } \mu(d) \ne 0; \tag{$R$} \\ 0 &\le \frac{\nu_{\mathcal H}(p)}{p} \le 1 - \frac 1{\alpha_1} \text{ for some constant $\alpha_1 \ge 1$}; \tag{$\Omega_1$} \\ -L &\le \sum_{w \le p < z} \frac{\nu_{\mathcal H}(p) \log p}{p} - \kappa \log \frac zw \le \alpha_2 \text{ for any } z\ge w \ge 2. \tag{$\Omega_2(\kappa,L)$} \end{align} For the final condition, $\alpha_2$ and $L$ are constants, each $\ge 1$, which are independent of $z$ and $w$. For our purposes, we will need to keep track of the values $\alpha_1$, $\alpha_2$, and $L$, and in particular their dependence on $k$.
\begin{lemma}\label{lem:conditions-r-omegas-hold}
For a set $\mathcal H = \{h_1, \dots, h_k\}$ and $\nu_{\mathcal H}$, $D_{\mathcal H}$, $\mathcal A$, $A_d$, and $R_d$ defined as above, the conditions ($R$), ($\Omega_1$), and ($\Omega_2(\kappa,L)$) are satisfied with $\alpha_1 = k +1$, $\alpha_2 = O(k)$, $\kappa = k$, and $L = k \log\log(3|D_{\mathcal H}|)$. \end{lemma} \begin{proof}
We first see that condition ($R$) is satisfied, since $|R_d| \le \nu_{\mathcal H}(d)$. We also have $\frac{\nu_{\mathcal H}(p)}{p} \le \frac{\min\{k,p-1\}}{p} \le 1-\frac{1}{k+1},$ so that ($\Omega_1$) is satisfied with $\alpha_1 = k+1$.
For any $z$ and any $w < z$, \[\sum_{w \le p < z} \frac{\nu_{\mathcal H}(p)\log p}{p} = k \sum_{w \le p < z} \frac{\log p}{p} - \sum_{w \le p < z} \frac{k-\nu_{\mathcal H}(p)}{p} \log p,\] so that \[\sum_{w \le p < z} \frac{\nu_{\mathcal H}(p) \log p}{p} \le k \sum_{w \le p < z} \frac{\log p}{p} \le k \log \frac zw + O(k).\]
By \cite[Lemma 5.1]{MR0424730}, for any natural number $n$, $\sum_{p|n} \frac{\log p}{p} \ll \log \log(3n)$, so
\[\sum_{w \le p < z} \frac{\nu_{\mathcal H}(p) \log p}{p} \ge k \log \frac zw - O(k) - \sum_{p|D_{\mathcal H}} \frac{k}{p} \log p \ge k \log \frac zw - O(k) - O(k\log\log(3|D_{\mathcal H}|)),\]
and thus ($\Omega_2(\kappa,L)$) is satisfied with $\kappa = k$, $\alpha_2 = O(k)$ and $L = O(k\log\log(3|D_{\mathcal H}|))$. \end{proof}
For $d$ squarefree, let $g(d) = \frac{\nu_{\mathcal H}(d)}{d \prod_{p|d}(1-\nu_{\mathcal H}(p)/p)}$, let $G(z) = \sum_{d < z} \mu(d)^2g(d),$ and more generally for any $x$ define $G(x,z) = \sum_{\substack{d < x \\ d|P(z)}} \mu(d)^2 g(d)$, so that $G(z) = G(z,z)$. Let $W(z) = \prod_{p<z}\left(1-\frac{\nu_{\mathcal H}(p)}{p}\right).$ \begin{lemma}[After Lemma 5.4 from \cite{MR0424730}]\label{lem:distributiontail:selberg:HR5.4}
Fix a set $\mathcal H = \{h_1, \dots, h_k\}$, and define $\nu_{\mathcal H}$, $g(d)$ $G(x,z),$ and $W(z)$. Let $L = k\log\log(3|D_{\mathcal H}|)$, and let $z > 0$ be a number such that for sufficiently large constants $B_L$ and $B_k$, $L \le \frac 1{B_L} \log z$ and $k^2 \le \frac 1{B_k} \log z$. Then \begin{equation*} \frac 1{G(z)} = W(z)e^{\gamma k} \Gamma(k+1)\left(1 + O\left(\frac{L + k^4}{\log z}\right)\right). \end{equation*} \end{lemma}
\begin{proof} This proof closely follows the proof of \cite[Lemma 5.4]{MR0424730}, so here we simply highlight the differences, which arise only in that here we keep track of the dependence on $k$ in the form of the constants $\alpha_1, \alpha_2$, $\kappa$, and $L$. By Lemma \ref{lem:conditions-r-omegas-hold}, conditions ($R$), ($\Omega_1$), and ($\Omega_2(k,L)$) hold.
In \cite{MR0424730}, Halberstam and Richert show that for $0 < z \le x$, \begin{align*}
\subsum{d < x \\ d|P(z)} g(d) \log d = &\subsum{d < x \\ d|P(z)} g(d) \sum_{p < \min\{x/d,z\}} \frac{\nu_{\mathcal H}(p)}{p} \log p \\
&+ \subsum{xz^{-2} \le d < x \\ d|P(z)} g(d) \subsum{\sqrt{x/d} \le p < \min\{x/d,z\} \\ p \nmid d} \frac{g(p)\nu_{\mathcal H}(p)}{p} \log p. \end{align*} Halberstam and Richert use ($\Omega_2(k,L)$) to evaluate the first inner sum. For the second inner sum, note that $\frac{\nu_{\mathcal H}(p)}{p} \log p \le \alpha_2$ (see \cite[Equation (2.3.8)]{MR0424730}), and by \cite[Equation (2.3.11)]{MR0424730} we have for any $2 \le a \le b$ that \begin{equation}\label{eq:sum-g-over-primes-bound} \sum_{a \le p < b} g(p) \le k \log \frac{\log b}{\log a} + \frac{\alpha_2}{\log a} + \frac{\alpha_1\alpha_2}{\log a}\left(k+\frac{\alpha_2}{\log a}\right) = k \log \frac{\log b}{\log a} + O\left(\frac{k^3}{\log a}\right), \end{equation} which implies that \begin{align*} \subsum{\sqrt{x/d} \le p < \min\{x/d,z\} \\ p \nmid d} \frac{g(p)\nu_{\mathcal H}(p)}{p} \log p &\le \alpha_2 \subsum{\sqrt{x/d} \le p < \min\{x/d,z\} \\ p \nmid d}g(p) \\ &\ll \alpha_2k + \alpha_2^2 + \alpha_1\alpha_2^2k + \alpha_1\alpha_2^3 \ll k^4. \end{align*} Combining these estimates, we get \begin{align*}
\subsum{d < x \\ d|P(z)} g(d) \log d = &\subsum{x/z \le d < x \\ d|P(z)} g(d) \left(k\log \frac xd + O(L)\right) \\
&+ \subsum{d<x/z \\ d|P(z)} g(d) (k \log z + O(L)) + O(k^4G(x,z)) \\
&= k \subsum{d<x \\ d|P(z)} g(d) \log \frac xd - k \subsum{d<x/z \\ d|P(z)} g(d) \log \frac{x/z}{d} + O((L + k^4)G(x,z)). \end{align*}
This expression is identical to what appears in \cite[Page 149]{MR0424730} in the proof of Lemma 5.4, except that the factor of $L$ in the error term is replaced by a factor of $L + k^4$. The rest of the proof applies to our situation without change, except for replacing factors of $L$ in the error term with $L+k^4$, so that, as in \cite[Equations (3.10) and (3.13)]{MR0424730}, we get \begin{equation*} G(z) = \frac 1{\mathfrak S(\mathcal H)\Gamma(k+1)} (\log z)^k \left(1+ O\left(\frac{L+k^4}{\log z}\right)\right). \end{equation*}
By following the proof of \cite[Lemma 5.2]{MR0424730} and using \eqref{eq:sum-g-over-primes-bound} in place of \cite[Equation (2.3.4)]{MR0424730}, we get that for some constant $C \in \R$, \begin{equation}\label{eq:distributiontail:lemma5.2hrbound} C\frac{L}{\log a} \le \sum_{a \le p < b} \frac{\nu_{\mathcal H}(p)}{p}-\kappa \sum_{a \le p < b} \frac 1p \le O\left(\frac{k^3}{\log a}\right). \end{equation} Note that by ($\Omega_1$) and \cite[Equation (2.3.9)]{MR0424730}, we have $\sum_{a \le p < b} g^2(p) = O(k^4/\log a)$. Thus, by following the proof of \cite[Lemma 5.3]{MR0424730} and using \eqref{eq:distributiontail:lemma5.2hrbound}, we get that \begin{equation*} \prod_{p \ge z} \left(1 - \frac{\nu_{\mathcal H}(p)}{p}\right)^{-1}\left(1-\frac 1p\right)^k = 1 + O\left(\frac{L+k^4}{\log z}\right). \end{equation*} This implies that \begin{equation}\label{eq:distributiontail:Wzestimate} W(z) = \mathfrak S(\mathcal H) \frac{e^{-\gamma k}}{(\log z)^k}\left( 1+O\left(\frac{L+k^4}{\log z}\right)\right), \end{equation} which corresponds to \cite[Equation (5.2.5)]{MR0424730} and completes the proof. \end{proof}
We also make use of \cite[Theorem 3.1]{MR0424730}, which we cite without modification. \begin{theorem}[Theorem 3.1, Halberstam--Richert, \cite{MR0424730}]\label{thm:distributiontai:hrthm3.1} Using the notation of this section, with $S(\mathcal A; \mathcal P,z)$ defined in \eqref{eq:distributiontail:selberg:SAPzdef} and satisfying ($R$) and ($\Omega_1$), we have \begin{equation*} S(\mathcal A;\mathcal P,z) \le \frac{x}{G(z)} + \frac{z^2}{W^3(z)}. \end{equation*} \end{theorem}
We are now ready to prove Theorem \ref{thm:distributiontail:selbergsievethm}, following the proof of \cite[Theorem 5.7]{MR0424730}. Fix $z < x$ to be chosen later; we estimate $S(\mathcal A;\mathcal P,z)$, which is an upper bound for our desired quantity. By Theorem \ref{thm:distributiontai:hrthm3.1}, \begin{equation*} S(\mathcal A;\mathcal P,z) \le \frac{x}{G(z)} + \frac{z^2}{W^3(z)}. \end{equation*} Applying Lemma \ref{lem:distributiontail:selberg:HR5.4} yields \begin{equation*} S(\mathcal A;\mathcal P,z) \le xW(z)e^{\gamma k}\Gamma(k+1)\left(1+O\left(\frac{L+k^4}{\log z}\right)\right) + x\frac{z^2}{xW^3(z)}. \end{equation*} By equation (2.3.12) in \cite{MR0424730}, \begin{equation*} \frac 1{W(z)} \ll e^{O(k^3)}(\log z)^k, \end{equation*} which implies that \begin{equation*} \frac{z^2}{W^3(z)} = xW(z)\frac{z^2}{xW^4(z)} = xW(z)O\left(\frac{z^2(\log z)^{4k}e^{O(k^3)}}{x}\right), \end{equation*} and thus \begin{equation*} S(\mathcal A;\mathcal P,z) \le xW(z)\left(e^{\gamma k}\Gamma(k+1)\left(1+O\left(\frac{L+k^4}{\log z}\right)\right) + O\left(\frac{z^2(\log z)^{4k}e^{O(k^3)}}{x}\right)\right). \end{equation*}
Plugging in \eqref{eq:distributiontail:Wzestimate}, we get \begin{equation*} S(\mathcal A;\mathcal P,z) \le \Gamma(k+1) \mathfrak S(\mathcal H) \frac{x}{(\log z)^k} \left(1 +O\left(\frac{L+k^4}{\log z}\right) + O\left(\frac{z^2(\log z)^{4k}e^{O(k^3)}}{x}\right)\right). \end{equation*} Set $z = x^{1/(2 +\varepsilon)}$ to complete the proof, keeping in mind that $k = o((\log x)^{1/4})$.
\end{document} |
\begin{document}
\newcommand{\spacing}[1]{\renewcommand{\baselinestretch}{#1}\large\normalsize} \spacing{1.14}
\title{On the curvature of invariant Kropina metrics}
\author {H. R. Salimi Moghaddam}
\address{Department of Mathematics, Faculty of Sciences, University of Isfahan, Isfahan,81746-73441-Iran.} \email{salimi.moghaddam@gmail.com and hr.salimi@sci.ui.ac.ir}
\keywords{invariant metric, flag curvature, $(\alpha,\beta)-$metric, Kropina metric, homogeneous space, Lie group\\ AMS 2010 Mathematics Subject Classification: 22E60, 53C60, 53C30.}
\begin{abstract} In the present article we compute the flag curvature of a special type of invariant Kropina metrics on homogeneous spaces. \end{abstract}
\maketitle
\section{\textbf{Introduction}}\label{intro} Let $M$ be a smooth $n-$dimensional manifold and $TM$ be its tangent bundle. A Finsler metric on $M$ is a non-negative function $F:TM\longrightarrow \Bbb{R}$ which has the following properties: \begin{enumerate}
\item $F$ is smooth on the slit tangent bundle
$TM^0:=TM\setminus\{0\}$,
\item $F(x,\lambda y)=\lambda F(x,y)$ for any $x\in M$, $y\in T_xM$ and $\lambda
>0$,
\item the $n\times n$ Hessian matrix $[g_{ij}(x,y)]=[\frac{1}{2}\frac{\partial^2 F^2}{\partial y^i\partial
y^j}]$ is positive definite at every point $(x,y)\in TM^0$. \end{enumerate} For a smooth manifold $M$ suppose that $g$ and $b$ are a Riemannian metric and a 1-form respectively as follows: \begin{eqnarray}
g&=&g_{ij}dx^i\otimes dx^j \\
b&=&b_idx^i. \end{eqnarray} An important family of Finsler metrics is the family of $(\alpha,\beta)-$metrics which is introduced by M. Matsumoto (see \cite{Ma}) and has been studied by many authors. An interesting and important example of such metrics is the Kropina metrics with the following form: \begin{eqnarray}
F(x,y)=\frac{\alpha(x,y)^2}{\beta(x,y)}, \end{eqnarray} where $\alpha(x,y)=\sqrt{g_{ij}(x)y^iy^j}$ and $\beta(x,y)=b_i(x)y^i$.\\
In a natural way, the Riemannian metric $g$ induces an inner product on any cotangent space $T^\ast_xM$ such that $<dx^i(x),dx^j(x)>=g^{ij}(x)$. The induced inner product on $T^\ast_xM$ induces a linear isomorphism between $T^\ast_xM$ and $T_xM$ (for more details see \cite{DeHo}.). Then the 1-form $b$ corresponds to a vector field $\tilde{X}$ on $M$ such that \begin{eqnarray}
g(y,\tilde{X}(x))=\beta(x,y). \end{eqnarray}
Therefore we can write the Kropina metric $F=\frac{\alpha^2}{\beta}$ as follows: \begin{eqnarray}
F(x,y)=\frac{\alpha(x,y)^2}{g(\tilde{X}(x),y)}. \end{eqnarray} Flag curvature, which is a generalization of the concept of sectional curvature in Riemannian geometry, is one of the fundamental quantities which associates with a Finsler space. Flag curvature is computed by the following formula: \begin{eqnarray}\label{flag}
K(P,Y)=\frac{g_Y(R(U,Y)Y,U)}{g_Y(Y,Y).g_Y(U,U)-g_Y^2(Y,U)}, \end{eqnarray}
where $g_Y(U,V)=\frac{1}{2}\frac{\partial^2}{\partial s\partial t}(F^2(Y+sU+tV))|_{s=t=0}$, $P=span\{U,Y\}$, $R(U,Y)Y=\nabla_U\nabla_YY-\nabla_Y\nabla_UY-\nabla_{[U,Y]}Y$ and $\nabla$ is the Chern connection induced by $F$ (see \cite{BaChSh} and \cite{Sh}.).\\ In general, the computation of the flag curvature of Finsler metrics is very difficult, therefore it is important to find an explicit and applicable formula for the flag curvature. In \cite{EsSa}, we have studied the flag curvature of invariant Randers metrics on naturally reductive homogeneous spaces and in \cite{Sa1} we generalized this study on a general homogeneous space. Also in \cite{Sa2} we considered $(\alpha,\beta)-$metrics of the form $\frac{(\alpha+\beta)^2}{\alpha}$ and gave the flag curvature of these metrics. In this paper we study the flag curvature of invariant Kropina metrics on homogeneous spaces.
\section{\textbf{Flag curvature of invariant Kropina metrics on homogeneous spaces}} Let $G$ be a compact Lie group, $H$ a closed subgroup, and $g_0$ a bi-invariant Riemannian metric on $G$. Assume that $\frak{g}$ and $\frak{h}$ are the Lie algebras of $G$ and $H$ respectively. The tangent space of the homogeneous space $G/H$ is given by the orthogonal complement $\frak{m}$ of $\frak{h}$ in $\frak{g}$ with respect to $g_0$. Each invariant metric $g$ on $G/H$ is determined by its restriction to $\frak{m}$. The arising $Ad_H$-invariant inner product from $g$ on $\frak{m}$ can extend to an $Ad_H$-invariant inner product on $\frak{g}$ by taking $g_0$ for the components in $\frak{h}$. In this way the invariant metric $g$ on $G/H$ determines a unique left invariant metric on $G$ that we also denote by $g$. The values of $g_0$ and $g$ at the identity are inner products on $\frak{g}$. We denote them by $<.,.>_0$ and $<.,.>$. The inner product $<.,.>$ determines a positive definite endomorphism $\phi$ of $\frak{g}$ such that $<X,Y>=<\phi X,Y>_0$ for all $X, Y\in\frak{g}$.\\ T. P\"uttmann has shown that the curvature tensor of the invariant metric $<.,.>$ on the compact homogeneous space $G/H$ is given by \begin{eqnarray}\label{puttmans formula}
<R(X,Y)Z,W> &=& -\{\frac{1}{2}(<B_-(X,Y),[Z,W]>_0+<[X,Y],B_-(Z,W)>_0) \nonumber \\
&+& \frac{1}{4}(<[X,W],[Y,Z]_{\frak{m}}>-<[X,Z],[Y,W]_{\frak{m}}> \\
&-& 2<[X,Y],[Z,W]_{\frak{m}}>)+(<B_+(X,W),\phi^{-1}B_+(Y,Z)>_0 \nonumber\\
&-&<B_+(X,Z),\phi^{-1}B_+(Y,W)>_0)\}\nonumber, \end{eqnarray} where $B_+$ and $B_-$ are defined by \begin{eqnarray*}
B_+(X,Y) &=& \frac{1}{2}([X,\phi Y]+[Y,\phi X]), \\
B_-(X,Y) &=& \frac{1}{2}([\phi X,Y]+[X,\phi Y]), \end{eqnarray*} and $[.,.]_{\frak{m}}$ is the projection of $[.,.]$ to $\frak{m}$.(see \cite{Pu}.).
\textbf{Notice.} We have added a minus sign to P\"uttmann's formula because our definition of the curvature tensor $R$ differs from P\"uttmann's definition by a sign.
\begin{theorem}\label{flagcurvature} Let $G, H, \frak{g}, \frak{h}, g, g_0$ and $\phi$ be as above. Assume that $\tilde{X}$ is an invariant vector field on $G/H$ and $X:=\tilde{X}_H$. Suppose that $F=\frac{\alpha^2}{\beta}$ is the Kropina metric arising from $g$ and $\tilde{X}$ such that its Chern connection coincides with the Levi-Civita connection of $g$. Suppose that $(P,Y)$ is a flag in $T_H(G/H)$ such that $\{Y,U\}$ is an orthonormal basis of $P$ with respect to $<.,.>$. Then the flag curvature of the flag $(P,Y)$ in $T_H(G/H)$ is given by \begin{equation}\label{main-flag-cur-formula}
K(P,Y)=\frac{3<U,X><R(U,Y)Y,X>+2<Y,X><R(U,Y)Y,U>}{2(\frac{<U,X>}{<Y,X>})^2+2}, \end{equation} where \begin{eqnarray}
<R(U,Y)Y,X>&=&-\frac{1}{4}(<[\phi U,Y]+[U,\phi Y],[Y,X]>_0+<[U,Y],[\phi Y,X]+[Y,\phi X]>_0)\nonumber\\
&&-\frac{3}{4}<[Y,U],[Y,X]_\frak{m}>-\frac{1}{2}<[U,\phi X]+[X,\phi U],\phi^{-1}([Y,\phi Y])>_0\\
&&+\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi X]+[X,\phi
Y])>_0,\nonumber
\end{eqnarray} and \begin{eqnarray}
<R(U,Y)Y,U>&=&-\frac{1}{2}<[\phi U,Y]+[U,\phi Y],[Y,U]>_0\nonumber \\
&& \ \ \ -\frac{3}{4}<[Y,U],[Y,U]_{\frak{m}}>-<[U,\phi U],\phi^{-1}([Y,\phi Y])>_0 \\
&& \ \ \ +\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi U]+[U, \phi Y])>_0.\nonumber \end{eqnarray} \end{theorem}
\begin{proof} The Chern connection of $F$ coincides with the Levi-Civita connection of $g$. Therefore the Finsler metric $F$ and the Riemannian metric $g$ have the same curvature tensor. We denote it by $R$.\\ By using the definition of $g_Y(U,V)$ and some computations for $F$ we have: \begin{eqnarray}\label{g_Y}
g_Y(U,V)&=& \frac{1}{g^4(Y,X)}\{(2g(Y,U)g(Y,X)-g(U,X)g(Y,Y))(2g(Y,V)g(Y,X)-g(V,X)g(Y,Y))\nonumber \\
&&+g(Y,Y)(g(Y,X)(2g(U,V)g(Y,X)+2g(Y,V)g(U,X)-2g(V,X)g(Y,U))\\
&&-2g(U,X)(2g(Y,V)g(Y,X)-g(V,X)g(Y,Y)))\}\nonumber \end{eqnarray} Since $\{Y,U\}$ is an orthonormal basis of $P$ with respect to $g$, equation (\ref{g_Y}) gives \begin{eqnarray}\label{eq1}
g_Y(R(U,Y)Y,U)&=&\frac{1}{<Y,X>^4}\{\nonumber\\
&&<U,X>(3<R(U,Y)Y,X>-2<Y,R(U,Y)Y><Y,X>)\\
&&+2<Y,X>(<R(U,Y)Y,U><Y,X>-<U,X><Y,R(U,Y)Y>)\},\nonumber \end{eqnarray} and \begin{eqnarray}\label{eq2}
g_Y(Y,Y).g_Y(U,U)-g^2_Y(U,Y)&=&
\frac{2<U,X>^2}{<Y,X>^6}+\frac{2}{<Y,X>^4}. \end{eqnarray} Now by using P\"uttmann's formula (\ref{puttmans formula}) we have: \begin{eqnarray}\label{eq3} <X,R(U,Y)Y>&=&-\frac{1}{4}(<[\phi U,Y]+[U,\phi Y],[Y,X]>_0+<[U,Y],[\phi Y,X]+[Y,\phi X]>_0)\nonumber\\
&&-\frac{3}{4}<[Y,U],[Y,X]_\frak{m}>-\frac{1}{2}<[U,\phi X]+[X,\phi U],\phi^{-1}([Y,\phi Y])>_0\\
&&+\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi X]+[X,\phi
Y])>_0,\nonumber \end{eqnarray}
\begin{eqnarray}\label{eq4}
<R(U,Y)Y,Y>=0, \end{eqnarray} and \begin{eqnarray}\label{eq5}
<R(U,Y)Y,U>&=&-\frac{1}{2}<[\phi U,Y]+[U,\phi Y],[Y,U]>_0\nonumber \\
&& \ \ \ -\frac{3}{4}<[Y,U],[Y,U]_{\frak{m}}>-<[U,\phi U],\phi^{-1}([Y,\phi Y])>_0 \\
&& \ \ \ +\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi U]+[U, \phi Y])>_0.\nonumber \end{eqnarray} Substituting the equations (\ref{eq1}), (\ref{eq2}), (\ref{eq3}), (\ref{eq4}) and (\ref{eq5}) in the equation (\ref{flag}) completes the proof.
\end{proof}
Now we continue our study with a special type of Riemannian homogeneous spaces which has been named naturally reductive. We remind that a homogeneous space $M=G/H$ with a $G-$invariant indefinite Riemannian metric $g$ is said to be naturally reductive if it admits an $ad(H)$-invariant decomposition $\frak{g}=\frak{h}+\frak{m}$ satisfying the condition \begin{eqnarray}
B(X,[Z,Y]_{\frak{m}})+B([Z,X]_{\frak{m}},Y)=0 \hspace{1.5cm}\mbox{for} \ \ \ X, Y, Z \in
\frak{m}, \end{eqnarray} where $B$ is the bilinear form on $\frak{m}$ induced by $\frak{g}$ and $[,]_{\frak{m}}$ is the projection to $\frak{m}$ with respect to the decomposition $\frak{g}=\frak{h}+\frak{m}$ (For more details see \cite{KoNo}.).\\ In this case the above formula for the flag curvature reduces to a simpler equation.
\begin{theorem} In the previous theorem let $G/H$ be a naturally reductive homogeneous space. Then the flag curvature of the flag $(P,Y)$ in $T_H(G/H)$ is given by (\ref{main-flag-cur-formula}), where \begin{eqnarray}
R(U,Y)Y&=&\frac{1}{4}[Y,[U,Y]_{\frak{m}}]_{\frak{m}}+[Y,[U,Y]_{\frak{h}}] \end{eqnarray} \end{theorem}
\begin{proof} By using Proposition 3.4 in \cite{KoNo} (page 202) the claim clearly follows. \end{proof}
If the invariant Kropina metric is defined by a bi-invariant Riemannian metric on a Lie group, then there is a simpler formula for the flag curvature; we give this formula in the following theorem. \begin{theorem} Let $G$ be a Lie group, $g$ be a bi-invariant Riemannian metric on $G$, and $\tilde{X}$ be a left invariant vector field on $G$. Suppose that $F=\frac{\alpha^2}{\beta}$ is the Kropina metric defined by $g$ and $\tilde{X}$ on $G$ such that the Chern connection of $F$ coincides with the Levi-Civita connection of $g$. Then for the flag curvature of the flag $P=\mathrm{span}\{Y,U\}$, where $\{Y,U\}$ is an orthonormal basis for $P$ with respect to $g$, we have: \begin{eqnarray}\label{flagbi-invariant}
K(P,Y)=\frac{-3<U,X><[[U,Y],Y],X>-2<Y,X><[[U,Y],Y],U>}{8(\frac{<U,X>}{<Y,X>})^2+8}, \end{eqnarray} \end{theorem}
\begin{proof} Since $g$ is bi-invariant, we have $R(U,Y)Y=-\frac{1}{4}[[U,Y],Y]$. Now by using Theorem \ref{flagcurvature} the proof is completed. \end{proof}
\large{\textbf{Acknowledgment}}\\ This work was supported by the research grant from Shahrood University of Technology.
\end{document} |
\begin{document}
\title{Two invariant surface-tensors determine CSL of massive body wave function} \author{Lajos Di\'osi} \email{diosi.lajos@wigner.mta.hu} \affiliation{Wigner Research Center for Physics\\
H-1525 Budapest 114, P.O.Box 49, Hungary}
\date{\today}
\begin{abstract} Decoherence of massive body wave function under Continuous Spontaneous Localization is reconsidered. It is shown for homogeneous probes with wave functions narrow in position and angle that decoherence is a surface effect. Corresponding new surface integrals are derived as the main result. Probe's constant density and two completely geometric surface-dependent invariant tensors encode full dependence of positional and angular decoherence of masses, irrespective of their microscopic structure. The two surface-tensors offer a new insight into CSL and a flexible approach to design laboratory test masses. \end{abstract}
\maketitle
\section{Introduction}\label{I} Spontaneous decoherence and collapse models, reviewed e.g. by \cite{BasGhi03,Basetal13} share the form of modified von Neumann equation of motion for the quantum state $\ro$: \begin{equation}\label{ME} \frac{d\ro}{dt}=-\frac{\im}{\hbar}[\Ho,\ro]+\Dcal\ro, \end{equation} where $\Ho$ is the many-body Hamiltonian of masses $m_a$ with positions $\xbh_a$ and momenta $\pbh_a$, resp., for $a=1,2,\dots$. The term of spontaneous decoherence takes this generic form: \begin{equation}\label{Dec0} \Dcal\ro=-\int\int D(\rb-\rbp)[\vro(\rb),[\vro(\rbp),\ro]]\dd\rb\dd\rbp, \end{equation} containing the mass density operator at location $\rb$: \begin{equation}\label{densop} \vro(\rb)=\sum_a m_a\delta(\rb-\xbh_a). \end{equation} The non-negative decoherence kernel $D(\rb-\rbp)$ is model dependent. In a conference talk \cite{Dio14}, I compared some characteristic features of the two leading proposals, the CSL of Ghirardi, Pearle, and Rimini, and the DP-model of Penrose and myself \cite{Pen96,Dio87}. I visualized some observations on CSL in Fig. \ref{fig-1} that have been waiting for mathematical formulation until now. In recent literature, the central mathematical object is the \emph{geometric factor} of decoherence: \begin{equation}\label{gf} \mu_\kb=\sum_a m_a \erm^{-\im\kb\rb_a}, \end{equation} introduced by \cite{NimHorHam14}, also discussed by \cite{AdlBasCar19} in this volume. This object is the Fourier-transform of the classical mass density in the c.o.m. frame: \begin{equation} \mu(\rb)=\sum_a m_a \delta(\rb-\rb_a). \end{equation}
Usually, the contribution of the geometric factor
is evaluated in the Fourier-representation. I am going to show that working in the physical space
instead of Fourier's is not only possible but even desirable.
In Sec. \ref{II} we recapitulate the decoherence of c.o.m. motion in terms of the geometric factor. For constant density probes, Sec. \ref{III} derives a new practical expression of the decoherence in terms of a simple surface integral; the method is applied to angular (rotational) decoherence in Sec. \ref{IV}. Possible generalizations towards probes with unsharp edges and for wider superpositions are outlined in Sec. \ref{V}, while Sec. \ref{VI} is for conclusion and outlook.
\begin{figure}
\caption{For a generic shape, both position \& angle decohere (left). For a sphere, angle does not decohere (middle). $N$ perpendicular gaps enhance longitudinal decoherence by a factor of about $(N+1)$ (right).}
\label{fig-1}
\end{figure}
\section{Center-of-mass decoherence}\label{II} The CSL model introduces two universal parameters, collapse rate $\lambda=10^{-16}\,\mathrm{s}^{-1}$, localization $\sigma=10^{-5}\,\mathrm{cm}$, and it contains the nuclear mass $m_N$. The decoherence kernel $D(\rb-\rbp)$ is a Gaussian whose nonlocal effect can be absorbed by a Gaussian smoothening of the mass density $\vro(\rb)$. The key quantity is the $\sigma$-smoothened mass distribution operator: \begin{equation}\label{mdens} \vro_\sigma(\rb)=\sum_a m_a G_\sigma(\rb-\xbh_a), \end{equation} where $G_\sigma(\rb)$ is the centrally symmetric Gaussian distribution of width $\sigma$. Then the decoherence term (\ref{Dec0}) becomes a single integral: \begin{equation}\label{Dec} \Dcal\ro=-\frac{4\pi^{3/2}\lambda\sigma^3}{m_N^2}\int[\vro_\sigma(\rb),[\vro_\sigma(\rb),\ro]]\dd\rb. \end{equation} Inserting Eq. (\ref{mdens}), Fourier-representation yields this equivalent form: \begin{equation}\label{DecFou} \Dcal\ro=-\frac{\lambda\sigma^3}{2\pi^{3/2}m_N^2}\int\erm^{-\kb^2\sigma^2}\sum_{a,b}m_a m_b[\erm^{\im\kb\xbh_a},[\erm^{-\im\kb\xbh_b},\ro]]\dd\kb. \end{equation} We are interested in the c.o.m. dynamics of the total mass $M=\sum_a m_a$: \begin{equation}\label{MEcom} \frac{d\ro_\mathrm{cm} }{dt}=-\frac{\im}{\hbar}[\Ho_\mathrm{cm} ,\ro_\mathrm{cm} ]+\Dcal_\mathrm{cm} \ro_\mathrm{cm} , \end{equation} where $\Xbh,\Pbh$ will stand for the c.o.m. coordinate and momentum. To derive the c.o.m. decoherence term, substitute $\xbh_a=\Xbh+\rb_a$ in (\ref{DecFou}),
where $\rb_a$ are the equilibrium values of the constituent coordinates in the c.o.m. frame. Then Eq. (\ref{DecFou}) reduces to the following c.o.m. decoherence term: \begin{equation}\label{Deccom} \Dcal_\mathrm{cm} \ro_\mathrm{cm} =-\frac{\lambda\sigma^3}{\pi^{3/2}m_N^2}\int\erm^{-\kb^2\sigma^2}\vert\mu_\kb\vert^2\left(\erm^{\im\kb\Xbh}\ro_\mathrm{cm} \erm^{-\im\kb\Xbh}-\ro_\mathrm{cm} \right)\dd\kb, \end{equation} where we recognize the presence of the geometric factor $\mu_\kb$. At small quantum uncertainties, when $\vert\Delta\Xb\vert\ll\sigma$, we use the momentum-diffusion equation as a good approximation: \begin{equation}\label{Deccomxx} \Dcal_\mathrm{cm} \ro_\mathrm{cm} =-\frac{\lambda\sigma^3}{2\pi^{3/2}m_N^2}\int\erm^{-\kb^2\sigma^2}\vert\mu_\kb\vert^2[\kb\Xbh,[\kb\Xbh,\ro_\mathrm{cm} ]]\dd\kb. \end{equation}
\section{Invariant surface-tensor for c.o.m. decoherence}\label{III} As we see, the geometric factor $\mu_\kb$ itself does not matter but its squared modulus does. We consider the approximation (\ref{Deccomxx}) which allows for a spectacular simple geometric interpretation of the relevant structure \begin{equation}\label{geom1} \int\erm^{-\kb^2\sigma^2}\vert\mu_\kb\vert^2(\kb\circ\kb)\dd\kb =(2\pi)^3\int \nabla\mu_\sigma(\rb)\circ\nabla\mu_\sigma(\rb)\dd\rb. \end{equation} We can recognize $\mu_\sigma(\rb)$ as the $\sigma$-smoothened mass density in the c.o.m. frame. This latter form becomes amazingly useful if the bulk is much larger than $\sigma$ and possesses constant density $\varrho$ when averaged over the scale of $\sigma$. If, furthermore, we assume the density drops sharply from $\varrho$ to zero through the surface then $\nabla\mu_\sigma(\rb)$ is vanishing everywhere but in about a $\sigma$-layer around the surface. Let $\nb$ stand for the normal vector of the surface at a given point $\rb$ and let $h$ be the height above the surface, then \begin{equation}\label{profile0} \nabla\mu_\sigma(\rb+h\nb)=-\varrho \nb g_\sigma(h), \end{equation} $g_\sigma(h)$ is the central Gaussian of width $\sigma$. The volume integral can be rewritten, with good approximation, as an integral along $h$ and a subsequent surface integral: \begin{equation}\label{geom2} (2\pi)^3\int \nabla\mu_\sigma(\rb)\circ\nabla\mu_\sigma(\rb)\dd\rb=(2\pi)^3\varrho^2\oint \nb\circ\nb\left(\int g_\sigma^2(h)\dd h\right)\dd S=\frac{(2\pi)^3\varrho^2}{2\pi^{1/2}\sigma}\oint(\nb\circ\nb)\dd S. \end{equation} Using Eqs. (\ref{geom1}) and (\ref{geom2}), the decoherence term (\ref{Deccomxx}) obtains the attractive form \begin{equation}\label{Deccomxxgeom} \Dcal_\mathrm{cm} \ro_\mathrm{cm} =-\frac{2\pi\lambda\sigma^2\varrho^2}{m_N^2}\oint[\nb\Xbh,[\nb\Xbh,\ro_\mathrm{cm} ]]\dd S. \end{equation} \emph{This is our main result.} It shows that the c.o.m. 
decoherence is completely determined by the constant density $\varrho$ and the shape of the body, through the \emph{surface-tensor} \begin{equation}\label{shape1} \mathrm{S}:=\oint(\nb\circ\nb)\dd S. \end{equation} In CSL, the c.o.m. decoherence of homogeneous bulks is a \emph{surface effect}!
Observe that the main result (\ref{Deccomxxgeom}) remains valid if the probe has cavities in it. By carving cavities inside the otherwise homogeneous probe, CSL decoherence can thus be multiplied (cf. Fig. \ref{fig-1}). This explains the enhanced decoherence in layered structures, proposed by \cite{CarVinBas18}.
The heating rate $\Gamma_\mathrm{cm} =\Dcal_\mathrm{cm} (\Pbh^2/2M)$ of the c.o.m. motion is now easy to write in a more explicit form than before. Reading $\Dcal_\mathrm{cm} $ off from (\ref{Deccomxxgeom}), one immediately obtains \begin{equation}\label{heatcm} \Gamma_\mathrm{cm} =\frac{2\pi\lambda\sigma^2\varrho^2}{m_N^2}\frac{S}{M} =\frac{2\pi\lambda\sigma^2\varrho}{m_N^2}\frac{S}{V}, \end{equation} where $S$ is the total surface (including cavities' internal surfaces) and $V$ is the total volume (excluding cavities). Note that $\Gamma_\mathrm{cm} $ is the same if we start from the general dynamics (\ref{Deccom}) not restricted by $\vert\Delta\Xb\vert\ll\sigma$. Interestingly, c.o.m. heating is inversely proportional to the size of the bulk. Recall the total heating rate \begin{equation} \Gamma=\Dcal\sum_a\frac{\pbh_a^2}{2m_a}=\frac{3\hbar^2\lambda}{2m_N^2\sigma^2}M, \end{equation} which is always much larger than the c.o.m. heating. For a sphere of radius $R$ we get $\Gamma_\mathrm{cm}/\Gamma=3(\sigma/R)^4$.
\emph{Examples.} Consider the longitudinal motion of a cylinder, for which Eq. (\ref{Deccomxxgeom}) reduces to \begin{equation}\label{Deccomxxgeomrod} \Dcal_\mathrm{cm} \ro_\mathrm{cm} =-\frac{2\pi\lambda\sigma^2\varrho^2}{m_N^2}S_\perp[\hat x,[\hat x,\ro_\mathrm{cm} ]], \end{equation} where $S_\perp$ is the total surface perpendicular to the motion (i.e.: the area of both faces of the cylinder). At a given constant density $\varrho$, the decoherence is independent of the length of the cylinder. It can be squeezed to become a plate or elongated to become a rod. This invariance of the decoherence offers fair guidance when we design laboratory probes. However, the same invariance may raise conceptual questions as well. With increasing length of the rod while decoherence rate remains constant, CSL might leave the longitudinal superposition of our massive rod with counter-intuitively long coherence times. Another remarkable feature of the surface-tensor $\mathrm{S}$ is that spontaneous decoherence in one direction can be decreased by tilted edges instead of perpendicular ones. If the faces of the cylinder are replaced by cones of apex angle $\theta$ then spontaneous longitudinal decoherence becomes suppressed by the factor $\sin(\theta/2)$. E.g.: sharp pointed needles become extremely insensitive to longitudinal CSL.
\section{Rotational decoherence}\label{IV} Our main result (\ref{Deccomxxgeom}) on decoherence of lateral superpositions tells us how to calculate decoherence of angular superpositions. It turns out that rotational decoherence, too, is a surface effect. Let us consider small rotations around a single axis $\nb_\mathrm{rot}$ for convenience. The small lateral displacement $\nb\Xbh-\langle\nb\Xbh\rangle$ ---effective in (\ref{Deccomxxgeom})--- will be replaced by the small rotational displacement $\nb(\rb\times\nb_\mathrm{rot})(\hat\varphi-\langle\hat\varphi\rangle)$ where $\hat\varphi$ is the angle of rotation. Then, with the scalar triple product notation, the main equation (\ref{Deccomxxgeom}) reads: \begin{equation}\label{Deccomphiphigeom} \Dcal_\mathrm{cm} ^\mathrm{rot}\ro_\mathrm{cm} =-\frac{2\pi\lambda\sigma^2\varrho^2}{m_N^2} \oint [\rb,\nb,\nb_\mathrm{rot}]^2 \dd S~[\hat\varphi,[\hat\varphi,\ro_\mathrm{cm} ]]. \end{equation} Rotational decoherence is determined by the constant density $\varrho$ and the \emph{rotational surface-tensor}: \begin{equation} \mathrm{S}_\mathrm{rot}:=\oint(\rb\times\nb)\circ(\rb\times\nb)\dd S. \end{equation} Remember, our starting equation (\ref{Deccomxxgeom}) was valid for $\vert\Delta \Xb\vert\ll\sigma$ only, hence the validity of (\ref{Deccomphiphigeom}) requires the corresponding smallness of the angular uncertainties.
Calculation of the spontaneous heating rate of the rotational degrees of freedom is straightforward, yielding \begin{equation}\label{heatrot} \Gamma_\mathrm{rot} =\frac{2\pi\lambda\sigma^2\varrho}{m_N^2}\mathrm{Tr}(\mathrm{I}^{-1}\mathrm{S}_\mathrm{rot}), \end{equation} where $\mathrm{I} =\int (\rb\circ\rb)\dd\rb$ is the inertia tensor of the probe.
\emph{Examples.} Consider the rotation of a long cylindrical rod of length $L$ and radius $R\ll L$, around a perpendicular axis through its center. All along the rod ---except for its short middle part of size $\sim\!R$--- the expression $[\rb,\nb,\nb_\mathrm{rot}]=r\sin(\Phi)$ is a good approximation where $r\in(-L/2,L/2)$ is the axial coordinate and $\Phi$ is the azimuthal angle of the surface position $\rb$. Using this approximation, we can easily evaluate the axial element of the rotational surface-tensor $\mathrm{S}_\mathrm{rot}$ that controls the angular decoherence (\ref{Deccomphiphigeom}): \begin{equation} \oint [\rb,\nb,\nb_\mathrm{rot}]^2 \dd S = \frac{\pi RL^3}{12}. \end{equation} As another example, consider our cylinder rotating around its axis of symmetry: CSL predicts zero decoherence (cf. Fig. \ref{fig-1}). But we introduce a small elliptical eccentricity $e\ll1$ of the cross section. In leading order, we have $[\rb,\nb,\nb_\mathrm{rot}]=\frac{1}{2} R e^2\sin(2\Phi)$, yielding the following contribution of the shape to the strength of angular decoherence: \begin{equation} \oint [\rb,\nb,\nb_\mathrm{rot}]^2 \dd S = \frac{e^4}{4}\pi R^3 L, \end{equation} that is $(e^4/4)R$ times the volume of the cylinder. Recall that $e^2=2\Delta R/R$ where $\Delta R$ is the small difference between the main diameters of the elliptic cross section. The obtained result may raise the same conceptual problem that we mentioned for the longitudinal superposition of the massive rod/needle: azimuthal superpositions of massive cylinders of low eccentricity may become practically insensitive to CSL.
\section{Outlines of generalizations}\label{V} That in CSL the c.o.m. and rotational decoherences are surface effects for homogeneous probes has been explicitly shown in Secs. \ref{III} and \ref{IV} for ideal sharp edges and for spatial superpositions much smaller than $\sigma$. Both of the latter restrictions can be relaxed and $\Dcal_\mathrm{cm} $ still remains a surface integral.
The case of unsharp edges is not much different from the ideal case. Suppose the density drops from the constant $\varrho$ down to zero with profile $H(h)\varrho$ through a thin layer defining the surface, where the layer's thickness is small w.r.t. the sizes of the probe. Then the following generalization of Eq. (\ref{profile0}) helps: \begin{equation}\label{profile1} \nabla\mu_\sigma(\rb+h\nb)=\varrho \nb \int g_\sigma(h-h^\prime)\dd H(h^\prime). \end{equation} The rest of constructing the surface integral is the same as for Eq. (\ref{profile0}), which described the special case where $H$ was the (descending) step function.
The case of not necessarily small positional and angular quantum uncertainties was described by Eq. (\ref{Deccom}). It takes an equivalent closed form in coordinate representation: \begin{equation}\label{Deccomcoord} \Dcal_\mathrm{cm} \ro_\mathrm{cm} (\Xb,\Yb)=-\frac{\lambda\sigma^3}{\pi^{3/2}m_N^2} (2\pi)^3\int\left[\mu_\sigma(\rb+\Xb)\mu_\sigma(\rb+\Yb)-\mu_\sigma^2(\rb)\right]\dd\rb~\ro_\mathrm{cm} (\Xb,\Yb). \end{equation} The relevant structure is the integral, which we write as \begin{equation} (2\pi)^3\int\left[\mu_\sigma(\rb+\Xb-\Yb)-\mu_\sigma(\rb)\right]\mu_\sigma(\rb)\dd\rb. \end{equation} As long as the quantum uncertainty $\vert\Xb-\Yb\vert$ is much smaller than the sizes of the probe, the integral is vanishing everywhere in the bulk except for a thin layer of thickness $\sim\!\vert\Xb-\Yb\vert$ around the surface. Accordingly, CSL decoherence remains a surface effect and, investing some harder mathematical work, $\Dcal_\mathrm{cm}$ as well as $\Dcal_\mathrm{rot}$ would take a form of surface integral, generalizing (\ref{Deccomxxgeom}) and (\ref{Deccomphiphigeom}) beyond their quadratic approximations in $\Xbh$ and $\hat\varphi$.
\section{Concluding remarks}\label{VI} We have discussed CSL for constant density test masses and proved that spontaneous decoherence of both translational and rotational motion is determined by the density $\varrho$ and by two invariant surface-tensors of the bodies: $$ \mathrm{S}=\oint(\nb\circ\nb)\dd S, $$ $$ \mathrm{S}_\mathrm{rot}=\oint(\rb\times\nb)\circ(\rb\times\nb)\dd S. $$ These two fully encode the relevant features of the probe's geometry. Previously, these features were encoded by the so-called geometric factor $$ \mu_\kb=\varrho\int \erm^{-\im\kb\rb}\dd\rb, $$ an integral over the probe's volume and a function of the wave number $\kb$. In the case of general, heavily inhomogeneous test masses, the geometric factor certainly remains necessary. But for homogeneous probes, the surface-tensors should take over the role.
The new insight into the physics of CSL in the motion of a general massive bulk as a whole is important. First, the microscopic structure is totally irrelevant; only the $\sigma$-smoothened density matters. Furthermore, displacements of homogeneous regions are not decohered at all. Only the displacements of inhomogeneities are decohered. The sharper the inhomogeneity, the stronger the decoherence it induces. In a constant density probe, the only inhomogeneous part is its surface, hence CSL decoherence is a surface effect for it --- which we have exploited here. But surface inhomogeneity is the sharpest possible one, and decoherence for probes with smooth inside inhomogeneities is likely to remain dominated by the surface; our method of surface-tensors might remain valid for them! Layer inhomogeneities with thin walls between them are competitive; their effect is a surface effect and our surface-tensors could be generalized to include them. Whether and when lower than two-dimensional inhomogeneities could play a role is worth investigating. \vskip1cm
The author thanks the National Research Development and Innovation Office of Hungary Projects Nos. 2017-1.2.1-NKP-2017-00001 and K12435, and the EU COST Action CA15220 for support.
\end{document} |
\begin{document}
\title{Maxwell's Equations with Scalar Impedance: Direct and Inverse Problems} \author{Yaroslav Kurylev \thanks{Loughborough University} \and Matti Lassas\thanks{University of Helsinki} \and Erkki Somersalo\thanks {Helsinki University of Technology}} \date{28 Oct. 2002} \maketitle
{\bf Abstract:} The article deals with electrodynamics in the presence of anisotropic materials having scalar wave impedance. Maxwell's equations written for differential forms over a 3-manifold are analysed. The system is extended to a Dirac type first order elliptic system on the Grassmannian bundle over the manifold. The second part of the article deals with the dynamical inverse boundary value problem of determining the electromagnetic material parameters from boundary measurements. By using the boundary control method, it is proved that the dynamical boundary data determines the electromagnetic travel time metric as well as the scalar wave impedance on the manifold. This invariant result leads also to a complete characterization of the non-uniqueness of the corresponding inverse problem in bounded domains of ${\mathbb R}^3$.
\section*{Introduction}\label{introduction}
Classically, the laws of electromagnetism expressed by Maxwell's equations are written for vector fields representing the electric and magnetic fields. However, it is possible to rephrase these equations in terms of differential forms. It turns out that this alternative formulation has several advantages from both the theoretical and practical points of view. First, the formulation of electromagnetics with differential forms reflects the way in which the fields are actually observed. For instance, flux quantities are expressed as 2--forms while field quantities that correspond to forces are naturally written as 1--forms. This point of view has been adopted in modern physics at least when fields in free space are dealt with, see \cite{Frankel}. Furthermore, the formulation distinguishes the topological properties of
the electromagnetic media
from those that depend on geometry. It is understood that geometry is related to the properties of the material where the waves propagate. The distinction between non-geometric and geometric properties has consequences also to the numerical treatment of the equations by so called Whitney forms. An extensive treatment of this topic can be found in \cite{bossavit1}, \cite{bossavit2}. For the original reference concerning Whitney elements see \cite{whitney}.
The present work is divided into two parts. In the first part, we pursue further the invariant formulation of Maxwell's equations to model the wave propagation in certain anisotropic materials. More precisely, we consider anisotropic materials with scalar wave impedance. Physically, scalar wave impedance is tantamount to a single propagation speed of waves with different polarization. The invariant approach leads us to formulate Maxwell's equations on 3-manifolds as a first order Dirac type system. From the operator theoretic point of view, this formulation is based on an elliptization procedure by extending Maxwell's equations to a Grassmannian bundle over the manifold. This is a generalization of the elliptization of Birman and Solomyak, and of Picard (see \cite{Birman}, \cite{picard}).
In the second part of the work, we consider the inverse boundary value problem for Maxwell's equations. In terms of physics, the
goal is to determine the material parameter tensors, the electric permittivity $\epsilon$ and the magnetic permeability $\mu$, in a bounded domain from field observations at the boundary of that domain. As it is already well established, for anisotropic inverse problems it is natural to consider the problem in two parts. First, we consider the invariant problem on a Riemannian manifold, where we recover the travel time metric and the wave impedance on the manifold. As a second step, we consider the consequences of the invariant result when the manifold is embedded in ${\mathbb R}^3$.
Although inverse problems in electrodynamics have a great significance in physics and applications, results concerning the multidimensional inverse problems are relatively recent. One-dimensional results have existed starting from the 1930s, see e.g. \cite{langer}, \cite{schlichter}. The first breakthrough in multidimensional inverse problems for electrodynamics was based on the use of complex geometrical optics \cite{SIC}, \cite{CP}, \cite{OPS}, \cite{OS}. In these papers, the inverse problem of recovering the scalar material parameters from complete fixed frequency boundary data was solved even in the non-selfadjoint case, i.e., in the presence of electric conductivity. These works were based on ideas previously developed in references \cite{SyU}, \cite{Na1}, \cite{Na2} to solve the scalar Calder\'{o}n problem, which obtained its present formulation in \cite{Cl}.
In the dynamical case, a method to solve an isotropic inverse boundary problem based on ideas of integral geometry is developed in \cite{rom}. The method, however, is confined to the case of geodesically simple manifolds and, at the moment, is limited to finding some combinations of material parameters, including electric conductivity. An alternative method to tackle the inverse boundary value problem is the boundary control (BC) method, which originated in \cite{Be1}. Later, this method was developed for the Laplacian on Riemannian manifolds \cite{BeKu3} and for anisotropic self-adjoint \cite{Ku1} -- \cite{Ku3} and certain non-selfadjoint inverse problems \cite{KL2}. The first application of the BC method to electrodynamics was done in \cite{Be4}, \cite{BeIsPSh}. The authors of these articles show that, when the material parameters $\epsilon$ and $\mu$ are real scalars or alternatively when $\epsilon=\mu$, the boundary data determines the wave speed in the vicinity of the boundary. These works employed the Hodge-Weyl decomposition in the domain of influence near the boundary. The real obstruction for this technique is that, as time grows, the domain of influence can become non-smooth and the topology may be highly involved. For these reasons, our paper is based on different ideas.
In this article, there are essentially two new leading ideas. First, we characterize the subspaces controlled from the boundary by duality, thus avoiding the difficulties arising from the complicated topology of the domain of influence. The second idea is to develop a method of waves focusing at a single point of the manifold. This enables us to recover pointwise values of the waves on the manifold. The geometric techniques of the paper are presented in \cite{Ku5} and
the book \cite{KKL}.
The main results of this paper can be summarized as follows. \begin{enumerate} \item The knowledge of the complete dynamical boundary data over a sufficiently large finite period of time determines uniquely the compact manifold endowed with the electromagnetic travel time metric as well as the scalar wave impedance (Theorem 4.1). \item For the corresponding anisotropic inverse boundary value problem with scalar wave impedance for bounded domains in ${\mathbb R}^3$, the non-uniqueness is completely characterized by describing the class of possible transformations between material tensors that are indistinguishable from the boundary (Theorem 11.1). \end{enumerate} To the best knowledge of the authors, no global uniqueness results for inverse problems for systems with anisotropic coefficients have been previously known.
{\bf Acknowledgements:} We would like to give our warmest thanks to professor Alexander Katchalov for numerous useful discussions. His lectures on non-stationary Gaussian beams \cite{Ka1}, \cite{Ka2} at Helsinki University of Technology were paramount for our understanding of the subject. This work was accomplished during several visits of the authors at each others' home institutions. We wish to thank Helsinki University of Technology, Loughborough University and University of Helsinki for their kind hospitality and financial support. Furthermore, the financial support of the Academy of Finland and Royal Society is acknowledged.
\section{Maxwell's equations for forms}\label{maxwell for forms}
In this chapter we derive an invariant form
for Maxwell's equations, consider the initial boundary value problem for them, and show how the energy of the fields can be found using boundary measurements.
We start with Maxwell's equations in a domain $\Omega\subset {\mathbb R}^3$ equipped with the standard Euclidean structure. Since our objective is to write Maxwell's equations in
an invariant form, we generalize the setting at the very beginning and, instead of the domain $\Omega$, consider manifolds.
Let $(M,g_0)$ be a connected, oriented Riemannian 3-manifold possibly with a boundary $\partial M\neq \emptyset$. We assume that all objects in this paper are $C^\infty$--smooth. Consider Maxwell's equations on $M$, \begin{eqnarray}
{\rm curl}\,E &=& - B_t,\mbox{ (Maxwell--Faraday)},\label{MF vector}\\ \noalign{\vskip4pt}
{\rm curl }\,H &=& \phantom{-}D_t,\mbox{ (Maxwell--Amp\`{e}re)}, \label{MA vector} \end{eqnarray} where $E$ and $H$ are the electric and magnetic fields, and $B$ and $D$ are the magnetic flux density and electric displacement, assumed for the time being to be smooth mappings $M\times{\mathbb R}\to TM$. Here $TM$ denotes the tangent bundle over $M$. The curl operator as well as divergence appearing later will be defined invariantly in formula (\ref{A 23}) below. The sub-index $t$ in the equations (\ref{MF vector})--(\ref{MA vector}) denotes differentiation with respect to time. We denote the collection of these vector fields as $\Gamma(M\times{\mathbb R})$. At this point, we do not specify the initial and boundary values. To avoid non-physical static solutions, the above equations are augmented with the conditions \begin{equation}\label{div eqs}
{\rm div}B=0,\quad {\rm div}D=0. \end{equation}
Furthermore, the fields $E$ and $D$, and similarly the fields $H$ and $B$, are interrelated through the constitutive relations. In an anisotropic and non-dispersive medium, the constitutive relations assume the simple form \begin{equation}\label{constitutive}
D=\epsilon E,\quad B=\mu H, \end{equation} where $\epsilon,\mu$ are smooth and strictly positive definite tensor fields of type $(1,1)$ on $M$. Our aim is to write the above equations using differential forms.
Given the metric $g_0$, we can associate, in a canonical way, a differential 1--form with each vector field. Let us denote by $\wedge^k T^*M$ the $k$-th exterior power of the cotangent bundle. We define the mapping \[
TM\to T^*M,\quad X\mapsto X^\flat \] through the formula $g_0(X,Y) = X^\flat(Y)$. This mapping is one-to-one and it has the following well-known properties (See e.g. \cite{Sc}): For a scalar field $u\in C^\infty(M)$, $ ({\rm grad}\,u)^\flat = du, $ where $d$ is the exterior differential and for a vector field $X\in\Gamma(M)$, we have \beq \label{A 23} ({\rm curl}\,X)^\flat = *_0 dX^\flat,\quad {\rm div}\,X = -\delta_0 X^\flat, \eeq where $*_0$ denotes the Hodge--$*$ operator with respect to the metric $g_0$, \[
*_0:\wedge^k T^*M\to\wedge^{3-k}T^*M, \] and $\delta_0$ denotes the codifferential \footnote{Cf. with $\delta_0=(-1)^{nk+n+1}*_0d*_0$ for Riemannian $n$--manifolds}, \[
\delta_0 = (-1)^k*_0 d *_0: \Omega^k M\rightarrow \Omega^{k-1} M. \] Here, $\Omega^k M$ denotes the smooth sections $M\to\wedge^k T^* M$, i.e. differential $k$-forms. Applying now the operator $\flat$ to Maxwell's equations (\ref{MF vector})--(\ref{MA vector}) yields \[ dE^{\flat} = - *_0B^{\flat}_t,\quad
dH^{\flat} = *_0D^{\flat}_t, \] where we used the identity $*_0*_0 = {\rm id}$ valid in 3--geometry\footnote{For Riemannian $n$--manifold, we have in general $*_0*_0=(-1)^{k(n-k)}$}. The divergence equations (\ref{div eqs}) read \[
\delta_0 D^{\flat}=0,\quad \delta_0 B^{\flat}=0. \]
Consider now the constitutive relations (\ref{constitutive}). Starting with the equation $D=\epsilon E$, we pose the following question: Is it possible to find a {\em metric} $g_\epsilon$ such that the Hodge-$*$ operator with respect to this metric, denoted by $*_\epsilon$, would satisfy the identity \[
*_0 D^\flat =*_0(\epsilon E)^\flat =
*_\epsilon E^\flat? \] Assume that such a metric $g_\epsilon$ exists. By writing out the above formula in given {\nottopapertext local coordinates $(x^1,x^2,x^3)$} and recalling the definition of the Hodge-$*$ operator, the left side yields \begin{eqnarray*}
*_0(\epsilon E)^\flat &=&
\sqrt{{\rm g}_0} g_0^{ij}e_{jpq}g_{0,im} \epsilon^m_kE^k dx^p\wedge dx^q \\ \noalign{\vskip6pt} &=&\sqrt{{\rm g}_0}e_{jpq}\epsilon^j_k E^k
dx^p\wedge dx^q, \end{eqnarray*} where $e$ is the totally antisymmetric permutation index and ${\rm g}_0={\rm det}(g_{0,ij})$. Likewise, the right side reads \[
*_\epsilon E^\flat = \sqrt{{\rm g}_\epsilon}
g_\epsilon^{ij}e_{jpq}g_{0,ik} E^k dx^p\wedge
dx^q, \] so evidently the desired equality ensues if we set \[
\sqrt{{\rm g}_\epsilon}
g_\epsilon^{ij}g_{0,ik} =\sqrt{{\rm g}_0}
\epsilon^j_k. \] By taking determinants of both sides we find that \[
\sqrt{{\rm g}_\epsilon}=\sqrt{{\rm g}_0}
{\rm det}(\epsilon). \] Thus we see that the appropriate form for the metric tensor in the contravariant form is \beq\label{connection e and g}
g_\epsilon^{ij} = \frac 1{{\rm det}(\epsilon)} g_0^{ik}\epsilon^j_k. \eeq In the same fashion, we find a metric $g_\mu$ such that \[
*_0 B^\flat =*_0(\mu H)^\flat =
*_\mu H^\flat. \] In general, the metrics $g_\mu$ and $g_\epsilon$ can be very different from each other. In this article, we consider a particular case. Indeed, assume that the material has a {\em scalar wave impedance}, i.e., the tensors $\epsilon$ and $\mu$ satisfy \[
\mu =\alpha^2 \epsilon, \] where the wave impedance, $\alpha =\alpha(x)$, is a smooth function on $M$. Now we define two families of 1-- and 2--forms on $M$ as follows. We set \[
\omega^1 = E^\flat,\quad \omega^2 = *_0B^\flat. \] Similarly, we define \beq\label{nu field 1}
\nu^1 = \alpha H^\flat,\quad \nu^2 =*_0\alpha D^\flat. \eeq Observe that the wave impedance scaling ensures that $\omega^1$ and $\nu^1$ have the same physical dimensions, and the same holds for the 2--forms. Now it is a straightforward matter to check that the constitutive relations assume the form \[
\nu^2 =\alpha*_\epsilon\omega^1,\quad
\omega^2=\frac 1\alpha *_\mu \nu^1. \] We can make these equations even more symmetric by proper scaling of the metrics. Indeed, since $ \alpha^{-1}\mu =\alpha\epsilon, $ we have a new metric $g$ that is defined as \[
g^{ij}=g_{\alpha\epsilon}^{ij}=g_{\alpha^{-1}\mu}^{ij}. \] We have, by direct substitution that \begin{equation}\label{travel time metric}
g^{ij}=\frac 1{\alpha^2}g_{\epsilon}^{ij}
=\alpha^2 g_{\mu}^{ij}. \end{equation} This new metric will be called the {\em travel time metric} in the sequel.
Assume that \[
*:\wedge^j T^* M\to \wedge^{3-j}T^* M \] denotes the Hodge--$*$ operator with respect to some metric $\widehat{g}$. {\nottopapertext
If we perform a scaling of the metric as \[
\widehat{g}^{ij}\rightarrow \widetilde g^{ij}=r^2\widehat{g}^{ij}, \] the corresponding Hodge operator is scaled as \[
*\rightarrow \widetilde * = r^{2j-3}*. \] Therefore, if we denote by $*$ the Hodge--$*$ operator with respect to the travel time metric, we have }
\[
* = \alpha *_\epsilon = \frac 1\alpha *_\mu :
\wedge^1 T^* M\to \wedge^2 T^* M. \] But this means simply that, in terms of the travel time metric, we have \begin{equation}\label{hodge}
\nu^2 =*\omega^1,\quad \omega^2=*\nu^1. \end{equation}
Consider now Maxwell's equations for these forms. After eliminating the $\nu$--forms using the constitutive equations (\ref{hodge}), Maxwell--Faraday and Maxwell--Amp\`{e}re equations assume the form \begin{equation}\label{apu1}
d\omega^1=-\omega_t^2,\quad
\delta_\alpha\omega^2 = \omega_t^1, \quad
\delta_\alpha =
(-1)^k*\alpha d \frac 1\alpha*: \Omega^k M\rightarrow \Omega^{k-1} M \end{equation} and the divergence equations (\ref{div eqs}) read \begin{equation}\label{apu2}
d\omega^2 =0,\quad \delta_\alpha\omega^1=0. \end{equation} {\newtext In the sequel, we call equations (\ref{apu1}) and (\ref{apu2}) {\it Maxwell's equations}.}
It turns out to be useful to define auxiliary forms that vanish in the electromagnetic theory. Let us introduce the auxiliary forms $\omega^0$ and $\omega^3$ via the formulas \[
\omega_t^0 = \delta_\alpha\omega^1,\quad
-\omega^3_t=d\omega^2. \] Furthermore, we define the corresponding $\nu$--forms as \beq\label{nu field 2}
\nu^0=*\omega^3,\quad \nu^3=*\omega^0. \eeq Since these auxiliary forms
are all vanishing, we may modify the equations (\ref{apu1}) to have \begin{equation}\label{apu1 bis}
d\omega^1 -\delta_\alpha\omega^3=-\omega_t^2, \quad
d\omega^0 -\delta_\alpha\omega^2 = -\omega_t^1. \end{equation} Putting the obtained equations together in a matrix form, we arrive at the equation \begin{equation}\label{complete}
\omega_t + {\cal M}\omega=0, \end{equation} where \[
\omega = (\omega^0,\omega^1,\omega^2,\omega^3) \] and the operator ${\cal M}$ (without defining its domain at this point, i.e., defined as a differential expression) is given as \begin{equation}\label{M} {\cal M} = \left(\begin{array}{cccc} 0 &-\delta_{\alpha} &0 &0 \\ d & 0 &-\delta_{\alpha} &0 \\ 0 & d &0 &-\delta_{\alpha} \\ 0 & 0 &d &0 \end{array}\right). \end{equation} The equation (\ref{complete}) is called {\em the complete Maxwell system}. In the next section, we treat more systematically this operator.
{\bf Remark 1.} The operator ${\mathcal M}$ has the property \[
{\mathcal M}^2 = -{\rm diag}(\Delta_\alpha^0, \Delta_\alpha^1,\Delta_\alpha^2,\Delta_\alpha^3)
=-{\bf \Delta}_\alpha, \] where the operator $\Delta_\alpha^k$ acting on $k$--forms is \[
\Delta_\alpha^k = d\delta_\alpha + \delta_\alpha d
=\Delta^{k}_g + Q(x,D), \] with $\Delta_g^k$ denoting the Laplace-Beltrami operator on $k$--forms with respect to the travel time metric and $Q(x,D)$ being a first order perturbation. Hence, if $\omega$ satisfies the equation (\ref{complete}), we have \[
(\partial_t^2 + {\bf \Delta}_\alpha)\omega
=(\partial_t-{\mathcal M})(\partial_t +{\mathcal M})\omega =0. \] In particular, we observe that the assumption that the impedance is scalar implies a unique propagation speed for the system.
{\bf Remark 2.} Denote by $\Omega M=\oplus_{k=0}^3 \Omega^k M$ the Grassmannian algebra of differential forms, where $\Omega^k M$ are the differential $k$-forms.
Then the operator ${\mathcal M}$ in formula (\ref{M}) can be also considered as a Dirac operator $d-\delta_\alpha:\Omega M\to \Omega M$.
Before leaving this section, let us briefly consider the energy integrals in terms of the differential forms. In terms of the vector fields, the energy of the electric field at a given moment $t$ is obtained as the integral \[
{\mathcal E}(E) = \int_M \epsilon E\cdot E dV
=\int_M g_0(E,D)dV = \int_M E^\flat\wedge *_0D^\flat \] where $dV$ is volume form of $(M,g_0)$. By plugging in the defined forms we arrive at \[
{\mathcal E}(E) =\int_M\frac 1\alpha \omega^1\wedge
*\omega^1. \] In the same fashion, we find that the energy of the magnetic field reads \[
{\mathcal E}(B) =\int_M\frac 1\alpha \omega^2\wedge
*\omega^2. \] These formulas serve as a motivation for our definition of the inner product in the following section.
\subsection{Maxwell operator}
In this section we establish a number of notational conventions and definitions concerning the differential forms used in this work.
We define the $L^2$--inner products for $k$--forms in $\Omega^k M$ as \[
(\omega^k,\eta^k)_{L^2} = \int_M \frac 1\alpha
\omega^k\wedge * \eta^k,\quad
\omega^k,\;\eta^k\in\Omega^k M. \] Further, we denote by
$L^2(\Omega^k M)$ the completion of $\Omega^k M$ with respect to the norm defined by the above inner products. We also define \[
{\bf L}^2(M) =
L^2(\Omega^0 M)\times L^2(\Omega^1 M)\times L^2(\Omega^2 M)\times L^2(\Omega^3 M). \] Similarly, we define Sobolev spaces ${\bf H}^s(M), \, s \in \Bbb{R}$, \[ {\bf H}^s(M) = H^s(\Omega^0M)\times H^s(\Omega^1 M)\times H^s(\Omega^2 M)\times H^s(\Omega^3 M), \] \[ {\bf H}_0^s(M) = H^s_0(\Omega^0M)\times H^s_0(\Omega^1 M)\times H^s_0(\Omega^2 M)\times H^s_0(\Omega^3 M), \] where $H^s(\Omega^kM)$ are Sobolev spaces of $k-$ forms.
At last, $H^s_0(\Omega^k M)$ is the closure in $H^s(\Omega^k M)$ of $\Omega^k M^{{\rm int}}$, i.e. the subspace of $\Omega^k M$ of $k-$ forms which vanish near $\p M$.
The domain of the exterior derivative $d$ in the $L^2$--space of $k$--forms is \[
H(d,\Omega^k M) = \left\{ \omega^k\in L^2
(\Omega^k M)\mid
d\omega^k \in L^2(\Omega^{k+1} M)\right\}. \] Similarly, we set \[
H(\delta_\alpha,\Omega^k M) = \left\{ \omega^k
\in L^2(\Omega^k M)\mid
\delta_\alpha\omega^k \in L^2(\Omega^{k-1} M)\right\}, \] where $\delta_\alpha$ is the weak extension of the operator $ \delta_\alpha:\Omega^k M\to\Omega^{k-1}M. $ In the sequel, we shall drop the sub-index $\alpha$ from the codifferential.
The codifferentiation $\delta$ is adjoint to the exterior derivative in the sense that for $C^\infty _0$--forms on $M$, \[ (d\omega^k,\eta^{k+1})_{L^2} = (\omega^k,\delta\eta^{k+1})_{L^2}. \] To extend the adjoint formula for less regular forms, let us first fix some notations. For $\omega^k \in\Omega^k M$, we define the {\em tangential} and {\em normal} boundary data at $\partial M$ as \begin{eqnarray*} {\bf t}\omega^k = i^*\omega^k,\quad {\bf n}\omega^k = i^*(\frac 1\alpha*\omega^k), \end{eqnarray*} respectively, where $i^*:\Omega^k M \to\Omega^k\partial M$ is the pull-back of the natural imbedding $i:\partial M\to M$.
Sometimes, we denote ${\bf n}={\bf n}_\alpha$ to indicate a particular
choice $\alpha$. With these notations, let us write \[
\int_{\partial M}i^*\omega^k \wedge i^*(\frac 1\alpha * \eta^{k+1})=\langle{\bf t}\omega^k, {\bf n}\eta^{k+1}\rangle. \] We add here a small {\em caveat} that the above formula does not define an inner product as $\omega^k$ and $\eta^{k+1}$ are differential forms of different order. For $\omega^k\in\Omega^kM$ and $\eta^{k+1}\in \Omega^{k+1}M$, the Stokes formula for forms can be written as \begin{equation}\label{stokes1}
(d\omega^k,\eta^{k+1})_{L^2}- (\omega^k,\delta
\eta^{k+1})_{L^2} =
\langle{\bf t} \omega^k, {\bf n} \eta^{k+1}\rangle. \end{equation}
{\nottopapertext This formula allows the extension of the boundary trace operators ${\bf t}$ and ${\bf n}$ to $H(d,\Omega^k M)$ and $H(\delta,\Omega^k M)$, respectively. Indeed, if $\omega^k\in H^1(\Omega^k M)$, then
${\bf t}\omega^k\in H^{1/2}(\Omega^k\partial M)$ and, by formula (\ref{stokes1}), we may extend \[
{\bf t}:H(d,\Omega^k M)\to H^{-1/2}(\Omega^k\partial M). \] In the same way, equation (\ref{stokes1}) gives us the natural extension \[
{\bf n}: H(\delta,\Omega^{k+1} M) \to H^{-1/2}(\Omega^{2-k}\partial M). \] In fact, a stronger result holds. } \begin{proposition}\label{paquet} The operators ${\bf t}$ and ${\bf n}$ can be extended to continuous surjective maps \begin{eqnarray*} {\bf t}: H(d,\Omega^k M) &\to& H^{-1/2}(d,\Omega^k\partial M), \\ {\bf n}: H(\delta,\Omega^{k+1} M) &\to& H^{-1/2}(d,\Omega^{2-k}\partial M) \end{eqnarray*} where the space $H^{-1/2}(d,\Omega^k\partial M)$ is the space of $k$-forms $\omega^k$ on $\partial M$ satisfying \[
\omega^k\in H^{-1/2}(\Omega^k \partial M),\quad
d\omega^k\in H^{-1/2}(\Omega^{k+1} \partial M). \] \end{proposition}
This result is due to Paquet \cite{paquet}.
The formula (\ref{stokes1}) can be used also to define function spaces with vanishing boundary data. Indeed, let us define \begin{eqnarray*}
\Hnull (d,\Omega^k M) =\{ \omega^k\in H(d,\Omega^k M) &\mid& (d\omega^k,\eta^{k+1})_{L^2} =(\omega^k,\delta\eta^{k+1})_{L^2} \\
& &\mbox{for all $\eta^{k+1}\in H(\delta,\Omega^{k+1}M)$}\},\\
\Hnull(\delta,\Omega^{k+1} M) =\{ \eta^{k+1}\in H(\delta,\Omega^{k+1} M)&\mid& (d\omega^k,\eta^{k+1})_{L^2} =(\omega^k,\delta\eta^{k+1})_{L^2} \\
& &\mbox{for all $\omega^k\in H(d,\Omega^kM)$}\}. \end{eqnarray*} It is not hard to see that indeed \[
\Hnull(d,\Omega^k M)={\bf t}^{-1}\{0\}, \quad \Hnull(\delta,\Omega^{k+1}M)= {\bf n}^{-1}\{0\}. \]
{\nottopapertext We are now in a position to prove the following lemma.}
\begin{lemma}\label{adjoints} The adjoint of the operator \[
d:L^2(\Omega^k M)\supset H(d,\Omega^k M)\to L^2(\Omega^{k+1} M) \] is the operator \[
\delta:L^2(\Omega^{k+1}M) \supset\Hnull(\delta,\Omega^{k+1}M) \to L^2(\Omega^kM) \] and {\em vice versa}. Similarly, the adjoint of \[
\delta:L^2(\Omega^{k+1}M) \supset H(\delta,\Omega^{k+1}M) \to L^2(\Omega^kM) \] is the operator \[
d:L^2(\Omega^k M)\supset \Hnull(d,\Omega^k M)\to L^2(\Omega^{k+1} M) \] \end{lemma}
{\nottopapertext {\em Proof:} We prove only the first claim, the other having a similar proof.
Let $\eta^{k+1}\in{\cal D}(d^*)$, where $d^*$ denotes the adjoint of $d$. By definition, there exists $\vartheta^k\in L^2(\Omega^k M)$ such that \[
(d\omega^k,\eta^{k+1})_{L^2} = (\omega^k,\vartheta^k)_{L^2} \] for all $\omega^k\in H(d,\Omega^k M)$. In particular, if $\omega^k\in\Omega^k M^{{\rm int}}$, we see that, in the weak sense, \[
(d\omega^k,\eta^{k+1})_{L^2} =(\omega^k, \delta\eta^{k+1})_{L^2} = (\omega^k,\vartheta^k)_{L^2}, \] i.e., $\delta\eta^{k+1}=\vartheta^k \in L^2(\Omega^k M)$. Thus, $\eta^{k+1} \in H(\delta,\Omega^{k+1} M)$, and the claim follows now since we have \[
(d\omega^k,\eta^{k+1})_{L^2} = (\omega^k,\delta\eta^{k+1})_{L^2} \] for all $\omega^k\in H(d,\Omega^k M)$, i.e., $\delta = d^*$.
$\Box$ }
In the sequel, we will write for brevity $H(d)=H(d,\Omega^k M)$, etc. when there is no risk of confusion concerning the order of the forms.
For later reference, let us point out that the Stokes formula for the complete Maxwell system can be written compactly as \begin{equation}\label{stokes for system}
(\eta,{\cal M}\omega)_{{\bf L}^2} + ({\cal M} \eta, \omega)_{{\bf L}^2} = \langle{\bf t}\omega,{\bf n}\eta\rangle + \langle{\bf t}\eta,{\bf n}\omega\rangle, \end{equation} where $\omega \in{\bf H}$ with \beq \label{H} {\bf H} = H(d)\times[H(d)\cap H(\delta)]\times[H(d)\cap H(\delta)]\times H(\delta) \eeq and $\eta \in {\bf H}^1(M)$ and we use the notations \begin{eqnarray*} {\bf t}\omega = ({\bf t}\omega^0,{\bf t}\omega^1,{\bf t}\omega^2),\quad {\bf n}\omega = ({\bf n} \omega^3,{\bf n} \omega^2,{\bf n}
\omega^1), \end{eqnarray*}
and, naturally, \[ \langle{\bf t}\omega,{\bf n}\eta\rangle = \langle{\bf t}\omega^0, {\bf n}\eta^1\rangle + \langle{\bf t}\omega^1, {\bf n}\eta^2\rangle +\langle{\bf t}\omega^2, {\bf n}\eta^3\rangle. \]
With these notations, we give the following definition of the Maxwell operators with electric and magnetic boundary conditions, respectively.
\begin{definition}\label{d. 2} The Maxwell operator with the electric boundary condition, denoted by \[
{\cal M}_{\rm e}:{\cal D}({\cal M}_{\rm e})\to {\bf L}^2(M), \] is defined through the differential expression (\ref{M}), with the domain ${\cal D}({\cal M}_{\rm e}) \subset {\bf L}^2(M)$ defined as \[
{\cal D}({\cal M}_{\rm e})=\Hbnull_{\bf t}:= \Hnull(d) \times[\Hnull(d)\cap H(\delta)]\times
[\Hnull(d)\cap H(\delta)] \times H(\delta). \] Similarly, the Maxwell operator with the magnetic boundary condition, denoted by \[
{\cal M}_{\rm m}:{\cal D}({\cal M}_{\rm m})\to {\bf L}^2(M), \] is defined through the differential expression (\ref{M}), with the domain ${\cal D}({\cal M}_{\rm m}) \subset {\bf L}^2(M)$ defined as \[
{\cal D}({\cal M}_{\rm m})=\Hbnull_{\bf n}:= H(d) \times[H(d)\cap \Hnull(\delta)]\times
[H(d)\cap \Hnull(\delta)] \times \Hnull(\delta). \] \end{definition}
Before further discussion, let us comment on the boundary conditions in terms of physics. For vectorial representations of the electric and magnetic fields, the electric boundary condition is associated with electrically perfectly conducting boundaries, i.e., $n\times E =0$, $n\cdot B=0$, where $n$ is the exterior normal vector at the boundary. In terms of differential forms, this means simply that ${\bf t} E^\flat={\bf t}\omega^1 = 0$ and ${\bf t} *_0 B^\flat= {\bf t}\omega^2 = 0$. On the other hand, the magnetic boundary conditions represent magnetically perfectly conducting boundaries, i.e., $n\times H=0$, $n\cdot D=0$, which again in terms of forms reads as ${\bf t} H^\flat = {\bf t}(1/\alpha)\nu^1 = 0$ or ${\bf t}(1/\alpha)*\omega^2 ={\bf n}\omega^2 =0$ and ${\bf t} *_0 D^\flat = {\bf t}(1/\alpha)\nu^2=0$, or in terms of $\omega^1$, ${\bf t}(1/\alpha)*\omega^1 = {\bf n}\omega^1=0$.
There is an obvious duality between these conditions. It is therefore sufficient to consider the operator with the electric boundary condition only. This observation is related to the well-known Maxwell {\it duality principle.}
Consider the intersections of spaces appearing in the domains of definition in the previous definition. Let us denote \ba & & \Hnull^1_{\bf t}(\Omega^k M)=\{\omega^k\in H^1 (\Omega^k M)\mid {\bf t}\omega^k=0\},\\ & & \Hnull^1_{\bf n}(\Omega^k M)=\{\omega^k\in H^1 (\Omega^k M)\mid {\bf n}\omega^k=0\}. \ea It is a direct consequence of Gaffney's inequality (see \cite{Sc}) that \ba & & \Hnull(d,\Omega^k M)\cap H(\delta,\Omega^k M)
=\Hnull^1_{\bf t}(\Omega^k M),\\ & & H(d,\Omega^k M)\cap \Hnull(\delta,\Omega^k M)
=\Hnull^1_{\bf n}(\Omega^k M). \ea
The following lemma is a direct consequence of Lemma \ref{adjoints} and classical results on the Hodge-Weyl decomposition \cite{Sc}.
\begin{lemma}\label{lem 2.4}
The electric Maxwell operator has the following properties: \begin{enumerate}
\item[i.] The operator ${\cal M}_{\rm e}$ is skew-adjoint. \item[ii.] The operator ${\cal M}_{\rm e}$ defines an elliptic differential operator in $M^{int}.$ \item[iii.] Ker$\,({\cal M}_{\rm e})=\{(0,\omega^1,\omega^2, \omega^3)\in \Hbnull_{\bf t}:\ d\omega^1=0,\ \delta\omega^1=0,\ d\omega^2=0,\ \delta\omega^2=0,\ \delta\omega^3=0\}$.
\item[iv.] Ran$\,({\cal M}_{\rm e}) =
L^2(\Omega ^0M)\times (\delta H(\delta,\Omega^2 M) +d\Hnull(d,\Omega^0 M))\times \\
(\delta H(\delta,\Omega^2 M) +d\Hnull (d,\Omega^1 M))\times d\Hnull (d,\Omega^2 M)$. \end{enumerate} \end{lemma}
By the skew-adjointness, it is possible to define weak solutions to initial-boundary-value problems needed later. In the sequel we denote the forms $\omega(x,t)$ just by $\omega(t)$ when there is no danger of misunderstanding.
\begin{definition}\label{weak solution} By the {\em weak solution} to the initial boundary value problem \[ \omega_t+ {\mathcal M}\omega =\rho \in L^1_{loc}({\mathbb R},{\bf L}^2(M)), \] \beq \label{ibvp}
{\bf t}\omega\big|_{\partial M\times {\mathbb R}}=0,\quad \omega(\;\cdot\;,0)=\omega_0\in{\bf L}^2, \eeq we mean the form \[
\omega(t) ={\mathcal U}(t)\omega_0+\int_0^t{\mathcal U} (t-s)\rho(s)ds, \] where ${\mathcal U}(t)={\rm exp}(-t{\mathcal M}_ {\rm e})$ is the unitary operator generated by ${\mathcal M}_{\rm e}$. \end{definition}
In the analogous manner, we define weak solutions with initial data given on $t=T$, $T\in {\mathbb R}$.
Assuming $\rho \in C({\mathbb R},{\bf L}^2(M))$ and using
the theory of unitary groups, we immediately obtain the regularity result \[
\omega\in C({\mathbb R},{\bf L}^2)\cap C^1({\mathbb R},{\bf H}'), \]
where ${\bf H}'$ denotes the dual of ${\bf H}$.
We shall need later the boundary traces of the weak solution. To define them, let $(\omega_{0n},\rho_n)\in{\mathcal D}({\mathcal M} _{\rm e})\times C({\mathbb R},{\mathcal D}({\mathcal M}_{\rm e}))$ be an approximating sequence of the pair $(\omega_0,\rho)$ in ${\bf L}^2 \times C({\mathbb R},{\bf L}^2)$. We define \[
\omega_n ={\mathcal U}(t)\omega_{0n}+\int_0^t{\mathcal U} (t-s)\rho_n(s)ds, \] whence $\omega_n\in C({\mathbb R},{\mathcal D}({\mathcal M}_{\rm e})) \cap C^1({\mathbb R},{\bf L}^2)$. Let $\varphi =(\varphi^0,\varphi^1,\varphi^2)$ be a test form, $\varphi^j\in C^\infty_0({\mathbb R}, \Omega^j\partial M)$. Let $\eta$ be a strong solution of the initial boundary value problem \[ \eta_t+ {\mathcal M}\eta =0, \] \[ {\bf t}\eta=\varphi,\quad \eta(\;\cdot\;,0)=0. \] We have \begin{eqnarray*}
(\eta(T),\omega_n(T))_{{\bf L}^2}&=& \int_0^T\partial_t
(\eta,\omega_n)dt \\ \noalign{\vskip6pt}
&=& -\int_0^T\big(({\mathcal M}\eta,\omega_n) +(\eta, {\mathcal M}\omega_n)\big) dt, \end{eqnarray*} and, by applying Stokes theorem, we deduce \[ (\eta(T),\omega_n(T))_{{\bf L}^2} = -\int_0^T\langle
\varphi,{\bf n} \omega_n\rangle\, dt. \] Hence, we observe that, when going to the limit $n\to\infty$, the above formula defines ${\bf n}\omega = \lim_{n\to\infty}{\bf n} \omega_n \in {\mathcal D}'({\mathbb R}, {\bf {\mathcal D}'}(\partial M))$, where \[
{\bf {\mathcal D}'}(\partial M) = {\mathcal D}'(\Omega^0 \partial M) \times {\mathcal D}'(\Omega^1 \partial M) \times {\mathcal D}'(\Omega^2 \partial M). \] We conclude this section with the following result.
\begin{lemma}\label{weak is maxwell} Assume that the initial data $\omega_0$ is of the form $\omega_0=(0,\omega_0^1,\omega_0^2,0)$, where $\delta\omega_0^1 =0$, $d\omega_0^2=0$ and we have $\rho=0$. Then the weak solution $\omega$ of Definition \ref{weak solution} satisfies also Maxwell's equations (\ref{apu1}), (\ref{apu2}), i.e., $\omega^0=0$ and $\omega^3=0$. \end{lemma}
{\em Proof:} As observed in Remark 1, $\omega$ and, in particular, $\omega^0$ satisfies the wave equation \[
\Delta^0_{\alpha}\omega^0 + \omega^0_{tt}=0, \] in the distributional sense, along with the Dirichlet boundary condition ${\bf t}\omega^0 =0$. The initial data for $\omega^0$ is \[
\omega^0(0) = \omega_0^0 =0, \] and \[
\omega^0_t(0) = \delta\omega^1\big|_{t=0}
= \delta\omega^1_0=0. \] Hence, we deduce that also $\omega^0=0$.
Similarly, $\omega^3$ satisfies the wave equation with the initial data \[
\omega^3(0) = \omega_0^3 =0, \] and \[
\omega^3_t(0) = -d\omega^2\big|_{t=0}
= -d\omega^2_0=0. \] As for the boundary condition, we observe that \[
{\bf t} \delta\omega^3 ={\bf t}\omega^2_t - {\bf t} d\omega^1 =\partial_t{\bf t}\omega^2 - d{\bf t}\omega^1 =0, \] corresponding to the vanishing Neumann data for the function $*\omega^3$. Thus, also $\omega^3=0$.
$\Box$
\subsection{Initial--boundary value problem}
Our next goal is to consider the forward problem and the Cauchy data on the lateral boundary $\p M\times {\mathbb R}$ for solutions of Maxwell's equations. Assume that $\omega$ is a solution of the complete system. The complete Cauchy data of this solution consists of \[
({\bf t}\omega(x,t),{\bf n}\omega(x,t))
,\quad (x,t)\in\partial M\times{\mathbb R}_+. \]
Assume now that $\omega$ corresponds to the solution of Maxwell's equations, i.e., we have $\omega^0=0$ and $\omega^3=0$. Consider
the Maxwell-Faraday equation in (\ref{apu1}), \[
\omega^2_t + d\omega^1 = 0. \] By taking the tangential trace, we find that $
{\bf t}\omega^2_t= -d{\bf t}\omega^1, $ and further, \[
{\bf t}\omega^2(x,t)={\bf t}\omega^2(0)-\int_0^t d{\bf t}\omega^1(x,t')dt', \quad x\in\partial M. \] Similarly, by taking the normal trace of the Maxwell-Amp\`{e}re equation in (\ref{apu1}), \[
\omega^1_t -\delta\omega^2 =0, \] we find that $ {\bf n} \omega_t^1=d{\bf n} \omega^2, $ so likewise, \beq \label{2.12.1} {\bf n}\omega^1(x,t)= {\bf n} \omega^1(0)+ \int_0^t d{\bf n}\omega^2(x,t')dt', \quad x\in\partial M. \eeq In the sequel we shall mainly consider the case $\omega(0)=0$, when the lateral Cauchy data for the original problem of electrodynamics is simply \begin{eqnarray} {\bf t}\omega &=& (0,f,-\int_0^td f(t')dt'), \label{t-data}\\ \noalign{\vskip4pt} {\bf n}\omega &=& (0,g,\int_0^td g(t')dt') \label{n-data} \end{eqnarray} where $f$ and $g$ are functions of $t$ with values in $\Omega^1 \partial M$.
The following theorem implies that solutions of Maxwell's equations are solutions of the complete Maxwell system and gives sufficient conditions for the converse result.
\begin{theorem}\label{equivalence} If $\omega(t)\in C({\mathbb R},{\bf H}^1)\cap C^1({\mathbb R},{\bf L}^2)$ satisfies the equation \begin{equation}\label{M-eq}
\omega_t +{\cal M}\omega = 0,\quad t>0 \end{equation} with vanishing initial data $\omega(0)=0$, and $\omega^0(t)=0$, $\omega^3(t)=0$, then the Cauchy data is of the form (\ref{t-data})--(\ref{n-data}).
Conversely, if the lateral Cauchy data is of the form (\ref{t-data})--(\ref{n-data}) for $0 \leq t \leq T$, and $\omega$ satisfies the equation (\ref{M-eq}), with vanishing initial data, then $\omega(t)$ is a solution to Maxwell's equations, i.e., $\omega^0(t)=0$, $\omega^3(t)=0$. \end{theorem}
{\em Proof:} The first part of the theorem follows from the above considerations if
we show that $\omega(t)$ is sufficiently regular.
Since $\omega^2 \in C( {\mathbb R},H^1(\Omega^2M))$ we see that ${\bf n}\omega^2 \in C( {\mathbb R},H^{1/2}(\Omega^2 \partial M))$ with $d{\bf n}\omega^2 \in C( {\mathbb R},H^{-1/2}(\Omega^2 \partial M))$. Furthermore, as $\delta \omega_t^1(t) = \delta \delta \omega^2(t) =0$, \[ {\bf n}\omega_t^1 \in C( {\mathbb R},H^{-1/2}(\Omega^2 \partial M)), \quad \delta \omega^2 \in C( {\mathbb R},H^{-1/2}(\Omega^2 \partial M)), \] which verifies (\ref{n-data}).
To prove (\ref{t-data}) we use Maxwell duality: Consider the forms
\[
\eta^{3-k}=(-1)^k*\frac 1\alpha\omega^k. \] Then $\eta=(\eta^0,\eta^1,\eta^2,\eta^3)$ satisfies Maxwell's equations $\eta_t+\widetilde {\mathcal M} \eta=0$ where $\widetilde {\mathcal M}$ is the Maxwell operator with metric $g$ and scalar impedance $\alpha^{-1}$. In the sequel, we call Maxwell's equations with these parameters the adjoint Maxwell equations and the forms $\eta^j$ the adjoint solution. Now the formula (\ref{n-data}) for the adjoint solution implies (\ref{t-data}) for $\omega$.
To prove the converse, it suffices to show that $\omega^0(t)=0$. Indeed, the claim $\omega^3(t)=0$ follows then by Maxwell duality described earlier. From the equations \begin{eqnarray}
\omega^0_t -\delta\omega^1 &=& 0,\\
\omega^1_t +d\omega^0 -\delta\omega^2 &=& 0\label{aux} \end{eqnarray} it follows that $\omega^0$ satisfies the wave equation \[
\omega_{tt}^0 +\delta d\omega^0=0. \] It also satisfies the initial condition $\omega^0(0)=0$ and $\omega^0_t(0)=0$
and, from (\ref{t-data}), boundary condition ${\bf t}\omega^0 =0$. Thus, $\omega^0 = 0$ for $0\leq t \leq T$.
$\Box$
The following definition fixes the solution of the forward problem considered in this work.
\begin{definition} Let $f=(f^0,f^1,f^2)\in C^{\infty}([0,T]; {\bf \Omega}(\partial M))$ be a smooth boundary source of the form (\ref{t-data}), i.e., $f^0=0$, $f^2_t = -df^1$. Further, let $R$ be any right inverse of the mapping ${\bf t}$. The solution of the initial-boundary value problem \[ \omega_t+ {\cal M}\omega=0,\quad t>0, \] \[
\omega(0)=\omega_0\in {\bf L}^2(M),\quad {\bf t}\omega = f, \] is given by \[
\omega = Rf + {\mathcal U}(t)\omega_0
- \int_0^t{\mathcal U}(t-s)({\mathcal M}Rf(s) +Rf_s(s))ds. \] \end{definition}
We remark that the boundary data $f$ could be chosen from a wider class $f\in H^{1/2}(\partial M\times [0,T])$.
Theorem \ref{equivalence} motivates the following definition.
\begin{definition} \label{27.11.d} For solution $\omega$ of Maxwell equations (\ref{apu1})--(\ref{apu2}) we use the following notations: \begin{enumerate} \item[i.] The lateral Cauchy data for a solution $\omega$ of Maxwell's equations with vanishing initial data in the interval $0\leq t\leq T$ is given by the pair \[ ({\bf t}\omega^1(x,t),{\bf n} \omega^2(x,t)),\quad (x,t)\in\partial M\times [0,T]. \] \item[ii.] When $\omega$ satisfies initial condition $\omega(0)=0$ the mapping \[ {\mathcal Z}^T: C^{\infty}_{00}([0,T],\Omega^1(\partial M))\to
C^{\infty}_{00}([0,T],\Omega^1(\partial M)), \] \[
{\mathcal Z}^T({\bf t} \omega^1)={\bf n} \omega^2|_{\partial M \times [0,T]}, \] is well defined. We call this map the {\em admittance map}. \end{enumerate} \end{definition} Here $ C^{\infty}_{00}([0,T], {\it B})$ consists of $C^{\infty}$ functions of $t$ with values in a space ${\it B}$, i.e. ${\it B} =\Omega^1(\partial M)$ in definition \ref{27.11.d}, which vanish near $t=0$.
Note that in the classical terminology for the electric and magnetic fields, ${\mathcal Z}^T$
maps the tangential electric field
$n\times E|_{\partial M\times [0,T]}$ to the tangential magnetic field
$n\times H|_{\partial M\times [0,T]}$.
The boundary data and the energy of the field inside $M$ are closely related. The following result, crucial from the point of view of boundary control, is a version of the Blagovestchenskii formula (see \cite{BeBl} for the case of the scalar wave equation). Observe that the following theorem is formulated for any solutions of the complete system, not only for those that correspond to Maxwell's equations.
\begin{theorem}\label{blacho} Let $\omega$ and $\eta$ be smooth solutions of the complete system (\ref{complete}). Then the knowledge of the lateral Cauchy data \[
({\bf t}\omega,{\bf n}\omega),\quad ({\bf t}\eta,{\bf n}\eta), \quad 0\leq t\leq 2T, \] is sufficient for the determination of the inner products \[
(\omega^j(t),\eta^j(s))_{L^2},\quad 0\leq j\leq 3,\;
0\leq s,t\leq T \] over the manifold $M$. \end{theorem}
{\em Proof:} The proof is based on the observation that, having the lateral Cauchy data of a solution $\omega$, we also have access to the forms $d{\bf t} \omega$ and $d{\bf n} \omega$ at the boundary. On the other hand, ${\bf t}$ commutes with $d$ so that \[
{\bf t} d\omega^j = d{\bf t}\omega^j,\quad
{\bf n}\delta\omega^j = {\bf t} **d\frac 1\alpha*\omega^j =
d{\bf t}\frac 1\alpha*\omega^j = d{\bf n} \omega^j. \] Let us define the function \[
F^j(s,t) = (\omega^j(s),\eta^j(t))_{L^2}. \] From the complete system, it follows that \begin{eqnarray}\label{A-fomrmu}
(\partial_s^2 -\partial_t^2)F^j(s,t)
&=& (\omega_{ss}^j(s),\eta^j(t))_{L^2}-
(\omega^j(s),\eta_{tt}^j(t))_{L^2}\\ \noalign{\vskip6pt} \nonumber &=& -((d\delta+\delta d)\omega^j(s),\eta^j(t))_{L^2}+
(\omega^j(s),(d\delta+\delta d)\eta^j(t))_{L^2} \\ \noalign{\vskip6pt} \nonumber
&=& f^j(s,t). \end{eqnarray} By applying Stokes theorem we obtain further that \[
f^j(s,t)=\langle {\bf n}\omega^j,{\bf t} \delta\eta^j\rangle +\langle {\bf t}\omega^j,{\bf n} d\eta^j\rangle -\langle {\bf t} \delta\omega^j,{\bf n}\eta^j\rangle -\langle {\bf n} d\omega^j,{\bf t}\eta^j\rangle, \] where we suppressed for brevity the dependence
of the boundary values on $s$ and $t$. Now the complete system implies that \[
d\omega^j=-\omega_s^{j+1}+\delta\omega^{j+2}, \quad
\delta\omega^j = \omega_s^{j-1} + d\omega^{j-2}, \] and, similarly, \[
d\eta^j=-\eta_t^{j+1}+\delta\eta^{j+2}, \quad
\delta\eta^j = \eta_t^{j-1} + d\eta^{j-2}. \] A substitution to the above formulas then gives \begin{eqnarray*} f^j(s,t) &=& \langle{\bf n}\omega^j,{\bf t}\eta_t^{j-1} +d{\bf t}\eta^{j-2} \rangle + \langle{\bf t}\omega^j,-{\bf n}\eta_t^{j+1} +d{\bf n}\eta^{j+2} \rangle \\ \noalign{\vskip6pt} & & -\langle{\bf t}\omega_s^{j-1}+d{\bf t}\omega^{j-2},{\bf n}\eta^j
\rangle -\langle-{\bf n}\omega_s^{j+1}+d{\bf n}\omega^{j+2},{\bf t}\eta^j
\rangle, \end{eqnarray*} where $d$ stands for the exterior derivative on $\p M$. Hence, $f^j$ is completely determined by the lateral Cauchy data. What is more, we have \beq\label{B-fo}
F^j(0,t)=F^j(s,0)=0,\quad F_s^j(0,t)=F_t^j(s,0)=0. \eeq Hence, we can solve $F(s,t)$ using (\ref{A-fomrmu}) and (\ref{B-fo}) as claimed.
$\Box$
{\bf Remark 3.} If $\omega$ and $\eta$ are solutions to Maxwell's equations (\ref{apu1}-\ref{apu2}), the formulas above simplify. We have \[
f^0(s,t) = f^3(s,t) = 0, \] and \begin{eqnarray*}
f^1(s,t) = \langle {\bf n}\omega^2_s,{\bf t}\eta^1\rangle - \langle {\bf t}\omega^1,{\bf n}\eta^2_t\rangle,\quad
f^2(s,t) = \langle {\bf n}\omega^2,{\bf t}\eta^1_t\rangle - \langle {\bf t}\omega^1_s,{\bf n}\eta^2\rangle. \end{eqnarray*} Then, for $j=1$ the inner product $ (\omega^1(t),\omega^1(t))_{L^2}$
defines the energy of the electric field. Similarly, for $j=2$ the inner product $ (\omega^2(t),\omega^2(t))_{L^2}$ defines the energy of the magnetic field.
\section{Inverse problem}
The main objective of this chapter is to prove the following uniqueness result for the inverse boundary value problem.
\begin{theorem}\label{ip} Given $\partial M$ and the admittance map ${\mathcal Z}^T$, $T>8 \diam(M)$, for Maxwell's equations (\ref{apu1})--(\ref{apu2}), it is possible to uniquely reconstruct the Riemannian manifold $(M,g)$ and the scalar wave impedance $\alpha$. \end{theorem}
Observe that, once we know the travel time metric $g$ as well as the wave impedance $\alpha$, formula (\ref{travel time metric}) gives the metrics $g_\mu$ and $g_\epsilon$, which correspond to the material parameters $\mu$ and $\epsilon$.
The proof of the above result is divided into several parts. The first step,
which is discussed in the next sections, is to prove necessary boundary controllability results. These results are used, in a similar fashion as in \cite{Ku1}, \cite{KKL}, to reconstruct the manifold and the travel time metric.
\subsection{Unique continuation results}
In the following lemma, we consider extensions of differential forms
outside the manifold $M$. Let $\Gamma\subset\partial M$ be open. Assume that $\widetilde M$ is an extension of $M$ across $\Gamma$, i.e. $M\subset\widetilde M$, $\Gamma\subset {\rm int}(\widetilde M)$ and $\partial M\setminus\Gamma\subset\partial \widetilde M$. Furthermore, we assume that the metric $g$ and impedance $\alpha$ are extended smoothly into $\widetilde M$ as $\widetilde{g}, \, \widetilde{\alpha}$. In this case, we say that the manifold with scalar impedance $(\widetilde M, \widetilde{g},\widetilde{\alpha})$ is an {\em extension of $(M,g,\alpha)$ across $\Gamma$.} (See Figure \ref{pic 1}).
\begin{figure}
\caption{Manifold $\widetilde M$ is obtained by gluing an ``ear'' to $M$.}
\label{pic 1}
\end{figure}
{\nottopapertext We have the following simple result.}
\begin{lemma} Assume that $\widetilde M$ is an extension of $M$ across an open set $\Gamma\subset \partial M$. Let $\omega^k$ be a $k$-form on $M$ and $\widetilde\omega^k$ be its extension by zero to $\widetilde M$. Then \begin{enumerate} \item If $\omega^k\in H(d,\Omega^k M)$ and
${\bf t}\omega^k\big|_\Gamma=0$, then $\widetilde \omega^k\in H(d,\Omega^k\widetilde M)$. \item If $\omega^k\in H(\delta, \Omega^k M)$ and
${\bf n}\omega^k\big|_\Gamma=0$, then $\widetilde \omega^k\in H(\delta,\Omega^k\widetilde M)$. \end{enumerate} \end{lemma}
{\nottopapertext {\em Proof:}
The exterior derivative of $\widetilde\omega^k$, in the sense of distributions, is defined by \[
(d\widetilde\omega^k,\varphi^{k+1})_{L^2}
=(\widetilde\omega^k,\delta \varphi^{k+1})_{L^2}, \] where $\varphi^{k+1}\in \Omega^{k+1}\widetilde M^{{\rm int}}$ is arbitrary. However, by the formula (\ref{stokes1}), \[
(\widetilde\omega^k,\delta\varphi^{k+1})_{L^2(\widetilde M)}=
(\omega^k,\delta\varphi^{k+1})_{L^2(M)}= (d\omega^k,\varphi^{k+1})_{L^2(M)} -
\langle{\bf t}\omega^k,{\bf n}\varphi^{k+1}\rangle. \] Moreover, since $ {\rm supp}\,(\varphi^{k+1}) \subset \subset \widetilde M^{{\rm int}}$, then ${\rm supp}({\bf n}\varphi^{k+1}) \subset \Gamma$, where ${\bf t}\omega^k$ vanishes. Thus, \[ (\widetilde\omega^k,\delta\varphi^{k+1})_{L^2(\widetilde M)} = (d\omega^k,\varphi^{k+1})_{L^2}, \] i.e.
$d\widetilde\omega^k$ is the zero extension of $d\omega^k$. In particular, $d\widetilde\omega^k \in L^2(\widetilde M)$, so that $\widetilde \omega^k\in H(d,\Omega^k\widetilde M)$.
The claim concerning the codifferential is proved by a similar argument.
$\Box$ }
As a consequence of this result, we obtain the following.
\begin{theorem}
Let $\omega\in C^1({\mathbb R},{\bf L}^2)\cap C({\mathbb R},{\bf H}), \, $ ${\bf t}\omega|_{\Gamma \times [0,T]}=0,
\, {\bf n}\omega|_{\Gamma \times [0,T]}=0$, be a solution of the equation $\omega_t+{\mathcal M}\omega =0$ in $M\times [0,T]$. Let $\widetilde\omega$ be its extension by zero across $\Gamma\subset \partial M$. Then the extended form, $\widetilde{\omega}(t)$ satisfies the complete Maxwell's system on $(\widetilde M, \widetilde g, \widetilde{\alpha})$, i.e. $\widetilde\omega_t+{\widetilde {\mathcal M}} \widetilde\omega=0$ in $\widetilde M\times [0,T]$. \end{theorem}
We are particularly interested in the solutions of Maxwell's equations. The following result is not directly needed but we have included it,
since the basic idea is useful when we will prove the main result of this section.
\begin{lemma} \label{2.12l} Assume that $\omega$ in the above theorem satisfies Maxwell's equations, i.e., $\omega^0=0$ and $\omega^3=0$, and $\omega(x,0)=0$. If ${\bf t}\omega^1=0$ and ${\bf n}\omega^2=0$ on $\Gamma\times [0,T]$, then $\omega$ satisfies Maxwell's equations in the extended domain $\widetilde M\times [0,T]$. \end{lemma}
{\em Proof:} From Theorem \ref{equivalence} it follows that, since $\omega$ satisfies Maxwell's equations, \[
{\bf t} \omega = (0,{\bf t}\omega^1,-\int_0^td{\bf t}\omega^1dt') =0,\quad
{\bf n} \omega =
(0,{\bf n}\omega^2,\int_0^td{\bf n}\omega^2 dt') =0 \] in $\Gamma\times[0,T]$. Therefore, the previous theorem shows that the continuation by zero across $\Gamma, \,$ $\widetilde \omega (t)$, satisfies the complete system in $\widetilde M\times [0,T]$.
However, $\widetilde \omega^0(t)=0$, $\widetilde \omega^3(t)=0$ in $\widetilde M\times [0,T],$ i.e., $\widetilde \omega(t)$ satisfies Maxwell's equations with vanishing initial data in the extended manifold $\widetilde M$.
$\Box$
When we deal with a general solution to Maxwell's equations, (\ref{apu1})--(\ref{apu2}), which may not satisfy zero initial conditions, and try to extend it by zero across $\Gamma$, the arguments of Lemma \ref{2.12l} fail. Indeed, if $\omega_0 \neq 0$, then (\ref{2.12.1}) shows that ${\bf n} \omega^2 =0$ is not sufficient for ${\bf n} \omega^1 =0$.
However, by differentiating with respect to time, the parasite term ${\bf n}\omega^1(0)$ vanishes. This is the motivation why, in the following theorem, we consider the time derivatives of the weak solutions.
Denote by $\tau(x,y)$ the geodesic distance between $x$ and $y$ on $(M,g)$. Let $\Gamma\subset\partial M$ be open and $T >0$.
We use the notation \[
K(\Gamma,T)=\{(x,t)\in M\times [0,2T]\mid
\tau(x,\Gamma)<T-|T-t|\} \] for the double cone of influence with base on the slice $t=T$ (see Figure \ref{pic 2}).
\begin{figure}
\caption{Double cone of influence.}
\label{pic 2}
\end{figure}
\begin{theorem}\label{UCP} Let $\omega(t)$ be a weak solution of Maxwell's system in the sense of Definition \ref{weak solution} with $\omega_0 =(0,\omega_0^1, \omega^2_0,0)$. Assume, in addition, that $\delta\omega^1_0=0$, $d\omega_0^2=0$ and $\rho =0$. If ${\bf n}\omega^2=0$ in $\Gamma\times]0,2T[$, then $\partial_t\omega=0$ in the double cone $K(\Gamma,T)$. \end{theorem}
{\em Proof:}
Let $\psi \in C^{\infty}_0([-1,1]), \, \int_{-1}^1 \psi(s)ds=1$ be a Friedrich's mollifier. Then, for any $\sigma >0$ and $\omega(t) \in C(]0,2T[, {\bf L}^2(M))$ satisfying conditions of the Theorem, denote by $\omega_{\sigma}(t)$ its time-regularization, \[ \omega_{\sigma} = \psi_{\sigma} * \omega, \quad \psi_{\sigma}(t) = (1/\sigma) \psi(t/\sigma). \] Then $\omega_{\sigma} \in C^{\infty}([\sigma, 2T-\sigma[, {\bf L}^2(M))$ continues to be a weak solution to the Maxwell system and, moreover, to Maxwell's equations (\ref{apu1})--(\ref{apu2}). Thus, \[ {\mathcal M} \omega_{\sigma} = -\partial _t \omega_{\sigma}
\in C^{\infty}([\sigma, 2T-\sigma[, {\bf L}^2(M)), \] i.e. $\omega_{\sigma} \in C^{\infty}([\sigma, 2T-\sigma[, {\mathcal D}({\mathcal M}_{{\rm e}}))$. Repeating these arguments, \[ \omega_{\sigma} \in C^{\infty}([\sigma, 2T-\sigma[, {\mathcal D}({\mathcal M}^{\infty}_{{\rm e}})), \] with $ {\mathcal D}({\mathcal M}^{\infty}_{{\rm e}})= \bigcap _{N>1}
{\mathcal D}({\mathcal M}^N_{{\rm e}})$.
As ${\bf n} \omega_{\sigma} = \psi_{\sigma}* {\bf n} \omega$, \[ {\bf n} (\omega_{\sigma})^2 = 0 \quad {\rm on} \,\, \Gamma \times [\sigma, 2T-\sigma[. \] Applying (\ref{2.12.1}), we see that ${\bf n} \partial_t\omega_{\sigma} = 0$ on $\Gamma \times [\sigma, 2T-\sigma[.$
Denote by $\widetilde \omega$ the extension by zero of $\omega$ across $\Gamma$ and $\widetilde{\eta}_{\sigma}$ that of $\partial_t\omega_{\sigma}$. We claim that, in the distributional sense, $\widetilde{\eta}_{\sigma}$ satisfies the complete Maxwell system, for $\sigma<t<2T-\sigma$. Indeed, let $\varphi=(\varphi^0,\varphi^1, \varphi^2,\varphi^3)\in C^\infty_0(]\sigma, 2T-\sigma[, {\bf \Omega}\widetilde M^{{\rm int}})$ be a test form. Using the brackets $[\;\cdot\;,\;\cdot\;]$ to denote the distribution duality, that extends the inner product \[
[\psi,\phi]=\int_0^{2T}(\psi(t),\phi(t))_{{\bf L}^2(\widetilde M)}dt, \] we have \begin{eqnarray*} [\partial_t\widetilde\eta_{\sigma}+{\widetilde {\mathcal M}}\widetilde\eta_{\sigma},\varphi] &=&
-[\widetilde\eta_{\sigma},{\widetilde {\mathcal M}}\varphi +\varphi_t] \\ \noalign{\vskip6pt} &=& [\widetilde\omega_{\sigma}, {\widetilde {\mathcal M}}(\varphi_t) + \varphi_{tt}] \\ \noalign{\vskip6pt}
&=& [\omega_{\sigma},
{\mathcal M}(\varphi_t) + \varphi_{tt}]. \end{eqnarray*} As ${\bf t} \omega_{\sigma} =0$, it follows from Stokes' theorem and the fact that $\omega_{\sigma}$ satisfies Maxwell's equations, that \begin{eqnarray*} [\omega_{\sigma}, {\mathcal M}\varphi_t + \varphi_{tt}] =
\int_0^{2T}(\omega_{\sigma},{\mathcal M} \varphi_t + \varphi_{tt})_{{\bf L}^2(M)} dt = \int_0^{2T}\langle{\bf n}\omega_{\sigma}, {\bf t}\varphi_t\rangle dt. \end{eqnarray*} As ${\rm supp}({\bf t} \varphi) \subset \Gamma \times ]\sigma, 2T-\sigma[$, where ${\bf n} \omega_{\sigma} =0$, the right side of this equation equals $0$. In addition, $\widetilde{{\bf t}} \widetilde \omega_{\sigma} =0$ for $ t \in ]\sigma, 2T-\sigma[$, where $\widetilde{{\bf t}}$ is the tangential component on $\partial \widetilde M$. Thus, the claim follows.
However, $
\widetilde\eta_{\sigma}\in C^\infty(]\sigma,2T-\sigma[,{\bf L}^2(\widetilde M)). $ Therefore, similar considerations to the above show that \[ \widetilde \eta_{\sigma} \in C^{\infty}([\sigma, 2T-\sigma[, {\mathcal D}({\mathcal M}^{\infty}_{{\rm e}})), \] i.e. $\widetilde \eta_{\sigma} $ is infinitely smooth in $\widetilde M^{{\rm int}} \times [\sigma, 2T-\sigma[$. Since $\widetilde\eta_{\sigma}=0$ outside $M\times {\mathbb R}$, the unique continuation result of Eller-Isakov-Nakamura-Tataru \cite{EIsNkTa}, which is based on a result of Tataru \cite{Ta1},\cite{Ta3}
for smooth solutions, implies that $\widetilde\eta_{\sigma}=0$ in the double cone $
\widetilde{\tau}(x,\widetilde M \setminus M)<T - \sigma-|T-t|, \quad x \in \widetilde M, $ where $ \widetilde{\tau}$ is the distance on $(\widetilde M, \widetilde g)$. As $\widetilde\eta_{\sigma}= \partial_t \omega_{\sigma}$ in $M$, this implies that $\partial_t \omega_{\sigma}=0$ in the double cone \beq \label{3.12.1}
\tau(x,\Gamma)<T - \sigma-|T-t|, \quad x \in M. \eeq When $\sigma \to 0$, $\widetilde\eta_{\sigma}\to\partial_t \omega$, in the distributional sense, while the cone (\ref{3.12.1}) tends to $K(\Gamma,T)$ and the claim of the theorem follows.
$\Box$
We note that the unique continuation result of \cite{EIsNkTa} is related to scalar $\epsilon$, $\mu$. However, it is easily generalized to the scalar impedance case due to the single velocity of the wave propagation.
Following the proof of Theorem \ref{UCP}, we can show the following variant of Theorem \ref{equivalence}.
\begin{corollary}\label{1.7+2.5} Let $\omega(t)$ be a weak solution to the complete Maxwell system in the sense of definition \ref{weak solution}, with $\rho=0$, and, in addition, (\ref{n-data})
on $\Gamma \times ]0,T[.$ If $T> 2\,\hbox{diam}(M)$, then $\omega^0(t) =0, \, \omega^3(t)=0$ and $\omega(t)$ is a solution of Maxwell's system for $0<t<T$. \end{corollary}
{\em Proof:} We will consider only $\omega^0$ using the Maxwell duality for $\omega^3$.
By remark 1 and (\ref{t-data}), \beq \label{wave}
\omega^0_{tt} +\delta d \omega^0=0, \quad {\bf t}\omega^0|_{\partial M \times [0,T]} =0. \eeq Also \[ \omega^1_t +d\omega^0 -\delta \omega^2 =0, \] implies, together with (\ref{n-data}), that \[ {\bf n} d \omega^0 = {\bf n} \delta \omega^2 - {\bf n} \omega^1_t= d{\bf n} \omega^2- {\bf n} \omega^1_t =0 \] on $\Gamma \times [0,T]$. Together with the boundary condition in (\ref{wave}), this shows that the lateral Cauchy data of $\omega^0(t)$ vanishes on $\Gamma \times [0,T]$. Using now the wave equation in (\ref{wave}), this implies that, due to Tataru's unique continuation \cite{Ta1}, \cite{Ta3}, $\omega^0 = 0$ in the double cone $K(\Gamma,T)$. As $T > 2\hbox{diam}(M)$, this yields that $\omega^0(T/2) = \omega_t^0(T/2)=0$. It now follows from (\ref{wave}) that
$\omega^0(t) =0$ for $0<t<T$.
$\Box$
\subsection{Introduction for controllability}
In this section we derive the controllability results for the Maxwell system. We divide these results in {\em local results}, i.e., controllability of the solutions at short times and in {\em global results}, where the time of control is long enough so that the controlled electromagnetic waves fill the whole manifold. Both types of results are based on the unique continuation of Theorem \ref{UCP} and representation of inner products of electromagnetic fields over $M$, in a time slice,
in terms of integrals of the lateral Cauchy data over the boundary $\partial M$ over a time interval which is given by Theorem \ref{blacho}.
Consider the initial boundary value problem \begin{equation}\label{maxwell eq}
\omega_t +{\mathcal M}\omega =0,
\quad t>0, \end{equation} with the initial data $\omega(0)=0$ and the electric boundary data of Maxwell type, \begin{equation}\label{maxwell data}
{\bf t}\omega = (0,f,-\int_0^tdf(t')dt'), \end{equation} where we assume that $f\in C^\infty_0 ({\mathbb R}_+, \Omega^1\partial M)$. By Theorem \ref{equivalence}, we know that $\omega^0(t)=0$ and $\omega^3(t)=0$.
Let $\widetilde\omega$ denote the weak solution of Definition \ref{weak solution} with $\rho=0$ and $\widetilde \omega(T)=\omega_0$. Assume, in addition, that the conditions of Lemma \ref{weak is maxwell} are satisfied so that $\widetilde\omega$ satisfies also Maxwell's equations.
As we have seen, Stokes formula implies the identity \begin{equation}\label{control identity} (\omega(T),\omega_0) =-\int_0^T\langle{\bf t}\omega,{\bf n}\widetilde\omega \rangle dt. \end{equation} We refer to this identity as the {\em control identity} in the sequel.
\subsection{Local controllability}\label{local controllability section}
In this section, we study differential $1-$forms in $M$
that can be generated
by using appropriate boundary sources active for short periods of time. Instead of a complete characterization of these forms, we show that there is a large enough subspace in $ L^2(\Omega^1M)$ that can be produced by boundary sources. The difficulty that prevents a complete characterization is related to the topology of the domain of influence, which can be very complicated.
Let $\Gamma\subset\partial M$ be an open subset of the boundary and $T>0$ arbitrary. We define the {\em domain of influence} as \[
M(\Gamma,T)=\{x\in M\mid \tau(x,\Gamma)< T\}, \] where $\tau$ is the distance with respect to the travel time metric $g$. Observe that $M(\Gamma,T) = K(\Gamma,T) \cap \{t=T\}.$
Furthermore, let $\omega$ be the strong solution of the initial-boundary value problem \[
\omega_t+{\mathcal M}\omega =0,\quad \omega(0)=0, \] with the boundary value \[
{\bf t}\omega = (0,f,-\int_0^t df(t')dt'), \] where $f\in C^\infty_0(]0,T[,\Omega^1\Gamma)$ with $C^\infty_0(]0,T[,\Omega^1\Gamma)$ being a subspace of forms in $C^\infty_0(]0,T[,\Omega^1\partial M)$ with support in $\Gamma$. To emphasize the dependence of $\omega$ on $f$, we write occasionally \[ \omega=\omega^f = (0,(\omega^f)^1,(\omega^f)^2,0). \] We denote \beq \label{3.12.2}
X(\Gamma,T)={\rm cl}_{L^2}\{ (\omega^f)^1(T)\mid f\in
C^\infty_0(]0,T[, \Omega^1\Gamma )\}, \eeq i.e., $X(\Gamma,T)$ is the $L^2$--closure of the set of the electric fields that are generated by $C^\infty_0$--boundary sources on $\Gamma\times ]0,T[$. Furthermore, we use the notation \[ H(\delta,M(\Gamma,T)) =\{ \omega^2\in H( \delta,M),\;{\rm supp}\,(\omega^2) \subset \overline{M(\Gamma,T)}\}. \] We will prove the following result.
\begin{theorem}\label{local control th} The set $X(\Gamma,T)$ satisfies \[
\delta H^1_0(\Omega^2M(\Gamma,T))\subset
X(\Gamma,T)\subset{\rm cl}_{L^2}\bigg(\delta H( \delta,M(\Gamma,T) )\bigg). \]
\end{theorem}
Here $H^1_0(\Omega^2S), \, S \subset M$ is a subspace of $H^1_0(\Omega^2M)$ of forms with support in ${\rm cl}(S)$.
{\em Proof:} The right inclusion is straightforward: Since $\omega$ satisfies Maxwell's equations, we have $\omega^0(t) = 0$ and, hence, \[
\omega^1(T) = \int_0^T\delta\omega^2dt \in\delta H(\delta,M(\Gamma,T)). \]
To prove the left inclusion, we show that any field of the form $\nu^1 =\delta\eta^2$ with $\eta^2 \in H^1_0(\Omega^2M(\Gamma,T))$ is in $(X(\Gamma,T)^\perp) ^\perp$. To this end, let us first assume that $\omega_0^1\in L^2(\Omega^1M)$ is a 1--form such that \[
(\omega_0^1,\omega^1)_{L^2}=0 \] for all $\omega^1=(\omega^f(T))^1$ generated by boundary sources $f\in C^\infty_0(]0,T[,\Omega^1\Gamma)$. Since $\omega^1=\delta\omega^2$, it suffices to consider only those forms $\omega_0^1$ that are of the form $\omega_0^1=\delta \eta^2_0$ for some $\eta^2_0\in H(\delta)$. Indeed, by Hodge decomposition (see \cite{Sc}) in ${\bf L}^2(M)$, we have \ba \omega^1_0=\widehat \omega^1_0+\delta \eta^2_0, \ea where $d\widehat \omega^1_0=0$, ${\bf t} \widehat \omega^1_0=0$ so $\widehat \omega^1_0\perp \omega^1$ automatically.
Let $\widetilde\omega$ be a weak solution, at the time interval $[0,T]$, of the initial boundary value problem (\ref{ibvp}) with ${\bf t}\widetilde\omega=0$, and \[
\widetilde\omega(\;\cdot\;,T)= (0,\omega_0^1,0,0)=\omega_0. \] By our assumption, \[
(\omega(T),\omega_0)_{{\bf L}^2}
= (\omega^1(T),\omega_0^1)_{L^2} =0, \] and thus, by the control identity, (\ref{control identity}) and conditions (\ref{t-data}), (\ref{n-data}), \[
\int_0^T\langle {\bf t} \omega,{\bf n}\widetilde\omega\rangle
=\int_0^T\langle {\bf t}\omega^1,{\bf n}\widetilde\omega^2 \rangle = \int_0^T\langle f,{\bf n}\widetilde\omega^2 \rangle=0, \] for all differential 1-forms $f \in C^\infty_0(]0,T[, \Omega^1\Gamma )$. Thus, we have \[
{\bf n}\widetilde\omega^2=0 \mbox{ on $\Gamma \times ]0,T[$}. \] Furthermore, it is easy to see that, for $T+t\in [T,2T]$, we have \[
\widetilde\omega(T+t)=(0,\widetilde\omega^1(T-t),
-\widetilde\omega^2(T-t),0), \] and, therefore, also \[
{\bf n}\widetilde\omega^2=0 \mbox{ on $\Gamma \times ]T,2T[$}. \] But this implies that, as a distribution, ${\bf n}\widetilde\omega^2$ vanishes on the whole interval $]0,2T[$
since it is in $L^2_{\rm loc}({\mathbb R},H^{-1/2}(\partial M))$. By applying Theorem \ref{UCP}, we can deduce that $\widetilde\omega_t=0$ in the double cone $K(\Gamma,T)$. In particular, we have that $d\omega^1_0 = \widetilde{\omega}^2_t(T)=0$ in $M(\Gamma,T)$.
Let now $\nu^1=\delta\eta^2 \in\delta H^1_0(\Omega^2M(\Gamma,T))$. Then \[
(\nu^1,\omega^1_0)_{L^2}
= (\eta^2,d\omega^1_0)_{L^2}=0. \] This holds for arbitrary $\omega_0^1\in X(\Gamma,T)^\perp$, i.e., $\nu^1 \in (X(\Gamma,T)^\perp)^\perp =X(\Gamma,T)$.
$\Box$
{\bf Remark 4.} Later in this work, we are mainly interested in controlling the time derivatives of
electromagnetic fields. Let us denote \[
\Cnull(\Gamma, T)= \{\int_0^t f(t')dt'\mid f\in C^\infty_0(]0,T[, \Omega^1\Gamma) \}. \] With this notation, we have \[
X(\Gamma, T) = {\rm cl}_{L^2}\{(\omega^f_t(T))^1\mid f\in\Cnull(\Gamma,T)\}. \] Indeed, if $\omega^1 =(\omega^f)^1\in X(\Gamma,T)$, then $(\omega^f)^1 = (\omega^F_t)^1$, where \[
F(t)=\int_0^t f(t')dt'. \] Conversely, the time derivative of a field $\omega^f$, $f\in \Cnull(\Gamma,T)$ satisfies the initial-boundary value problem with the boundary source $f_t\in C_0^\infty(]0,T[, \Omega^1\Gamma)$.
\subsection{Global controllability}
We start by introducing some notations. Let $\omega$ be the strong solution of the initial-boundary value problem \[
\omega_t+{\mathcal M}\omega =0,\quad \omega(0)=0, \] with the boundary value \[
{\bf t}\omega = (0,f,-\int_0^t df(t')dt'), \] where $f\in C^\infty_0(]0,T_0[, \Omega^1\Gamma)$, $T_0>0$ and $\Gamma\subset\partial M$ is an open subset.
For $T\geq T_0$, we define \beq \label{3.12.3}
Y(\Gamma,T) = \{\omega^f_t(T)\mid
f\in C^\infty_0(]0,T_0[, \Omega^1\Gamma)\}. \eeq
For $\Gamma=\partial M$ we denote $Y(T)=Y(\partial M,T)$. Our objective is to give a characterization of the set $Y(T)$ for $T_0$ large enough.
In the following, \beq \label{3.12.4} {\rm rad}(M)=\max_{x\in M} \tau(x,\p M). \eeq
We prove the following result.
\begin{theorem}\label{global control th} Assume that $T_0> 2{\rm rad}(M). $ Then, for $T\geq T_0$, ${\rm cl}_{{\bf L}^2(M)}Y(T)$ is independent of $T$, i.e. ${\rm cl}_{{\bf L}^2(M)}Y(T)=Y$, and, moreover, \beq \label{3.12.5} Y = \{0\}\times\delta H(\delta)\times
d\Hnull(d)\times\{0\}. \eeq \end{theorem}
\noindent {\bf Remark 5.} The result holds also for $Y$ replaced with $Y(\Gamma,T)$, when \ba T_0>2\max_{x\in M} \tau(x,\Gamma). \ea
{\em Proof:} Let $\omega=\omega^f$ be a solution. As $f=0$ for $T\geq T_0$, we have \[
{\bf t}\omega^1(T)=0, \] and, consequently, \begin{eqnarray*}
\omega_t(T) &=& -{\mathcal M}\omega(T)
= (0,\delta\omega^2(T),-d\omega^1(T),0) \\ \noalign{\vskip4pt} &\in&\{0\}\times\delta H(\delta)\times
d\Hnull (d)\times\{0\}. \end{eqnarray*} To prove the converse inclusion, we show that the space $Y(T)$ is dense in $\{0\}\times\delta H(\delta)\times
d\Hnull (d)\times\{0\}$. To this end, let $\omega_0 \in\{0\}\times\delta H(\delta)\times
d\Hnull (d)\times\{0\}$ and
$\omega_0\perp Y(T)$. This means that, for arbitrary $\omega=\omega^f$ satisfying the initial-boundary value problem (\ref{complete}), \begin{equation}\label{orthogonality} (\omega_0,\omega_t(T))_{{\bf L}^2} =(\omega_0^1,\omega_t^1(T))_{L^2} +(\omega_0^2,\omega_t^2(T))_{L^2} =0. \end{equation}
Let $\widetilde{\omega}$ denote the weak solution of the problem \ba & & \widetilde{\omega}_t + {\mathcal M}\widetilde{\omega} =0, \\ & &
{\bf t} \widetilde{\omega} =0,\quad \widetilde{\omega}(T)=\omega_0. \ea Observe that the initial value $\omega_0$ satisfies \[
\delta\omega_0^1 =0,\quad d\omega_0^2 =0, \] which implies that
$\widetilde{\omega}$ satisfies Maxwell's equations. Consider the function $F:{\mathbb R}\to{\mathbb R}$, \[
F(t) = (\widetilde{\omega}(t),\omega_t(t))_{{\bf L}^2}. \] We have, by using Maxwell's equations, that \begin{eqnarray*}
F_t(t)&=& (\widetilde{\omega},\omega_{tt})_{{\bf L}^2} + (\widetilde{\omega}_t, \omega_t)_{{\bf L}^2} \\ \noalign{\vskip4pt}
&=& -(\widetilde{\omega}^1,\delta d\omega^1)_{L^2} - (\widetilde{\omega}^2,
d\delta\omega^2)_{L^2} +(d\widetilde{\omega}^1,d\omega^1)_{L^2} + (\delta \widetilde{\omega}^2,\delta\omega^2)_{L^2}, \end{eqnarray*} and further, by using Stokes' theorem, \[
F_t(t) = -\langle{\bf t} \widetilde{\omega}^1(t),{\bf n} d\omega^1(t)\rangle
-\langle{\bf n} \widetilde{\omega}^2(t),{\bf t}\delta\omega^2(t)\rangle. \] However, ${{\bf t}}\widetilde \omega =0$ and $\delta \omega^2 =\omega^1_t$. Thus, \[
F_t(t) = -\langle{\bf n} \widetilde{\omega}^2,{\bf t}\omega^1_t\rangle
=-\langle{\bf n} \widetilde{\omega}^2,f_t\rangle. \] On the other hand, the initial condition $\omega(0)=0$, together with the orthogonality condition (\ref{orthogonality}), imply that $F(0)=F(T)=0$, so that \[
\int_0^T\langle{\bf n} \widetilde{\omega}^2,f_t\rangle dt = -\int_0^T
F_t(t)dt =0. \] Since
$\, f \in C^{\infty}_0(]0,T[, \Omega^1 \Gamma)$ is arbitrary, this implies
that \[
{\bf n} \widetilde{\omega}^2_t =0 \mbox{ in $\Gamma\times]0,T[$}. \] But now Theorem \ref{UCP} implies that $\widetilde{\omega}_{tt}$ vanishes in the double cone $K(\Gamma, T/2)$. By the assumption $T_0> 2{\rm rad}(M)$, this double cone contains a cylinder \[
C = M\times]T/2-s,T/2+s[ \] \begin{figure}
\caption{The double cone contains a slice $\{T/2\}\times M$ and the waves vanish near the slice $t=T/2$.}
\label{pic 3}
\end{figure} with some $s>0$ (see Figure \ref{pic 3}). Moreover, $\widetilde{\omega}_{tt}$ satisfies Maxwell's equations and the
homogeneous boundary condition ${\bf t} \widetilde{\omega}_{tt}=0$, and it vanishes in the cylinder $C$. Therefore, it vanishes in the whole $M\times{\mathbb R}$. In particular, this means that, with some time-independent forms $\omega_1$ and $\omega_2$, \[
\widetilde{\omega}(t)=\omega_1 + t\omega_2, \] with ${\bf t}\omega_1=0$, ${\bf t}\omega_2=0$. Again, by Maxwell's equations, we have \[
\omega_2 = \widetilde{\omega}_t ={\mathcal M}\omega_1 +
t{\mathcal M}\omega_2, \] for all $t$. Therefore, \[
\omega_2 ={\mathcal M}\omega_1,\quad {\mathcal M}\omega_2 =0. \] But then, Stokes' theorem implies that \[
(\omega_2,\omega_2)_{{\bf L}^2} = (\omega_2,{\mathcal M}\omega_1)_{{\bf L}^2}
= -({\mathcal M}\omega_2,\omega_1)_{{\bf L}^2}=0, \] i.e., $\omega_2=0$ and ${\mathcal M}\omega_1=0$. Observe that, by the assumption of the Theorem, \[
\omega_1 = \widetilde{\omega}(T)=\omega_0 = (0,-\delta\nu^2, d\nu^1,0)= {\mathcal M}\nu, \] for some $\nu\in \{0\}\times \Hnull (d)\times H(\delta) \times\{0\}$. Therefore, a further application of Stokes theorem gives \[ (\omega_1,\omega_1)_{{\bf L}^2} = (\omega_1,{\mathcal M}\nu)_{{\bf L}^2}
= -({\mathcal M}\omega_1,\nu)_{{\bf L}^2}=0, \] i.e., also $\omega_1=\omega_0=0$. The proof is therefore complete.
$\Box$
\subsection{Generalized sources}\label{generalized sources section}
So far, we have treated only smooth boundary sources and the corresponding fields. For later use, we need more general boundary sources.
Let $Y={\rm cl}_{{\bf L}^2(M)}Y(\partial M,T)$ be the space of the time derivatives of electromagnetic fields satisfying Maxwell's equations, see (\ref{3.12.3}), (\ref{3.12.5}). We define the wave operator \[
W^T:C^\infty_0(]0,T[, \Omega^1\partial M )\to Y, \quad f\mapsto \omega_t^f(T), \] where $T\geq T_0$ and $T_0>2\,{\rm rad}\,(M)$. By means of the wave operator, we define the ${\mathcal F}-$norm on the space of boundary sources as \begin{equation}\label{f norm}
\|f\|_{{\mathcal F}} = \|W^T f\|_{{\bf L}^2}. \end{equation} The definition of this norm is independent of the choice of $T\geq T_0$ by conservation of energy.
Notice that by Theorem \ref{blacho}, the knowledge of the admittance map ${\mathcal Z}^{2T}$ enables us to calculate explicitly the ${\mathcal F}$--norm of any smooth boundary source.
To complete the space of boundary sources, let us define the equivalence $\sim$ of sources by setting \[
f\sim g\mbox{ iff $W^Tf=W^Tg$}. \] Further, we define the space ${\mathcal F}([0,T_0])$ as \[
{\mathcal F}([0,T_0])= C^\infty_0(]0,T[, \Omega^1\partial M )/\sim. \] Finally, we complete ${\mathcal F}([0,T_0])$ with respect to the norm (\ref{f norm}). Hence, this space, denoted by ${\overline{\mathcal F}}([0,T_0])$ consists of Cauchy sequences with respect to the norm (\ref{f norm}), denoted as \[
\widehat f = (f_j)_{j=0}^\infty,\quad f_j\in C^\infty_0(]0,T[, \Omega^1\partial M ). \] Note that, for any $\widehat f\in \overline {\mathcal F}$, we can find $\widehat h\in \overline {\mathcal F}$ such that
$\widehat h=\widehat f$ and $\widehat h=(h_j)_{j=1}^\infty$,
$h_j\in C^\infty_0(]\e,T[, \Omega^1\partial M )$ for some $\e>0$. The reason for this is that Theorem \ref{global control th} is valid also with $T_0$ replaced with $T_0-\e$, when $\e$ is small enough. Thus, for small $\e>0$, we can define, for any $\widehat f= (f_j)_{j=0}^\infty\in \overline {\mathcal F}$, the translation
$\widehat f(\cdotp+\e)=(f_j(\cdotp+\e))_{j=0}^\infty\in \overline {\mathcal F}$.
These sources are called {\em generalized sources} in the sequel. The corresponding electromagnetic waves are denoted as \beq \label{gener}
\omega_t^{\widehat f}(t) = \lim_{j\to \infty}\omega_t^{f_j}(t)\quad \hbox{for }t\geq T_0. \eeq By the isometry of the wave operator, the above limit exists in ${\bf L}^2$ for all generalized sources.
We note that the above construction of the space of generalized sources is well-known in PDE-control, e.g. \cite{Ru}, \cite{LTr}.
{\bf Remark 6.} Observe that since the wave operator $W^T$ is an isometry and ${\overline{\mathcal F}}([0,T_0])$ was defined by closing $C^\infty_0(]0,T[, \Omega^1\partial M )$ with respect to the norm (\ref{f norm}), the wave operator extends to a one-to-one isometry \[
\widehat f\mapsto \omega_t^{\widehat f}(T),\quad {\overline {\mathcal F}}([0,T_0])\to {\rm cl}_{{\bf L}^2}(Y(\p M,T)), \] where the target space is completely characterized in the previous section.
We say that ${\hat h}\in \overline {\mathcal F}$ is a generalized time derivative of ${\hat f}\in \overline {\mathcal F}$, if for $T=T_0$, \beq\label{gen derivative} & &\lim_{\sigma\to 0+}
||\frac{{\hat f}(\cdotp+\sigma)-{\hat f}(\cdotp)}\sigma-{\hat h}||_{\overline {\mathcal F}}= \\ \nonumber &=&\lim_{\sigma\to 0+}
||\frac{\omega^{\hat f}_t(T+\sigma)-\omega^{\hat f}_t(T)}\sigma-\omega^{\hat h}_t(T)||_ {{\bf L}^2(M)}=0 \eeq In this case we denote ${\hat h}={\mathbb D} {\hat f}$, or just ${\hat h}=\p_t{\hat f}$. In the following, we use spaces $ {\mathcal F}^s={\mathcal D}({\mathbb D}^s),\ s \in \Bbb{Z}_+$, which are spaces of generalized sources that have $s$ generalized derivatives. Note that, if (\ref{gen derivative}) is valid for $T=T_0$, it is valid for all $T \geq T_0$ due to the conservation of $L^2$-norm for Maxwell's equation (energy conservation). Thus, if ${\hat f}\in {\mathcal F}^s$, we have, for $T\geq T_0$, \beq\label{note 2}
{\mathcal M}^s \omega^{\hat f}_t(T)=\p_t^s \omega^{\hat f}_t(t)|_{t=T}\in {\bf L}^2(M). \eeq Note that ${\mathcal M}$ here is the differential expression given by (\ref{M}), rather than an operator with some boundary conditions. Since ${\bf t} \omega^{\hat f}_t=0$ on $\p M\times ]T_0,\infty[$, we see that ${\bf t} (\p_t^j\omega^{\hat f}_t)(t)=0$ for $t>T_0$ and $j\leq s-1$. Thus, for ${\hat f}\in {\mathcal F}^s$ and $T\geq T_0$, \beq\label{smoothness of gen. wave} \omega^{\hat f}_t\in \bigcap_{j=0}^s (C^{s-j}([T,\infty[,{\mathcal D}({\mathcal M}_e^j)) \cap \hbox{Ran}\,({\mathcal M}_e)). \eeq Moreover, by (\ref{smoothness of gen. wave}) and Lemma \ref{lem 2.4}, \ba \omega^{\hat f}_t(T)\in {\bf H}^s_{loc}(M^{int})\quad\hbox{for }T\geq T_0. \ea
Next we consider dual spaces to the domains of powers of ${\mathcal M}_e$. Since ${\bf H}_0^s\subset {\mathcal D}({\mathcal M}_{\rm e}^s)$, we have $({\mathcal D}({\mathcal M}_{\rm e}^s))'\subset {\bf H}^{-s}$. Similarly, we see that
$ {\bf H}^{-s}_0\subset ({\mathcal D}({\mathcal M}_{\rm e}^s))'$. These facts will be needed later in the construction of focusing sources.
\subsection{Reconstruction of the manifold}
In this section we will show how to
determine the manifold, $M$ and the travel time metric, $g$ from the boundary measurements of the admittance map ${\mathcal Z}$. We will show that the boundary data determines the set of {\em boundary distance functions}. The basic idea is to use a slicing principle, when we control the supports of the waves generated by boundary sources.
We start by fixing certain notations. Let times $T_0<T_1<T_2$ satisfy \[
T_0>2\,{\rm rad}(M),\quad T_1\geq T_0+{\rm diam}(M), \quad T_2\geq 2 \,T_1. \] We assume in this section that the admittance map ${\mathcal Z}^{T_2}$ is known.
\begin{figure}
\caption{The sources $\widehat f$ of the waves $\omega^{\hat f}(x,t)$ are supported on the time-interval $[0,T_0]$ which enables us to control the waves at times $t>T_0$. In the construction of the manifold, the supports of the waves are considered at time $t=T_1$. To this end, we use the unique continuation in double-cones (triangle in the figure) intersecting the boundary in the layer $\p M\times [T_0,T_1]$. Note that in this layer it is crucial that $\widehat f=0$. }
\label{pic 4}
\end{figure}
Let $\Gamma_j\subset\partial M$ be open disjoint sets, $1\leq j\leq J$ and $\tau_j^-$ and $\tau_j^+$ be positive times with \[
0<\tau_j^-<\tau_j^+\leq {\rm diam}(M),
\quad 1\leq j\leq J. \] We define the set $S = S(\{\Gamma_j,\tau_j^-, \tau_j^+\})\subset M$ as an intersection of slices, \begin{equation}\label{def of S}
S =\bigcap_{j=1}^J \left( M(\Gamma_j,\tau_j^+)\setminus M(\Gamma_j,\tau_j^-) \right). \end{equation} Our first goal is to find out, by boundary measurements, whether the set $S$ contains an open ball or not. To this end, we give the following definition.
\begin{definition}\label{support sources} The set $Z = Z(\{\Gamma_j,\tau_j^-, \tau_j^+\})_{j=1}^J$ consists of those generalized sources $\widehat f\in {\mathcal F}^\infty([0,T_0])$ that produce waves $\omega_t=\omega^{{\hat f}}_t$
with \begin{enumerate} \item $\omega_t^1(T_1)\in X(\Gamma_j,\tau_j^+)$, for all $j$, $1\leq j\leq J$, \item $\omega_t^2(T_1) = 0$, \item $\omega_{tt}(T_1)=0$ in $M(\Gamma_j,\tau_j^-)$, for all $j$, $1\leq j\leq J$. \end{enumerate} \end{definition}
{\bf Remark 7.} Observe that, since $\omega_t$ satisfies Maxwell's equations, we have, in particular, \[
\omega_{tt}^2 = -d\omega_t^1,\quad
\omega_{tt}^1 =\delta\omega_t^2. \] These identities imply that, at $t=T_1$, $\omega_{tt}$ is of the form \[
\omega_{tt}(T_1)= (0,0,\omega_{tt}^2(T_1),0)
=(0,0,d\eta^1,0), \] for the 1--form $\eta^1=-\omega_t^1$, and \[
{\rm supp}(d \eta^1)\subset S. \] This observation is crucial later when we will discuss focusing waves.
The central tool for reconstruction the manifold is the following theorem.
\begin{figure}
\caption{In Definition \ref{support sources} we can consider e.g. the case $\Gamma_1=\Gamma$, $\tau^+_1=s_1$, $\tau^-_1=0$, and $\Gamma_2=\p M$, $\tau^+_2=\diam(M)$, $\tau^-_2=s_2$. Then the waves that satisfy the definition have the following properties: By 1., the wave $(\omega^{\hat f}_t)^1(T_1)$ coincides with a wave that is supported in $M(\Gamma,s_1)$. This domain of influence on the figure is the upper part of the cone of influence. Thus, $d(\omega^{\hat f}_t)^1(T_1)=(\omega^{\hat f}_{tt})^2(T_1)$
is supported in $M(\Gamma,s_1)$. By 3., the wave $(\omega^{\hat f}_{tt})^2(T_1)$ vanishes in the boundary layer $M(\p M,s_2)$. Combining these, we see that $\omega_{tt}^{\widehat f}(T_1)$ is supported in $A=M(\Gamma,s_1)\setminus M(\p M,s_2)$. }
\label{pic 5}
\end{figure}
\begin{theorem}\label{alternative} Let $S$ and $Z$ be defined as above. The following alternative holds: \begin{enumerate} \item If $S$ contains an open ball, then ${\rm dim}(Z)=\infty$, \item If $S$ does not contain an open ball, then $Z = \{0\}$. \end{enumerate} \end{theorem}
In order to prove the above alternative, we need the following observability result that will be also useful
later.
\begin{theorem}\label{is in Z th} Given the boundary map ${\mathcal Z}^{T_2}$, we can determine whether a given boundary source $\widehat f \in{\mathcal F}^\infty([0,T_0])$ is in the set $Z$ or not. \end{theorem}
{\em Proof:} Let $\widehat f=(f_k)_{k=0}^\infty \in {\mathcal F}^\infty([0,T_0])$ be a generalized source. Consider first the question whether $(\omega_t^{\widehat f})^1(T_1) \in X(\Gamma_j,\tau_j^+)$. By Remark 4, this is equivalent to the existence of a sequence, \[
\widehat h=(h_\ell)_{\ell=0}^\infty,\quad h_\ell \in\Cnull(\Gamma_j, \tau_j^+), \] such that \beq \label{20.11.1}
\lim_{k,\ell \to \infty} \| (\omega_t^{f_k})^1(T_1)
-(\omega_t^{h_\ell})^1(\tau_j^+)\| =0. \eeq By linearity of the initial-boundary value problem, we have \[
\| (\omega_t^{f_k})^1(T_1)
-(\omega_t^{h_\ell})^1(\tau_j^+)\| =\|(\omega^{g_{k,\ell}})^1(T_1)\|, \] where the source $g_{k,\ell}$ is \[
g_{k,\ell}(t) = (f_k)_t(t) - (h_\ell)_t(t+\tau_j^+ - T_1)\in C_0^\infty ( ]0,T_1[;\Omega^1 \p M). \] However,
by Theorem \ref{blacho}, $\|(\omega^{g_{k,\ell}})^1(T_1)\|$ is completely
determined by the admittance map, ${\mathcal Z}^{T_2}$, making it possible to verify (\ref{20.11.1}).
In a similar fashion, Condition 2 of the definition of $Z$ is valid for $\widehat f$, if \[
\lim_{k\to \infty}\|(\omega_t^{f_k}(T_1))^2\|= 0, \] and this condition can also be verified via the admittance map, ${\mathcal Z}^{T_2}$.
Finally, consider Condition 3. We assume here that we already know that $\widehat f$ satisfies Conditions 1--2. First, we observe that $\omega_{tt}=\omega_{tt}^{\widehat f}$ satisfies \[
(\partial_t+{\mathcal M})\omega_{tt}=0 \mbox{ in $M\times{\mathbb R}_+$}, \] along with the boundary condition \[
{\bf t}\omega_{tt} =0 \mbox{ in $\partial M\times [T_0,\infty[$}. \] If Condition 3 holds, by the finite propagation speed, $\omega_{tt}$ vanishes in a double cone around $\Gamma_j$, i.e., \[
\omega_{tt}=0\mbox{ in $K_j=\{(x,t)\in M\times{\mathbb R}_+\mid \tau(x,\Gamma_j)
+|t-T_1|<\tau_j^-\}$}, \] for all $j=1,\ldots,J$. In particular, this means that, in each $K_j$, $\omega_t$ does not depend on time, and Condition 2 implies that $\omega_t^2=0$ in $K_j$. Hence, we have \begin{equation}\label{cond 3}
{\bf n}\omega_t^2= {\mathcal Z^{T_2}f}=0\mbox{ on $\Gamma_j\times
]T_1-\tau_j^-,T_1+\tau_j^-[$}. \end{equation} Conversely, assume that condition (\ref{cond 3}) holds together with Conditions 1--2. Then $\omega_t$ satisfies \[
(\partial_t+{\mathcal M})\omega_t =0\mbox{ in $M\times{\mathbb R}_+$} \] with the boundary conditions \[
{\bf t}\omega_t^1=0,\quad {\bf n}\omega_t^2=0\mbox{ in
$\Gamma_j\times]T_1-\tau_j^-,T_1+\tau_j^-[$}. \] Here we used the fact that $T_1-\tau_j^->T_0$, so that $\widehat{f}=0$ in $\Gamma_j\times]T_1-\tau_j^-,T_1+\tau_j^-[$. Now the Unique Continuation Principle, given by Theorem \ref{UCP}, implies that $\omega_{tt}=0$ in $K_j$ and, in particular, Condition 3 is valid. The proof is complete as it is clear that the condition (\ref{cond 3}) is readily observable if the admittance map, ${\mathcal Z}^{T_2}, \, T_2 > T_1 + \tau _j,$
is known.
$\Box$
Now we can give the proof of Theorem \ref{alternative}.
{\em Proof of Theorem \ref{alternative}:} Assume that there is an open ball $B\subset S$. Let $0\neq\varphi\in \Omega^2 B$ be an arbitrary smooth 2--form with
${\rm supp}\,(\varphi)\subset B$. From the global controllability result, Theorem \ref{global control th}, it follows the existence of a generalized source $\widehat f \in \overline{{\mathcal F}([0,T_0])}$ such that \beq \label{20.11.2}
\omega^{\widehat f}_t(T_1)=(0,\delta\varphi,0,0). \eeq Moreover, $\varphi \in \Omega^2B$ implies that $\varphi \in {\mathcal D}({\mathcal M}_e^s)$ for any $s>0$ so that $\widehat f \in {\mathcal F}^{\infty}([0,T_0])$.
We will now show that $\widehat f\in Z$. Indeed, Conditions 1--2 are obvious from the definition (\ref{20.11.2}) of $\widehat{f}$.
Finally, we observe that \[
\omega^{\widehat f}_{tt}(T_1) = -{\mathcal M} \omega_t^{\widehat f}(T_1) = (0,0,-d\delta\varphi,0), \] so Condition 3 is also satisfied. This proves the first statement of the theorem.
To prove the second part, assume that $S$ does not contain an open ball. Suppose, on the contrary to the claim, that there is a non-vanishing source $\widehat f\in Z$ which produces the wave $\omega(t) =\omega^{\widehat f}(t)$. Then, by Conditions 1 and 2 in Definition \ref{support sources}, \[
{\rm supp}(\omega_t(T_1))\subset \bigcap_{j=1}^J M(\Gamma_j,\tau_j^+) = S^+. \] Furthermore, \[
\omega_{tt}(T_1)=-{\mathcal M}\omega_t(T_1), \] so that
\[
{\rm supp}(\omega_{tt}(T_1))\subset S^+. \] On the other hand, Condition 3 in Definition \ref{support sources} implies that \[
\omega_{tt}(T_1)=0\mbox{ in $\bigcup_{j=1}^J M(\Gamma_j,\tau_j^-)=S^-$.} \] Thus ${\rm supp}\omega_{tt}(T_1) \subset S^+\setminus S^-$. However, if the set $S$ does not contain an open ball, then the set $S^+\setminus S^-$ is nowhere dense. Since $\omega_{tt}(T_1)$ is smooth in $M^{{\rm int}}$, it
vanishes in $M$. In particular, Maxwell's equations imply that \beq\label{refe +}
d\omega_t^1(T_1)= -\omega_{tt}^2(T_1) =0. \eeq On the other hand, $\omega_t\in {\rm cl}_{{\bf L}^2}(Y(\partial M,T_1))$, so Theorem \ref{global control th} implies that $\omega_t^1(T_1)$ is of the form \[
\omega_t^1(T_1) = \delta\eta^2 , \] for some 2--form $\eta$. Since, in addition, ${\bf t}\omega_t^1(T_1)=0$ it then follows, by
Stokes formula, that \[
(\omega_t^1(T_1),\omega_t^1(T_1))_{L^2} = (\delta\eta^2,\omega_t^1(T_1))_{L^2}
=(\eta^2,d\omega^1_t(T_1))_{L^2} = 0. \] Together with Condition 2, this implies \[
\omega_t(T_1) =0, \] contradicting the assumption
$\widehat f \neq 0$. The proof is complete.
$\Box$
We are now ready to construct the set of the boundary distance functions. For each $x\in M$, the corresponding boundary distance function, $r_x$ is a continuous function on $\p M$ given by \[
r_x: \p M\to{\mathbb R}_+,\quad r_x(z)=\tau(x,z), \quad z \in \partial M. \] They define {\it the
boundary distance map} ${\mathcal R}:M\to C(\p M)$, ${\mathcal R}(x)=r_x$, which is continuous and injective (see \cite {Ku5}, \cite{KKL}). We shall denote the set of all boundary distance functions, i.e., the image of ${\mathcal R}$, by \[
{\mathcal R}(M)=\{r_x\in C(\partial M)\mid x\in M\}. \] It can be shown (see \cite{Ku5}, \cite{KKL})
that, given
${\mathcal R}(M) \subset L^\infty(\partial M)$ we can endow it, in a natural way,
with a differentiable structure and a metric tensor $\widetilde g$, so that $({\mathcal R}(M),\widetilde g)$ becomes an isometric copy of $(M,g)$, \[
({\mathcal R}(M),\widetilde g)\cong (M,g). \] Hence, in order to reconstruct the manifold (or more precisely, the isometry type of the manifold),
it suffices to determine the set, ${\mathcal R}(M),$ of the boundary distance functions. The following result is therefore crucial.
\begin{theorem} Let the admittance map
${\mathcal Z}^{T_2}$ be given. Then, for any $h\in C(\partial M)$, we can find out whether $h\in{\mathcal R}(M)$. \end{theorem}
{\em Proof:} The proof is based on a discrete approximation process. First, we observe that the condition $h\in {\mathcal R}(M)$ is equivalent to the condition that for any sampling $z_1,\ldots,z_J\in\partial M$ of the boundary, there must be $x\in M$ such that \[
h(z_j)= \tau(x,z_j),\quad 1\leq j\leq J. \] Let us denote $\tau_j = h(z_j)$. By the continuity of the distance function, $\tau(x,z)$ in $x \in M,\,z \in \p M$, we deduce that the above condition is equivalent to the following one:
\noindent For any $\varepsilon>0$, the points $z_j$ have neighborhoods $\Gamma_j\subset \partial M$ with ${\rm diam}(\Gamma_j)<\varepsilon$, such that \begin{equation}\label{int condition} {\rm int}\bigg(\bigcap_{j=1}^J M(\Gamma_j,\tau_j+\varepsilon) \setminus M(\Gamma_j,\tau_j-\varepsilon)\bigg)\neq \emptyset. \end{equation} On the other hand, by Theorem \ref{alternative}, condition (\ref{int condition}) is equivalent to \[
{\rm dim}\bigg( Z(\{\Gamma_j,\tau_j+\varepsilon, \tau_j-\varepsilon\}\bigg)=\infty, \] that, by means of Theorem \ref{is in Z th},
can be verified via boundary data.
$\Box$
As a consequence, we obtain the main result of this section.
\begin{corollary} The knowledge of the admittance map ${\mathcal Z}^{T_2}$ is sufficient for the reconstruction of the manifold, $M$, endowed with the travel time metric, $g$. \end{corollary}
Having the manifold reconstructed, the rest of this article is devoted to the reconstruction of the wave impedance, $\alpha$.
\subsection{Focusing sources}
In the previous section it was shown
that, using boundary data, one can control supports
of the 2--forms $(\omega_{tt}^{\widehat{f}})^2(t)$. In this section, the goal is to construct a sequence of sources, $(\widehat{f}_p), \, p=1,2,\cdots$ such that, when $p \to \infty$, the corresponding forms $(\omega_{tt}^{\widehat{f}_p})^2(T_1)$
become
supported at a single point, while $(\omega_{tt}^{\widehat{f}_p})^1(T_1)=0$. For $t \geq T_1$, these fields behave like point sources, a fact that turns out to be useful for reconstructing the wave impedance.
In the following, let ${\underline \delta}_y$ denote the Dirac delta at $y\in M^{{\rm int}}$, i.e., \[
\int_M{\underline \delta}_y(x)\phi(x)dV_g(x)=\phi(y), \] where $\phi\in C^\infty_0(M)$ and $dV_g$ is the volume form of $(M,g)$.
Since the Riemannian manifold $(M,g)$ is already found, we can choose $\Gamma_{jp}\subset\partial M$, $\,0<\tau_{jp}^-<\tau_{jp}^+<{\rm diam}(M)$, so that \beq \label{20.11.3}
S_{p+1}\subset S_p,\quad \bigcap_{p=1}^\infty S_p
= \{y\},\; y\in M^{int}. \eeq Then,
$Z_p=Z(\{\Gamma_{jp},\tau_{jp}^-, \tau_{jp}^+\}_{j=1}^{J(p)})$ is the corresponding set of generalized sources defined in Definition \ref{support sources}.
\begin{definition} Let $S_p, \, p=1,2,\cdots,$ satisfy (\ref{20.11.3}).
We call the sequence $\widetilde f =(\widehat f_p), \, p=1,2,\cdots$
with $\widehat f_p\in Z_p$, a {\em focusing sequence}
of generalized sources of order $s$ (for brevity, focusing sources), $ s \in \Bbb{Z}_+$, if there is a distribution-form $A=A_y$ on $M$ such that \[
\lim_{p\to \infty} (\omega^{\partial_t\widehat f_p}_t(T_1) ,\eta)_{{\bf L}^2} = (A_y,\eta)_{{\bf L}^2}, \] for all $\eta\in {\mathcal D}({\mathcal M}_{\rm e}^s)$. \end{definition}
\noindent {\bf Remark 8.} Observe that, by the identity, \begin{equation}\label{omega tt}
\omega^{\partial_t \widehat f_p}_t = \omega_{tt}^{\widehat f_p} \end{equation} and Remark 7, the electromagnetic wave $\omega^{\partial_t \widehat f_p}_t(T_1)$ is supported in ${\rm cl}(S_p)$, so $A_y$ must be supported on $\{y\}$.
We will show the following result.
\begin{lemma}
Let the admittance map ${\mathcal Z}^{T_2}$ be given. Then, for any $s \in \Bbb{Z}_+$ and any sequence of generalized sources, $(\widehat f_p), \, p=1,2\cdots$, one can determine if $(\widehat f_p)$ is a focusing sequence or not. \end{lemma}
{\em Proof:} Let $\eta\in{\mathcal D}({\mathcal M} _{\rm e}^s)$. We decompose $\eta$ as $
\eta=\eta_1+\eta_2, $ where \[
\eta_1\in {\mathcal D}({\mathcal M} _{\rm e}^s)\cap{\rm cl}(Y),\quad \eta_2\in {\mathcal D}({\mathcal M} _{\rm e}^s)\cap Y^\perp. \] By the global controllability result, Theorem \ref{global control th}, and isometry of the wave operator $W^T$, $T\geq T_0$, \[
\eta_1 = \omega_t^{\widehat h},\quad
\widehat h\in {\mathcal F}^s([0,T_0]). \] Since $\omega_t^{\partial_t \widehat f_p}\in {\rm cl}_{{\bf L}^2}(Y)$, so that $\omega_t^{\partial_t \widehat f_p} \perp \eta _2$, the condition that $\widetilde f$ is a focusing source is tantamount to the existence of the limit \begin{equation}\label{limit}
(A_y,\eta)=\lim_{p\to \infty} (\omega^{\partial_t \widehat f_p}_t(T_1),\omega_t^{\widehat h})_{L^2}, \end{equation} for all $\widehat h\in{\mathcal F}^s([0,T_0])$. However, by Theorem \ref{blacho}, the existence of this limit can be verified if we are given ${\mathcal Z}^{T_2}$.
Conversely, assume that the limit (\ref{limit}) does exist for all $\widehat h\in{\mathcal F}^s([0,T_0])$. Then, by the Principle of Uniform Boundedness, the mappings \[
\eta\mapsto(\omega^{\partial_t \widehat f_p}_t(T_1) ,\eta)_{L^2}, \quad p \in \Bbb{Z}_+, \] form a uniformly bounded family in the dual of ${\mathcal D}({\mathcal M}_{\rm e}^s)$. By the Banach-Alaoglu theorem, we find a weak$^*$-convergent subsequence \[
\omega^{\partial_t \widehat f_p}_t(T_1)\to
A_y\in \bigg({\mathcal D}({\mathcal M}_{\rm e}^s) \bigg)', \] which is the sought after limit distribution-form.
$\Box$
Since ${\rm supp}(A_y)$ is a point, $A_y$ consists of the Dirac delta and its derivatives. The role of the smoothness index, $s$ is just to select the order of this distribution, as is seen in the following result.
\begin{lemma} \label{Lm2.15} Let $ A_y = \lim_{p\to \infty} \omega^{\partial_t \widehat f_p}_t(T_1)$ be a distribution of order $s=3$. Then $A_y$ is of the form \begin{equation}\label{Ay}
A_y(x)= (0,0,d(\lambda{\underline \delta}_y(x)),0), \end{equation} where $\lambda$ is a 1--form at $y$, $\, \lambda \in T_y^*M$. Furthermore, for any $ \lambda \in T_y^*M$ there is a focusing source $\widetilde{f}$ with $A_y$ of form (\ref{Ay}). \end{lemma}
{\em Proof:} From the results in Section \ref{generalized sources section},
we deduce that, when $s=3$, \[
A_y\in\left({\mathcal D}({\mathcal M}_{\rm e}^3) \right)'\subset {\bf H}^{-3}. \] Furthermore, from Remark 7, the electromagnetic waves
(\ref{omega tt}) are of the form \beq\label{refe A} \omega^{\partial_t\widehat f_p}_t(T_1) =(0,0,d\eta_p,0), \eeq for some 1-forms $\eta_p$. Combining ( these with the fact that ${\rm supp}(A_y) =\{y\}$, we see that $
A_y = (0,0,A^2_y,0). $ Here $A^2_y$, expressed, for example, in Riemann normal coordinates $(x^1,x^2,x^3)$ near $y$, must be of the form \[
A^2_y(x)=a^j{\underline \delta}_y(x) \theta_j + b^{jk} \partial_k{\underline \delta}_y(x)
\theta_j, \] where $\theta_j = (1/2)e_{j k\ell}dx^k\wedge dx^\ell$
and $e_{j k\ell}$ is the totally antisymmetric permutation symbol. Furthermore, by (\ref{refe A}), \[
dA^2_y =
(a^j\partial_j{\underline \delta}_y(x) + b^{jk}\partial_k\partial_j
{\underline \delta}_y(x))dV_g=0. \] Let $\varphi$ be a compactly supported test function and, in the vicinity $U$ of $y$, \[
\varphi(x) = x^j,\quad j=1,2,3. \] It follows that \[ 0= (dA^2_y,\varphi)= a^j. \] Further, let $\psi$ be a compactly supported test function and, in the vicinity $U$ of $y$, \[
\psi(x) = x^jx^k,\quad j,k=1,2,3. \] As before, we obtain \[ 0=(dA^2_y,\psi)= b^{jk}+b^{kj}. \] Thus, $b^{jk}$ may be represented as $b^{jk} =e^{jk\ell}\lambda_\ell, \, \lambda_\ell \in T^*_yM$, implying that \begin{equation}\label{a2}
A^2_y(x) = e^{jk\ell}\lambda_\ell\partial_k{\underline \delta}_y(x) \theta_j. \end{equation} By the properties of the permutation symbols, $e^{jk\ell}$, \[
e^{jk\ell}\theta_j = \frac 12 e^{jk\ell}e_{jpq}dx^p
\wedge dx^q = \delta^k_p\delta^\ell_q dx^p
\wedge dx^q. \] Substituting this expression back to (\ref{a2}), we finally obtain \[
A^2_y(x) = \lambda_\ell\partial_k{\underline \delta}_y(x)
dx^k\wedge dx^\ell
= d({\underline \delta}_y(x)\lambda_\ell dx^\ell), \] as claimed.
$\Box$
By the above results, for any $y\in M^{int}$ and $\lambda \in T^*_yM$, we can, in principle, find focusing sequences $\widetilde{f}$ such that $\omega_{tt}^{\widetilde{f}}(T_1) =A_y$, where $A_y$ is of form (\ref{Ay}). We should, however, stress that, at this stage, we can not control the corresponding $\lambda=\lambda (y)$.
Consider now a family of focusing sources
$\widetilde{f}_y, \, y \in M^{{\rm int}}$, with the corresponding $1-$forms $\lambda (y)$.
\begin{lemma} \label{lem: 9.3}
Given the admittance map ${\mathcal Z}^{T_2}$, it is possible to determine whether the map $y\mapsto\lambda_y$ is a nowhere vanishing 1-form valued $C^\infty$--function. \end{lemma}
{\em Proof:} Let $\varphi \in \Omega^1M^{{\rm int}}$ be an arbitrary compactly supported test $1-$form. By Theorem \ref{global control th}, there is a generalized source $\widehat h\in{\mathcal F}^\infty$ such that \[
(\omega_t^{\widehat h})^1(T_1) = \varphi. \] Let $\widetilde f = (\widehat f_p), p=1,2,\cdots,$ be a focusing source of order $s=3$. Then, by Lemma \ref{Lm2.15} and the definition of the focusing sources, we have \begin{eqnarray} \lim_{p\to \infty}(\omega_{tt}^{\widehat{ f}_p}(T_1),
\omega^{\widehat h}(T_1)) &=& (A_y,\omega^{\widehat h}
(T_1)) \\ \noalign{\vskip4pt} &=& (d(\lambda{\underline \delta}_y),(\omega^{\widehat h})^2(T_1)) = \int_M \lambda_y {\underline \delta}_y\wedge*\delta(\omega^{\widehat h})^2(T_1). \nonumber \end{eqnarray} Further, by Maxwell's equations, \begin{eqnarray*}
\lambda_y\wedge*\delta(\omega^{\widehat h})^2(T_1)
&=&\lambda_y\wedge*(\omega^{\widehat h}_t)^1(T_1)\\ \noalign{\vskip4pt}
&=&\lambda_y\wedge *\varphi(y).
\end{eqnarray*} Here, for $\lambda, \, \eta \in T^*_yM$, \[ \lambda\wedge *\eta = \langle \lambda, \eta \rangle _y \, dV_g = g^{jk} \lambda _j \eta _k \, dV_g. \] Thus, \beq \label{test focus} \lim_{p\to \infty}(\omega_{tt}^{\widehat{ f}_p}(T_1),
\omega^{\widehat h}(T_1)) = \langle \lambda,\, \varphi(y) \rangle _y . \eeq By Theorem \ref{blacho}, the inner products on the left side of equation (\ref{test focus}) are obtainable from the boundary data. Thus, we can find the map $y \to \langle \lambda, \varphi(y) \rangle _y, \, y \in M^{{\rm int}}$. Since $\varphi \in \Omega^1M^{{\rm int}}$ is arbitrary, this determines whether $\lambda \in \Omega^1 M^{{\rm int}}$. It also
determines whether $\lambda_y =0$ or not for any $y \in M^{{\rm int}}$. This yields the claim.
$\Box$
Another way to look at Lemma \ref{lem: 9.3} is that the admittance map, ${\mathcal Z}^{T_2}$
determines, for any boundary source $h \in C_0^{\infty}(]0,T_0[; \Omega^1 \p M)$, the values, at any $y \in M^{{\rm int}}$, of $\langle \lambda, (\omega ^h_{tt})^1(t)\rangle _y$ for some unknown $\lambda \in \Omega^1 M^{{\rm int}}$ and $T_1 <t<T_2- {\rm diam}(M)$. Moreover, using this map, we can verify that the $1-$forms $\lambda_k (y)$, corresponding to three families of focusing sources $\widetilde{f}_k(y), \, k=1,2,3$, are linearly independent at any $y \in M^{{\rm int}}$. These give rise to the following result.
\begin{lemma}\label{gauge2}
Let ${\mathcal Z}^{T_2}$ be the admittance map of the Riemannian manifold with impedance $(M,g,\alpha)$. Then, for $T_1\leq t \leq T_2 -{\rm diam}(M)$ and ${\hat h}\in \overline {{\mathcal F} (]0,T_0[)},$ it is possible to find the forms \ba L(y)(\omega^{\hat h}_t(y,t))^1,\quad K(y)(\omega^{\hat h}_t(y,t))^2, \ea at any $ y\in M^{{\rm int}}$. Here $L(y): T_y^*M\to T_y^*M$ and
$K(y): \wedge^2 T_y^*M\to \wedge^2T_y^*M$ are smooth sections of ${\rm End}(T^*M^{{\rm int}})$ and ${\rm End}(\Lambda^2T^*M^{{\rm int}})$, correspondingly. \end{lemma}
We emphasize that, at this stage, $L(y)$ and $K(y)$ are unknown. However, they are independent of $t$ or ${\hat h}$.
{\em Proof:} As $M$ is already found, we can choose three
differential $1-$forms, $\xi_k \in \Omega^1M^{{\rm int}}$ which, at any $y \in M^{{\rm int}}$, form a basis in $T^*_yM$. Using the families $\widetilde{f}_k(y)$ of focusing sources introduced earlier, we can construct, for any ${\hat h} \in {\mathcal F}^\infty(]0,T_0[)$, the differential $1-$ form, \beq \label{25.11.1} \rho^{\widehat{h}}(y,t) := \langle \lambda_k(y), \,
(\omega_t^{\widehat{h}}(y,t))^1 \rangle_y \, \xi_k(y). \eeq This defines a smooth section, $L(y)$ of ${\rm End}(T^*M^{{\rm int}})$, \ba L(y)(\omega^{\hat h}_t(y,t))^1 = \rho^{\widehat{h}}(y,t) \in \Omega^1M^{{\rm int}}, \ea proving the assertion for $(\omega^{\hat h}_t(y,t))^1$ with ${\hat h}\in {\mathcal F}^\infty(]0,T_0[)$.
Its extension to $\,{\hat h} \in \overline{{\mathcal F}(]0,T_0[)}$ is an immediate corollary of the fact that ${\mathcal F}^\infty(]0,T_0[)$ is dense in $\overline{{\mathcal F}(]0,T_0[)}$ in the ${\mathcal F}-$norm.
To analyse $(\omega^{\hat h}_t(y,t))^2$ , consider the form \beq \label{dual} \eta=(\frac 1\alpha *\omega^3,\frac 1\alpha *\omega^2,\frac 1\alpha *\omega^1, \frac 1\alpha*\omega^0) \eeq (cf. $\nu$-forms in formulae (\ref{hodge}) and (\ref {nu field 2})). This form satisfies the complete Maxwell system \[ \eta_t+\widetilde{{\mathcal M}} \eta=0, \]
where $\widetilde{{\mathcal M}} $ is the differential expression (\ref{M}), corresponding to the manifold
$(M,g,\alpha^{-1})$. Then the admittance map ${\bf t} \eta^1|_{\p M\times ]0,T_2[}\to
{\bf n}_{\alpha^{-1}} \eta^2|_{\p M\times ]0,T_2[}$ is the inverse of the given admittance
map ${\mathcal Z}^{T_2}:{\bf t} \omega^1|_{\p M\times ]0,T_2[}\to
{\bf n}_{\alpha} \omega^2|_{\p M\times ]0,T_2[}$. Thus, ${\mathcal Z}^{T_2}$ determines the admittance map, $\widetilde{{\mathcal Z}}^{T_2}$ for
$\widetilde{{\mathcal M}} $ and we can apply the results for $(\omega^{{\hat h}}_t)^1$ to
$(\eta^{{\hat f}}_t)^1$, where ${\hat f}={\mathcal Z}^{T_2}{\hat h}$. Namely, we can find $\widetilde L(y)(\eta_t(y,t))^1$, where $\widetilde L(y): T_y^*M\to T_y^*M$ is a smooth section of ${\rm End}(T^*M^{{\rm int}})$ which, at this stage, is unknown. At last, since \ba *\widetilde L(y)(\eta^{\widehat f}_t(y,t))^1=K(y)(\omega^{\widehat h}_t(y,t))^2, \ea for some smooth section, $K(y)$ of ${\rm End}(\Lambda^2T^*M^{{\rm int}})$, the assertion follows.
$\Box$
We note for the further reference, that, similar to the case of the $1-$forms, the construction of $K(y)$ involves a choice of three differential $2-$forms, that we denote by $\mu_k \in \Omega^2M^{{\rm int}}$, and three families of generalized sources $\widetilde{\kappa}_k(y)$ that satisfy \beq \label{25.11.2}
\omega^{\partial_t \widetilde{ \kappa_k}}_t(T_1)= (0,\delta(\mu_k{\underline \delta}_y(x)),0,0), \quad \mu_k \in \Lambda^2T^*_yM,\quad k=1,2,3. \eeq Below we call the generalized sources $\widetilde{f}_k(y)$ the focusing sources for $2$--forms and $\widetilde{\kappa}_k(y)$ the focusing sources for $1$--forms.
Before going to a detailed discussion in the next sections of the reconstruction of $\alpha$, let us explain briefly the main outline of this construction. It follows from Lemma \ref{gauge2} that, using the admittance map ${\mathcal Z}$, we can find the electromagnetic waves $\omega^f_t(t),$ $T_1<t<T_2-\diam(M)$,
up to unknown linear transformations, $L$ and $K$. We observe that, by Theorem \ref{global control th}, for any basis $\xi_k(y), \,k=1,2,3$, there are families $\widetilde{f}_k(y)$ of focusing sources, such that the corresponding transformation $L$ is just identity. Indeed, to achieve this goal, we should choose $\widetilde{f}_k(y)$ in such a manner that, at any $y \in M^{{\rm int}}$, \beq \label{26.11.1} (\omega_{tt}^{\widetilde{f}_k(y)})^2(T_1) = d(\lambda_k(y) {\underline \delta}_y), \eeq where $\lambda_k(y)$ is dual to $\xi_k(y)$, \beq \label{26.11.2} \langle \lambda_k(y),\, \xi_j(y) \rangle_y = \delta _{kj}. \eeq In the next sections we will identify conditions on $\widetilde{f}_k(y)$ and $\widetilde{\kappa}_k(y)$, verifiable in terms of ${\mathcal Z}$, which make $L$ and $K$
to be identities.
\subsection{Reconstruction of the wave impedance}
In the previous section, it was shown how to select a family $\widetilde{f}_y, \, y \in M^{{\rm int}}$ of focusing sequences such that the corresponding electromagnetic fields
concentrate at $t=T_1$ at a single point, $y$, \[
\lim_{p\to\infty}\omega_{tt}^{f_p}(T_1)
=(0,0,d(\lambda{\underline \delta}_y),0). \] Here $\lambda=\lambda_y \in T^*_yM$ is yet unknown. Moreover, we can select this family in such a manner that, as a function of $y, \, \lambda_y \in \Omega^1M^{{\rm int}}$. In particular, it means that for times $0<t<t_y=\tau(y,\partial M)$, the electromagnetic wave defined as \begin{equation}\label{el green}
G_{\rm e}(y)=G_{\rm e}(x,y,t) = G_{\rm e}[\lambda](x,y,t)
= \lim_{p\to\infty}\omega_{tt}^{f_p}(t + T_1), \end{equation}
satisfies the
initial-boundary value problem \begin{eqnarray}\label{green equations}
(\partial_t + {\mathcal M})G_{\rm e} (y)&=& 0\mbox{ in $M\times]0,t_y[$}, \nonumber \\ \noalign{\vskip4pt}
{\bf t} G_{\rm e}(y) &=& 0\mbox{ in $\partial M\times]0,t_y[$}, \\ \noalign{\vskip4pt}
G_{\rm e}(y)|_{t=0} &=& (0,0,d(\lambda{\underline \delta}_y),0).\nonumber \end{eqnarray} The solution to this problem is called the {\em electric Green's function}. We will use this solution to reconstruct the scalar wave impedance, $\alpha$ on $M$. We start with analysis of some properties of $G_{\rm e}$. To this end, we will represent $G_{\rm e}$ in terms of the standard Green's function, $G=G(x,y,t)$
for the wave equation on $1-$ forms. Thus, $G$ is defined as the solution to the following initial-boundary value problem \begin{eqnarray}\label{1-form Green's function} (\partial_t^2+d\delta+\delta d)G(x,y,t)&= & (\partial_t^2+\Delta^1_{\alpha}) G(x,y,t) =\lambda {\underline \delta}_y(x){\underline \delta}(t) \hbox{ in $M\times {\mathbb R}$}\nonumber \\ \noalign{\vskip4pt}
{\bf t} G(x,y,t)=& 0,& \\ \noalign{\vskip4pt}
G(x,y,t)|_{t<0 } &=&0, \nonumber \end{eqnarray} where $\lambda\in T^*_yM$ is a given 1--form.
This Green's function has the following asymptotic behaviour.
\begin{lemma}\label{asymptotics of green} For $0<t<t_y$, Green's function $G(x,y,t)$ for the 1--form wave equation (\ref{1-form Green's function}), has the representation \[ G(x,y,t)={\underline \delta}(t-\tau(x,y))Q(x,y)\lambda+r(x,y,t). \] Here $Q(x,y):T^*_yM\to T^*_xM$ is a bijective
map that corresponds to a $(1,1)$--tensor depending smoothly on $(x,y)\in M^{{\rm int}}\times M^{{\rm int}}\setminus \hbox{diag}(M^{{\rm int}})$. The remainder $r(x,y,t)$ is a bounded function, when $t<t_y$, where $t_y$ is small enough. \end{lemma}
The proof of this lemma is postponed to the Appendix.
In the following, {we fix $y\in M^{{\rm int}}$ and $\lambda=\lambda_y \in T^*_yM$.
By operating with the exterior derivative $d$ on the both sides of the differential equation in (\ref{1-form Green's function}), we see that \[ (\partial_t^2+\Delta ^2_{\alpha})dG(x,y,t)=d(\lambda{\underline \delta}_y) {\underline \delta}(t). \] Hence, using the decomposition $
(\partial_t^2 + {\bf \Delta})=
(\partial_t +{\mathcal M})(\partial_t -{\mathcal M}), $ we find that the form $\omega(t)=(0,0,dG(x,y,t),0)$ satisfies the equation \[ (\partial_t+{\mathcal M})\bigg((\partial_t-{\mathcal M}) (0,0,dG(x,y,t),0)\bigg)=D_{y,\lambda} {\underline \delta}(t), \] where \[
D_{y,\lambda}=(0,0,d(\lambda{\underline \delta}_y),0). \]
Let $\widetilde{G}_{{\rm e}}(y)= \widetilde{G}_{{\rm e}}(x,y,t)$ be defined as \begin{eqnarray*}
\widetilde{G}_{\rm e}(x,y,t) &=&(\partial_t-{\mathcal M}) (0,0,dG(x,y,t),0)\\ \noalign{\vskip4pt}
&=& (0,\delta d G(x,y,t),\partial_t d G(x,y,t),0). \end{eqnarray*}
Then, due to the finite propagation speed, $G(y) \big|_{\p M \times ]0,t_y[} =0$, so that $\widetilde{G}_{\rm e}(x,y,t)$ satisfies the boundary condition ${\bf t}\widetilde{G}_{\rm e}(y) =0$ for $t<t_y$. Invoking the uniqueness of solution for (\ref{green equations}), we see that $\widetilde{G}_{\rm e}(y)=G_{\rm e}(y), \, t<t_y$.
Now, using Lemma \ref{asymptotics of green}, we obtain that \ba dG^1_e(x,y,t)=(Q(x,y)\lambda_y\wedge d\tau(x,y)) \underline \delta^{(1)} (t-\tau(x,y))+r_1(x,y,t), \ea where $\underline{\delta}^{(1)}$ is derivative of the delta-distribution and the residual $r_1$ is sum of the delta-distribution on $\partial B_y(t)$, where $B_y(t)$ is the ball of radius $t$ centered in $y$,
and a bounded function.
Thus, we see that \beq \label{21.11.1} \nonumber \p_tdG^1_{{\rm e}}(x,y,t)&=&(Q(x,y)\lambda_y\wedge d\tau(x,y)) \underline \delta^{(2)} (t-\tau(x,y))+r_2(x,y,t),\\ \nonumber \delta dG^1_{{\rm e}}(x,y,t)&=&*\left(d \tau(x,y) \wedge *(Q(x,y)\lambda_y\wedge d\tau(x,y)) \right) \underline \delta^{(2)} (t-\tau(x,y)) \\ & &+r_3(x,y,t), \eeq where residuals $r_2$ and $r_3$ are sums of first and zeroth derivatives of the delta-distribution on
$\partial B_y(t)$ and a bounded function. Moreover, by formulae (\ref{21.11.1}), \beq \label{21.11.2} {\bf t}_{B_y(t)} [ Q(x,y)\lambda_y \wedge d\tau(x,y)]=0, \eeq \beq \label{21.1.3} {\bf n}_{B_y(t)} \left[*\left(d \tau(x,y) \wedge *(Q(x,y)\lambda_y\wedge d\tau(x,y)) \right) \right]=0, \eeq
where ${\bf t}_{B_y(t)} \omega^k,\, {\bf n}_{B_y(t)}\omega^k$ are the tangential and normal components of $\omega^k$ on $\p B_y(t)$. This corresponds to the physical fact that the wavefronts of the electric and magnetic fields are perpendicular to the propagation direction. Let now $\widetilde{f}_y$ be a focusing source for a point $y \in M^{{\rm int}}$. Due to the definition (\ref{limit}) and the definition of the generalized source (\ref{gener}), there is a sequence $(f^y_p)_{p=1}^\infty$, $f^y_p \in C^{\infty}_0(]0,T_0[;\Omega^1 \p M),\,
p=1,2,\cdots$ such that}
\ba \omega^{\widetilde f_y}_t(T_1)=\lim_{p\to \infty} \omega^{f^y_p}_t(T_1), \ea and the right side is understood in the sense of the distribution-forms on ${\bf \Omega} M^{{\rm int}}$. Then, for $t \geq 0$, \beq \label{21.11.4} \omega^{\widetilde f_y}_t(t+T_1)=\lim_{p\to \infty} \omega^{f^y_p}_t(t+T_1). \eeq Applying Lemma \ref{gauge2}, it is possible to find, via given ${\mathcal Z}^{T_2}$, the magnetic components $K(x)(\omega^{f}_t(x,t+T_1))^2$, $f= f^y_p$ of these fields with $K$ being a smooth section of ${\rm End}(\Lambda^2T^*M^{{\rm int}})$. At last, using (\ref{21.11.4}), we find \ba K(x)(\omega^{\widetilde f_y}_t(x,t+T_1))^2 =\lim_{p\to \infty}K(x)(\omega^{ f^y_p}_t(x,t+T_1))^2 \, \in {\cal D}'(\Omega^2M^{{\rm int}}). \ea
Since \ba \omega^{\widetilde f_y}_{tt}(T_1)=(0,0,d(\lambda \underline\delta_y),0), \ea we see that \ba \omega^{\widetilde f_y}_{tt}(t+T_1)=G_{e}(\cdotp,y,t), \ea when $t<t_y$. In particular, we can find the singularities of Green's function up to a linear transformation $K(x)$.
Hence we have shown: \begin{lemma}\label{values of Maxwell} Let $\widetilde f_y=(f^y_p), \,p=1,2,\cdots,$ be a focusing source for a point $y$. Then, given the admittance map, ${\mathcal Z}^{T_2}$, it is possible to find the distribution $2-$form $K G_{{\rm e}}(y)^2$ for all $x$ satisfying $\tau(x,y)<\widehat t_y,$ where $\widehat t_y$ is small enough. In particular, the leading singularity of this form determine the $2-$form \beq\label{wave front value}
K(x)(Q(x,y)\lambda_y\wedge d\tau(x,y)). \eeq \end{lemma}
\begin{figure}
\caption{Vector $\vec v$ is the right singularity of the electromagnetic wave in the plane $M\times \{t\}$. The reconstructed singularity $\vec w$ has wrong direction, if the transformation matrix $K(x)$ is not isotropic. }
\label{pic 6}
\end{figure}
As shown at the end of the previous section, $K \in {\rm End}(\Lambda^2T^*M^{{\rm int}})$ was obtained by using three focusing sources $\widetilde{\kappa}_{k}(x),$ $k=1,2,3$.
Our next goal is to formulate conditions, verifiable using boundary data, for $K$ to be isotropic, i.e. \beq\label{K-requirement} K(x)=c(x)I, \eeq with $c(x)$ being a smooth scalar function.
We start with observation that, for a given $\widetilde{\kappa}_k(x), \, k=1,2,3,$ and any $\widetilde{f}_y$, we can find
${\bf t}_{B_y(t)}K(\cdot)(Q(\cdot,y)\lambda\wedge d\tau(\cdot,y))\big|_x$,
for $x \in \partial B_y(t)$ and small $t>0$. Here $K(x)$ is the linear transformation corresponding to the chosen $\widetilde{\kappa}_k(x)$. This follows from Lemma \ref{values of Maxwell} and the fact that the underlying Riemannian manifold $(M,g)$ is already found. When $K$ is isotropic, it follows from (\ref{21.11.2}) that \beq \label{orthog} {\bf t}_{B_y(t)}K(\cdot)(Q(\cdot,y) \lambda \wedge d\tau(\cdot,y))=0. \eeq Let us show that the condition (\ref{orthog}), which is verifiable via boundary data, actually guarantees that $K$ is of form (\ref{K-requirement}). }
{Indeed, for a given $\lambda \in T^*_yM$ and any $x,t$ with sufficiently small $t=\tau(x,y)$, (\ref{orthog}) means that $K(x)(Q(x,y) \lambda \wedge d\tau(x,y))$ continues to be normal to the $2-$dimensional subspace, $T_x(\partial B_y(t)) \subset T_xM$, i.e. \ba \left[K(x)(Q(x,y) \lambda \wedge d\tau(x,y)) \right](X,Y) =0, \quad X,Y \in (T_x\partial B_y(t)). \ea As $Q(x,y): T^*_yM \to T^*_xM$ is bijective, $Q(x,y) \lambda \wedge d\tau(x,y)$ runs through the whole $2-$dimensional subspace in $\Lambda^2 T^*_xM$, normal to $T_x(\partial B_y(t))$, when $\lambda$ runs through $T^*_yM$. Therefore, (\ref{orthog}) implies that $K(x)$ keeps this subspace invariant.
Let us now vary $y$ and $t$ keeping $x$ fixed. Then $T_x(\partial B_y(t))$ runs through the whole Grassmannian manifold, $G_{3,2}(T_xM)$. Therefore, (\ref{orthog}) implies that, if $\omega^2 \in \Lambda^2 T^*_xM$ is normal to a subspace ${\mathcal L} \in G_{3,2}$, then $K(x) \omega^2 $ remains normal to ${\mathcal L}$. This implies that the eigenspace of $K(x)$ has dimension three and hence we must have \ba K(x)=c(x)I, \ea where $c(x)$ is a scalar function and $I$ is the identity map in $\Lambda^2 T^*_xM$.
In the following, we always take focusing sources which satisfy
(\ref{K-requirement}).
Thus, we can find the values of the 2-forms \ba (\widetilde \omega_t^{\widehat{f}}(x,T_1))^2=_{def}c(x) (\omega_t^{\widehat{f}}(x,T_1))^2, \quad \widehat f \in {\cal F}. \ea
Our further considerations are based on the equation, \beq \label{25.11.3} d((\omega_t^{\widehat{f}}(T_1))^2)=0. \eeq Thus, we intend to choose those focusing sources $\widetilde{\kappa}_k(x)$ which produce $K(x) =c(x)I$ with \ba d((\widetilde \omega_t^{\widehat{f}}(T_1))^2)=0, \quad \widehat f\in {\cal F}^\infty. \ea In this case, \ba 0=d(c(x) (\omega_t^{\widehat{f}}(T_1))^2)= dc(x) \wedge (\omega_t^{\widehat{f}}(T_1))^2, \ea due to (\ref{25.11.3}). By the global controllability, the form $(\omega_t^{\widehat{f}})^2(x,T_1)$ runs through the whole $\Lambda^2T^*_xM$.
This implies that $dc(x)=0$ at any $x \in M^{{\rm int}}$. Hence, $ c(x)=c_0 $
is a constant. Thus, we choose focusing sources, $\widetilde{\kappa}_k(x)$, so that \ba (\widetilde \omega_t^{\widehat h}(T_1))^2=c_0 (\omega_t^{\widehat h}(T_1))^2. \ea
Evaluating the inner products, \ba \int_M \widetilde \omega_t^2(x,T_1)\wedge * \widetilde \omega_t^2(x,T_1)= \int_M c_0 \omega_t^2(x,T_1)\wedge *c_0 \omega_t^2(x,T_1), \ea we can compare them with the energy integrals,
\ba \int_M\frac 1{\alpha(x)} \omega_t^2(x,T_1)\wedge * \omega_t^2(x,T_1), \ea which can be found from the boundary data by means of Theorem \ref{blacho}. By considering waves $\omega_t^{\widehat f_j}(x,T_1)$ with \ba \lim_{j\to \infty}
\hbox{supp}(\omega_t^{\widehat f_j}(\cdot,T_1))=\{y\}, \ea we find the ratio \ba \lim_{j\to \infty}\frac {\int_M (\widetilde \omega_t^{\widehat f_j}(x,T_1))^2\wedge * (\widetilde \omega_t^{\widehat f_j}(x,T_1))^2} {\int_M \frac 1{\alpha(x)} (\omega_t^{\widehat f_j}(x,T_1))^2 \wedge * (\omega_t^{\widehat f_j}(x,T_1))^2}= c_0^2\alpha(y). \ea The above considerations imply that, using ${\mathcal Z}^{T_2}$,
we can determine $\alpha(x)$ up to a constant $c_0^2$. Since the impedance map satisfies ${\mathcal Z}_{M,g,c\alpha}= c^{-1}{\mathcal Z}_{M,g,\alpha}$, we see that, knowing the impedance map, we can also determine $c_0$.
As the fact that ${\mathcal Z}^T, \, T \geq 4\,{\rm diam}M$ determines $(M,g)$ is already proven, this completes the proof of Theorem \ref{ip}.
$\Box$
\subsection{Back to ${\mathbb R}^3$}
In this section we use the obtained uniqueness result for Maxwell's equations on a 3-dimensional manifold to analyze the group of transformations which preserve the boundary data in the dynamical inverse problem for Maxwell's equation (\ref{MF vector}) in a domain $\Omega\subset {\mathbb R}^3$.
Assume that two Maxwell systems with electric and magnetic permittivities $\epsilon^j_k(x)$, $\mu^j_k(x)$ and $\widetilde \epsilon^j_k(x)$, $\widetilde \mu^j_k(x)$, $x\in \Omega$, have the same admittance map ${\mathcal Z}^T$ on $\p \Omega\times [0,T]$, where $T$ is sufficiently large. Denote by $(M,g,\alpha)$ and $(\widetilde M,\widetilde g,\widetilde \alpha)$ the corresponding abstract Riemannian manifolds with impedance. By Theorem \ref{ip}, \ba (\widetilde M,\widetilde g,\widetilde \alpha)=(M,g,\alpha) \ea i.e., there is an isometry $H:(M,g)\to (\widetilde M,\widetilde g)$ and $\alpha=H_*\widetilde \alpha$. We can represent the abstract manifold $(M,g,\alpha)$ as the domain $\Omega$ with the metric tensor, $g_{ij}(x)$, given in Euclidean coordinates by (\ref{connection e and g}), (\ref{travel time metric}), and scalar impedance, $\alpha(x)$ in these coordinates.
Similarly, we represent manifold $(\widetilde M,\widetilde g,\widetilde \alpha)$ using Euclidean coordinates in $\Omega$ and obtain the metric tensor $\widetilde g_{ij}$ and impedance $\widetilde \alpha$. Then $H:\widetilde M\to M$ corresponds to a diffeomorphism, \beq\label{19.0}
\widetilde X:\Omega\to \Omega,\quad \widetilde X|_{\p \Omega}=id|_{\p \Omega}, \eeq and \beq\label{19.1} & &\widetilde g=\widetilde X_* g,\quad\hbox{i.e., } \,\, \widetilde g^{ij}(\widetilde x)=\frac {\p\widetilde x^i} {\p x^p} \frac {\p\widetilde x^j} {\p x^q}g^{pq}(x),\quad \widetilde x=\widetilde X(x),\\ \nonumber & &\widetilde \alpha=\widetilde X_* \alpha,\quad\hbox{i.e., } \,\, \widetilde \alpha(\widetilde x)=\alpha(x). \eeq Using (\ref{19.1}) and (\ref{travel time metric}), we see also that \beq\label{19.2} & &\widetilde g_\epsilon=\widetilde X_* g_\epsilon,\quad\hbox{i.e., } \,\, \widetilde g^{ij}_\epsilon(\widetilde x)=\frac {\p\widetilde x^i} {\p x^p} \frac {\p\widetilde x^j} {\p x^q}g^{pq}_\epsilon(x),\quad \widetilde x=\widetilde X(x). \eeq Employing formula (\ref{connection e and g}), we obtain \beq\label{19.2b} \epsilon^p_q=\sqrt{g_\epsilon} g_\epsilon^{pr}\delta_{rq},\quad \widetilde \epsilon^p_q=\sqrt{\widetilde g_\epsilon} \widetilde g_\epsilon^{pr}\delta_{rq}. \eeq Combining formulae (\ref{19.1})--(\ref{19.2b}) and introducing \ba \epsilon^{pq}=\epsilon^p_r\delta^{rq},\quad \widetilde \epsilon^{pq}=\widetilde \epsilon^p_r\delta^{rq}, \ea we obtain \beq\label{19.3} \widetilde \epsilon^{ij}(\widetilde x)=\frac 1{\hbox{Det}\,(D\widetilde X)} \frac {\p\widetilde x^i} {\p x^p} \frac {\p\widetilde x^j} {\p x^q}\epsilon^{pq}(x),\quad \widetilde x=\widetilde X(x). \eeq Similarly, \beq\label{19.4} \widetilde \mu^{ij}(\widetilde x)=\frac 1{\hbox{Det}\,(D\widetilde X)} \frac {\p\widetilde x^i} {\p x^p} \frac {\p\widetilde x^j} {\p x^q}\mu^{pq}(x),\quad \widetilde x=\widetilde X(x).
\eeq Clearly, if $\widetilde X:\Omega\to \Omega$ and $\widetilde X|_{\p \Omega}=id$, the admittance map ${\mathcal Z}^T,$ $T>0$ is preserved in transformations (\ref{19.0}), (\ref{19.3}), (\ref{19.4}).
Thus we have proven the following result.
\begin{theorem} \label{group} The group of transformations for Maxwell's equations (\ref{MF vector})--(\ref{constitutive}) with a scalar wave impedance, which preserves the admittance map ${\mathcal Z}^T,\,$ $T>4\,{\rm diam}(M,g)$, is generated by the group of diffeomorphisms of $\Omega$ satisfying (\ref{19.0}). The corresponding transformations of $\epsilon$ and $\mu$ are then defined by formulae
(\ref{19.3}), (\ref{19.4}). \end{theorem}
{\bf Remark 9.} It follows from (\ref{19.3}), (\ref{19.4}), that $\epsilon^{jk}$ and $\mu^{jk}$
do not transform like tensors. This is due to the special role played by the underlying Euclidean metric $g_0^{ij}=\delta^{ij}$, which is not changed by the diffeomorphisms $\widetilde X$. It should be noted that this form of transformations is observed also in the study of the Calder\'{o}n inverse conductivity problem. Indeed, it is shown in \cite{Sy} that, for the conductivity equation in $\Omega\subset {\mathbb R}^2$, the boundary measurements determine the anisotropic conductivity up to the same group of transformations as described in Theorem \ref{group}. The Calder\'{o}n problem is closely related to the inverse problem for Maxwell's equations; for instance, the low-frequency limit of the admittance map ${\mathcal Z}^\infty$ is related to the Dirichlet-to-Neumann map for the conductivity equation \cite{La3}.
\subsection{Outlook}
There are several directions in which the present work can be extended.
1. A natural question is the minimal observation time required for parameter reconstruction. It can be shown that ${\mathcal Z}^T$, $T>2\,{\rm rad}\,M$, determines the admittance map ${\mathcal Z}^t$ for any $t>0$. Thus, it follows from the above,
that ${\mathcal Z}^T$, $T>2\,{\rm rad}M$ determines uniquely the manifold $M$,
metric $g$ and wave impedance $\alpha$. The reconstruction of ${\mathcal Z}^t$ for any $t>0$ may be obtained by a direct continuation of the admittance map, i.e., without solving the inverse problem. This continuation is a direct generalization to the considered Maxwell's case of
the technique developed in \cite{KL1}, \cite{KKL} for the scalar wave equation. An analogous method has recently been applied to Maxwell's equations in \cite{BI}.
2. Another natural inverse boundary value problem is the inverse boundary spectral problem for the electric Maxwell operator ${\mathcal M}_{\rm e}$ defined in Definition \ref{d. 2}. The problem is to determine the metric $g$ and wave impedance $\alpha$, or, in the other words, $\e$ and $\mu$ from the non-zero eigenvalues $\lambda_j$ of ${\mathcal M}_{\rm e}$ and the normal boundary values of the corresponding eigenforms. This problem was studied in, e.g. \cite{L1}, \cite{L2}, for scalar Maxwell's equations. For the considered anisotropic case,
this requires significant modifications of the method developed in this paper and will be published elsewhere.
3. It often occurs in applications that the measurements are made only on a part of the boundary. In the formalism of this paper, this means that we actually know only the restriction of the admittance map to the part $\Gamma\times [0,T]$ of the lateral boundary, i.e. we are given \ba
{\mathcal Z}^Tf|_{\Gamma\times [0,T]}, \quad f \in C^{\infty}_0([0,T]; \Omega^1\Gamma). \ea For the scalar wave equation the corresponding problem is studied in \cite{KK1}, \cite{KK2} (see also \cite{KL3}, \cite{KKL}, \cite{KKL2}). The combination of these methods and those of the present paper will be useful for analyzing the corresponding problem for Maxwell's equations.
\section*{Appendix: The WKB approximation}
Here we consider asymptotic results for Green's function or, more precisely, for Green's 1-form, $G(x,y,t)=G_{\lambda}(x,y,t)$, which is defined as the solution for the wave equation \beq \label{11.01} & &\p_t^2 G_{\lambda} + (d \delta + \delta d) G_{\lambda} = a {\underline \delta} _y (x){\underline \delta} (t) \quad \hbox{in} \,\, M\times {\mathbb R}_+,\\ & & \nonumber G_{\lambda} (\cdot,y,t) = 0 \quad \hbox{for} \,\, t<0, \quad {\bf t}G_{\lambda} (\cdot,y,t) = 0, \eeq where $\delta = \delta_{\alpha}$. Here, $\lambda $ is a 1-form $ \lambda = \sum_{i=1}^3 \lambda_i \, dx^i $ in normal coordinates $(B_y(\rho),X),\,X=(x^1,x^2,x^3)$, near a point $y \in M^{{\rm int}}, \, X(y)=0$. We assume that $B_y(\rho) \cap \partial M = \emptyset$.
In these coordinates,
$ {\underline \delta} _y (x) = {\underline \delta} (x)$, when $x \in U$. Clearly, we can find, instead of the solution to (\ref{11.01}), the fundamental solution \beq \label{11.06} & &\p_t^2 G + (d \delta + \delta d) G = I {\underline \delta} (x) {\underline \delta} (t) \quad \hbox{in} \,\, M\times {\mathbb R}_+,\\ & & \nonumber G(\cdot,y,t) = 0 \quad \hbox{for} \,\, t<0, \quad {\bf t}G(\cdot,y,t) = 0, \eeq where $I$ is the $3 \times 3$ identity matrix. Equation (\ref{11.06}), written in normal coordinates, becomes a hyperbolic system \beq \label{11.02} \left\{ (\p_t^2 - g^{ij} \p_i \p_j)I + B^i \p_i +C \right \} G = I {\underline \delta} (x) {\underline \delta} (t). \eeq Here $g^{ij}(x)$ is the metric tensor in these coordinates with \beq \label{11.05} g^{ij}(0)= \delta^{ij}, \, \p_kg^{ij}(0)=0, \eeq and $B^i(x), \, C(x)$ are smooth $3 \times 3$ matrices. We note that,
in normal coordinates, $\tau(x,y) =|x|$. However, we prefer to keep the notation $\tau$ to stress the invariant nature of considerations
below.
Following \cite{Co}, \cite{Ba}, which deal with the scalar case, we search for the solution to (\ref{11.02}) in the WKB form: \beq \label{11.03} G(x,t) = G_0(x) \, {\underline \delta} (t^2 - \tau^2) + \sum_{l \geq 1} G_l(x) \, S_{l-1} (t^2 - \tau^2), \eeq where $ S_l(s) = s_+^l /\Gamma (l+1). $ Substitution of expression (\ref{11.03}) into equation (\ref{11.02}) gives rise to the recurrent system of (transport) equations. The principal one is the equation for $G_0$, \beq \label{11.04} 4 \tau \frac{d G_0}{d \tau}(\tau \widehat{x}) + \left \{(g^{ij} (\tau \widehat{x}) \, \p_i\p_j \tau ^2(\tau \widehat{x}) - 6)\,I
+ B^i(\tau \widehat{x}) \, \p_i \tau^2(\tau \widehat{x}) \right \}
G_0(\tau \widehat{x}) = 0, \eeq where $\widehat{x} = x/\tau.$ In addition, to satisfy initial conditions $I {\underline \delta} (x) {\underline \delta} (t)$, corresponding to the right side in the wave equation (\ref{11.06}),
we require that \beq \label{12.02} G_0(0) = \frac{1}{2 \pi} I.
\eeq By (\ref{11.05}), $g^{ij} \p_i\p_j \tau ^2 - 6$ is a smooth function near $x=0$ and $g^{ij} \p_i\p_j \tau ^2\big|_{x=0} - 6=0 $. Also,
$\, \p_i \tau^2 \big|_{x=0} =0$. Therefore, \bfo \frac{1}{4 \tau}\left \{(g^{ij} (\tau \widehat{x}) \, \p_i\p_j \tau ^2(\tau \widehat{x}) - 6)\,I
+ B^i(\tau \widehat{x}) \, \p_i \tau^2 (\tau \widehat{x}) \right \} \efo is a smooth function of $(\tau, \widehat{x})$, so that
$G_0(x)$ is a smooth $3 \times 3$ matrix of $x$ for $\tau >0$.
{\nottopapertext
\noindent Actually, the matrix $G_0(x)$ is smooth everywhere in the neighborhood of $y$, i.e. $x=0$, including $x=0$ itself. Indeed, if we write the Taylor expansion of $(g^{ij}(x) \p_i\p_j \tau ^2(x) - 6) \,I + B^i(x) \p_i \tau^2(x)$ near
$x=0$ and divide the result by $\tau = |x|$, we obtain that \beq \label{12.01} (g^{ij}(x) \p_i\p_j \tau ^2(x) - 6) \,I + B^i(x) \p_i \tau^2(x) =
\sum_{|\beta| \geq 1} D_{\beta} \tau^{|\beta| - 1} \widehat{x}^{\beta}.
\eeq Substituting the Taylor expansion (with respect to $\tau$) of $G_0(\tau, \widehat{x})$, \bfo G_0(\tau, \widehat{x}) = \sum_{p \geq 0} G_{0,p}(\widehat{x})\tau ^p, \efo into (\ref{11.04}) and using (\ref{12.01}), (\ref{12.02}), we obtain that $G_{0,p}(\widehat{x})\tau ^p$ are homogeneous polynomials of $x$ of degree $p$:
\bfo G_{0,p}(\widehat{x})\tau ^p = \sum_{|\beta| = p} G_{0,\beta}x^{\beta}. \efo Then, $
G_0^p(x) = \sum_{|\beta| < p} G_{0,\beta}\,x^{\beta} $ satisfies \beq \label{12.04} 4 \tau \frac{d G_0^p}{d \tau} + \left \{(g^{ij} \, \p_i\p_j \tau ^2 - 6)\,I
+ B^i \, \p_i \tau^2 \right \}
G_0^p = \theta^p, \eeq where \beq \label{12.03} \theta^p(\tau, \widehat{x}) = \theta^p(x) \in C^{\infty}(U), \quad \theta^p(x) = O(\tau^p). \eeq We construct $G_0$ as $G_0^p \,(I+ \widetilde{G}_0^p)$. Substituting this expression into (\ref{11.04}) and using (\ref{12.04}), (\ref{12.03}), we obtain that \bfo 4 \tau \frac{d \widetilde{G}_0^p}{d \tau}(\tau,\widehat{x}) = A^p(\tau,\widehat{x}) + A^p(\tau,\widehat{x})\, \widetilde{G}_0^p(\tau,\widehat{x}), \quad \widetilde{G}_0^p(0)=0, \efo where \bfo A^p(\tau,\widehat{x}) = - \left(G_0^p(\tau,\widehat{x})\right)^{-1} \, \theta^p(\tau,\widehat{x}). \efo Therefore, $A^p(\tau,\widehat{x}) \in C^{\infty}$ as a function of $(\tau, \widehat{x})$ and $A^p(\tau) = O(\tau^p)$. This implies that $\widetilde{G}_0^p(\tau,\widehat{x}) = O(\tau^p)$ and is $C^{\infty}$ smooth as a function of $ (\tau,\widehat{x})$, so that $\widetilde{G}_0^p $, considered as a function of $x =\tau \widehat{x}$ is in $ C^p(B_y(\rho))$. As $p >0$ is arbitrary and the solution $G_0$ of (\ref{11.04}), (\ref{12.02}) is unique, $G_0 \in C^{\infty}(U)$.
For $G_l, \, l \geq 1$, we obtain transport equations \ba & &4 \tau \frac{d G_l}{d \tau} + \left \{(4l -6 + g^{ij}(x) \p_i\p_j \tau ^2(x)) \,I + B^i(x) \p_i \tau^2 \right \} G_l\\ & & =
\left[ g^{ij} \p_i \p_jI - B^i \p_i -C \right ] G_{l-1}, \ea and $G_l(0)=0$. If we write $G_l = G_0 F_l$, we obtain for $F_l$ the equations \beq \label{11.07} 4 \tau \frac{d F_l}{d \tau} +4l\,F_l = G_0^{-1} \, \left[ g^{ij} \p_i \p_jI - B^i \p_i -C \right ] G_{l-1}, \quad F_l(0) =0, \eeq with their solutions \beq \label{11.08} F_l(x) = \frac14 \tau ^{-l} \int _0^{\tau} G_0^{-1}(s \widehat{x}) \, \left\{ \left[ g^{ij} \p_i \p_jI - B^i \p_i -C \right ] G_{l-1} \right \}(s \widehat{x}) \, s ^{l-1} d s, \eeq being a smooth function of $x$.
As (\ref{11.02}) is a hyperbolic system, it is easy to show that the right side of (\ref{11.03}) represents the asymptotics
with respect to smoothness of the Green's $1$-form $G(x,y,t)$, when $t < \tau(y,\partial M)$.
Clearly, (\ref{11.03}) can also be written in the form \bfo G(x,t) = G_0(x) \, {\underline \delta} (t^2 - \tau^2) +r(x,t) \efo where $r(x,t)$ is a bounded $3 \times 3$ matrix.
\end{document} |
\begin{document}
\begin{abstract} We are interested in the normal class of an algebraic hypersurface $\mathcal Z$ in the complexified euclidean projective space $\mathbb P^n$, that is the number of normal lines to $\mathcal Z$ passing through a generic point of $\mathbb P^n$. Thanks to the notion of normal polars, we state a formula for the normal class valid for a general hypersurface $\mathcal Z\subset\mathbb P^n$. We give a generic result and illustrate our formula on examples in $\mathbb P^n$. We define the orthogonal incidence variety and compute the Schubert class of the variety of projective normal lines to a surface of $\mathbb P^3$ in the Chow ring of $\mathbb G(1,3)$. We complete our work with a generalization of Salmon's formula for the normal class of a Pl\"ucker curve to any plane curve with any kind of singularity. \end{abstract} \title{Normal class and normal lines of algebraic hypersurfaces}
\section*{Introduction} The notion of normal lines to a hypersurface of a euclidean space is extended here to the complexified euclidean projective space $\mathbb P^n$ ($n\ge 2$). In this setting, $\mathcal H^\infty$ the hyperplane at infinity is fixed, together with the umbilical at infinity $\mathcal U_\infty\subset\mathcal H^\infty$, the smooth quadric in $\mathcal H^\infty$ corresponding to the intersection of $\mathcal H^\infty$ with any hypersphere (see Section \ref{DEFI0} for details). The aim of the present work is the study of the {\bf normal class} $c_\nu(\mathcal Z)$ of a hypersurface $\mathcal Z$ of $\mathbb P^n$, that is the number of $m\in\mathcal Z$ such that the projective normal line $\mathcal N_{m}(\mathcal Z)$ to $\mathcal Z$ at $m$ passes through a generic $m_1\in\mathbb P^n$ (see Section \ref{SEC00} for details). Our estimates provide upper bounds for the number of normal lines, of a real algebraic surface in an $n$-dimensional affine euclidean space $E_n$, passing through a generic point in $E_n$. Let us consider the \textbf{variety $\mathfrak{N}_{\mathcal{Z}}$ of projective normal lines of }$ \mathcal Z$ defined by \[ \mathfrak{N}_{\mathcal{Z}}:=\overline{\{\mathcal{N}_{m}(\mathcal{Z});m\in \mathcal{Z}\}}\subset
\mathbb{G}(1,n)\subset \mathbb P^{\frac{n(n+1)}2-1} \] and its Schubert class $\mathfrak{n}_{\mathcal{Z}}:=[\mathfrak{N}_{\mathcal{Z} }]\in A^{n-1}(\mathbb{G}(1,n))$ (when $\dim \mathfrak{N}_{\mathcal{Z}}=n-1$). The fact that $PGL(n,\mathbb C)$ {\bf does not preserve normal lines} complicates our study compared to the study of tangent hyperplanes. Namely, we prove the following result, valid for a wide family of hypersurfaces of $\mathbb P^n$. Let $\mathcal Z=V(F)$ be an irreducible hypersurface of $\mathbb P^n$. We write $\mathcal Z_\infty:=\mathcal Z\cap \mathcal H^\infty$. Note that the singular points of $\mathcal Z_\infty$ correspond to the points of tangency of $\mathcal Z$ with $\mathcal H^\infty$.
\begin{thm}\label{thmhypersurface} Let $\mathcal Z\subset\mathbb P^n$ be a smooth irreducible hypersurface of degree $d_{\mathcal Z}\ge 2$ such that $\mathcal H^\infty$ is not tangent to $\mathcal Z$ and that at any $m\in\mathcal Z_\infty\cap\mathcal U_\infty$, the tangent planes to $\mathcal Z_\infty$ and to $\mathcal U_\infty$ at $m$ are distinct. Then the normal class $c_\nu(\mathcal Z)$ of $\mathcal Z$ is $$c_\nu(\mathcal Z)=d_{\mathcal Z}\sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k.$$ In particular, \begin{itemize} \item if $d_\mathcal Z=2$, $c_\nu(\mathcal Z)=2n$; \item if $n=2$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^2$; \item if $n=3$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^3-d_{\mathcal Z}^2+d_{\mathcal Z}$; \item if $n=4$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^4-2d_{\mathcal Z}^3+2d_{\mathcal Z}^2$; \item if $n=5$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^5-3d_{\mathcal Z}^4+4d_{\mathcal Z}^3-2d_{\mathcal Z}^2+d_{\mathcal Z}$. \end{itemize} The normal class of a hyperplane $\mathcal H\subset\mathbb P^n$ (other than $\mathcal H^\infty$) is $c_\nu(\mathcal H)=1$. \end{thm} Actually we establish a general formula which is valid for a wider family of hypersurfaces of $\mathbb P^n$. The notion of normal polars $\mathcal P_{A,\mathcal Z}$ plays an important role in our study. It is a notion analogous to the notion of polars \cite{Dolga}. Given an irreducible hypersurface $\mathcal Z\subset\mathbb P^n$ of degree $d_{\mathcal Z}$, we extend the definition of the line $\mathcal N_m(\mathcal Z)$ to any $m\in\mathbb P^n$. We then define a regular map $\alpha_{\mathcal Z}:\mathbb P^n\setminus\mathcal B^{(0)}_{\mathcal Z}\rightarrow \mathbb P^{\frac{n(n+1)}2-1}$ corresponding to $m\mapsto\mathcal N_m(\mathcal Z)$ (where $\mathcal B^{(0)}_\mathcal Z$ is the set of base points of $\alpha_{\mathcal Z}$). 
We will see that $\mathcal B_{\mathcal Z}:=\mathcal B^{(0)}_{\mathcal Z}\cap\mathcal Z$ corresponds to the union of the set of singular points of $\mathcal Z$, of the set of points of tangency of $\mathcal Z$ with $\mathcal H^\infty$ and of the set of points of tangency of $\mathcal Z_\infty$ with $\mathcal U_\infty$. For any $A\in\mathbb P^n$, we will introduce the notion of {\bf normal polar} $\mathcal P_{A,\mathcal Z}$ of $\mathcal Z$ with respect to $A$ as the set of $m\in\mathbb P^n$ such that either $m\in\mathcal B^{(0)}_{\mathcal Z}$ or $A\in \mathcal N_m(\mathcal Z)$. We will see that, if $\dim\mathcal B^{(0)}_{\mathcal Z}\le 1$, then, for a generic $A\in\mathbb P^n$, $$\dim\mathcal P_{A,\mathcal Z}=1\quad\mbox{and}\quad \deg \left(\mathcal P_{A,\mathcal Z}\right)= \sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k.$$ \begin{thm}\label{formulegeneralehypersurface} Let $\mathcal Z$ be an irreducible hypersurface of $\mathbb P^n$ with isolated singularities, admitting a finite number of points of tangency with $\mathcal H^\infty$ and such that $\mathcal Z_\infty$ has a finite number of points of tangency with $\mathcal U_\infty$. Then the normal class $c_\nu(\mathcal Z)$ of $\mathcal Z$ is given by \[ c_\nu(\mathcal Z)=d_{\mathcal Z}.\sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k -\sum_{P\in B_{\mathcal Z}} i_P(\mathcal Z,{\mathcal P}_{A,\mathcal Z})\, , \] for a generic $A\in \mathbb P^n$, where $i_P(\mathcal Z,{\mathcal P}_{A,\mathcal Z})$ is the intersection multiplicity of $\mathcal Z$ with ${\mathcal P}_{A,\mathcal Z}$. \end{thm} In dimension 3, we obtain the following result. \begin{thm}[n=3, normal class and Chow ring]\label{formulegeneralesurface} Let $\mathcal S$ be an irreducible surface of $\mathbb P^3$ with isolated singularities, admitting a finite number of points of tangency with $\mathcal H^\infty$ and such that $\mathcal S_\infty$ has a finite number of (non singular) points of tangency with $\mathcal U_\infty$. 
Then $$\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)),$$ where the normal class $c_\nu(\mathcal S)$ of $\mathcal S$ is equal to $d_{\mathcal S}.\deg(\mathcal P_{A,\mathcal S}) $ (for a generic $A\in \mathbb P^3$) minus the sum of the intersection multiplicities of $\mathcal S$ with its generic normal polars ${\mathcal P}_{A,\mathcal S}$ at points of $\mathcal B_{\mathcal S}$. \end{thm} \begin{coro}[n=3] For a generic irreducible surface $\mathcal S\subset \mathbb P^3$ of degree $d\ge 2$, we have $c_\nu(\mathcal S)=d^3-d^2+d$ and \[ {\mathfrak n}_{\mathcal{S}}=(d^3-d^2+d).\sigma_2+d(d-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)). \] \end{coro} In the next statement, we consider smooth surfaces $\mathcal S$ of $\mathbb P^3$ ($\mathbb P^3$ being endowed with projective coordinates $[x:y:z:t]$) such that $\mathcal S_\infty$ has no worse singularities than ordinary multiple points and ordinary cusps. \begin{thm}[n=3]\label{thmsurfaces} Let $\mathcal S\subset\mathbb P^3$ be a smooth irreducible surface of degree $d_{\mathcal S}\ge 2$ such that: \begin{itemize} \item[(i)] in $\mathcal H^\infty$, the curve $\mathcal S_\infty$ has a finite number of points of tangency with $\mathcal U_\infty$, \item[(ii)] any singular point of $\mathcal S_\infty$ is either an ordinary multiple point or an ordinary cusp, \item[(iii)] at any (non singular) point of tangency of $\mathcal S_\infty$ with $\mathcal U_\infty$, the contact is ordinary, \item[(iv)] at any singular point of $\mathcal S_\infty$ contained in $\mathcal U_\infty$, the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone to $\mathcal S_\infty$. 
\end{itemize} Then \[ \mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)) \] and the normal class of $\mathcal S$ is $$c_\nu(\mathcal S)=d_{\mathcal S}^3-d_{\mathcal S}^2+d_{\mathcal S}-\sum_{k\ge 2}((k-1)^2m_\infty^{*(k)}+
k(k-1)\tilde m_\infty^{(k)}) -2\kappa_\infty^* - 3\tilde \kappa_\infty-c_\infty,$$ where \begin{itemize} \item $m_\infty^{*(k)}$ (resp. $\tilde m_\infty^{(k)}$) is the number of ordinary multiple points of order $k$ of $\mathcal S_\infty$ outside (resp. contained in) $\mathcal U_\infty$, \item $\kappa_\infty^*$ (resp. $\tilde \kappa_\infty$) is the number of ordinary cusps of $\mathcal S_\infty$ outside (resp. contained in) $\mathcal U_\infty$, \item $c_\infty$ is the number of ordinary (non singular) points of tangency of $\mathcal S_\infty$ with $\mathcal U_\infty$. \end{itemize} \end{thm} \begin{exa}[n=3] The surface $\mathcal S=V(xzt-tx^2-zt^2-xz^2+y^3)\subset \mathbb P^3$ is smooth, its only point of tangency with $\mathcal H_\infty=V(t)$ is $P[1:0:0:0]$ which is an ordinary cusp of $\mathcal S_\infty=V(t,-xz^2+y^3)$. Moreover $\mathcal S_\infty$ has no point of tangency with $\mathcal U_\infty$. Hence the normal class of $\mathcal S$ is $27-9+3-2=19$. \end{exa} Theorem \ref{thmhypersurface} (resp. \ref{thmsurfaces}) is a consequence of Theorem \ref{formulegeneralehypersurface} (resp. \ref{formulegeneralesurface}). In a more general setting, when $n=3$, we can replace $\alpha_{\mathcal S}$ in $\tilde\alpha_{\mathcal S}=\frac{\alpha_{\mathcal S}}H$ (for some homogeneous polynomial $H$ of degree $d_H$) so that the set $\tilde{\mathcal B}^{(0)}_{\mathcal S}$ of base points of $\tilde\alpha_{\mathcal S}$ has dimension at most 1. In this case, we consider a notion of normal polars associated to $\tilde\alpha_{\mathcal S}$ which have generically dimension 1 and degree $\tilde d_{\mathcal S}^2 -\tilde d_{\mathcal S}+1$ (with $\tilde d_{\mathcal S}=d_{\mathcal S}-d_H$). \begin{thm}[n=3]\label{factorisable} Let $\mathcal S$ be an irreducible surface of $\mathbb P^3$. 
If the set $\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap\mathcal S$ is finite, then $\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(\tilde d_{\mathcal S}-1) .\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3))$ and
the normal class $c_\nu(\mathcal S)$ of $\mathcal S$ is equal to $d_{\mathcal S}(\tilde d_{\mathcal S}^2-\tilde d_{\mathcal S}+1) $ minus the intersection multiplicity of $\mathcal S$ with its generic normal polars $\tilde{\mathcal P}_{A,\mathcal S}$ at points $m\in\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap \mathcal S$. \end{thm} When the surface is a "cylinder" or a surface of revolution, its normal class is equal to the normal class of its plane base curve. The normal class of any plane curve is given by the simple formula of Theorem \ref{thmcurves} below, that we give for completness. Let us recall that, when $\mathcal C=V(F)$ is an irreducible curve of $\mathbb P^2$, the evolute of $\mathcal C$ is the curve tangent to the family of normal lines to $\mathcal Z$ and that the evolute of a line or a circle is reduced to a single point. Hence, except for lines and circles, the normal class of $\mathcal C$ is simply the class (with multiplicity) of its evolute. The following result generalizes the result by Salmon \cite[p. 137]{Salmon-Cayley} proved in the case of Pl\"ucker curves (plane curves with no worse multiple tangents than ordinary double tangents, no singularities other than ordinary nodes and cusps) to any plane curve (with any type of singularities). We write $\ell_\infty$ for the line at infinity of $\mathbb P^2$. We define the two cyclic points $I[1:i:0]$ and $J[1:-i:0]$ in $\mathbb P^2$ (when $n=2$, $\mathcal U_\infty=\{I,J\}$). \begin{thm}[n=2]\label{thmcurves} Let $\mathcal C=V(F)$ be an irreducible curve of $\mathbb P^2$ of degree $d\ge 2$ with class $d^\vee$. Then its normal class is $$c_\nu(\mathcal C)=d+d^\vee-\Omega(\mathcal C,\ell_\infty)-\mu_{I}(\mathcal C)-\mu_{J}(\mathcal C),$$ where $\Omega$ denotes the sum of the contact numbers between two curves and where $\mu_P(\mathcal C)$ is the multiplicity of $P$ on $\mathcal C$. 
\end{thm} In \cite{Fantechi}, Fantechi proved that the evolute map is birational from $\mathcal C$ to its evolute curve unless\footnote{We write $[x:y:z]$ for the coordinates of $m\in\mathbb P^2$ and $F_x,F_y,F_z$ for the partial derivatives of $F$.} $F_x^2+F_y^2$ is a square modulo $F$ and that in this latter case the evolute map is $2:1$ (if $\mathcal C$ is neither a line nor a circle). Therefore, the normal class $c_\nu(\mathcal C)$ of a plane curve $\mathcal C$ corresponds to the class of its evolute unless $F_x^2+F_y^2$ is a square modulo $F$ and in this latter case, the normal class $c_\nu(\mathcal C)$ of $\mathcal C$ corresponds to the class of its evolute times 2 (if $\mathcal C$ is neither a line nor a circle).
The notion of focal loci generalizes the notion of evolute to higher dimension \cite{Trifogli,CataneseTrifogli}. The normal lines of an hypersurface $\mathcal Z$ are tangent to the focal loci hypersurface of $\mathcal Z$ but of course the normal class of $\mathcal Z$ does not correspond anymore (in general) to the class of its focal loci (the normal lines to $\mathcal Z$ are contained in but are not equal to the tangent hyperplanes of its focal loci).
In Section \ref{SEC00}, we introduce normal lines, normal class, normal polars in $\mathbb P^n$ (see also Appendix \ref{NORMAL} for the link between projective orthogonality and affine orthogonality). In Section \ref{SECpolar}, we study normal polars and prove Theorems \ref{formulegeneralehypersurface} and \ref{thmhypersurface}. In Section \ref{incidenceschubert}, we introduce the orthogonal incidence variety $\mathcal I^\perp$ in $\mathbb G(1,n)$, recall some facts about the Schubert classes in the Chow ring of $\mathbb G(1,3)$ and prove Theorems \ref{formulegeneralesurface} and \ref{factorisable}. In Section \ref{sec:proofthm1}, we prove Theorem \ref{thmsurfaces}. In Section \ref{secquadric}, we apply our results on examples in $\mathbb P^3$: we compute the normal class of every quadric and of a cubic surface with singularity $E_6$. In Section \ref{proofcurve}, we prove Theorem \ref{thmcurves}. Appendix \ref{cylindreetrevolution} is devoted to the normal class of ``cylinders'' and of surfaces of revolution in $\mathbb P^n$.
\section{Normal lines, normal class and normal polars}\label{SEC00} \subsection{Definitions and notations}\label{DEFI0} Let $\mathbf V$ be a $\mathbb C$-vector space of dimension $n+1$. Given $\mathcal Z=V(F)\ne\mathcal H^\infty$ an irreducible hypersurface of $\mathbb P^n=\mathbb P(\mathbf V)$ (with $F\in Sym(\mathbf{V}^\vee)\cong\mathbb C[x_1,...,x_{n+1}]$), we consider the rational map $n_{\mathcal Z}:\mathbb P^n\dashrightarrow\mathcal H^\infty$ given by $n_{\mathcal Z}=[F_{x_1}:\cdots :F_{x_n}:0]$. Note that, for nonsingular $m\in \mathcal Z$ such that the tangent hyperplane $\mathcal T_m\mathcal Z$ to $\mathcal Z$ at $m$ is not $\mathcal H^\infty$, $n_{\mathcal Z}(m)$ is the pole of the $(n-2)$-variety at infinity $\mathcal T_m\mathcal Z\cap\mathcal H^\infty\subset\mathcal H^\infty$ with respect to the {\bf umbilical} $\mathcal U_\infty:=V(x_1^2+...+x_n^2)\cap\mathcal H^\infty\subset\mathcal H^\infty$. $\mathcal U_\infty$ corresponds to the set of {\bf circular points at infinity}. \begin{defi} {\bf The projective normal line} $\mathcal N_m\mathcal Z$ to $\mathcal Z$ at $m\in\mathcal Z$ is the line $(m\, n_{\mathcal Z}(m))$ when $n_{\mathcal Z}(m)$ is well defined in $\mathbb P^n$ and not equal to $m$. \end{defi} \begin{rqe} This is a generalization of affine normal lines in the euclidean space $E_n$. Indeed, if $F$ has real coefficients and if $m\in\mathcal Z\setminus\mathcal H_\infty$ has real coordinates $[x^{(0)}_1:\cdots:x^{(0)}_n:1]$, then $\mathcal N_m\mathcal Z$ corresponds to the affine normal line of the affine hypersurface $V(F(x_1,...,x_n,1))\subset E_n$ at the point of coordinates $(x^{(0)}_1,\cdots,x^{(0)}_n)$ (see Section \ref{NORMAL}). \end{rqe} The aim of this work is the study of the notion of normal class. \begin{defi} Let $\mathcal Z$ be an irreducible hypersurface of $\mathbb P^n$. 
{\bf The normal class} of $\mathcal Z$ is the number $c_\nu(\mathcal Z)$ of $m\in\mathcal Z$ such that $\mathcal N_m(\mathcal Z)$ contains $m_1$ for a generic $m_1\in\mathbb P^n$. \end{defi} Let $\Delta:=\{(m_1,m_2)\in\mathbb P^n\times\mathbb{P}^{n}\ :\ m_1=m_2\}$ be the diagonal of $\mathbb{P}^{n}\times\mathbb{P}^{n}$. Recall that the {\bf Pl\"ucker embedding} $\left( \mathbb{P}^{n}\times\mathbb{P}^{n}\right) \backslash\Delta \overset{Pl}{\hookrightarrow} \mathbb{P} (\bigwedge^{2}\mathbf V)\cong \mathbb{P}^{\frac{n(n+1)}2-1}$ is defined by $$Pl(u,v)=\bigwedge^2({u}, {v})=\left[p_{i,j}=u_iv_j-u_jv_i\right]
_{1\le i<j\le n+1}\in\mathbb P^{\frac{n(n+1)}2-1},$$ with $p_{i,j}=-p_{j,i}$ the $(i,j)$-th Pl\"ucker coordinate, identifying $\mathbb P^{\frac{n(n+1)}2-1}$ with the projective space of $(n+1)\times (n+1)$ antisymmetric matrices. Its image is the Grassmannian $\mathbb G(1,n)$ (see \cite{Eisenbud-Harris}) given by $$\mathbb G(1,n):=Pl((\mathbb P^n)^2\setminus \Delta)=\bigcap_{(i,j_1,j_2,j_3)\in\mathcal I}V(B_{i,j_1,j_2,j_3}) \subset \mathbb P^{\frac{n(n+1)}2-1},$$ where $B_{i,j_1,j_2,j_3}:=p_{i,j_1}p_{j_2,j_3}-p_{i,j_2}p_{j_1,j_3}+p_{i,j_3}p_{j_1,j_2}$ and where $\mathcal I$ is the set of $(i,j_1,j_2,j_3)\in\{1,...,n+1\}^4$ such that $j_1<j_2<j_3$ and $j_1,j_2,j_3\ne i$. We recall also that $\dim \mathbb G(1,n)=2n-2$. \begin{rqe} Let $h_{\mathcal Z}:\mathbb{P}^{n}\setminus V(F_{x_1},\cdots,F_{x_n})\rightarrow \mathbb{P}^{n}\times\mathbb{P}^{n}$ be the morphism defined by $h_{\mathcal Z}(m)=\left( m,{n}_{\mathcal Z}(m)\right) $. The variety $\mathfrak N_{\mathcal Z}\subset\mathbb G(1,n)$ of projective normal lines to $\mathcal Z$ is the (Zariski closure of the) image of $\mathcal Z$ by the regular map $\alpha_{\mathcal Z}:=Pl\circ h_{\mathcal Z}:\mathbb P^n\setminus\mathcal B^{(0)}_{\mathcal Z}\rightarrow \mathbb{P}^{\frac{n(n+1)}2-1}$, with $\mathcal B^{(0)}_{\mathcal Z}:=V(F_{x_1},...,F_{x_n})\cup h_{\mathcal Z}^{-1}(\Delta)$, i.e. $$\mathcal B^{(0)}_{\mathcal Z}:=\left\{ m\in\mathbb{P}^n;\ \bigwedge^2(\mathbf {m}, \mathbf n_{\mathcal Z}(\mathbf m))=\mathbf 0\ \mbox{in}\ \bigwedge^2\mathbf V\right\}.$$ \end{rqe} Note that the number of normal lines to $\mathcal Z$ passing through $A\in\mathbb P^n$ corresponds to the number of $m\in\mathcal Z\setminus\mathcal B_{\mathcal Z}$ satisfying the following set of equations~: \begin{equation}\label{EQUA000} \bigwedge^3[\mathbf {m}\ \mathbf n_{\mathcal Z}(\mathbf m)\ \mathbf A] =\mathbf 0\quad\mbox{in}\ \bigwedge^3 \mathbf V. 
\end{equation} \begin{defi} For any $A\in\mathbb P^n$, the set of points $m\in \mathbb P^n$ satisfying \eqref{EQUA000} is called the {\bf normal polar} $\mathcal P_{A,\mathcal Z}$ of $\mathcal Z$ with respect to $A$. \end{defi}
\subsection{Projective similitudes} Recall that, for every field $\mathbb{\Bbbk }$, $$GO(n,\mathbb{\Bbbk } )=\left\{ A\in GL(n,\mathbb{\Bbbk });\exists \lambda \in \mathbb{\Bbbk } ^{\ast },A\cdot^{t}A=\lambda \cdot I_{n}\right\} $$ is the \textbf{orthogonal similitude group} (for the standard products) and that $GOAff(n,\mathbb{\Bbbk })=\mathbb{\Bbbk }^{n}\rtimes GO(n,\mathbb{\Bbbk })$ is the \textbf{orthogonal similitude affine group}. We have a natural monomorphism of groups $\kappa :Aff(n,\mathbb{R})=\mathbb{R}^{n}\rtimes GL(n,\mathbb{R} )\longrightarrow GL(n+1,\mathbb{R})$ given by \begin{equation}\label{similitude} \kappa (b,A)=\left( \begin{array}{ccccccc} a_{11} & ... & & & ... & a_{1n} & b_{1} \\ a_{21} & ... & & & ... & a_{2n} & b_{2} \\ & & & & & & \\ a_{n1} & .. & & & ... & a_{nn} & b_{n} \\ 0 & ... & & & 0 & 0 & 1 \end{array} \right) \end{equation} and, by restriction,
$\kappa |_{GOAff(n,\mathbb{R})}:GOAff(n,\mathbb{R})=\mathbb{R}^{n}\rtimes GO(n,\mathbb{R})\longrightarrow GL(n+1,\mathbb{R})$. Analogously we have a natural monomorphism of groups
$\kappa ^{\prime }:=(\kappa\otimes 1) |_{GOAff(n,\mathbb{C})}:GOAff(n,\mathbb{C})= \mathbb{C}^{n}\rtimes GO(n,\mathbb{C})\longrightarrow GL(n+1,\mathbb{C})$. Composing with the canonical projection $\pi :GL(n+1,\mathbb{C} )\longrightarrow \mathbb{P}(GL(n+1,\mathbb{C}))$, we obtain the \textbf{projective complex similitude group} \begin{equation*} \widehat{Sim_{\mathbb{C}}(n)}:=(\pi \circ \kappa ^{\prime })(GOAff(n,\mathbb{C})), \end{equation*} which acts naturally on $\mathbb{P}^{n}$. \begin{defi} An element of $\mathbb P(Gl(\mathbf V))$ corresponding to an element of $\widehat{Sim_{\mathbb{C}}(n)}$ with respect to the basis $(\mathbf e_1,\cdots,\mathbf e_n)$ is called a \textbf{projective similitude} of $\mathbb P^{n}$. \end{defi} The set of projective similitudes of $\mathbb P^n$ is isomorphic to $\widehat{Sim_{\mathbb{C}}(n)}$. \begin{lem}\label{lemmesimilitude} Projective similitudes preserve the orthogonality structure in $\mathbb P^n$. In particular, they preserve the normal lines and the normal class of surfaces of $\mathbb P^n$. \end{lem} This lemma has a straightforward proof that is omitted.
\section{Proof of Theorem \ref{formulegeneralehypersurface}}\label{SECpolar} \subsection{Geometric study of $\mathcal B_{\mathcal Z}:=\mathcal B^{(0)}_{\mathcal Z}\cap{\mathcal Z}$}\label{sec:base} We write $\mathcal Z_\infty:=\mathcal Z\cap\mathcal H^\infty$. Recall that $\mathcal U_\infty:=\mathcal H^\infty\cap V\left(x_1^2+...+x_n^2\right)$. \begin{prop} A point of $\mathcal Z$ is in $\mathcal B_\mathcal Z$ if it is a singular point of $\mathcal Z$ or a tangential point of $\mathcal Z$ at infinity or a tangential point of $\mathcal Z_\infty$ to the umbilical, i.e. $\mathcal B_{\mathcal Z}=\sing(\mathcal Z)\cup \mathcal K_\infty(\mathcal Z)\cup
\Gamma_\infty(\mathcal Z)$, where \begin{itemize} \item $\sing(\mathcal Z)$ is the set of singular points of $\mathcal Z$, \item $\mathcal K_\infty(\mathcal Z)$ is the set of points of $\mathcal Z$ at which the tangent hyperplane is $\mathcal H^\infty$, \item $\Gamma_\infty(\mathcal Z)$ is the set of points of $\mathcal Z_\infty\cap \mathcal U_\infty$ at which the tangent space to $\mathcal Z_\infty$ and to $\mathcal U_\infty$ are the same. \end{itemize} \end{prop} \begin{proof} Let $m\in\mathcal Z$. We have \begin{eqnarray*} m\in\mathcal B_{\mathcal Z} &\Leftrightarrow& \bigwedge^2\left(\mathbf m , \mathbf n_{\mathcal Z}(\mathbf m)\right)=0\\ &\Leftrightarrow& \mathbf n_{\mathcal Z}(\mathbf m)=\mathbf 0\ \mbox{or}\ m= n_{\mathcal Z}( m)\\ &\Leftrightarrow& m\in V(F_{x_1},\cdots,F_{x_n})\ \mbox{or}\ m= n_{\mathcal Z}( m). \end{eqnarray*} Now $m\in V(F_{x_1},\cdots,F_{x_n})$ means either that $m$ is a singular point of $\mathcal S$ or that $\mathcal T_m\mathcal Z=\mathcal H^\infty$.
Let $m=[x_1:\cdots:x_{n+1}]\in\mathcal Z$ be such that $m=n_{\mathcal Z}( m)$. So $[x_1:\cdots:x_{n+1}]=[F_{x_1}:\cdots: F_{x_n}:0]$. In particular $x_{n+1}=0$. Due to the Euler identity, we have $0=\sum_{i=1}^{n+1}x_iF_{x_i}=\sum_{i=1}^{n+1} x_i^2$. Hence $m\in\mathcal U_\infty$. Note that the $(n-2)$-dimensional tangent space $\mathcal T_m\mathcal U_\infty$ to $\mathcal U_\infty$ at $m$ has equations $X_{n+1}=0$ and $\langle m,\cdot\rangle=0$ and that the $(n-2)$-dimensional tangent space $\mathcal T_m\mathcal Z_\infty$ to $\mathcal Z_\infty$ at $m$ has equations $X_{n+1}=0$ and $\langle n_{\mathcal Z}(m),\cdot\rangle=0$. We conclude that $\mathcal T_m\mathcal U_\infty=\mathcal T_m\mathcal Z_\infty$.
Conversely, if $m=[x_1:\cdots:x_n:0]$ is a nonsingular point of $\mathcal Z_\infty\cap\mathcal U_\infty$ such that $\mathcal T_m\mathcal U_\infty=\mathcal T_m\mathcal Z_\infty$, then the linear spaces $Span(\mathbf{m},\vec e_{n+1})$ and $Span(\nabla F,\vec e_{n+1})$ are equal which implies that $[x_1:\cdots:x_n:0]=[F_{x_1}:\cdots:F_{x_n}:0]$. \end{proof} Recall that the dual variety of $\mathcal Z_\infty\subset\mathcal H^\infty$ is the variety $\mathcal Z_\infty^\vee\subset (\mathcal H^\infty)^\vee\cong (\mathbb P^{n-1})^\vee$ of tangent hyperplanes to $\mathcal Z_\infty$. It corresponds to the (Zariski closure of the) image of $\mathcal Z_\infty$ by the rational map $n_{\mathcal Z}$. We write $\mathcal Z_\infty^\wedge\subset\mathbb P^n$ for this image. With this notation, $\mathcal B_{\mathcal Z}=\sing(\mathcal Z)\cup (\mathcal Z_\infty\cap\mathcal Z_\infty^\wedge)$.
\begin{rqe}\label{Basegenerique} For a generic hypersurface of $\mathbb P^n$, $\mathcal B_{\mathcal Z}=\emptyset$ and so $\dim \mathcal B_{\mathcal Z}^{(0)}\le 0$. \end{rqe} But we will also consider cases for which $\#\mathcal B_{\mathcal Z}<\infty$, and so $\dim\mathcal B^{(0)}_{\mathcal Z}\le 1$. \begin{exa}[n=3]\label{exemple2} For the saddle surface $\mathcal S_1=V(xy-zt)$, the set $\mathcal B_{\mathcal S_1}$ contains a single point $[0:0:1:0]$ which is a point of tangency at infinity of $\mathcal S_1$.
For the ellipsoid $\mathcal E_1:=V(x^2+2y^2+4z^2-t^2)$, the set $\mathcal B_{\mathcal E_1}$ is empty.
For the ellipsoid $\mathcal E_2:=V(x^2+4y^2+4z^2-t^2)$, the set $\mathcal B_{\mathcal E_2}$ has two elements: $[0:1:\pm i:0]$ which are points of tangency of $\mathcal E_2$ with $\mathcal U_\infty$. \end{exa} \begin{exa} For the quadric $\mathcal Z:=V(x_1^2+x_2^2+(x_3+x_5)x_3+(2x_3+x_4)x_4)\subset \mathbb P^4$, $\sing(\mathcal Z)=\emptyset$, $\mathcal K_\infty(\mathcal Z)=\{[0:0:1:-1:0]\}$ and $\Gamma_\infty(\mathcal Z)=\{I_1,I_2\}$, with $I_1[1:i:0:0:0]$ and $I_2[1:-i:0:0:0]$. \end{exa}
\subsection{Normal polars of $\mathcal Z\subset\mathbb P^n$} \label{polars} Let $\mathcal Z=V(F)\subset \mathbb P^n$ (with $F\in Sym(\mathbf V^\vee)$) be an irreducible hypersurface. For every $A\in\mathbb P^n$, {\bf the normal polar} $\mathcal P_{A,\mathcal Z}$ of $\mathcal Z$ with respect to $A$ is the set of $m\in\mathbb P^n$ satisfying the $\binom{n+1}{3}$ equations of \eqref{EQUA000}. For every $m,A\in \mathbb P^n$, we have $$m\in \mathcal P_{A,\mathcal Z}\ \Leftrightarrow\ m\in\mathcal B^{(0)}_{\mathcal Z}\ \mbox{or}\ A\in \mathcal N_m\mathcal Z,$$ extending the definition of $\mathcal N_m\mathcal Z$ from $m\in\mathcal Z$ to $m\in\mathbb P^n$.
\begin{lem}[The projective similitudes preserve the normal polars] \label{preservpolar} Let $\mathcal Z=V(F)\subset\mathbb P^n$ be a hypersurface and $\varphi$ be any projective similitude, then $\varphi(\mathcal P_{A,\mathcal Z})=\mathcal P_{\varphi(A),\varphi(\mathcal Z)}$. \end{lem} \begin{proof} Due to Lemma \ref{lemmesimilitude}, $\varphi(\mathcal N_m\mathcal Z)=\mathcal N_{\varphi(m)} (\varphi(\mathcal Z))$ which gives the result. \end{proof} Note that $$\mathcal P_{A,\mathcal Z}=\mathcal B^{(0)}_{\mathcal Z}\cup\left(\bigcap_{i<j<k}\alpha_{\mathcal Z}^{-1}\mathcal H_{A,i,j,k}\right),$$ where $\mathcal{H}_{A,i,j,k}$ is the hyperplane of $\mathbb{P}^{\frac{n(n+1)}2-1}$ given by $\mathcal{H}_{A,i,j,k}:= V(D_{i,j,k})\subset \mathbb{P}^{\frac{n(n+1)}2-1}$, with $D_{i,j,k}:=a_ip_{j,k}-a_jp_ {i,k}+a_kp_{i,j}$. On $\mathbb G(1,n)$, $p=Pl(u,v)\in\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ means that $\bigwedge ^3(\mathbf A,\mathbf u,\mathbf v)=0$. \begin{lem}\label{lemdimG(1,n)} For every $A\in \mathbb P^n$, the set $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ is a $(n-1)$-dimensional linear space of $\mathbb P^{\frac{n(n+1)}2-1}$ contained in $\mathbb G(1,n)$. \end{lem} \begin{proof} Let $A[a_1:\cdots:a_{n+1}]\in\mathbb P^n$. Assume for example $a_{j_0}\ne 0$ (the proof being analogous when $a_j\ne 0$ for symetry reason).
Let $p\in\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$. Let us prove that $p\in\mathbb G(1,n)$. Let $i,j_1,j_2,j_3\in\{1,...,n+1\}$ be distinct indices. Due to $D_{j_1,j_2,j_3}=D_{i,j_1,j_2}=0$, we have \begin{multline*} a_{j_1}a_{j_2}p_{i,j_1}p_{j_2,j_3}=a_{j_1}a_{j_2}p_{j_1,j_3}p_{i,j_2}
+a_{j_1}p_{j_1,j_2}(-a_{j_3}p_{i,j_2})\\+a_{j_2}p_{j_1,j_2}(-a_ip_{j_1,j_3})+
p_{j_1,j_2}(a_ia_{j_3}p_{j_1,j_2})\\ =a_{j_1}a_{j_2}p_{j_1,j_3}p_{i,j_2}-a_{j_1}a_{j_2}p_{i,j_3}p_{j_1,j_2}
-a_jp_{j_1,j_2}D_{i,j_2,j_3}+a_ip_{j_1,j_2}D_{j_1,j_2,j_3}. \end{multline*} Hence $a_{j_1}a_{j_2}B_{i,j_1,j_2,j_3}=0$, for every $i,j_1,j_2,j_3\in\{1,...,n+1\}$. So $B_{i,j_1,j_2,j_3}\ne 0$ implies that $a_{j_1}=a_{j_2}=a_{j_3}=0$ (up to a permutation of $(i,j_1,j_2,j_3)$) and so $0=D_{j_0,j_1,j_2}=D_{j_0,j_2,j_3}=D_{j_0,j_1,j_3}$ imply $p_{j_1,j_2}=p_{j_2,j_3}=p_{j_1,j_3}=0$ which contradicts $B_{i,j_1,j_2,j_3}\ne 0$. Since $\bigwedge ^4(\mathbf A,\mathbf A,\mathbf u,\mathbf v)=0$, we get that $a_{j_0}D_{i,j,k}-a_iD_{j_0,j,k}+a_jD_{j_0,i,k}-a_kD_{j_0,i,j}=0$ for every $1\le i<j<k\le n+1$ such that $i,j,k\ne j_0$. Hence $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}=\bigcap_{i<j, i,j\ne j_0}\mathcal H_{A,j_0,i,j}$. Since $a_{1}\ne 0$, the $\frac {n(n-1)}2$ corresponding linear equations are linearly independent and so $\bigcap_{i<j, i,j\ne j_0}\mathcal H_{A,j_0,i,j}\subset \mathbb P^{\frac{n(n+1)}2-1}$ has dimension $\frac{n(n+1)}2-1-\frac{(n-1)n}2=n-1$. \end{proof} \begin{prop}\label{degrepolaire} Let $\mathcal Z=V(F)\subset\mathbb P^n$ be an irreducible hypersurface such that $d_{\mathcal Z}:=\deg\mathcal Z\ge 2$ and $\dim\mathcal B^{(0)}_{\mathcal Z}\le 1$. Then, for a generic $A\in\mathbf V$, we have $\dim \mathcal P_{A,\mathcal Z}=1$ and $$\deg \mathcal P_{A,\mathcal Z}=\sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k.$$ \end{prop} \begin{proof} Due to the proof of Lemma \ref{lemdimG(1,n)}, for every $A\in\mathbb P^n$ such that $a_{n+1}\ne 0$, we have $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}=\bigcap_{i<j<n+1}\mathcal H_{A,n+1,i,j}\subset\mathbb P^{\frac{n(n+1)}2-1}$ and so $$\mathcal P_{A,\mathcal Z}= \bigcap_{1\le i<j\le n}V(E_{A,i,j})\subset\mathbb P^n,$$ with $$\forall i,j\in\{1,...,n\},\quad E_{A,i,j}:=L_{A,i}F_{x_j}-L_{A,j}F_{x_i}\quad\mbox{and}\quad L_{A,i}:=a_{n+1}x_i-a_ix_{n+1},$$ i.e. $E_{A,i,j}=a_{n+1}(x_iF_{x_j}-x_jF_{x_i})+a_jx_{n+1}F_{x_i}-a_ix_{n+1}F_{x_j}$. 
Note that \begin{equation}\label{simplification} L_{A,k}E_{A,i,j}-L_{A,j}E_{A,i,k}=L_{A,i}E_{A,k,j}\quad\mbox{and}\quad F_{x_k}E_{A,i,j}-F_{x_j}E_{A,i,k}=F_{x_i}E_{A,k,j}. \end{equation} Hence \begin{equation}\label{n-1eq} \forall i\in\{1,...,n\},\quad \mathcal P_{A,\mathcal Z}\setminus V(L_{A,i},F_{x_i})=\bigcap_{j\in\{1,...,n\}\setminus\{i\}} V(E_{A,i,j})\setminus V(L_{A,i},F_{x_i}), \end{equation} and so $\dim\mathcal P_{A,\mathcal Z}\ge 1$. Recall that $\mathcal B_{\mathcal Z}^{(0)}=\bigcap_{i=1}^nV(x_{n+1}F_{x_i})\cap \bigcap_{i,j=1}^nV(x_iF_{x_j}-x_jF_{x_i})$ and that we have assumed that $\dim \mathcal B_{\mathcal Z}^{(0)}\le 1$. In particular $\mathcal P_{A,\mathcal Z}\cap\mathcal H^\infty=\mathcal B_{\mathcal Z}^{(0)}\cap \mathcal H^\infty$ and $\mathcal B_{\mathcal Z}^{(0)}\setminus\mathcal H^\infty =V(F_{x_1},...,F_{x_n})\setminus\mathcal H^\infty$. This combined with \eqref{n-1eq} and with the expression of $E_{A,i,j}$ leads to $\dim\mathcal P_{A,\mathcal Z}= 1$.
Now let us compute the degree of $\mathcal P_{A,\mathcal Z}$. The idea is to prove an induction formula. Assume that $A\not\in\mathcal H^\infty$ is such that $\dim\mathcal P_{A,\mathcal Z} =1$. Let $\mathcal H=V(\sum_{i=1}^{n+1} \alpha_ix_i)\subset\mathbb P^n$ be an hyperplane such that $\#(\mathcal H\cap \mathcal P_{A,\mathcal Z}) =\deg \mathcal P_{A,\mathcal Z}$ and $\sum_{i=1}^n\alpha_i^2\ne 0$. We compose by a projective similitude $\phi:\mathbb P^n\rightarrow\mathbb P^n$ so that $\phi(A)$ has projective coordinates $[0:\cdots:0:1]$ and that $\hat{\mathcal H}:=\phi(\mathcal H)=V(x_1-b x_{n+1})\subset\mathbb P^n$. Set $\hat{\mathcal Z}:=\phi(\mathcal Z)=V(\hat F)\subset\mathbb P^n$, with $\hat F:= F\circ\phi$. Hence $\phi(\mathcal P_{A,\mathcal Z})=\mathcal P_{\phi(A),\tilde{\mathcal Z}}$ is the set of points $m[x_1:\cdots:x_{n+1}]\in\mathbb P^n$ such that $\bigwedge^2 \left(\left(\begin{array}{c}x_1\\ \vdots\\x_n\\0\end{array}\right),\left( \begin{array}{c}\hat F_{x_1}\\ \vdots\\\hat F_{x_n}\\0\end{array}\right)\right)=0$ in $\bigwedge^2\mathbf V$. We then define $G(x_2,...,x_{n+1}):=\hat F(bx_{n+1},x_2,...,x_{n+1}) \in\mathbb C[x_2,...,x_{n+1}]$ and $H(x_3,...,x_{n+1}):=G(0,x_3,...,x_{n+1})\in\mathbb C[x_3,...,x_{n+1}]$. We set $\mathcal Z_1:=V(G)\subset\mathbb P^{n-1}$, $\mathcal Z_2:=V(H)\subset\mathbb P^{n-2}$ and $B_k[0:...:0:1]\in\mathbb P^{k}$. We then write $\mathcal P_{n-k,B_{n-k},{\mathcal Z}_k}$ for the normal polar in $\mathbb P^{n-k}$ of $\mathcal Z_k\subset\mathbb P^{n-k}$ with respect to $B_{n-k}$, with the conventions $\mathcal P_{0,B_0,{\mathcal Z}_k}=\emptyset$ (if $k=n$) and $\mathcal P_{1,B_1,{\mathcal Z}_k}=\mathbb P^1$ (if $k=n-1$). We will prove that \[ \deg \mathcal P_{A,{\mathcal Z}}=d\times \deg \mathcal P_{n-1,B_{n-1},{\mathcal Z}_1} - (d-1)\times \deg \mathcal P_{n-2,B_{n-2},{\mathcal Z}_2}\, . 
\] Let $\Pi_1: \mathbb P^n\rightarrow \mathbb P^{n-1}$ and $\Pi_2:\mathbb P^n\rightarrow \mathbb P^{n-2}$ be the projections given by $\Pi_1[x_1:...:x_{n+1}]=[x_2:...:x_{n+1}]$ and $\Pi_2[x_1:...:x_{n+1}]=[x_3:...:x_{n+1}]$. Due to \eqref{simplification}, \begin{equation}\label{decomp} \hat{\mathcal H}\cap V(x_1\hat F_{x_2}-x_2\hat F_{x_1})\cap\Pi_1^{-1}(\mathcal P_{n-1,B_{n-1},{\mathcal Z}_1})=(\hat{\mathcal H}\cap \mathcal P_{\phi(A),\hat{\mathcal Z}})\cup [\hat{\mathcal H}\cap V(x_2,\hat F_{x_2})\cap\Pi_2^{-1}(\mathcal P_{n-2,B_{n-2},{\mathcal Z}_2})]. \end{equation} For a generic $\mathcal H$ and for a good choice of $\phi$, the union in the right hand side of \eqref{decomp} is disjoint and \begin{eqnarray*} \deg \mathcal P_{A,\mathcal Z}&=&\#(\mathcal H\cap \mathcal P_{A,\mathcal Z})\\ &=&\#(\hat{\mathcal H}\cap \mathcal P_{\phi(A),\hat{\mathcal Z}})\\ &=& d_{\mathcal Z}.\deg \mathcal P_{n-1,B_{n-1},{\mathcal Z}_1} -(d_{\mathcal Z}-1).\deg \mathcal P_{n-2,B_{n-2},{\mathcal Z}_2}. \end{eqnarray*} Hence $\deg \mathcal P_{A,\mathcal Z}=d_{\mathcal Z}$ if $n=2$ and
$\deg \mathcal P_{A,\mathcal Z}=d_{\mathcal Z}^2-d_{\mathcal Z}+1$ if $n=3$. The formula in the general case follows by induction. \end{proof} Analogously we have the following. \begin{prop}[n=3]\label{rqebasepoints} If $\mathcal S$ is an irreducible algebraic surface of $\mathbb P^3$ (with projective coordinates $[x:y:z:t]$) and if $\dim \mathcal B^{(0)}_{\mathcal S}=2$, then the two dimensional part of $\mathcal B^{(0)}_{\mathcal S}$ is $V(H)\subset\mathbb P^3$ for some homogeneous polynomial $H\in\mathbb C[x,y,z,t]$ of degree $d_H$. We write $\boldsymbol{\alpha}_{\mathcal S}=H\cdot\boldsymbol{\tilde\alpha}_{\mathcal S}$. Note that the regular map $\tilde\alpha_{\mathcal S}:\mathbb P^3\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S} \rightarrow\mathbb P^3$ (with $\dim \tilde{\mathcal B}^{(0)}_{\mathcal S}\le 1$) associated to $\boldsymbol{\tilde\alpha}_{\mathcal S}$. We then adapt our study with respect to $\tilde\alpha_{\mathcal S}$ instead of $\alpha_{\mathcal S}$ and define the corresponding polar $\tilde {\mathcal P}_{A,\mathcal S}$. Then, we have $\deg \tilde {\mathcal P}_{A,\mathcal S}=( d_{\mathcal S}
-d_H)^2-d_{\mathcal S}+d_H+1$. \end{prop} \begin{exa}\label{exemple1} Note that the only irreducible quadrics $\mathcal S=V(F)\subset\mathbb P^3$ such that $\dim \mathcal B^{(0)}_{\mathcal S}\ge 2$ are the spheres and cones, i.e. with $F$ of the following form $$F(x,y,z,t)=(x-x_0t)^2+(y-y_0t)^2+(z-z_0t)^2+a_0t^2,$$ where $x_0,y_0,z_0,a_0$ are complex numbers (it is a sphere if $a_0\ne 0$ and it is a cone otherwise).
Hence, due to Proposition \ref{degrepolaire}, the degree of a generic normal polar of any irreducible quadric of $\mathbb P^3$ which is neither a sphere nor a cone is 3.
Moreover, for a sphere or for a cone, applying Proposition \ref{rqebasepoints} with $H=t$, $\tilde{\mathcal P}_{A,S}$ is a line for a generic $A\in\mathbb P^3$. \end{exa}
\subsection{Proof of Theorems \ref{thmhypersurface} and \ref{formulegeneralehypersurface}} \begin{proof}[Proof of Theorem \ref{formulegeneralehypersurface}] Let $\mathcal Z$ be an irreducible hypersurface of $\mathbb P^n$ of degree $d_{\mathcal Z}\ge 2$ such that $\#\mathcal B_{\mathcal Z}<\infty$. It remains to prove that \begin{equation}\label{formuleclassenormalesurface} c_\nu(\mathcal Z)= d_{\mathcal Z}.\deg \mathcal P_{A,\mathcal Z}
-\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z,\mathcal P_{A,\mathcal Z}\right), \end{equation} for a generic $A\in\mathbb P^n$. Note that, for a generic $A\in\mathbb P^n$, since $\overline{\alpha_{\mathcal Z}(\mathcal Z)}$ is irreducible of dimension at most $n-1$, we have $\#\bigcap_{i<j<k}\mathcal H_{A,i,j,k}\cap \overline{\alpha_{\mathcal Z}(\mathcal Z)}<\infty$ and so $\# \mathcal P_{A,\mathcal Z}\cap \mathcal Z<\infty$ (since $\#\mathcal B_{\mathcal Z}<\infty$). Since $\dim \mathcal P_{A,\mathcal Z}=1$ and $\# \mathcal Z\cap \mathcal P_{A,\mathcal Z}<\infty$ for a generic $A\in\mathbb P^n$, due to Proposition \ref{degrepolaire} and to the Bezout formula, we have: \begin{eqnarray*} d_{\mathcal Z}.\deg \left(\mathcal P_{A,\mathcal Z}\right)&=& \deg\left(\mathcal Z\cap \mathcal P_{A,\mathcal Z}\right)\\ &=&\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)+
\sum_{P\in\mathcal Z\setminus\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right). \end{eqnarray*} Now let us prove that, for a generic $A\in\mathbb P^n$, \begin{equation}\label{multpolaire} \sum_{P\in\mathcal Z\setminus\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)= \#((\mathcal Z\cap \mathcal P_{A,\mathcal Z})\setminus\mathcal B_{\mathcal Z}). \end{equation} Since $\alpha_{\mathcal Z}$ defines a rational map, $\overline{\alpha_{\mathcal Z}(\mathcal Z)}$ is irreducible and its dimension is at most $n-1$.
Assume first that $\dim \overline{\alpha_{\mathcal Z}(\mathcal Z)}<n-1$. For a generic $A\in\mathbb P^n$, the plane $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ does not meet $\overline{\alpha_{\mathcal Z}(\mathcal Z)}$ and so the left and right hand sides of \eqref{multpolaire} are both zero. So Formula \eqref{formuleclassenormalesurface} holds true with $c_\nu(\mathcal Z)=0$.
Assume now that $\dim \overline{\alpha_{\mathcal Z}(\mathcal Z)}=n-1$. Then, for a generic $A\in\mathbb P^n$, the plane $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ meets $\alpha_{\mathcal Z}(\mathcal Z)$ transversally (with intersection number 1 at every intersection point) and does not meet $\overline{\alpha_{\mathcal Z}(\mathcal Z)}\setminus\alpha_{\mathcal Z}(\mathcal Z)$. This implies that, for a generic $A\in\mathbb P^n$, we have $i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)=1$ for every $P\in(\mathcal Z\cap\mathcal P_{A,\mathcal Z}) \setminus\mathcal B_{\mathcal Z}$ and so \eqref{multpolaire} follows. Hence, for a generic $A\in\mathbb P^n$, we have \begin{eqnarray*} d_{\mathcal Z}.\deg \left(\mathcal P_{A,\mathcal Z}\right) &=&\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)+
\#\{P\in \mathcal Z\setminus \mathcal B_{\mathcal Z} :\ A\in\mathcal N_P\mathcal Z\}\\ &=&\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z,\mathcal P_{A,\mathcal Z}\right)+c_\nu(\mathcal Z), \end{eqnarray*} which gives \eqref{formuleclassenormalesurface}. \end{proof} \begin{proof}[Proof of Theorem \ref{thmhypersurface}] Let $\mathcal H=V(\sum_{i=1}^{n+1}a_ix_i)$ be a hyperplane such that $\mathcal H\ne\mathcal H^\infty$. For every $m\in\mathcal H$, $n_{\mathcal H}(m)[a_1:\cdots:a_n:0]\in\mathbb P^n$. Hence every $A\in\mathbb P^n$ belongs to a single normal line to $\mathcal H$ (the line containing $A$ and $[a_1:\cdots:a_n:0]$).
The case $d_{\mathcal Z}\ge 2$ follows from Theorem \ref{formulegeneralehypersurface} and Remark \ref{Basegenerique}. \end{proof}
\section{Orthogonal incidence variety and Schubert classes}\label{incidenceschubert} \subsection{Orthogonal incidence variety} Let us write as usual $\mathbb G(1,n)$ (resp. $\mathbb G(n-1,n)$) for the grassmannian of the lines (resp. of the hyperplanes) of $\mathbb P^n$. Let us write $pr_{1}:\mathbb{G}(1,n)\times \mathbb{G}(n-1,n)\rightarrow \mathbb{G}(1,n)$ and $pr_{2}:\mathbb{G}(1,n)\times \mathbb{G} (n-1,n)\rightarrow \mathbb{G}(n-1,n)$ for the canonical projections. We define the {\bf orthogonal incidence variety} $\mathcal{I}^{\perp }$ by $$\mathcal{I}^{\perp }:=\{(\mathcal{L}_1,\mathcal{H}_1)\in \mathbb G(1,n)\times \mathbb{G}(n-1,n)\, :\, \mathcal{L}_1{\perp }\mathcal {H}_1\}.$$ Let us write $p_{1}:\mathcal{I}^{\mathbb{\perp }}\rightarrow \mathbb{G} (1,n)$ and $p_{2}:\mathcal{I}^{\mathbb{\perp }}\rightarrow \mathbb{G}(n-1,n)$ for the restrictions of $pr_{1}$ and $pr_{2}$. We want to describe in the Chow ring of $\mathbb{G}(1,n)$ and $\mathbb{G} (n-1,n)\equiv \mathbb{P}^{n\vee }$ the rational equivalence class of $p_{2}p_{1}^{-1}(\mathcal{L)}$ and $ p_{1}p_{2}^{-1}(\mathcal{H)}$. \begin{lem} $p_{2}\circ p_{1}^{-1}:\mathbb{G}(1,n)\setminus\{\mathcal L\subset\mathcal H^\infty\} \rightarrow \mathbb{G}(n-1,n)$ is a line projective bundle and $p_{1}\circ p_{2}^{-1}:\mathbb{G}(n-1,n)\setminus\{\mathcal H^\infty\}\rightarrow\mathbb{G}(1,n)$ and is a plane projective bundle. \end{lem} \begin{proof} Let $\mathcal H=V(a_1x_1+\cdots a_{n+1}x_{n+1})$ be a projective hyperplane of $\mathbb P^n$, which is not $\mathcal H^\infty$. Then $$ p_{1}(p_{2}^{-1}(\mathcal{H}))=\{\mathcal L\in \mathbb G(1,n),\ (a_1,\cdots,a_n,0)\in \mathbf{L}\}.$$ Moreover $ p_{1}(p_{2}^{-1}(\mathcal{H}^\infty))=\mathbb G(1,n)$.\\ Let $\mathcal L\not\subset\mathcal H^\infty$ be a line of $\mathbb P^3$, let $A_0[a_1:\cdots:a_n:0]$ be the only point in $\mathcal L\cap\mathcal H^\infty$, we have $$ p_2(p_1^{-1}(\mathcal L))=\{\mathcal H\in\mathbb G(n-1,n)\, :\ \exists [a:b]\in\mathbb P^1,\
\mathcal H=V(aa_1x_1+\cdots+aa_nx_n+bx_{n+1}) \}.$$ Finally, if $\mathcal L=\mathbb P(\mathbf L)\subset\mathcal H^\infty$ is a projective line, then we have $$ p_2(p_1^{-1}(\mathcal L))=\{\mathcal H\in\mathbb G(n-1,n)\, :\ \exists a,b\in\mathbb C,\ \exists (a_1,\cdots,a_n,0)\in \mathbf{L},\
\mathcal H=V(aa_1x_1+\cdots+aa_nx_n+bx_{n+1}) \}.$$ \end{proof} It follows directly from the proof of this lemma that if $\mathcal H\in\mathbb G(n-1,n)\setminus\{\mathcal H^\infty\}$, the class of $p_{1}(p_{2}^{-1}(\mathcal{\mathcal H)})$ in the Chow ring $A^*(\mathbb{G}(1,n))$ is simply the Schubert class $\sigma_{n-1}$.
\subsection{Schubert classes for $\mathbb G(1,3)$} Given a flag $\mathbf{F}=\{\mathbf{V_1}\subset \mathbf {V_2}\subset \mathbf {V_3}\subset \mathbf {V_4}=\mathbf{V}\}$ of $\mathbf V$ with $\dim_{\mathbb{C}}(\mathbf V_{i})=i$ for every integer $i$, we consider its associated projective flag $\mathcal F$ of $\mathbb{P}^{3}$ (image by the canonical projection $\pi :\mathbf{V\backslash \{0\}}\rightarrow \mathbb{P}^{3}$) \[ \mathcal{F}=\{p\in \mathcal{D}\subset \mathcal{P}\subset \mathbb{P}^{3}\}. \] Let $\mathcal{Z}^{k}$ denote the set of cycles of codimension $k$ in $\mathbb{G}(1,3)$. We recall that the \textbf{Schubert cycles} of $\mathbb{G}(1,3)$ associated to $\mathcal F$ (or to {\bf F}) are given by \begin{equation}\label{EQSchubert} \left\{ \begin{array}{c} \Sigma _{0,0}:=\mathbb{G}(1,3)\in \mathcal{Z}^{0}(\mathbb{G}(1,3)) \\ \Sigma _{1,0}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{D\cap L\neq \varnothing }\right\} \in \mathcal{Z}^{1}(\mathbb{G}(1,3)) \\ \Sigma _{2,0}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);p\mathcal{\in L} \right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3)) \\ \Sigma _{1,1}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{L\subset P} \right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3)) \\ \Sigma _{2,1}:=\Sigma _{2,0}\cap \Sigma _{1,1}\in \mathcal{Z}^{3}(\mathbb{G} (1,3)) \\ \Sigma _{2,2}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{L=D}\right\} \in \mathcal{Z}^{4}(\mathbb{G}(1,3)) \end{array} \right. . \end{equation} We write as usual $A^{\ast }(\mathbb{G}(1,3))$ for the Chow ring of $\mathbb{G}(1,3)$ and $\sigma _{i,j}:=\left[ \Sigma _{i,j}\right] \in A^{i+j}(\mathbb{G}(1,3))$ for \textbf{Schubert classes}. For convenience we will use the notation $\Sigma _k:=\Sigma _{k,0}$ and $\sigma _k:=\sigma _{k,0}$. 
We recall that $A^{\ast }(\mathbb{G}(1,3))$ is freely generated as graded $\mathbb{Z}$ -module by $\left\{ \sigma _{i,j};0\leq j\leq i\leq 2\right\} $ with the following multiplicative relations \[ (E)\left\{ \begin{array}{c} \sigma _{1,1}=\sigma _{1}^{2}-\sigma _{2} \\ \sigma _{1,1}\cdot \sigma _{1}=\sigma _{1}\cdot \sigma _{2}=\sigma _{2,1} \\ \sigma _{2,1}\cdot \sigma _{1}=\sigma _{1,1}^{2}=\sigma _{2}^{2}=\sigma _{2,2} \\ \sigma _{1,1}\cdot \sigma _{2}^{{}}=0 \end{array} \right. . \] Hence, the Chow ring of the grassmannian is \[ A^{\ast }(\mathbb{G}(1,3))=\frac{\mathbb{Z[}\sigma _{1},\sigma _{2}]}{ (2\sigma _{1}\cdot \sigma _{2}-\sigma _{1}^{3},\sigma _{1}^{2}\cdot \sigma _{2}-\sigma _{2}^{2})}. \]
\subsection{Proofs of Theorems \ref{formulegeneralesurface} and \ref{factorisable}} Recall that we have defined $ \mathfrak{N}_{\mathcal{S}}:=\overline{\{\mathcal{N}_{m}(\mathcal{S});m\in \mathcal{S}\}}\subset
\mathbb{G}(1,3)$ and $\mathfrak{n}_{\mathcal{S}}:=[\mathfrak{N}_{\mathcal{S} }]\in A^{2}(\mathbb{G}(1,3))$. \begin{prop}\label{PROP1} Let $\mathcal S\subset \mathbb P^3$ be an irreducible surface of degree $d\ge 2$ of $\mathbb P^3$. \begin{itemize} \item If $\#\mathcal B_{\mathcal S}<\infty$, we have \[ \mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)). \] \item If $\dim\mathcal B^{(0)}_{\mathcal S}=2$ with two dimensional part $V(H)$ and $\#\tilde{\mathcal B}_{\mathcal S}<\infty$ (with the notations of Proposition \ref{rqebasepoints}), then we have \[ \mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-d_H-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)). \] \end{itemize} \end{prop} \begin{proof} Since $\mathfrak{n}_{\mathcal{S}}\in A^{2}(\mathbb{G}(1,3)),$ we have $\mathfrak{n}_{\mathcal{S} }=a.\sigma _{2}+b.\sigma _{1,1}$ for some integers $a$ and $b$. Morever by Kleiman's transversality theorem (see for example \cite[Thm 5.20]{Eisenbud-Harris}), since $\Sigma _{1,1}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{L\subset P }\right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3))$, we have $\mathfrak{n}_{\mathcal{S} }\cdot \sigma _{1,1}=\left( a\sigma _{2}+b\sigma _{1,1}\right) \cdot \sigma_{1,1}$ and so, using \eqref{EQSchubert}, we obtain \begin{equation} \mathfrak{n}_{\mathcal{S}}\cdot \sigma _{1,1}=b.\sigma _{1,1}^{2}=b.\sigma_{2,2}=b. \end{equation} Analogously, since $\Sigma _{2}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);p\in \mathcal{L} \right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3))$, due to \eqref{EQSchubert}, we have \begin{equation} \mathfrak{n}_{\mathcal{S}}\cdot \sigma _{2}=\left( a\sigma _{2}^{{}}+b\sigma _{1,1}\right) \cdot \sigma _{2}=a\sigma _{2}^{2}=a\sigma_{2,2}=a .\end{equation} Now it remains to compute $\mathfrak{n}_{\mathcal{S}}\cdot \sigma _{2}$ and $\mathfrak{n}_{\mathcal{S}}\cdot \sigma _{1,1}$, i.e. 
to compute the cardinality of the intersection of $\mathfrak{N }_{\mathcal{S}}$ with $\Sigma _{1,1}$ and with $\Sigma _{2}$.\\ Let us start with the computation of $a=\mathfrak{n}_{\mathcal {S}}\cdot \sigma_{2}$. If $\#\mathcal B_{\mathcal S}<\infty$, then, for a generic $P\in\mathbb P^3$, we have $$\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{2}=\left\{ \mathcal{L}\in \mathfrak{N} _{\mathcal{S}};P\in \mathcal{L}\right\} =\left\{\mathcal{N}_{m}\mathcal{S} ;m\in\mathcal S\setminus\mathcal B_{\mathcal S},\ P\in \mathcal{N}_{m}\mathcal{S}\right\}$$ and if $\dim \mathcal B^{(0)}_{\mathcal S}=2$ and $\#\tilde{\mathcal B}^{(0)}_{\mathcal Z}\cap{\mathcal S}<\infty$, then, for a generic $P\in\mathbb P^3$, we have $$\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{2}=\left\{ \mathcal{L}\in \mathfrak{N} _{\mathcal{S}};P\in \mathcal{L}\right\} =\left\{\mathcal{N}_{m}\mathcal{S} ;m\in\mathcal S\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S},\ P\in \mathcal{N}_{m}\mathcal{S}\right\}.$$ So, in any case, $a=c_{\nu }(\mathcal S)$ by definition of the normal class of $\mathcal S$.\\ Now, for $b$, since $\#\mathcal B_{\mathcal S}<\infty$, we note that, for a generic projective plane $\mathcal H\subset\mathbb P^3$, we have $$\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{1,1}=\left\{ \mathcal{L}\in \mathfrak{ N}_{\mathcal{S}};\mathcal{L\subset H}\right\} =\left\{ \mathcal{N}_{m} \mathcal{S};\ m\in\mathcal S\setminus \mathcal B_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}\right\} .$$ We have $\mathcal{H}=V(a_1 X+a_2 Y+a_3 Z+a_4 T)\subset \mathbb{P}^{3} $ for some complex numbers $a_1$, $a_2$, $a_3$ and $a_4$. Let $m[x:y:z:t]\in \mathbb P^3$. For a generic $\mathcal H$, we have \begin{eqnarray*} m\in\mathcal S\setminus\mathcal B_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H} &\Leftrightarrow & m\in\mathcal S\setminus\mathcal B_{\mathcal S},\ \ m\in\mathcal H,\ \
n_{\mathcal S}(m)\in\mathcal H\\ &\Leftrightarrow&\left\{ \begin{array}{c} F(x,y,z,t)=0 \\ a_1 F_{x}+a_2 F_{y}+a_3 F_{z}=0\\ a_1 x+a_2 y+a_3 z+a_4 t=0 \end{array} \right. . \end{eqnarray*} Hence $b=d_{\mathcal S}(d_{\mathcal S}-1)$. Assume now that $\dim\mathcal B^{(0)}_{\mathcal S}=2$ with two dimensional part $V(H)$ and $\#\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap{\mathcal S}<\infty$. For a generic projective plane $\mathcal H=V(A^\vee)\subset \mathbb P^3$, we have \begin{eqnarray*} \mathfrak{N}_{\mathcal{S}}\cap \Sigma _{1,1}&=&\left\{ \mathcal{N}_{m} \mathcal{S};\ m\in\mathcal S\setminus \tilde{\mathcal B}^{(0)}_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}\right\}\\ &=&\{\mathcal N_m\mathcal S;\ m\in\mathcal S\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S},\ \ m\in\mathcal H,\ \
n_{\mathcal S}(m)\in\mathcal H\}. \end{eqnarray*} Now there are two cases: \begin{itemize} \item If $H$ divides $F_x$, $F_y$ and $F_z$ and then $n_{\mathcal S}=[\frac{F_x}H:\frac{F_y}H:\frac{F_z}H]$ and $b=d_{\mathcal S}(d_{\mathcal S}-d_H-1)$. \item Otherwise $H=tH_1$, with $H_1$ dividing $F_x$, $F_y$ and $F_z$ and $V(X)\subset V(x{F_y}-y{F_x},x{F_z}-z{F_x},y{F_z}-z{F_y})$. Hence $n_{\mathcal S}=[\frac{F_x}{H_1}:\frac{F_y}{H_1}:\frac{F_z}{H_1}]$. We have
\[ m\in\mathcal S\setminus(\mathcal H^\infty\cup\tilde{\mathcal B}^{(0)}_{\mathcal S}),\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H} \quad\Leftrightarrow\quad \left\{ \begin{array}{c} F(x,y,z,t)=0,\ \ t\ne 0 \\ a_1 \frac{F_{x}}{H_1}+a_2\frac{ F_{y}}{H_1}+a_3\frac {F_{z}}{H_1}=0\\ a_1 x+a_2 y+a_3 z+a_4 t=0 \end{array} \right. \] and \[ m\in\mathcal S_\infty\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H} \quad\Leftrightarrow\quad \left\{ \begin{array}{c} F(x,y,z,t)=0\\ t=0\\ a_1 x+a_2 y+a_3 z=0 \end{array} \right. , \] so $$b=d_{\mathcal S}(d_{\mathcal S}-d_H)+d_{\mathcal S}- \sum_{P\in \mathcal S\cap\mathcal H^\infty\cap\mathcal H}i_P\left(\mathcal S,V(a_1 \frac{F_{x}}{H_1}+a_2\frac{ F_{y}}{H_1}+a_3\frac {F_{z}}{H_1}),\mathcal H\right)$$ (due to the Bezout Theorem). Now let $P\in \mathcal S\cap\mathcal H^\infty\cap\mathcal H$; then $x\ne 0$ or $y\ne 0$ or $z\ne 0$. Assume for example that $x\ne 0$; we have \begin{eqnarray*} &\ &i_P\left(\mathcal S,V(a_1 \frac{F_{x}}{H_1}+a_2\frac{ F_{y}}{H_1}+a_3\frac {F_{z}}{H_1}),\mathcal H\right)\\ &=&i_P\left(\mathcal S,V(t(-a_4\frac{F_x}{H_1}+a_2\frac{xF_y-yF_x}H+a_3\frac{xF_z-zF_x}H)),\mathcal H\right)\\ &=&1 +i_P\left(\mathcal S,V(-a_4\frac{F_x}{H_1}+a_2\frac{xF_y-yF_x}H+a_3\frac{xF_z-zF_x}H),\mathcal H\right)\\ &=&2 \end{eqnarray*} for a generic $\mathcal H$ and so $b=d_{\mathcal S}(d_{\mathcal S}-d_H-1)$. \end{itemize} \end{proof} Theorem \ref{formulegeneralesurface} follows from Theorem \ref{formulegeneralehypersurface} and Proposition \ref{PROP1}. 
\begin{proof}[Proof of Theorem \ref{factorisable}] If $\dim\mathcal B^{(0)}_{\mathcal S}=2$, we saw in Proposition \ref{rqebasepoints} that we can adapt our study to compute the degree of the reduced normal polar $\tilde {\mathcal P}_{A,\mathcal S}$ associated to the rational map $\tilde\alpha_{\mathcal S}:\mathbb P^3\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S} \rightarrow\mathbb P^3$ such that $\boldsymbol{\alpha}_{\mathcal S}=H\cdot\boldsymbol{\tilde\alpha}_{\mathcal S}$. Using Proposition \ref{rqebasepoints} and following the proof of Theorem \ref{formulegeneralehypersurface}, we obtain Theorem \ref{factorisable}. \end{proof}
\section{Proof of Theorem \ref{thmsurfaces}}\label{sec:proofthm1} We apply Theorem \ref{formulegeneralesurface}. Note that, since $\mathcal S$ is smooth, it has only a finite number of points of tangency with $\mathcal H_\infty$ (due to Zak's theorem on tangencies \cite[corollary 1.8]{Zak}). Since the surface is smooth, $\mathcal B_{\mathcal S}$ consists of points of tangency of $\mathcal S$ with $\mathcal H_\infty$ and of points of tangency of $\mathcal S_\infty$ with $\mathcal U_\infty$. It remains to compute the intersection multiplicity of $\mathcal S$ with a generic normal polar at these points. Let us recall that if $A\not\in\mathcal H^\infty$, then $$i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\dim_\mathbb C \left(\left(
\mathbb C[x,y,z,t]/I\right)_P \right)$$ where $I$ is the ideal $(F,E_{A,1,2}E_{A,1,3},E_{A,2,3})$ of $\mathbb C[x,y,z,t]$, with the notation $E_{A,i,j}$ introduced in the proof of Proposition \ref{degrepolaire}.
To compute these quantities, it may be useful to make an appropriate change of coordinates with the use of a projective similitude of $\mathbb P^3$. Note that: \begin{itemize} \item[*] The umbilical $\mathcal U_\infty$ is stable under the action of the group of projective similitudes of $\mathbb P^3$. \item[*] For any $P\in\mathcal H_\infty\setminus\mathcal U_\infty$, there exists a projective similitude $\zeta$ of $\mathbb P^3$ mapping $[1:0:0:0]$ to $P$. \footnote{Let $P[x_0:y_0:z_0:0]$ with $x_0^2+y_0^2+z_0^2=1$. Assume for example that $z_0^2\ne 1$ (up to a permutation of the coordinates) and take $\zeta$ given by $\kappa'(b,A)$ (for any $b\in\mathbb C^n)$ with $A=(u\, v\, w)$ where $u=(x_0,y_0,z_0)$ and $v=(x_0^2+y_0^2)^{-\frac 12}(y_0,-x_0,0)$ and $w=(x_0^2+y_0^2)^{-\frac 12}(x_0z_0,y_0z_0,-x_0^2-y_0^2)$.} \item[*] For any $P\in\mathcal U_\infty$, there exists a projective similitude $\zeta$ of $\mathbb P^3$ mapping $[1:i:0:0]$ to $P$. \footnote{Let $P[x_0:y_0:z_0:0]\in\mathcal U_\infty$. Assume for example that $y_0\ne 0$ and $x_0^2+y_0^2\ne 0$ (up to a composition by a permutation matrix). A suitable $\zeta$ is given by $\kappa'(b,A)$ (for any $b\in\mathbb C^n)$ with $A=\left(\begin{array}{ccc}\frac{x_0(y_0^2-1)}{2y_0^2}&-\frac{ix_0(1+y_0^2)}{2y_0^2}&\frac{\sqrt{x_0^2+y_0^2}}{y_0}\\ \frac{1+y_0^2}{2y_0^2}&\frac{i(1-y_0^2)}{y_0}&0\\ \frac{i(y_0^4+y_0^2x_0^2-y_0^2-x_0^2)}{y_0^2\sqrt{x_0^2+y_0^2}}&\frac{(1+y_0^2)\sqrt{x_0^2+y_0^2}}
{2y_0^2}&\frac{ix_0}{y_0}\end{array}\right)$.}
\end{itemize} We recall that a multiple point of order $k$ of a plane curve is ordinary if its tangent cone contains $k$ pairwise distinct lines and that an ordinary cusp of a plane curve is a double point with a single tangent line in the tangent cone, this tangent line not being contained in the cubic cone of the curve at this point. \begin{itemize} \item \underline{Let $P$ be a (non singular) point of tangency of $\mathcal S$ with $\mathcal H_\infty$.}
We prove the following: \begin{itemize} \item[(a)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k^2$ for a generic $A\in\mathbb P^3 $ if $P$ is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty\setminus \mathcal U_\infty$. \item[(b)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k(k+1)$ for a generic $A\in\mathbb P^3$ if $P$ is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone of $\mathcal S_\infty$. \item[(c)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=3$ for a generic $A\in\mathbb P^3$ if $P$ is an ordinary cusp of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone of $\mathcal S_\infty$. \item[(d)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=2$ for a generic $A\in\mathbb P^3$ if $P$ is an ordinary cusp of $\mathcal S_\infty\setminus \mathcal U_\infty$. \end{itemize} Due to Lemma \ref{preservpolar}, we assume that $P[1:\theta:0:0]$ with $\theta=0$ (if $P\in\mathcal H_\infty\setminus\mathcal U_\infty$) or $\theta=i$ (if $P\in\mathcal U_\infty$). Since $\mathcal T_P\mathcal S=\mathcal H_\infty$, we suppose that $F_x(P)=F_y(P)=F_z(P)=0$ and $F_t(P)=1$ (without any loss of generality). Recall that the Hessian determinant $H_F$ of $F$ satisfies\footnote{see for example \cite{fredsoaz3}.}
$$H_F=\frac{(d_{\mathcal S}-1)^2}{x^2}\left|\begin{array}{cccc}
0&F_{y}&F_{z}&F_{t}\\
F_{y}&F_{yy}&F_{yz}&F_{yt}\\
F_{z}&F_{yz}&F_{zz}&F_{zt}\\
F_{t}&F_{yt}&F_{zt}&F_{tt} \end{array}
\right|.$$ Hence $H_F(P)\ne 0\ \Leftrightarrow\ [F_{yy}F_{zz}-F_{yz}^2](P)\ne 0$. For a generic $A\in\mathbb P^3$, we have $$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P \cong
\left(\frac{\mathbb C[x,y,z,t]}{(F,A_2,A_3)}\right)_P.$$ Recall that $A_2=atF_z-ctF_x+d(zF_x-xF_z)$ and $A_3=-atF_y+btF_x+d(xF_y-yF_x)$ (with $A[a:b:c:d]$). Using the Euler identity $xF_x+yF_y+zF_z+tF_t=d_{\mathcal S}F$, we obtain that $$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P \cong
\left(\frac{\mathbb C[x,y,z,t]}{(F,A'_2,A'_3)}\right)_P \cong \left(\frac{\mathbb C[y,z,t]}{(F_*,A'_{2*},A'_{3*})}
\right)_{(0,0,0)}$$ with $$A'_2:=atxF_z+ct(yF_y+zF_z+tF_t)-d(z(yF_y+zF_z+tF_t)+x^2F_z), $$ $$A'_3:=-atxF_y-bt(yF_y+zF_z+tF_t)+d(x^2F_y+y(yF_y+zF_z+tF_t))$$ and with $G_{*}(y,z,t):=G(1,\theta+y,z,t)$ for any homogeneous $G$. In a neighbourhood of $(0,0,0)$, $V(F_*)$ is given by $t=\varphi(y,z)$ with $\varphi(y,z)\in\mathbb C[[y,z]]$ and \begin{equation}\label{deriveephi} \varphi_y(y,z)=-\frac{F_y(1,\theta+y,z,\varphi(y,z))}{F_t(1,\theta+y,z,\varphi(y,z))}\quad\mbox{and}\quad\varphi_z(y,z)=-\frac{F_z(1,\theta+y,z,\varphi(y,z))}{F_t(1,\theta+y,z,\varphi(y,z))}. \end{equation} So $$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong \frac{\mathbb C[[y,z]]}{(A'_{2**},A'_{3**})},$$ with $G_{**}(y,z):=G(1,\theta+y,z,\varphi(y,z))$. Now due to \eqref{deriveephi}, we have $$H:=A'_{2**}=(F_t)_{**} [-a\varphi\varphi_z+c\varphi(\varphi-(\theta+y)\varphi_y-z\varphi_z)+d(z((\theta+y)\varphi_y+z\varphi_z-\varphi)+\varphi_z)]$$ and $$K:=A'_{3**}=(F_t)_{**} [a\varphi\varphi_y-b\varphi(\varphi-(\theta+y)\varphi_y-z\varphi_z)-d(\varphi_y+(\theta+y)((\theta+y)\varphi_y+z\varphi_z-\varphi))].$$ Hence $$i_P(\mathcal S,\mathcal P_{A,\mathcal S})=i_{(0,0)}(\Gamma_H,\Gamma_K),$$ where $\Gamma_H$ and $\Gamma_K$ are the analytic plane curves of respective equations $H$ and $K$ of $\mathbb C[[y,z]]$.
Note that $(H_y(0,0),H_z(0,0))=d(\varphi_{yz}(0,0),\varphi_{zz}(0,0))$. Analogously, we obtain $(K_y(0,0),K_z(0,0))=-d(1+\theta^2)(\varphi_{yy}(0,0),\varphi_{yz}(0,0))$. \begin{itemize} \item[(a)] If $P\not\in\mathcal U_\infty$ and if $P$ is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty$, with our change of coordinates we have $P[1:0:0:0]$ (i.e. $\theta=0$) and $V((\varphi_{k+1})_y)$ and $V((\varphi_{k+1})_z)$ have no common lines.\footnote{Recall that the tangent cone $V(\varphi_{k+1})$ of $\Gamma_{\varphi}$ at $(0,0)$ (corresponding to the tangent cone of $V(\mathcal S_\infty)$ at $P$) has pairwise distinct tangent lines if and only if $V((\varphi_{k+1})_y)$ and $V((\varphi_{k+1})_z)$ have no common lines. } Then the first homogeneous parts of $H$ and $K$ have order $k$ and are $H_k=d(\varphi_{k+1})_z$ and $K_k=-d(\varphi_{k+1})_y$ respectively. Since $\Gamma_{H_k}$ and $\Gamma_{K_k}$ have no common lines, we conclude that $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k^2$. \item[(b)] Assume now that $P\in\mathcal U_\infty$ is an ordinary multiple point of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone of $\mathcal S_\infty$. With our changes of coordinates, this means that $P[1:i:0:0]$ (i.e. $\theta=i$), that $y$ does not divide $\varphi_{k+1}$ (since $V(y)$ is the tangent line to $\mathcal U_\infty$ at $P$) and that $V((\varphi_{k+1})_y)$ and $V((\varphi_{k+1})_z)$ have no common lines.
Note that the first homogeneous parts of $H$ and $K$ have respective orders $k$ and $k+1$ and are respectively $H_k=d(\varphi_{k+1})_z$ and \begin{eqnarray*} K_{k+1}&=&-di[2y(\varphi_{k+1})_y+z(\varphi_{k+1})_z-\varphi_{k+1}]\\ &=&-di\left[\left(2-\frac 1{k+1}\right) y(\varphi_{k+1})_y+\left(1-\frac 1{k+1}\right)z(\varphi_{k+1})_z\right], \end{eqnarray*} due to the Euler identity applied to $\varphi_{k+1}$. Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k(k+1)$ if $V((\varphi_{k+1})_z)$ and $V(y(\varphi_{k+1})_y)$ have no common lines, which is true since $V((\varphi_{k+1})_z)$ and $V((\varphi_{k+1})_y)$ have no common lines and since $y$ does not divide $(\varphi_{k+1})_z$. \item[(c)] Assume that $P\in\mathcal U_\infty$ is an ordinary cusp (of order 2) of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone of $\mathcal S_\infty$. With our changes of coordinates, this means that $P[1:i:0:0]$ (i.e. $\theta=i$), that $H_F(P)=0$ and $F_{zz}(P)\ne 0$ (since $V(y)$ is the tangent line to $\mathcal U_\infty$ at $P$).
Note that $H=-d(F_{yz}(P)y+F_{zz}(P)z)+...$. In a neighbourhood of $(0,0)$, $H(y,z)=0\ \Leftrightarrow\ z=h(y)$ with
$h(y)=-\frac{F_{yz}(P)}{F_{zz}(P)}y-\frac{ F_{zzz}(P)F^2_{yz}(P)
-2F_{yzz}(P)F_{yz}(P)F_{zz}(P)+F_{yyz}(P)F^2_{zz}(P)}{F_{zz}^3(P)
}y^2+...$
and we obtain that $\val_yK(y,h(y))=3$ if $P\not\in V(F_{yyy}F_{zz}^3-3F_{yyz}F_{zz}^2F_{yz} +3F_{yzz}F_{zz}F_{yz}^2-F_{zzz}F_{yz}^3)$ which means that the line $V(F_{yz}(P)y+F_{zz}(P)z)$ (corresponding to the tangent cone of $V(F(1,y,z,0))$) is not contained in the cubic cone of $V(F(1,y,z,0))$. Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=3$ if the cusp $P$ of $\mathcal S_\infty$ is ordinary. \item[(d)] Assume now that $P$ is an ordinary cusp (of order 2) of $\mathcal S_\infty\setminus \mathcal U_\infty$.
With our change of coordinates, this means that $P[1:0:0:0]$ (i.e. $\theta=0$), that $H_F(P)=0$ and $P\not\in V(F_{yy},F_{yz},F_{zz})$. This implies that $F_{yy}(P)\ne 0$ or $F_{zz}(P)\ne 0$.
If $F_{yy}(P)\ne 0$, the tangent line to $\mathcal S_\infty$ at $P$ is given by $V(t,F_{yy}(P)y+F_{yz}(P)z)$.
If $F_{zz}(P)\ne 0$, the tangent line to $\mathcal S_\infty$ at $P$ is given by $V(t,F_{zz}(P)z+F_{yz}(P)y)$.
The fact that the cusp is ordinary implies also that the tangent line is not contained in the cubic cone of $V(F(1,y,z,0))$, i.e. this tangent line is not contained in $V(F_{yyy}(P)y^3+3F_{yyz}(P)y^2z +3F_{yzz}(P)yz^2+F_{zzz}(P)z^3)$.
Hence, we have either $$F_{yy}[F_{yyy}F_{yz}^3-3F_{yyz} F_{yz}^2F_{yy}+3F_{yzz}F_{yz}F_{yy}^2-F_{zzz}F_{yy}^3](P)\ne 0$$ or $$F_{zz}[F_{zzz}F_{yz}^3-3F_{yzz} F_{yz}^2F_{zz}+3F_{yyz}F_{yz}F_{zz}^2-F_{yyy}F_{zz}^3](P)\ne 0.$$ Note that, if $F_{yy}(P)$ and $F_{zz}(P)$ are both non null, these two conditions are equivalent.
Assume for example that the first condition holds. In a neighbourhood of $(0,0)$, $H(y,z)=0\,\Leftrightarrow\, y=h(z)$ and $K(y,z)=0\, \Leftrightarrow\, y=k(z)$, with $$h'(z)=-\frac{H_z(h(z),z)}{H_y(h(z),z)}\quad\mbox{and}\quad
k'(z)=-\frac{K_z(h(z),z)}{K_y(h(z),z)}.$$ Hence we have $$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong \frac{\mathbb C[[y,z]]}{(y-h(z),y-k(z))}
\cong \frac{\mathbb C[[z]]}{((h-k)(z))} .$$ We have $h'(0)=k'(0)=-\varphi_{yz}(0,0)/\varphi_{yy}(0,0)$ and
$(h''-k'')(0)=\left[\varphi_{yy}^3\varphi_{zy} \left(\varphi_{yyy}\varphi_{yz}^3-3\varphi_{yyz}\varphi_{yz}^2
\varphi_{yy}+3\varphi_{yzz}\varphi_{yy}^2
\varphi_{yz}-\varphi_{zzz}\varphi_{yy}^3\right)\right](0,0)$ \end{itemize} Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\dim_{\mathbb C}\frac{\mathbb C[[z]]}{((h-k)(z))}=2$.
\item \underline{Let $P$ be a simple (non singular) point of tangency of $\mathcal S_\infty$ with $\mathcal U_\infty$.}
Let us prove that $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=1$. Due to Lemma \ref{preservpolar}, we can assume that $P=[1:i:0:0]$ (i.e. $\theta=i$) and that $F_t(P)= 1$. As previously, we note that $$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong \frac{\mathbb C[[y,z]]}{(A_{1**},A'_{3**})},$$ with $$A_{1**}(y,z)=(F_t)_{**}[\varphi(b\varphi_z-c\varphi_y)-d((y+i)\varphi_z-z\varphi_y)]$$ and $$A'_{3**}=(F_t)_{**} [a\varphi\varphi_y-b\varphi(\varphi-(\theta+y)\varphi_y-z\varphi_z)-d(\varphi_y+(i+y)((i+y)\varphi_y+z\varphi_z-\varphi))].$$
The fact that $P$ is a simple contact point of $\mathcal S_\infty$ with $\mathcal U_\infty$ implies that $[F_x(P):F_y(P):F_z(P)]=[1:i:0]$ and that $\varphi_{zz}(0,0)\ne 1$. Indeed $V(t,F(1,i+y,z,t))$ is given by $t=0,\, y=g(z)$ with $g(0)=0$ and $g'(z)=-\varphi_z(g(z),z)/\varphi_y(g(z),z)$ (in particular $g'(0)=0$), so $$\frac{\mathbb C[[y,z]]}{(y-g(z),1+(i+y)^2+z^2)}\cong
\frac{\mathbb C[[z]]}{(1+(i+g(z))^2+z^2)} $$ and finally $$ i_P(\mathcal S_\infty,\mathcal U_\infty)=
\val_z(1+(i+g(z))^2+z^2)=1+\val_z((i+g(z))g'(z)+z) $$ which is equal to 2 if and only if $\varphi_{zz}(0,0)\ne 1$.
In a neighbourhood of $(i,0)$, $A_{1**}$ can be rewritten $\varphi-\kappa$ with $\kappa=d((y+i)\varphi_z-z\varphi_y)/(b\varphi_z-c\varphi_y)$. Since $ \varphi_{zz}(0)\ne 1$, $\kappa_z(0,0)\ne 0$ and, in a neighbourhood of $0$, $\varphi-\kappa=0$ corresponds to $y=h(z)$ with $h'(0)\ne 0$ (recall that $\varphi_y(0)=i\ne 0$ and that $A$ is generic) which gives $$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong
\frac{\mathbb C[[y,z]]}{(y-h(z),A'_{3**})}$$ and finally $i_P(\mathcal S ,\mathcal P_{A,\mathcal S})=\val_z A'_{3**}(h(z),z)=1$. \end{itemize}
\section{Examples in $\mathbb P^3$} \subsection{Normal class of quadrics}\label{secquadric} The aim of the present section is the study of the normal class of every irreducible quadric. Let $\mathcal S=V(F)\subset\mathbb P^3$ be an irreducible quadric. We recall that, up to a composition by $\varphi\in \widehat{Sim_{\mathbb C}(3)}$, one can suppose that $F$ has one of the following forms: \begin{eqnarray*} &(a)& F(x,y,z,t)=x^2+\alpha y^2+\beta z^2 +t^2\\ &(b)& F(x,y,z,t)=x^2+\alpha y^2+\beta z^2\\ &(c)& F(x,y,z,t)=x^2+\alpha y^2-2tz\\ &(d)& F(x,y,z,t)=x^2+\alpha y^2+t^2, \end{eqnarray*} with $\alpha,\beta$ two non zero complex numbers. Spheres, ellipsoids and hyperboloids are particular cases of (a), paraboloids (including the saddle surface) are particular cases of (c), (b) corresponds to cones and (d) to cylinders.
We will see, in Appendix \ref{cylindreetrevolution}, that in the case (d) (cylinders) and in the cases (a) and (b) with $\alpha=\beta=1$, the normal class of the quadric is naturally related to the normal class of a conic. \begin{prop}\label{quadric} The normal class of a sphere is 2.
The normal class of a quadric $V(F)$ with $F$ given by (a) is 6 if $1,\alpha,\beta$ are pairwise distinct.
The normal class of a quadric $V(F)$ with $F$ given by (a) is 4 if $\alpha=1\ne\beta$.
The normal class of a quadric $V(F)$ with $F$ given by (b) is 4 if $1,\alpha,\beta$ are pairwise distinct.
The normal class of a quadric $V(F)$ with $F$ given by (b) is 2 if $\alpha=1\ne\beta$.
The normal class of a quadric $V(F)$ with $F$ given by (b) is 0 if $\alpha=\beta=1$.
The normal class of a quadric $V(F)$ with $F$ given by (c) is 5 if $\alpha\ne 1$ and 3 if $\alpha=1$.
The normal class of a quadric $V(F)$ with $F$ given by (d) is 4 if $\alpha\ne 1$ and 2 if $\alpha=1$. \end{prop} \begin{coro} The normal class of the saddle surface $\mathcal S_1=V(xy-zt)$ is 5.
The normal class of the ellipsoid $\mathcal E_1=V(x^2+2y^2+4z^2-t^2)$ with three different lengths of axes is 6.
The normal class of the ellipsoid $\mathcal E_2=V(x^2+4y^2+4z^2-t^2)$ with two different lengths of axes is 4. \end{coro} \begin{proof}[Proof of Proposition \ref{quadric}] Let $\mathcal S=V(F)$ be a quadric with $F$ of the form (a), (b), (c) or (d). \begin{itemize} \item The easiest case is (a) with $1,\alpha,\beta$ pairwise distinct since $\mathcal B_{\mathcal S}$ is empty. In this case, since the generic degree of the normal polar curves is 3 and since $\mathcal S$ has degree 2, we simply have $c_{\nu}(\mathcal S)=2\cdot 3=6$ (due to Theorem \ref{formulegeneralesurface}). \item The case of a sphere $\mathcal S$ is analogous. In this case, $\tilde {\mathcal B}^{(0)}_{\mathcal S}\cap\mathcal S=\emptyset$ and $\deg\tilde{\mathcal P}_{A,\mathcal S}=1$ for a generic $A\in\mathbb P^3$ (see Example \ref{exemple1}). Hence, we have $c_{\nu}(\mathcal S)=2\cdot 1=2$ (due to Theorem \ref{factorisable}).
\item In case (a) with $\alpha=1\ne\beta$, the set $\mathcal B_{\mathcal S}$ contains two points $[1:\pm i:0:0]$. We find the parametrization $\psi(y)=[1:\pm i+y:0:0]$ of $\mathcal P_{A,\mathcal S}$ at the neighbourhood of $P[1:\pm i:0:0]$, which gives $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\val_y(1+(\pm i+y)^2)=1$ and so $c_{\nu}(\mathcal S)=2\cdot 3-1-1=4$. \item In case (b) with $\alpha$, $\beta$ and $1$ pairwise distinct, the set $\mathcal B_{\mathcal S}$ contains a single point $P[0:0:0:1]$ and a parametrization of ${\mathcal P}_{A,\mathcal S}$ in a neighbourhood of $P$ is \begin{equation}\label{parametrisation1} \psi(x)=\left[x:-\frac{bx}{d(\alpha-1)x-a}:\frac{cx}{a+d(1-\beta)x}:1\right]. \end{equation} Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\val_x(F(\psi(x)))=2$ and so $c_\nu(\mathcal S)=2\cdot 3-2=4$. \item In case (b) with $\alpha=1\ne\beta$, we have $\mathcal B_{\mathcal S}=\{P,P'_+,P'_-\}$ with $P[0:0:0:1]$ and $P'_{\pm}[1:\pm i:0:0]$. A parametrization of ${\mathcal P}_{A,\mathcal S}$ in a neighbourhood of $P$ is given by \eqref{parametrisation1} with $\alpha=1$ and so $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=2$. A parametrization of ${\mathcal P}_{A,\mathcal S}$ at a neighbourhood of $P'_{\pm}$ is $\psi(y)=[1:\pm i+y:0:0]$ and so $i_{P'_{\pm}}(\mathcal S,\mathcal P_{A,\mathcal S})=1$. Hence
$c_\nu(\mathcal S)=2\cdot 3-2-1-1=2$. \item In case (b) with $\alpha=\beta=1$, for a generic $A\in\mathbb P^3$, we have $\deg\tilde{\mathcal P}_{A,\mathcal S}=1$ (see Example \ref{exemple1}) but here $\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap\mathcal S=\{[0:0:0:1]\}$. We find the parametrization $\psi(x)=[x:(bx/a):(cx/a):1]$ of $\tilde{\mathcal P}_{A,\mathcal S}$ at the neighbourhood of $P[0:0:0:1]$. Hence $i_P(\mathcal S,\tilde{\mathcal P}_{A,\mathcal S})=2$ and so $c_{\nu}(\mathcal S)=2\cdot 1-2=0$. \item In case (c) with $\alpha\ne 1$, the only point of $\mathcal B_{\mathcal S}$ is $P_1[0:0:1:0]$ and a parametrization of ${\mathcal P}_{A,\mathcal S}$ at the neighbourhood of this point is \begin{equation}\label{parametrisation2} \psi(t)=\left[\frac{at^2}{d+(d-c)t}:\frac{bt^2}{t(d-c\alpha)+\alpha d}:1:t\right], \end{equation} which gives $i_{P_1}(\mathcal S,\mathcal P_{A,\mathcal S})=1$. Hence $c_{\nu}(\mathcal S)=2\times 3-1=5$. \item In case (c) with $\alpha=1$, $\mathcal B_{\mathcal S}$ is made of three points: $P_1[0:0:1:0]$, $P_{2,\pm}[1:\pm i:0:0]$. As in the previous case, a parametrization of ${\mathcal P}_{A,\mathcal S}$ at the neighbourhood of $P_1$ is given by \eqref{parametrisation2} with $\alpha=1$ and so $i_{P_1}(\mathcal S,\mathcal P_{A,\mathcal S})=1$. Now, a parametrization of ${\mathcal P}_{A,\mathcal S}$ at the neighbourhood of $P_{2,\pm}$ is $\psi(y)=[1:\pm i+y:0:0]$ and so $i_{P_{2,\pm}}(\mathcal S,\mathcal P_{A,\mathcal S})=\val_y (1+(y\pm i)^2)=1$. Hence $c_{\nu}(\mathcal S)=2\cdot 3-1-1-1=3$. \item For the case (d), due to Proposition \ref{propcylinder}, $c_\nu(\mathcal S)=c_\nu(\mathcal C)$ with $\mathcal C=V(x^2+\alpha y^2+z^2)\subset\mathbb P^2$ which is a circle if $\alpha=1$ and an ellipse otherwise. Hence, due to Theorem \ref{thmcurves}, $c_\nu(\mathcal C)=2+2-0-1-1=2$ if $\alpha=1$ and $c_\nu(\mathcal C)=2+2=4$ otherwise. \end{itemize} \end{proof}
\subsection{Normal class of a cubic surface with singularity $E_6$}\label{cubic} Consider $\mathcal S=V(F)\subset\mathbb{P}^{3}$ with $F(x,y,z,t):=x^{2}z+z^{2}t+y^{3}$. $\mathcal{S}$ is a singular cubic surface with $E_{6}$-singularity at $p[0:0:0:1]$. Let $A[a:b:c:d]\in\mathbb P^3$ be generic. The ideal of the normal polar $\mathcal{P}_{\mathcal{S},A}$ is given by $I(\mathcal{P}_{\mathcal{S},A})=\langle H_1,H_2,H_3\rangle\subset\mathbb{C}[x,y,z,t]$ with $H_1:=(y(x^{2}+2zt)-3y^{2}z)d-b(x^{2}+2zt)t+3y^{2}ct$, $H_2:=(x(x^{2}+2zt)-2xz^{2})d-a(x^{2}+2zt)t+2xztc$ and $H_3:=(-2xzy+3xy^{2} )d-3ay^{2}t+2xztb$. $\mathcal B_{\mathcal S}$ is made of two points: $p$ and $q[0:0:1:0]$. Actually $q$ is the point of tangency of $\mathcal{S}$ with $\mathcal{H}_{\infty}$. This point is an ordinary cusp of $\mathcal{S}_{\infty}$. \begin{enumerate} \item Study at $p$.
Near $p$, in the chart $t=1$, the equation $H_3=0$ gives $z=g(x,y):=\frac{3y^{2}(-xd+a)}{2x(-yd+b)}$. Now $V(A_1(x,y,g(x,y),1))$ corresponds to a quintic with a cusp at the origin (and with tangent cone $V(y^{2})$). Its single branch has Puiseux expansion $y^2=-\frac b{3a}x^3+o(x^3)$, with probranches $y=\varphi_{\varepsilon}(x)$ with $\varphi_\varepsilon(x)=i\varepsilon\sqrt{\frac b{3a}}x^{\frac 32}+o(x^{\frac 32})$ for $\varepsilon\in\{\pm 1\}$. Hence, $g(x,\varphi_\varepsilon(x))=-\frac {x^2}2+o(x^2)$. Hence parametrizations of the probranches of $\mathcal P_{A,\mathcal S}$ in a neighbourhood of $p$ are $$\Gamma_{\varepsilon}(x)=[x:\varphi_\varepsilon(x):g(x,\varphi_\varepsilon(x)):1]$$ and $F(\Gamma_{\varepsilon}(x))=-\frac {x^4}4+o(x^{4})$. Therefore $i_{p}(\mathcal{P}_{\mathcal{S},A},\mathcal S)=8$. \item Study at $q.$
Assume that $b=1$. Near $q[0:0:1:0]$, in the chart $z=1$, $H_3=0$ gives $t=h(x,y):=\frac{d(-2+3y)xy}{3ay^{2}-2x}$ and $V(H_2(x,y,1,h(x,y)))$ is a quartic with a (tacnode) double point in $(0,0)$ with vertical tangent and which has Puiseux expansion $$x=\theta_{\varepsilon}(y)=\omega_{\varepsilon}a\, y^{2}+o(y^{2}),$$ with $\omega_{\varepsilon}=\frac{3-d}{2}+\frac{\varepsilon}{2}\sqrt{d(d-6)}$ for $\varepsilon\in\{\pm 1\}$ and
$h(\theta_\varepsilon(y),y)=-\frac{2d\omega_\varepsilon}
{3-2\omega_\varepsilon}y+o(y)$. Hence parametrizations of the probranches of $\mathcal{P}_{\mathcal{S},A}$ in a neighbourhood of $q$ are given by $$\Gamma_{\varepsilon}(y):=[\theta_{\varepsilon}(y):y:1:h(\theta_\varepsilon(y),y)]$$ for $\varepsilon\in\{\pm 1\}$ and $F(\Gamma_{\varepsilon}(y))=-\frac{2\omega_\varepsilon d}{3-2\omega_\varepsilon}y+o(y)$. Hence $i_{q}(\mathcal{P}_{\mathcal{S},A},\mathcal S)=2$
We can also apply directly Item (c) of Section \ref{sec:proofthm1} to prove that $i_{q}(\mathcal{P}_{\mathcal{S},A},\mathcal S)=2$. \end{enumerate} Therefore, due to Theorem \ref{formulegeneralesurface}, the normal class of $\mathcal S=V(x^{2}z+z^{2}t+y^{3})\subset \mathbb P^3(\mathbb C)$ is $$c_{\nu}(\mathcal{S})=3\cdot(3^{2}-3+1)-8-2=11.$$
\section{Normal class of plane curves~: Proof of Theorem \ref{thmcurves}}\label{proofcurve} Let $\mathbf V$ be a three dimensional complex vector space and set $\mathbb P^2:=\mathbb P(\mathbf V)$ with projective coordinates $x,y,z$. We denote by $\ell_\infty=V(z)$ the line at infinity.
Let $\mathcal C=V(F)\subset \mathbb P^2$ be an irreducible curve of degree $d\ge 2$. For any nonsingular $m[x:y:z]\in\mathcal C$ (with coordinates $\mathbf m=(x,y,z)\in\mathbb C^3$), we write $\mathcal T_m\mathcal C$ for the tangent line to $\mathcal C$ at $m$. If $\mathcal T_m\mathcal C\ne\ell_\infty$, then $n_{\mathcal C}(m)=[F_x:F_y:0]$ is well defined in $\mathbb P^2$ and {\bf the projective normal line} $\mathcal N_m\mathcal C$ to $\mathcal C$ at $m$ is the line $(m\, n_{\mathcal C}(m))$ if $n_{\mathcal C}(m)\ne m$. An equation of this normal line is then given by $\langle \mathbf {N}_\mathcal C({m}),\cdot\rangle$ where $N_\mathcal C:\mathbb P^2\dashrightarrow\mathbb P^2$ is the rational map defined by \begin{equation}\label{NC} \mathbf{N}_\mathcal C(\mathbf{m}):=\mathbf{m}\wedge\left(\begin{array}{c}F_x\\ F_y\\ 0\end{array}\right)
=\left(\begin{array}{c}-zF_y(m)\\ zF_x(m)\\ xF_y(m)-yF_x(m)\end{array}\right). \end{equation} \begin{lem}
The base points of $(N_\mathcal C)_{|\mathcal C}$ are the singular points of $\mathcal C$, the points of tangency with the line at infinity and the points of $\{I,J\}\cap\mathcal C$. \end{lem} \begin{proof} A point $m\in\mathcal C$ is a base point of $N_\mathcal C$ if and only if $F_x=F_y=0$ or $z=xF_y-yF_x=0$. Hence, singular points of $\mathcal C$ are base points of $N_{\mathcal C}$.
Let $m=[x:y:z]$ be a nonsingular point of $\mathcal C$. First $F_x=F_y=0$ is equivalent to $\mathcal T_m\mathcal C=\ell_\infty$. Assume now that $z=xF_y-yF_x=0$ and $(F_x,F_y)\ne(0,0)$. Then $m=[x:y:0]=[F_x:F_y:0]$ and, due to the Euler formula, we have $0=-zF_z=xF_x+yF_y$ and so $x^2+y^2=0$, which implies $m=I$ or $m=J$.
Finally note that if $m\in\{I,J\}\cap\mathcal C$, then $m=[-y:x:0]$ and, due to the Euler formula, $0=-zF_z=xF_x+yF_y=xF_y-yF_x$. \end{proof} Since the degree of each non zero coordinate of $\mathbf N_\mathcal C$ is $d$, we have \begin{equation}\label{lemmefondamental} c_\nu(\mathcal C)
=d^2-\sum_{P\in \base\left({(N_\mathcal C)}_{\vert\mathcal C}\right)} i_P(\mathcal C,V(\langle L,\mathbf N_{\mathcal C}(\cdot)\rangle)), \end{equation} for a generic $L\in\mathbb P^2$, where we write $\base\left({(N_\mathcal C)}_{\vert\mathcal C}\right)$ for the set of base points of ${(N_\mathcal C)}_{\vert\mathcal C}$. The set $V(\langle L,\mathbf N_{\mathcal C}(\cdot)\rangle)\subset \mathbb P^2$ is called {\bf the normal polar} of $\mathcal C$ with respect to $L$. It satisfies $$m \in V(\langle L, \mathbf N_{\mathcal C}(\cdot)\rangle)\quad \Leftrightarrow\quad \mathbf N_{\mathcal C}(\mathbf m)=0\ \mbox{or}\ L\in \mathcal N_m(\mathcal C).$$ Now, to compute the generic intersection numbers, we use the notion of probranches \cite{Halphen,Wall,Wall2}. See section 4 of \cite{fredsoaz1} for details. Let $P\in\mathcal C$ be an indeterminacy point of $N_{\mathcal C}$ and let us write $\mu_P$ for the multiplicity of $\mathcal C$ at $P$. Recall that $\mu_P=1$ means that $P$ is a nonsingular point of $\mathcal C$. Let $M\in GL(\mathbf V)$ be such that $M(\mathbf O)=\mathbf P$ with $\mathbf O=(0,0,1)$ (we set also $O=[0:0:1]$) and such that $V(x)$ is not contained in the tangent cone of $V(F\circ M)$ at $O$. Recall that the equation of this tangent cone is the homogeneous part of lowest degree in $(x,y)$ of $F\circ M(x,y,1)\in\mathbb C[x,y]$ and that this lowest degree is $\mu_P$. Using the combination of the Weierstrass preparation theorem and of the Puiseux expansions, $$F\circ M(x,y,1)=U(x,y)\prod_{j=1}^{\mu_P} (y-g_j(x)),$$ for some $U(x,y)$ in the ring of convergent series in $x,y$ with $U(0,0)\ne 0$ and where $g_j(x)=\sum_{m\ge 1}a_{j,m}x^{\frac m{q_j}}$ for some integer $q_j\ne 0$. The $y=g_j(x)$ correspond to the equations of the probranches of $\mathcal C$ at $P$. 
Since $V(x)$ is not contained in the tangent cone of $V(F\circ M)$ at $O$, the valuation in $x$ of $g_j$ is strictly larger than or equal to 1 and so the probranch $y=g_j(x)$ is tangent to $V(y-xg_j'(0))$. We write $\mathcal T_P^{(j)}:=M(V(y-xg_j'(0)))$ the associated (eventually singular) tangent line to $\mathcal C$ at $P$ ($\mathcal T_P^{(j)}$ is the tangent to the branch of $\mathcal C$ at $P$ corresponding to this probranch) and we denote by $i_P^{(j)}$ the tangential intersection number of this probranch: $$i_P^{(j)}=\val_x (g_j(x)-xg_j'(0))=\val_x (g_j(x)-xg_j'(x)).$$ We recall that for any homogeneous polynomial $H\in\mathbb C[x,y,z]$, we have \begin{eqnarray*} i_P(\mathcal C,V(H))&=& i_{O}(V(F\circ M),V(H\circ M))\\ &=&\sum_{j=1}^{\mu_P} \val_x(H(M(G_j(x)))), \end{eqnarray*} where $G_j(x):=(x,g_j(x),1)$. With these notations and results, we have $$\Omega(\mathcal C,\ell_\infty)=\sum_{P\in\mathcal C\cap\ell_\infty} (i_P(\mathcal C,\ell_\infty)-\mu_P(\mathcal C)) = \sum_{P\in\mathcal C\cap\ell_\infty} \sum_{j:\mathcal T_P^{(j)}=\ell_\infty} (i_P^{(j)}-1).$$ For a generic $L\in\mathbf V^\vee$, we also have \begin{eqnarray*} i_P(\mathcal C,V(L\circ N_{\mathcal C})) &=&\sum_{j=1}^{\mu_P} \val_x(L(N_{\mathcal C}(M(G_j(x)))))\\ &=&\sum_{j=1}^{\mu_P} \min_k \val_x([N_{\mathcal C}\circ M]_k(G_j(x))), \end{eqnarray*} where $[\cdot ]_k$ denotes the $k$-th coordinate. Moreover, due to \eqref{NC}, as seen in Proposition 16 of \cite{fredsoaz2}, we have
$$\mathbf{N}_{\mathcal C}\circ M(\mathbf m)= Com(M)\cdot (\mathbf{m}\wedge\left[\Delta_{\mathbf{A}}
G(\mathbf m)
\cdot \mathbf{A}+\Delta_{\mathbf{B}} G(\mathbf m)\cdot \mathbf{B}\right]),$$ where $G:=F\circ M$, $\mathbf{A}:=M^{-1}(1,0,0)$, $\mathbf{B}:=M^{-1}(0,1,0)$ and $\Delta_{(x_1,y_1,z_1)}H=x_1H_x+y_1H_y+z_1H_z$. As seen in Lemma 33 of \cite{fredsoaz1}, we have $$\Delta_{(x_1,y_1,z_1)} G(x,g_j(x),1)=R_j(x)W_{(x_1,y_1,z_1),j}(x),$$ where $R_j(x)=U(x,g_j(x))\prod_{j'\ne j}(g_{j'}(x)-g_j(x))$ and $W_{(x_1,y_1,z_1),j}(x):=y_1-x_1g'_j(x)+z_1(xg'_j(x)-g_j(x))$. Therefore, for a generic $L\in\mathbf{V}^\vee$, we have $$ i_P(\mathcal C,V(L\circ N_{\mathcal C}))= V_P+\sum_{j=1}^{\mu_P} \min_k \val_x([G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A
+W_{\mathbf{B},j}(x)\cdot \mathbf B)]_k) $$ where $V_P:=\sum_{j=1}^{\mu_P}\sum_{j'\ne j}\val(g_{j'}-g_j)$. Now, we write $h_P^{(j)}:=\min_k \val_x([G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A
+W_{\mathbf{B},j}(x)\cdot \mathbf B)]_k)$ and $h_P:=\sum_{j=1}^{\mu_P}h_P^{(j)}$. Note that $V_P=0$ if $P$ is a nonsingular point of $\mathcal C$. We recall that, due to Corollary 31 of \cite{fredsoaz1}, we have $$\sum_{P\in \mathcal C\cap \base(N_{\mathcal C})} V_P= d(d-1)-d^\vee$$ and so, due to \eqref{lemmefondamental}, we obtain \begin{equation}\label{lemme fondamental2} c_\nu(\mathcal C)
=d+d^\vee -\sum_{P\in \mathcal C\cap\base(N_\mathcal C)} h_P. \end{equation} Now we have to compute the contribution $h_P^{(j)}$ of each probranch of each $P\in\mathcal C\cap \base(N_{\mathcal C})$. We have seen, in Proposition 29 of \cite{fredsoaz1}, that we can adapt our choice of $M$ to each probranch (or, to be more precise, to each branch corresponding to the probranch). This fact will be useful in the sequel. In particular, for each probranch, we take $M$ such that $g_j'(0)=0$ so $G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$ can be rewritten: \begin{equation}\label{probranche1} \left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}x_Ay_A-(x_A^2+x_B^2)g'_j(x)+x_By_B+(z_Ax_A+z_Bx_B)(xg_j'(x)-g_j(x))\\
y_A^2+y_B^2-(x_Ay_A+x_By_B)g'_j(x)+(z_Ay_A+z_By_B)(xg_j'(x)-g_j(x))\\
y_Az_A+y_Bz_B-(x_Az_A+x_Bz_B)g'_j(x)+(z_A^2+z_B^2)(xg_j'(x)-g_j(x))\end{array}\right) . \end{equation} \begin{itemize} \item Assume first that $P$ is a point of $\mathcal C$ outside $\ell_\infty$. Then for $M$ as above and such that $z_A=z_B=0$, we have $$ G_j(0)\wedge (W_{\mathbf{A},j}(0)\cdot \mathbf A
+W_{\mathbf{B},j}(0)\cdot \mathbf B)=\left(\begin{array}{c}
-y_A^2-y_B^2\\x_Ay_A+x_By_B\\0\end{array}\right)$$ which is non null since $(y_A,y_B)\ne(0,0)$ and since $\mathbf A$ and $\mathbf B$ are linearly independent. So $h_P^{(j)}=0$. \item Assume now that $P\in\mathcal C\cap \ell_\infty\setminus\{I,J\}$ and $\mathcal T_P^{(j)}\ne\ell_\infty$. Then $y_A+iy_B\ne 0$ and $y_A-iy_B\ne 0$ (since $I,J\not\in \mathcal T_P^{(j)}$) and so $y_A^2+y_B^2\ne 0$ which together with \eqref{probranche1} implies that $h_P^{(j)}=0$ as in the previous case. \item Assume that $P\in\mathcal C\cap \ell_\infty\setminus\{I,J\}$ and $\mathcal T_P^{(j)}=\ell_\infty$. Assume that $M(1,0,0)=(1,i,0)$. Hence $\mathbf A+i\mathbf B=(1,0,0)$. Then $y_A=y_B=0$, $x_A+ix_B=1$, $z_A+iz_B=0$. So $z_A^2+z_B^2=0$ and $z_Ax_A+z_Bx_B=z_A\ne 0$ (since $z_B=iz_A$ and $x_B=i(x_A-1)$). Note that $P\ne J$ implies also that $x_A-ix_B\ne 0$. So that $x_A^2+x_B^2\ne 0$. Hence, due to \eqref{probranche1}, $G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$ is equal to $$ \left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}(x_A^2+x_B^2)g'_j(x)+z_A(xg_j'(x)-g_j(x))\\
0\\
z_Ag_j'(x)\end{array}\right) . $$ Therefore we have $h_P^{(j)}=\val_x((x_A^2+x_B^2)g'_j(x))=i_P^{(j)}-1$. \item Assume that $P=I$ and that $\mathcal T_P^{(j)}=\ell_\infty$. Take $M$ such that $M(\mathbf O)=(1,i,0)$, $\mathbf B=(1,0,0)$ and so $\mathbf A=(-i,0,1)$. Due to \refeq{probranche1}, $G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$ is equal to \begin{equation} \left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}-i(xg_j'(x)-g_j(x))\\
0\\
ig_j'(x)+(xg_j'(x)-g_j(x))\end{array}\right) . \end{equation} Note that each coordinate has valuation at least equal to $i_P^{(j)}=\val g_j$ and that the term of degree $i_P^{(j)}$ of the second coordinate is the term of degree $i_P^{(j)}$ of $$-i(xg'_j(x)-g_j(x))+xig'_j(x)=ig_j(x)\ne 0$$ which is non null. Therefore $h_P^{(j)}=i_P^{(j)}$. \item Assume finally that $P=I$ and that $\mathcal T_P^{(j)}\ne\ell_\infty$. Take $M$ such that $M(\mathbf O)=(1,i,0)$, $\mathbf B=(0,1,0)$ and so $\mathbf A=(0,-i,1)$. Due to \refeq{probranche1}, $G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$ is equal to \begin{equation} \left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}0\\-i(xg_j'(x)-g_j(x))\\
-i+(xg_j'(x)-g_j(x))\end{array}\right) . \end{equation} Note that each coordinate has valuation at least equal to $1$ and that the term of degree $1$ of the second coordinate is $ix\ne 0$. Hence $h_P^{(j)}=1$. \end{itemize} Note that the case $P=J$ can be treated in the same way as the case $P=I$.
Theorem \ref{thmcurves} follows from \eqref{lemme fondamental2} and from the previous computation of $h_P$.
\begin{appendix} \section{Dimension decrease}\label{cylindreetrevolution} Here, we consider two particular cases of hypersurfaces the normal class of which is equal to the normal class of a hypersurface of lower dimension: cylinders (i.e. cones at a point at infinity) and revolution hypersurfaces (circles fibers).
Let $n\ge 3$. Let $\tilde F\in\mathbb C[u_1,...,u_n]$ be homogeneous. We call {\bf cylinder of base $\tilde{\mathcal Z}=V(\tilde F)\subset\mathbb P^{n-1}$ and of axis $V(x_2,...,x_n)\subset\mathbb P^n$} the hypersurface $V(F)\subset\mathbb P^n$, with $F(x_1,\dots,x_{n+1}):=\tilde F(x_2,\dots,x_{n+1})$.
\begin{prop}\label{propcylinder} Let $n\ge 3$ and $d\ge 2$. Let $\mathcal Z=V(F)\subset\mathbb P^n$ be the cylinder of axis $V(x_2,...,x_n)\subset\mathbb P^n$ and of base $\tilde{\mathcal Z}=V(\tilde F)\subset\mathbb P^{n-1}$. Then $c_{\nu}(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$. \end{prop} \begin{proof} Note that $\mathcal Z\cap V(x_2,...,x_{n+1})\subset\sing(\mathcal Z)\subset\mathcal B_{\mathcal Z}$. Let $m[x_1^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathcal Z\setminus V(x_2,...,x_{n+1})$ and $P[x_1^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^n\setminus V(x_2,...,x_{n+1})$. Set $\tilde m[x_2^{(1)}:\cdots:x_{n+1}^{(1)}]\in\tilde{\mathcal Z}$ and $\tilde P[x_2^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^{n-1}$. Note that $n_{\mathcal Z}(m)[0:\tilde F_{u_1}(\tilde{\mathbf m}):\cdots:\tilde F_{u_{n-1}}(\tilde{\mathbf m}):0]\in\mathbb P^{n}$. \begin{itemize} \item Let $\mathcal H=V(\alpha x_1+\beta x_{n+1})\subset \mathbb P^n$ be a hyperplane orthogonal to $V(x_2,...,x_n)$ such that $\mathcal H\ne\mathcal H^\infty$ (i.e. $\alpha\ne 0$). Assume $m\in\mathcal H$. Then $m\in\mathcal B_{\mathcal Z}\, \Leftrightarrow\, \tilde m\in\mathcal B_{\tilde{\mathcal Z}}$. If $m\in\mathcal H\cap\mathcal Z\setminus\mathcal B_{\mathcal Z}$, then $\mathcal N_m(\mathcal Z)\subset \mathcal H$. \item Assume $P\in \mathbb P^n\setminus V(x_1,x_{n+1})$. Then $\mathcal H:=V(x_1^{(0)}x_{n+1}-x_{n+1}^{(0)}x_{1})$ is the unique hyperplane orthogonal to $V(x_2,...,x_n)$ containing $P$ and $$P\in \mathcal N_m(\mathcal Z),\ m\in\mathcal Z\setminus\mathcal B_{\mathcal Z}\quad\Leftrightarrow\quad m\in\mathcal H,\ \tilde m\in\tilde{\mathcal Z}\setminus
\mathcal B_{\tilde{\mathcal Z}},\ \tilde P\in\mathcal N_{\tilde m}(\tilde{\mathcal Z}).$$ \end{itemize} Hence $c_\nu(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$ \end{proof} Let $\tilde F\in\mathbb C[u_1,...,u_n]$ be a homogeneous polynomial of the form $\tilde F(u_1,...,u_n)=G(u_1^2,...,u_n)$ for some $G\in\mathbb C[u_1,...,u_n]$. Let $\tilde{\mathcal Z}:=V(\tilde F)\subset\mathbb P^{n-1}$.
We call {\bf algebraic hypersurface of revolution of $\tilde{\mathcal Z}$ around the subspace $V(x_1,x_2)$} the hypersurface $\mathcal Z=V(F)\subset\mathbb P^n$ with $F(x_1,\dots,x_{n+1}):=G(x_1^2+x_2^2,x_3,...,x_{n+1})$.
Note that if $m[x_1^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathcal Z\setminus\mathcal H^\infty$ with $x_{n+1}^{(1)}=1$, then the "circle" $V(x_1^2+x_2^2-(x_1^{(1)})^2-(x_2^{(1)})^2)\cap\bigcap_{i=3}^{n}V(x_i-x_i^{(1)}x_{n+1})$ of center $[0:0:x_3^{(1)}:\cdots:x_{n+1}^{(1)}]$ that passes through $m$ is contained in $\mathcal Z$. \begin{prop} Let $n\ge 3$ and $d\ge 2$. Let $\mathcal Z=V(F)\subset\mathbb P^n$ be the algebraic hypersurface of revolution of $\tilde{\mathcal Z}=V(\tilde F)\subset \mathbb P^{n-1}$ (with $\tilde F\in\mathbb C[u_1,...,u_n]$ as above) around the subspace $V(x_1,x_2)$, then $c_\nu(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$. \end{prop} \begin{proof}
Let $m[x_1^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathcal Z$ and $P[x_1^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^n$. Then $$n_{\mathcal Z}(m)[2x_1^{(1)}G_{u_1}({\mathbf m}_1):2x_2^{(1)}G_{u_1}({\mathbf m}_1): G_{u_2}({\mathbf m}_1):\cdots:G_{u_{n-1}}(\tilde{\mathbf m}_1):0]\in\mathbb P^n,$$ with ${\mathbf m}_1((x_1^{(1)})^2+(x_2^{(1)})^2,x_3^{(1)},...,x_{n+1}^{(1)})\in\mathbb C^n$. Hence if $m\in\mathcal Z\cap V(x_1^2+x_2^2)\setminus\mathcal B_{\mathcal Z}$, then $\mathcal N_m\mathcal Z\subset V(x_1^2+x_2^2)$. Assume from now on that $m\in\mathcal Z\setminus V(x_1^2+x_2^2)$ and that $P\in\mathbb P^n\setminus (V(x_1^2+x_2^2)\cup V(x_1))$.
Let $\tilde m[y_1^{(1)}:x_3^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathbb P^{n-1}$ and $\tilde P[y_1^{(0)}:x_3^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^{n-1}$ with $(y_1^{(i)})^2=(x_1^{(i)})^2+(x_2^{(i)})^2$. Note that $\tilde m\in\tilde{\mathcal Z}$. Then $$n_{\mathcal Z}(m)[x_1^{(1)}\tilde F_{u_1}(\tilde{\mathbf m})/y_1^{(1)}:x_2^{(1)}\tilde F_{u_1}(\tilde{\mathbf m})/y_1^{(1)}: \tilde F_{u_2}(\tilde{\mathbf m}):\cdots:\tilde F_{u_{n-1}}(\tilde{\mathbf m}):0]\in\mathbb P^n.$$ \begin{itemize} \item Note that $m\in\mathcal B_{\mathcal Z}\ \Leftrightarrow\ \tilde m\in\mathcal B_{\tilde{\mathcal Z}}$ (since $x_1^{(1)}$ and $x_2^{(1)}$ are not both null). \item Let $\mathcal H=V(\alpha x_1+\beta x_{2})\subset \mathbb P^n$ be a hyperplane that contains $V(x_1,x_2)$ but is not contained in $V(x_1^2+x_2^2)$ (i.e. $\alpha^2+\beta^2\ne 0$). If $m\in\mathcal H\cap\mathcal Z\setminus\mathcal B_{\mathcal Z}$, then $\mathcal N_m\mathcal Z\subset \mathcal H$. \item Let $\mathcal H:=V(x_1^{(0)}x_{2}-x_{2}^{(0)}x_{1})$ be the unique hyperplane that contains $V(x_1,x_2)$ and $P$. Then $$P\in \mathcal N_m(\mathcal Z),\ m\in\mathcal Z\setminus\mathcal B_{\mathcal Z}\quad\Leftrightarrow\quad m\in\mathcal H,\ \tilde m\in\tilde{\mathcal Z}\setminus
\mathcal B_{\tilde{\mathcal Z}},\ \tilde P\in\mathcal N_{\tilde m}(\tilde{\mathcal Z}),$$ by choosing $y_1^{(1)}:=y_1^{(0)}x_1^{(1)}/x_1^{(0)}$. \end{itemize} Hence $c_\nu(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$ \end{proof}
\section{Projective orthogonality in $\mathbb P^n$}\label{NORMAL} \subsection{From affine orthogonality to projective orthogonality} Let $E_n$ be a Euclidean affine $n$-space whose direction is the $n$-dimensional vector space $\mathbf E_n$ (endowed with some fixed basis). Let $\mathbf V:=(\mathbf E_n\oplus \mathbb R)\otimes \mathbb C$ (endowed with the induced basis $\mathbf{e}_1,...,\mathbf{e}_{n+1}$). We consider the complex projective space $\mathbb P^n:=\mathbb P(\mathbf V)$ with projective coordinates $x_1,...,x_{n+1}$. Let us write $\pi:\mathbf V\setminus\{0\}\rightarrow\mathbb P^n$ for the canonical projection. We denote by $\mathcal H^\infty:=V(x_{n+1})\subset \mathbb P^n$ the hyperplane at infinity. We consider the affine space ${A}^{n}:=\mathbb{P}^{n}\setminus \mathcal{H}^{\infty }$ endowed with the vector space $\overrightarrow{\mathbf E}:=Span(\mathbf e_1,\cdots,\mathbf e_n) \subset \mathbf V$ (with the affine structure $m+\overrightarrow{\mathbf v}=\pi(\mathbf m+\overrightarrow{\mathbf v})$ if $\overrightarrow{\mathbf v}\in \overrightarrow{\mathbf E}$ and $m=\pi(\mathbf m)\in A^n$ with $\mathbf m(x_1,\cdots,x_n,1)$).
Let us consider $\mathcal{W}_1= \mathbb{P}(\mathbf W_1) \subset\mathbb P^n$ and $\mathcal{W}_2 =\mathbb{P}(\mathbf W_2)\subset\mathbb P^n$ where $\mathbf W_1$ and $\mathbf W_2$ are two vector subspaces of $\mathbf V$
not contained in $\overrightarrow{\mathbf E}$ such that $\dim \mathbf W_1+\dim\mathbf W_2=n+2$. Since $\mathcal W_i$ is not contained in $\mathcal H^\infty$, $ W_i:=\mathcal{W}_i\setminus\mathcal H^\infty$ is an affine subspace of $A^n$ with vector space $\overrightarrow{\mathbf{W}_i}:=\mathbf W_i \cap\overrightarrow{\mathbf E}$, that is to say that there exists $m_i$ such that $W_i=m_i+\overrightarrow{\mathbf{W}_i}$ in $ A^n$. Consider the usual bilinear symmetric form $\langle u,v\rangle =\sum_{i=1}^{n+1}u_{i}v_{i}$ on $\mathbf V$; the associated orthogonality on $\mathbf V$ is written $\boldsymbol{\perp} $. \begin{defi} Let us consider $\mathcal{W}_1= \mathbb{P}(\mathbf W_1) \subset\mathbb P^n$ and $\mathcal{W}_2 =\mathbb{P}(\mathbf W_2)\subset\mathbb P^n$ where $\mathbf W_1$ and $\mathbf W_2$ are two vector subspaces of $\mathbf V$
not contained in $\overrightarrow{\mathbf E}$ and such that $\dim \mathbf W_1+\dim\mathbf W_2=n+2$. With the above notations, we say that $\mathcal W_1$ and $\mathcal W_2$ are orthogonal in $\mathbb{P}^{n}$ if $\overrightarrow {\mathbf W}_1\perp \overrightarrow{\mathbf{W}}_2$. We then write ${\mathcal W_1\boldsymbol{\perp }\mathcal W_2}$. \end{defi} Note that if $\mathcal H\subset \mathbb P^n$ and $\mathcal L \subset\mathbb P^n$ are respectively a hyperplane and a line in $\mathbb P^n$ not contained in $\mathcal H^\infty$, then $\mathcal H\perp\mathcal L$ if and only if the point at infinity of $\mathcal L$ is the pole in $\mathcal H^\infty$ of the line $\mathcal H\cap\mathcal H^\infty\subset\mathcal H^\infty$ with respect to the {\bf umbilical} $\mathcal U_\infty:=V(x_1^2+...+x_n^2)\cap\mathcal H^\infty\subset\mathcal H^\infty$. This leads us to the following generalization of normal lines to a hyperplane. \begin{defi} We say that a projective hyperplane $\mathcal H=V(a_1x_1+\cdots+a_{n+1}x_{n+1})\subset\mathbb P^n$ and a projective line $\mathcal L=\mathbb P(\mathbf L)\subset\mathbb P^n$ are orthogonal in $\mathbb P^n$ if $(a_1,\cdots,a_n,0)\in\mathbf L$. We then write $\mathcal L\perp\mathcal H$. \end{defi} It is worth noting that, with this definition, a line orthogonal to a hyperplane $\mathcal H$ may be included in $\mathcal H$.
\end{appendix}
\end{document} |
\begin{document}
\title{Transform-Invariant Non-Parametric Clustering of Covariance Matrices and its Application to Unsupervised Joint Segmentation and Action Discovery}
\author{\name Nadia Figueroa \email nadia.figueroafernandez@epfl.ch \\
\name Aude Billard \email aude.billard@epfl.ch \\
\addr Learning Algorithms and Systems Laboratory (LASA)\\
\'Ecole Polytechnique F\'ed\'erale de Lausanne (EPFL)\\
Lausanne, Switzerland - 1015.}
\editor{X} \maketitle
\begin{abstract}
In this work, we tackle the problem of \textit{transform-invariant unsupervised learning} in the space of Covariance matrices and applications thereof. We begin by introducing the Spectral Polytope Covariance Matrix (SPCM) Similarity function; a similarity function for Covariance matrices, invariant to any type of transformation. We then derive the SPCM-CRP mixture model, a \textit{transform-invariant} non-parametric clustering approach for Covariance matrices that leverages the proposed similarity function, spectral embedding and the distance-dependent Chinese Restaurant Process (dd-CRP) \citep{Blei:JMLR:2011}. The scalability and applicability of these two contributions is extensively validated on real-world Covariance matrix datasets from diverse research fields. Finally, we couple the SPCM-CRP mixture model with the Bayesian non-parametric Indian Buffet Process (IBP) - Hidden Markov Model (HMM) \citep{Fox:NIPS:2009}, to jointly segment and discover \textit{transform-invariant} action primitives from complex sequential data. Resulting in a topic-modeling inspired hierarchical model for unsupervised time-series data analysis which we call ICSC-HMM (IBP Coupled SPCM-CRP Hidden Markov Model). The ICSC-HMM is validated on kinesthetic demonstrations of uni-manual and bi-manual cooking tasks; achieving unsupervised human-level decomposition of complex sequential tasks.\\ \end{abstract}
\begin{keywords}
Covariance matrices, similarity measures for Covariance matrices, spectral clustering, spectral graph theory, Bayesian non-parametrics, time-series segmentation \end{keywords}
\section{Introduction} The \textit{Gaussian distribution} is one of the most widely-used representations of data in any field of science and engineering. The reason for its popularity can be explained from an information-theoretic point of view. Given only its first and second moments (i.e. the mean and Covariance) one can describe a distribution of data-points with maximum entropy and minimal assumptions \citep{Cover:EIT:1991}. To recall, it is defined by a probability density function over a random vector $\mathbf{x} \in \mathds{R}^{N}$ whose density $\mathcal{N}(\mathbf{\mu},\mathbf{\Sigma})$ can be estimated with a mean $\mathbf{\mu} \in \mathds{R}^{N}$ and a \underline{Covariance matrix} $\mathbf{\Sigma} \in \mathds{R}^{N\times N}$ as follows, \begin{equation} \label{eq:gaussian}
f_X(\mathbf{x};\mathbf{\mu},\mathbf{\Sigma}) = \frac{1}{(2\pi)^{N/2}|\mathbf{\Sigma}|^{1/2}} \exp \left\lbrace -\frac{1}{2}(\mathbf{x} - \mathbf{\mu})^{T}\mathbf{\Sigma}^{-1}(\mathbf{x} - \mathbf{\mu}) \right\rbrace. \end{equation} where $\mathbf{\mu} = \mathbb{E}[\mathbf{x}]$ is the formalization of the average value and the Covariance matrix $\mathbf{\Sigma} = \mathbb{E}\left[ (\mathbf{x} - \mathbf{\mathbb{E}[\mathbf{x}]})(\mathbf{x} - \mathbf{\mathbb{E}[\mathbf{x}]})^T \right]$ is the generalization of Covariance in $N$-dimensional space. If we assume the data is centered $\sum_{i=1}^{M}\mathbf{x}_i = 0$ for $M$ samples, the information compressed in the Covariance matrix $\mathbf{\Sigma}$ is sufficient to describe the variance of the distribution in all spatial directions; and consequently, the distribution itself: \begin{equation} \label{eq:zero-mean-gaussian}
f_X(\mathbf{x};0,\mathbf{\Sigma}) = \frac{1}{(2\pi)^{N/2}|\mathbf{\Sigma}|^{1/2}} \exp \left\lbrace -\frac{1}{2}\mathbf{x}^{T}\mathbf{\Sigma}^{-1}\mathbf{x} \right\rbrace. \end{equation} This has led researchers to either: (i) fit/assume their data is distributed by \eqref{eq:gaussian} or \eqref{eq:zero-mean-gaussian} or (ii) use the sample Covariance matrix $\mathbf{C}$ directly as a descriptive representation of data: \begin{equation} \label{eq:sample_cov} \mathbf{C} = \frac{1}{M} \sum_{i=1}^{M} (\mathbf{x}_i - \bar{\mathbf{x}} )(\mathbf{x}_i - \bar{\mathbf{x}} )^T, \end{equation} where $\bar{\mathbf{x}}$ is the sample mean. The use of \eqref{eq:gaussian}, \eqref{eq:zero-mean-gaussian} and \eqref{eq:sample_cov} is inherent in a myriad of \textit{computer vision, action recognition, medical imaging} and \textit{robotics} applications. For example, in \textit{computer vision} and \textit{action recognition}, Covariance matrices are used to characterize statistics and fuse features for texture/action/face recognition, people/object tracking, camera calibration, skeleton-based human action recognition from motion capture data, to name a few \citep{Tuzel:ECCV:2006,Tosato:ECCV:2010,Vemulapalli:CVPR:2013,Wang:ICCV:2015,Cavazza:ICPR:2016}. In the \textit{medical imaging} community, the analysis and interpolation of Covariance is extremely important as Diffusion Tensors (a type of SPD matrices\footnote{Covariance matrices are Symmetric Positive Definite (SPD). In this work, we consider any dataset of SPD matrices as datasets of Covariance matrices.}) are used to identify regions of similar biological tissue structures through the diffusion of water particles in the brain \citep{Dryden:AAS:2009,Cherian:TPAMI:2013}.
In \textit{robotics}, the geometrical representation of the Covariance matrix (i.e. an $N$-dimensional hyper-ellipsoid) is widely used to represent uncertainties in sensed data for planning, localization and recognition tasks \citep{Thrun:PR:2005,Miller:CASE:2015}. Furthermore, $N$-dimensional hyper-ellipsoids are also used in robotic manipulation applications to represent parametrizations for tasks involving interaction or variable impedance learning \citep{Friedman:Cortex:2007}. In \citet{Kronander:ICRA:2012,Ajoudani:ICRA:2015}, \textit{stiffness} ellipsoids, representing the stiffness of the robot's end-effector, are used to characterize impedance controllers. Further, in \citet{Li:JRA:1988,El-Khoury:RAS:2015}, ellipsoids are used to model tasks in the wrench space of a manipulated object or tool\footnote{Otherwise known as the Task Wrench Space (TWS) \citep{Li:JRA:1988}.} , to represent the principal directions of the forces/torques exerted on an object to achieve a manipulation task. Finally, in many Learning from Demonstration (LfD) \citep{Argall:RAS:2009,Billard:SHR:2008} approaches, Gaussian Mixture Models (GMM) and Hidden Markov Models (HMM), are regularly used for motion modeling, action recognition and segmentation \citep{Calinon:SMC:2007,Khansari:TRO:2011,Calinon:ISR:2015}.
Estimating such Covariance matrices (or Gaussian-\textit{distributed} probabilistic models), for any of the previously mentioned applications, relies on careful \textit{acquisition} and \textit{pre-processing} of data. Often, data acquisition procedures generate data subject to a relative \textit{transformation}, due to changes in rotation, translation, scaling, duration (in the case of time-series), shearing, etc. Covariance matrices, although highly descriptive, do not explicitly decouple such transformations. For example, spatial transformations can be exhibited in Covariance features from image sets, where objects/faces/textures are translated, rotated and sheared wrt. the center of the image or camera origin. DTI (Diffusion Tensor Images) can also display spatial transformations, depending on the diffusivity scale used to generate them or the difference in subject tissue texture. Moreover, in continuous data, where GMMs or HMMs are used to model human activities or manipulation tasks, the data might exhibit \textit{transformations}; due to changes in initial positions, reference frames or contexts.
To exploit the exponential increase of data available on-line, it is crucial to have algorithms that are robust to such \textit{transformations}, i.e. are \textit{transform-invariant}. The most typical approach to deal with these nuisance transformations is to manually \textit{pre-process} the collected data, \textit{prior} to applying any machine learning algorithm. This, however, requires for previous knowledge of the corresponding \textit{transformations} and much human intervention. Another approach is to jointly estimate the transformations while learning the structure of the dataset, these approaches require either (i) providing a list of possible transformations and approximating the \textit{optimal} parameters through EM-like algorithms \citep{Frey:TPAMI:2003,Frey:NIPS:2001} or (ii) sampling the transformations from a specified parameter-space through Bayesian non-parametric approaches \citep{Sudderth:NIPS:2005,Hu:ICML:2012}.
The \textit{latter} category, models \textit{transformed} groups of features in images using the transformed Indian Buffet Process (tIBP) and transformed Dirichlet Process (tDP) priors for latent feature models and mixture models, respectively. These approaches, although elegant, rely on sampling simple 2D transformation parameters, such as pixel/feature locations, which are not scalable to $N$-dimensional Covariance matrices. Moreover, they are limited to transformations in an image-to-image sense, not within images. Hence, one must know \textit{a priori} that the data-points/features come from a different source/context. In our work, rather than assuming that we know \textit{a priori} which data-points come from a different source/context and how many sources/contexts exist in the dataset, we seek to discover this from the data itself. In other words, we are interested in tackling the problem of \textit{transform-invariant} \textit{non-parametric} clustering of Covariance matrices (see Figure \ref{fig:problem1}).
\begin{figure*}\label{fig:problem1}
\end{figure*}
\subsection{Transform-Invariance and Non-Parametric Clustering of Covariance Matrices} The problem illustrated in Figure \ref{fig:problem1} is posed as follows; given a dataset of $M$ Covariance matrices; $\mathbf{\Theta} = \{\mathbf{\Sigma}_1, \dots, \mathbf{\Sigma}_M\}$ where $\mathbf{\Sigma}_i \in \mathds{R}^{N\times N}$ describing data in $\mathbf{x} \in \mathds{R}^{N}$; \textit{with} or \textit{without} a corresponding location $\mathbf{\mu} = \{\mathbf{\mu}_1, \dots, \mathbf{\mu}_M\}$ (resulting in a Gaussian distribution if given), we seek to describe the dataset with $K$ \textit{transform-invariant} clusters. Hence, such clustering is \textit{invariant} to any type of transformations \textit{(scaling, translation, rotation)} on the Covariance matrices. Moreover, instead of setting the desired cluster value $K$ or the possible transformations between clusters, we want to \textit{discover} this from the dataset.
One can imagine a plethora of applications that would benefit from such an algorithm; e.g. (i) finding groups or clusters of objects/texture/faces in unstructured/unlabeled datasets, (ii) segmenting DTI images without prior knowledge of the brain region, (iii) discovering \textit{task primitives} from large datasets of task ellipsoids recorded from different tools or subjects, (iv) clustering streams of continuous transformed data, etc. In order to achieve this desiderata, we must address two problems: (i) transform-invariant similarity of Covariance matrices and (ii) non-parametric clustering over (dis)-similarities.
\subsubsection{Transform-Invariant Covariance Matrix Similarity}
The advantages of using Covariance matrices to represent data can be over-shadowed by their non-Euclidean topology. Most machine learning algorithms (be it \textit{supervised} or \textit{unsupervised}) rely on computing distances/norms/similarities in feature space, assuming the features are i.i.d. from an underlying distribution in $\mathds{R}^N$ Euclidean space. Unfortunately, SPD (and consequently Covariance) matrices lie on a special Riemannian manifold, $\mathbf{\Sigma} \in \mathcal{S}_{++}^{N}$, and should be treated as such. For example, baseline learning algorithms, such as K-NN (Nearest Neighbor) and K-Means, rely on a pair-wise distance $\Delta(\mathbf{x},\mathbf{x}')$ between features; typically a Minkowski metric $L_p(\mathbf{x},\mathbf{x}') = \left(\sum_{i=1}^{N} |x_i - x_i'|^p\right)^{1/p}$. When using such Euclidean geometry, the distance between two Covariance matrices $\mathbf{\Sigma}_i$ and $\mathbf{\Sigma}_j$ would be, $\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j) = || \mathbf{\Sigma}_i - \mathbf{\Sigma}_j ||_2 = \sqrt{\text{trace}(\mathbf{\Sigma}_i - \mathbf{\Sigma}_j)^T(\mathbf{\Sigma}_i - \mathbf{\Sigma}_j)}$. where $||\mathbf{X}||_2=\sqrt{\text{trace}(\mathbf{X}^T\mathbf{X})}$ is the Euclidean (Frobenius) norm, the typical distance metric used on matrices of $\mathbf{X} \in \mathds{R}^{N\times N}$. Research has shown, that using such standard $L_p$-norms on $\mathcal{S}_{++}^{N}$ results in very poor accuracy \citep{Dryden:AAS:2009,Cherian:TPAMI:2013}. Moreover, when trying to interpolate or compute Geometric means, Euclidean metrics are prone to swelling and can lead to non-positive semi-definite estimates \citep{Arsigny:JMAA:2006}. Several approaches have been proposed to over-come this, which we categorize into two families of approaches: (i) metric substitution and (ii) kernel methods.
\textit{Metric substitution} approaches rely on using tailor-made non-Euclidean and Riemannian distances or metrics together with classical learning algorithms. Several non-Euclidean measures of similarity that deal with the unique structure of Covariance matrices have been proposed; for the sake of brevity, we introduce four of the most widely used similarity functions (Table \ref{tab:metrics}). The Affine Invariant Riemannian Metric (AIRM) \citep{Pennec:IJCV:2006} and the Log-Euclidean Riemannian Metric (LERM) \citep{Arsigny:MRM:2006} are the most commonly used metrics, as they calculate a distance analogous to the Frobenius norm while taking into account the curvature of the Riemannian manifold. The Jensen-Bregman LogDet Divergence (JBLD) \citep{Cherian:TPAMI:2013}, in turn, is a matrix form of the symmetrized Jensen-Bregman divergence. The Kullback-Leibler Divergence Metric (KLDM) \citep{Moakher:VPRF:2006}, on the other hand, uses symmetrized $f$-divergences to compute distances between covariance matrices. These similarity functions have both advantages and drawbacks, regarding accuracy and computational efficiency\footnote{For a thorough comparison refer to \citet{Cherian:TPAMI:2013} and \citet{Dryden:AAS:2009}}. They all possess most of the desired properties a similarity metric should have, such as non-negativity, definiteness, symmetry, affine-invariance, triangle inequality, etc. However, none of them explicitly ensures the property of \textit{transform-invariance} \begin{equation} \Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j) = 0 \hspace{5pt} \Leftrightarrow \hspace{5pt} \mathbf{\Sigma}_j = \mathbf{R}\mathbf{V}^{(i)}(\gamma\mathbf{\Lambda}^{(i)})(\mathbf{R}\mathbf{V}^{(i)})^{T} \end{equation} where $\gamma \in \mathds{R}$ is a scaling factor, $\mathbf{R} \in \mathds{R}^{N \times N}$ is a rotation matrix and $\mathbf{\Sigma}_i = \mathbf{V}^{(i)}\mathbf{\Lambda}^{(i)}(\mathbf{V}^{(i)})^{T}$ is the Eigenvalue decomposition of $\mathbf{\Sigma}_i$. 
\begin{table}[!tbp]
\small
\centering
\begin{tabular}{|c|c|}
\hline
Type & $\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j)$ \\
\hline
AIRM \citep{Pennec:IJCV:2006} & $\left|\left|\log(\mathbf{\Sigma}_i^{-1/2}\mathbf{\Sigma}_j\mathbf{\Sigma}_i^{-1/2})\right|\right|$ \\
LERM \citep{Arsigny:MRM:2006} & $\left|\left|\log(\mathbf{\Sigma}_i) - \log(\mathbf{\Sigma}_j)\right|\right|$ \\
KLDM \citep{Moakher:VPRF:2006} & $\frac{1}{2}\texttt{trace}(\mathbf{\Sigma}_i^{-1}\mathbf{\Sigma}_j + \mathbf{\Sigma}_j^{-1}\mathbf{\Sigma}_i-2I)$\\
JBLD \citep{Cherian:TPAMI:2013} & $\log \left|\frac{\mathbf{\Sigma}_i + \mathbf{\Sigma}_j}{2}\right| - \frac{1}{2}\log\left|\mathbf{\Sigma}_i\mathbf{\Sigma}_j\right| $ \\\hline
\end{tabular}
\caption{Standard Covariance Matrix Similarity Functions \label{tab:metrics}} \end{table} The \textit{latter} approaches involve \textit{kernel methods} which project the SPD matrices to a higher dimensional feature space via defined kernels or kernels learnt from the data \citep{Vemulapalli:CVPR:2013,Jayasumana:CVPR:2013,Huang:ICML:2015}. Although successful, most of these approaches are either only applicable in a supervised learning setting or are not robust to \textit{transformations}.
In this work, we focus on the Spectra of Covariance matrices and propose a \textit{transform-invariant} similarity function which is inspired by Spectral Graph Theory and a geometrical intuition of the embedded subspace of the eigenvectors and eigenvalues. We introduce it as the Spectral Polytope Covariance Matrix \textbf{(SPCM)} similarity function in Section \ref{sec:full_spcm}.
\subsubsection{Bayesian Non-Parametric Clustering over Similarities} Given a \textit{transform-invariant} similarity function, we must find an appropriate \textbf{clustering} algorithm for our task. Since our dataset consists of abstract objects, i.e. Covariance matrices, we could use similarity-based clustering algorithms such as Spectral Clustering \citep{Ng:NIPS:2001}, kernel K-means \citep{Dhillon:KDD:2004}, Affinity Propagation \citep{Frey:Science:2007}, among others. Although robust in nature, these algorithms require some heavy parameter tuning; typically through \textit{model selection} or \textit{grid search}. An alternative to this, is to adopt \textit{non-parametric} models. We do not refer to \textit{non-parametric} as methods with ``no parameters", rather to models whose complexity is inferred from the data automatically, allowing for the number of parameters to grow w.r.t. the number of data points. \textit{Bayesian non-parametric} methods provide such an approach. When clustering with mixture models, instead of doing model selection to find the \textit{optimal} number of clusters, a Dirichlet processes ($\mathcal{DP}$) prior (or one of its representations\footnote{The $\mathcal{DP}$ can be constructed through several representations. In this work, we adopt the the Chinese Restaurant Process ($\mathcal{CRP}$) construction.}) is used to construct an infinite mixture model. By evaluating an \textit{infinite} mixture model on a \textit{finite} set of samples, one can estimate the number of clusters needed from the data itself, whilst allowing new data-points to form new clusters \citep{Gershman:JMP:2012}.
To deal with the fact that our data-points (Covariance matrices $\mathbf{\Sigma}_i$) cannot be treated as points in Euclidean space, we use spectral dimensionality reduction to map Covariance matrices $\mathbf{\Sigma}_i$ into a lower-dimensional Euclidean space, i.e. $\mathbf{y}_i = f(\mathbf{\Sigma}_i)$ where $f(\cdot):\mathcal{S}_{++}^{N}\rightarrow\mathds{R}^{P}$, induced by their similarity matrix $\mathbf{S} \in \mathds{R}^{M \times M}$, where $M$ is the number of Covariance matrices in our dataset. In standard spectral clustering approaches, the dimensionality ($P$) of this mapping is typically set by the user or through \textit{model selection}. In this work, we propose a spectral non-parametric variant of this approach, which relaxes the assumption that the number of leading eigenvectors $P$ is equivalent to the number of $K$ clusters in the data; i.e. $K=P$. This assumption is too restrictive, as it is only truly valid in ideal cases where we have sparse similarity matrices $\mathbf{S}$, which is never the case in real-world data \citep{Poon:UAI:2012}. By relaxing this assumption, this leads us to two new subproblems: 1) \textit{What is the appropriate dimensionality of the spectral embedding?} and 2) \textit{What are the optimal clusters in the lower-dimensional space?}\footnote{The general problem of finding the correct dimensionality and number of clusters in Spectral Clustering approaches has been referred to as \textit{rounding} \citep{Poon:UAI:2012}.}.
Problem 1) is tackled by proposing an unsupervised spectral embedding algorithm based on a probabilistic analysis of the eigenvalues of the Similarity matrix (Section \ref{sec:spcm_crp}). We then address problem 2) by applying a \textit{Bayesian non-parametric} clustering algorithm on the spectral subspace projections of the Covariance matrices, while exploiting the similarity information on the original (Covariance matrix) space. We achieve this by adapting the distance-dependent Chinese Restaurant Process (dd-$\mathcal{CRP}$) \cite{Blei:JMLR:2011} \footnote{A distribution over partitions that allows for dependencies (from space, time and network connectivity) between data-points.} to the proposed Spectral Polytope Covariance Matrix \textbf{(SPCM)} similarity function and the points on the spectral sub-space ($\mathbf{Y} \in \mathds{R}^{M\times P}$), leading to a SPCM \textit{similarity}-dependent Chinese Restaurant Process, which we refer to as the SPCM-$\mathcal{CRP}$ mixture.
\begin{figure*}\label{fig:problem2}
\end{figure*}
\subsection{Segmentation and Transform-Invariant Action Discovery} The problem of \textit{transform-invariance} is also exhibited in streams of continuous data. Imagine a set of time-series, corresponding to streams of motion sensors (e.g. kinect information used to control video games or teleoperate robots) or motion/interaction signals from the robotics domain, such as position, velocity, orientation, forces and torques of an end-effector or a hand; representing a human (or robot) executing a complex sequence of actions. Typically, the challenge while analyzing such streams of data is to segment or decompose them into meaningful actions or states. However, such time-series might be subject to \textit{transformations}, due to changes of Reference Frame, change of context or even changes in the execution of the task itself (i.e. changes of position and orientation of the user). We posit that \textit{transform-invariance} is an interesting problem in such time-series analysis scenarios. Following we describe the challenges we tackle in this work in a 2D illustrative example.\\
\noindent \texttt{Illustrative example}: Assume a set of $M$ 2D time-series with varying length $T= \{T^{(1)}, \cdots, T^{(M)}\}$ and switching dynamics $\pi= \{\pi^{(1)}, \cdots, \pi^{(M)}\}$, sampled from 2 \textbf{unique} Gaussian emission models $\theta_1,\theta_2$ subject to transformations $f_1(\cdot),f_2(\cdot)$ resulting in a set of transform-dependent emission models $\Theta = \{\theta_1,\theta_2,\theta_3 = f_1(\theta_2),\theta_4 = f_2(\theta_3)\}$, as shown in Figure \ref{fig:problem2}. The problem that we would like to tackle is to jointly: (i) recover the segmentation points of all time-series, (ii) recover the \textit{transform-dependent} emission models $\Theta$ and (iii) capture the similarities across these models to uncover the \textbf{true} sub-set of \textit{transform-invariant} emission models. All of this, without explicitly knowing or modeling the transformations that have caused the variation in the observed time-series nor the expected number of hidden states/emission models.
Within the \textit{robotics} community, two probabilistic approaches to tackle the segmentation problem prevail: (i) Gaussian Mixture Models \citep{Kruger:ICRA:2012,Lee:AR:2015} and (ii) Hidden Markov Models \citep{Takano:HUM:2006,Kulic:TRO:2009, Butterfield:HUM:2010, Niekum:IROS:2013}. In the \textit{former}, segmentation points are extracted by fitting a (\textit{finite} or \textit{non-parametric}) GMM to the set of demonstrations directly, indicating that each component is an \textit{action} in the sequence. This approach is ill-suited to our problem setting as it segments the trajectories purely on a spatial sense, disregarding state dynamics, transition dynamics or correlations between the trajectories. For this reason, the \textit{latter} approach is more successful in segmentation problems, as transition dynamics and action sequencing are explicitly handled through the \textit{Markovian} state chain. Two types of HMM variants have been mostly used in these approaches: (1) online HMMs \citep{Takano:HUM:2006,Kulic:TRO:2009} and (2) Bayesian Non-Parametric HMMs \citep{Butterfield:HUM:2010, Niekum:IROS:2013}. In this work, we favor the \textit{latter} approaches as they are well-suited for sets of time-series with an unknown number of states and \textit{shared} or \textit{common} emission models. Such approaches include the Hierarchical Dirichlet Process Hidden Markov Model (HDP-HMM \citep{Teh:ASA:2006}) and the Beta Process - Hidden Markov Model (BP-HMM) \citep{Fox:NIPS:2009}. The HDP-HMM is nothing but a collection of Bayesian HMMs (each modeling one time-series) with the same number of states following exactly the same sequence. This is a major set-back for our desiderata, as we do not assume a fixed number of actions per trajectory (see Figure \ref{fig:problem2}). On the contrary, we would like to avoid such restricting assumptions. 
The BP-HMM, relaxes these assumptions, by instead using the Indian Buffet Process (IBP), induced by the beta process (BP), to sample the states of all HMMs as a shared set of features. This allows for the set of Bayesian HMMs to have \textit{partially} shared states and independent switching dynamics.
The $\mathcal{IBP}$-HMM\footnote{The terms BP-HMM and $\mathcal{IBP}$-HMM have been used interchangeably in literature \citep{Fox:PhD:2009}. To continue with the whimsical restaurant analogies, in this work we adopt the \textit{latter} term.} has been formulated with autoregressive, multinomial and Gaussian emission models \citep{Fox:NIPS:2009, Hughes:NIPS:2012}. None of which are capable of handling \textit{transform-invariance}. To this end, we propose the IBP Coupled SPCM-$\mathcal{CRP}$ - Hidden Markov Model (ICSC-HMM). In the ICSC-HMM, we couple an $\mathcal{IBP}$-HMM (with Gaussian emission models) together with our SPCM-$\mathcal{CRP}$ mixture model to jointly \textbf{segment} multiple time-series (with partially shared parameters) and extract groups of \textbf{transform-invariant} emission models (Section \ref{sec:tgau_bp_hmm}). This results in a \textit{topic-model like} hierarchical model capable of addressing the multiple problems posed in Figure \ref{fig:problem2}.
\subsection{Contributions and Paper Organization} \noindent The contributions of this manuscript are three-fold: \begin{enumerate}[leftmargin=*]
\item We offer a novel \textit{transform-invariant} \underline{similarity function} on the space of Covariance matrices $\mathcal{S}_{++}^N$ which we refer to as the SPCM (Spectral Polytope Covariance Matrix) similarity function, presented in Section \ref{sec:full_spcm}.
\item We derive a \textit{transform-invariant} \underline{clustering} approach for Covariance matrices which elegantly leverages spectral clustering and Bayesian non-parametrics. We refer to this approach as the SPCM-$\mathcal{CRP}$ mixture model; introduced in Section \ref{sec:spcm_crp}.
\item Finally, we couple the proposed Covariance matrix clustering approach with a Bayesian non-parametric formulation of a Hidden Markov Model (HMM) to \underline{jointly segment and} \underline{cluster} transform-invariant states of an HMM for time-series data analysis. We refer to the coupled model as the $\mathcal{IBP}$ Coupled SPCM-$\mathcal{CRP}$ - Hidden Markov Model (ICSC-HMM) and present it in detail in Section \ref{sec:tgau_bp_hmm}. \end{enumerate} These sections are followed by a thorough experimental evaluation on simulated and real-world datasets (Section \ref{sec:results}).
\section{Transform-Invariant Covariance Matrix Similarity} \label{sec:full_spcm} In this work, we introduce a Covariance similarity function that explicitly holds the property of \textit{transform-invariance} and \textit{boundedness}. The derivation of this similarity function relies on a statistical and geometrical analysis of the eigen-decomposition of a Covariance matrix. Any Covariance matrix $\mathbf{\Sigma} \in \mathcal{S}_{++}^N$ is symmetric, semi-positive definite and full rank. Due to these properties, they have eigenvectors $\mathbf{V} = [V_1,V_2,\dots,V_N]$ with corresponding positive eigenvalues $\mathbf{\Lambda} = \text{diag}(\lambda_1,\lambda_2,\dots,\lambda_N)$, which form an orthonormal basis $\mathbf{\Sigma} = \mathbf{V}\mathbf{\Lambda} \mathbf{V}^T$. While the eigenvalues express the amount of variance corresponding to its respective eigenvector; the eigenvectors are uncorrelated linear combinations of the random variables that produced the covariance matrix. This eigen representation of the Covariance matrix yields an invariant embedding of the structure (i.e. shape) of the data underlying the Covariance matrix. For this reason, we explicitly work on the eigen-decomposition of the Covariance matrix to provide the property of \textit{transform-invariance} in our similarity function (Section \ref{sec:spcm}), while the property of \textit{boundedness} is covered in Section \ref{sec:bound_spcm}.
\subsection{Spectral Polytope Covariance Matrix (SPCM) Similarity} \label{sec:spcm} Inspired by spectral graph theory and a geometric intuition of the shapes of convex sets, our similarity function is based on the representation of the covariance matrix as a polytope constructed by the projections of the eigenvalues of $\mathbf{\Sigma}$ on their associated eigenvectors. We refer to it as the Spectral Polytope Covariance Matrix \textit{(SPCM)} similarity function. This non-metric similarity function is based on the assumption that two covariance matrices are indeed similar if there exists a unified homothetic ratio between their spectral polytopes. Following, we elaborate on this assumption.
Spectral algorithms provide an innate representation of the underlying structure of data, derived from the eigenvalues and eigenvectors of a symmetric positive definite (SPD) matrix (e.g. Covariance or Affinity matrix) \citep{Brand:AISTATS:2003}. Let $\mathbf{\Sigma} \in \mathcal{S}_{++}^{N}$ be a SPD Covariance matrix and $\mathbf{\Sigma}=\mathbf{V}\mathbf{\Lambda} \mathbf{V}^T$ the eigenvalue decomposition, where $\mathbf{\Lambda}$ is the diagonal matrix of eigenvalues and $\mathbf{V}$ the matrix of eigenvectors.
\begin{figure}
\caption{Illustration of Spectral Polytope Similarity Principle }
\label{fig:polytopes}
\label{fig:homothety}
\label{fig:Spectral_polytope}
\end{figure}
\newtheorem{mydef}{Definition} \begin{mydef}
\textbf{Spectral Polytope} ($\mathbf{SP}$): The $\mathbf{SP}$ is a geometrical representation of the invariant structure of a Covariance matrix $\mathbf{\Sigma} \in \mathcal{S}_{++}^N$. It is constructed by taking the convex hull of the set of orthogonal column vectors $\mathbf{X} = \left\{ X_1|X_2|\dots|X_N \right\}$, where $X_i = V_i\lambda_i^{1/2}$, creating a $(N-1)$-polytope representing the invariant shape of $\mathbf{\Sigma}$, as depicted in Figure \ref{fig:polytopes}. \end{mydef}
\noindent \texttt{Constructing $\mathbf{SP}$ via $\mathbf{X}$:} Assume two 3-$dim$ Covariance matrices $\mathbf{\Sigma}_i$,$\mathbf{\Sigma}_j$ similar in shape, yet different in rotation $\mathbf{R}$ and scale $\gamma$; i.e. $\mathbf{\Sigma}_j=\mathbf{R}\mathbf{V}^{(i)}(\gamma\mathbf{\Lambda}^{(i)})(\mathbf{R}\mathbf{V}^{(i)})^{T}$ (as in Figure \ref{fig:polytopes}). In 3-$dim$ space, the hyper-spheres of $\mathbf{\Sigma}_i,\mathbf{\Sigma}_j$ form ellipsoids centered at $\mu^{(i)},\mu^{(j)}$ with axes $\mathbf{V}^{(i)},\mathbf{V}^{(j)}$ and axis lengths $\mathbf{\Lambda}^{(i)},\mathbf{\Lambda}^{(j)}$. Each Covariance matrix has a corresponding \textit{invariant} set of column vectors $\mathbf{X} = \left\{X_1|X_2|\dots|X_N\right\}$ where $X_i = V_i\lambda_i^{1/2}$ is a projection on the $i$-th eigenvector scaled by its respective eigenvalue. Such a set encodes the linear correlations of the distribution of points embedded on the surface of a hypersphere, producing a scale and rotation invariant representation of the original dataset used to construct $\mathbf{\Sigma}$. As shown in Figure \ref{fig:polytopes}, the convex hull of the endpoints of the sets of column vectors $\mathbf{X}^{(i)}=\left\{ X_{1}^{(i)}|X_2^{(i)}|X_3^{(i)}\right\}$ and $\mathbf{X}^{(j)}=\left\{X_{1}^{(j)}|X_2^{(j)}|X_3^{(j)}\right\}$ generate the spectral $N-1$-polytopes $\mathbf{SP}_{i}$ and $\mathbf{SP}_{j}$, depicted by the shaded regions in Figure \ref{fig:polytopes}. \noindent \textit{Transform-invariance through homothety:} By stripping down the ellipsoids from their orientation ($\mathbf{V}$) and translation ($\mathbf{\mu}$) constraints (as shown in Figure \ref{fig:homothety}), we observe that $\mathbf{SP}_i$ is an \textit{enlargement} or \textit{magnification} of $\mathbf{SP}_j$\footnote{One can also state that $\mathbf{SP}_j$ is a shrinkage or contraction of $\mathbf{SP}_i$.}, in other words it is subject to a \textit{homothetic transformation}. 
Since $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$ are convex sets of vectors in $\mathds{R}^N$ used to construct the polytopes $\mathbf{SP}_i,\mathbf{SP}_j$, we can analogously state that, the nonempty set $\mathbf{X}^{(j)}$ is a \textit{homothetic projection} of $\mathbf{X}^{(i)}$ via the following theorem: \newtheorem{mytheor}{Theorem} \begin{mytheor}
Two nonempty sets $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$ in Euclidean space $\mathds{R}^N$ are homothetic provided $\mathbf{X}^{(i)} = \mathbf{z} + \gamma \mathbf{X}^{(j)}$ for a suitable point $\mathbf{z} \in \mathds{R}^N$ and a scalar $\gamma \in \Re_{\neq 0}$, i.e. the homothety ratio.
\begin{proof}
Provided in \citet{Soltan:BAG:2010}.
\end{proof} \end{mytheor} \noindent In other words, if we can represent the column vectors of $\mathbf{X}^{(i)}$ as $X_k^{(i)} = \mathbf{z} + \gamma X_k^{(j)}$ $\forall k=\{1,\dots,N\}$, \textit{homothety} is the linear projection that preserves collinearity across the points aligned on $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$. Hence, assuming $\mathbf{z} = \mathbf{0}_N$ and $\gamma \in \Re_{+}$, the \textit{positive homothety ratio}, $\gamma$, represents the scaling factor between the two convex sets $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$, and consequently between two Covariance matrices $\mathbf{\Sigma}_i$ and $\mathbf{\Sigma}_j$. \begin{mydef}
\textbf{Homothetic similarity:} $\mathbf{\Sigma}_i$ and $\mathbf{\Sigma}_j$ are similar if their associated convex sets $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$ are homothetic. \end{mydef} \noindent We derive our similarity function from the following corollary: \newtheorem{mycorro}{Corollary} \begin{mycorro}
Given $\mathbf{\Sigma}_i$,$\mathbf{\Sigma}_j \in \mathcal{S}_{++}^{N}$ and their associated convex sets $\mathbf{X}^{(i)}=\left\{X_1^{(i)}|\dots|X_N^{(i)}\right\}$, $\mathbf{X}^{(j)}=\left\{X_1^{(j)}|\dots|X_N^{(j)}\right\}$, if a homothety ratio $\gamma \in \Re_{\neq 0}$ exists such that $X_k^{(i)} =\gamma X_k^{(j)}$ $\forall k=\{1,\dots,N\}$, $\mathbf{\Sigma}_i$ and $\mathbf{\Sigma}_j$ are deemed similar, subject to a homothetic scaling $\gamma$. \end{mycorro}
\noindent Our proposed similarity function, thus, relies on the existence of a single homothety ratio, $\gamma$, that holds for all pairs of column vectors $X_k^{(j)}\rightarrow X_k^{(i)}$ $\forall k=\{1,\dots,N\}$. We approximate this ratio by taking the vector-norms, namely the $L_2$ norm, of each element in the convex set and computing the element-wise division between the two sets $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$, as follows: \begin{equation}
\hat{\mathbf{\gamma}}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right)= \left[\frac{||X_1^{(i)}||}{||X_1^{(j)}||}, \dots, \frac{||X_k^{(i)}||}{||X_k^{(j)}||}, \dots, \frac{||X_N^{(i)}||}{||X_N^{(j)}||} \right].
\label{eq:ratios} \end{equation} This yields a vector of $N$ \textit{approximate homothety ratios} $\hat{\mathbf{\gamma}}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right)=[\hat{\gamma}_1,\dots,\hat{\gamma}_N]$ corresponding to each $k$-th dimension of $\mathbf{\Sigma} \in \mathds{R}^{N\times N}$. In the ideal case, where $\mathbf{\Sigma}_i \equiv \mathbf{\Sigma}_j$, all elements of \eqref{eq:ratios} are equivalent, i.e. $\hat{\gamma}_1 = \dots = \hat{\gamma}_k = \dots = \hat{\gamma}_N$. The overall approximate homothety ratio $\bar{\hat{\gamma}}$ can then be computed as the mean of all ratios $\hat{\gamma}_k$ approximated for each dimension $k$, \begin{equation} \bar{\hat{\gamma}}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right) = \frac{1}{N}\sum_{k=1}^{N} \hat{\gamma}_k. \label{eq:mean_gamma} \end{equation} Consequently, the variance of the approximate homothety ratios, \begin{equation} \sigma^2\left(\hat{\gamma}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right)\right) = \frac{1}{N}\sum_{k=1}^{N} (\hat{\gamma}_k - \bar{\hat{\gamma}})^2, \label{eq:var_gamma} \end{equation} represents the variation of approximate homothetic ratios between the two convex sets $\mathbf{X}^{(i)},\mathbf{X}^{(j)}$ and thus, provides a measure of \textit{homothetic similarity} between Covariance matrices $\mathbf{\Sigma}_i, \mathbf{\Sigma}_j$. Hence, $\sigma^2\left(\hat{\gamma}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right)\right) = 0$ when $\mathbf{\Sigma}_i \equiv \mathbf{\Sigma}_j$ and $\sigma^2\left(\hat{\gamma}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right)\right) \rightarrow \Re_{+}$ otherwise.
As per \eqref{eq:ratios}, the proposed measure of similarity will approximate either a ratio of \textit{magnification} (if $\texttt{area}(\mathbf{SP}_i)>\texttt{area}(\mathbf{SP}_j)$) or \textit{contraction} (if $\texttt{area}(\mathbf{SP}_i)<\texttt{area}(\mathbf{SP}_j)$) in the direction of $\mathbf{X}^{(i)} \rightarrow \mathbf{X}^{(j)}$, i.e. it is uni-directional. In order to provide a bi-directional measure of similarity and consider only the \textit{magnification} ratio between the two sets $\mathbf{X}^{(i)}$ and $\mathbf{X}^{(j)}$, we formulate our similarity function as follows,
\begin{equation} \begin{split} \Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j) & = H(\delta_{ij})\sigma^2\left(\hat{\gamma}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right)\right) + \left(1-H(\delta_{ij})\right)\sigma^2\left(\hat{\gamma}\left(\mathbf{X}^{(j)},\mathbf{X}^{(i)}\right)\right) \\ \text{with} & \quad \delta_{ij} = \bar{\hat{\gamma}}\left(\mathbf{X}^{(i)},\mathbf{X}^{(j)}\right) - \bar{\hat{\gamma}}\left(\mathbf{X}^{(j)},\mathbf{X}^{(i)}\right), \end{split} \label{eq:spcm} \end{equation} where $H(\delta_{ij})$ is the Heaviside step function, which can be approximated as $H(\delta_{ij}) = \frac{1}{2}[1 + \text{sign}(\delta_{ij})]$. Hence, \eqref{eq:spcm} represents the variation of approximate \textit{magnifying} homothetic ratios between the two convex sets $\mathbf{X}^{(i)},\mathbf{X}^{(j)}$. This similarity function is indeed not a \textit{proper} metric for (dis)similarity. However, it can be considered, a \textit{semimetric}, as it exhibits most of the required properties of a legitimate distance function or metric: \begin{enumerate}[leftmargin=*]
\item $\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j)\geq 0$
Non-negativity
\item $\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j) = 0 \Leftrightarrow \mathbf{\Sigma}_i = \mathbf{\Sigma}_j$
Positive Definiteness
\item $\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j) = \Delta_{ji}(\mathbf{\Sigma}_j,\mathbf{\Sigma}_i)$
Symmetry
\item $\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j)=0 \Leftrightarrow \mathbf{\Sigma}_j = \mathbf{R}\mathbf{V}^{(i)}(\gamma\mathbf{\Lambda}^{(i)})(\mathbf{R}\mathbf{V}^{(i)})^{T}$
Transform-Invariance \end{enumerate} \noindent \eqref{eq:spcm} is thus, a \textit{semimetric} on $\mathbf{\Sigma} \in \mathcal{S}_{++}^{N}$, as it is a function $\Delta : \mathbf{\Sigma} \times \mathbf{\Sigma} \rightarrow \Re$ that satisfies the first three axioms (1-3) of a metric except the axiom of \textit{triangle inequality}. Strict \textit{triangle inequality} is only crucial for distance-dependent learning algorithms, such as $K$-Means, GMM or $K$-NN. In spectral algorithms, it is \textit{sufficient} for a similarity/affinity measure to hold axioms (1-3). Moreover, it has been shown that applying distance-dependent algorithms directly in the space of $\mathcal{S}_{++}^N$ is not a straight-forward procedure \citep{Cherian:TPAMI:2013}. Thus, the lack of this property is not detrimental to our targeted applications, as we will focus on a spectral-based mixture model variant, described in Section \ref{sec:spcm_crp}. Following we introduce a formulation of \eqref{eq:spcm} that provides the property of \textit{boundedness}. \begin{figure}
\caption{Effects of B-SPCM hyper-parameters. }
\label{fig:b_spcm}
\end{figure}
\subsection{Bounded Decaying SPCM Similarity function} \label{sec:bound_spcm} \eqref{eq:spcm} yields values in the range of $\Delta : \mathbf{\Sigma} \times \mathbf{\Sigma} \rightarrow [0,\infty)$. Having such an unbounded metric can be a nuisance. Hence, we formulate a bounded function for SPCM similarity, as $f(\cdot)$, which is a monotonic decay function bounded between $[0,1]$, where $f(\cdot)=1$ represents definite similarity, i.e. $\mathbf{\Sigma}_i \equiv \mathbf{\Sigma}_j$ whilst $f(\cdot)=0$ is absolute dissimilarity:
\begin{equation} f(\Delta_{ij},\tau) = \frac{1}{1+\upsilon(\tau) \Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j)} \label{eq:bspcm} \end{equation}
where $\Delta_{ij}=\Delta_{ij}(\mathbf{\Sigma}_i,\mathbf{\Sigma}_j)$ is the SPCM similarity value given by \eqref{eq:spcm} and $\upsilon$ is a scaling function determined by the following equation: \begin{equation} \upsilon(\tau) = 10^{(\tau e^{-N})} \end{equation} where $N$ is the dimensionality of $\mathbf{\Sigma} \in \mathcal{S}_{++}^N$ and $\tau$ is a design tolerance hyper-parameter. The \textit{B-SPCM} similarity function \eqref{eq:bspcm} was designed so that it could hold the following properties: \begin{enumerate}[leftmargin=*]
\item $f(\cdot)$ decreases as positive $\Delta_{ij}$ increases
\item $f(0)=1$
\item $f(\cdot) \rightarrow 0$ as $\Delta_{ij} \rightarrow +\infty$ \end{enumerate} \normalsize In Figure \ref{fig:b_spcm} we show the effect of the tolerance hyper-parameter, $\tau$, and the dimensionality of the data, $N$, on the B-SPCM similarity function. The rationale behind the scaling function $\upsilon$ being dependent on $N$ is to introduce a semantic distinction between similarity values in different dimensions. For example, a similarity value $\Delta_{ij}=1$ for $\mathbf{\Sigma}_i,\mathbf{\Sigma}_j \in \mathcal{S}_{++}^3$ should not yield the same B-SPCM value if $\mathbf{\Sigma}_i,\mathbf{\Sigma}_j \in \mathcal{S}_{++}^6$. When this happens, according to Eq. \ref{eq:spcm}, it means that the variance on the homothetic ratios is the same. However, if we have the same variance for 3-$dim$ as in 6-$dim$, the relative deviation between them is not the same. Hence, the scaling function $\upsilon(\tau)$ is a form of dimensional scaling to account for this behavior. This is evident in the span of dimensions between $N = [3,9]$. For Covariance matrices with $N>9$, the effect of the scaling function is trivial. Regarding the tolerance value, $\tau$, it can take any non-negative value in $\Re$, and is merely an amplification factor that can be tuned for datasets which we know are quite noisy; typical values that we have used in our dataset lie in the range of $[1,10]$.
\begin{figure}
\caption{\small \textit{Toy-ellipsoids} dataset of transformed 3D Covariance matrices (color corresponds to similar matrices). }
\label{fig:ellipsoids}
\end{figure}
\subsection{Comparison to Standard Similarity Functions} \label{sec:comparison-standard} We highlight the advantage of the SPCM function to find similarity in \textit{transformed} Covariance matrices over the standard similarity functions (presented in Table \ref{tab:metrics}) on a toy example.
\paragraph{Toy Example} Consider a small dataset of Covariance matrices $\mathbf{\Theta} = \left\lbrace \mathbf{\Sigma}_1,\mathbf{\Sigma}_2,\mathbf{\Sigma}_3,\mathbf{\Sigma}_4, \mathbf{\Sigma}_5 \right\rbrace$ where $\mathbf{\Sigma}_i \in \mathcal{S}_{++}^3$. The dataset is generated by two \textit{distinct} Covariance matrices, namely $\mathbf{\Sigma}_1$ and $\mathbf{\Sigma}_4$: \begin{equation*} \textcolor{black}{\mathbf{\Sigma}_1 = \begin{bmatrix} b&a&a\\ a&b&a\\ a&a&b \end{bmatrix}},\hspace{5pt} \textcolor{black}{\mathbf{\Sigma}_4 = \begin{bmatrix} a&d&e\\ d&b&f\\ e&f&c \end{bmatrix}} \end{equation*} where $a-f$ take on real values in $\Re$, whose signs are constrained to yield a SPD matrix. Moreover, $\textcolor{black}{\mathbf{\Sigma}_2}$ is a scaled and noisy version of $\textcolor{black}{\mathbf{\Sigma}_1}$, $\textcolor{black}{\mathbf{\Sigma}_3}$ is a rotated version of $\textcolor{black}{\mathbf{\Sigma}_1}$ and $\textcolor{black}{\mathbf{\Sigma}_5}$ is a rotated and scaled version of $\textcolor{black}{\mathbf{\Sigma}_4}$, such that $\mathbf{\Theta} = \{\textcolor{black}{\mathbf{\Sigma}_1,\text{rot}(\mathbf{\Sigma}_1),\text{scaled}(\mathbf{\Sigma}_1)}, \textcolor{black}{\mathbf{\Sigma}_4, \text{rot}(\text{scaled}(\mathbf{\Sigma}_4))} \}$. In Figure \ref{fig:ellipsoids}, we illustrate these Covariance matrices as 3D ellipsoids; ellipsoids with the same color correspond to similar Covariance matrices. Thus, if we want to group these matrices based on \textit{transform-invariant} similarity, we should end up with two clusters $C = \{\textcolor{black}{c_1}, \textcolor{black}{c_2} \}$ where $\textcolor{black}{c_1 = \{\mathbf{\Sigma}_1, \mathbf{\Sigma}_2,\mathbf{\Sigma}_3 \}}$ and $\textcolor{black}{c_2 = \{\mathbf{\Sigma}_4,\mathbf{\Sigma}_5 \}}$. \\
In Figure \ref{fig:metrics_toy}, we show the similarity matrices generated by the B-SPCM and the standard Covariance matrix similarity functions. As can be seen, the latter fail to provide discriminative values for the true partitioning of the dataset. Whereas, the B-SPCM function, which focuses on the analysis of the spectral polytope, makes for a robust \textit{transform-invariant} similarity function. This is due to the fact that none of the standard similarity functions hold the rotation and scale invariance property \textit{explicitly}. Nonetheless, it is commonly known that even though partitions from a confusion matrix are not easily identified, we can still recover them with similarity-based clustering algorithms. In Section \ref{sec:results}, we provide an \textit{exhaustive} quantitative evaluation of the performance of the B-SPCM similarity function compared to the standard similarity functions using two popular similarity-based clustering algorithms. As will be discussed later on, none of the standard similarity functions are able to recover the two clusters from our toy dataset with neither of the clustering algorithms. Whereas, applying both algorithms to the B-SPCM similarity matrix yields the true labels.
\begin{figure}
\caption{ \small Similarity Matrices sorted by cluster labels for \textbf{B-SPCM} (top-left), \textbf{RIEM} (top-center), \textbf{LERM} (top-right), \textbf{KLDM} (bottom-left) and \textbf{JBLD} (bottom-right) on the \textit{Toy-Ellipsoids} dataset }
\label{fig:metrics_toy}
\end{figure}
\section{Spectral Non-Parametric Clustering of Covariance Matrices} \label{sec:spcm_crp} Given the similarity matrix $\mathbf{S} \in \mathds{R}^{M \times M}$ of $M$ Covariance matrices, where each entry $s_{ij}=f(\Delta_{ij},\tau)$ is computed by \eqref{eq:bspcm}, we wish to find a partition that recovers a natural \textit{transform-invariant} grouping of \textit{similar} Covariance matrices. One of the most popular approaches to solve this problem is \textit{Spectral Clustering}, which relies on graph Laplacian matrices and the study thereof (i.e. spectral graph theory) \citep{Ng:NIPS:2001, Luxburg:TSC:2007}. Spectral clustering determines relationships across data-points embedded in a graph induced by the similarity matrix $\mathbf{S}$. In other words, we would find a mapping of the Covariance matrices $\mathbf{\Sigma}_i$ onto a lower-dimensional Euclidean space, $\mathbf{y}_i = f(\mathbf{\Sigma}_i)$ where $f(\cdot):\mathcal{S}_{++}^{N}\rightarrow\mathds{R}^{P}$, induced by their similarity matrix $\mathbf{S} \in \mathds{R}^{M \times M}$. This mapping is found by constructing the graph Laplacian matrix $\mathbf{L} = \mathbf{D} - \mathbf{S}$; where $\mathbf{D}$ is a diagonal weighting matrix whose entries are column sums of $\mathbf{S}$. After performing an Eigenvalue decomposition of $\mathbf{L} = \mathbf{V}\mathbf{\Lambda}\mathbf{V}^T$ and ordering the eigenvectors in ascending order wrt. their eigenvalues $\lambda_0 = 0 \leq \lambda_1 \leq \dots \leq \lambda_{M-1}$; a $P$-dimensional \textit{spectral embedding} is constructed by selecting the first $P$ eigenvectors of the graph Laplacian $\mathbf{Y} \in \mathds{R}^{P \times M}$ as $\mathbf{Y} = [V^{(1)}, \dots, V^{(P)}]^T$. 
We would then perform $K$-Means on the spectral embedding $\mathbf{Y} \in \mathds{R}^{P \times M}$, which represent the clusters in the original space corresponding to the set of Covariance matrices $\mathbf{\Theta} = \{\mathbf{\Sigma}_1, \dots, \mathbf{\Sigma}_M\}$\footnote{For an in-depth tutorial and derivation of spectral clustering and variants, the authors recommend reading \citet{Luxburg:TSC:2007}.}.
One limitation is that performance relies on a good choice of $P$, the sub-space dimensionality, and $K$, the number of expected clusters. In this work, we provide an algorithm that leverages spectral clustering and Bayesian non-parametrics in one single model in order to automatically estimate $P$ and $K$. Hence, we tackle the following two main challenges:\\
\noindent \textit{(1) \underline{Sub-Space Dimensionality Selection}:} In its original form, the \textit{spectral clustering} algorithm assumes that the number of clusters $K$ is equivalent to the sub-space dimensionality $P$. The value of $P,K$ could be chosen either through model selection or by analyzing the eigenvalues of $\mathbf{L}$. Such a strong assumption comes from the theorem of connected graphs, which states that the multiplicity of the eigenvalue $\lambda=0$ determines the number of $K$ connected graphs \citep{Luxburg:TSC:2007}. This theorem holds only for sparse similarity matrices $\mathbf{S}$. When $\mathbf{S}$ is fully connected solely one eigenvalue is $\lambda_0=0$, however, the contiguous eigenvalues are $\lambda_{1,\dots,P} \rightarrow 0$. Thus, we can determine $P,K$, by choosing the $K$-th and $K+1$-th pair whose gap is the largest. This approach, however, can be quite sensitive as different datasets can yield different patterns of distribution of the eigenvalues \citep{Zelnik:NIPS:2004}. To alleviate this, we first propose a relaxation on the assumption that $P=K$, which has been strongly supported by previous work \citep{Poon:UAI:2012}. We then introduce an unsupervised dimensionality selection algorithm for the \textit{spectral embedding} construction. We treat the eigenvalues as weights and apply a global normalization layer using the softmax function \citep{Bishop:PRM:2006}, to find the contiguous set of $P$-eigenvectors that best describes the dataset. By applying the softmax function on the eigenvalues, we are performing a nonlinear transformation into a range of normalized values between $[0,1]$, where the effect of extreme values is reduced; i.e. we are ``squashing" the eigenvalues. 
Thus, for the $M$ eigenvalues $\lambda_0,\dots,\lambda_{M-1}$ of the Laplacian matrix, we can compute their weighted eigenvalues as follows, \begin{equation} \label{eq:softmax} \rho\left(\mathbf{\lambda}\right)_i = \frac{\exp\left(\lambda_i\right)}{\sum_{j=1}^{M} \exp\left(\lambda_j\right)} \hspace{5pt} \text{for} \hspace{5pt} i=\left\{1,\dots,M\right\} \end{equation} where $M$ is the number of samples in our dataset. To find the contiguous set of leading $P$ eigenvalues, which we will now call the \textit{primary} eigenvalues, we re-normalize the weights computed from Eq. \ref{eq:softmax} to lie between the range of $[-1,1]$. By normalizing the values in this range, the weighted eigenvalues $\rho(\mathbf{\lambda})_i < 0$ correspond to the \textit{$P$-primary} eigenvalues. We list the full unsupervised spectral embedding approach in Algorithm \ref{alg:spect_dim_red}.\\
\begin{algorithm}[!t]
\renewcommand{\textbf{Input:}}{\textbf{Input:}}
\renewcommand{\textbf{Output:}}{\textbf{Output:}}
\caption{Unsupervised Spectral Embedding}
\label{alg:spect_dim_red}
\footnotesize
\begin{algorithmic}[1]
\Require A positive-definite pair-wise similarity matrix $\mathbf{S} \in \mathds{R}^{M\times M}$ of a dataset $\mathbf{\Theta} = \{\mathbf{\Sigma}_1,\dots,\mathbf{\Sigma}_M \}$ where $\mathbf{\Sigma}_i \in \mathcal{S}_{++}^{N}$.
\Ensure Spectral embedding of $\mathbf{\Theta}$ as $\mathbf{Y} \in \mathds{R}^{P\times M}$ for $P<N$
\Procedure{UnsupervisedSpectralEmbedding}{$\mathbf{S}$}
\Statex \texttt{Compute Degree Matrix} $\mathbf{D}$
\State $D_{ii} = \sum_{j=1}^{n}S_{ij} $
\Statex \texttt{Compute Symmetric Normalized Laplacian}
\State $\mathbf{L} = \mathbf{D} - \mathbf{S} $ \Comment{Unnormalized Laplacian}
\State $\mathbf{L}_{sym} = \mathbf{D}^{-1/2}\mathbf{L}\mathbf{D}^{-1/2} = \mathbf{I} - \mathbf{D}^{-1/2}\mathbf{S}\mathbf{D}^{-1/2}$
\Statex \texttt{Eigendecomposition of} $\mathbf{L}_{sym}$
\State $\mathbf{L}_{sym} = \mathbf{V}\mathbf{\Lambda} \mathbf{V}^T$
\Statex \texttt{Order Eigenvectors wrt. $\lambda_0=0 \leq \lambda_1 \leq \dots \leq \lambda_{M-1}$}
\State $\mathbf{V} = [V^{(0)},V^{(1)},\dots,V^{(M-1)}]$ \Comment{Ordered-columnwise}
\Statex \texttt{Apply SoftMax function on Eigenvalues}
\State $\rho(\mathbf{\lambda})_i \leftarrow$ Equation \ref{eq:softmax}
\Statex \texttt{Normalize weights to} $[-1,1]$
\State $\overline{\rho(\mathbf{\lambda})_i} = \texttt{normalize}(\rho(\mathbf{\lambda})_i) $
\Statex \texttt{Find $P$-Primary Eigenvalues}
\State $P \leftarrow \sum\limits_{i=1}^{N} \delta_i$ where $\delta_i=\begin{cases}
1, & \text{if $\rho(\mathbf{\lambda})_i < 0$}.\\
0, & \text{otherwise}.
\end{cases}$
\Statex \texttt{Construct datapoints on the $P$-dimensional manifold}
\State $\mathbf{V}^{p} = [V^{(0)},V^{(1)},\dots,V^{(P-1)}]$ \Comment{keep first $P$ columns}
\State $\mathbf{V}^{p}(i,j) = \mathbf{V}^{p}(i,j)/\left(\sum_j\mathbf{V}^{p}(i,j)^2\right)^{1/2}$ \Comment{re-normalize rows}
\State $\mathbf{y}_i = \mathbf{V}^{p}(i,:) \hspace{5pt} \forall i= \{1,\dots,M\}$ \Comment{vector corresponding to $i$-th row of $\mathbf{V}^{p}$}
\EndProcedure
\end{algorithmic} \end{algorithm}
\noindent \textit{(2) \underline{Cardinality}:} Once we have automatically determined a $P$-dimensional sub-space, $\mathbf{Y} \in \mathds{R}^{P \times M}$, corresponding to the set of Covariance matrices $\mathbf{\Theta} = \{\mathbf{\Sigma}_1, \dots, \mathbf{\Sigma}_M\}$, we can now apply a clustering approach on $\mathbf{Y}$ in order to find the $K$ expected clusters in $\mathbf{\Theta}$. When using EM-based algorithms such as $K$-Means or Gaussian Mixture Models (GMM), the most appropriate way to find the optimal $K$ is through model selection or heuristics. An alternative approach is to use a Bayesian non-parametric (BNP) model, namely the $\mathcal{DP}$-GMM to automatically estimate the optimal cluster $K$ from $\mathbf{Y}$. While this solves the \textit{cardinality} problem, such distance-based approaches can perform poorly when the distribution of the points of the \textit{spectral embedding}, $\mathbf{Y}$, exhibits idiosyncrasies such as high curvatures, non-uniformities, etc\footnote{This statement is justified empirically in Section \ref{sec:results}}. In such cases, one could benefit from the similarity values $\mathbf{S} \in \mathds{R}^{M \times M}$ in order to bias the clustering results and provide a robust algorithm to irregular point distributions that might be constructed from the \textit{spectral embedding}. A prior that is capable of including such side-information in a Bayesian non-parametric model is the distance-dependent Chinese Restaurant Process (dd-$\mathcal{CRP}$) \citep{Blei:JMLR:2011}. It is a distribution over partitions that allows for dependencies between data-points, by relaxing the \textit{exchangeability} assumption of the standard $\mathcal{CRP}$ (see Table \ref{tab:bnp_clust_prelims}). It can be applied to any non-temporal settings by using an appropriate distance function on the prior. 
It has been successfully used with arbitrary distances \citep{Socher:JMLR:2011}, as well as spatial distances between pixels, triangular meshes, voxels \citep{Ghosh:NIPS:2011,Ghosh:NIPS:2012,Janssen:Neuro:2015}. In this work, we propose to use the dd-$\mathcal{CRP}$ prior with our similarities $\mathbf{S} \in \mathds{R}^{M\times M}$ (computed from the original Covariance matrix space) \underline{to bias clustering} on a non-parametric mixture model applied on the sub-space of the \textit{spectral embedding} $\mathbf{Y} \in \mathds{R}^{P\times M}$.
\begin{table}[!t] \footnotesize \colorbox{violet!3}{ \resizebox{\textwidth}{!}{\begin{tabular}{p{\linewidth}}
\textbf{Chinese Restaurant Process ($\mathcal{CRP}$)} The $\mathcal{CRP}$ is a distribution over partitions of integers described by a culinary metaphor of a Chinese restaurant with an infinite number of tables. It defines a sequence of probabilities for the incoming customers (observations) to sit at specific tables (clusters) \citep{Jordan:NIPS:2005}. The first customer sits at the first table. The $i$-th customer chooses to either sit at a table with a probability proportional to the number of customers sitting at that table; otherwise she/he sits alone at a new table with a probability proportional to the hyper-parameter $\alpha$ (known as the concentration parameter). This process is summarized as follows: \begin{equation}
p(z_i = k \hspace{2pt}|\hspace{2pt} Z_{-i}, \alpha) = \begin{cases}\! \frac{M_{(k,-i)}}{\alpha + M - 1} & \text{for} \hspace{10pt} k \leq K \\ \frac{\alpha}{\alpha + M - 1} & \text{for} \hspace{10pt} k = K + 1 \\ \end{cases} \label{eq:crp_prior} \end{equation}
$M_{(k,-i)}$ is the number of customers sitting at table $k$, excluding the $i$-th customer and $M$ is the total number of customers. \\ \\
\textbf{Distance-Dependent Chinese Restaurant Process (dd-$\mathcal{CRP}$) \citep{Blei:JMLR:2011}} The dd-$\mathcal{CRP}$ focuses on the probability of customers sitting \textit{with other customers} based on an external measure of distance. In this case, the culinary metaphor becomes \textit{customer centric}. Now, the $i$-th customer has two choices, she/he can either sit with the $j$-th customer with a probability proportional to a decreasing function of a distance between them $f(d_{ij})$, or sit alone with probability proportional to $\alpha$ as follow, \begin{equation}
p(c_i = j \hspace{2pt}|\hspace{2pt}\mathbf{D}, \alpha) \propto \begin{cases}\! f(d_{ij}) & \text{if} \hspace{10pt} i \neq j \\ \alpha & \text{if} \hspace{10pt} i = j \\ \end{cases} \label{eq:ddcrp_prior} \end{equation} where $\mathbf{D} \in \mathds{R}^{M\times M}$ is a matrix of pairwise distances between $M$ customers. The smaller the distance between the customers, the more likely they are to sit together and vice versa. This can only hold if the decay function $f: \mathds{R}^{+} \rightarrow \mathds{R}^{+}$ is non-increasing and $f(\infty)=0$. Moreover, the set of \underline{customer} seating assignments $C = \{c_{1},\dots,c_{M}\}$ sampled from the dd-$\mathcal{CRP}$, can be mapped to $Z = \{z_{1},\dots,z_{M}\}$, via $Z = \mathbf{Z}(C)$, which is a recursive mapping function that gathers all linked customers. Hence, the \underline{table} (cluster) assignments emerge from the customer seating assignments. \end{tabular}}} \caption{Bayesian Non-parametric Priors for Partitions\label{tab:bnp_clust_prelims}} \end{table}
\subsection{SPCM Similarity Dependent - Chinese Restaurant Process ($\mathcal{SPCM-CRP}$) Mixture Model} \label{sec:ssd-spcm}
\paragraph{Non-parametric Prior} Following the definition of the dd-$\mathcal{CRP}$ in Table \ref{tab:bnp_clust_prelims}, it is clear to see that we can directly use our B-SPCM similarity function to generate a prior distribution over partitions by substituting $f(d_{ij})$, which should be a decaying function with \eqref{eq:bspcm}. This then yields a multinomial over \textit{customer seating} assignments conditioned on B-SPCM similarities $\mathbf{S} \in \mathds{R}^{M\times M}$, where $s_{ij}=f(\Delta_{ij},\tau)$ is generated from \eqref{eq:bspcm} for $i,j=1,\dots M$. Our SPCM-dependent $\mathcal{CRP}$ prior then becomes: \begin{equation}
p(c_i = j \hspace{2pt}|\hspace{2pt} \mathbf{S}, \alpha) \propto \begin{cases}\! s_{ij} & \text{if} \hspace{10pt} i \neq j \\ \alpha & \text{if} \hspace{10pt} i = j \\ \end{cases} \label{eq:sdcrp_prior} \end{equation} We refer to \eqref{eq:sdcrp_prior} as the $\mathcal{SPCM-CRP}$ prior. To summarize, this prior indicates the probability of $c_i = j$; in other words the probability of customer (observation) $i$ sitting together (being in the same cluster) with customer (observation) $j$, given a deterministic similarity measure between them (B-SPCM).
\paragraph{Mixture Model} The $\mathcal{SPCM-CRP}$ prior is inherently imposing clusters on a parameter space, we thus formulate a mixture model with the emissions modeled from observations on the spectral sub-space $\mathbf{Y}\in \mathds{R}^{P \times M}$. The $\mathcal{SPCM-CRP}$ mixture model, with generative distribution $ \mathcal{N}(.|\theta)$ and base distribution $ \mathcal{NIW}(\lambda_0)$, can thus be constructed as follows: \begin{equation} \begin{aligned} c_i & \sim \mathcal{SPCM-CRP}(\mathbf{S},\alpha)\\ z_i & = \mathbf{Z}(c_i)\\ \theta_k & \sim \mathcal{NIW}(\lambda_0)\\
\mathbf{y}_i|z_i = k & \sim \mathcal{N}(\theta_k) \end{aligned} \label{eq:spcm-crp-mm} \end{equation}
where each $\theta_k=(\mu_k,\Sigma_k)$ are the parameters of a Gaussian distribution, $\mathcal{N}$. For each $k$-th cluster, its parameters are drawn from a $\mathcal{NIW}$ distribution, with hyper-parameter $\{\mu_0,\kappa_0,\Lambda_0, \nu_0 \}$. A graphical representation of this proposed mixture model is illustrated in Figure \ref{fig:my_models}. In the standard $\mathcal{CRP}$ mixture model a partition $\mathbf{z}$ is directly drawn from the table assignments in \eqref{eq:crp_prior}. In this model, the prior is in terms of customer assignments, $c_i$. Nevertheless, these indirectly determine the cluster assignment $z_i$, through the mapping function $\mathbf{Z}(C):C \rightarrow Z$, which recovers the connections of $c_i$, i.e. the emerged table assignments. It must be noted that this mixture model is not strictly a \textit{Bayesian} non-parametric model, as the $\mathcal{SPCM-CRP}$ is not generated from a \textit{random measure}. However, it still imposes a distribution over infinite partitions, thus keeping the \textit{non-parametric} nature of the $\mathcal{CRP}$ \citep{Blei:JMLR:2011}. For our clustering application, due to conjugacy, we can integrate out the model parameters $\Theta$ and estimate the posterior distribution of the customer assignments $p(C | \mathbf{Y}, \mathbf{S}, \alpha, \lambda)$, \begin{equation} \label{eq:full-post}
p(C | \mathbf{Y}, \mathbf{S}, \alpha, \lambda) = \frac{p(C \hspace{2pt}|\hspace{2pt} \mathbf{S}, \alpha) p(\mathbf{Y}| \mathbf{Z}(C), \lambda)}{\sum_C p(C \hspace{2pt}|\hspace{2pt} \mathbf{S}, \alpha) p(\mathbf{Y}| \mathbf{Z}(C), \lambda)}. \end{equation}
The prior probability $p(C|\mathbf{S},\alpha)$ is determined by the $\mathcal{SPCM-CRP}$ \eqref{eq:sdcrp_prior} and can be computed as follows, \begin{equation}
p(C \hspace{2pt}|\hspace{2pt} \mathbf{S}, \alpha) = \prod_{i=1}^{M} p(c_i = j \hspace{2pt}|\hspace{2pt}\mathbf{S}, \alpha), \end{equation} where, \begin{equation}
p(c_i = j \hspace{2pt}|\hspace{2pt} \mathbf{S}, \alpha) = \begin{cases}\! \frac{s_{ij}}{\sum_{j=1}^{M}s_{ij} + \alpha} & \text{if} \hspace{10pt} i \neq j \\ \frac{\alpha}{M + \alpha} & \text{if} \hspace{10pt} i = j \\ \end{cases} \label{eq:spcmcrp_prior} \end{equation} The likelihood of the partition $Z=\mathbf{Z}(C)$ is computed as the product of the probabilities of the customers $\mathbf{Y}$ sitting at their assigned tables $Z$, \begin{equation}
p(\mathbf{Y}|\mathbf{Z}(C), \lambda) = \prod_{k=1}^{|\mathbf{Z}(C)|} p (\mathbf{Y}_{\mathbf{Z}(C)=k}|\lambda) \label{eq:spcm-crp_lik} \end{equation}
where $|\mathbf{Z}(C)|$ denotes the number of unique tables emerged from $\mathbf{Z}(C)$; i.e. $K$ in a finite mixture model, and $\mathbf{Z}(C)=k$ is the set of customers assigned to the $k$-th table. Further, each marginal likelihood in \eqref{eq:spcm-crp_lik} has the following form, \begin{equation} \label{eq:integral}
p (\mathbf{Y}_{\mathbf{Z}(C)=k}|\lambda) = \int\limits_{\theta}^{} \left( \prod_{i \in \mathbf{Z}(C)=k} p\left( \mathbf{y}_{i} \hspace{2pt}|\hspace{2pt} \theta \right) \right) p\left( \theta\hspace{2pt}|\hspace{2pt}\lambda \right)d\theta. \end{equation}
Since $p(\mathbf{y}_i \hspace{2pt} |\hspace{2pt} \theta) = \mathcal{N}(\mathbf{y}_i \hspace{2pt} | \mu, \Sigma)$ and $p(\theta \hspace{2pt} |\hspace{2pt} \lambda) = \mathcal{NIW}(\mu, \Sigma \hspace{2pt} | \hspace{2pt} \lambda)$, \eqref{eq:integral} has an analytical solution which can be derived from the posterior $p(\mu, \Sigma |\mathbf{Y})$. The full posterior, \eqref{eq:full-post}, is, however, intractable, as the combinatorial sum in the denominator increases exponentially wrt. $M$. \begin{figure}
\caption{\small Graphical representation of \textbf{our proposed clustering and segmentation approaches} the \textbf{SPCM-CRP} Mixture model. \textbf{Colored gray} nodes correspond to observed variables, \underline{black nodes} correspond to latent variables and \underline{small gray nodes} correspond to hyper-parameters. }
\label{fig:my_models}
\end{figure}
Nevertheless, it can be approximated via Gibbs sampling, where latent variables $C$, are sampled from the following posterior distribution,
\begin{equation} \label{eq:cond_spcm_crp} \begin{aligned}
p(c_i = j \hspace{2pt}|\hspace{2pt} C_{-i},\mathbf{Y},\mathbf{S}, \alpha, \lambda) \hspace{2pt}
\propto\hspace{2pt}& \\ \textcolor{red}{\underbrace{p(c_i = j \hspace{2pt}|\hspace{2pt}\mathbf{S}, \alpha) }_{\text{\shortstack{Similarities in \\ Original Space}}}} & \textcolor{blue}{\underbrace{p(\mathbf{Y}\hspace{2pt}|\hspace{2pt}\mathbf{Z}(c_i = j \cup C_{-i}), \lambda)}_{\text{\shortstack{Observations in \\ Spectral Embedding}}}} \end{aligned} \end{equation}
with $C_{-i}$ indicating the customer seating assignments for all customers except the $i$-th. \eqref{eq:cond_spcm_crp} holds some particularities as opposed to typical \underline{collapsed conditionals} and typical \underline{mixture models}. First of all, we can see that the prior is represented through customer assignments, while the likelihood is in terms of table assignments. Moreover, due to our adaptation of the dd-$\mathcal{CRP}$, the \textcolor{red}{prior} uses \textcolor{red}{similarity measures from the original space} of the Covariance matrices, while the \textcolor{blue}{likelihood} is computed solely on the observations which live in the \textcolor{blue}{lower-dimensional spectral embedding} constructed from the pair-wise Similarity matrix $\mathbf{S}$ of Covariance matrices. Finally, the likelihood is computed for all points $\mathbf{Y}$, rather than just for the sampled point $\mathbf{y}_i$; this is due to the fact that the partition of the dataset depends on customer assignments and not table assignments. In the $\mathcal{CRP}-MM$, sampling for new \underline{table} assignments does not affect the overall partition of the data because we assume that each customer is conditionally independent of the other customers' assignment. In the $\mathcal{SPCM-CRP}$ mixture this is not the case, sampling for new \underline{customer} assignments directly affects the partition in several ways; e.g. a table could be split or two tables could be merged; all because the customers are not conditionally independent of the other customers' assignments. Since the $\mathcal{SPCM-CRP}$ has the same form as the dd-$\mathcal{CRP}$, we adapt the Collapsed Gibbs sampler proposed by \cite{Blei:JMLR:2011} for the original dd-$\mathcal{CRP}$ model which takes these special cases into consideration.
\begin{algorithm}[!t]
\renewcommand{\textbf{Input:}}{\textbf{Input:}}
\renewcommand{\textbf{Output:}}{\textbf{Output:}}
\caption{Spectral Non-Parametric Clustering of Covariance Matrices}
\footnotesize
\label{alg:nonparam-clustering}
\begin{algorithmic}[1]
\Require $\mathbf{\Sigma} = \{\Sigma_1, \dots, \Sigma_N\}$ where $\Sigma \in \mathds{R}^{D \times D}, \Sigma \succeq 0, \Sigma =\Sigma^T $ \Comment{Data}
\Statex \hspace{15pt} $\tau, \alpha, \lambda = \{\mu_0, \kappa_0, \Lambda_0, \nu_0\}$ \Comment{Hyper-parameters}
\Ensure $\Psi=\{K,C,Z,\Theta\}$ \Comment{Inferred Clusters and Cluster indicators}
\Statex Compute pair-wise B-SPCM similarity values (Eq.\ref{eq:bspcm})
\State $\mathbf{S}\in\mathds{R}^{N\times N} \leftarrow s_{ij} = f(\Sigma_i, \Sigma_j, \tau) \quad \forall i,j \in \{1,\dots,N\}$
\Statex Unsupervised Spectral Embedding (Alg.\ref{alg:spect_dim_red})
\State $\mathbf{Y} \in \mathds{R}^{P \times N} \leftarrow$ \texttt{UnSpectralEmb($\mathbf{S}$)}
\Procedure{SPCM-CRP-Gibbs-Sampler}{$\mathbf{Y}, \mathbf{S}, \alpha, \lambda$}
\State Set $\Psi^{t-1} = \{C,K,Z\}$ where $c_i = i$ for $C = \{c_1, \dots, c_N\}$
\For{\texttt{iter t = 1 to T}}
\State Sample a random permutation $\tau(\cdot)$ of integers $\{1, \dots, N \}$.
\For{\texttt{obs i = $\tau(1)$ to $\tau(N)$}}
\State \textbf{Remove} customer assignment $c_i$ from the partition
\If {$\mathbf{Z}(C_{-i}) \neq \mathbf{Z}(C)$}
\State \textbf{Update} likelihoods according to Eq. \ref{eq:spcm-crp_lik}
\EndIf
\State \textbf{Sample} new cluster assignment
\State \texttt{$c_i^{(i)} \sim p(c_i = j | C_{-i}, \mathbf{Y}_{-i}, \mathbf{S}, \alpha)$} (Eq. \ref{eq:spcmcrp_cond_final})
\If {$\mathbf{Z}(C_{-i}) \neq \mathbf{Z}(c_i =j \cup C_{-i})$}
\State \textbf{Update} table assignments $Z$.
\EndIf
\EndFor
\State \textbf{Resample} table parameters $\Theta$ from $\mathcal{NIW}$ posterior
\State update equations \eqref{eq:niw_updates}.
\EndFor
\EndProcedure
\end{algorithmic} \end{algorithm}
\subsection{Collapsed Gibbs Sampler for SPCM-CRP Mixture Model} \noindent The conditional in \eqref{eq:cond_spcm_crp} is sampled via a two-step procedure:\\ \noindent \underline{Step 1.} The $i$-th customer assignment is removed from the current partition $\mathbf{Z}(C)$. If this causes a change in the partition; i.e. $\mathbf{Z}(C_{-i}) \neq \mathbf{Z}(C)$; the customers previously sitting at $\mathbf{Z}(c_i)$ are split and the likelihood must be updated via \eqref{eq:spcm-crp_lik}.\\ \noindent \underline{Step 2.} A new customer assignment $c_i$ must be sampled, by doing so a new partition $\mathbf{Z}(c_i =j \cup C_{-i})$ is generated. This new customer assignment might change (or not) the current partition $\mathbf{Z}(C_{-i})$. If $\mathbf{Z}(C_{-i}) = \mathbf{Z}(c_i =j \cup C_{-i})$, the partition was unchanged and the $i$-th customer either joined an existing table or sat alone. If $\mathbf{Z}(C_{-i}) \neq \mathbf{Z}(c_i =j \cup C_{-i})$, the partition was changed, specifically $c_i = j$ caused two tables to merge, table $l$ which is where the $i$-th customer was sitting prior to step 1 and table $m$ is the new table assignment emerged from the new sample $\mathbf{Z}(c_i = j)$. Due to these effects on the partition, instead of explicitly sampling from Eq. \ref{eq:cond_spcm_crp}, \cite{Blei:JMLR:2011} proposed to sample from the following distribution, \begin{equation}
p(c_i = j \hspace{2pt}|\hspace{2pt} C_{-i},\mathbf{Y},\mathbf{S}, \alpha, \lambda) \propto
\begin{cases}\! p(c_i = j | \mathbf{S}, \alpha)\Lambda(\mathbf{Y}, C, \lambda) & \text{if} \hspace{2pt} \texttt{cond} \\
p(c_i = j | \mathbf{S}, \alpha) & \text{otherwise}, \\ \end{cases} \label{eq:spcmcrp_cond_final} \end{equation} where \texttt{cond} is the condition of $c_i = j \hspace{2pt} \text{merges tables} \hspace{2pt} m \hspace{2pt} \text{and} \hspace{3pt} l$ and $\Lambda(\mathbf{Y}, C, \lambda)$ is equivalent to, \begin{equation}
\Lambda(\mathbf{Y}, C, \lambda) = \frac{p(\mathbf{Y}_{(\mathbf{Z}(C)=m \hspace{2pt} \cup \hspace{2pt} \mathbf{Z}(C)=l )}|\lambda)}{p(\mathbf{Y}_{\mathbf{Z}(C)=m}|\lambda)p(\mathbf{Y}_{\mathbf{Z}(C)=l}|\lambda)}. \end{equation}
\noindent This procedure is iterated $T$ times, once it converges, we can sample the table parameters $\Theta = \{\theta_1, \dots, \theta_K\}$ through the posterior of the $\mathcal{NIW}$ distribution \citep{Sudderth:PhD:2006}, refer to Appendix \ref{app:Sample_NIW} for the exact equations. The complete non-parametric clustering algorithm is summarized in Algorithm \ref{alg:nonparam-clustering}, detailing the Collapsed Gibbs sampler steps\footnote{A MATLAB implementation of this clustering approach can be found in \underline{\url{https://github.com/nbfigueroa/SPCM-CRP}}.}.
\section{Unsupervised Joint Segmentation and Transform-Invariant Action Discovery}\label{sec:tgau_bp_hmm}
In this section, we propose a coupled model that addresses the problem of joint segmentation and \textit{transform-invariant} action discovery. To recall, given a dataset of $M$ demonstrations of continuous $N$-dimensional trajectories (i.e. time-series) composed by sequences of multiple \textbf{common} actions, we seek to individually segment each trajectory, while discovering the \textbf{common (i.e. transform-invariant)} actions that describe each segment. We adopt a probabilistic modeling approach and use Hidden Markov Models (HMM) to extract such information. Formulated as an HMM, each trajectory is considered as a sequence of observations $\mathbf{x} = \{\mathbf{x}_t\}_{t=0}^{T-1}$ for $\mathbf{x}_t \in \mathds{R}^{N}$ over $T$ discrete time steps, that are independently sampled and conditioned on an underlying hidden state $\mathbf{s} = \{s_t\}_{t=0}^{T-1}$, that evolves through a first-order temporal Markov process, modeled through a transition probability matrix $\mathbf{\pi} \in \mathds{R}^{K \times K}$. Such hidden states indicate the \textbf{actions} present in each trajectory parametrized by $K$ emission models $\Theta = \{\theta_1, \dots, \theta_K\}$, while the transitions between hidden states $\mathbf{s}_t \rightarrow \mathbf{s}_{t+1}$ denote the \textbf{segmentation points}.
\subsection{Challenges in HMM-based segmentation}
Due to their temporal Markovian assumption and flexibility of modeling different stochastic processes HMMs have become the staple method for analyzing time-series. Nevertheless, applying them to unstructured, unlabeled and transformed data such as our target application becomes quite challenging. We list the three main issues and our proposed solution:
\noindent \textit{(1) \underline{Cardinality}:} As in the previous section, finding the optimal number of hidden states $K$, through the classic HMM EM-based estimation approach relies on model selection and heuristics. For the specific case of HMMs, such external model fitting approaches tend to over-fit the time-series; i.e. either over-segment or under-segment.
\noindent \textit{(2) \underline{Fixed Switching Dynamics}:} When modeling \textit{multiple} related time-series with a HMM, the main assumption is that the time series are tied together with the same set of transition dynamics and emission parameters. This might come as a nuisance when we have multiple time-series which are indeed related, but do not necessarily follow the same switching dynamics or use the same emission models in each time-series.
\noindent \textit{(3) \underline{Transform-Invariance}:} The emission models, $\Theta$, of an HMM are always assumed to be unique. In other words, they are not expected to have any correlations, nor are they invariant to transformations or variations. Moreover, the standard HMM assumption is that the generative distribution parameters associated to the hidden state $s_t$ (i.e. $\theta_k=\{\mu_k,\Sigma_k\}$ for $s_t=k$) are identical between time series.\\
\noindent Challenge (1) is known to be solved by formulating an HMM with the Bayesian Non-Parametric (BNP) treatment. A BNP formulation of an HMM sets an infinite prior on $\mathbf{\pi}$, namely the Hierarchical Dirichlet Process (HDP) \citep{Teh:HDP:2004} or its state-persistent variant the sticky HDP-HMM \citep{Fox:ICML:2008}; and consequently, priors on the associated emission model parameters $\Theta$. This model, however, follows the fixed switching dynamics and hence, cannot account for challenge (2). This strong assumption was then relaxed by \cite{Fox:PhD:2009}, who proposed using an \textit{infinite feature model}, namely the Indian Buffet Process ($\mathcal{IBP}$) (see Table \ref{tab:ibp}), as a prior on the transition distribution, resulting in a collection of $M$ Bayesian HMMs with independent transition distributions $\mathbf{\pi} =\{\pi^{(1)}, \dots, \pi^{(M)}\}$ with partially shared emission models. Regarding challenge (3), to the best of our knowledge, there is no HMM variant that has addressed it, nor that has addressed challenges (1-2-3) in a joint fashion. \begin{figure}
\caption{\footnotesize Segmentation of the 2D dataset from Figure \ref{fig:problem2} with $\mathcal{IBP}$-HMM. Colors correspond to feature labels. As can be seen, $\mathcal{IBP}$-HMM is capable of extracting correct segmentation and \textit{transform-dependent} but cannot associate the \textit{transform-invariant} features; i.e. $\theta_1 \leftrightarrow \theta_4$ and $\theta_2 \leftrightarrow \theta_3$.
}
\label{fig:ibp-ts-segmentation}
\end{figure} The toy example described in Figure \ref{fig:problem2} is a clear motivation for the need of an HMM variant that can handle such challenges; this is relevant not only for our target application, but also for time-series analysis of sensor data in general. Imagine that each of these time-series are observations from spatial tracking sensors. If the sensors are subject to any type of motion and are not properly fixed to the surface, they are surely going to be subject to some sort of transformation, such as translations or rotations. Any variant of an HMM, be it classic HMM with EM-based estimation, $\mathcal{HDP}$-HMM or $\mathcal{IBP}$-HMM, might recover the correct segmentation points, yet, the number of emission models $K$ and consequently the number of hidden states will be \textit{transform-dependent}, as shown in Figure \ref{fig:ibp-ts-segmentation}\footnote{This claim is supported empirically in the testing scripts provided in the accompanying code:\\ \url{https://github.com/nbfigueroa/ICSC-HMM}}. Even though the latter approach is capable of capturing the inclusion of each emission model in each time-series, it is not equipped with finding the similarities between them. Thankfully, finding these similarities was the desiderata of the approaches presented in Sections \ref{sec:full_spcm} and \ref{sec:spcm_crp}. Hence, in this section, \textbf{our third and final contribution}, we focus on providing \textit{transform-invariance} to the already flexible $\mathcal{IBP}$-HMM by coupling it with our proposed SPCM-$\mathcal{CRP}$ mixture model. In the following, we summarize a naive approach towards providing \textit{transform-invariance} for the $\mathcal{IBP}$-HMM which motivates our proposed $\mathcal{IBP}$ Coupled SPCM-CRP Hidden Markov Models (ICSC-HMM).
\begin{table}[!t]
\footnotesize
\centering
\colorbox{violet!3}{
\resizebox{\textwidth}{!}{\begin{tabular}{p{\linewidth}}
\noindent \textbf{Beta-Bernoulli Process} $\mathcal{BP}-BeP(M,c, \gamma)$: The beta-Bernoulli process is a stochastic process which provides a BNP prior for models involving collections of infinite binary-valued features. A draw from the beta process yields an infinite collection of probabilities in the unit interval, $B \sim \mathcal{BP}(c, B_0 )$ for a concentration parameter $c > 0$ and base measure $B_0$ on $\Theta$ with total mass $B_0(\Theta) = \gamma$. In other words, it provides an infinite collection of coin-tossing probabilities, that are represented by a set of global weights that determine the potentially infinite number of features, $B = \sum_{k=1}^{\infty} \omega_k \delta_{\theta_k}$, represented with $\theta_k$ and $\omega_k \in (0,1)$ indicate the inclusion of the $k$-th feature. The draw $B$ is then linked to $M$ Bernoulli process draws to generate the binary-valued feature matrix $\mathbf{F}$; this process is summarized as follows:
\begin{equation}
\begin{aligned}
\label{eq:BP}
B | B_0 & \sim \mathcal{BP}(c, \gamma B_0 )\\
X_i | B & \sim \text{BeP}(B) \quad i = 1, \cdots , M
\end{aligned}
\end{equation}
The draw $B$ provides the set of global weights for the potentially infinite number of features. Then, for each $i$-th time series, an $X_i = \sum_{k=1}^{\infty} f_{ik} \delta_{\theta_k}$, is drawn from a Bernoulli Process (BeP). Each $X_i$ is thus used to construct the binary vector $\mathbf{f}_i$ through independent Bernoulli draws $f_{ik} \sim \text{Bernoulli}(\omega_k)$. We can denote \eqref{eq:BP} as $\mathbf{F} \sim \mathcal{BP}-BeP(M, c, \gamma)$. \\ \\
\noindent \textbf{Indian Buffet Process} $\mathcal{IBP}$: As proven in \cite{Thibaux}, marginalizing out $B$ and setting $c=1$ induces the predictive distribution of the Indian Buffet Process ($\mathcal{IBP}$). It is described through a culinary metaphor consisting of an infinitely long buffet line of dishes, or \textit{features}. The first arriving customer (i.e. object) chooses Poisson($\gamma$) dishes. The following $i$-th customers select a previously tasted dish $k$ with probability $\frac{m_k}{i}$ proportional to the number of previous customers $m_k$ that have tried it, and also samples Poisson($\frac{\gamma}{i}$) new dishes. Assuming that the $i$-th object is the last customer, the following conditional distribution can be derived for the $\mathcal{IBP}$:
\begin{equation}
p(f_{ik} = 1 \hspace{2pt}|\hspace{2pt} \mathbf{f}_{-i,k}) =
\begin{cases}\!
\frac{m_{-i,k}}{M} & \text{for} \hspace{10pt} k \quad \text{with} \quad m_{-i,k}>0 \\
\end{cases}
\label{eq:ibp_prior}
\end{equation}
Moreover, the number of new features associated with the $i$-th object is drawn from a
$\text{Poisson}(\gamma/M)$ distribution.
\end{tabular}}}
\caption{Bayesian Non-Parametric Priors for Feature Models \label{tab:ibp}}
\end{table}
\subsection{Naive Transform-Invariance for the $\mathcal{IBP}$-HMM } \noindent \textit{$\mathcal{IBP}$-HMM.} Before describing a \textit{naive} approach to impose \textit{transform-invariance} on the $\mathcal{IBP}$-HMM, we summarize it briefly. As mentioned earlier, the $\mathcal{IBP}$-HMM is a collection of $M$ Bayesian HMMs, each with its own independent transition distribution $\pi^{(i)}$ for $i=\{1,\dots,M\}$ time-series defined over an unbounded set of shared emission models $\Theta = \{\theta_1,\dots,\theta_K\}$ for $K \rightarrow \infty$. The emission models that are used in each time-series are defined by the binary-valued matrix $\mathbf{F} \in \mathbb{I}^{M\times K}$, sampled from a beta-Bernoulli process $\mathcal{BP}-BeP(M,1, \gamma)$ (see Table \ref{tab:ibp}). The rows of $\mathbf{F}$ correspond to the time-series, while the columns indicate the inclusion of the $k$-th feature (i.e. emission model). Hence, given $\mathbf{f}_i$ we can define a feature-constrained transition distribution $\pi^{(i)}$, by first defining a doubly infinite collection of random variables $\eta_{jk}^{(i)} \sim \text{Gamma}(\alpha + \kappa\delta_{jk},1)$, where $\delta_{jk}$ is the Kronecker delta function and $\kappa$ is the sticky hyper-parameter introduced in \cite{Fox:ICML:2008} which places extra expected mass on self-transitions to induce state persistence. The transition variables $\mathbf{\eta}^{(i)}$ are normalized over the set of emission models present in the $i$-th time-series $\mathbf{f}_i$ to generate the corresponding transition distributions $\pi_{jk}^{(i)}$. Once we sample the corresponding transition distributions $\pi^{(i)}$ for each $i$-th HMM, we assign a state $s_{t}^{(i)}=k$ at each $t$-th time step, which determines the model parameters $\theta_k$ that generated $\mathbf{x}_{t}^{(i)}$ (observed data of $i$-th time series at step $t$). 
The full $\mathcal{IBP}$-HMM is summarized as follows: \begin{equation} \begin{aligned} \label{eq:IBP-HMM} \mathbf{F} & \sim \mathcal{BP}-BeP(M, 1, \gamma) \\ \eta_{jk}^{(i)} & \sim \text{Gamma}(\alpha_{b} + \kappa\delta_{jk},1) \\ s_t^{(i)} & \sim \mathbf{\pi}_{s_{t-1}}^{(i)} \qquad \text{with} \quad \pi_{jk}^{(i)} = \frac{\eta_{jk}^{(i)}f_{k}^{(i)}}{\sum_{n} f_{n}^{(i)}\eta_{jn}^{(i)}}\\
\mathbf{x}_{t}^{(i)}|s_{t}^{(i)}& =k \sim \mathcal{N}(\theta_k) \quad \text{with} \quad \theta_k \sim \mathcal{NIW}(\lambda) \end{aligned} \end{equation} where each $\theta_k=(\mu_k,\Sigma_k)$ are the parameters of a Gaussian distribution, which are drawn from a $\mathcal{NIW}$ distribution, with hyper-parameter $\lambda =\{\mu_0,\kappa_0,\Lambda_0, \nu_0 \}$. Posterior inference of the latent variables of the $\mathcal{IBP}$-HMM, namely $\{\mathbf{F},\mathbf{S},\mathbf{\Theta}\}$ is performed through a Markov Chain Monte Carlo (MCMC) method that estimates the joint posterior with collapsed $\mathbf{\Theta}$, namely $p(\mathbf{F},\mathbf{S},\mathbf{X})$.
This is achieved by alternating between (i) re-sampling $\mathbf{F}$ given the dynamics parameters $\{\eta^{(i)}, \Theta\} $ and observations $\mathbf{X}$ and (ii) sampling $\{\eta^{(i)}, \Theta\} $ given $\mathbf{F}$ and $\mathbf{X}$, accomplished by interleaving Metropolis-Hastings and Gibbs sampling updates, as listed in Algorithm \ref{alg:nonparam-segmentation}. For sake of simplicity, we have abstained from describing each step from the $\mathcal{IBP}$-HMM sampler, as it is not our contribution and they are thoroughly described in \cite{Fox:PhD:2009} and \cite{Hughes:NIPS:2012}. Instead, we focus on the sampling steps that we add for our proposed coupled model, which are highlighted in \textcolor{blue}{blue} and described in Section \ref{sec:ICSC-HMM}.\\
\noindent \textit{Naive Approach.} To address the problem of \textit{transform-invariance} in the $\mathcal{IBP}$-HMM we can use the proposed B-$\mathcal{SPCM}$ similarity function \eqref{eq:bspcm} to \textit{merge} the features (i.e. emission models $\theta_k$) that are similar and directly generate the \textit{transform-invariant} state labels. To do so, one could use split-merge moves based on sequential allocation, proposed for the $\mathcal{IBP}$-HMM \cite{Hughes:NIPS:2012}. This is a re-sampling step that proposes new candidate configurations for feature matrix $\mathbf{F}$ and state sequences $\{s^{(i)}\}_{i=1}^M$ via accepts/rejects of Metropolis-Hastings updates. The proposals indicate if one should either merge features which are \textit{similar} or split features which explain the data in a better way separately. The measure of similarity used here is the \textit{marginal likelihood ratio}.\footnote{The algorithmic details of this sampling step are presented in Appendix \ref{app:split-merge}.} One could easily substitute this measure of similarity with our proposed similarity function \eqref{eq:bspcm} in order to force the merging of \textit{transform-dependent} features into one \textit{transform-invariant} feature. This approach might seem to solve our problem from the \textit{feature model} point-of-view, as the feature matrix $\mathbf{F}$ will now have a number of columns equal to the \textit{transform-invariant} emission models, however, it has two main disadvantages:\\
\noindent (1) \textit{Computational burden}. The MCMC sampling scheme used to infer the latent variables of the $\mathcal{IBP}$-HMM involves mostly collapsed sampling steps \citep{Fox:NIPS:2009}. For the split/merge moves, $\Theta$ is collapsed away and solely the data assigned to the features is used to compute the marginal likelihoods. Hence, re-sampling of $\mathbf{F}$ and $\{s^{(i)}\}_{i=1}^M$ is only dependent on their current values and the data itself, not on the emission model parameters associated with the features. Thus, in order for us to use \eqref{eq:bspcm} we must sample $\Theta$ from the current state sequence $\{s^{(i)}\}_{i=1}^M$ and then apply the split/merge moves. Something that is sought to be avoided in the original MCMC sampler, i.e. $\Theta$ is sampled until the end.
\noindent (2) \textit{Meaningless Emission Models}. While merging two features which are \textit{transform-dependent}, the new emission model parameters $\Theta$ will not be the \textit{transform-invariant} emission model, but rather a meaningless Gaussian distribution encapsulating the two sets of transformed data as shown in Figure \ref{fig:wrong_emission} for our illustrative example. This might not be a problem if the \textit{transformed} data-points of a similar nature are far from each other; however, if they are close, we are vulnerable to merging features that are not similar at all, leading to i) incorrect and meaningless emission models and ii) loss of correct segmentation points. Due to this observation, we posit that such \textit{transform-invariance} cannot be imposed internally in the inference scheme of the HMM parameters, but rather externally, once the emission model parameters $\Theta$ are properly estimated.
\begin{figure}
\caption{\small Resulting emission model (depicted by grey ellipse) from \textit{naive merging} of transform-dependent features $\theta_2$ and $\theta_3$ from the toy example. }
\label{fig:wrong_emission}
\end{figure}
\subsection{The IBP Coupled SPCM-CRP Hidden Markov Model} \label{sec:ICSC-HMM} We impose \textit{transform-invariance} in the $\mathcal{IBP}$-HMM by proposing a \textit{topic-model-inspired} coupling between the $\mathcal{SPCM-CRP}$ mixture model and the $\mathcal{IBP}$-HMM, as shown in Figure \ref{fig:something} (left). The idea is quite straightforward: we want to jointly estimate the \textit{transform-dependent} feature matrix $\mathbf{F}$ as well as clusters that might arise from similarities in these features; without the two models hindering each other's processes. This will lead to \textit{transform-dependent} state sequences $S = \{s^{(i)}\}_{i=1}^M$ as well as \textit{transform-invariant} state sequences $Z = \{z^{(i)}\}_{i=1}^M$, as shown in Figure \ref{fig:problem2}. One might argue that these two sets of \textit{state labels} could be achieved in a decoupled manner; i.e. estimate $\Theta$ with the original $\mathcal{IBP}$-HMM and then cluster the emission models with the $\mathcal{SPCM-CRP}$ mixture model. This is, of course, a valid approach which will be evaluated in Section \ref{sec:results}. By coupling them, we can alleviate the need for setting the hyper-parameters of the $\mathcal{IBP}$-HMM, namely $\alpha_b, \kappa$ and $\gamma$, which correspond to the hyper-parameters for the Dirichlet transition distribution used to sample $\eta^{(i)}$ and the hyper-parameter for the beta-Bernoulli process used to sample $\mathbf{f}_i$; both of which have a direct influence on the feature-constrained transition distributions $\pi^{(i)}$. The proposed coupling results in \textbf{our final contribution} which we refer to as the IBP Coupled SPCM-CRP Hidden Markov Model (ICSC-HMM), shown in Figure \ref{fig:something} (left).
\begin{figure*}
\caption{\small (left) Graphical representation of our $\mathcal{IBP}$-HMM feature clustering idea and (right) the $\mathcal{IBP}$ Coupled SPCM-CRP Hidden Markov Models (\textbf{ICSC-HMM}). \textbf{Colored gray} nodes correspond to observed variables, \underline{black nodes} correspond to latent variables and \underline{small gray nodes} correspond to hyper-parameters. }
\label{fig:something}
\end{figure*}
We begin our model coupling by placing a prior on the $\mathcal{IBP}$ hyper-parameter $\gamma$, parametrized by the number of clusters $K_{Z}$ obtained by sampling $Z$ from the SPCM-$\mathcal{CRP}$ mixture model on $\Theta$. The conditional posterior of $\mathbf{F}$ can be expressed as follows: \begin{equation} \label{eq:ibp_cond}
p(\mathbf{F} | \gamma) \propto \gamma^{K_+}\exp\left(-\gamma H_M \right) \end{equation} where $H_M$ is the $M$-th harmonic number, defined by $H_M = \sum_{j=1}^{M}\frac{1}{j} $ and $K_+$ is the number of \textit{unique features} (i.e. those that are unique for each time-series). \eqref{eq:ibp_cond} takes this form due to the $\mathcal{IBP}$ marginal distribution \citep{Thibaux}. Hence, placing a conjugate Gamma prior on $\gamma \sim \text{Gamma}(a_{\gamma},b_{\gamma})$ yields a closed form solution to its posterior distribution, as follows: \begin{equation} \begin{split}
p(\gamma | \mathbf{F}, a_{\gamma},b_{\gamma}) & \propto p(\mathbf{F} | \gamma)p(\gamma | a_{\gamma},b_{\gamma})\\ & \propto \gamma^{K_{+}}\exp\left(-\gamma H_M\right)\frac{\gamma^{a_{\gamma}-1}\exp(-b_{\gamma}\gamma)}{\Gamma(a_{\gamma})}\\ & = \text{Gamma}(a_{\gamma} + K_{+}, b_{\gamma} + H_M)\\ \end{split} \label{eq:gamma_distsadsd} \end{equation} where $K$ is the total number of current features \citep{Fox:PhD:2009}. The task of defining the values for the hyper-priors $a_{\gamma},b_{\gamma}$ is often overlooked in the literature and set empirically. In this work, rather than setting the hyper-priors to some empirically fixed values, we adopt a data-driven approach which defines $a_{\gamma},b_{\gamma}$ at each iteration as follows: \begin{equation} \begin{split}
a_{\gamma} =b_{\gamma}=\frac{K}{K_{Z}M} \quad \text{for} \quad K_{Z} = |\mathbf{Z}(C)| \end{split} \label{eq:gamma_coupling} \end{equation} The intuition behind \eqref{eq:gamma_coupling} is to set $a_{\gamma},b_{\gamma}$ as the ratio of \textit{transform-invariant} features in the total set of features, scaled by the number of time-series $M$. This ratio will implicitly control the number of new features to be sampled for the $\mathcal{IBP}$ prior. When $K_Z \ll K$, this indicates that the currently sampled features are very similar. In this situation, the ratio is high, inducing $\gamma$ to be higher, in order to increase the number of unique features and consequently the columns of $\mathbf{F}$. Conversely, when $K_Z = K$, it means that all features are different and hence, it is not necessary to push towards sampling more unique features. Regarding the hyper-parameters on $\eta^{(i)}$, namely $\alpha_b$ and $\kappa$, we also place Gamma priors: \begin{equation} \begin{split} \alpha_b & \sim \text{Gamma}(a_{\alpha_b}, b_{\alpha_b})\\ \kappa & \sim \text{Gamma}(a_{\kappa}, b_{\kappa} )\\ \end{split} \label{eq:kappa_coupling} \end{equation}
In this case, we couple the models by setting $a_{\alpha_b}, b_{\alpha_b}$
equivalent to \eqref{eq:gamma_coupling} and $a_{\kappa}, b_{\kappa} = \frac{K}{K_{Z}}$; since $\kappa$ is the sticky parameter inducing state-persistence, we force it to be higher than the concentration parameter $\alpha_b$. As discussed in \cite{Fox:PhD:2009}, sampling from $\eta^{(i)}$ is a non-conjugate process and thus does not have a closed form solution. Instead we must apply Metropolis-Hastings steps to iteratively re-sample $\alpha_{b}|\kappa$ and $\kappa|\alpha_{b}$; a detailed description of these steps is provided in \cite{Fox:PhD:2009}. As in the $\mathcal{IBP}$-HMM, posterior inference of the full model, namely $\{\mathbf{F},\mathbf{S},\mathbf{\Theta}, \mathbf{Z},\mathbf{\Phi}\}$ is performed through an MCMC sampler that estimates the joint posterior of the latent variables with collapsed $\mathbf{\Theta}$ and $\mathbf{\Phi}$, namely: \begin{equation} \begin{split}
p(\mathbf{F},S,\mathbf{X}, Z, \mathbf{Y}) \propto \textcolor{blue}{\underbrace{p(C \hspace{2pt}|\hspace{2pt} \mathbf{S}, \alpha) p(\mathbf{Y}| \mathbf{Z}(C), \lambda_{\phi})}_{\text{SPCM-$CRP$ Mixture}}} \textcolor{red}{\underbrace{p(\mathbf{F} | \gamma)}_{\mathcal{IBP}}}\\
\textcolor{violet}{\underbrace{\prod_{i=1}^{M}p\left(\mathbf{s}^{(i)}| \mathbf{f}_i, \alpha_b, \kappa \right) \prod_{i=1}^{M}\prod_{t=1}^{T}p\left(s_t^{(i)}|s_{t-1}^{(i)}\right)p(\mathbf{x}_{t}^{(i)}|s_t^{(i)},\theta_{s_t^{(i)}}, \lambda_{\theta})}_{\text{$M$ Bayesian HMM's}}}. \end{split} \label{eq:icsc-joint} \end{equation}
\begin{algorithm}[!t]
\renewcommand{\textbf{Input:}}{\textbf{Input:}}
\renewcommand{\textbf{Output:}}{\textbf{Output:}}
\caption{Joint Segmentation and Transform-Invariant Action Discovery MCMC Sampler}
\footnotesize
\label{alg:nonparam-segmentation}
\begin{algorithmic}[1]
\Require $\mathbf{X} = \{\mathbf{x}^{(i)}\}_{i=1}^{M}$ for $\mathbf{x} = \{\mathbf{x}_t\}_{t=0}^{T-1}$ where $\mathbf{x}_t \in \mathds{R}^{N}$ \Comment{$M$ Time-Series}
\Statex \hspace{15pt}$\{\tau, \kappa\}$ \Comment{Hyper-parameters}
\Ensure $\Psi=\{K,S,\Theta,K_{Z},Z,\Phi\}$ \Comment{Segments \& State Clustering}
\Procedure{ICSC-HMM-MCMC-Sampler}{}
\State Set $\Psi^{t-1} = \{\cdot\}$ \Comment{Initialize Markov Chain}
\For{\texttt{iter t = 1 to Maxiter}}
\State Set $\{\eta^{(i)}\} = \{\eta^{(i)}\}^{(t-1)}$
, $\{\mathbf{\mu}_k,\mathbf{\Sigma}_k\}=\{\mathbf{\mu}_k,\mathbf{\Sigma}_k\}^{(t-1)}$ , $\mathbf{F}= \mathbf{F}^{(t-1)}$
\State From current $\mathbf{F}$, compute count vector $\mathbf{m} = [m_1,\dots,m_{K_{\theta}+}]$ \\
with $m_k =$ number of time-series possessing feature $k$.
\State \textbf{Sample} Shared Features $f_{ik}$ with Const. MH Updates \cite{Fox:PhD:2009}
\State \textbf{Sample} Unique Features $f_{ik}$ with RJ-DD Updates \cite{Fox:PhD:2009}
\State \textbf{Sample} State Sequence $\{s^{(i)}\}_{i=1}^M$ with Gibbs sampler \cite{Fox:PhD:2009}
\State \textbf{Re-sample} $\mathbf{F}$ and $\{s^{(i)}\}_{i=1}^M$ with split/merge moves \cite{Hughes:NIPS:2012}
\State \textbf{Sample} trans. probabilities $\{\eta^{(i)}\}$ with Gibbs updates \cite{Fox:PhD:2009}
\State \textbf{Sample} emission parameters $\Theta$ with Gibbs updates \cite{Fox:PhD:2009}
\State \textcolor{blue}{\textbf{Sample} parameter clusters $\{z_i\}_{i=1}^{K_{Z}}$ with Gibbs updates}
\State \textcolor{blue}{$C \sim SPCM-\mathcal{CRP}-MM(\Theta)$ (Algorithm \ref{alg:nonparam-clustering})}
\State \textcolor{blue}{\textbf{Sample} hyper-parameter $\gamma$ with Gibbs updates \eqref{eq:gamma_distsadsd}\eqref{eq:gamma_coupling}}
\State \textcolor{blue}{\textbf{Sample} hyper-parameters $\alpha_b,\kappa$ with MH updates \eqref{eq:kappa_coupling} \cite{Fox:PhD:2009}}
\EndFor
\EndProcedure
\end{algorithmic} \end{algorithm}
Given that the couplings between the SPCM-$\mathcal{CRP}$ and the $\mathcal{IBP}$ are linked via deterministic equations that parametrize the hyper-priors, \eqref{eq:icsc-joint} can be estimated by following the same sampling steps as the original $\mathcal{IBP}$-HMM with three main re-sampling steps after sampling $\Theta$. Namely, we must run the Collapsed sampler for the SPCM-$\mathcal{CRP}$ mixture model listed in Algorithm \ref{alg:nonparam-clustering}. However, instead of letting the chain run for \texttt{Maxiter} steps, the number of iterations depends on the current Markov Chain state of the $\mathcal{IBP}$-HMM $\Psi^{(t)}$. More specifically, if the current estimated features $K^{(t)}$ have not changed from the previous sample $K^{(t-1)}$ we run a limited number of iterations, namely \texttt{iter $\leq$ 5}, and initialize the next chain with the previous cluster assignments $C^{(t-1)}$. Whereas, if the features have changed, we reset the chain and let it run for \texttt{iter $\leq$ 10}. Once $C$ has been sampled, we compute $Z = \mathbf{Z}(C)$ and can parametrize the hyper-priors for $\gamma, \alpha_b, \kappa$ for \eqref{eq:gamma_coupling} and \eqref{eq:kappa_coupling}\footnote{MATLAB code of this sampler is provided in: \url{https://github.com/nbfigueroa/ICSC-HMM}}.
\section{Evaluation and Applications} \label{sec:results} \subsection{Datasets \& Metrics for Similarity Function and Clustering} To evaluate the proposed similarity function (B-SPCM) (Section \ref{sec:full_spcm}) against standard similarity metrics and the proposed clustering algorithm (SPCM-CRP-MM) (Section \ref{sec:spcm_crp}), we use the following datasets and metrics:\\
\noindent \textbf{Toy 6D Dataset:} This is a synthetic 6-D ellipsoid dataset. It is generated from a set of 3 unique 6-D Covariance matrices. Each unique Covariance matrix has the following values: \small \begin{align*} \textcolor{black}{\Sigma_1} & \textcolor{black}{=\mathbf{I}_6 \lambda_1 \mathbf{I}_6,} \qquad \textcolor{black}{\Sigma_2 = \mathbf{I}_6 \lambda_2 \mathbf{I}_6,} \qquad \textcolor{black}{\Sigma_3 = \mathbf{I}_6 \lambda_3 \mathbf{I}_6}\\ \textcolor{black}{\lambda_1} & \textcolor{black}{= \begin{bmatrix}
|\epsilon_1|, |\epsilon_1|,|\epsilon_1|,|\epsilon_1|,|\epsilon_1|,|\epsilon_1| \end{bmatrix}^T}\\ \textcolor{black}{\lambda_2} & = \textcolor{black}{\begin{bmatrix}
|\epsilon_2|,|10\epsilon_2|,|10\epsilon_2|,|10\epsilon_2|,|\epsilon_2|,|\epsilon_2| \end{bmatrix}^T}\\ \textcolor{black}{\lambda_3} & = \textcolor{black}{\begin{bmatrix}
|\epsilon_3|,|10\epsilon_3|,|20\epsilon_3| ,|30\epsilon_3|,|40\epsilon_3|, |50\epsilon_3| \end{bmatrix}^T}. \end{align*} \normalsize
$\epsilon_i$ is a random Gaussian value sampled from $\epsilon_i \sim \mathcal{N}(0,1)$. The full dataset $\mathbf{\Sigma} = \{\textcolor{black}{\Sigma_1,\dots,\Sigma_{20}}|\textcolor{black}{\Sigma_{21},\dots,\Sigma_{40}}|\textcolor{black}{\Sigma_{41},\dots,\Sigma_{60}}\}$ is composed of 60 randomly transformed Covariance matrices (20 per unique Covariance matrix). Each random transformation is generated by sampling a random matrix $\mathbf{A}_j \in \mathds{R}^{6\times6}$ for each $j$-th sample. Then, an orthogonal transformation matrix is extracted through QR decomposition on $\mathbf{A}_j = \mathbf{Q}_j\mathbf{R}_j$. The extracted orthogonal matrix is used to rotate each $i$-th unique Covariance matrix with respect to the $j$-th orthogonal rotation matrix $\mathbf{Q}_j$ as follows $\Sigma_j = \mathbf{Q}_j\mathbf\Sigma_i\mathbf{Q}_j^T \forall i \in [1,3], j \in [1,20]$. The expected clustering result $\mathbf{K=3}$ groups of 20 samples each.\\ \noindent \textbf{6D Task-Ellipsoid Dataset:} This is a real dataset of 6-D task-ellipsoids. These were collected from human demonstrations of 3 tasks (circle drawing, cutting, screw-driving) with a sensorized tool (i.e. force/torque sensor at the end-effector) \cite{El-Khoury:RAS:2015}. These task-ellipsoids are used to represent the principal directions of the forces \textcolor{black}{$f = \{f_x,f_y,f_z \}$} and torques \textcolor{black}{$\tau = \{\tau_x,\tau_y,\tau_z \}$} exerted on an object to achieve a task. The Covariance matrix representing a task in this Task-Wrench-Space is generated as follows: \begin{equation} \label{eq:task-ellipsoid} \Sigma= \begin{bmatrix} \textcolor{black}{\Sigma_{ff}} & \textcolor{black}{\Sigma_{\tau f}} \\
\textcolor{black}{\Sigma_{f\tau}} & \textcolor{black}{\Sigma_{\tau\tau}}
\end{bmatrix}; \qquad \Sigma_{**} \in \mathds{R}^{3\times 3}. \end{equation}
The dataset is composed of 105 samples of such Covariance matrices \eqref{eq:task-ellipsoid}, which belong to the following groups/classes of tasks: (a) \textit{circle drawing} - 63 samples, (b)\textit{cutting} - 21 and (c) \textit{screw-driving} - 21 samples each, respectively. The expected clustering result is $\mathbf{K=3}$ clusters of (1) 63, (2) 21 and (3) 21.\\
\noindent \textbf{3D Synthetic Diffusion Tensor Field (DTI) :} Diffusion Tensors (DT) are widely used to represent the diffusivity of water in brain tissue from Magnetic Resonance Images (MRI). The resulting approximation is named DT-MRI and yields a matrix-valued image (also referred to as Tensor Field), where each element of the image is a DT. Diffusion Tensors (DT) are rank 2 tensors, equivalent to 3-D symmetric positive matrices (SPD). Researchers in the field of medical imaging, use such matrix-valued images to segment areas of the scanned brain into regions that exhibit similar behaviors, to detect injuries or chronic illnesses. Overviews of such segmentation algorithms are presented in \cite{Lenglet:2006:TMI} and \cite{Barmpoutis:TMI:2007}. There are two main approaches to do such segmentation: (i) Convert the DTI to a scalar/vector valued image based on DT properties such as fractional anisotropy or mean diffusivity \cite{Shepherd:2006:NeuroImage} and apply well-established edge-based or level-set based image segmentation algorithms. (ii) or segment the matrix-valued image directly by modified active contour models or statistical clustering frameworks where the topology of SPD matrices is considered \citep{Barmpoutis:TMI:2007}. \begin{figure}\label{fig:DTI-datasets}
\end{figure} Our proposed $\mathcal{SPCM}-CRP$ mixture model can be categorized in the latter approaches. As the DTI is a lattice of tensors, we can unroll this lattice and treat it as an unordered set of SPD matrices. Through this dataset, we can show the scalability of our proposed clustering algorithm and demonstrate that it is generic enough to be applied to different fields, with no modification. The Fractional Anisotropy (FA) value of the tensor field is used to evaluate the performance of our algorithm, as it is a popular quantity used to process and analyze DTIs \citep{Lenglet:2006:TMI}. We used the fanDTasia Matlab Toolbox \citep{Barmpoutis:TMI:2007} and the accompanying tutorial \citep{Barmpoutis:2010} to generate a $32\times32$ synthetic DTI, shown in Figure \ref{fig:DTI-datasets}. Since FA is a continuous value between $[0,1]$ we can only use it to visually compare segmentation results. For this reason, we apply an automatic binning procedure on the FA values of the entire image to create \textit{virtual} regions on the image. These regions will act as the ``ground truth'' segmentation; albeit being an estimate, it provides a proper quantification of the performance of our approach. From Figure \ref{fig:DTI-datasets}, we can see that we are expected to segment the image into 5 regions. This is equivalent to clustering $1024$ SPD matrices into $\mathbf{K=5}$ clusters of (1) 218, (2) 185, (3) 301, (4) 119 and (5) 201 samples each, respectively.\\
\noindent \textbf{3D DTI from a Rat's Hippocampus:} This dataset has the same properties and was generated with the same software as \textbf{Dataset 3}, however it represents a slice of a real DTI from an isolated MRI of a rat's hippocampus \citep{Barmpoutis:TMI:2007}. It has the size of $32 \times 32$ and is shown in Figure \ref{fig:DTI-datasets_rat}. The procedure used in \textbf{Dataset 3} was used to produce the expected segmentation regions, and consequently the expected clusters K. We can see that we are expected to segment the image into 4 regions. This is equivalent to clustering $1024$ SPD matrices into $\mathbf{K=4}$ clusters of (1) 336, (2) 118, (3) 239, and (4) 331 samples each, respectively.\\
\begin{figure}\label{fig:DTI-datasets_rat}
\end{figure}
\noindent \textbf{400D Covariance Features from \href{https://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal-computing/research/object-recognition-and-scene-understanding/analyzing-appearance-and-contour-based-methods-for-object-categorization/}{ETH-80 Dataset}:} The ETH-80 dataset is an image-set based dataset for object recognition. It has 8 object categories with 10 object instances each. Each object instance is an image-set of 41, $20\times20$ pixel, images of that object from different views. In \cite{Vemulapalli:CVPR:2013}, they collectively represent each image-set with a single Covariance feature. These Covariance features are simply the data Covariance matrix $C = SS^T/(N-1)$ for an image-set $S = \{s_1, \dots, s_N\}$, where $s_i \in \mathds{R}^d$ is the feature vector representing the image, in this case the intensity values of the rolled out image, i.e. $d = 20\times20 = 400$. Each object instance, hence, is represented by a Covariance matrix $C \in \mathds{R}^{400\times400}$. \cite{Vemulapalli:CVPR:2013} use these Covariance features together with a kernel-learning SVM approach and achieve high classification accuracy. In this work, we do not intend to surpass such classification results, as we are not doing supervised learning. We rather test this dataset to demonstrate the scalability and limitations of our proposed method, by clustering a dataset of $80$ samples of $400$-d Covariance features into $\mathbf{K=8}$ clusters with 10 samples each.\\
\noindent \textbf{External Clustering Metrics} Evaluating the performance of clustering algorithms without ground truth labels is still an open problem in machine learning. However, given that we have the true labels for all of our datasets, we use the following external clustering metrics to evaluate our proposed approach: \begin{enumerate}[leftmargin=*] \item \underline{\textit{Purity}} is a simple metric that evaluates the quality of the clustering by measuring the number of clustered data-points that are assigned to the same class \citep{Manning:2008:IIR}. \item \textit{Normalized Mutual Information \underline{(NMI)}}: is an information-theoretic metric, it measures the trade-off between the quality of the clustering and the total number of clusters \citep{Manning:2008:IIR}. \item The \underline{$\mathcal{F}$-measure} is a well-known classification metric which represents the harmonic mean between Precision and Recall and can be applied to clustering problems. \end{enumerate} \normalsize Refer to Appendix \ref{app:clustering_metrics} for computation details of each metric.
\subsection{Similarity Function Evaluation} We devised an evaluation strategy to measure the \textit{effectiveness} of our similarity function compared to standard Covariance distances by applying the two main similarity/affinity-base clustering algorithms, namely Spectral Clustering (\textit{SC}) and Affinity Propagation (\textit{AP}). The performance of \textit{SC} and \textit{AP} are evaluated on the obtained pairwise Similarity matrices $\mathbf{S}\in \mathds{R}^{N \times N}$ for \textbf{Dataset 1}(Toy 6D), \textbf{Dataset 2} (6D task-ellipsoid), \textbf{Dataset 3} (3D Synthetic DTI) and \textbf{Dataset 4}(3D DT-MRI Rat's Hippocampus); for each similarity function: RIEM, LERM, KDLM, JBLD and B-SPCM (see Figure \ref{fig:dti_syn_matrices}). For completeness, we include metrics for the 3D toy dataset presented in Section \ref{sec:comparison-standard} and refer to it as \textbf{Dataset 0}. In Table \ref{tab:spcm-compare-algos}, we present performance metrics of each clustering method applied to every similarity matrix for the mentioned datasets. For \textit{SC}, we set $K$ to the true cluster number. On the other hand, for Affinity Propagation (\textit{AP}) we set the damping factor $\lambda$ to optimal values, found empirically, for each dataset. The B-SPCM hyper-parameter $\tau$ is set to 1 for all datasets. As can be seen in Table \ref{tab:spcm-compare-algos}, using the \textit{SC} algorithm, the proposed B-SPCM similarity function outperforms all other standard metrics for Datasets 0-1 and 3-4, which are indeed composed of Covariance matrices with large transformations. On Dataset 2, RIEM and JBLD with \textit{SC} were capable of recovering the true clusters, however B-SPCM still gives higher scores than RIEM and KLDM, these results are understandable as the transformations on this dataset are marginal, compared to the other datasets. 
On the other datasets, the \textit{AP} algorithm yields dramatically different results when compared to \textit{SC} (Table \ref{tab:spcm-compare-algos}). Except for \textbf{Dataset 0}, the best clustering performance (considering all Similarity metrics) is sub-optimal. \begin{figure}\label{fig:dti_syn_matrices}
\end{figure}
We found that \textit{AP} is extremely unstable for the type of similarities that we target in this work. First of all, for each dataset we had to adjust the damping factor $\lambda$. With a $\lambda < 0.2$ the standard metrics would yield $K\approx M$, yet with a higher value $\lambda>0.5$ they would collapse to $K=1$. Moreover, for each dataset we also had to find the optimal way of feeding the similarity matrix to the \textit{AP} algorithm; i.e. we had to transform the similarities, either by normalizing or negating them, so that the AP algorithm would converge. Such parameter tuning and data transformation is cumbersome for some applications. This led to our motivation of pursuing a non-parametric clustering approach that is robust and converges without the need of heavy parameter tuning.
\begin{table*}[!t] \begin{center}
\resizebox{\textwidth}{!}{\begin{tabular}{cc|ccccc|ccccc}
\hline
\hline
\multicolumn{1}{c}{\multirow{4}{*}{Dataset}} & \multicolumn{1}{c}{\multirow{4}{*}{Metrics}} & \multicolumn{5}{c}{\multirow{2}{*}{Similarity functions + Affinity Propagation}} & \multicolumn{5}{c}{\multirow{2}{*}{Similarity functions + Spectral Clustering}}\\
\\
& & \multicolumn{1}{c}{\multirow{1}{*}{RIEM}} & \multicolumn{1}{c}{\multirow{1}{*}{LERM}} & \multicolumn{1}{c}{\multirow{1}{*}{KLDM}} & \multicolumn{1}{c}{\multirow{1}{*}{JBLD}} & \multicolumn{1}{c}{\multirow{1}{*}{B-SPCM}} & \multicolumn{1}{c}{\multirow{1}{*}{RIEM}} & \multicolumn{1}{c}{\multirow{1}{*}{LERM}} & \multicolumn{1}{c}{\multirow{1}{*}{KLDM}} & \multicolumn{1}{c}{\multirow{1}{*}{JBLD}} & \multicolumn{1}{c}{\multirow{1}{*}{B-SPCM}}\\
\cline{3-7}
\hline
\multicolumn{1}{c}{\multirow{4}{*}{Toy 3D}} & NMI & 0.65 & 0.65 & 0.65 & 0.65 & \textbf{1.00} & 0.29 (0.1) & 0.26 (0.09) & 0.26 (0.09) & 0.28 (0.09) & \textbf{1.00} \\
& Purity & 1.00 & 1.00 & 1.00 & 1.00 & \textbf{1.00} & 0.77 (0.11) & 0.66 (0.10) & 0.66 (0.10) & 0.68 (0.10) & \textbf{1.00}\\
& $\mathcal{F}$ & 0.57 & 0.57 & 0.57 & 0.57 & \textbf{1.00} & 0.70 (0.09) & 0.66 (0.08) & 0.66 (0.08) & 0.68 (0.09) & \textbf{1.00}\\
$(\lambda=0.15)$& $K(2)$ & 5 & 5 & 5 & 5 & \textbf{2} & 2 & 2 & 2 & 2 & \textbf{2}\\
\hline
\multicolumn{1}{c}{\multirow{4}{*}{Toy 6D}} & NMI & 0.19 & 0.19 & 0.19 & 0.19 & \textbf{0.40} & 0.12 (0.03) & 0.14 (0.03) & 0.10 (0.03) & 0.12 (0.08) & \textbf{1.00} \\
& Purity & 0.55 & 0.55 & 0.55 & 0.55 &\textbf{0.63} & 0.39 (0.02) & 0.40 (0.02) & 0.39 (0.03) & 0.40 (0.06) & \textbf{1.00} \\
& $\mathcal{F}$ & 0.58 & 0.58 & 0.58 & 0.58 & \textbf{0.68} & 0.49 & 0.49 & 0.49 (0.01) & 0.51 (0.05) & \textbf{1.00} \\
$(\lambda=0.5)$ & $K(3)$ & 2 & 2 & 2 & 2 & 2 & 3 & 3 & 3 & 3 & \textbf{3} \\
\hline
\multicolumn{1}{c}{\multirow{2}{*}{6D Task}} & NMI & 0.73 & 0.73 & 0.73 & 0.73 & 0.64 & \textbf{1.00} & 0.03 & 0.07 (0.04) & \textbf{1.00} & \textit{0.77 (0.07)}\\
\multicolumn{1}{c}{\multirow{3}{*}{Ellipsoids}} & Purity & 0.79 & 0.79 & 0.79 & 0.79 & \textbf{1.00} & \textbf{1.00} & 0.60 & 0.60 & \textbf{1.00} & \textit{0.82 (0.04)}\\
& $\mathcal{F}$ & 0.85 & 0.85 & 0.85 & 0.85 & 0.53 & \textbf{1.00} & 0.58 & 0.55 (0.02) & \textbf{1.00} & \textit{0.84 (0.05)}\\
$(\lambda=0.5)$ & $K(3)$ & 2 & 2 & 2 & 2 & 11 & 3 & 3 & 3 & 3 & \textbf{3}\\
\hline
\multicolumn{1}{c}{\multirow{1}{*}{3D DTI}} & NMI & 0.56 & 0.56 & 0.56 & 0.56 & \textbf{0.62} & 0.12 (0.10) & 0.25 (0.20) & 0.14 (0.07) & 0.08 (0.02) & \textbf{0.46 (0.19)}\\
\multicolumn{1}{c}{\multirow{2}{*}{Synthetic}} & Purity & 0.55 & 0.55 & 0.55 & 0.55 & \textbf{0.55} & 0.32 (0.02) & 0.40 (0.10) & 0.35 (0.04) & 0.32 (0.01) & \textbf{0.53 (0.11)}\\
& $\mathcal{F}$ & 0.58 & 0.58 & 0.58 & 0.58 & \textbf{0.59} & 0.38 (0.03) & 0.46 (0.09) & 0.40 (0.04) & 0.37 & \textbf{0.53 (0.10)} \\
$(\lambda = 0.5)$ & $K (5)$ & 2 & 2 & 2 & 2 & 2 & 5 & 5 & 5 & 5 & \textbf{5} \\
\hline
\multicolumn{1}{c}{\multirow{1}{*}{3D DTI Rat's}} & NMI & 0.17 & 0.15 & 0.00 & 0.20 & \textbf{0.23} & \textbf{0.08 (0.14)} & 0.05 (0.01) & 0.03 & 0.03 (0.01) & 0.02\\
\multicolumn{1}{c}{\multirow{2}{*}{Hippocampus}} & Purity & 0.41 & 0.42 & 0.33 & 0.44 & \textbf{0.49} & \textbf{0.36 (0.09)} & 0.35 (0.01) & 0.33 & 0.34 & 0.33 \\
& $\mathcal{F}$ & 0.43 & 0.42 & 0.43 & 0.46 & \textbf{0.52} & \textbf{0.45 (0.07)} & 0.43 & 0.43 & 0.43 & 0.43 \\
$(\lambda=0.5)$ & $K (4)$ & 2 & 3 & 1 & 2 & 2 & \textbf{4} & 4 & 4 & 4 & 4 \\
\hline
\hline \end{tabular}} \end{center}
\caption{Performance Comparison of all Covariance Matrix similarity functions with \textit{Affinity Propagation (AP)} and \textit{Spectral Clustering (SC)} algorithm with K set to the real value \textit{(presenting mean (std) of performance metrics over 10 runs).} \label{tab:spcm-compare-algos}} \end{table*}
\subsection{Similarity-based Non-parametric Clustering Evaluation} Our proposed clustering approach relies on sampling from a posterior distribution \eqref{eq:cond_spcm_crp} with the following hyper-parameters set by the user: $\tau, \alpha, \lambda = \{\mu_0, \kappa_0, \Lambda_0, \nu_0\}$. The first evaluation of our approach will focus on the convergence of the implemented collapsed Gibbs sampler and its robustness to multiple initializations. The second evaluation will focus on its robustness to hyper-parameters. Typically, the hyper-parameters of the $\mathcal{NIW}$, i.e. $\lambda = \{\mu_0, \kappa_0, \Lambda_0, \nu_0\}$, are set to data-driven values. For example, $\mu_0 = \frac{1}{M}\sum_{i=1}^M \mathbf{y}_i$ can be set to the mean value of all data-points or simply to zero (if the data is centered), while $\Lambda_0 = \frac{1}{M}\mathbf{Y}\mathbf{Y}^T$ can be set to the sample Covariance of all data-points and $\kappa_0 = 1,\nu_0 = M$. Hence, in reality we only have 2 hyper-parameters to tune: (1) the tolerance value, $\tau$, for the B-SPCM metric and (2) the concentration parameter, $\alpha$, of the SPCM-$\mathcal{CRP}$ prior. Finally, we will compare the results of the SPCM-$\mathcal{CRP}$ mixture model with two mixture variants: (1) GMM with model selection and (2) $\mathcal{CRP}$ mixture model. \subsubsection{Collapsed Gibbs Sampler Convergence} To evaluate the convergence properties of our proposed Collapsed Gibbs Sampler we recorded the trace of the log posterior probabilities, accompanied by the $\mathcal{F}$-measure and computation time per iteration on an Intel\textregistered{} Core\texttrademark{} i7-3770 CPU@3.40GHz$\times$8; for three of our datasets: \textbf{6D Toy Dataset}, \textbf{6D Real Dataset} and \textbf{3D DTI Synthetic Dataset} (see Figure \ref{fig:sampler_DTISYN}, respectively).
We devised \textit{sampler tests} where 20 independent chains were run for 500 iterations each. For each dataset a fixed value of $\alpha$ and $\tau$ was defined and all runs begin with each customer sitting by her/himself; i.e. $K=M$. Typically, one is interested in a sampler's capacity for rapid mixing. As can be seen, in all of our tests the Markov chains seem to come close to the steady state distribution in $<100$ iterations. Moreover, as the chains evolve, we can see how the accuracy of our cluster estimates reaches \textit{or comes closer to} the ground truth (through the $\mathcal{F}$-measure plots).
Regarding computation cost, the proposed sampler for the SPCM-$\mathcal{CRP}$ is indeed more costly than the classical Collapsed Gibbs sampler for the $\mathcal{CRP}$, as we must recompute the seating assignments for all customers, every time one of them creates or breaks a table. This results in a higher computational cost per iteration, more so, for the first iteration, as all the customers are being assigned to their corresponding tables for the first time, biased by the similarity matrix $\mathbf{S}$. This can be clearly seen in the plots shown in Figure \ref{fig:sampler_DTISYN}. Nevertheless, due to the fact that, in the first iteration, all the seating assignments are explored we see a big jump in both the posterior probabilities and the $\mathcal{F}$-measure, resulting in the rapid mixing capabilities of our sampler.
\begin{figure*}\label{fig:sampler_DTISYN}
\end{figure*}
\subsubsection{Sensitivity to Hyper-parameters} One of the goals of this work is to alleviate the need for heavy parameter tuning and propose a method that is robust to hyper-parameter changes. As discussed earlier, we have two main hyper-parameters for the SPCM-$\mathcal{CRP}$ mixture model: (1) the tolerance parameter, $\tau$, for the SPCM similarity function and (2) $\alpha$, the concentration parameter for the SPCM dependent $\mathcal{CRP}$ prior. We begin this evaluation by discussing the intuitive effect of each parameter and then show how our clustering algorithm is robust for a different range of values for these parameters.
$\tau$ is a scaling factor for the un-bounded similarity values $\Delta_{ij}$ in \eqref{eq:bspcm}. Generally, this value can be set to $1$, however, when set to values $>1$ the similarity values for lower-dimensional datasets; i.e. $N < 6$ for $C \in \mathds{R}^{N \times N}$; are simply amplified. $\alpha$ plays a role in the computation of the prior probabilities for seating assignments in the SPCM dependent $\mathcal{CRP}$ prior. In \eqref{eq:sdcrp_prior}, we see that the $i$-th observation has a probability of being grouped with the $j$-th observation according to \eqref{eq:bspcm}, which is bounded between [0,1]. The probability of the $i$-th observation belonging to a new singleton cluster is imposed by $\alpha$. Hence, $\alpha$ can be seen as a dispersion parameter, which dictates how tight the clustering must be. Since \eqref{eq:bspcm} is bounded between [0,1], naturally $\alpha$ should take the same values. A lower $\alpha$ would impose less clusters; i.e. more grouped observations; and vice-versa.
To evaluate hyper-parameter sensitivity of our clustering algorithm, we performed a grid search on a log-spaced range of $\tau=[1,50]$ and $\alpha=[0.1,20]$ and recorded the $\mathcal{F}$-measure and $\max \log$ posterior probabilities for two of our datasets, shown in Figure \ref{fig:hyper_6dReal}. We report solely on the \textbf{3D Toy Dataset} and \textbf{6D Real Dataset} as the other datasets exhibited minimal to absolutely no change in performance while increasing $\alpha$ and $\tau$. Our algorithm is not restricted to $\alpha$ taking on values $\geq 1$. For this reason, we explore a wider range for $\alpha=[0.1,20]$ rather than the natural choice of $\alpha=[0.1,1]$. A large value for $\alpha\gg 1$ only imposes a higher probability for observations to not be grouped with other observations; resulting in a higher number of clusters. This can be clearly seen in the $\mathcal{F}$-measure heatmap, where the $\mathcal{F}$-measure starts to gradually decrease as $\alpha\gg 2.8$ for the \textbf{3D Toy Dataset}. For the \textbf{6D Real Dataset} we see a similar behavior, the area with highest values of the $\mathcal{F}$-measure is within the range of $\alpha=[0.1,6]$. Hence, we can conclude that our algorithm is not really sensitive to the value $\alpha$ as long as $\alpha < 2$, which is in fact, twice the maximum value of our similarity function \eqref{eq:bspcm}.
\begin{figure}\label{fig:hyper_6dReal}
\end{figure}
Regarding the effect of the tolerance value $\tau$, we can see that for clearly separable datasets of Covariance matrices, as the \textbf{3D Toy Dataset}, this value has no effect whatsoever on the clustering results. However, when the Covariance matrices are not so clearly separable, amplifying the similarities will only deteriorate the clustering results, as Covariance matrices which are not so similar will have higher similarities and hence be projected onto very close coordinates in the spectral embedding.
\subsubsection{Comparison with other methods} At its core, our proposed clustering algorithm is a mixture model applied to the spectral embedding $\mathbf{Y}\in \mathds{R}^{P \times M}$ induced by a similarity matrix $\mathbf{S} \in \mathds{R}^{M \times M}$. The novelty of our approach is not only that we use a Bayesian non-parametric mixture model on this embedding, but, that we impose a similarity-dependent prior through the very same similarities that were used to create the embedding. Hence, in order to highlight the power of imposing this new prior on the mixture model we compare our approach to two variants: (i) SPCM embedding ($\mathbf{Y}$)+GMM (w/Model Selection) and (ii) SPCM embedding ($\mathbf{Y}$)+$\mathcal{CRP}$-GMM.
Since we propose a non-parametric approach, the first mixture variant is used as a baseline. Our goal is thus, to surpass the performance of the standard $\mathcal{CRP}$-GMM and exhibit better or comparable performance to the classic finite GMM, with parameters optimized through model selection. We ran the three algorithms 10 times for all of our datasets and report mean (std.) of the corresponding clustering metric in Table \ref{tab:spcm-clust-compare}. Numbers in \textbf{bold} indicate best scores. As can be seen, for all datasets, our approach is superior to applying the standard $\mathcal{CRP}$-GMM on $\mathbf{Y}$ and, either superior or comparable to the finite GMM variant.
\begin{table}[!h] \begin{center} \footnotesize
\begin{tabular}{cc|ccc}
\hline
\hline
\multicolumn{1}{c}{\multirow{4}{*}{Dataset}} & \multicolumn{1}{c}{\multirow{4}{*}{Metrics}} & \multicolumn{3}{c}{\multirow{2}{*}{Spectral Embedding ($\mathbf{Y}$) + Clustering}}
\\
& & \multicolumn{1}{c}{\multirow{2}{*}{GMM w/MS}} & \multicolumn{1}{c}{\multirow{2}{*}{CRP-GMM}} & \multicolumn{1}{c}{\multirow{2}{*}{\textbf{Our approach}}} \\ \\ \hline
\multicolumn{1}{c}{\multirow{4}{*}{Toy 3D}} & NMI & 0.807 (0.018) & 0.298 (0.316) & \textbf{1.00} \\
& Purity & 1.00 & 0.74 (0.135) & \textbf{1.00}\\
& $\mathcal{F}$ & 0.877 (0.006) & 0.738 (0.111) & \textbf{1.00}\\
& $K(2)$ & 3 & 1.8 (0.789)& \textbf{2}\\
\hline
\multicolumn{1}{c}{\multirow{4}{*}{Toy 6D}} & NMI & \textbf{1.00} & 0.633 (0.342) & \textbf{1.00} \\
& Purity & \textbf{1.00} & 0.633 (0.189) & \textbf{1.00}\\
& $\mathcal{F}$ & \textbf{1.00} & 0.744 (0.146) & \textbf{1.00}\\
& $K(3)$ & \textbf{3} & 1.900 (0.568) & \textbf{3}\\
\hline
\multicolumn{1}{c}{\multirow{2}{*}{6D Task}} & NMI & 0.893 (0.129) & 0.599 (0.417) & \textbf{0.984 (0.050)} \\
\multicolumn{1}{c}{\multirow{3}{*}{Ellipsoids}} & Purity & 0.952 (0.080) & 0.759 (0.126) & \textbf{0.980 (0.063)} \\
& $\mathcal{F}$ & 0.943 (0.100) & 0.794 (0.151) & \textbf{0.987 (0.042)} \\
& $K(3)$ & \textbf{3} & 1.800 (0.632) & \textbf{2.900 (0.316)} \\
\hline
\multicolumn{1}{c}{\multirow{1}{*}{3D DTI}} & NMI & \textbf{0.804} & 0.640 (0.089) & 0.730 (0.102)\\
\multicolumn{1}{c}{\multirow{2}{*}{Synthetic}} & Purity & \textbf{0.701} & 0.536 (0.087) & 0.620 (0.105) \\
& $\mathcal{F}$ & \textbf{0.750} & 0.536 (0.087) & 0.679 (0.097) \\
$(\tau=10)$ & $K (5)$ & 3 & 2.200 (0.422) & 2.700 (0.675) \\
\hline
\multicolumn{1}{c}{\multirow{1}{*}{3D DTI }} & NMI & 0.585 & 0.502 (0.197) & \textbf{0.633 (0.039)} \\
\multicolumn{1}{c}{\multirow{2}{*}{Rat's Hippo.}} & Purity & 0.696 & 0.652 (0.137) & \textbf{0.745 (0.036)} \\
& $\mathcal{F}$ & 0.705 & 0.502 (0.197) & \textbf{0.740 (0.046)} \\
$(\tau=10)$ & $K (4)$ & 3 & 3.100 (0.994) & \textbf{4.600 (0.699)} \\
\hline
\multicolumn{1}{c}{\multirow{1}{*}{400D Cov.}} & NMI & \textbf{0.541} & 1.800 (0.632) & 0.486 (0.091)\\
\multicolumn{1}{c}{\multirow{2}{*}{ETH-80}} & Purity & \textbf{0.325} & 0.214 (0.061) & 0.265 (0.038)\\
& $\mathcal{F}$ & \textbf{0.441} & 0.338 (0.080) & 0.386 (0.038) \\
& $K (8)$ & 3 & 1.800 (0.632) & 2.889 (0.333) \\
\hline
\hline \end{tabular} \end{center} \caption{Performance Comparison of Covariance Matrix clustering with \textbf{Our Proposed Approach} vs. GMM w/Model Selection and CRP Mixture Model \textit{(presenting mean (std) of metrics over 10 runs).} \label{tab:spcm-clust-compare}} \end{table}
It must be noted that, the optimal $K$ for the finite GMM variant was chosen considering the interpretation of the BIC/AIC curves as well as our prior knowledge of the optimal clusters in each dataset. Moreover, in both $\mathcal{CRP}$-GMM and SPCM-$\mathcal{CRP}$-GMM we set $\alpha=1$. In the $\mathcal{CRP}$, $\alpha$ has an analogous effect on clustering dispersion. In all of our datasets, we see that the $\mathcal{CRP}$-GMM generally produces less clusters than our approach. Moreover, thanks to the similarity-dependent prior, our approach seems to extract more meaningful clusters from the datasets. A clear example of this is shown on the clustering results from all algorithms on the DTI datasets presented in Figure \ref{fig:clustering_dti}. For the Real DTI dataset, which represents a lattice of diffusion tensors captured from a rat's hippocampus, we can see that our approach recovers an image segmentation much closer to the expected segmentation (see Figure \ref{fig:DTI-datasets}) as opposed to the other mixture variants. In this figure, we can also see the automatically generated embeddings $\mathbf{Y}$ with their corresponding true labels. Finally, for the challenging ETH-80 dataset, although low, our approach exhibits comparable performance to the finite mixture variant. Such low performance was expected as this dataset contains extremely overlapping classes which no clustering algorithm is capable of extracting.
\begin{figure*}\label{fig:clustering_dti}
\end{figure*}
\subsection{Datasets \& Metrics for Segmentation Evaluation} To evaluate the segmentation and action recognition algorithm proposed in Section \ref{sec:ICSC-HMM}, we use the following datasets\footnote{All datasets in this section are available in: \url{https://github.com/nbfigueroa/ICSC-HMM}.\\ They will be uploaded to the UCI Machine Learning Repository upon acceptance.}:\\
\noindent \textbf{Toy 2D Dataset}: A set of 4 synthetic 2-D time-series generated by 2 Gaussian distributions, as the one shown in Figure \ref{fig:problem2}. Within each time-series the two Gaussian emission models switch randomly and are transformed with a random rotation and scaling factor. Hence, the goal is to extract $K = 4$ transform-dependent emission models and group them into $K_{Z} = 2$ transform-invariant state clusters.\\
\noindent \textbf{7D Vegetable Grating Dataset \citep{Pais:IEEE:2015}}: 7-D time-series data of kinesthetic demonstrations on a 7DOF KUKA arm for a Carrot Grating Task. These demonstrations are trajectories of the end-effector of the robot consisting of $M=12$ (7-D) time-series $\mathbf{X}^{(i)} = \{\mathbf{x}_1,\cdots,\mathbf{x}_{T^{(i)}}\}$ for $i=\{1,\dots,12\}$. The 7 dimensions in $\mathbf{x}_t = [\xi_1, \xi_2, \xi_3, q_i, q_j, q_k, q_w]^{T}$ correspond to position and orientation of the robot's end-effector. This task involves 3 actions: (a) reach (b) grate and (c) trash. The interesting feature of this dataset is that 6 of the time-series are recorded in one reference frame (RF); i.e. the base of the robot and the other 6 are recorded in the RF of the grating tool. Hence, the two sets of demonstrations are subject to a rigid transformation. The goal is thus to segment all time-series into $K\approx6$ transform-dependent actions and group them into $K_z = 3$ transform-invariant actions (as in Figure \ref{fig:segmentation_results}).\\
\noindent \textbf{13D Dough Rolling Dataset \citep{Figueroa:HRI:2016}}: 13-D time-series data of kinesthetic demonstrations on a 7DOF KUKA arm for a Dough Rolling Task. The trajectories consist of $M=15$ (13-D) time-series $\mathbf{X}^{(i)} = \{\mathbf{x}_1,\cdots,\mathbf{x}_{T^{(i)}}\}$ for $i=\{1,\dots,15\}$. The 13 dimensions in $\mathbf{x}_t = [\xi_1, \xi_2, \xi_3, q_i, q_j, q_k, q_w, f_x, f_y, f_z, \tau_x, \tau_y, \tau_z]^T$ correspond to position, orientation, forces and torque sensed on the end-effector during kinesthetic teaching. This task involves 3 actions: (a) reach (b) roll and (c) back, which are repeated 2-4 times within each time-series. In this case, the RF is fixed to the origin of the table where the dough is being rolled (see \href{https://www.youtube.com/watch?v=br5PM9r91Fg}{video}), however, two distinctive rolling directions are observed: (i) along the x-axis and (ii) y-axis of the table, as seen in Figure \ref{fig:segmentation_results}. Hence, the goal is to segment all time-series into $K\approx6$ transform-dependent actions and group them into $K_z \approx 3$ transform-invariant actions.\\
\begin{figure*}\label{fig:segmentation_true}
\end{figure*}
\noindent \textbf{26D Bi-manual Zucchini Peeling Dataset}: 26-D time-series data of kinesthetic demonstrations on a pair of 7DOF KUKA arms for a Zucchini Peeling Task. The trajectories consist of $M=5$ (26-D) time-series $\mathbf{X}^{(i)} = \left\{ [\mathbf{x}_1^{(left)}; \mathbf{x}_1^{(right)}],\cdots,[\mathbf{x}_T^{(left)}; \mathbf{x}_T^{(right)}] \right\}$ for $i=\{1,\dots,5\}$, where $\mathbf{x}_t^{(\cdot)}$ corresponds to the position, orientation, forces and torque of an end-effector. This task involves 5 bi-manual actions: (a) \textit{reach}, (b) \textit{reach-to-peel}, (c) \textit{peel}, (d) \textit{rotate} and (e) \textit{retract}. Each demonstration begins with a \textit{reach} and ends with a \textit{retract}. Between these two actions a sequence of \textit{reach-to-peel}, \textit{peel} and \textit{rotate} is repeated $\approx3-5$ times. Moreover, an internal sub-sequence of \textit{reach-to-peel} and \textit{peel} is repeated in each case $\approx3-5$ times. As shown in Figure \ref{fig:segmentation_true} the workspace of the robots is limited to the cutting board (in red), nevertheless, we observe some transformations between time-series corresponding to the location and length of the zucchini, as can be seen in Figure \ref{fig:segmentation_results}. Moreover, this dataset is particularly challenging due to its dimensionality and switching dynamics. Hence, the goal is to segment all time-series into $K\approx7$ transform-dependent actions and group them into $K_z \approx 5$ transform-invariant actions.\\
\noindent \textbf{External Segmentation Metrics} Given that we have the true labels for all of our datasets (from human segmentations), we use the following external segmentation metrics (in addition to the clustering metrics used in the previous section) to evaluate our proposed approach: \begin{enumerate}[leftmargin=*] \item The \textit{\underline{Hamming}} distance measures the distance between two sets of strings (or vectors) as the number of mismatches between the sets \citep{Mulmuley1987}.
\item The \textit{Global Consistency Error \underline{(GCE)}} is a measure that takes into account the differences in granularity while comparing two segmentations \citep{MartinFTM01}.
\item \textit{Variation of Information \underline{(VI)}} is a metric which defines the distance between two segmentations as the average conditional entropy of the two segmentations \citep{Meila:2005:CCA}. \end{enumerate} \normalsize Refer to Appendix \ref{app:segmentation_metrics} for computation details of each metric.
\subsection{ICSC-HMM Evaluation} \subsubsection{MCMC Sampler Convergence} To evaluate the convergence properties of the MCMC Sampler for the coupled model we recorded the trace of the log posterior probabilities, accompanied by the Hamming distance (for segmentation evaluation) and the $\mathcal{F}$-measure (for clustering evaluation) on an Intel® Core™ i7-3770 CPU@3.40GHz$\times$8; for three of our datasets: \textbf{2D Toy Dataset}, \textbf{7D Grating Dataset} and \textbf{13D Rolling Dataset} (see Figure \ref{fig:sampler_rolling}, respectively).
We devised \textit{sampler tests} where 20 independent chains were run for 500 iterations each. The coupled algorithm has only two hyper-parameters which need to be defined, namely the $\alpha_b$ and $\tau$ pertaining to the SPCM-CRP prior; these have the same effect on the coupled model as in the SPCM-CRP mixture model. We thus, set $\tau=1$ and randomly sample $\alpha_b$ from a range of $[1,M]$. Moreover, for all runs we randomly sample the initial value of the features $K$ within a range of $[1,M]$ corresponding to the number of shared states for the multiple HMMs and define the initial $K_Z = K$. For all datasets, we observe that the estimated features $F$ and its corresponding \textit{transform-dependent} segmentation (measured through the Hamming distance) stabilize in $< 100 $ iterations. One can also see how the feature clustering $Z$ comes closer to the true clusters as the features $F$ are better estimated. Once features $F$ stabilize, we can see how the SPCM-$\mathcal{CRP}$ mixture explores customer assignments with all chains reaching higher $\mathcal{F}$-measure with $Z$ than with $F$, which is the main desideratum of this algorithm.
\begin{figure*}\label{fig:sampler_rolling}
\end{figure*}
\subsubsection{Comparison with other methods} At its core, our proposed segmentation algorithm can be seen as an extension of a Bayesian non-parametric HMM. The novelty, however, is that we can jointly estimate \textit{transform-invariant} segmentations which are capable of providing a more semantically rich description of a set of time-series data, as opposed to the typical HMM variants. To prove this claim, we compare our approach on the presented datasets against four HMM variants: (i) HMM w/Model Selection, (ii) sticky $\mathcal{HDP}$-HMM, (iii) $\mathcal{IBP}$-HMM and (iv) $\mathcal{IBP}$-HMM + SPCM-$\mathcal{CRP}$ mixture model. The first variant is treated as our baseline, as the expected number of states $K$ is computed through model selection with the AIC/BIC scores. The last variant is simply a decoupled approach where segmentation and feature clustering are performed in a two-step procedure. In this approach, we initially estimate the features $F$ by running the $\mathcal{IBP}$-HMM sampler multiple times and selecting the best iteration for each run. Then, we run the SPCM-$\mathcal{CRP}$ mixture model sampler on the emission models $\Theta$ for 500 iterations and select the best clustering $Z$ based on the highest log posterior. It must be noted that this approach is already biased to yield higher performance as the feature clustering is applied to the ``best features'' obtained from the MCMC sampler. Nevertheless, in Table \ref{tab:spcm-segm-compare} we show that our coupled sampler is capable of achieving the same performance and even better in some cases, not only improving feature clustering but also segmentation.
\begin{table*}[!t] \begin{center}
\resizebox{\textwidth}{!}{\begin{tabular}{cc|ccc|ccc|cc}
\hline
\hline
\multicolumn{1}{c}{\multirow{4}{*}{Dataset}} & \multicolumn{1}{c}{\multirow{4}{*}{Approaches}} & \multicolumn{6}{c}{\multirow{2}{*}{Metrics (Segmentation | Feature Clustering)}}
\\
& & \multicolumn{1}{c}{\multirow{2}{*}{Hamming}} & \multicolumn{1}{c}{\multirow{2}{*}{GCE}} & \multicolumn{1}{c}{\multirow{2}{*}{VI}} & \multicolumn{1}{c}{\multirow{2}{*}{Purity}} & \multicolumn{1}{c}{\multirow{2}{*}{NMI}} &
\multicolumn{1}{c}{\multirow{2}{*}{$\mathcal{F}$}} & \multicolumn{1}{c}{\multirow{2}{*}{$K$}} &
\multicolumn{1}{c}{\multirow{2}{*}{$K_{Z}$}}
\\ \\ \hline
\multicolumn{1}{c}{\multirow{4}{*}{Toy 2D}} & HMM w/MS & 0.385 (0.016) & 0.026 (0.082) & 1.025 (0.229) & 0.977 (0.074) & 0.679 (0.109) & 0.752 (0.041) & 4 & -\\
& sticky HDP-HMM & 0.379 (0.000) & 0.000 (0.001) & 0.956 (0.006) & 1.000 (0.000) & 0.713 (0.002) & 0.765 (0.000) & 4 & - \\
\multicolumn{1}{c}{\multirow{3}{*}{$K_Z$ (2)}}& $\mathcal{IBP}$-HMM & 0.379 (0.000) & 0.000 (0.000) & 0.953 (0.000) & 1.000 (0.000) & 0.714 (0.000) & 0.765 (0.000) & 4 & - \\
& $\mathcal{IBP}$-HMM + SPCM-$\mathcal{CRP}$ & \textbf{0.000 (0.000)} & \textbf{0.000 (0.000)} & \textbf{0.000 (0.000)} & \textbf{1.000 (0.000)} & \textbf{1.000 (0.000)} & \textbf{1.000 (0.000)} & \textbf{4 (0.0)} & \textbf{2 (0.0)} \\
& ICSC-HMM & \textbf{0.000 (0.000)} & \textbf{0.000 (0.000)} & \textbf{0.000 (0.000)} & \textbf{1.000 (0.000)} & \textbf{1.000 (0.000)} & \textbf{1.000 (0.000)} & \textbf{4 (0.0)} & \textbf{2 (0.0)} \\ \hline
\multicolumn{1}{c}{\multirow{3}{*}{7D Grating}} & HMM w/MS & 0.542 (0.009) & 0.215 (0.022) & 1.827 (0.043) & 0.874 (0.015) & 0.318 (0.037) & 0.596 (0.010) & 5 & - \\
\multicolumn{1}{c}{\multirow{3}{*}{Dataset}} & sticky HDP-HMM & 0.299 (0.190) & 0.150 (0.029) & 1.213 (0.403) & 0.908 (0.023) & 0.502 (0.092) & 0.766 (0.119) & 4.7 (0.8) & - \\
\multicolumn{1}{c}{\multirow{3}{*}{$K_Z$ (3)}} & $\mathcal{IBP}$-HMM & 0.336 (0.170) & 0.094 (0.029) & 1.208 (0.364) & \textbf{0.940 (0.024)} & \textit{0.573 (0.066)} & 0.769 (0.100) & 5.1 (1.28) & - \\
& $\mathcal{IBP}$-HMM + SPCM-$\mathcal{CRP}$ & \textbf{0.122 (0.044)} & \textit{0.107 (0.045)} & \textit{0.755 (0.114)} & 0.897 (0.050) & 0.572 (0.203) & \textit{0.879 (0.059)} & 5.1 (1.28) & \textbf{3.0 (0.94)} \\
& ICSC-HMM & \textit{0.126 (0.075)} & 0.108 (0.031) & \textbf{0.751 (0.231)} & \textit{0.918 (0.022)} & \textbf{0.633 (0.069)} & \textbf{0.890 (0.039)} & 4.6 (1.07) & \textbf{3.4 (1.07)} \\ \hline
\multicolumn{1}{c}{\multirow{3}{*}{13D Rolling}} & HMM w/MS & 0.463 (0.036) & 0.188 (0.024) & 1.897 (0.108) & \textit{0.865 (0.031)} & \textbf{0.547 (0.026)} & 0.667 (0.036) & 6 & - \\
\multicolumn{1}{c}{\multirow{3}{*}{Dataset}} & sticky HDP-HMM & 0.630 (0.039) & \textbf{0.176 (0.037)} & 2.611 (0.144) & \textbf{0.874 (0.031)} & \textit{0.497 (0.027)} & 0.525 (0.038) & 11.9 (1.59) & - \\
\multicolumn{1}{c}{\multirow{3}{*}{$K_Z$ (3)}}& $\mathcal{IBP}$-HMM & 0.587 (0.046) & 0.224 (0.022) & 2.507 (0.141) & 0.838 (0.022) & 0.471 (0.021) & 0.555 (0.038) & 9.5 (1.5) & - \\
& $\mathcal{IBP}$-HMM + SPCM-$\mathcal{CRP}$ & \textit{0.362 (0.106)} & 0.226 (0.161) & \textit{1.586 (0.514)} & 0.687 (0.071) & 0.473 (0.109) & \textit{0.699 (0.093)} & 9.5 (1.5) & \textbf{3.1 (1.6)}\\
& ICSC-HMM & \textbf{0.354 (0.083)} & \textit{0.180 (0.099)} & \textbf{1.367 (0.298)} & 0.673 (0.050) & 0.489 (0.102) & \textbf{0.713 (0.076)} & 9.3 (1.05) & \textbf{3.0 (1.15)} \\ \hline
\multicolumn{1}{c}{\multirow{3}{*}{26D Peeling}} & HMM w/MS & 0.514 (0.030) & 0.400 (0.005) & 2.452 (0.087) & \textit{0.673 (0.003)} & \textbf{0.420 (0.009)} & 0.545 (0.039) & 6 & - \\
\multicolumn{1}{c}{\multirow{3}{*}{Dataset}} & sticky HDP-HMM & 0.453 (0.055) & 0.386 (0.045) & 2.306 (0.228) & 0.670 (0.018) & 0.392 (0.042) & 0.612 (0.054) & 6.3 (0.95) & - \\
\multicolumn{1}{c}{\multirow{3}{*}{$K_Z$ (5)}}& $\mathcal{IBP}$-HMM & 0.413 (0.026) & 0.390 (0.019) & 2.492 (0.071) & \textbf{0.712 (0.017)} & \textit{0.413 (0.019)} & 0.640 (0.018) & 8.4 (0.89) & - \\
& $\mathcal{IBP}$-HMM + SPCM-$\mathcal{CRP}$ & \textit{0.397 (0.126)} & \textbf{0.206 (0.154)} & \textbf{1.853 (0.329)} & 0.633 (0.138) & 0.385 (0.207) & \textbf{0.649 (0.112)} & 8.4 (0.89) & \textbf{4.8 (1.92)} \\
& ICSC-HMM & \textbf{0.390 (0.088)} & \textit{0.211 (0.108)} & \textit{1.913 (0.183)} & 0.640 (0.090) & 0.375 (0.126) & \textit{0.647 (0.070)} & 6.4 (1.26) & \textbf{4.2 (1.31)} \\ \hline
\hline \end{tabular}} \end{center}
\caption{\small Performance Comparison of Segmentation and state clustering with \textbf{Our Proposed Approach} vs. HMM w/Model Selection, sticky HDP-HMM, $\mathcal{IBP}$-HMM and decoupled $\mathcal{IBP}$-HMM + SPCM-$\mathcal{CRP}$ \textit{(presenting mean (std) of metrics over 10 runs).} \label{tab:spcm-segm-compare}} \end{table*}
As in the original $\mathcal{IBP}$-HMM, all dimensions of our time-series have zero-mean. Moreover, in our real-world datasets, the range of each dimension is considerably different, for example the position variables are between $[0.1, 1]$ meters, while the forces range between $[1,25]$, hence, we scale each dimension such that they all lie within a comparable range and the Gaussian distributions estimated by each HMM are not skewed towards a few dimensions. While analyzing the results in Table \ref{tab:spcm-segm-compare} one must take into consideration that these are mere estimates of how well each approach reaches a level of human-segmentation, which in turn might not be the optimal one. Moreover, it is well-known that often such metrics do not fully represent the power of a segmentation algorithm and many papers rely on visual and qualitative evaluations. For this reason, we illustrate segmentations of the 3D end-effector trajectories of each real-world dataset labeled with (i) the ground truth (provided by a human), (ii) \textit{transform-dependent} segmentations taken from $S$ and (iii) \textit{transform-invariant} segmentations taken from $Z$.
\begin{figure*}\label{fig:segmentation_results}
\end{figure*}
For the $7D$ Grating Dataset, we see that our coupled approach yields the highest clustering performance as well as comparable segmentation wrt. the decoupled model. However, instead of recovering the 3 expected actions (i) reach (ii) grate and (iii) trash, we recovered 4, this is mainly due to a sub-segmentation of the reaching action into two segments. Such a sub-segmentation is not entirely incorrect, if we analyze the time-series in Figure \ref{fig:segmentation_true}, we can see that there seems to be two phases of the reaching motion: (i) the initial one which varies wrt. its starting position and (ii) an alignment phase of the vegetable wrt. the grater. To recall, these datasets are intended for robot learning; i.e. through this segmentation algorithm we decompose the task into primitive actions and learn individual dynamics for each, following the approaches in \cite{Pais:IEEE:2015, Figueroa:HRI:2016}. Having an expected primitive being sub-segmented is simply a way of representing the task on a different level of granularity.
Regarding the $13D$ Rolling dataset, we can see that indeed our coupled model yields the best performance in both feature clustering and segmentation. In this dataset, not only are the time-series transformed, but they are also intersecting each other. This causes the considerably low performance of all of the other HMM variants which assume that all emission models $\Theta$ are used. Given the flexibility of the $\mathcal{IBP}$-HMM we can represent each time-series as a single HMM with different emission models and alleviate this problem. As shown in Figure \ref{fig:segmentation_results}, this yields an excessive number of features. Yet, our coupled model is capable of grouping all of these unique features into a time-series clustering extremely close to the ground truth.
Finally, in the more challenging but less \textit{transformed} 26D Peeling dataset, we can see that the proposed algorithm yields comparable results to the decoupled approach. Given that each time-series is generated by peeling a different zucchini at a different location we can see how the \textit{transform-dependent} features are excessive, as in the previous dataset. Even though the \textit{transform-invariant} segmentation seems to be quite close to the ground truth the overall metrics yield considerably lower results than the other datasets. This is due to the fact that the \textit{rotate} action (which is extremely similar to the \textit{reach-to-peel}) was not properly extracted as a segment. This however, can be alleviated by adding more features, such as change in color or shape of the manipulated object.
\section{Discussion} In this work, we presented \textbf{three} main contributions for \textit{transform-invariant} analysis of data. We specifically focused on introducing (i) a novel measure of similarity in the space of Covariance matrices which allowed us to derive (ii) non-parametric clustering and (iii) segmentation algorithms for applications which seek \textit{transform-invariance}.
The proposed similarity function (B-SPCM) is inspired by spectral graph theory and the geometry of convex sets. Although simple, we proved its effectiveness on several synthetic and real-world datasets with promising results. Even though the presented similarity function is, in fact, specific to Covariance matrices, the presented clustering approach (SPCM-$\mathcal{CRP}$ mixture model) is not. In fact, any dataset that has a proper measure of similarity which can generate a matrix $\mathbf{S}$ and a corresponding set of real-valued observations $\mathbf{Y}$, be it a spectral embedding or not, can be clustered with this algorithm.
Regarding the proposed segmentation and action discovery approach (ICSC-HMM) we presented successful results on difficult datasets which would otherwise need manual tuning and pre-processing. The novelty of our approach does not lie on the fact that we use the $\mathcal{IBP}$-HMM, it lies on the joint estimation of \textit{transform-invariant} segmentations. One could easily couple the SPCM-$\mathcal{CRP}$ with the $\mathcal{HDP}$-HMM, however, this non-parametric formulation does not provide the flexibility of assuming different switching dynamics which would limit the number of \textit{transform-dependent} states. An interesting extension to this work, would be to handle time-series with missing dimensions. For example, in our last dataset (26-D Peeling), we indeed have two sets of time-series with 6 extra features, corresponding to color change, which help disambiguate two actions for those specific time-series. However, since the features are non-existent in the remaining time-series, we could not use them in this analysis. Nevertheless, since the $\mathcal{IBP}$-HMM provides the flexibility of having unique features per time-series, this can be exploited to model Gaussian emission of different dimensions and a new metric could be introduced which handles such differences.
\acks{This research was supported by the European Union via the H2020 project \textit{COGIMON} $ H2020-ICT-23-2014 $. } \appendix \section{Sampling from the $\mathcal{NIW}$ distribution} \label{app:Sample_NIW} The $\mathcal{NIW}$ \citep{Gelman:BDA:2003} is a four-parameter $\lambda = \{\mu_0,\kappa_0,\Lambda_0,\nu_0\}$ multivariate distribution generated by
$\Sigma \sim \mathcal{IW}(\Lambda_0,\nu_0)$ and $\mu|\Sigma \sim \mathcal{N}\left(\mu_0,\frac{1}{\kappa_0}\Sigma\right)$, where $\kappa_0,\nu_0 \in \mathds{R}_{>0}$, and $\nu_0 > P -1$ indicates degrees of freedom of the $P$-dimensional scale matrix $\Lambda \in \mathds{R}^{P\times P}$ which should be $\Lambda \succ 0$. The density of the $\mathcal{NIW}$ is defined by \begin{equation} \begin{aligned}
p(\mu, \Sigma \hspace{2pt} | \hspace{2pt} \lambda) & = \mathcal{N}\left(\mu | \mu_0,\frac{1}{\kappa_0}\Sigma\right)\mathcal{IW}(\Sigma \hspace{2pt}|\hspace{2pt} \Lambda_0, \nu_0)\\
& = \frac{1}{Z_0}|\Sigma|^{-[(\nu_0 + d)/2 + 1]} \exp \left\lbrace -\frac{1}{2} \texttt{tr}(\Sigma^{-1}\Lambda_0) \right\rbrace\\ & \times \exp \left\lbrace -\frac{\kappa_0}{2} (\mu - \mu_0)^{T}\Sigma^{-1}(\mu - \mu_0) \right\rbrace\\ \end{aligned} \end{equation}
where $Z_0 =\frac{2^{\nu_0 d/2}\Gamma_d(\nu_0 /2)(2\pi/\kappa_0)^{d/2}}{|\Lambda_0|^{\nu_0/2}} $ is the normalization constant.
A sample from a $\mathcal{NIW}$ yields a mean $\mu$ and covariance matrix $\Sigma$. One first samples a matrix from an $\mathcal{W}^{-1}$ parameterized by $\Lambda_0$ and $\nu_0$; then $\mu$ is sampled from a $\mathcal{N}$ parameterized by $\mu_0, \kappa_0, \Sigma$. Since $\mathcal{N}$ and $\mathcal{NIW}$ are a conjugate pair, the term $\left( \prod_{i \in \mathbf{Z}(C)=k} p\left( \mathbf{y}_{i} \hspace{2pt}|\hspace{2pt} \theta \right) \right) p\left( \theta\hspace{2pt}|\hspace{2pt}\lambda \right)$ in \eqref{eq:integral} also follows a $\mathcal{NIW}$ \citep{Murphy:CBG:2007} with new parameters $ \lambda_n = \{\mu_n,\kappa_n,\Lambda_n,\nu_n\}$ computed via the following posterior update equations, \begin{equation} \label{eq:niw_updates} \begin{aligned}
p(\mu,\Sigma|\mathbf{Y}_{1:n},\lambda) & = \mathcal{NIW}(\mu,\Sigma | \mu_n,\kappa_n,\Lambda_n,\nu_n)\\ \kappa_n = \kappa_0 + n, \qquad &\nu_n = \nu_0 + n, \qquad \mu_n = \frac{\kappa_0\mu_0 + n\bar{\mathbf{Y}}}{\kappa_n}\\ \qquad \Lambda_n = \Lambda_0 & + S + \frac{\kappa_0n}{\kappa_n}(\bar{\mathbf{Y}} - \mu_0)(\bar{\mathbf{Y}} - \mu_0)^T \end{aligned} \end{equation} where $n$ is the number of samples $\mathbf{Y}_{1:n}$, whose sample mean is denoted by $\bar{\mathbf{Y}}$ and $S$ is the scatter matrix, as introduced earlier.
\section{Split-merge sampler for the $\mathcal{IBP}$-HMM \citep{Hughes:NIPS:2012}} \label{app:split-merge} A proposal selects anchor objects and features to split/merge at random to improve upon finding anchors that are \textit{similar}. Following are the algorithmic details of such sampler: \begin{enumerate}[leftmargin=*] \item Randomly select a pair of sequences $\{i,j\}$ \item Select candidate feature pair $\{k_i,k_j\}$ by first selecting a random feature $k_i$, then select $k_j$ given the following proposal distribution: \begin{equation} \begin{split}
q_k(k_i,k_j) = \text{Unif}(k_i|f_i)q(k_j|k_i,f_j) \quad \text{where}\\
q(k_j|k_i,f_j) = \begin{cases}\! 2C_jf_{jk} & \text{if} \quad k = k_i \\ f_{jk}\frac{m(\mathbf{x}_{k_i},\mathbf{x}_k)}{m(\mathbf{x}_{k_i})m(\mathbf{x}_k)} & \text{otherwise}. \\ \end{cases} \label{eq:split-merge} \end{split} \end{equation} Here $m(\cdot)$ is the marginal probability of the data given the state sequence $\{z^{(i)}\}_{i=1}^M$ (collapsing away $\Theta$) and $C_j = \sum_{k_i \neq k_j} f_{jk}m(\mathbf{x}_{k_i},\mathbf{x}_k)/m(\mathbf{x}_{k_i})m(\mathbf{x}_k)$. \item One then computes the MH Ratio to accept/reject a merge/split with the following distribution: \begin{equation}
q(\Psi^*|\Psi) = q_k(k_i,k_j)q(k_j|k_i,f_j) \end{equation} where $\Psi$ is the current state of the Markov Chain and $\Psi^*$ is the proposed change. \end{enumerate} \eqref{eq:split-merge} encourages choices $k_i = k_j$ for a feature merge that explains \textit{similar} data via the \textit{marginal likelihood ratio}. A large ratio indicates that the data assigned to both $k_i , k_j$ are better modeled together rather than independently.
\section{External Clustering Metrics} \label{app:clustering_metrics} \begin{enumerate}[leftmargin=*] \item \underline{\textit{Purity}} is a simple metric that evaluates the quality of the clustering by measuring the number of clustered data-points that are assigned to the same class, as follows, \begin{equation}
Purity(\mathcal{S},\mathcal{C}) = \frac{1}{N} \sum_{j} \underset{k}{\max}|s_j\cap c_k| \end{equation} where $\mathcal{S} = \{s_1,\dots,s_J\}$ is the set of classes and $\mathcal{C} = \{c_1,\dots,c_K\}$ the set of predicted clusters. $s_j$ is the set of data-points in the $j$-th class, whereas $c_k$ is the set of data-points belonging to the $k$-th cluster \citep{Manning:2008:IIR}. \item \textit{Normalized Mutual Information \underline{(NMI)}} is an information-theoretic metric, which measures the trade-off between the quality of the clustering and the total number of clusters, as follows, \begin{equation} NMI(\mathcal{S},\mathcal{C}) = \frac{\mathcal{I}(S,C)}{[\mathcal{H}(S) + \mathcal{H}(C)]/2} \end{equation} for mutual information $\mathcal{I}(\mathcal{S},\mathcal{C}) = \sum_{j}\sum_{k}P(s_j\cap c_k) \log\frac{P(s_j\cap c_k)}{P(s_j)P(c_k)}$ and entropy $ \mathcal{H}(\mathcal{C}) = -\sum_{k}P(c_k)\log P(c_k)$. Both $\mathcal{I}(\mathcal{S},\mathcal{C}) $ and $\mathcal{H}(\mathcal{C})$ have closed-form ML estimates \citep{Manning:2008:IIR}.
\item The \underline{$\mathcal{F}$-measure} is a well-known classification metric which represents the harmonic mean between Precision ($P = \frac{TP}{TP+FP}$) and Recall ($R = \frac{TP}{TP+FN}$). In the context of clustering, Recall and Precision of the $k$-th cluster wrt. the $j$-th class are $R(s_j,c_k) = \frac{|s_j \cap c_k|}{|s_j|}$ and $P(s_j,c_k) = \frac{|s_j \cap c_k|}{|c_k|}$, respectively. The $\mathcal{F}$-measure of the $k$-th cluster wrt. the $j$-th class is then, \begin{equation} \mathcal{F}_{j,k} = \frac{2 P(s_j,c_k) R(s_j,c_k)}{P(s_j,c_k) + R(s_j,c_k)}, \end{equation} and the $F$-measure for the overall clustering is computed as, \begin{equation}
\mathcal{F}(\mathcal{S},\mathcal{C}) =\sum_{s_j \in \mathcal{S}} \frac{|s_j|}{|\mathcal{S}|}\underset{k}{\max}\{\mathcal{F}_{j,k}\}. \end{equation} \end{enumerate}
\section{External Segmentation Metrics} \label{app:segmentation_metrics}
\begin{enumerate}[leftmargin=*] \item The \textit{\underline{Hamming}} distance measures the distance between two sets of strings (or vectors) as the number of mismatches between the sets. In our case, the segmentation labels $S^{true} \in F^{K^{true}}, S^{est} \in F^{K^{est}}$ may possess different values. After finding the correspondence between $S^{est} \rightarrow S^{true}$ by solving the assignment problem \cite{Mulmuley1987}, one computes: \begin{equation}
d(S^{true},S^{est}) = \sum_{s_{i}^{est} \in S^{est}} \left(\sum_{s_k^{true} \neq s_j^{true},s_k^{true} \cap s_i^{est} \neq 0} \left|s_k^{true} \cap s_i^{est}\right| \right), \label{eq:hamming} \end{equation}
which corresponds to the total area of intersections between $S^{est}$ and $S^{true}$~\cite{Hamming}. Equation~\eqref{eq:hamming} can be normalized as follows: $d(S^{true},S^{est})/|S^{true}|$.
\item The \textit{Global Consistency Error \underline{(GCE)}} is a measure that takes into account the differences in granularity while comparing two segmentations \cite{MartinFTM01}. If one segment is a proper subset of the other it is considered as an area of \textit{refinement}, rather than an error. Thus, for two sets of segmentations $S^{true} \in F^{K^{true}}, S^{est} \in F^{K^{est}}$, the GCE is computed as follows: \begin{equation} \begin{split} GCE(S^{true},S^{est}) & = \\ \frac{1}{M_s}\min & \left\{ \sum\limits_{i} E(S^{true},S^{est}, s_i), \sum\limits_{i} E(S^{est},S^{true}, s_i) \right\} \end{split} \label{eq:gce} \end{equation} for label location $s_i$ and local refinement error $E(S^1,S^2,s_i)$ which measures the degree to which two segmentations agree at label location $s_i$ and $M_s$ is the size of the segment containing $s_i$.
\item \textit{Variation of Information \underline{(VI)}} is a metric which defines the distance between two segmentations as the average conditional entropy of one segmentation given the other, as follows: \begin{equation} VI(S^{true}, S^{est}) = \mathcal{H}(S^{true}) + \mathcal{H}(S^{est}) - 2\mathcal{I}(S^{true},S^{est}) \label{eq:vi} \end{equation} where $\mathcal{H}(\cdot)$ and $\mathcal{I}(\cdot,\cdot)$ are computed as in Appendix~\ref{app:clustering_metrics}. Intuitively, \eqref{eq:vi} is a measure of the amount of randomness in one segmentation that cannot be explained by the other \cite{Meila:2005:CCA}. \end{enumerate}
\vskip 0.2in
\end{document} |
\begin{document}
\title{An efficient estimator of the parameters of the Generalized Lambda Distribution}
\author{Dilanka S. Dedduwakumara \textsuperscript{a}, Luke A. Prendergast \textsuperscript{a} and Robert G. Staudte \textsuperscript{a} \\ \\ \textsuperscript{a}Department of Mathematics and Statistics, La Trobe University}
\maketitle
\noindent\textbf{ABSTRACT. Estimation of the four generalized lambda distribution parameters is not straightforward, and available estimators that perform best have large computation times. In this paper, we introduce a simple two-step estimator of the parameters that is comparatively very quick to compute and performs well when compared with other methods. This computational efficiency makes the use of bootstrapping to obtain interval estimators for the parameters possible. Simulations are used to assess the performance of the new estimators and applications to several data sets are included.}
\noindent \textit{Key words: bootstrap interval estimator, generalized lambda distribution, probability density quantile}
\section{Introduction}
The generalized lambda distribution \citep[GLD,][]{ramberg1974approximate} is a flexible, four parameter distribution that can approximate a large variety of distributions of varying shapes. Due to this flexibility, the GLD has become a popular distribution to model data in many fields, including economics and finance \citep[e.g.][]{pfaff2016financial}. Given that it has four parameters, location, scale and two shape parameters, estimation is not a trivial task, and estimation methods continue to attract attention in the literature. \cite{doi:10.1080/25742558.2019.1602929} introduced a method for choosing optimal parameters for generalized distributions to approximate other distributions. The method uses the probability density quantile function \citep[pdQ,][]{staudte2017} to first find the optimal shape parameters and is computationally quick, simple to implement and often outperforms other methods. Motivated by this, we introduce estimators of the GLD shape parameters arising from the estimated pdQ. The estimators, which compare favourably to other methods, are computationally efficient, providing estimates in just a fraction of the time required for other methods that are good estimators of the parameters. This efficiency makes obtaining interval estimators for the GLD parameters via bootstrapping possible, which is a considerable advantage given the lack of intervals in the literature.
We begin by providing important definitions and notations in Section 2 before reviewing several popular estimators already available in Section 3. The new estimator is introduced in Section 4 and performance is assessed by simulations in Section 5. Several data applications are given in Section 6 before we provide concluding remarks in Section 7.
\section{Definitions}
Throughout, let $Q(u)=F^{-1}(u)$ denote the quantile function associated with distribution function $F$ for $u\in (0,\ 1)$, and let $f = F'$ denote the probability density function where $f(x)> 0$ for all $x$ in its domain.
\subsection{Probability density quantile functions}
The \textit{quantile density function} \citep{parzen1979}, also called the \textit{sparsity index} by \cite{tukey1965}, is denoted $q(u)=Q'(u)=1/f\left[Q(u)\right]$. The quantile density function is mainly used in non-parametric modeling and inference. \cite{parzen1979} called the reciprocal of this quantile density function the \textit{density quantile function} which we denote here by $f_Q(u) = f[Q(u)].$ In a recent paper, \cite{staudte2017} introduced the \textit{probability density quantile} (pdQ) denoted $f^*_Q(u)=f_Q(u)/\kappa$ where $\kappa = E[f_Q(U)]$ and $U\sim \text{Unif}(0,1)$. This pdQ function is free from location and scale and therefore can easily be used to examine shape behaviours of a distribution; see also \cite{staudte2018} who provide other insights for the pdQ. Moreover, it is defined on the finite domain $[0,1]$ for all lattice distributions and continuous distributions having square-integrable densities.
\subsection{Empirical pdQ for continuous distributions}
Let $X_1,\ldots,X_n$ denote a random sample of size $n$ from $F$ and let $X_{(1)}\leq X_{(2)} \leq \ldots \leq X_{(n)}$ denote the ordered sample. In this section we introduce the empirical pdQ earlier discussed by \cite{staudte2017}. For $k_b(\cdot) = k(\cdot/b)/b$ denoting a kernel function and $b$ a bandwidth, we start by estimating $q(u)$ using the quantile density estimator \begin{equation}\label{eqn:qnhat} \hat{q}_n(u)=\sum_{i=1}^{n} X_{(i)}\bigg\{k_b\bigg(u-\frac{(i-1)}{n}\bigg)-k_b\bigg(u-\frac{i}{n}\bigg)\bigg\} \end{equation} which consists of a linear combination of order statistics. This kernel density estimator has been studied extensively, e.g. see \cite{jones1992estimating}, \cite{falk1986estimation} and \cite{welsh1988asymptotically} for some notable works. The choice of bandwidth is important and we choose our bandwidth to be $b(u)=(15/n)^{1/5}\{q(u)/q^{''}(u)\}^{2/5}$ since it minimizes the asymptotic mean squared error of $\hat{q}(u)$. \cite{prendergast2016exploiting} call $q(u)/q^{''}(u)$ the quantile optimality ratio (QOR) and estimate it to obtain a suitable bandwidth.
By using a discrete set of $u$s defined by $\{u_j=(j-1/2)/J\}^J_{j=1}$ and for some integer $J$, the empirical pdQ can be defined as \begin{equation}\label{eqn:fQ*hat} \hat{f}_{Q}^*(u_j)=\frac{1}{\hat{\kappa}\hat{q}_n(u_j)} \quad \quad \text{where} \quad \hat{\kappa}=\frac{1}{J}\sum_{j=1}^{J}\frac{1}{\hat{q}_n(u_j)}. \end{equation}
\subsection{The Generalized Lambda Distribution}
The generalized lambda distribution (GLD) is a very flexible distribution that can approximate, or is equal to exactly, many other distributions for appropriately chosen parameters. Although several definitions for the GLD are available, we prefer the parameterization by \cite{freimer1988} since it is defined for all parameter value choices, with the exception that the scale parameter must be positive. The distribution is defined in terms of its quantile function which is \begin{equation}\label{eqn:Q} Q(u)=\lambda_1+\frac{1}{\lambda_2}\left[\frac{u^{\lambda_3}-1}{\lambda_3} - \frac{(1-u)^{\lambda_4}-1}{\lambda_4}\right] \end{equation} where $\lambda_1$ is a location parameter, $\lambda_2>0$ an inverse scale parameter and $\lambda_3, \lambda_4$ are shape parameters. It is easy to see that the quantile density function for the GLD is $q(u)=Q'(u)=\lambda_2^{-1}\left[u^{\lambda_3-1} + (1-u)^{\lambda_4-1}\right]$ so that the density quantile function is \begin{equation}\label{fQ:GLD} f_Q(u)=\frac{\lambda_2}{u^{\lambda_3-1} + (1-u)^{\lambda_4-1}}. \end{equation} In general, no closed-form solution for the integral $\kappa =\int^1_0 f_Q(u)du$ exists, but it can be evaluated computationally and quite efficiently since integration occurs only between the finite bounds zero and one.
\section{Existing Methods}\label{Methods}
Having four parameters, GLD estimation is not straightforward, and several estimation methods are available in the literature. For comparison with the pdQ method, we consider the more common estimation methods that are believed to provide good estimates of the true parameter values. These methods are available in either or both of the \textit{gld} \citep{Kingt2016gld} and \textit{bda} packages \citep{wang2015bda} in the R statistical software \citep{R}. We briefly describe those estimation methods and an overview with more technical details can be found in \cite{dean2013improved}.
The trimmed L-moments method (TL) is discussed in \cite{asquith2007moments} and \cite{dean2013improved} and is a robust version of the matching L-moments method \citep{karvanen2002adaptive}. L-moments are estimated by linear combinations of order statistics and are related to conventional moments. Trimmed L-moments (see: \cite{elamir2003trimmed} for more details) are generalizations of these L-moments which applies zero weight to extreme observations. GLD parameters are estimated through this method by minimizing the difference between the sample trimmed L-moments of the data and the trimmed L-moments of the fitted GLD distribution.
The percentile matching method (PM) equates a selected number of empirical percentiles with their GLD counterparts to obtain a set of non-linear equations which are then solved to obtain the optimal GLD parameters. The PM method is discussed by \cite{karian1999fitting} and \cite{tarsitano2005estimation}. Further, \cite{karian2003comparison} demonstrate the superiority of the PM method compared to the method of matching moments and the method of L-moments.
Maximizing the likelihood (ML) to obtain the GLD parameters has also been a popular method and is considered by \cite{su2007fitting} and \cite{su2007numerical}. First, initial estimated parameter values using the method of moments or the PM method are chosen and used as a starting point to seek the values that maximize the numerical log likelihood.
There are instances where the Maximum Likelihood method fails as the support depends upon the parameters to be estimated. In such a scenario, the Maximum product of spacings (MPS) was introduced by \cite{cheng1983estimating,ranneby1984maximum} and used by \cite{chalabi2012flexible} to estimate GLD parameters. Here spacings refer to the differences between the cumulative distribution function at neighbouring data points and the parameters are estimated by maximizing the geometric mean of these spacings.
Rather than the spacing between transformed data points as in MPS, \cite{titterington1985comment} suggested spacing between transformed, adjacently averaged data points. This approach is called the Titterington Method (TM), and more details with the GLD estimation can be found in \cite{dean2013improved}.
\cite{owen1988starship} introduces the Starship Method (SM), a computer-intensive method that focuses on the fit to the base distribution of the inversely transformed data. \cite{king1999starship} developed this concept to be used for generalized lambda distribution where for a given data set the distribution function of GLD is obtained numerically, and the parameter values are chosen such that it minimizes the goodness-of-fit to the uniform distribution. One major drawback of this method is its slow computation time, especially with large sample sizes.
The method of distributional least absolutes (DLA) obtains the optimal GLD parameters by minimizing the sum of absolute deviations between the order statistics and the corresponding medians. See \cite{dean2013improved} for more details.
\section{Point and interval estimators using the pdQ}
Obtaining optimal GLD parameters for a specific distribution using the pdQ method has been discussed in \cite{doi:10.1080/25742558.2019.1602929} and we adopt this idea to model empirical data as detailed in the steps below. Throughout this section the estimated $p^{th}$ quantile, the empirical pdQ of the data and the corresponding GLD pdQ are denoted by $\widehat{x}_p$, $\hat{f}_{Q}^*$ and $f_{Q}^*$ respectively.
\subsection{Point estimators}
\subsubsection*{Step 1: Estimating the shape parameters}
Since the pdQ is free of location and scale, we can use it to focus only on estimating the two shape parameters, $\lambda_3$ and $\lambda_4$. Using a discrete set of probability values given as $\{u_j=(j-1/2)/J\}^J_{j=1}$, the estimated shape parameters are those that minimize the sum of squared distances between the empirical pdQ and GLD pdQ. That is, \begin{equation}\label{eqn:obj} (\widehat{\lambda}_3,\widehat{\lambda}_4)=\argmin_{\lambda_3,\lambda_4} \sum^J_{j=1}\left[\hat{f}_{Q}^*(u_j)-f_Q^*(u_j;\lambda_3,\lambda_4)\right]^2. \end{equation}
To find the shape parameters that minimize the sum of squared distances, it is simple to use a computational optimizer. We use the R function \textit{nlminb} in \textit{stats} package for this, which we find to be both simple to use and quick to compute. As starting values for the optimizer we chose to apply the objective function in \eqref{eqn:obj} to a grid of values for $\lambda_3$ and $\lambda_4$ consisting of each paired combination of $\lambda_3,\lambda_4$ from $-0.9, -0.5, -0.1, 0, 0.1, 0.2, 0.4, 0.8, 1, 1.5$ and choose those values that result in the smallest value. This grid of values is used in \cite{dean2013improved} and considered as the default choice in \textit{gld} \citep{Kingt2016gld} package for other GLD estimation methods. This covers a wide span of the $\lambda_3,\lambda_4$ values and also we can expect uniformity between methods for the comparisons to follow. Our simulations, to be summarized in the next section, reveal that $J=50$ is more than adequate to obtain good estimates for moderate to larger sample sizes and there is little value in increasing $J$ to be more than this. Therefore, in what follows, we use $J=50$ for sample sizes greater than 200. We found that $J=25$ worked well for smaller sample sizes.
\subsubsection*{Step 2: Estimating the location and scale parameters}
Given the estimated shape parameters from Step 1, we then match sample quartiles $\widehat{x}_p$, using $p \in \{0.25,0.5,0.75\}$, to their theoretical GLD counterparts to generate the linear equations \begin{equation}\label{eqn:Q} \widehat{x}_p=\lambda_1+\frac{1}{\lambda_2} c(p,\widehat{\lambda}_3,\widehat{\lambda}_4) \end{equation} where, \begin{equation*} c(p,\widehat{\lambda}_3,\widehat{\lambda}_4)= \left[\frac{p^{\widehat{\lambda}_3}-1}{\widehat{\lambda}_3} - \frac{(1-p)^{\widehat{\lambda}_4}-1}{\widehat{\lambda}_4}\right] \end{equation*}
By solving the system of three linear equations from \eqref{eqn:Q} using $p=0.25,0.5$ and $0.75$, we obtain our estimated inverse scale and location parameters as \begin{equation} \widehat{\lambda}_2=\frac{c(0.75,\widehat{\lambda}_3,\widehat{\lambda}_4)-c(0.25,\widehat{\lambda}_3,\widehat{\lambda}_4)}{\widehat{x}_{0.75}-\widehat{x}_{0.25}},\;\;\hat{\lambda}_1=\widehat{x}_{0.5}-\frac{c(0.5,\widehat{\lambda}_3,\widehat{\lambda}_4)}{\widehat{\lambda}_2} \end{equation} respectively.
\subsection{Interval estimators}
In order to calculate confidence interval estimates, we consider two commonly used bootstrap procedures; the percentile method \citep{efron1994introduction} and the bias-corrected and accelerated (BCa) bootstrap intervals \citep{efron1987better}. As the pdQ method requires little computational time, the computational cost to calculate the bootstrap confidence intervals is not prohibitive.
The percentile bootstrap method uses the upper and lower $\alpha/2$ percentiles of the GLD sample estimators as the interval bounds, whereas the BCa method also estimates a bias-corrected parameter and an acceleration parameter. The bias-correction parameter is related to the proportion of bootstrap estimates that are less than the observed statistic, and the acceleration parameter is proportional to the skewness of the bootstrap distribution. Unlike the percentile method, the BCa method corrects for both bias and skewness of the estimators.
In the following section to come, we look at the confidence intervals for the location ($\lambda_1$) and the skewness ($\lambda_3-\lambda_4$) of the underlying GLD distribution by using these bootstrap interval estimators. Note that the difference $\lambda_3-\lambda_4$ may be of interest since $\lambda_3-\lambda_4=0$ indicates that the distribution is symmetric.
\section{Simulation results}
In this section, we compare our pdQ estimation method with the existing methods presented in Section \ref{Methods}. To do so, we summarize the results for 500 simulated data sets by reporting the mean standard error and the mean absolute bias for estimates of the four GLD parameters compared to their true values. We adopt four representative FMKL GLD settings used in \cite{corlu2016estimating} to evaluate the performances of each method for these different shapes of GLD. The shapes of the probability density functions for these GLD settings are shown below in Figure \ref{fig1}.
\begin{figure}
\caption{Probability density functions of four representative GLDs.}
\label{fig1}
\end{figure}
Results are evaluated for a range of sample sizes: 100, 250, 500 and 1000. In the pdQ method, for sample size 100, we take $J=25$, and for other sample sizes, we use $J=50$.
\begin{landscape} \begin{table}[h]
\centering
\caption{This table shows a comparison of the mean standard error and the absolute bias of the GLD parameters represented by $\lambda_1$,$\lambda_2$,$\lambda_3$ and $\lambda_4$ between several fitting methods. The considered methods are Method of Probability Density Quantiles (pdQ), Method of TL-moments (TL), Percentile Matching Method (PM), Numerical Maximum Likelihood (ML), Maximum Product of Spacings (MPS), Titterington's Method (TM), Starship Method (SM), and Method of Distributional Least Absolutes (DLA). These values are calculated for 500 fitting results for actual parameters $\lambda_1=0$, $\lambda_2=1$, $\lambda_3=1.5$ and $\lambda_4=1.5$. } \label{tab1} \hspace*{-0.5cm} \begin{adjustbox}{max width=1.3\textwidth} \begin{tabular}{ccccccccccc} \toprule $n$ & Est & pdQ & TL & PM & ML & MPS & TM & SM & DLA\\ \midrule 100 & $\lambda_1$ & 0.013 (0.002) & 0.012 (0.005) & 0.016 (0.005) & 0.008 (0.004) & 0.011* (0.004) & 0.011* (0.004) & 0.012 (0.005) & 0.014 (0.005)\\
& $\lambda_2$ & 0.158 (0.245) & 0.314 (0.368) & 0.703 (0.531) & 0.202 (0.366) & 0.136* (0.240) & 0.142 (0.246) & 0.280 (0.333) & 0.205 (0.263)\\
& $\lambda_3$ & 0.176 (0.219) & 0.259 (0.333) & 0.404 (0.382) & 0.184 (0.329) & 0.175* (0.210) & 0.176 (0.204) & 0.241 (0.281) & 0.226 (0.219)\\
& $\lambda_4$ & 0.154* (0.220) & 0.252 (0.311) & 0.390 (0.362) & 0.184 (0.314) & 0.171 (0.196) & 0.177 (0.188) & 0.237 (0.260) & 0.210 (0.201)\\
\midrule 250 & $\lambda_1$ & 0.006 (0.006) & 0.005 (0.006) & 0.008 (0.009) & 0.004* (0.006) & 0.005 (0.003) & 0.005 (0.003) & 0.005 (0.004) & 0.007 (0.006)\\
& $\lambda_2$ & 0.070* (0.155) & 0.192 (0.301) & 0.331 (0.379) & 0.184 (0.353) & 0.086 (0.197) & 0.087 (0.198) & 0.141 (0.249) & 0.122 (0.191)\\
& $\lambda_3$ & 0.094* (0.150) & 0.185 (0.283) & 0.261 (0.300) & 0.171 (0.324) & 0.110 (0.186) & 0.104 (0.184) & 0.145 (0.232) & 0.156 (0.151)\\
& $\lambda_4$ & 0.094* (0.168) & 0.185 (0.309) & 0.268 (0.343) & 0.182 (0.345) & 0.118 (0.199) & 0.120 (0.195) & 0.156 (0.245) & 0.155 (0.182)\\
\midrule 500 & $\lambda_1$ & 0.003 (0.002) & 0.003 (0.002) & 0.005 (0.004) & 0.002 (0.004) & 0.003 (0.001) & 0.002* (0.000) & 0.003 (0.002) & 0.004 (0.003)\\
& $\lambda_2$ & 0.056* (0.146) & 0.135 (0.266) & 0.206 (0.308) & 0.181 (0.365) & 0.070 (0.195) & 0.069 (0.195) & 0.094 (0.209) & 0.091 (0.163)\\
& $\lambda_3$ & 0.080* (0.155) & 0.146 (0.275) & 0.193 (0.280) & 0.176 (0.350) & 0.092 (0.208) & 0.089 (0.207) & 0.110 (0.213) & 0.114 (0.148)\\
& $\lambda_4$ & 0.074* (0.161) & 0.148 (0.283) & 0.201 (0.301) & 0.186 (0.364) & 0.090 (0.211) & 0.091 (0.208) & 0.111 (0.221) & 0.119 (0.161)\\
\midrule
1000 & $\lambda_1$ & 0.002 (0.002) & 0.001* (0.001) & 0.002 (0.002) & 0.001* (0.003) & 0.001* (0.001) & 0.001* (0.000) & 0.002 (0.000) & 0.002 (0.001)\\
& $\lambda_2$ & 0.037* (0.095) & 0.094 (0.230) & 0.119 (0.231) & 0.174 (0.362) & 0.065 (0.196) & 0.066 (0.198) & 0.061 (0.175) & 0.058 (0.119)\\
& $\lambda_3$ & 0.058* (0.108) & 0.115 (0.261) & 0.136 (0.244) & 0.174 (0.358) & 0.084 (0.221) & 0.085 (0.220) & 0.082 (0.196) & 0.083 (0.121)\\
& $\lambda_4$ & 0.061* (0.102) & 0.112 (0.255) & 0.129 (0.233) & 0.182 (0.367) & 0.086 (0.216) & 0.085 (0.221) & 0.079 (0.194) & 0.082 (0.116)\\ \bottomrule \multicolumn{2}{l}{\textsuperscript{*}{Lowest MSE}} \end{tabular} \end{adjustbox} \end{table} \end{landscape}
\begin{landscape} \begin{table}[h]
\centering
\caption{This table shows a comparison of the mean standard error and the absolute bias of the GLD parameters represented by $\lambda_1$,$\lambda_2$,$\lambda_3$ and $\lambda_4$ between several fitting methods. The considered methods are Method of Probability Density Quantiles(pdQ), Method of TL-moments (TL), Percentile Matching Method (PM,) Numerical Maximum Likelihood (ML), Maximum Product of Spacings (MPS), Titterington's Method (TM), Starship Method (SM), and Method of Distributional Least Absolutes (DLA). These values are calculated for 500 fitting results for actual parameters $\lambda_1=0$, $\lambda_2=1$, $\lambda_3=2.5$ and $\lambda_4=1.5$ } \label{tab2} \hspace*{-0.5cm} \begin{adjustbox}{max width=1.3\textwidth} \begin{tabular}{ccccccccccc} \toprule $n$ & Est & pdQ & TL & PM & ML & MPS & TM & SM & DLA\\ \midrule 100 & $\lambda_1$ & 0.014 (0.081) & 0.013* (0.084) & 0.015 (0.079) & 0.013* (0.094) & 0.014 (0.084) & 0.013* (0.083) & 0.013* (0.083) & 0.014 (0.079)\\
& $\lambda_2$ & 0.972 (0.820) & 1.513 (1.049) & 2.607 (1.264) & 0.922 (0.898) & 0.844* (0.774) & 0.896 (0.791) & 1.443 (1.002) & 1.156 (0.871)\\
& $\lambda_3$ & 1.665 (1.212) & 2.088 (1.382) & 2.211 (1.374) & 1.909 (1.355) & 1.689 (1.184) & 1.662* (1.177) & 1.980 (1.313) & 1.726 (1.207)\\
& $\lambda_4$ & 0.299* (0.436) & 0.490 (0.606) & 0.664 (0.644) & 0.330 (0.517) & 0.314 (0.448) & 0.311 (0.449) & 0.468 (0.560) & 0.400 (0.492)\\
\midrule 250 & $\lambda_1$ & 0.010* (0.083) & 0.010* (0.088) & 0.011 (0.082) & 0.011 (0.097) & 0.011 (0.088) & 0.011 (0.088) & 0.010* (0.086) & 0.010* (0.077)\\
& $\lambda_2$ & 0.622* (0.688) & 1.142 (0.972) & 1.493 (1.019) & 0.837 (0.881) & 0.648 (0.719) & 0.669 (0.733) & 0.982 (0.888) & 0.808 (0.746)\\
& $\lambda_3$ & 1.530* (1.170) & 2.063 (1.399) & 1.987 (1.328) & 1.974 (1.387) & 1.656 (1.214) & 1.671 (1.220) & 1.900 (1.320) & 1.577 (1.147)\\
& $\lambda_4$ & 0.219* (0.395) & 0.427 (0.601) & 0.499 (0.594) & 0.317 (0.535) & 0.249 (0.448) & 0.255 (0.452) & 0.364 (0.550) & 0.319 (0.454)\\ \midrule
500 & $\lambda_1$ & 0.008* (0.079) & 0.009 (0.087) & 0.008* (0.076) & 0.010 (0.097) & 0.010 (0.090) & 0.010 (0.091) & 0.009 (0.087) & 0.008* (0.074)\\
& $\lambda_2$ & 0.569* (0.671) & 1.050 (0.965) & 1.151 (0.929) & 0.873 (0.915) & 0.617 (0.728) & 0.629 (0.735) & 0.895 (0.883) & 0.731 (0.742)\\
& $\lambda_3$ & 1.496* (1.162) & 2.065 (1.415) & 1.841 (1.297) & 2.058 (1.426) & 1.699 (1.254) & 1.702 (1.261) & 1.930 (1.356) & 1.571 (1.158)\\
& $\lambda_4$ & 0.215* (0.405) & 0.422 (0.618) & 0.446 (0.592) & 0.338 (0.567) & 0.249 (0.464) & 0.251 (0.465) & 0.359 (0.563) & 0.309 (0.484)\\
\midrule 1000 & $\lambda_1$ & 0.008 (0.083) & 0.009 (0.091) & 0.007* (0.078) & 0.011 (0.100) & 0.010 (0.097) & 0.010 (0.097) & 0.009 (0.093) & 0.007* (0.074)\\
& $\lambda_2$ & 0.623* (0.722) & 0.998 (0.975) & 0.904 (0.858) & 0.926 (0.957) & 0.675 (0.791) & 0.685 (0.798) & 0.848 (0.892) & 0.674 (0.716)\\
& $\lambda_3$ & 1.659 (1.231) & 2.140 (1.456) & 1.763 (1.285) & 2.169 (1.471) & 1.870 (1.346) & 1.877 (1.347) & 2.008 (1.402) & 1.548* (1.135)\\
& $\lambda_4$ & 0.239* (0.440) & 0.414 (0.632) & 0.385 (0.568) & 0.356 (0.593) & 0.270 (0.502) & 0.273 (0.507) & 0.346 (0.574) & 0.288 (0.471)\\ \bottomrule \multicolumn{2}{l}{\textsuperscript{*}{Lowest MSE}} \end{tabular} \end{adjustbox} \end{table} \end{landscape}
\begin{landscape} \begin{table}[h]
\centering
\caption{This table shows a comparison of the mean standard error and the absolute bias of the GLD parameters represented by $\lambda_1$,$\lambda_2$,$\lambda_3$ and $\lambda_4$ between several fitting methods. The considered methods are Method of Probability Density Quantiles(pdQ), Method of TL-moments (TL), Percentile Matching Method (PM,) Numerical Maximum Likelihood (ML), Maximum Product of Spacings (MPS), Titterington's Method (TM), Starship Method (SM), and Method of Distributional Least Absolutes (DLA). These values are calculated for 500 fitting results for actual parameters $\lambda_1=0$, $\lambda_2=1$, $\lambda_3=2$ and $\lambda_4=0.5$ } \label{tab3} \hspace*{-0.5cm} \begin{adjustbox}{max width=1.3\textwidth} \begin{tabular}{ccccccccccc} \toprule $n$ & Est & pdQ & TL & PM & ML & MPS & TM & SM & DLA\\ \midrule 100 & $\lambda_1$ & 0.020 (0.026) & 0.019 (0.050) & 0.025 (0.057) & 0.024 (0.112) & 0.016* (0.041) & 0.016* (0.043) & 0.019 (0.042) & 0.020 (0.032)\\
& $\lambda_2$ & 0.159 (0.221) & 0.151 (0.206) & 0.448 (0.381) & 0.162 (0.309) & 0.099 (0.158) & 0.096* (0.150) & 0.184 (0.255) & 0.115 (0.152)\\
& $\lambda_3$ & 0.396* (0.252) & 0.446 (0.366) & 0.631 (0.438) & 0.560 (0.645) & 0.527 (0.196) & 0.512 (0.176) & 0.538 (0.323) & 0.545 (0.170)\\
& $\lambda_4$ & 0.065 (0.133) & 0.035 (0.083) & 0.146 (0.145) & 0.032 (0.097) & 0.028 (0.065) & 0.025* (0.043) & 0.050 (0.127) & 0.032 (0.054)\\
\midrule 250 & $\lambda_1$ & 0.011* (0.028) & 0.011* (0.048) & 0.013 (0.047) & 0.018 (0.112) & 0.012 (0.032) & 0.013 (0.029) & 0.011* (0.041) & 0.013 (0.013)\\
& $\lambda_2$ & 0.061 (0.139) & 0.072 (0.151) & 0.218 (0.267) & 0.105 (0.265) & 0.037 (0.090) & 0.034* (0.075) & 0.081 (0.171) & 0.047 (0.075)\\
& $\lambda_3$ & 0.324* (0.235) & 0.336 (0.335) & 0.485 (0.383) & 0.523 (0.640) & 0.526 (0.105) & 0.576 (0.048) & 0.383 (0.296) & 0.553 (0.016)\\
& $\lambda_4$ & 0.020 (0.088) & 0.018 (0.069) & 0.077 (0.132) & 0.017 (0.090) & 0.009 (0.037) & 0.007* (0.021) & 0.022 (0.090) & 0.013 (0.032)\\
\midrule 500 & $\lambda_1$ & 0.008* (0.022) & 0.008* (0.039) & 0.009 (0.037) & 0.016 (0.111) & 0.009 (0.026) & 0.009 (0.021) & 0.008* (0.030) & 0.010 (0.004)\\
& $\lambda_2$ & 0.041 (0.107) & 0.047 (0.118) & 0.109 (0.191) & 0.080 (0.237) & 0.025 (0.070) & 0.023* (0.055) & 0.044 (0.119) & 0.029 (0.049)\\
& $\lambda_3$ & 0.276 (0.195) & 0.264* (0.275) & 0.348 (0.315) & 0.468 (0.631) & 0.365 (0.100) & 0.400 (0.042) & 0.301 (0.208) & 0.414 (0.021)\\
& $\lambda_4$ & 0.013 (0.069) & 0.012 (0.057) & 0.042 (0.108) & 0.012 (0.082) & 0.005 (0.030) & 0.004* (0.017) & 0.011 (0.065) & 0.007 (0.025)\\
\midrule
1000 & $\lambda_1$ & 0.006 (0.015) & 0.006 (0.032) & 0.005* (0.026) & 0.013 (0.101) & 0.006 (0.034) & 0.006 (0.031) & 0.006 (0.021) & 0.008 (0.006)\\
& $\lambda_2$ & 0.025 (0.078) & 0.029 (0.092) & 0.058 (0.132) & 0.061 (0.203) & 0.016 (0.067) & 0.015* (0.058) & 0.025 (0.080) & 0.019 (0.022)\\
& $\lambda_3$ & 0.243 (0.143) & 0.205* (0.229) & 0.238 (0.230) & 0.398 (0.576) & 0.221 (0.174) & 0.246 (0.140) & 0.220 (0.142) & 0.376 (0.088)\\
& $\lambda_4$ & 0.008 (0.054) & 0.007 (0.045) & 0.025 (0.081) & 0.009 (0.071) & 0.003 (0.026) & 0.002* (0.018) & 0.006 (0.044) & 0.004 (0.013)\\ \bottomrule \multicolumn{2}{l}{\textsuperscript{*}{Lowest MSE}}
\end{tabular}
\end{adjustbox} \end{table} \end{landscape}
\begin{landscape} \begin{table}[h]
\centering
\caption{This table shows a comparison of the mean standard error and the absolute bias (in brackets) of the GLD parameters represented by $\lambda_1$,$\lambda_2$,$\lambda_3$ and $\lambda_4$ between several fitting methods. The considered methods are Method of Probability Density Quantiles (pdQ), Method of TL-moments (TL), Percentile Matching Method (PM), Numerical Maximum Likelihood (ML), Maximum Product of Spacings (MPS), Titterington's Method (TM), Starship Method (SM), and Method of Distributional Least Absolutes (DLA). These values are calculated for 500 fitting results for actual parameters $\lambda_1=0$, $\lambda_2=1$, $\lambda_3=0.5$ and $\lambda_4=0.6$ } \label{tab4} \hspace*{-0.5cm} \begin{adjustbox}{max width=1.3\textwidth} \begin{tabular}{ccccccccccc} \toprule $n$ & Est & pdQ & TL & PM & ML & MPS & TM & SM & DLA\\ \midrule 100 & $\lambda_1$ & 0.027 (0.011) & 0.020 (0.004) & 0.044 (0.005) & 0.022 (0.016) & 0.018* (0.012) & 0.018* (0.011) & 0.020 (0.006) & 0.033 (0.016)\\
& $\lambda_2$ & 0.052 (0.007) & 0.048 (0.043) & 0.213 (0.052) & 0.049 (0.130) & 0.038 (0.002) & 0.037* (0.013) & 0.053 (0.061) & 0.071 (0.041)\\
& $\lambda_3$ & 0.038 (0.064) & 0.029 (0.009) & 0.270 (0.122) & 0.049 (0.142) & 0.024* (0.011) & 0.026 (0.037) & 0.030 (0.026) & 0.134 (0.109)\\
& $\lambda_4$ & 0.086 (0.103) & 0.044* (0.005) & 0.323 (0.137) & 0.066 (0.172) & 0.053 (0.035) & 0.048 (0.058) & 0.045 (0.016) & 0.238 (0.158)\\
\midrule 250 & $\lambda_1$ & 0.010 (0.005) & 0.008 (0.002) & 0.018 (0.009) & 0.007* (0.002) & 0.007* (0.000) & 0.007* (0.001) & 0.008 (0.001) & 0.011 (0.002)\\
& $\lambda_2$ & 0.015 (0.018) & 0.017 (0.028) & 0.092 (0.006) & 0.012 (0.056) & 0.010 (0.001) & 0.009* (0.010) & 0.016 (0.030) & 0.029 (0.021)\\
& $\lambda_3$ & 0.010 (0.039) & 0.010 (0.016) & 0.142 (0.088) & 0.009 (0.053) & 0.005* (0.001) & 0.006 (0.016) & 0.010 (0.019) & 0.053 (0.049)\\
& $\lambda_4$ & 0.012 (0.048) & 0.011 (0.018) & 0.177 (0.111) & 0.012 (0.061) & 0.007* (0.003) & 0.007* (0.018) & 0.010 (0.018) & 0.065 (0.059)\\
\midrule 500 & $\lambda_1$ & 0.004 (0.003) & 0.003* (0.001) & 0.007 (0.005) & 0.003* (0.003) & 0.003* (0.002) & 0.003* (0.001) & 0.003* (0.000) & 0.004 (0.001)\\
& $\lambda_2$ & 0.008 (0.016) & 0.008 (0.025) & 0.043 (0.011) & 0.005 (0.026) & 0.004* (0.007) & 0.004* (0.001) & 0.007 (0.022) & 0.010 (0.005)\\
& $\lambda_3$ & 0.005 (0.027) & 0.005 (0.017) & 0.057 (0.049) & 0.003 (0.024) & 0.002* (0.007) & 0.002* (0.005) & 0.004 (0.016) & 0.009 (0.014)\\
& $\lambda_4$ & 0.007 (0.035) & 0.005 (0.019) & 0.080 (0.063) & 0.003 (0.028) & 0.002* (0.003) & 0.002* (0.007) & 0.004 (0.015) & 0.012 (0.018)\\
\midrule
1000 & $\lambda_1$ & 0.002 (0.005) & 0.002 (0.003) & 0.004 (0.006) & 0.001* (0.004) & 0.001* (0.004) & 0.001* (0.003) & 0.002 (0.004) & 0.002 (0.005)\\
& $\lambda_2$ & 0.004 (0.006) & 0.004 (0.019) & 0.022 (0.010) & 0.002* (0.016) & 0.002* (0.005) & 0.002* (0.003) & 0.003 (0.013) & 0.005 (0.003)\\
& $\lambda_3$ & 0.002 (0.012) & 0.002 (0.017) & 0.024 (0.003) & 0.001* (0.013) & 0.001* (0.006) & 0.001* (0.003) & 0.002 (0.012) & 0.004 (0.005)\\
& $\lambda_4$ & 0.003 (0.019) & 0.003 (0.015) & 0.030 (0.014) & 0.001* (0.017) & 0.001* (0.002) & 0.001* (0.006) & 0.002 (0.008) & 0.005 (0.011)\\
\bottomrule \multicolumn{2}{l}{\textsuperscript{*}{Lowest MSE}} \end{tabular} \end{adjustbox}
\end{table} \end{landscape}
As can be seen in Table \ref{tab1}, the pdQ method shows comparatively low mean squared error and absolute bias for the $\lambda_1 = 0,\lambda_2 =1,\lambda_3 =1.5,\lambda_4 =1.5$ setting for all the sample sizes. The MPS and TM methods also perform well, being second best or third best in most instances. Similar results can be found in Table \ref{tab2} for $\lambda_1 =0,\lambda_2 =1,\lambda_3 =2.5,\lambda_4 =1.5$ as well, where the pdQ method has the minimum MSE and bias values in almost all cases. The DLA and MPS methods also perform well for this setting. Although the pdQ is not the best method always for the last two GLD settings, $\lambda_1 =0,\lambda_2 =1,\lambda_3 =2,\lambda_4 =0.5$ and $\lambda_1 =0,\lambda_2 =1,\lambda_3 =0.5,\lambda_4 =0.6$, it produces competitive results compared to the other methods. The MPS and TM methods again provide very good results for these two settings, displaying low MSE and absolute bias values (Table \ref{tab3} and \ref{tab4}). However, their good performance comes with much greater computational cost which we will consider below.
In Figure \ref{fig2}, we depict the computational time for each method as the sample size increases. We consider 100 replications of data from GLD(0,1,0.5,0.6) for each sample size and calculate the average computational time for each estimation method. The run times were measured on a computer with an Intel(R) Core(TM) i7-6700 processor running at 3.40GHz using 32 GB of RAM, running Windows version 10. The graph shows that the PM and pdQ methods have much lower computational times compared to all other methods. The PM takes just a few milliseconds to estimate the parameters from a sample even as large as 100,000 observations. However, the PM method was typically not a good estimator when compared to the pdQ, MPS and TM methods. On the other hand, the pdQ method requires about a second to compute even for 100,000 observations. As expected, SM and ML are the slowest among all the methods, spending almost 4 minutes each to calculate the above results. The TM method has a computation time of around 2.5 minutes for this setting. Such large computation times make bootstrapping onerous.
\begin{table}[h]
\centering
\caption{Parameters chosen for the Generalized Lambda Distribution using the pdQ method and quartile matching.} \label{tab5} \hspace*{-0.5cm} \begin{tabular}{lcccc} \toprule Distribution & $\lambda_1$ & $\lambda_2$ & $\lambda_3$ & $\lambda_4$ \\ \midrule
Normal (0,1) & 0 & 1.4420 & 0.1469 & 0.1469 \\ Lognormal (0,1) & 0.8038 & 1.8141 & 0.7589 & -0.7082 \\
$\chi^2_5$ & 4.0559 & 0.4977 & 0.5167 & -0.1470 \\
Beta (2,3) & 0.3770 & 5.3836 & 0.4958 & 0.2637 \\
\bottomrule \end{tabular}
\end{table}
We now consider the performance of bootstrap intervals for some common distributions. The closest GLD parameters for these considered distributions are adopted from \cite{doi:10.1080/25742558.2019.1602929} and are presented in Table~\ref{tab5}. Here one sample is generated from the above closest GLD parameters and bootstrapped from those observations. \begin{table} \footnotesize
\centering \caption{This table shows the coverage probability (cp) and mean width ($\Bar{\omega}$) of bootstrap confidence intervals for the location ($\lambda_1$) of the closest GLD distribution at nominal level 95\%. These values are obtained using 500 bootstrap re-samples and 500 iterations} \label{tab6} \begin{tabular}{ccccc@{\hskip 0.3in}cc@{\hskip 0.3in}cc} \toprule \textit{n} & \textit{F} && \multicolumn{2}{c}{pdQ} & \multicolumn{2}{c}{TM} & \multicolumn{2}{c}{PM}\\
& & & perc & bca & perc & bca & perc & bca\\ \midrule 100 & Normal & cp & 0.966 & 0.966 & 0.964 & 0.960 & 0.966 & 0.942\\ & &$\Bar{\omega}$ & 0.560 & 0.561 & 0.489 & 0.490 & 0.698 & 0.701\\
\cmidrule{2-9}
&$\chi^2_5$ & cp & 0.974 & 0.968 & 0.946 & 0.956 & 0.978 & 0.918\\ & &$\Bar{\omega}$ & 1.735 & 1.734 & 1.500 & 1.453 & 2.072 & 2.035\\
\cmidrule{2-9}
& Lognormal & cp & 0.988 & 0.960 & 0.950 & 0.950 & 0.968 & 0.882\\ & & $\Bar{\omega}$& 0.578 & 0.572 & 0.493 & 0.471 & 0.682 & 0.660\\
\cmidrule{2-9}
& Beta & cp & 0.968 & 0.964 & 0.948 & 0.960 & 0.956 & 0.940\\ & &$\Bar{\omega}$ & 0.142 & 0.142 & 0.123 & 0.121 & 0.174 & 0.173\\
\midrule
250 & Normal & cp & 0.948 & 0.948 & 0.960 & 0.966 & 0.942 & 0.948\\ & &$\Bar{\omega}$ & 0.341 & 0.342 & 0.290 & 0.291 & 0.394 & 0.397\\
\cmidrule{2-9}
& $\chi^2_5$ & cp & 0.974 & 0.966 & 0.978 & 0.974 & 0.960 & 0.944\\
& & $\Bar{\omega}$ & 1.010 & 1.011 & 0.822 & 0.824 & 1.362 & 1.350\\
\cmidrule{2-9}
& Lognormal & cp & 0.972 & 0.964 & 0.956 & 0.950 & 0.962 & 0.928\\ & & $\Bar{\omega}$ & 0.347 & 0.363 & 0.282 & 0.270 & 0.498 & 0.484\\
\cmidrule{2-9}
& Beta & cp & 0.950 & 0.934 & 0.934 & 0.940 & 0.944 & 0.942\\ & & $\Bar{\omega}$ & 0.083 & 0.083 & 0.067 & 0.067 & 0.107 & 0.107\\ \bottomrule \end{tabular} \end{table}
\footnotesize
\centering \caption{This table shows the coverage probability (cp), mean width ($\Bar{\omega}$) and median width (m) of bootstrap confidence intervals for the $\lambda_3-\lambda_4$ of the closest GLD distribution. These values are obtained using 500 bootstrap re-samples and 500 iterations} \begin{tabular}{ccccccc@{\hskip 0.35in}cccc@{\hskip 0.35in}cccc@{\hskip 0.35in}cccc} \toprule \textit{n} & \textit{F} && \multicolumn{4}{c}{Cauchy-QOR} & \multicolumn{4}{c}{Lognormal-QOR} & \multicolumn{4}{c}{TM} & \multicolumn{4}{c}{PM}\\
& & &norm & basic & perc & bca & norm & basic & perc & bca & norm & basic & perc & bca & norm & basic & perc & bca\\
\midrule 100 & Normal & cp & 1 & 1 & 1 & 0.994 & 1 & 1 & 1 & 0.992 & 0.982 & 0.978 & 0.954 & 0.97 & 0.986 & 0.988 & 0.99 & 0.966\\ & &$\Bar{\omega}$ & 106.3 & 168.4 & 168.4 & 132.1 & 106.4 & 168.4 & 168.4 & 135.3 & 0.495 & 0.487 & 0.487 & 0.484 & 1.551 & 1.656 & 1.656 & 1.683\\ & & m & 105.8 & 223.4 & 223.4 & 112.6 & 105.8 & 223.3 & 223.3 & 112.6 & 0.466 & 0.470 & 0.470 & 0.471 & 1.472 & 1.606 & 1.606 & 1.675\\ \cmidrule{2-19}
&$\chi^2_5$ & cp & 1 & 1 & 0.998 & 0.996 & 1 & 1 & 1 & 0.998 & 0.982 & 0.962 & 0.958 & 0.974 & 0.888 & 0.872 & 0.994 & 0.916\\ & &$\Bar{\omega}$ & 108.1 & 166.9 & 166.9 & 126.1 & 108.6 & 167.5 & 167.5 & 135.6 & 1.021 & 0.857 & 0.857 & 0.707 & 1.925 & 1.903 & 1.903 & 1.837\\ & & m & 106.2 & 166.0 & 166.0 & 113.8 & 107.1 & 166.1 & 166.1 & 113.9 & 0.798 & 0.638 & 0.638 & 0.600 & 1.987 & 1.967 & 1.967 & 1.884\\ \cmidrule{2-19} & Lognormal & cp & 1 & 1 & 0.996 & 0.98 & 1 & 0.998 & 0.994 & 0.984 & 0.980 & 0.952 & 0.950 & 0.948 & 0.758 & 0.678 & 0.992 & 0.810\\ & &$\Bar{\omega}$ & 109.3 & 166.9 & 166.9 & 123.4 & 110.2 & 168.1 & 168.1 & 127.2 & 2.378 & 2.088 & 2.088 & 1.542 & 2.745 & 2.551 & 2.551 & 2.351\\ & & m & 107.9 & 171.5 & 171.5 & 115.6 & 108.7 & 183.3 & 183.3 & 115.7 & 2.190 & 1.664 & 1.664 & 1.040 & 2.792 & 2.565 & 2.565 & 2.422\\ \cmidrule{2-19} & Beta & cp & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 0.992 & 0.986 & 0.96 & 0.98 & 0.962 & 0.952 & 0.986 & 0.928\\ & &$\Bar{\omega}$ & 106.2 & 163.9 & 163.9 & 130.2 & 106.7 & 165.3 & 165.3 & 133.6 & 0.902 & 0.731 & 0.731 & 0.645 & 1.829 & 1.919 & 1.919 & 1.909\\ & & m & 105.5 & 164.4 & 164.4 & 113.2 & 105.6 & 164.6 & 164.6 & 112.9 & 0.671 & 0.577 & 0.577 & 0.562 & 1.797 & 1.929 & 1.929 & 1.909\\ \midrule 250 & Normal & cp & 1 & 0.988 & 0.986 & 0.984 & 1 & 0.814 & 0.832 & 0.858 & 0.946 & 0.948 & 0.95 & 0.946 & 0.972 & 0.98 & 0.966 & 0.944\\ & &$\Bar{\omega}$ & 82.53 & 0.447 & 0.447 & 11.44 & 82.34 & 0.429 & 0.429& 9.209 & 0.252 & 0.256 & 0.256 & 0.256 & 0.748 & 0.766 & 0.766 & 0.796\\ & & m & 85.39 & 0.400 & 0.400 & 0.405 & 79.35 & 0.377 & 0.377 & 0.383 & 0.249 & 0.252 & 0.252 & 0.252 & 0.693 & 0.694 & 0.694 & 0.712\\ \cmidrule{2-19}
&$\chi^2_5$ & cp & 1 & 0.978 & 0.976 & 0.988 & 1 & 0.972 & 0.984 & 0.984 & 0.962 & 0.954 & 0.958 & 0.956 & 0.964 & 0.938 & 0.984 & 0.948\\ & &$\Bar{\omega}$ & 83.03 & 3.011 & 3.011 & 56.78 & 82.64 & 2.386 & 2.386 & 19.313 & 0.301 & 0.304 & 0.304 & 0.302 & 1.195 & 1.218 & 1.218 & 1.170\\ & & m & 86.73 & 0.515 & 0.515 & 0.529 & 86.72 & 0.462 & 0.462 & 0.456 & 0.296 & 0.302 & 0.302 & 0.297 & 1.166 & 1.276 & 1.276 & 1.169\\ \cmidrule{2-19} & Lognormal & cp & 1 & 0.992 & 0.892 & 0.966 & 1 & 0.986 & 0.908 & 0.964 & 0.982 & 0.972 & 0.952 & 0.964 & 0.862 & 0.794 & 0.978 & 0.872\\ & &$\Bar{\omega}$ & 89.35 & 11.38 & 11.38 & 127.2 & 87.94 & 9.312 & 9.312 & 68.69 & 1.032 & 0.774 & 0.774 & 0.588 & 2.173 & 2.000 & 2.000 & 1.8874\\ & & m & 86.84 & 1.111 & 1.111 & 204.1 & 86.47 & 0.891 & 0.891 & 0.965 & 0.563 & 0.502 & 0.502 & 0.484 & 2.277 & 2.107 & 2.107 & 2.034\\ \cmidrule{2-19} & Beta & cp & 1 & 0.992 & 0.982 & 0.994 & 1 & 0.986 & 0.994 & 0.994 & 0.978 & 0.976 & 0.968 & 0.968 & 0.978 & 0.98 & 0.958 & 0.942\\ & &$\Bar{\omega}$ & 82.17 & 2.415 & 2.415 & 31.45 & 82.13 & 2.137 & 2.137 & 10.26 & 0.265 & 0.266 & 0.266 & 0.264 & 1.045 & 1.084 & 1.084 & 1.086\\ & & m & 78.58 & 0.505 & 0.505 & 0.525 & 78.50 & 0.457 & 0.457 & 0.459 & 0.257 & 0.260 & 0.260 & 0.259 & 0.999 & 1.073 & 1.073 & 1.063\\
\bottomrule \end{tabular} \end{table}
\begin{table} \footnotesize
\centering \caption{This table shows the coverage probability (cp) and mean width ($\Bar{\omega}$) of bootstrap confidence intervals for the $\lambda_3-\lambda_4$ of the closest GLD distribution at nominal level 95\%. These values are obtained using 1000 and 2000 bootstrap re-samples respectively for $n=500$ and $n=1000$, and 500 iterations} \label{tab7} \begin{tabular}{ccccc@{\hskip 0.35in}cc} \toprule \textit{n} & \textit{F} && \multicolumn{2}{c}{pdQ} & \multicolumn{2}{c}{PM}\\
& & & perc & bca & perc & bca \\
\midrule 500 & Normal & cp & 0.972 & 0.964
& 0.952 & 0.918\\ & &$\Bar{\omega}$ & 0.251 & 0.253 & 0.457 & 0.465\\
\cmidrule{2-7}
&$\chi^2_5$ & cp & 0.950 & 0.962 & 0.956 & 0.934\\ & &$\Bar{\omega}$ & 0.311 & 0.297 & 0.743 & 0.747\\
\cmidrule{2-7}
& Lognormal & cp & 0.876 & 0.942 & 0.978 & 0.918\\ & &$\Bar{\omega}$ & 0.560 & 0.477 & 1.541 & 1.450\\
\cmidrule{2-7}
& Beta & cp & 0.978 & 0.972 &0.972 & 0.952\\ & &$\Bar{\omega}$ & 0.295 & 0.300 & 0.646 & 0.650\\
\midrule
1000 & Normal & cp & 0.940 & 0.942 & 0.958 & 0.934\\ & &$\Bar{\omega}$ & 0.167 & 0.167 & 0.305 & 0.304\\
\cmidrule{2-7}
&$\chi^2_5$ & cp & 0.944 & 0.956 & 0.964 & 0.940\\ & &$\Bar{\omega}$ & 0.194 & 0.193 & 0.415 & 0.415\\
\cmidrule{2-7}
& Lognormal & cp & 0.894 & 0.934 & 0.956 & 0.932\\ & &$\Bar{\omega}$ & 0.304 & 0.293 & 1.012 & 0.996\\
\cmidrule{2-7}
& Beta & cp & 0.976 & 0.968 & 0.948 & 0.940\\ & &$\Bar{\omega}$ & 0.172 & 0.172 & 0.383 & 0.383\\
\bottomrule \end{tabular} \end{table}
In Table~\ref{tab6} and Table~\ref{tab7} we provide the coverage probability results for bootstrap confidence intervals for both location ($\lambda_1$) and the difference of shape parameters ($\lambda_3-\lambda_4$), where $\lambda_3-\lambda_4$ is an indication of the skewness of the GLD distribution. We selected only the TM and PM methods, together with the pdQ method, for this comparison, as they are the closest competitors in terms of both performance and computational time.
From Table~\ref{tab6}, all the methods display coverage probabilities close to the nominal value for the location parameter. The TM method has the narrowest width, with the pdQ method the next narrowest. We found that larger sample sizes are required to obtain reliable intervals for the difference in shape parameters. The TM method is not considered here as BCa bootstrap confidence intervals need a higher number of re-samples; when the sample size increases to 1000, it takes roughly 33.98 minutes to obtain a single confidence interval for TM. The pdQ and PM methods, on the other hand, take only 47.65 seconds and 32.02 seconds respectively to provide a single confidence interval for $n=1000$. Compared to the PM method, the pdQ method shows better coverage in the vicinity of the nominal level for skewness with a narrower width. So it can be concluded that the pdQ is favourable compared to these other two methods.
\begin{figure}
\caption{This figure shows the variations of the average running times for each method as the sample size increases. The values are calculated using 100 iterations}
\label{fig2}
\end{figure}
\section{Applications} \subsection{Application 1: Total income data of private households of Spain in 1980 }
In this example, we consider total income data of Spanish households (Figure \ref{fig3}), which is from the 1980 Spanish Family Expenditure Survey (FES) described in \cite{alonso1994encuesta}. This data set consists of 23,972 observations, and total income is recorded with household characteristics and expenditure on several categories. This data set is readily available in the \textit{Ecdat} package \citep{croissant2016Ecdat} under the name `BudgetFood'.
\begin{figure}
\caption{Total income of Spanish households}
\label{fig3}
\end{figure}
\begin{table}[h!t]
\centering \caption{The KS test statistic for Spanish household data} \label{tab8} \begin{tabular}{ccccccccc} \toprule pdQ & TL & PM & ML & MPS & TM & SM & DLA\\ \midrule 0.0069 & 0.0043 & 0.0091 & 0.0326 & 0.0326 & 0.0326 & 0.0183 & 0.0155\\ \bottomrule \end{tabular} \end{table}
In Table \ref{tab8}, we assess the goodness of fit of each method to this data by using the Kolmogorov Smirnov test statistic. As can be seen, both TL-moments and pdQ method provide the best fit to the data. Further, this is elaborated in Figure \ref{fig4} quantile plots where, unlike the other methods, estimated quantiles from the pdQ method and TL-moments method are clearly aligned with the sample quantiles.
\begin{figure}
\caption{Quantile plots for Spanish households data }
\label{fig4}
\end{figure}
\begin{table}
\centering \caption{Running times of each method for Spanish household data} \label{tab9} \begin{tabular}{cccccc} \toprule Method & Elapsed time (seconds) & Relative to pdQ \\ \midrule PM & 0.02 & 0.05\\ pdQ & 0.41 & 1\\ TL & 2.13 & 5.20\\ ML & 11.62 & 28.34\\ TM & 17.19 & 41.93\\ MPS & 17.22 & 42\\ DLA & 21.03 & 51.29\\ SM & 37.39 & 91.20\\ \bottomrule \end{tabular} \end{table}
In Table \ref{tab9}, we provide the running times for parameter estimation by each method. The PM and pdQ methods are clearly the fastest. The pdQ is about five times faster than the TL-moments method and 91 times faster than SM for this particular data set. This further suggests the suitability of the pdQ method with large sample sizes.
\subsection{Application 2: Twin Study data }
We now consider a much smaller data set known as the Indiana Twin Study data, which consists of the birth weights of a set of 123 twins. The data originally comes from the PhD thesis of Dr Cynthia Moore, Department of Medical and Molecular Genetics, Indiana University School of Medicine and also has been used in \cite{karian2003comparison} and \cite{karian2000fitting}, for GLD parameter estimation.
Table~\ref{tab10} presents the Kolmogorov Smirnov test statistic for the GLD fit to the data by each method and the corresponding p-value for the test. According to the p-values, all the methods provide a good fit to the data, but in particular, the SM method, TL-moments method and pdQ method are the best fits, as they display smaller KS statistics compared to the other methods.
\begin{table}
\centering \caption{The KS test statistic and p-value for twin study data} \label{tab10} \begin{tabular}{lrrrrrrrr} \toprule
& pdQ & TL & PM & ML & MPS & TM & SM & DLA\\ \midrule D & 0.0465 & 0.0447 & 0.0583 & 0.0519 & 0.0487 & 0.0487 & 0.0417 & 0.0494\\ p-value & 0.9501 & 0.9657 & 0.7973 & 0.8993 & 0.9344 & 0.9284 & 0.9791 & 0.9236\\ \bottomrule \end{tabular} \end{table}
\subsection{Application 3: Earnings data }
We now look at obtaining 95\% bootstrap confidence intervals for the difference of the location parameter for two groups and also confidence intervals for the skewness. The data set we consider here is also available in the \textit{Ecdat} package \citep{croissant2016Ecdat} and is named `CPSch3'. The data set includes the hourly earnings of males ($n=5956$) and females ($n=5174$) in the US from 1992 to 1998.
\begin{table}
\centering \caption{Point and interval estimates for the difference of location for earnings of males and females from 1992 to 1998 in US} \label{tab11} \begin{tabular}{ccccc} \toprule & & pdQ & TM & PM \\ \midrule Est.& & 2.276 & 2.282 & 2.288 \\ \midrule Percentile & CI & (2.026, 2.674) & (2.011, 2.544) & (1.989, 2.736)\\ & Time(mins) & 0.945 & 213 & 0.405\\ \midrule BCa & CI & ( 1.968, 2.604 ) & (2.021, 2.545) & (1.929, 2.631)\\ & Time(mins) & 59.17 & 6451.2 & 45.03\\ \bottomrule \end{tabular} \end{table} \begin{table}
\centering \caption{Point and interval estimates for the difference of l3 and l4 for earnings of males from 1992 to 1998 in US} \label{tab12} \begin{tabular}{cccc} \toprule & pdQ & TM & PM \\ \midrule Est.& 0.383 & 0.351 & 0.417\\ CI-Perc &(0.365, 0.398) & (0.328, 0.370) & (0.385, 0.459)\\ CI-BCa &(0.345, 0.418) & (0.329, 0.373) & (0.353, 0.496)\\ \bottomrule \end{tabular} \end{table} \begin{table}
\centering \caption{Point and interval estimates for the difference of l3 and l4 for earnings of females from 1992 to 1998 in US} \label{tab13} \begin{tabular}{cccc} \toprule & pdQ & TM & PM \\ \midrule Est.& 0.486 & 0.410 & 0.524\\ CI-Perc &(0.452, 0.529) & (0.385, 0.436) & (0.455, 0.618)\\ CI-BCa &(0.447, 0.524) & (0.385, 0.437) & (0.440, 0.602)\\ \bottomrule \end{tabular} \end{table}
In Tables \ref{tab11},\ref{tab12} and \ref{tab13} we display the bootstrap confidence intervals calculated using three methods and the time efficiency of each method. For percentile bootstrap intervals, we used 500 bootstrap samples, but for BCa confidence intervals, the number of re-samples has to be much higher than the sample size. Therefore, we used 15,000 bootstrap re-samples to obtain the BCa confidence intervals as the sample sizes here are large.
All the methods provide similar results for the confidence intervals with narrower width while declaring there is a difference in the location for both males and females in earnings. Additionally, there is some skewness present in both the male and female earnings distributions. When the width of the confidence interval is considered, pdQ has a narrower width after the TM method and outperforms the PM method. When the time efficiency is considered the pdQ and PM take only 0.945 minutes and 0.405 minutes, respectively, to obtain the percentile confidence interval whereas the TM method spends around 3.5 hours to obtain the same results. For the BCa confidence intervals, TM method is much more inefficient in this case as it takes roughly 4.5 days to obtain the BCa confidence interval for this data whereas the pdQ method and PM method only takes 59.17 minutes and 45.03 minutes respectively.
\section{Discussion}
In this paper we have introduced a simple two-step estimation method for the GLD parameters. The first step uses the empirical probability density quantile function to find estimates of the shape parameters. The second step can then simply obtain estimates for the location and inverse scale. Our simulations show that the method performs very well, often beating existing methods. An additional advantage of the approach is the very small computation time compared to the best of the methods. This small computation time makes bootstrapping possible, which is important given the lack of interval estimators available for the GLD parameters. For the more time consuming estimation methods, bootstrap intervals can take hours, even days to compute, as our examples show.
While we focused on the GLD distribution in this paper, the method can be used for other distributions too. An obvious extension would be to consider other generalized distributions, like the generalized Beta distribution, since estimation for such distributions is not straightforward.
\end{document} |
\begin{document}
\title{EPR-B correlations: \\ a physically tenable local-real model}\author{\and A. F. Kracklauer\\ Bauhaus University, Weimar, Germany}\maketitle
\begin{abstract}
We propose a classical, i.e., local-real physical model of processes
underlying EPR experiments. \ The model leads to the prediction, that the
visibility of the output signal will exhibit increasing variation as the
coincidence window is increased, thus providing a testable criterion for its
validity. \ If it can be sustained, this model undermines the claim that
Nature has a fundamentally nonlocal feature or that irreal entities are
required by quantum theory. \end{abstract}
\section{State of the argument.}
Historically, the struggle to find an interpretation for the wave function in quantum theory led, {\tmem{inter alia }}by way of arguments made by Einstein, Podolsky and Rosen (EPR), to examination of correlations of measurements on systems comprising two entities. As is well known and commonly accepted nowadays, analysis of such correlations seems to support the conclusion that at a fundamental level, Nature admits either ``nonlocal,'' or ``irreal'' aspects. \ The first of these alternatives constitutes a deep and serious rift between the two main theories of Physics, namely Quantum Mechanics and Relativity, because the latter demands ``locality,'' namely, that interaction must transpire between all entities at or below the speed of light. \ Accepting the alternative, irreality, is, if anything, an even deeper break with occidental science, as it injects a role for human perception into the evolution of the universe (via observer induced collapse of wave packets), oblivious to the eons before humans appeared.
This situation has led some researchers to critically reevaluate current experiments deemed to justify the orthodox interpretations, in particular the so-called EPR experiments and tests of `Bell-inequalities' as carried out by Aspect, Weihs, etc. This writer, for example, has identified several generic arguments supporting the conclusion, that the correlations seen in these experiments do not result from structure unique to quantum mechanics, but appear already in the analysis by Stokes of polarized light at least 50 years before the need for quantum notions was recognized.{\cite{AK07}} \ Moreover, this criticism of current thinking was under-girded with proposed simulations of EPR experiments, intended to demonstrate in detail that non local interaction is not needed to duplicate data commonly believed to require quantum (i.e., nonlocal) interaction for explanation.{\cite{ak04}} \
Such simulations are incisive insofar as they constitute counterexamples to the argument put forward by, for example, Bell, and subsequently dubbed a ``theorem,'' to the effect that EPR correlations absolutely cannot be taken into account without the employ of nonlocality. \ Demonstrating a simulation without nonlocality extinguishes that claim once and for all. \ Nevertheless, these simulations can be, and have been, criticized for making use of features that, although not quantum in their essential nature, are either impossible or implausible as physics. \ This writer's simulation{\cite{ak04}} silently assumes, for example, that photo detectors respond depending on the global character of the input signal (specifically, which of the two component states making up the `singlet state' actually enters the detector).{\cite{ga08}}
\ Less objectionably, the simulation proposed by de Raedt et al. proposes that, upon deflection or transmission at a beam splitter, a ``photon'' either suffers a random delay{\cite{sz08}}, or that beam splitters behave as ``deterministic learning machines,''{\cite{dR08}}. \ While such delays cannot be rejected {\tmem{a priori}}, at least this writer knows of no physical cause for such delays in beam splitters, in particular of the magnitude required by the simulation to duplicate data taken in actual experiments. Alternatively, ``learning machine'' effects, as proposed, require, for example, persistent polarization currents to be setup by one signal which persist to influence subsequent signals passing through the beam splitter, an effect for which explanation involves somewhat implausible, even extravagant hypothetical input.
It is the purpose here to propose a physical model for these experiments that accommodates the facts as observed in experiments. \ The model is simple, fully classical, local and real; it does not presume any quantum structure, not even the existence of ``photons.''
First, factual characteristics of the data from the experiments that are to be modeled must be delineated.
\section{EPR-B data ``as it is.''}
\
The overwhelming impression made by EPR-B raw data, is that it appears to be two (or four) streams of events occurring at essentially random times{\footnote{See: {\cite{dR08}} for a comprehensive description of data from a typical Bell-test experiment.}}. \ The two or four streams represent what are called ``photon detections,'' at the detectors, either one (or two) on both the right and left of an EPR setup. These data streams do not give the impression to the eye of being temporally grouped or correlated. \ Thus, for the purpose of analysis of the EPR experiments, these streams are filtered in terms of a coincidence ``window,'' i.e., a time interval within which one event on the left is paired with one event on the right. Naturally, as this window is made more narrow, the number of resulting coincidences diminishes, but at the same time the closer the statistics of the selected pairs come to those predicted using quantum theory. At the other end, with a very wide window, the coincidence statistics more closely approach those expected from strictly classical analysis. \ The difference is, as is now well known, that the quantum statistics involve more coincidences than are expected from non quantum analysis. \ This is said to lead to ``quantum correlations stronger than admitted by classical statistics.'' \
In addition, there is a second phenomenon revealed in the data. \ It is this: the expected strict rotational invariance with respect to the input signals is imperfect. \ This defect is visible as a variation in the visibility of the output signal which oscillates so as to have minimums at the angles of $\pi / 4 + n \pi / 2$. \ To date, this anomaly remains under reported because it is unexplained.{\cite{ak07s}}
Thus, what a simulation, or model, of these experiments must reflect and explain goes beyond just the subset of the data which exhibit the peculiar quantum aspects; it must also explain the large amount of data filtered out which does not fully fit the quantum structure as revealed in calculations with solutions to the Schr\"odinger equation{\tmname{{\tmsamp{}}}}. In this regard, the two salient characteristics are: \begin{enumeratenumeric}
\item the relative paucity of qualified pairs, and
\item the strict failure of rotational invariance as revealed by oscillating
visibility. \end{enumeratenumeric} The model proposed below covers both of these features.
\section{The model: background motivation}
In a study of EPR and GHZ (after Greenberger, Horne and Zeilinger, who famously considered experiments on higher order systems with three, four, etc. output events) correlations, this writer has shown that the observed coincidences can be accurately calculated using the {\tmem{classical variant of the}} coherence function.{\cite{ak02}} For GHZ setups correlation calculations become quite unwieldy, and, therefore, are best carried out with a computer algebra program; here we use MuPAD. \ This tactic can be exploited also to lead directly to a very convenient display of the essential difference between the results of classical and quantum analysis of EPR (or GHZ if desired) experiments (see below).
Consider a prototypical experiment to test Bell-inequalities. \ The signal{\footnote{Herein the term ``signal'' refers to the pair of opposed ``pulses'' generated so as to have anti-correlated polarization. Each pulse is taken to have just enough total energy to evoke one electron by the photoelectric effect in a detector. \ As is customary, the ``singlet'' state is considered to be a mysterious quantum entity comprised of the sum of two such `signals' of opposite polarization orientation, which randomly ``collapses'' to one or the other component signal whenever one of the pulses encounters a detector. }} as generated in the source crystal in the quantum mechanical understanding of the setup constitutes a ``singlet state'' sent in opposed directions, \ such that if a vertical signal is detected on the left, a horizontal signal is detected deterministically on the right, or {\tmem{vice versa}}. Such singlet states or signals emitted by the crystal are considered to be essentially ambiguous in that they are the sum of both options in both directions, but that this ambiguity is resolved by whichever detector registers first, thereby causing a collapse of the quantum mechanical $\psi$-function (wave) for the pulses sent in both directions. \ The consequence is, that the mate to the first signal at the companion measuring station is {\tmem{instantly}} granted (by some mysterious nonlocal process not described by the dynamic equations of quantum mechanics) the opposite polarization. \ In non quantum renditions or simulations of these experiments, no such essential ambiguity is allowed; the paired signals are randomly selected to be one or the other anti-correlated possibility and are never the simultaneous sum in the form of the ``singlet state,'' \ a stipulation which constitutes an encoding of ``reality'' in such simulations. 
A singlet state must be considered ``irreal'' as it is supposedly the sum simultaneously of mutually exclusive alternatives {---} contrary to all logic.
For our model's calculation algorithm, the source is encoded in terms of a source signal sent left $\tmop{Sl} (n)$ and one sent right $\tmop{Sr} (n)$, each a function of a random variable, $n$, taking on the values $0, 1$ with equal probability. \ First, some initialization statements:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|reset(): Matrix:=Dom::SquareMatrix(2): vector:=Dom::Matrix():|}
Now, the source signals are encoded as functions of $n$, such that for each of its designated values, $0, 1$, it produces one or the other of the component states constituting the singlet state :
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|Sl:=n->vector(2,1,[[n],[1-n]]); Sr:=n->vector(2,1,[[1-n],[n]]);|}
These signals are then each sent, on both sides, through polarizing beam splitters (PBS). The axis of each beam splitter is separately variable, thus the encoding of the effect of such a beam splitter on each beam must be a function of its angular orientation. \ The operator corresponding to a PBS as a function of its angular orientation is the 2-dimensional projection operator:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|proj:=z->Matrix([[cos(z),sin(z)],[-sin(z),cos(z)]]);|}
The output signal from a PBS is then obtained by multiplying the source signal by the projection operator to obtain a two component vector, where each component represents a signal from each output port of the PBS.
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|El:=(zl,n)->proj(zl)*Sl(n); Er:=(zr,n)->proj(zr)*Sr(n);|}
The output signals of the PBS, $(\tmop{El}, \tmop{Er}$), are electric fields and are sent to photo-detectors, where they can be used in the standard law to obtain the probability, $P$, of generating a photo electron, namely \begin{equation}
\text{$P \propto E^2,$} \end{equation} where $P$ can be interpreted as a photo current, for high intensity.
Now, whatever else is true or false, it is a fact, that the classical second order correlation function corresponds to the same correlation as computed with ostensible quantum algorithms. To execute this calculation we need to form the ratio of difference over the sum of two other sums, of the form: \begin{equation}
\frac{\sum_{\tmop{ijkl}} \tmop{El}_i^{\ast} (\theta_l) \tmop{Er}_j^{\ast}
(\theta_r) \tmop{Er}_k^{} (\theta_r) \tmop{El}_l (\theta_r) -
\sum_{\tmop{ijkl}} \tmop{El}_i^{\ast} (\theta_l) \tmop{Er}_j^{\ast}
(\theta_r + \pi / 2) \tmop{Er}_k^{} (\theta_r + \pi / 2) \tmop{El}_l
(\theta_r)}{\sum_{\tmop{ijkl}} \tmop{El}_i^{\ast} (\theta_l)
\tmop{Er}_j^{\ast} (\theta_r) \tmop{Er}_k^{} (\theta_r) \tmop{El}_l
(\theta_r) + \sum_{\tmop{ijkl}} \tmop{El}_i^{\ast} (\theta_l)
\tmop{Er}_j^{\ast} (\theta_r + \pi / 2) \tmop{Er}_k^{} (\theta_r + \pi / 2)
\tmop{El}_l (\theta_r)}, \end{equation} where all the indices take on the values $1$ \ and $2$ as the there are two components representing the two output channels of a PBS.
In this expression one sees the definition of a fourth-order correlation of electric field magnitudes, which for certain of these products equal second-order correlations of the field energy intensities, or, calling on the theory of photo current generation, i.e., on the formula $I \propto E^2$, of the correlation of the number of photo (electron) detections.
The encoded numerator of this expression is given by:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|Num:=(n,c,i,j,k,l,zl,zr)->(-1)^c*El(zl,n)[i]*Er(zr+c*PI/2,n)[j]| \\
\phantom{xxxxx}\verb|*Er(zr+c*PI/2,n)[k]*El(zl,n)[l];| }
{\ttfamily{$(n, c, i, j, k, l, \mathrm{\tmop{zl}}, \mathrm{\tmop{zr}}) \mapsto \left( - 1 \right)^c \mathrm{\tmop{El}} \left( \mathrm{\tmop{zl}}, n \right)_i \mathrm{\tmop{Er}} \left( \mathrm{\tmop{zr}} + \frac{\pi c}{2}, n \right)_j \mathrm{\tmop{Er}} \left( \mathrm{\tmop{zr}} + \frac{\pi c}{2}, n \right)_k \mathrm{\tmop{El}} \left( \mathrm{\tmop{zl}}, n \right)_l$}}
{\noindent}where the indicated sums are then executed by:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|sum(sum(sum(sum(sum(sum(Num(n,c,i,j,i,j,zl,zr)| \\
\phantom{xxxxx}\verb|,i=1..2),j=1..2),k=1..2),l=1..2),n=0..1),c=0..1);| }
{\ttfamily{$8 \left( \cos \left( \mathrm{\tmop{zl}} \right) \sin \left( \mathrm{\tmop{zr}} \right) - \cos \left( \mathrm{\tmop{zr}} \right) \sin \left( \mathrm{\tmop{zl}} \right) \right)^2 - 8 \left( \cos \left( \frac{\pi}{2} + \mathrm{\tmop{zr}} \right) \sin \left( \mathrm{\tmop{zl}} \right) - \sin \left( \frac{\pi}{2} + \mathrm{\tmop{zr}} \right) \cos \left( \mathrm{\tmop{zl}} \right) \right)^2$}}
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|simplify(%)|}
{\ttfamily{$- 8 \cos \left( 2 \mathrm{\tmop{zl}} - 2 \mathrm{\tmop{zr}} \right)$.}}
The denominator, $\sum_{\tmop{ijkl}} \tmop{El}_i^{\ast} (\theta_l) \tmop{Er}_j^{\ast} (\theta_r) \tmop{Er}_k^{} (\theta_r) \tmop{El}_l (\theta_r) + \sum_{\tmop{ijkl}} \tmop{El}_i^{\ast} (\theta_l) \tmop{Er}_j^{\ast} (\theta_r + \pi / 2) \tmop{Er}_k^{} (\theta_r + \pi / 2) \tmop{El}_l (\theta_r)$, is computed with the same statements with exception of the factor of $(- 1)^c$, which is replaced with $(+ 1)^c$:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|Den:=(n,c,i,j,k,l,zl,zr)->(+1)^c*El(zl,n)[i]*Er(zr-c*PI/2,n)[j]| \\
\phantom{xxxxx}\verb|* Er(zr-c*PI/2,n)[k]*El(zl,n)[l]:| }
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|sum(sum(sum(sum(sum(sum(Den(n,c,i,j,i,j,zl,zr)| \\
\phantom{xxxxx}\verb|,i=1..2),j=1..2),k=1..2),l=1..2),n=0..1),c=0..1): simplify(%)| }
{\ttfamily{$8$.}}
Thus, this calculation delivers the same result as does the quantum algorithm for the correlation. Like many such quantum algorithms, the physical interpretation is less than obvious. \ But, for present purposes, an interesting point of entry is the numerator of the correlation. \ The result obtained above (dropping the overall factor of $8$), namely \begin{equation}
\left( \cos \left( \mathrm{\tmop{zl}} \right) \sin \left( \mathrm{\tmop{zr}}
\right) - \cos \left( \mathrm{\tmop{zr}} \right) \sin \left(
\mathrm{\tmop{zl}} \right) \right)^2 - \left( \cos \left( \frac{\pi}{2} +
\mathrm{\tmop{zr}} \right) \sin \left( \mathrm{\tmop{zl}} \right) - \sin
\left( \frac{\pi}{2} + \mathrm{\tmop{zr}} \right) \cos \left(
\mathrm{\tmop{zl}} \right) \right)^2, \end{equation} offers a propitious venue for interpretation of the physics involved by expanding each term, i.e., \begin{equation}
\cos^2 \left( \mathrm{\tmop{zl}} \right) \sin^2 \left( \mathrm{\tmop{zr}}
\right) - 2 \cos^{} \left( \mathrm{\tmop{zl}} \right) \sin^{} \left(
\mathrm{\tmop{zr}} \right) \cos \left( \mathrm{\tmop{zr}} \right) \sin
\left( \mathrm{\tmop{zl}} \right) + \cos^2 \left( \mathrm{\tmop{zr}} \right)
\sin^2 \left( \mathrm{\tmop{zl}} \right) + \Upsilon, \end{equation} where $\Upsilon$ is the expansion of the second term in Eq. (3). The first and third terms of Eq. (4) are the standard expressions for correlations of events on each side from each of the variant signals put out by the source; but, the middle term is not standard. \ It does not fit the law of photo current generation, i.e., $P \propto E^2$; because it is the product from four different fields, giving the amplitudes of electric fields for four different photo electron source events {---} all of which appears to mean that it cannot be associated with the generation of a photo electron current. \
In short, it is this middle term that requires a model or interpretation if the mystical aspects of EPR correlations are to be explained without recourse to preternatural phenomena.
\section{A new model for EPR correlations}
The motivation for the model proposed here is provided by the most conspicuous feature of the data from EPR experiments, namely, that data exhibiting the so-called quantum correlations is just a subset of the total data stream which has been filtered out in terms of a ``coincidence window.'' The narrower the window, the closer the coincidence pattern approaches the ideal as calculated using the quantum algorithm.
This fact suggests the following structure, which shall be taken as the hypothetical input for our model. The source crystal under stimulation of the driving input beam at various emission centers in the crystal independently produces two types of anti-correlated output signals, one with a vertically polarized pulse to the right and a horizontally polarized pulse to the left, and one in which the signal has switched polarization orientations. \ It is taken that the separate pulses in each pair have an intensity just sufficient (statistically) to elicit one photo electron.{\footnote{The absolute anti-correlation of detections in the output channels of a beam splitter at the one photo electron level is considered nowadays to reflect quantum structure. However, behaviour at a PBS is irrelevant to the quantum structure of coincidences. \ Moreover, there are also non quantum models for beam splitters, e.g., {\cite{es05}}, that can purge these last vestiges of quantum structure from the analysis of EPR correlations.}} \ \ Nevertheless, it is not taken that the matched pulses, one in each arm, necessarily elicit photo electrons exactly simultaneously; as is well known they can be separated by some random time interval, compatible with the coherence length of the pulses. Now, filtering the data to fit within a ``window,'' selects those coincidences of one detection on each side engendered by these two types of signal pairs that happen to have evoked photo electrons coincidentally within the window. Such coincidences arise from two distinct situations: one, when each of the \ photo electron detections is \ elicited by the same signal, and, two, when an event on each side is elicited by oppositely polarized signals. \ The first sort corresponds to the first and third terms in Eq. (4).
But, there are still additional coincidences possible if two separate signal types from the crystal are timed such that a coincidence can arise between single detections evoked one by each signal. \ In terms of classical physics, this assumes that two such signals, one of each variant, arise at separate locations within the crystal. Coincidences of this sort, which can be considered ``illegitimate,'' obviously, can be involved only if these two coincidences fall within a very short interval, shorter than the expected interval between coincidences generated by ``legitimate'' pairs, thereby forming a closer time association than the underlying legitimate events. When the window is narrow these illegitimate coincidences are counted along with a certain number of legitimate ones, whereas, with a broad window, additional coincidences are counted which arise only from legitimate pairs, i.e., from signals that are not coincident with the opposite variant. These extra, legitimate counts dilute the selected data sample by adding relatively more coincidences not corresponding to the cross terms in Eq. (4).
This can be understood in terms of the probabilities of detection. \ For each detection of a photo electron resulting from a pulse (assumed to have energy sufficient for only one photo electron) the probability that this electron is lifted into the conduction band after a time $\tmop{dt}$ equals $\tmop{dt} / l$ where $l$ is the pulse length. The probability of a coincidence with the lifting of a photo electron in the companion measuring station is then equal to $(\tmop{dt} / l)^2$, i.e., the probability of the coincidence of the two events is the product of their probabilities. \ Now, for the coincidence of photo electrons engendered by ``illegitimate'' combinations, the probability equals $(\tmop{dt}_1 / l) (\tmop{dt}_2 / l)$ where the subscripts indicate that two distinct signals are under consideration, i.e., an ``illegitimate'' coincidence. Clearly, to conform to the calculation described above, which is fully symmetric in the admitted combinations (as encoded by selection of indices) over which the sum is carried out, these two probabilities must be equal. \ This can arise only when $\tmop{dt}_1 \equiv \tmop{dt}_2$, that is, when the two signals from the crystal are by chance simultaneous because then the two intervals will be equal, or nearly so, only when they have essentially identical initial instants, which applies tautologically for coincidences that are virtually simultaneous, i.e., when selected under a very narrow coincidence window. \ However, for illegitimate coincidences, there is likely an offset in the starting instants for the $\tmop{dt}_i$ \ as they pertain to separate, uncoordinated, independent signal pairs. \ The extent to which the data includes coincidences admitted by a wide filter is the extent to which the quantum pattern is corrupted. \ \ \
A central issue is: can these `illegitimate' coincidences be related to the cross terms in Eq. (4)?
At first glance, this would seem to be impossible because the form of this term does not admit a straightforward interpretation in terms of the law of photo electron generation, which requires electric field amplitudes squared. \ Just here, however, another possibility enters. \ It is this. \ If it is allowed, that coincidences can arise from the occasions on which there is a temporal overlap of elementary signals, one of each variant, then there is a formal equivalence between the number of additional coincidences and the cross terms in Eq. (4). \ The essence of this equivalence consists in the fact, that formally seen, the cross terms in Eq. (4) correspond to physically sensible terms of the form: \begin{equation}
E_i (\tmop{zl})^2 E_j (\tmop{zr})^2, \end{equation} obtained with the substitutions: \begin{equation}
\cos (x + \pi / 2) = - \sin (x) ; \sin (x + \pi / 2) = \cos (x), \end{equation} that convert the four-factor, cross terms in Eq. (3) to the form of Eq. (5), which does admit physical interpretation in terms of photo electron generation. The phase shifts of $\pi / 2$ in the arguments here are related to the fact that the two factors on each side, pertain to opposite variants of the source signals, which must take into account the difference between the polarization orientation of the input pulses entering the beam splitters. \
Physically, this correspondence takes coincidences into account which arise from a photo electron generated on the left from one of the variants with a detection on the right from the opposite variant which happens to overlap temporally. \ The number of such coincidences corresponds to the number arising from source signals comprised of correlated (instead of anti-correlated) pairs. \ This poses a challenge for a physical interpretation of these experiments because the phase matching conditions imposed on the nonlinear generation processes in the crystal produce only anti-correlated output pairs. \ This challenge (for formulating a model) \ is overcome, however, by positing that such correlated pairs are comprised of detections on each side matched with detections on the other side from a distinct signal of the opposite variant.
\ It is to be stressed, that the significance of Eqs. (6) is {\tmem{not}} strictly physical. \ It represents mostly a formal correspondence rendering the quantum algorithm in accord with a feasible physical process.
In other words, detections are considerably different if two different but overlapping signals are present. \ Then, it becomes possible, that the two photo electrons closest in time have arisen from different pairs. \ That means, that in the total population of detected pairs, as identified by proximity of detection times, among pairs selected by narrow window criteria, there will be some pairs formed corresponding to the cross terms in Eq. (4). \ But as the window width is increased, so as to capture additional pairs, the number of such illegitimate pairs is not greater than the number found using a narrow window. \ In other words, the total data set grows in size with increasing window width but the number of illegitimate pairs or pairs corresponding to the cross terms remains the same. \ This explains just why experimenters must strive to reduce the window width as much as feasible; and, it explains why only the statistics of a subset of data taken with a narrow coincidence window width conform with those computed using so-called quantum algorithms.
\section{The rotational invariance anomaly}
A crucial feature of EPR data to be explained by any model is the evidence of a breakdown of rotational invariance. \ Previous analysis of this matter by this writer led to the conclusion that this break down should be complete, i.e., that the visibility of the signal should actually approach null for certain angular settings of the polarizers in the detection stations.{\cite{ak07s}} This conclusion, however, resulted from the assumption that individual signals are exclusively of one variant or the other. \ As described above, in the model proposed herein, such pure signals are indeed the majority, except for that small subset of data filtered out when the detection window width approaches null. \ These selected events in this special case comprise events corresponding to the cross terms from the overlay of both variants of source signals, and, as such, under optimum conditions, constitute a rotationally independent sum. \ Each signal variant under rotation compensates the other as they are exactly out of phase. \ This means that the small variations in visibility still seen in the data result from the fact that the window width is still sufficiently wide to admit a less than an ideal set of coincidences, that is, it contains a relative deficit of cross terms so that the statistics of this data set exhibit some portion of rotational variance.
This effect can be quantitatively depicted by redefining Eq. (4) so that the legitimate terms are multiplied by a factor, which when greater than $1$, represents the excess in their number whenever the coincidence window is widened:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|cor:=(zl,zr,y)->-cos(zl)^2*cos(zr)^2+cos(zl)^2*sin(zr)^2 | \\
\phantom{xxxxxxxxxx}\verb|- y^-1* 4*cos(zl)*cos(zr)*sin(zl)*sin(zr)| \\
\phantom{xxxxxxxxxxxxxxx}\verb|+cos(zr)^2*sin(zl)^2-sin(zl)^2*sin(zr)^2;| }
{\ttfamily{$( \mathrm{\tmop{zl}}, \mathrm{\tmop{zr}}, y) \mapsto \left( \left( \cos \left( \mathrm{\tmop{zl}} \right)^2 \sin \left( \mathrm{\tmop{zr}} \right)^2 - \cos \left( \mathrm{\tmop{zl}} \right)^2 \cos \left( \mathrm{\tmop{zr}} \right)^2 \right) - \frac{4 \cos \left( \mathrm{\tmop{zl}} \right) \cos \left( \mathrm{\tmop{zr}} \right) \sin \left( \mathrm{\tmop{zl}} \right) \sin \left( \mathrm{\tmop{zr}} \right)}{y} \right) + \cos \left( \mathrm{\tmop{zr}} \right)^2 \sin \left( \mathrm{\tmop{zl}} \right)^2 - \sin \left( \mathrm{\tmop{zl}} \right)^2 \sin \left( \mathrm{\tmop{zr}} \right)^2$}}
{\noindent}This expression is the result of multiplying all terms for legitimate coincidences in both the numerator and denominator and is therefore the absolute correlation function as a function of $y$, the excess factor of legitimate coincidences resulting from non ideal filtering. \ By now substituting this into the expression for CHSH discriminator:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|S:=(x,xx,z,zz,y)->cor(x,z,y)-cor(x,zz,y)+cor(xx,z,y)+cor(xx,zz,y):|}
{\noindent}and then defining, for convenience, the function:
{\color{red}\ttfamily{{\color{red} $\bullet$ {\color{black} }}}}{\color{blue}
\verb|SS:=(w,v,y)->S(w,w+2*v,w+v,w+3*v,y);|}
{\noindent}which is defined in terms of the values of the orientations of the polarizers which have been determined to maximise the violation of a Bell-inequality. \ $\tmop{SS} (w, v, y)$ can be plotted directly; see figures.
\begin{figure}
\caption{Ideal data set}
\caption{With excess legitimate coincidences}
\end{figure}
Fig. 1 shows the variation of the function $S$ in terms of the variable $v$, the displacement between the angular settings on both sides in the various experiments contributing terms to the CHSH discriminator when $y = 1$, i.e., for an ideal subset of data. If the source signals are rotationally invariant, then the values for $v$ have no absolute meaning, which implies that variation of $w$, the starting point, has no effect on $S$. \ Fig. 2 illustrates just how much rotational invariance is destroyed by a 10\% increase in the number of legitimate coincidences in the total data stream.
The existence of this effect is strong evidence that the source signals are not, as envisioned in orthodox quantum theory, comprised of singlet states; rather, the singlet state structure is mimicked only in the (ideal) subset of data filtered out with a small coincidence window.
\section{Conclusions} \ \
In the introduction, characteristics of the data taken in EPR experiments that a physical model of the underlying physical phenomena must explain were identified. \ It was noted that the data taken in these experiments differs considerably from the ideal data predicted by conventional analysis based on current quantum orthodoxy. \ A viable physical model should explain all the data taken, that is, the empirical facts, rather than just a sub portion, however significant it may be considered.
The three salient features of the data are explained by the model as follows:
\begin{enumeratenumeric}
\item {\tmem{The most obvious feature of EPR data streams is that there are
very few obvious, ideal coincidences}}.
The proposed model incorporates or explains this feature as a consequence of
the distribution of emission time of photo electrons. In this case it is
taken that the emission time of the photo electron in a detector is
displaced naturally and randomly within a pulse length or coherence length
of the signal impinging on the detector. \ It is a well-known fact that
electromagnetic pulses, while they can stimulate virtually instantaneous
emission of photo electrons, in fact for an ensemble of such pulses, the
actual exact emission times are distributed over the pulse length. Thus, the
emission times on opposite sides of an EPR setup usually do not exactly
coincide, even when generated by the same signal or pulse pair.
\item {\tmem{The population of coincidences that exhibit the pattern as
calculated using algorithms from quantum mechanics must be filtered from the
total data set by selection using ``coincidence circuitry'' with as narrow a
time interval window as possible}}.
Data subsets selected with a wider than optimum window, while they may
exceed limits considered to obtain from non quantum regimes, nevertheless
deviate from the ideal quantum pattern and approach that for non quantum
systems. \ Specifically, the curve of the CHSH discriminator function
exceeds $2$ but is significantly less than $2 \sqrt{2}$.
In terms of the proposed model, these features are explained as follows: \
It is taken that occasionally two pairs of signals of opposite polarization
character overlap temporally. \ For these cases, it might happen that the
closest coincidences involve detections from each signal because the actual
displacement of the truly paired events (i.e., from the same signal) are
displaced at larger intervals by cause of the effect considered in point 1.
above than these ``illegitimate'' coincidences. These extra, illegitimate,
coincidences are the excess that correspond to the cross terms in Eq. (3),
so that the manipulations in the quantum algorithm yield the observed
patterns coincidentally, even while these algebraic expressions as such do
not correspond to direct application of known physics principles, here the
photoelectric effect.
The corruption of the data set as the coincidence window is broadened is
then a consequence of the fact, that the ``illegitimate'' coincidences
represent relatively rare events that cannot occur for well separated signal
variants. They occur only for nearly simultaneous sums of both variants.
Therefore as the window is increased, the set of events considered for
analysis includes ever increasing numbers of coincidences formed from
legitimate pairs, and the statistics approach those for classical
particulate systems.
\item {\tmem{The filtered data sets for even narrow windows exhibits a
variation in visibility that should not occur for the data set envisioned in
terms of quantum analysis (i.e., for the singlet state, which is perfectly
rotationally invariant).}}
This phenomenon in terms of the model proposed herein is a result of the
fact, that no matter how narrow the coincidence window is taken, the
statistics can only approach the ideal which is mathematically codified with
the singlet state. \ If this model is faithful to the actual physical facts
of the EPR experiments, then it is to be expected that the degree of
violation of rotational invariance, and therefore the visibility, is a
function of the window width {---} a testable proposition. \end{enumeratenumeric} If this model is empirically sustainable, then it has been shown that claims that EPR experiments and experimental tests of ``Bell's theorem'' prove that Nature at a fundamental level involves nonlocal interaction, cannot be maintained. \ Likewise, it would show that ``irreal'' states (i.e., states composed of the sum of mutually exclusive options, for example the singlet state) do not have ontological status. \ They are artifacts of the formalism for which statistical parameters are valid for ensembles, but that cannot be applied to the individual entities constituting the ensemble. This conclusion follows directly from the fact that, were the input variant signals actually singlet states in each individual case, the statistics would be essentially invariant with respect to the coincidence window width because all signals would have equal probability of producing ``illegitimate'' coincidences.
Of course, it is in principle possible that some other true ``quantum'' system in which singlet states arise in fact exists. \ Experiments on such a system might then verify the current understanding of the issues around nonlocality; but, until such experiments on such systems are carried out, these issues remain open.
\end{document} |
\begin{document}
\title{Experimental Authentication of Quantum Key Distribution with Post-quantum Cryptography}
\author{Liu-Jun Wang} \thanks{Liu-Jun Wang and Kai-Yi Zhang contribute equally to this work.} \affiliation{Hefei National Laboratory for Physical Sciences at Microscale and Department of Modern Physics, University of Science and Technology of China, Hefei 230026, China} \affiliation{Shanghai Branch, CAS Center for Excellence and Synergetic Innovation Center in Quantum Information and Quantum Physics, University of Science and Technology of China, Shanghai 201315, China} \affiliation{School of Physics and Astronomy and Yunnan Key Laboratory for Quantum Information, Yunnan University, Kunming 650500, China} \author{Kai-Yi Zhang} \thanks{Liu-Jun Wang and Kai-Yi Zhang contribute equally to this work.} \affiliation{Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai 200240, China} \affiliation{Shanghai Qizhi Institute, Shanghai 200232, China} \author{Jia-Yong Wang} \affiliation{CAS Quantum Network Co., Ltd, Shanghai 201315, China} \author{Jie Cheng} \affiliation{QuantumCTek Co., Ltd, Hefei 230088, China} \author{Yong-Hua Yang} \affiliation{CAS Quantum Network Co., Ltd, Shanghai 201315, China} \author{Shi-Biao Tang} \affiliation{QuantumCTek Co., Ltd, Hefei 230088, China} \author{Di Yan} \affiliation{Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai 200240, China} \author{Yan-Lin Tang} \affiliation{QuantumCTek Co., Ltd, Hefei 230088, China} \author{Zhen Liu} \affiliation{Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai 200240, China} \author{Yu Yu}
\affiliation{Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai 200240, China} \affiliation{Shanghai Qizhi Institute, Shanghai 200232, China} \author{Qiang Zhang}
\affiliation{Hefei National Laboratory for Physical Sciences at Microscale and Department of Modern Physics, University of Science and Technology of China, Hefei 230026, China} \affiliation{Shanghai Branch, CAS Center for Excellence and Synergetic Innovation Center in Quantum Information and Quantum Physics, University of Science and Technology of China, Shanghai 201315, China} \author{Jian-Wei Pan}
\affiliation{Hefei National Laboratory for Physical Sciences at Microscale and Department of Modern Physics, University of Science and Technology of China, Hefei 230026, China} \affiliation{Shanghai Branch, CAS Center for Excellence and Synergetic Innovation Center in Quantum Information and Quantum Physics, University of Science and Technology of China, Shanghai 201315, China}
\maketitle
\textbf{Quantum key distribution (QKD) can provide information theoretically secure key exchange even in the era of quantum computers \cite{Bennett84,EKERT91,Scarani09}. However, QKD requires the classical channel to be authenticated, and the current method is pre-sharing symmetric keys \cite{Fung2010}. For a QKD network of $n$ users, this method requires $C_n^2 = n(n-1)/2$ pairs of symmetric keys to realize pairwise interconnection. In contrast, with the help of mature public key infrastructure (PKI) and post-quantum cryptography (PQC) with quantum resistant security, each user only needs to apply for a digital certificate from certificate authority (CA) to achieve efficient and secure authentication for QKD. We only need to assume the short-term security of the PQC algorithm to achieve the long-term security of the distributed keys. Here, we experimentally verified the feasibility, efficiency and stability of the PQC algorithm in QKD authentication, and demonstrated the advantages when new users join the QKD network. Using PQC authentication we only need to believe the CA is safe, rather than all trusted relays. QKD combined with PQC authentication will greatly promote and extend the application prospects of quantum safe communication.}
Recently, Google claimed to have achieved quantum supremacy \cite{arute2019quantum}, a major milestone towards the development of quantum computers. Quantum computing can efficiently solve classical hard problems such as integer factorization and discrete logarithms and demonstrates its quadratic speedup (over classical algorithms) in solving unstructured search problems \cite{shor1994algorithms,grover1996fast}, which poses a serious threat to the security of classical cryptographic algorithms based on the complexity of these problems. Boudot et al.~\cite{Boudot20} recently announced the factoring of RSA-240, an RSA number of 240 decimal digits or 795 bits, as well as solved a discrete logarithm of the same size. New records of this type are constantly being refreshed as the performance of computer hardware increases over time. In the era of quantum computing, there are two kinds of reliable information security mechanisms: one is quantum cryptography \cite{Gisin02}, which mainly includes quantum key distribution; the other is post-quantum cryptography, such as lattice-based cryptography and code-based cryptography, which cannot be effectively cracked by the currently known quantum computing algorithms.
Quantum key distribution is unconditionally secure based on the principle of quantum mechanics. With realistic devices, the security of QKD can also be guaranteed \cite{xu2020secure}. The experiments and practical applications of QKD have drastically developed. The secure key rate reaches 26.2 Mbps at a channel loss of 4 dB (equivalent to a 20-km-long optical fiber) \cite{islam2017provably}, and the maximum key distribution distance through practical optical fiber has exceeded 500 km \cite{PhysRevLett.124.070501,fang2020implementation}. Micius satellite has realized entanglement-based repeaterless quantum key distribution between two places on the ground at a distance of 1120 km \cite{yin2020entanglement}. Through trusted relay, several quantum communication networks have been built \cite{Peev09,Chen10,Sasaki11,Froehlich13,wang2014field,PhysRevLett.120.030501}, and the ``Beijing-Shanghai backbone'' quantum communication network spans 2200 km.
Nowadays, the hardness of most public key cryptography is based on integer factorization and discrete logarithm problems that are difficult or intractable for conventional computers. However, Shor's \cite{shor1994algorithms} quantum algorithm can achieve an exponential speedup in solving these mathematical problems. In 2016, NIST published a report on Post-quantum Cryptography \cite{CJL+2016} anticipating that a quantum computer is likely to be built by 2030 that breaks 2000-bit RSA in a few hours, and therefore renders the current public-key infrastructure insecure. As a result, in the same year NIST initiated the ``Post-Quantum Cryptography Standardization'' process by announcing a call for proposals of quantum resistant cryptographic primitives including public key encryption, digital signature and key exchange algorithms. The process is expected to release the standardization documents by 2024.
Quantum key distribution includes the quantum channel that transmits photons and the classical channel used in post data processing. The unconditional security of QKD does not require the classical channel to be confidential, but requires it to be authenticated, otherwise there will be a man-in-the-middle attack. Combined with the intercept-resend attack, the attacker can completely obtain the keys of both parties without being discovered, as shown in Fig.~\ref{fig:MITM_Flow_PQC}a.
\begin{figure*}
\caption{ \textbf{Schematic of man-in-the-middle attack and flow diagram of post-quantum cryptography authentication.} \textbf{a,} As a middleman, Eve pretends to be a legitimate party. He cuts off the quantum channel, reconnects the legitimate parties respectively, and carries out the intercept-resend attack. \textbf{b,} $|\psi\rangle$: quantum signals; $M$: classical messages; H: hash function, we used SM3 hash algorithm in the experiment; E: encryption (signature); D: decryption; C: comparison; $K$: pre-shared symmetric keys, $P_B$: Bob's public key, $S_B$: Bob's private key. The figure shows that Alice authenticates Bob's identity. In the experiment, we implemented two-way authentication, that is, Bob also authenticates Alice's identity. \textbf{c,} $D$: digest; $R$: random nonce, we generate them by Intel chips; $C$: valid certification; $P_r$ public key of certificate authority; $||$ : concatenate two bit strings ; $T$: tag (or signature) of concatenation of $R$ and $D$; $S$: private key; $P$: public key.}
\label{fig:MITM_Flow_PQC}
\end{figure*}
The processes of QKD that require authentication include: basis sifting, error correction verification, random number transfer needed for privacy amplification, and final key verification \cite{Fung2010}. QKD requires two-way authentication between the two parties.
The current secure authentication method is to pre-share a small amount of symmetric seed keys and encrypt (sign) and decrypt (verify) the hash value of classical messages, as shown in Fig.~\ref{fig:MITM_Flow_PQC}b. Later, the generated quantum key can be used for authentication. This way can guarantee the information theoretical security, however when the number of QKD network users is large, this method is not easy to operate and has the following problems. On the one hand, for a network with arbitrary two users connected, if the number of users is $n$, then the number of pre-shared key pairs $m$ is \begin{eqnarray} m = C_n^2 = \frac{n(n-1)}{2} \label{eq:one}. \end{eqnarray} Symmetric keys are generally pre-shared by face-to-face. When the number of users is relatively large, the burden of pre-sharing keys is heavy and inefficient. For example, if $n$=100, then $m$=4950. At the same time, each user needs to store the authentication key pairs with all other users. The storage, synchronization and management of so many key pairs will increase the complexity and security risk of the network. One solution is to use a trusted relay to form a star-type network, each user only connects and pre-shares one key pair with the trusted relay \cite{Froehlich13,hughes2013networkcentric}, but this reduces the interconnection between users. Moreover, when new users join a QKD network, they need to pre-share symmetric keys with the trusted relay or the original users on demand. If the new user's QKD task is urgent, it may be too late to distribute the authentication key pairs.
Another type of secure authentication method is using the post-quantum public key algorithm and PKI \cite{10.1007/978-3-642-38616-9_9}, as shown in Fig.~\ref{fig:MITM_Flow_PQC}b, c. Each user gets a digital certificate signed by a trusted certification center, which contains his/her identity, public key and other items required by the PKI standard. For a network of $n$ users, the number of digital certificates issued is $n$. If a new user joins the QKD network, he/she only needs to obtain a digital certificate. Therefore, the authentication based on the public key algorithm can solve the problems of pre-sharing symmetric keys. As long as the PQC algorithm is secure during the authentication process, the security of QKD will not be affected, even if the PQC is cracked after authentication, so we only need to assume the short-term security of PQC. This is different from using the PQC algorithm for confidentiality or key distribution, which will require long-term security of the PQC algorithm. Here, we verify the application of PQC in QKD authentication, which greatly improves the operability and efficiency of the QKD authentication process.
We realized the application of PQC in the QKD point-to-point link, with fiber distances from 10 km to 100 km. Figure~\ref{fig:KR_Length} shows the key rates as a function of fiber length. It can be seen that the key rates decrease exponentially with fiber length, which is consistent with the theoretical expectation. We compared the key rates at the same fiber length using the pre-shared key authentication and the post-quantum algorithm authentication, and the two were consistent within the statistical error. This is because the execution time of post-quantum algorithm authentication is less than 1 ms (see Methods), far less than one authentication cycle of the QKD system, which is 1 s. In the experiment, we also deliberately set PQC to feedback that the authentication failed, and as a result, the QKD system will discard the keys for these periods.
\begin{figure}
\caption{ \textbf{The secure key rate as a function of fiber length when QKD is authenticated by the PQC algorithm.} They are the average values in five minutes. The error bar represents a standard deviation.}
\label{fig:KR_Length}
\end{figure}
QKD networks can be generally divided into two types: all-pass network and trusted relay network. For the all-pass network, users are connected by optical switches (OS). In order to achieve arbitrary connection between users, each user must have a QKD transmitter and a receiver. We built an all-pass network for four users, connected by an optical switch, as shown in Fig.~\ref{fig:networks}a. It can realize two typical topological relationships, one is ring connection and the other is cross connection, as shown in Fig.~\ref{fig:networks}b and Fig.~\ref{fig:networks}c respectively. We verified the application of PQC authentication in these two kinds of all-pass networks. The experimental results are shown in Table~\ref{tab:all-pass}. We note that because the performance of different QKD devices is not exactly the same, their key rates and QBERs will be different under the same fiber lengths. Using PQC authentication, we also demonstrated the QKD relay network (see Supplementary Fig. 1 and Table I).
\begin{figure*}
\caption{ \textbf{PQC authentication in QKD networks.} \textbf{a,} All-pass QKD network. Four users are connected to each other through an optical switch. \textbf{b,} Ring network. \textbf{c,} Cross network. The actual distance between any two users is the sum of their respective distances from the optical switch. \textbf{d,} A 10-node QKD metropolitan area network composed of two relay networks. \textbf{e,} Trusted relays are replaced with optical switches to form an all-pass network. U11 and U12 are new users. See the text for the distance from each user to the trusted relay (optical switch).}
\label{fig:networks}
\end{figure*}
\begin{table} \caption{\label{tab:all-pass}Key rates and QBERs of the QKD all-pass network authenticated by PQC algorithm.} \begin{ruledtabular} \begin{tabular}{cccdc} Connection&Length (km)&Loss (dB)&\multicolumn{1}{c}{\textrm{Key rate (kbps)}}&QBER\\ \hline \multicolumn{5}{c}{\textrm{(a) Ring network}}\\ \hline U1-U2 & 50 & 11.26 & 72.16 & 0.751\%\\ U2-U3 & 70 & 15.35 & 20.17 & 1.140\%\\ U3-U4 & 90 & 18.81 & 10.52 & 0.883\%\\ U4-U1 & 70 & 15.4 & 30.58 & 0.647\%\\ \hline \hline \multicolumn{5}{c}{\textrm{(b) Cross network}}\\ \hline U1-U2 & 50 & 11.21 & 68.65 & 0.779\%\\ U2-U4 & 80 & 16.31 & 19.45 & 1.014\%\\ U4-U3 & 90 & 18.46 & 9.71 & 0.786\%\\ U3-U1 & 60 & 12.15 & 76.82 & 0.517\%\\ \end{tabular} \end{ruledtabular} \end{table}
The above results verify the feasibility of PQC algorithm for QKD network authentication. In order to demonstrate the efficiency of PQC authentication, we built two trusted relay networks and connected them to simulate the QKD metropolitan area network. They can be located on both sides of a city. Each relay network contains 5 user nodes, and a total of 10 users in the entire network, as shown in Fig.~\ref{fig:networks}d.
When using pre-shared key authentication, the trusted relay is usually needed to manage pre-shared keys at the cost of reducing the interconnection. With PQC authentication, the trusted relay can be replaced with an optical switch to realize arbitrary interconnection. Each user only needs one digital certificate for authentication, instead of pre-sharing $C_{10} ^ 2 = 45$ pairs of symmetric keys, as shown in Fig.~\ref{fig:networks}e. The interconnectivity of the QKD network has been greatly improved. To illustrate this point, in the experiment, we compared the QKD results of three pairs of users U1-U3, U5-U6, and U8-U10 in two cases, as shown in Table~\ref{tab:two_relay_OS}. Moreover, PQC authentication only needs to assume that the certificate authority is safe, reducing the security dependence on multiple trusted relays, which can improve the actual security of the entire network.
\begin{table*} \caption{\label{tab:two_relay_OS}Comparison of key rates and QBERs between relay network and all-pass network. R1 and R2 stand for relay 1 and relay 2 in Fig.~\ref{fig:networks}(e), respectively. The fiber length between two users in the all-pass network is the sum of the fiber lengths of the links between the two users in the relay network.} \begin{ruledtabular} \begin{tabular}{ccccdc} \multicolumn{2}{c}{\textrm{Connection}}&Length (km)&Loss (dB)&\multicolumn{1}{c}{\textrm{Key rate (kbps)}}&QBER\\ \hline \multicolumn{6}{c}{\textrm{(a) Relay network}}\\ \hline \multirow{2}{*}{U1-U3} &U1-R1&10&2.69&363.59&0.648\% \\ &R1-U3&30&6.70&194.32&0.761\% \\ \hline \multirow{3}{*}{U5-U6} &U5-R1&20&3.99&293.53&0.752\% \\ &R1-R2&20&4.08&288.16&0.475\% \\ &R2-U6&20&4.11&288.74&0.364\% \\ \hline \multirow{2}{*}{U8-U10} &U8-R2&10&2.62&287.47&0.511\% \\ &R2-U10&10&2.66&333.06&0.529\% \\ \hline \hline \multicolumn{6}{c}{\textrm{(b) All-pass network}}\\ \hline \multicolumn{2}{c}{U1-U3}&40&9.02&90.83&0.630\% \\ \hline \multicolumn{2}{c}{U5-U6}&60&12.12&48.00&0.978\% \\ \hline \multicolumn{2}{c}{U8-U10}&20&5.23&200.87&0.514\% \\ \end{tabular} \end{ruledtabular} \end{table*}
In the experiment, two new users U11 and U12 join the QKD network, as shown in Fig.~\ref{fig:networks}e. If pre-shared key authentication is used, for the relay network, new users need to pre-share keys with the relay, and can only perform QKD with the relay, but not with other users. For the all-pass network, each new user needs to pre-share 10 pairs of symmetric keys with 10 original users, and 1 pair of keys between the two new users. A total of 21 pairs of keys need to be pre-shared to achieve the connection between any two users. In contrast, if PQC authentication is adopted, trusted relays can be replaced with optical switches. Each new user only needs to apply for one digital certificate, and a total of two digital certificates can realize the connection of any two users. This greatly improves the convenience for new users to access the network and interconnection. After U11 and U12 got digital certificates, we demonstrated the QKD between U11-U2, U11-U7, U12-U4, U12-U9, and U11-U12. The results are shown in Table~\ref{tab:new-users}.
\begin{table} \caption{\label{tab:new-users}QKD Key rates and QBERs between new users U11 and U12 and original users in the network, and between U11 and U12.} \begin{ruledtabular} \begin{tabular}{cccdc} Connection&Length (km)&Loss (dB)&\multicolumn{1}{c}{\textrm{Key rate (kbps)}}&QBER\\ \hline U11-U2 & 40 & 8.11 & 139.79 & 0.846\%\\ U11-U7 & 50 & 11.26 & 90.18 & 0.573\%\\ U12-U4 & 40 & 8.11 & 113.42 & 0.792\%\\ U12-U9 & 40 & 8.16 & 101.78 & 0.873\%\\ U11-U12 & 50 & 11.07 & 83.05 & 0.858\%\\ \end{tabular} \end{ruledtabular} \end{table}
Finally, we tested the stability of PQC authentication with a pair of QKD devices. The fiber length is 40 km, and it has been running continuously for 30 hours. The PQC program keeps running normally, and QKD systems continuously generate keys (see Supplementary information).
Summarizing, we used the lattice-based post-quantum digital signature algorithm Aigis-Sig, combined with PKI, to achieve efficient and quantum secure authentication of QKD. Since the Aigis-Sig algorithm is highly computationally efficient, it does not affect the performance of QKD, such as the key rate. We experimentally verified the feasibility of its application in metropolitan QKD relay network and all-pass network. With PQC authentication, the trusted relay in the QKD network can be replaced with an optical switch. Each user only needs to apply for a digital certificate through PKI to realize the direct connection between any two users. When a new user joins the network, he/she only needs to obtain a digital certificate, instead of distributing symmetric keys with all other users, and they can immediately establish a QKD connection. Compared with the pre-shared key authentication, PQC authentication has obvious operability and efficiency advantages. Moreover, if the number of trusted relays is smaller, the security dependence on trusted relays in the network can be reduced, thus improving the security of the entire QKD network. We have also verified the long-term stability of PQC authentication.
\subsection*{\label{Methods}Methods} In the experiment, we used the BB84 protocol combined with decoy state method \cite{Ma05}, with polarization encoding. The system operating frequency was 625 MHz, and single photon detectors based on InGaAs avalanche photodiodes were used. The QKD transmitter and the QKD receiver were synchronized by periodic pulsed light. The synchronous light transmitted with the quantum signal light via a single optical fiber through wavelength-division multiplexing. The QKD systems used SM3 hash algorithm to generate digest values of 256 bits for the messages to be authenticated, and output them to PQC program. The finite-key effect is considered in the data processing.
The PQC algorithm we used is Aigis-Sig \cite{zhang2020tweaking}, an efficient lattice-based digital signature scheme from variants of Learning With Errors (LWE) \cite{Regev05} and Small Integer Solutions (SIS) \cite{Ajtai96} problems. It has been shown that these two problems are at least as hard as some worst-case lattice problems (e.g., Gap-SIVP) for certain parameter choices\cite{Regev09,Peikert09,GPV08}. Therefore, the post-quantum security of Aigis-Sig algorithm is based on the conjectured quantum resistance of the underlying lattice problems. Furthermore, it has not been found that quantum algorithms have substantial advantages (beyond polynomial speedup) over classical ones in solving lattice problems.
Our authentication protocol adopts a PKI enhanced with post-quantum secure Aigis-Sig as shown in Fig.~\ref{fig:MITM_Flow_PQC}c. The protocol consists of two phases. In the first phase, the transmitter and the receiver first exchange their own certificates issued by the certificate authority (CA) to each other. Then they use the public key of CA to verify the other public key belongs to its identity. In the second phase, the transmitter and the receiver first use our Aigis-Sig to sign the message digest under their own private keys, then they use the confirmed public keys of the other to verify the correctness of the receiving signatures. Because only the legitimate party has the corresponding private key, it can be confirmed that the message is signed legally.
In order to prevent the replay attack, we introduce the nonce in our authentication protocol, the nonce is a random number generated by Intel chips. We exchange the nonce in the first phase and concatenate them with the message digest together as our signing message in the second phase. Note that we implemented two-way authentication in QKD data processing.
We implement PQC algorithm in Win10 64bit, Intel(R) Core(TM) i7-9750H CPU @2.60GHz, 8G RAM. The average CPU cycle of Signature Generation is 459903. The average CPU cycle of Signature Verification is 104337. The signature size is 2445 bytes. The real execution time is less than 1ms.
\begin{acknowledgments} This work was supported by the National Key R\&D Program of China (Grants No. 2017YFA0304000), the National Natural Science Foundation of China, the Chinese Academy of Sciences (CAS), Shanghai Municipal Science and Technology Major Project (Grant No.2019SHZDZX01), the Anhui Initiative in Quantum Information Technologies, and Yunnan Fundamental Research Project (Grant No.K264202005920) and the Major Science and Technology Project (Grant No.2018ZI002). \end{acknowledgments}
\subsection*{Supplementary information} In the relay network experiment, the network includes a relay node and three user nodes, as shown in Fig.~\ref{fig:relay}. Due to the high cost of single photon detectors, relay nodes generally deploy QKD receivers, and user nodes deploy QKD transmitters. The fiber lengths between the relay and the users are typical distances within metropolitan area. Using PQC authentication, the QKD between the relay node and the three users was successfully achieved. The key rates and QBERs are shown in the Table~\ref{tab:relay}, they are the average values in five minutes.
\begin{figure}
\caption{ The QKD network based on trusted relay, U1-U3 represents three user nodes, all of them hold QKD transmitters, relay node places a QKD receiver. The users generate key pairs with each other through the relay.}
\label{fig:relay}
\end{figure}
\begin{table} \caption{\label{tab:relay}Key rates and quantum bit error rates (QBERs) of the QKD relay network authenticated by PQC algorithm.} \begin{ruledtabular} \begin{tabular}{cccdc} Connection&Length (km)&Loss (dB)&\multicolumn{1}{c}{\textrm{Key rate (kbps)}}&QBER\\ \hline relay-U1 & 20 & 4.03 & 309.55 & 0.704\%\\ relay-U2 & 30 & 6.87 & 226.62 & 0.381\%\\ relay-U3 & 40 & 8.23 & 109.71 & 0.742\%\\ \end{tabular} \end{ruledtabular} \end{table}
In the experiment, we tested the stability of PQC authentication with a pair of QKD devices. The fiber length is 40 km, and it has been running continuously for 30 hours. The PQC program keeps running normally, and QKD systems continuously generate keys, as shown in Fig.~\ref{fig:stability_keyrate}. The secure key rate is in the range of 100 - 180 kbps, and the fluctuations are caused by the QBER reaching 3\% or the continuous running time reaching 30 minutes, which triggers the polarization feedback set by the QKD system. The 30-hour average key rate is 144.1 kbps. It can also be seen from the figure that polarization feedback is more frequent during the daytime (08:00-18:00) than at night (18:00-08:00), because human activities and temperature fluctuations during the day interfere more seriously with the optical fiber. Figure~\ref{fig:stability_QBER} shows a curve of the QBER within a 30-minute operating cycle of a QKD system. One QBER data is recorded every second. The QBER is distributed between 0.65\% and 1.1\%, and the 30-minute average is 0.876\%. The above results show the stability and reliability of PQC authentication applied to QKD.
\begin{figure}
\caption{ A curve of 30-hour QKD key rates. The fiber distance is 40 km, and each value is an average of 5 minutes. The near periodic fluctuation of the key rate is due to the fact that the QKD system is set to start polarization feedback when the QBER reaches 3\% or the QKD continuous running time reaches 30 minutes, so the data points containing polarization feedback in the key rate statistical period will be lower.}
\label{fig:stability_keyrate}
\end{figure}
\begin{figure}
\caption{ A curve of QBER for 30 minutes. One data is recorded per second in the experiment. The average QBER is 0.876\%.}
\label{fig:stability_QBER}
\end{figure}
\end{document} |
\begin{document}
\thispagestyle{plain} \begin{center}
{\large \bf \uppercase{Reversible sequences of cardinals, reversible\\[1mm]
equivalence relations, and similar structures}} \end{center}
\begin{center} {\bf Milo\v s S.\ Kurili\'c\footnote{Department of Mathematics and Informatics, Faculty of Science, University of Novi Sad,
Trg Dositeja Obradovi\'ca 4, 21000 Novi Sad, Serbia.
email: milos@dmi.uns.ac.rs} and Nenad Mora\v ca\footnote{Department of Mathematics and Informatics, Faculty of Science, University of Novi Sad,
Trg Dositeja Obradovi\'ca 4, 21000 Novi Sad, Serbia.
email:
nenad.moraca@dmi.uns.ac.rs}} \end{center} \begin{abstract} \noindent A relational structure ${\mathbb X}$ is said to be reversible iff every bijective endomorphism $f:X\rightarrow X$ is an automorphism. We define a sequence of non-zero cardinals $\langle \kappa _i :i\in I\rangle$ to be reversible iff each surjection $f :I\rightarrow I$ such that $\kappa _j =\sum _{i\in f^{-1}[\{ j \}]}\kappa_i$, for all $j\in I $, is a bijection, and characterize such sequences: either $\langle \kappa _i :i\in I\rangle$ is a finite-to-one sequence, or $\kappa _i\in {\mathbb N}$, for all $i\in I$,
$K:=\{ m\in {\mathbb N} : \kappa _i =m ,\mbox{ for infinitely many } i\in I \}$ is a non-empty independent set, and $\gcd (K)$ divides at most finitely many elements of the set $\{ \kappa _i :i\in I \}$. We isolate a class of binary structures such that a structure from the class is reversible iff the sequence of cardinalities of its connectivity components is reversible. In particular, we characterize reversible equivalence relations, reversible posets which are disjoint unions of cardinals $\leq \omega$, and some similar structures. In addition, we show that a poset with linearly ordered connectivity components is reversible, if the corresponding sequence of cardinalities is reversible and, using this fact, detect a wide class of examples of reversible posets and topological spaces.
{\sl 2010 MSC}: 03C50, 03C07, 03E05, 06A06, 05C20, 05C40.
{\sl Key words}: reversible sequence of cardinals, reversible sequence of natural numbers, reversible equivalence relation, digraph, poset. \end{abstract} \section{Introduction}\label{S1} A structure is called reversible iff all its bijective endomorphisms are automorphisms and the class of reversible structures contains, for example, Euclidean, compact and many other relevant topological spaces \cite{RajWil,DoyHoc,Dow}, linear orders, Boolean lattices, well founded posets with finite levels \cite{Kuk,Kuk1}, tournaments, Henson graphs \cite{KuMoExtr}, and Henson digraphs \cite{KRet}. In addition, reversible structures have several distinguished properties; for example, the Cantor-Schr\"{o}der-Bernstein property for condensations (bijective homomorphisms).
It seems that the property of reversibility of relational structures is more of set-theoretical or combinatorial, than of model-theoretical nature--it is an invariant of isomorphism and condensational equivalence, while it is not preserved under bi-embeddability, bi-definability and elementary equivalence \cite{KDef,KuMoSim}. But it is an invariant of some forms of bi-interpretability \cite{KRet}, extreme elements of $L_{\infty \omega}$-definable classes of structures are reversible under some syntactical restrictions \cite{KuMoExtr}, and all structures first-order definable in linear orders by quantifier-free formulas without parameters (i.e., monomorphic or chainable structures) are reversible \cite{KDef}.
In this article we continue the investigation of reversibility in the class of disconnected binary structures initiated in \cite{KuMoDiscI}. If ${\mathbb X}$ is a binary structure and ${\mathbb X} _i$, $i\in I$, are its connectivity components, then, clearly, the sequence of cardinal numbers $\langle |X_i|:i\in I\rangle$ is an isomorphism-invariant of the structure and in some classes of structures (for example, in the class of equivalence relations) that cardinal invariant characterizes the structure up to isomorphism. In such classes the reversibility of a structure, being an isomorphism-invariant as well, can be regarded as a property of the corresponding sequence of cardinals.
So, using the characterization of reversible disconnected binary structures from \cite{KuMoDiscI} (see Fact \ref{TB044}) we easily isolate the following property of sequences of cardinals (called reversibility as well) which characterizes reversibility in the class of equivalence relations: If $I$ is a non-empty set, an $I$-sequence of non-zero cardinals $\langle \kappa _i :i\in I\rangle$ will be called {\it reversible} iff there is no non-injective surjection $f :I\rightarrow I$ such that \begin{equation}\label{EQB032}\textstyle \forall j\in I \;\;\kappa _j =\sum _{i\in f^{-1}[\{ j \}]}\kappa_i . \end{equation} The first main result of this paper is the following characterization of reversible sequences of cardinals. In order to state it we recall some definitions. For a subset $K$ of the set of natural numbers, ${\mathbb N}$, let $\langle K\rangle$ denote the subsemigroup of the semigroup $\langle {\mathbb N} , +\rangle$ generated by $K$. A set $K$ is called {\it independent} iff \begin{equation}\label{EQB003} \forall n\in K \;\; n\not\in \langle K\setminus \{ n \}\rangle. \end{equation} So, $\emptyset$ is an independent set. If $K \neq\emptyset$, by $\gcd (K)$ we denote the greatest common divisor of the numbers from $K$. \begin{te}\label{TB042} A sequence of non-zero cardinals $\langle \kappa _i :i\in I\rangle$ is reversible iff \begin{itemize} \item[-] either $\langle \kappa _i :i\in I\rangle$ is a finite-to-one sequence, \item[-] or $\kappa _i\in {\mathbb N}$, for all $i\in I$,\\
$K:=\{ m\in {\mathbb N} : |\{ i\in I : \kappa _i =m\}|\geq \omega\}$
is a non-empty independent set, \\ and $\gcd (K)$ divides at most finitely many elements of the set $\{ \kappa _i :i\in I \}$.\footnote{For example, if $I$ is a non-empty set of any size and $\langle n_i :i\in I\rangle \in {}^{I}{\mathbb N}$, then by Theorem \ref{TB042} we have:
if $K=\emptyset$ (which is possible if $|I|\leq \omega$), then $\langle n_i \rangle $ is a reversible sequence;
if $K=\{ 2,5\}$, then $\langle n_i \rangle $ is a reversible sequence iff the set $\{ n_i :i\in I \}$ is finite;
if $K=\{ 4,10\}$, then $\langle n_i \rangle $ is a reversible sequence iff the set $\{ n_i :i\in I \}$ contains at most finitely many even numbers. } \end{itemize} \end{te} A proof of Theorem \ref{TB042} is given in the last (and the largest) Section \ref{S4}, where, in addition, we show that the set of reversible sequences of natural numbers is a dense $F_{\sigma \delta \sigma}$-subset of the Baire space, and that it is not a subsemigroup of $\langle {\mathbb N} ^{\mathbb N} ,\circ\rangle$.
Section \ref{S2} contains definitions and facts making the paper self-contained.
In Section \ref{S3}, generalizing the situation with equivalence relations, we isolate a wider class of structures with the same property--that the reversibility of a structure from the class is equivalent to the reversibility of the corresponding sequence of sizes of its components--the class of structures having the sequence of components rich for monomorphisms. We also study the class $\mathop{\rm RFM}\nolimits$ of such sequences of structures, compare it with some relevant classes, detect some classes of structures such that the reversibility of a structure from the class follows from the reversibility of the corresponding cardinal sequence and in this way detect wide classes of reversible digraphs, posets, and topological spaces. \section{Preliminaries}\label{S2} \paragraph{Reversible structures} If $L=\langle R_i :i\in I\rangle$ is a relational language, where $\mathop{\rm ar}\nolimits (R _i)=n_i\in {\mathbb N}$, for $i\in I$, and ${\mathbb X}$ and ${\mathbb Y}$ are $L$-structures, then by $\mathop{\rm Iso}\nolimits ({\mathbb X} ,{\mathbb Y} )$, $\mathop{\rm Cond}\nolimits ({\mathbb X} ,{\mathbb Y} )$ and $\mathop{\rm Mono}\nolimits ({\mathbb X} ,{\mathbb Y} )$ we denote the set of all isomorphisms, condensations (bijective homomorphisms) and monomorphisms (injective homomorphisms) from ${\mathbb X}$ to ${\mathbb Y}$ respectively. Clearly, $\mathop{\rm Iso}\nolimits ({\mathbb X} ,{\mathbb X} )$ is the set of automorphisms, $\mathop{\rm Aut}\nolimits ({\mathbb X} )$, of ${\mathbb X}$, instead of $\mathop{\rm Cond}\nolimits ({\mathbb X} ,{\mathbb X} )$ we will write $\mathop{\rm Cond}\nolimits ({\mathbb X} )$ etc. For a set $X$ by $\mathop{\rm Sym}\nolimits (X)$ (resp.\ $\mathop{\rm Sur}\nolimits (X)$) we denote the set of all bijections (resp.\ surjections) $f:X\rightarrow X$.
The {\it condensational preorder} $\preccurlyeq _c $ on the class of $L$-structures is defined by ${\mathbb X} \preccurlyeq _c {\mathbb Y}$ iff $\mathop{\rm Cond}\nolimits ({\mathbb X} ,{\mathbb Y} )\neq\emptyset$, the {\it condensational equivalence} is the equivalence relation defined on the same class by ${\mathbb X} \sim _c {\mathbb Y}$ iff ${\mathbb X} \preccurlyeq _c {\mathbb Y}$ and ${\mathbb Y} \preccurlyeq _c {\mathbb X}$ and it determines the antisymmetric quotient of the condensational preorder, the {\it condensational order}, in the usual way.
An $L$-structure ${\mathbb X} =\langle X,\rho\rangle$ is called {\it reversible} iff $\mathop{\rm Cond}\nolimits ({\mathbb X} )=\mathop{\rm Aut}\nolimits ({\mathbb X} )$. Clearly, $\rho=\langle \rho _i :i\in I\rangle$ is an element of the set $\mathop{\rm Int}\nolimits _L (X)=\prod _{i\in I}P(X^{n_i})$ of all interpretations of the language $L$ over the domain $X$ and defining the partial order $\subset$ on $\mathop{\rm Int}\nolimits _L (X)$ by $\rho\subset \sigma$ iff $\rho_i \subset \sigma _i$, for all $i\in I$, it is easy to obtain the following simple characterizations of reversible $L$-structures (see \cite{KuMoVar}). \begin{fac}\label{TB045} For an $L$-structure ${\mathbb X} =\langle X,\rho\rangle$ the following conditions are equivalent
(a) ${\mathbb X} $ is a reversible structure,
(b) $\forall\sigma\in\mathop{\rm Int}\nolimits _L (X)\;\;(\sigma\varsubsetneq \rho \Rightarrow \sigma\not\cong \rho )$,
(c) $\forall\sigma\in\mathop{\rm Int}\nolimits _L (X)\;\;(\rho\varsubsetneq \sigma \Rightarrow \sigma\not\cong \rho )$,
(d) $\forall f\in \mathop{\rm Sym}\nolimits (X)\;\; ( f[\rho ]\subset \rho \Rightarrow f[\rho ]= \rho)$. \end{fac} Reversible $L$-structures have the Cantor-Schr\"{o}der-Bernstein property for condensations. Moreover we have (see \cite{KuMoVar}) \begin{fac}\label{TA011} Let ${\mathbb X}$ and ${\mathbb Y}$ be $L$-structures. If ${\mathbb X}$ is a reversible structure and ${\mathbb Y} \sim _c {\mathbb X}$, then ${\mathbb Y} \cong {\mathbb X}$ (thus ${\mathbb Y}$ is reversible too) and $\mathop{\rm Cond}\nolimits ({\mathbb X} ,{\mathbb Y} )=\mathop{\rm Iso}\nolimits ({\mathbb X} ,{\mathbb Y} )$. \end{fac} \paragraph{Disconnected binary structures}
Let $L_b$ be the binary language, that is, $L_{b}=\langle R\rangle$ and $\mathop{\rm ar}\nolimits (R)=2$. If ${\mathbb X}=\langle X,\rho \rangle$ is an $L_b$-structure, then the transitive closure $\rho _{rst}$ of the relation $\rho _{rs} =\Delta _X \cup \rho \cup \rho ^{-1}$ (given by $x \,\rho _{rst} \,y$ iff there are $n\in {\mathbb N}$ and $z_0 =x , z_1, \dots ,z_n =y$ such that $z_i \;\rho _{rs} \;z_{i+1}$, for each $i<n$)
is the minimal equivalence relation on $X$ containing $\rho $. The corresponding equivalence classes are called the {\it components} of ${\mathbb X}$ and the structure ${\mathbb X}$ is called {\it connected} iff $|X/\rho _{rst} |=1$.
If ${\mathbb X} _i=\langle X_i, \rho _i \rangle$, $i\in I$, are connected $L_b$-structures and $X_i \cap X_j =\emptyset$, for different $i,j\in I$, then the structure $\bigcup _{i\in I} {\mathbb X} _i =\langle \bigcup _{i\in I} X_i , \bigcup _{i\in I} \rho _i\rangle$ is the {\it disjoint union} of the structures ${\mathbb X} _i$, $i\in I$, and the structures ${\mathbb X} _i$, $i\in I$, are its components. \begin{fac}[\cite{KuMoDiscI}]\label{TB044} Let ${\mathbb X} _i$, $i\in I$, be pairwise disjoint and connected $L_b$-structures. \begin{itemize} \item[(a)] If $\;\bigcup _{i\in I} {\mathbb X} _i$ is reversible, then all structures $\mathbb{X}_i$, $i\in I$, are reversible. \item[(b)] $\bigcup _{i\in I} {\mathbb X} _i$ is a reversible structure iff
whenever $f:I\rightarrow I$ is a surjection, $g_i \in \mathop{\rm Mono}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})$, for $i\in I$, and \begin{equation}\label{EQB035} \forall j\in I \;\; \Big(\Big\{g_i[X_i]: i\in f^{-1}[\{ j\}] \Big\} \mbox{ is a partition of }X_j\Big), \end{equation} we have \begin{equation}\label{EQB034} f\in \mathop{\rm Sym}\nolimits (I) \;\land \;\forall i\in I \;\; g_i \in \mathop{\rm Iso}\nolimits ({\mathbb X} _i,{\mathbb X} _{f(i)}) . \end{equation} \end{itemize} \end{fac} \section{Sequences of structures rich for monomorphisms}\label{S3} We will say that a sequence of $L$-structures $\langle {\mathbb X}_i :i\in I\rangle$ is {\it rich for monomorphisms} iff \begin{equation}\label{EQA027}
\forall i,j\in I \;\; \forall A\in [X_j ]^{|X_i|} \;\;\exists g\in \mathop{\rm Mono}\nolimits ({\mathbb X} _i , {\mathbb X} _j)\;\; g[X_i]=A. \end{equation} By Fact \ref{TB044}(a), a necessary condition for the reversibility of a disconnected binary structure is the reversibility of its components. Hence, and in order to simplify notation, in the sequel we work under the following assumption: \begin{itemize} \item[($\ast$)] ${\mathbb X} _i$, $i\in I$, are pairwise disjoint, connected and reversible $L_b$-structures. \end{itemize} Let $\mathop{\rm RFM}\nolimits$ denote the class of sequences of $L_b$-structures $\langle {\mathbb X}_i :i\in I\rangle$ (where $I$ is any non-empty set) satisfying $(\ast)$ and which are rich for monomorphisms. \subsection{Reversible equivalence relations and similar structures} First we show that the reversibility of a structure having the sequence of components in $\mathop{\rm RFM}\nolimits$ depends only on the corresponding cardinal sequence. \begin{te}\label{TA021} If $\langle {\mathbb X}_i :i\in I\rangle \in \mathop{\rm RFM}\nolimits$, then
(a) The structures of the same size are isomorphic,
(b) $\bigcup _{i\in I}{\mathbb X}_i$ is reversible $\Leftrightarrow$ $\langle |X_i|: i\in I \rangle$ is a reversible sequence of cardinals. \end{te} \noindent{\bf Proof. }
(a) If $|X_i|=|X_j|$, then by (\ref{EQA027}) there are $g\in \mathop{\rm Cond}\nolimits ({\mathbb X} _i , {\mathbb X} _j)$ and $g'\in \mathop{\rm Cond}\nolimits ({\mathbb X} _j , {\mathbb X} _i)$. So ${\mathbb X} _i \sim _c {\mathbb X} _j$, which by Fact \ref{TA011} implies that ${\mathbb X} _i \cong {\mathbb X} _j$.
(b) ($\Rightarrow$) Suppose that the sequence $\langle |X_i|: i\in I \rangle$ is not reversible and that
$f:I\rightarrow I$ is a noninjective surjection such that for each $j\in I$ we have $|X_j| =\sum _{i\in f^{-1}[\{ j \}]}|X_i|$. Then for $j\in I$ there is a partition $\{ A^j_i:i\in f^{-1}[\{ j \}]\}$ of $X_j$ such that $|A^j_i|=|X_i|$, for all $i\in f^{-1}[\{ j \}]$ and, by (\ref{EQA027}), there are monomorphisms $g_i : {\mathbb X} _i \rightarrow {\mathbb X}_j ={\mathbb X} _{f(i)}$ satisfying $g_i [X_i]=A^j_i$. By Fact \ref{TB044}(b) the structure $\bigcup _{i\in I}{\mathbb X}_i $ is not reversible.
($\Leftarrow$) Let $\langle |X_i|: i\in I \rangle$ be a reversible sequence of cardinals. In order to use Fact \ref{TB044}(b), assuming that $f:I\rightarrow I$ is a surjection,
$g_i \in \mathop{\rm Mono}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})$, for $i\in I$, and that (\ref{EQB035}) holds, we prove (\ref{EQB034}). First, for $i\in I$, since the function $g_i$ is an injection we have $|X_i|=|g_i[X_i]|$. So, by (\ref{EQB035}) for each $j\in I$ we have $|X_j| =\sum _{i\in f^{-1}[\{ j \}]}|X_i|$ and, since the sequence $\langle |X_i|: i\in I \rangle$ is reversible, $f\in \mathop{\rm Sym}\nolimits (I)$.
Consequently, for $i\in I$ we have $g_i[X_i]=X_{f(i)}$ and, hence, $|X_i|=|X_{f(i)}|$, which by (a) implies ${\mathbb X} _i \cong {\mathbb X} _{f(i)}$ and, in addition, $g_i\in \mathop{\rm Cond}\nolimits ({\mathbb X} _i, {\mathbb X} _{f(i)})$. Since the structures ${\mathbb X}_i$ are reversible, by Fact \ref{TA011} we have $\mathop{\rm Cond}\nolimits ({\mathbb X} _i, {\mathbb X} _{f(i)})=\mathop{\rm Iso}\nolimits ({\mathbb X} _i, {\mathbb X} _{f(i)})$; so $g_i\in \mathop{\rm Iso}\nolimits ({\mathbb X} _i, {\mathbb X} _{f(i)})$, for all $i\in I$, and (\ref{EQB034}) is true indeed.
$\Box$ \begin{te}\label{TB048}
Let $\sim$ be an equivalence relation on a set $X$, ${\mathbb X} =\langle X ,\sim\rangle$, and $\{ X_i :i\in I\}$ the corresponding partition. Then the structure ${\mathbb X}$ is reversible iff $\langle |X_i| :i\in I \rangle$ is a reversible sequence of cardinals.
The same holds for the graphs (resp.\ posets) of the form ${\mathbb X} =\bigcup _{i\in I}{\mathbb X}_i$, where ${\mathbb X}_i$, $i\in I$, are pairwise disjoint complete graphs (resp.\ ordinals $\leq \omega$). \end{te} \noindent{\bf Proof. } It is clear that any sequence of disjoint $L_b$-structures with full relations, or complete graphs, or well orders $\leq \omega$ belongs to $\mathop{\rm RFM}\nolimits$; so Theorem \ref{TA021} applies.
$\Box$ \begin{rem}\label{RB001}\rm There are ${\mathfrak c}$-many non-isomorphic countable reversible equivalence relations (and the same holds for the classes of graphs and posets from Theorem \ref{TB048}). By Theorems \ref{TB048} and \ref{TB042}, if $\langle n_i :i\in {\mathbb N}\rangle \in {}^{{\mathbb N}}{\mathbb N}$ is an increasing sequence, then the structure ${\mathbb X}_{\langle n_i\rangle}$ with the equivalence relation on ${\mathbb N}$
determined by a partition $\{ C_i :i\in {\mathbb N} \}$, where $|C_i|=n_i$, for all $i\in {\mathbb N}$, is reversible. Also, if $\langle n_i :i\in {\mathbb N}\rangle\neq \langle n'_i :i\in {\mathbb N}\rangle $, then the corresponding structures are non-isomorphic. For $A\in [{\mathbb N} ]^\omega$ let $\langle n^A_i :i\in {\mathbb N}\rangle$ be the increasing enumeration of the set $A$. Then the structures ${\mathbb X} _{\langle n^A_i\rangle}$, $A\in [{\mathbb N} ]^\omega$, are non-isomorphic, countable and reversible. \end{rem} \subsection{More reversible digraphs, posets, and topological spaces} In the following theorem we detect a class of structures such that the reversibility of a structure belonging to the class {\it follows} from the reversibility of the sequence of cardinalities of its components. \begin{te}\label{TB047}
If ${\mathbb X}_i$, $i\in I$, are disjoint tournaments and the sequence of cardinals $\langle |X_i|: i\in I\rangle$ is reversible, then the digraph $\bigcup _{i\in I}{\mathbb X}_i$ is reversible.
This statement holds if, in particular, ${\mathbb X}_i$, $i\in I$, are disjoint linear orders. Then $\bigcup _{i\in I}{\mathbb X}_i$ is a reversible disconnected partial order. \end{te} \noindent{\bf Proof. } In order to apply Fact \ref{TB044}(b) we suppose that $f:I\rightarrow I$ is a surjection,
$g_i \in \mathop{\rm Mono}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})$, for $i\in I$, and that (\ref{EQB035}) holds. Then, since $|g_i[X_i]|=|X_i|$, for $i\in I$, for each $j\in I$ by (\ref{EQB035}) we have $|X_j|=\sum _{i\in f^{-1}[\{ j\} ]}|X_i|$, which, since the sequence $\langle |X_i|: i\in I\rangle$ is reversible, implies that $f\in \mathop{\rm Sym}\nolimits (I)$. Thus for each $i\in I$ we have $g_i \in \mathop{\rm Cond}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})$, and, since the structures ${\mathbb X} _i$, $i\in I$, are tournaments, $\mathop{\rm Cond}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})= \mathop{\rm Iso}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})$. Thus (\ref{EQB034}) is true and the digraph $\bigcup _{i\in I}{\mathbb X}_i$ is reversible indeed.
$\Box$ \begin{ex}\label{EXB011}\rm The converse of Theorem \ref{TB047} is not true. Let $I={\mathbb N}$ and ${\mathbb X} _i \cong \omega i$, for $i\in {\mathbb N}$. By Theorem \ref{TB042} the sequence of cardinals $\langle \omega, \omega , \dots\rangle$ is not reversible. Using Fact \ref{TB044}(b) we show that ${\mathbb X} =\bigcup _{i\in {\mathbb N}}{\mathbb X}_i$ is a reversible structure. Let $f:{\mathbb N}\rightarrow {\mathbb N}$ be a surjection, $g_i \in \mathop{\rm Mono}\nolimits({\mathbb X} _i ,{\mathbb X} _{f(i)})$, for $i\in {\mathbb N}$, and let (\ref{EQB035}) hold. First, by induction we show that $f(i)=i$, for all $i\in {\mathbb N}$.
If $i\in {\mathbb N}$ and $f(i)=1$, then $g_i \in \mathop{\rm Mono}\nolimits({\mathbb X} _i , {\mathbb X} _1 )$ and, since monomorphisms between linear orders are embeddings, $\omega i \hookrightarrow \omega$ and, hence, $i=1$. Thus $f^{-1}[\{ 1\}]\subset \{ 1\}$ and, since $f$ is a surjection, $f^{-1}[\{ 1\}]= \{ 1\}$.
Let $j\in {\mathbb N}$ and $f(k)=k$, for all $k<j$. If $i\in {\mathbb N}$ and $f(i)=j$, then $g_i \in \mathop{\rm Mono}\nolimits({\mathbb X} _i , {\mathbb X} _j )$ and, as above, $\omega i \hookrightarrow \omega j$, which means that $i\leq j$. By the induction hypothesis we have $i\geq j$, so $i=j$ and, thus, $f^{-1}[\{ j\}]\subset \{ j\}$ and, since $f$ is a surjection, $f^{-1}[\{ j\}]= \{ j\}$.
So, $f=\mathop{\mathrm{id}}\nolimits _{\mathbb N}\in \mathop{\rm Sym}\nolimits ({\mathbb N} )$, which by (\ref{EQB035}) implies that for each $i\in {\mathbb N}$ we have $g_i \in \mathop{\rm Cond}\nolimits({\mathbb X} _i ,{\mathbb X} _i)=\mathop{\rm Iso}\nolimits({\mathbb X} _i ,{\mathbb X} _i)$ and (\ref{EQB034}) is proved. \end{ex} \begin{ex}\label{EXB015}\rm More reversible posets and topological spaces. The reversible posets constructed in Examples \ref{EXB011} and \ref{EXB014} are well-founded and with infinite levels. More generally, by Theorem \ref{TB047}, if $\langle \kappa _i : i\in I\rangle$ is {\it any} reversible sequence of cardinals
(e.g., if it is finite-to-one, if we would like infinite components) and $L_i$, $i\in I$, are {\it any} linear orders, where $|L_i|=\kappa _i$, then the poset $\bigcup _{i\in I}L_i$ is reversible.
Recalling that if ${\mathbb P} =\langle P, \leq \rangle$ is a partial order and ${\mathcal O}$ the topology on the set $P$ generated by the base consisting of the sets of the form $B_p:= \{ q\in P: q\leq p\}$, then endomorphisms of ${\mathbb P}$ are exactly the continuous self mappings of the space $\langle P,{\mathcal O}\rangle$, we conclude that the poset ${\mathbb P}$ is reversible iff $\langle P,{\mathcal O}\rangle$ is a reversible topological space (i.e., each continuous bijection is an automorphism). So, Examples \ref{EXB011}, \ref{EXB014} and Theorem \ref{TB047} generate a large class of reversible topological spaces. \end{ex} \subsection{More sequences from RFM} We recall that a relational structure ${\mathbb X}$ is called {\it monomorphic}
iff each two finite substructures of ${\mathbb X}$ of the same size are isomorphic, and that, by the well-known theorems of Fra\"{\i}ss\'{e} (for finite languages) and Pouzet (for languages and structures of any size), see \cite{Fra}, an infinite structure ${\mathbb X}$ is monomorphic iff it is {\it chainable} i.e.\ there is a linear order $\prec$ on its domain, $X$, such that the relations of ${\mathbb X}$ are definable in the structure $\langle X,\prec\rangle$ by quantifier-free formulas without parameters. Then it is said that $\prec$ {\it chains} ${\mathbb X}$, or that ${\mathbb X}$ is {\it chainable} by $\prec$. For convenience, a structure ${\mathbb X}$ will be called {\it copy-maximal} (resp.\ {\it mono-range-maximal}) iff for each $A\in [X ]^{|X|}$ there is an embedding (resp.\ a monomorphism) $g:{\mathbb X} \rightarrow {\mathbb X}$ satisfying $g[X]=A$.
By (\ref{EQA027}), Theorem \ref{TA021}(a) and since each set of cardinals is well ordered,
a sequence $\langle {\mathbb X} _i :i\in I\rangle\in \mathop{\rm RFM}\nolimits$ can be described in the following way. There are an ordinal $\eta$ and a sequence of connected reversible $L_b$-structures $\langle {\mathbb Y} _\xi :\xi <\eta\rangle$ (the {\it range}) such that, defining $\kappa _\xi := |Y_\xi|$, we have
(r1) $\xi < \zeta <\eta \Rightarrow \kappa_\xi<\kappa _\zeta$,
(r2) ${\mathbb Y} _\xi$ is a mono-range-maximal structure, for each $\xi <\eta$,
(r3) $\xi < \zeta <\eta \Rightarrow \forall A\in [Y_\zeta ]^{\kappa _\xi } \;\mathop{\rm Cond}\nolimits ({\mathbb Y} _\xi ,A)\neq\emptyset$,
\noindent and there is a surjection $h:I\rightarrow \eta$ such that for each $\xi <\eta $ and $i\in h^{-1}[\{ \xi \}]$ we have ${\mathbb X}_i\cong {\mathbb Y} _\xi$, and $X_i \cap X_j =\emptyset$, for $i\neq j$. So, by Theorem \ref{TA021}(b), the structure $\bigcup _{i\in I}{\mathbb X} _i$ is reversible iff $\langle \kappa _{h(i)}:i\in I \rangle$ is a reversible sequence of cardinals. Here we consider conditions (r2) and (r3). \paragraph{Condition (r2)} Clearly, condition (r2) will be satisfied if the structures ${\mathbb Y} _\xi$ are finite or copy-maximal. From more general results of Gibson, Pouzet and Woodrow \cite{Gib} it follows that a structure ${\mathbb X}$ of size $\kappa\geq \omega$ is copy-maximal iff it is $\kappa$-chainable, that is, there is a linear order $\prec$ on $X$ which chains ${\mathbb X}$ and $\langle X ,\prec\rangle\cong \langle \kappa ,<\rangle$. On the other hand, a simple application of Ramsey's theorem shows that, up to isomorphism, there are only eight countable binary copy-maximal structures and the same holds for uncountable binary structures (see also \cite{Ktow,KZb}). The six connected of them are $\langle \kappa , \kappa ^2\rangle$, $\langle \kappa , \kappa ^2 \setminus \Delta _\kappa \rangle$, $\langle \kappa , <\rangle$, $\langle \kappa , \leq \rangle$ $\langle \kappa , >\rangle$, and $\langle \kappa , \geq \rangle$, and they are reversible. In addition, since in the class of linear orders monomorphisms are embeddings, mono-range-maximal linear orders are copy-maximal thus the only four mono-range-maximal linear orders of size $\kappa$ are mentioned above. The following example shows that the class of mono-range-maximal posets is not so restrictive. 
\begin{ex}\label{EXB009}\rm The posets of the form ${\mathbb X}_{\lambda ,\kappa}:={\mathbb A}_\lambda +{\mathbb L}_\kappa$, where $2\leq \lambda <\kappa\geq \omega$, ${\mathbb A}_\lambda$ is an antichain of size $\lambda$, and ${\mathbb L} _\kappa \cong \langle \kappa , <\rangle$, are not copy-maximal and, moreover, if $\lambda \geq \omega$, ${\mathbb X}_{\lambda ,\kappa}$ is not almost chainable (see \cite{Fra, Gib} for details). But ${\mathbb X}_{\lambda ,\kappa}$ is mono-range-maximal (if $S \in [X]^\kappa$, then $S\cong {\mathbb A}_\mu +{\mathbb L}_\kappa$, for some $\mu \leq \lambda$, and it is easy to construct a monomorphism from ${\mathbb X}_{\lambda ,\kappa}$ onto $S$). If $\lambda <\omega$, then ${\mathbb X}_{\lambda ,\kappa}$ is a well-founded poset with finite levels so, by \cite{Kuk}, it is reversible. \end{ex} \paragraph{Condition (r3)} All the structures considered in Theorem \ref{TB048} - disjoint unions of (a) structures with full relations, (b) complete graphs, and (c) ordinals $\leq \omega$, give examples of sequences satisfying (r3) and all of them have monomorphic components. The following examples show that this condition is not necessary for application of Theorem \ref{TA021}(b). \begin{ex}\label{EXB013}\rm Structures from $\mathop{\rm RFM}\nolimits$ with non-monomorphic components. Let
- ${\mathbb T} _3$ be the three-element tree $\langle \{ 0,1,2\}, \{ \langle 0,1\rangle , \langle 0,2\rangle\}\rangle$,
- ${\mathbb L} _5$ the five-element linear order,
- ${\mathbb K} _6^*$ a complete graph with 6 nodes and 3 of them reflexified (loops),
- ${\mathbb F} _8$ the eight-element structure with the full relation.
\noindent Now, if $\kappa$ and $\lambda$ are infinite cardinals, $m,n\in \omega$ and ${\mathbb X}$ is the (pairwise disjoint) union of $\kappa$-many copies of ${\mathbb T} _3$, $\lambda$-many copies of ${\mathbb L} _5$, $m$ copies of ${\mathbb K} _6^*$ and $n$ copies of ${\mathbb F} _8$, then the sequence $\langle {\mathbb T} _3 ,{\mathbb L} _5,{\mathbb K} _6^* ,{\mathbb F} _8\rangle $ satisfies (r1)-(r3), the corresponding sequence of components of ${\mathbb X}$ belongs to $\mathop{\rm RFM}\nolimits$ and ${\mathbb X}$ is reversible because, in notation of Proposition \ref{TB037}, $K=\{ 3,5\}$ and the set $\{ n_i : i\in I\}=\{3,5,6,8\}$ is finite and we apply Theorem \ref{TA021}(b). \end{ex} \begin{ex}\label{EXB014}\rm A structure from $\mathop{\rm RFM}\nolimits$ having all components non-monomorphic. Let ${\mathbb X}_{2,\kappa}={\mathbb A} _2 +{\mathbb L} _\kappa$, for $1\leq \kappa \leq \omega$, be the posets defined as in Example \ref{EXB009}. It is easy to see that $\langle {\mathbb X} _{2,\kappa} : 1\leq \kappa \leq \omega\rangle \in \mathop{\rm RFM}\nolimits$ . Since the corresponding sequence of cardinals $\langle 3,4,5, \dots ,\omega\rangle$ is one-to-one and, thus, reversible, the structure ${\mathbb X} =\bigcup _{1\leq \kappa \leq \omega}{\mathbb X} _{2,\kappa}$ is reversible. Clearly, its components, ${\mathbb X} _{2,\kappa}$, are not 2-monomorphic. \end{ex} \subsection{The classes RFM, RC, and RU} If by $\mathop{\rm RC}\nolimits$ (resp.\ $\mathop{\rm RU}\nolimits$) we denote the class of sequences $\langle {\mathbb X}_i :i\in I\rangle$
satisfying $(\ast)$ and such that $\langle |{\mathbb X}_i | :i\in I\rangle$ is a reversible sequence of cardinals, (resp.\ the structure $\bigcup _{i\in I}{\mathbb X}_i$ is reversible), then by Theorem \ref{TA021}(b) we have $\mathop{\rm RFM}\nolimits \cap \mathop{\rm RU}\nolimits =\mathop{\rm RFM}\nolimits \cap \mathop{\rm RC}\nolimits$. The following example shows that this equality is the only constraint, regarding the relationship between the classes $\mathop{\rm RFM}\nolimits$, $\mathop{\rm RC}\nolimits$ and $\mathop{\rm RU}\nolimits$. \begin{ex}\label{EXB010}\rm (a) $\mathop{\rm RFM}\nolimits \setminus (\mathop{\rm RU}\nolimits \cup \mathop{\rm RC}\nolimits )\neq\emptyset$. If ${\mathbb X} _i \cong \langle \omega , <\rangle$, for $i\in \omega$, then by Theorem \ref{TB042} the sequence of cardinals $\langle \omega, \omega , \dots\rangle$ is not reversible but, since $\langle A, < \,\upharpoonright \!A\rangle \cong \langle \omega ,< \rangle$, for each $A\in [\omega ]^\omega$, the sequence $\langle {\mathbb X}_i :i\in I\rangle$ is rich for monomorphisms. It is easy to see that the structure $\bigcup _{i\in I}{\mathbb X}_i$ is not reversible.
(b) $\mathop{\rm RC}\nolimits \setminus (\mathop{\rm RFM}\nolimits \cup \mathop{\rm RU}\nolimits)\neq\emptyset$. Let ${\mathbb X} = \langle {\mathbb Z} ,\rho \rangle$, where $\rho =\{ \langle i,i \rangle :i\geq 0 \}$. Then ${\mathbb X}=\bigcup _{i\in {\mathbb Z}}{\mathbb X}_i$, where ${\mathbb X}_i=\langle \{ i\} , \emptyset \rangle$, for $i<0$, and ${\mathbb X}_i=\langle \{ i\} , \{ \langle i,i \rangle \} \rangle$, for $i\geq 0$. The corresponding sequence of cardinals $\langle \dots ,1, 1 , \dots\rangle$ is reversible and, since ${\mathbb X} \cong \langle {\mathbb Z} ,\rho \setminus \{ \langle 0,0\rangle\}\rangle$, by Fact \ref{TB045} the structure $\bigcup _{i\in {\mathbb Z}}{\mathbb X}_i$ is not reversible. Since ${\mathbb X} _{-1}\not\cong {\mathbb X} _0$, by Theorem \ref{TA021}(a) the sequence of structures $\langle {\mathbb X}_i :i\in {\mathbb Z}\rangle$ is not rich for monomorphisms.
(c) $\mathop{\rm RU}\nolimits \setminus (\mathop{\rm RFM}\nolimits \cup \mathop{\rm RC}\nolimits)\neq\emptyset$. Let ${\mathbb X} = \langle {\mathbb Z} ,\rho \rangle$, where $ \rho =\{ \langle i,i \rangle :i< 0 \}\cup \{ \langle 2i, 2i+1 \rangle :i\geq 0\}. $ Then we have ${\mathbb X}=\bigcup _{i\in {\mathbb Z}}{\mathbb X}_i$, where ${\mathbb X}_i=\langle \{ i\} , \{ \langle i,i \rangle \} \rangle$, for $i<0$, and ${\mathbb X}_i=\langle \{ 2i, 2i+1 \} , \{ \langle 2i, 2i+1 \rangle \} \rangle$, for $i\geq 0$. Now, the corresponding sequence of cardinals $\langle \dots ,1,1,2,2, \dots\rangle$ is not reversible, because the set $K=\{ 1,2\}$ is not independent ($1+1=2$). Since $\mathop{\rm Mono}\nolimits ({\mathbb X} _{-1},{\mathbb X} _0)=\emptyset$ we have $\langle {\mathbb X}_i :i\in {\mathbb Z}\rangle \not\in \mathop{\rm RFM}\nolimits$. But, by Fact \ref{TB045}, the structure $\bigcup _{i\in {\mathbb Z}}{\mathbb X}_i$ is reversible, namely, if $\sigma \varsubsetneq \rho$, then the structure $\langle {\mathbb Z} ,\sigma \rangle$ has a one-element component with the empty relation and, hence, it is not isomorphic to ${\mathbb X}$.
(d) $(\mathop{\rm RU}\nolimits \cap \mathop{\rm RC}\nolimits)\setminus \mathop{\rm RFM}\nolimits\neq\emptyset$. Let $I$ be the ordinal $\omega +2 =\omega \cup \{ \omega ,\omega +1\}$ and let ${\mathbb X} =\bigcup _{i\in \omega +2}{\mathbb X}_i$, where ${\mathbb X}_i$ are pairwise disjoint linear orders such that ${\mathbb X}_i \cong i+1$, for $i\in\omega$, ${\mathbb X} _\omega \cong \omega$, and ${\mathbb X} _{\omega +1}\cong {\mathbb Q}$. The corresponding sequence of cardinals $\langle 1,2,\dots , \omega ,\omega\rangle$ is finite-to-one and, by Theorem \ref{TB042}, reversible. By Theorem \ref{TB047} the union $\bigcup _{i\in I}{\mathbb X}_i$ is reversible too. Since $\omega \not\cong {\mathbb Q}$ by Theorem \ref{TA021}(a) we have $\langle {\mathbb X}_i :i\in I\rangle \not\in \mathop{\rm RFM}\nolimits$. \end{ex} Let $\mathop{\rm RFM}\nolimits _{LO}$, $\mathop{\rm RC}\nolimits_{LO}$ and $\mathop{\rm RU}\nolimits_{LO}$ denote the classes of sequences of linear orders $\langle {\mathbb X}_i :i\in I\rangle$ belonging to classes $\mathop{\rm RFM}\nolimits$, $\mathop{\rm RC}\nolimits$ and $\mathop{\rm RU}\nolimits$. Here, by Theorem \ref{TB047} we obtain one more constraint: $\mathop{\rm RC}\nolimits_{LO} \subset \mathop{\rm RU}\nolimits_{LO}$, and the following example shows that, in general, there are no more constraints. \begin{ex}\label{EXB012}\rm $\mathop{\rm RFM}\nolimits _{LO}\setminus \mathop{\rm RU}\nolimits_{LO}\neq\emptyset$ is witnessed by the poset $\bigcup _\omega \omega$, from Example \ref{EXB010}(a). The poset $\bigcup _{n\in {\mathbb N}}n \cup \omega \cup {\mathbb Q}$ from Example \ref{EXB010}(d) belongs to the class $\mathop{\rm RC}\nolimits_{LO}\setminus\mathop{\rm RFM}\nolimits _{LO}$, while the poset $\bigcup _{n\in {\mathbb N}}\omega n$ (see Example \ref{EXB011}) belongs to the class $\mathop{\rm RU}\nolimits_{LO}\setminus \mathop{\rm RC}\nolimits_{LO}$. 
\end{ex} \section{Reversible cardinal sequences -- a proof of Theorem \ref{TB042}}\label{S4} Theorem \ref{TB042} follows from Propositions \ref{TA020} and \ref{TB037} given in the sequel.
If $\langle \kappa _i :i\in I\rangle$ is a sequence of cardinals and $\kappa$ a cardinal, let $$ I_\kappa := \{ i\in I : \kappa _i =\kappa\} . $$ \subsection{Reduction to the case when the cardinals are finite} \begin{prop}\label{TA020} A sequence of non-zero cardinals $\langle \kappa _i :i\in I\rangle$ is reversible iff it is a finite-to-one sequence or a reversible sequence in ${\mathbb N}$. \end{prop} \noindent{\bf Proof. } The implications ``$ \Leftarrow $" and ``$ \Rightarrow $" follow from Claims \ref{TB040} and \ref{TB041} respectively. \begin{cla}\label{TB040} If $\langle \kappa _i :i\in I\rangle$ is a finite-to-one sequence, it is reversible. \end{cla}
\noindent{\bf Proof. } Let $|I_\kappa |<\omega $, for all $\kappa \in \mathop{\rm Card}\nolimits$. The {\it set} $\{ \kappa_i:i\in I \}$ is well-ordered and, hence, there is an ordinal $\zeta$ and an enumeration $\{ \kappa_i:i\in I \} =\{ \kappa _\xi : \xi <\zeta \}$ such that $ \xi <\xi'$ implies $ \kappa _\xi <\kappa _{\xi'}$. Assuming that $f:I\rightarrow I$ is a surjection satisfying (\ref{EQB032}) we show that $f$ is a bijection. First, by induction we prove that \begin{equation}\label{EQA026} \forall \xi <\zeta \;\; f[I _{\kappa _\xi}]=I _{\kappa _\xi}. \end{equation} If $j\in I_{\kappa _0}$, then, by (\ref{EQB032}), for $i\in f^{-1}[\{ j \}]$ we have $\kappa_i \leq \kappa_j =\kappa _0$, which, by the minimality of $\kappa_0$, implies that $\kappa_i=\kappa _0$, that is, $i\in I_{\kappa _0}$. Thus $f^{-1}[\{ j \}]\subset I_{\kappa _0}$, for all $j\in I_{\kappa _0}$, and, hence, $f^{-1}[I_{\kappa _0}]\subset I_{\kappa _0}$. Since $f$ is onto we have $I_{\kappa _0} =f[f^{-1}[I_{\kappa _0}]]\subset f[I_{\kappa _0}]$ thus
$|I_{\kappa _0}|\leq |f[I_{\kappa _0}]|\leq |I_{\kappa _0}|$ and, hence, $|f[I_{\kappa _0}]|= |I_{\kappa _0}|$, which, since the set $I_{\kappa _0} $ is finite and $I_{\kappa _0}\subset f[I_{\kappa _0}]$, implies that $f[I_{\kappa _0}]= I_{\kappa _0}$.
Assuming that $\eta <\zeta$ and $f[I _{\kappa _\xi}]=I _{\kappa _\xi}$, for all $\xi <\eta$, we prove $f[I _{\kappa _\eta}]=I _{\kappa _\eta}$. If $j\in I_{\kappa _\eta}$, then, by (\ref{EQB032}), for $i\in f^{-1}[\{ j \}]$ we have $\kappa_i\leq \kappa_j=\kappa _\eta$. The inequality $\kappa_i<\kappa _\eta$ would imply that $\kappa_i=\kappa _\xi$, for some $\xi <\eta$, and, hence, $i\in I _{\kappa _\xi}$ and, by the induction hypothesis, $f(i)=j\in I _{\kappa _\xi}$, which is not true. Thus $\kappa_i=\kappa _\eta$ and, hence, $i\in I_{\kappa _\eta}$. Thus $f^{-1}[\{ j \}]\subset I_{\kappa _\eta}$, for all $j\in I_{\kappa _\eta}$, and, hence, $f^{-1}[I_{\kappa _\eta}]\subset I_{\kappa _\eta}$. Now, as above we show that $f[I_{\kappa _\eta}]= I_{\kappa _\eta}$ and (\ref{EQA026}) is proved.
By (\ref{EQA026}) and since the sets $I_{\kappa _\xi}$ are finite, the restrictions $f\upharpoonright I_{\kappa _\xi}: I_{\kappa _\xi}\rightarrow I_{\kappa _\xi}$, $\xi <\zeta$, are bijections and, since $\{ I _{\kappa _\xi}: \xi <\zeta \}$ is a partition of the set $I$, $f$ is a bijection as well.
$\Box$ \begin{cla}\label{TB041} If $\langle \kappa _i :i\in I\rangle$ is a sequence of cardinals and some of them is infinite, then \begin{equation}\label{EQB033} \langle \kappa _i :i\in I\rangle \mbox{ is reversible } \Leftrightarrow \langle \kappa _i :i\in I\rangle \mbox{ is finite-to-one}. \end{equation} \end{cla}
\noindent{\bf Proof. } Let $i^*\in I$, where $\kappa_{i^*} \geq \omega$. By Claim \ref{TB040} the implication ``$\Rightarrow$" remains to be checked and we prove its contrapositive. Suppose that $|I_{\kappa _0}|\geq \omega$, for some cardinal $\kappa_0$.
If $\kappa_0 \leq \kappa_{i^*}$, then we choose different $i_n\in I_{\kappa _0}\setminus \{ i^*\}$, $n\in \omega$, and define a surjection $f:I\rightarrow I$ by: $$ f (i)=\left\{ \begin{array}{cl}
i^*, & \mbox{ if } i\in \{ i^*, i_0\} ,\\
i_{n-1} , & \mbox{ if } i=i_n, \mbox{ and } n\geq 1, \\
i , & \mbox{ if } i\in I \setminus (\{ i^*\} \cup \{ i_n :n\in \omega\}).
\end{array}
\right. $$ Now, for $j\in I \setminus (\{ i^*\} \cup \{ i_n :n\in \omega\}) $ we have $f^{-1}[\{ j\}]=\{ j\}$; for $n\in {\mathbb N}$ we have $f^{-1}[\{ i_{n-1}\}]=\{ i_{n}\}$ and $\kappa _{i_n}=\kappa _{i_{n-1}}=\kappa _0$; finally $f^{-1}[\{ i^*\}]=\{ i^*, i_0\}$ and $\kappa _{i^*}=\kappa _{i^*}+ \kappa _0=\kappa _{i^*}+ \kappa _{i_0}$. So (\ref{EQB032}) is true and, since $f$ is not a bijection, the sequence $\langle \kappa _i :i\in I\rangle$ is not reversible.
If $\kappa_0 > \kappa_{i^*}$, then we choose different $i_n\in I_{\kappa _0}$, for $n\in \omega$, and define a non-injective surjection $f:I\rightarrow I$ by: $$ f (i)=\left\{ \begin{array}{cl}
i_0, & \mbox{ if } i\in \{ i_0, i_1\} ,\\
i_{n-1} , & \mbox{ if } i=i_n, \mbox{ and } n\geq 2, \\
i , & \mbox{ if } i\in I \setminus \{ i_n :n\in \omega\}.
\end{array}
\right. $$ Since $f^{-1}[\{ i_0\}]=\{ i_0, i_1\}$ and $\kappa_0$ is an infinite cardinal, we have $\kappa _{i_0}=\kappa _0 =\kappa _{0}+\kappa _{0}=\kappa _{i_0}+\kappa _{i_1}$. So (\ref{EQB032}) is true and
$\langle \kappa _i :i\in I\rangle$ is not reversible again.
$\Box$ \subsection{Reversible sequences of natural numbers} Here we characterize reversible sequences of the form $\langle n_i :i\in I\rangle \in {}^{I}{\mathbb N}$, where $I\neq \emptyset$. Clearly, $I=\bigcup _{m\in {\mathbb N}}I_m$, where $$ I_m =\{ i\in I : n_i =m\},\; \mbox{ for } m\in {\mathbb N} , $$ and the following statement is the main result of this paragraph. \begin{prop}\label{TB037}
A sequence $\langle n_i :i\in I\rangle \in {}^{I}{\mathbb N}$ is reversible if and only if the set $K:=\{ m\in {\mathbb N} : |I_m|\geq \omega\}$ is independent and, if $K$ is a non-empty set, then at most finitely many elements of the set $\{ n_i :i\in I \}$ are divisible by $\gcd (K)$.
\end{prop} A proof of Proposition \ref{TB037} is given in the sequel. First, for $d\in {\mathbb N}$ we define $d{\mathbb N} :=\{ dk:k\in {\mathbb N}\}$ and recall some facts from elementary number theory (giving their proofs for the reader's convenience). \begin{fac}\label{TB032} Let $K$ be a nonempty subset of ${\mathbb N}$ and $d=\gcd (K)$. Then we have:
(a) If $|K|=\omega $, then $\gcd(K^\prime)=d$, for some finite $K^\prime\subset K$;
(b) If $d=1$, then there is $M\in {\mathbb N}$ such that $[M,\infty )\subset \langle K \rangle$;
(c) If $d>1$, then there is $M\in {\mathbb N}$ such that $[dM ,\infty )\cap d{\mathbb N}\subset \langle K \rangle\subset d{\mathbb N}$;
(d) Each independent set is finite.
\end{fac} \noindent{\bf Proof. } (a) Let $K=\{ n_r:r\in {\mathbb N}\}$ and $d_r =\gcd \{ n_1,\dots ,n_r\}$, for $r\in {\mathbb N}$. Then $d_1 \geq d_2 \geq \dots$ and, hence, there is $s\in {\mathbb N}$ such that $d_r=d_s$, for all $r\geq s$. Clearly we have $d\leq d_{s}$ and, since $d_{s}$ divides all $n_r$'s, $d\geq d_{s}$, by the maximality of $d$. Now we take $K'=\{ n_1,\dots ,n_{s}\}$.
(b) By (a) there is $K^\prime=\{ n_1,\dots ,n_{s}\}\subset K$ such that $\gcd(K^\prime)=1$. By B\'ezout's lemma there are $a_r\in\mathbb{Z}$, for $1\leq r\leq s$, such that
$\sum_{r=1}^s a_r n_r=1$, which for $M:=n_1\sum_{r=1}^s|a_r|n_r$, and for any $m\in\{0,1,\ldots,n_1-1\}$, implies $M+m=\sum_{r=1}^s(n_1|a_r|+m a_r)n_r\in\langle K^\prime\rangle$; so, $[M,M+n_1)\subset\langle K^\prime\rangle$. Since $k n_1\in\langle K^\prime\rangle$, we also have that $[M+k n_1,M+(k+1)n_1)\subset\langle K^\prime\rangle$, for any $k\in\mathbb{N}$. Hence, $[M,\infty)\subset\langle K^\prime\rangle\subset\langle K\rangle$.
(c) It is clear that $\langle K \rangle\subset d{\mathbb N}$. By (a) there is $K^\prime=\{ n_1,\dots ,n_{s}\}\subset K$ such that $\gcd(K^\prime)=d$ and, hence, $K^\prime=\{d m_1, \dots ,d m_s\}$, where $\gcd(\{m_1 ,\dots ,m_s\})=1$. By (b) there is $M\in\mathbb{N}$ such that $[M,\infty)\subset\langle\{m_1, \dots ,m_s\}\rangle$, so $[dM,\infty)\cap d{\mathbb N}\subset\langle K^\prime\rangle\subset\langle K\rangle$.
(d) If $K$ is an infinite set, then by (a) there is a finite $K^\prime\subset K$ such that $\gcd(K^\prime)=\gcd(K)=d$. Since $K\setminus K^\prime\subset d{\mathbb N}$ is infinite, for every $M\in\mathbb{N}$ we have $(K\setminus K^\prime)\cap[dM,\infty)\cap d{\mathbb N}\neq\emptyset$. By (c) there is $M\in\mathbb{N}$ such that $[dM,\infty)\cap d{\mathbb N}\subset\langle K^\prime\rangle$. Then $(K\setminus K^\prime)\cap\langle K^\prime\rangle\supset(K\setminus K^\prime)\cap[dM,\infty)\cap d{\mathbb N}\neq\emptyset$. Take $n\in(K\setminus K^\prime)\cap\langle K^\prime\rangle$. Then $n\in K$ and $n\in\langle K^\prime\rangle\subset\langle K\setminus\{n\}\rangle$, which means that the set $K$ is not independent.
$\Box$ \paragraph{Proof of ``$\Rightarrow$" of Proposition \ref{TB037}} Let $\langle n_i :i\in I\rangle$ be a reversible sequence.
First, suppose that the set $K$ is not independent. Then for some $m\in K$ there are $s>0$, $k_r \in {\mathbb N}$ and different $m_r\in K\setminus \{ m \}$, for $0\leq r<s$, such that \begin{equation}\label{EQB013}\textstyle m=\sum _{0\leq r<s}k_r m_r . \end{equation} We take countable subsets with 1-1 enumerations $$ I'_m =\{ j_l :l\in \omega\} \subset I_m $$ $$ I'_{m _r} =\{ i^r_l :l\in \omega\} \subset I_{m_r}, \mbox{ for } r<s, $$ and define $f : I\rightarrow I$ by $$ f (i)=\left\{ \begin{array}{cl}
j_0, & \mbox{ if } i=i^r_l, \mbox{ where } r<s \mbox{ and } l<k_r ,\\
i^r_{l-k_r} , & \mbox{ if } i=i^r_l, \mbox{ where } r<s \mbox{ and } l\geq k_r, \\
j_{l+1}, & \mbox{ if } i=j_l , \mbox{ where } l\in \omega , \\
i , & \mbox{ if } i\in I \setminus (I'_m \cup \bigcup _{r<s} I'_{m_r}).
\end{array}
\right. $$ It is easy to see that $f [I'_m \cup \bigcup _{r<s} I'_{m_r}] =I'_m \cup \bigcup _{r<s} I'_{m_r}$ so $f$
is a surjection, satisfies (\ref{EQB004}) and it is not 1-1, which gives a contradiction. So the set $K$ is independent and, by Fact \ref{TB032}(d), $|K |<\omega$.
Second, suppose that $K\neq\emptyset$, $d=\gcd (K)$ and $|\{ n_i :i\in I \} \cap d{\mathbb N} | =\omega$. \begin{cla}\label{TB046} There is a sequence $\langle q_r :r\in \omega\rangle$ in $\{ n_i :i\in I\} \cap \langle K\rangle \setminus K$ such that \begin{equation}\label{EQB021} \forall r\in \omega \;\; q_{r+1}-q _r \in \langle K\rangle. \end{equation} \end{cla} \noindent{\bf Proof. } Since $K$ is a finite set, by Fact \ref{TB032}(c) there is $M\in {\mathbb N}$ such that $M>\max K$ and \begin{equation}\label{EQB022} \langle K\rangle \cap [dM ,\infty )=d{\mathbb N} \cap [dM ,\infty )=\{ dm: m\geq M\}. \end{equation} So $\{ n_i :i\in I \} \cap d{\mathbb N} \cap [dM ,\infty )=\{ n_i :i\in I \} \cap \langle K\rangle \cap [dM ,\infty )$ is an infinite set. Let $\{ n_i :i\in I \} \cap \langle K\rangle \cap [dM ,\infty )=\{ n_{i_k}: k\in \omega\}$, where $n_{i_0}< n_{i_1}< n_{i_2}<\dots$. By recursion we easily construct a sequence $\langle k_r :r\in \omega\rangle$ in $\omega$ such that $n_{i_{k_{r+1}}} - n_{i_{k_r}} \geq dM$, which implies that $n_{i_{k_r}}\in \langle K \rangle\setminus K$ and $n_{i_{k_{r+1}}} - n_{i_{k_r}} \in \langle K \rangle$. Defining $q_r=n_{i_{k_r}}$, for $r\in \omega$, we finish the proof of Claim \ref{TB046}.
$\Box$ \par \vspace*{2mm} For $r\in \omega$ we choose $i_r\in I$ such that \begin{equation}\label{EQB024} q_r=n_{i_r}\in\langle K\rangle \setminus K. \end{equation} Then by (\ref{EQB021}) and (\ref{EQB024}), $\{ I_m : m\in K \} \cup \{ I_{n_{i_r}}: r\in \omega \}$ is a family of pairwise disjoint subsets of $I$. For each $m\in K$ we choose a countably infinite, co-infinite subset $I'_m$ of $I_m$ and a 1-1 enumeration of $I'_m$, that is \begin{equation}\label{EQB025}
I'_m =\{ i^m_l:l\in \omega \}\subset I_m \;\;\land \;\;|I'_m|=\omega \;\;\land \;\; |I_m \setminus I'_m|\geq \omega, \end{equation} and in this way we obtain a ``one-to-one matrix indexing'' $\{ i^m_l : \langle m,l\rangle \in K\times \omega\}$ of the set $\bigcup _{m\in K}I'_m$.
Now, by (\ref{EQB021}), (\ref{EQB024}) and since the sets $I'_m$ are infinite, we can choose non-empty sets $L_r$, for $r\in \omega$, such that
(l1) $L_r \in [K\times \omega]^{<\omega}$,
(l2) $r_1\neq r_2 \Rightarrow L_{r_1} \cap L_{r_2}=\emptyset$,
(l3) $q_0=n_{i_0}=\sum _{\langle m,l\rangle\in L_0}n_{i^m_l}$,
(l4) $q_{r+1}-q_r = n_{i_{r+1}}-n_{i_r}=\sum _{\langle m,l\rangle\in L_{r+1}}n_{i^m_l}$, for $r\in \omega$.
\noindent First, defining for each $r\in \omega$
(g1) $g(i_r)=i_{r+1}$,
(g2) $g(i^m_l)=i_r$, for all $\langle m,l \rangle\in L_r$,
\noindent by (l2) we obtain a surjection \begin{equation}\label{EQB026}\textstyle g: \{ i^m_l : \langle m,l \rangle\in \bigcup _{r\in \omega }L_r \} \cup \{ i_r :r\in \omega\}\rightarrow \{ i_r :r\in \omega\}. \end{equation} Since $g^{-1}[\{ i_0\}]=\{ i^m_l:\langle m,l \rangle\in L_0\}$ by (l3) we have \begin{equation}\label{EQB027}\textstyle n_{i_0}=\sum _{\langle m,l\rangle\in L_0}n_{i^m_l}= \sum _{i\in g^{-1}[\{ i_0\}]}n_i . \end{equation} Since $g^{-1}[\{ i_{r+1}\}]=\{ i_r\} \cup \{ i^m_l:\langle m,l \rangle\in L_{r+1}\}$ by (l4) we have \begin{equation}\label{EQB028}\textstyle n_{i_{r+1}}=n_{i_r}+\sum _{\langle m,l\rangle\in L_{r+1}}n_{i^m_l}= \sum _{i\in g^{-1}[\{ i_{r+1}\}]}n_i . \end{equation}
By (\ref{EQB024}) we have $n_{i_0}\not\in K$ so, by (\ref{EQB027}) we have $|L_0|>1$ and, hence, $g$ is a surjection but not a bijection. In addition, by (\ref{EQB027}) and (\ref{EQB028}) \begin{equation}\label{EQB029}\textstyle \forall j\in \{ i_r :r\in \omega\} \;\; n_{j}= \sum _{i\in g^{-1}[\{ j\}]}n_i . \end{equation} For each $m\in K$ we have $I_m \cap \{ i^{m'}_{l'} : \langle m',l' \rangle\in \bigcup _{r\in \omega }L_r \}\subset I'_m$
so by (\ref{EQB025}) we have $|I_m|=|I_m \setminus \{ i^{m'}_{l'} : \langle m',l' \rangle\in \bigcup _{r\in \omega }L_r \} |$ and, hence, there are bijections \begin{equation}\label{EQB030}\textstyle g_m : I_m \setminus \{ i^{m'}_{l'} : \langle m',l' \rangle\in \bigcup _{r\in \omega }L_r \} \rightarrow I_m. \end{equation} So, for $j\in I_m$ we have $g_m ^{-1}[\{ j\}]=\{ i_j\}$, for some $i_j\in \mathop{\mathrm{dom}}\nolimits g_m$ and, since $j,i_j\in I_m$, \begin{equation}\label{EQB031}\textstyle \forall j\in I_m \;\;n_j =n_{i_j}= \sum _{i\in g_m^{-1}[\{ j\}]}n_i . \end{equation} By (\ref{EQB026}) and (\ref{EQB030}) the function $g\cup \bigcup _{m\in K}g_m$ maps the set $\bigcup _{m\in K}I_m \cup \{ i_r :r\in \omega\}$ onto itself and, defining $$\textstyle f=g\cup \bigcup _{m\in K}g_m \cup \mathop{\mathrm{id}}\nolimits _{I\setminus (\bigcup _{m\in K}I_m \cup \{ i_r :r\in \omega\})} $$ by (\ref{EQB029}) and (\ref{EQB031}) we obtain a surjection $f:I\rightarrow I$ which is not a bijection and satisfies (\ref{EQB004}), which contradicts our assumption that the sequence $\langle n_i :i\in I\rangle$ is reversible. The implication ``$\Rightarrow$" of Proposition \ref{TB037} is proved.
$\Box$ \paragraph{Proof of ``$\Leftarrow$" of Proposition \ref{TB037}}
Let $K$ be an independent set and, if $K\neq\emptyset$, let $|\{ n_i :i\in I \} \cap d{\mathbb N} |<\omega$, where $d=\gcd (K)$.
Suppose that the sequence $\langle n_i :i\in I\rangle$ is not reversible. Then by Claim \ref{TB040} we have $K\neq\emptyset$ and, hence, $|\{ n_i :i\in I \} \cap d{\mathbb N} |<\omega$. Let $f:I\rightarrow I$ be a surjection such that \begin{equation}\label{EQB004}\textstyle \forall j\in I \;\; n_j=\sum _{i\in f^{-1}[\{ j \}]}n_i. \end{equation}
Since $f$ is not a bijection we have \begin{equation}\label{EQB005}\textstyle J:= \{ j\in I : |f^{-1}[\{ j \}]|>1\} \neq \emptyset. \end{equation}
\begin{cla}\label{TB031} (a) For each $i\in I$ we have $n_i\leq n_{f(i)}$.
(b) For each $j\in I$ there is a sequence $\langle i^j _k :k\in {\mathbb N}\rangle$ in $I$ such that \begin{equation}\label{EQB007}\textstyle f(i^j_1)=j \;\;\land \;\; \forall k\in {\mathbb N} \;\; f(i^j_{k+1})=i^j_k , \end{equation} \begin{equation}\label{EQB008}\textstyle \dots \leq n_{i^j_{k+1}}\leq n_{i^j_k} \leq \dots \leq n_{i^j_{3}}\leq n_{i^j_2} \leq n_{i^j_{1}}\leq n_{j}. \end{equation}
(c) If, in addition, $n_{i^j_{1}}< n_{j}$ in (\ref{EQB008}), then $i^j_k\neq i^j_l$, whenever $k\neq l$. \end{cla} \noindent{\bf Proof. } (a) follows from (\ref{EQB004}).
(b) If $j\in I$, then, since $f$ is an onto mapping, there is $i^j_1\in I$ such that $f(i^j_1)=j$, there is $i^j_2\in I$ such that $f(i^j_2)=i^j_1$, there is $i^j_3\in I$ such that $f(i^j_3)=i^j_2$, and so on. So in this way we obtain a sequence $\langle i^j _k :k\in {\mathbb N}\rangle \in {}^{{\mathbb N}}I$ satisfying (\ref{EQB007}) which, together with (a), gives (\ref{EQB008}).
(c) If $n_{i^j_{1}}< n_{j}$ then, by (\ref{EQB008}), $n_{i^j_{k}}< n_{j}$, for all $k\in {\mathbb N}$ and, hence, \begin{equation}\label{EQB010}\textstyle \forall k\in {\mathbb N} \;\; i^j_k\neq j . \end{equation} On the contrary, let $k$ be the minimal element of ${\mathbb N}$ such that $i^j_k= i^j_l$, for some $l>k$. Then by (\ref{EQB007}), for $k=1$ we would have $i^j_{l-1}= f(i^j_l)=f(i^j_k)=f(i^j_1)=j$, which is impossible by (\ref{EQB010}). For $k>1$ we would have $i^j_{l-1}= f(i^j_l)=f(i^j_k)=i^j_{k-1}$, which is false by the minimality of $k$.
$\Box$ \begin{cla}\label{TB029} There is a sequence $\langle p_r :r\in \omega \rangle$ in ${\mathbb N}$ such that, defining for convenience $p_{-1}:=0$, for each $r\in \omega$ we have:
(i) $p_r=\min \{ n_j : j\in J \land n_j >p_{r-1}\}$,
(ii) $\forall j\in I_{p_r}\cap J\;\; \forall i\in f^{-1}[\{ j \}] \;\; n_i \in K \cup \{ p_s : 0\leq s<r\}$,
(iii) $p_r\in \langle K\rangle\setminus K$,
(iv) $\exists i\in I_{p_r} \;\; ( f(i)\in J \land n_{f(i)}>p_r)$,
(v) $\{ n_j : j\in J\}\cap [1,p_r]=\{ p_s : 0\leq s\leq r\}$. \end{cla} \noindent{\bf Proof. } We construct the sequence by recursion.
First, by (\ref{EQB005}) we have $J\neq \emptyset$ so $\emptyset \neq \{ n_j : j\in J\}=\{ n_j : j\in J\land n_j >0\}\subset {\mathbb N}$ and defining \begin{equation}\label{EQB011}\textstyle p_0=\min \{ n_j : j\in J\} \end{equation} we see that the sequence $\langle p_0\rangle$ satisfies (i).
(ii) Let $j\in I_{p_0}\cap J$ and $i\in f^{-1}[\{ j \}]$. Then, since $j\in J$, by (\ref{EQB005}) we have $|f^{-1}[\{ j \}]|>1$ and, by (\ref{EQB004}), $n_j =\sum _{i'\in f^{-1}[\{ j \}]}n_{i'}$, so $n_i <n_{j}$. As in Claim \ref{TB031} we define $i^{j}_k\in I$, for $k\in {\mathbb N}$, satisfying $i^{j}_1:=i$, (\ref{EQB007}) and (\ref{EQB008}) and so we obtain $\dots n_{i^{j}_{3}}\leq n_{i^{j}_2} \leq n_{i^{j}_{1}}< n_{j} $. Assuming that $n_{i^{j}_{k+1}}< n_{i^{j}_k}$ for some $k\in {\mathbb N}$, since $f(i^{j}_{k+1})=i^{j}_k$ by (\ref{EQB004})
we would have $i^{j}_k\in J$ and $n_{i^{j}_k}<n_{j}=p_0$, which is, by (\ref{EQB011}), impossible. Thus there is $m\in {\mathbb N}$ such that $n_{i^{j}_k}=m$, for all $k\in {\mathbb N}$. By Claim \ref{TB031}(c) we have $i^{j}_k\neq i^{j}_l$, whenever $k\neq l$, thus $|I_m|\geq \omega$. So $n_i =n_{i^{j}_1}=m\in K$.
(iii) By the previous item and (\ref{EQB004}) we have $p_0=n_{j} \in \langle K\setminus \{ p_0\}\rangle$ and, since the set $K$ is independent, $p_0\not \in K$.
(iv) By (iii) we have $p_0\not\in K$, that is $|I_{p_0}|<\omega$. Suppose that $f[I_{p_0}]\subset I_{p_0}$. Then by (\ref{EQB004}) $f\upharpoonright I_{p_0}$ is an injection and, since the set $I_{p_0}$ is finite, $f[I_{p_0}]= I_{p_0}$. By (\ref{EQB011}) there is $j\in I_{p_0}\cap J$ and by the previous conclusion, $j=f(i)$, for some $i\in I_{p_0}$, which implies that $n_i=n_j=p_0$. But this contradicts the fact that $j\in J$. So, there is $i\in I_{p_0} $ such that $f(i)\not\in I_{p_0}$ and, hence, $n_{f(i)}> n_i=p_0$ and $f(i)\in J$.
(v) By (\ref{EQB011}) we have $\{ n_j : j\in J\}\cap [1,p_0]=\{ p_0\}$.
Suppose that $\langle p_0, \dots ,p_r\rangle$ is a sequence satisfying (i)--(v). By (iv) there is $j\in J$ such that $n_j >p_r$ and defining \begin{equation}\label{EQB012}\textstyle p_{r+1}=\min \{ n_j : j\in J \land n_j >p_r\} . \end{equation} we have (i).
(ii) Let $j\in I_{p_{r+1}}\cap J$ and $i\in f^{-1}[\{ j \}]$. Then, since $j\in J$, by (\ref{EQB005}) we have $|f^{-1}[\{ j \}]|>1$ and, by (\ref{EQB004}), $n_j =\sum _{i'\in f^{-1}[\{ j \}]}n_{i'}$, so $n_i <n_{j}$. Again, as in Claim \ref{TB031} we define $i^{j}_k\in I$, for $k\in {\mathbb N}$, satisfying $i^{j}_1:=i$, (\ref{EQB007}) and (\ref{EQB008}) and so we obtain $\dots n_{i^{j}_{3}}\leq n_{i^{j}_2} \leq n_{i^{j}_{1}}< n_{j} $.
If $n_{i^{j}_{k+1}}< n_{i^{j}_k}$ for some $k\in {\mathbb N}$, let $k$ be the minimal such $k$. Then \begin{equation}\label{EQB014} n_{i^{j}_{k+1}}<n_{i^{j}_k} = \dots = n_{i^{j}_2} = n_{i^{j}_{1}}=n_i< n_{j}=p_{r+1} . \end{equation} In addition, since $f(i^{j}_{k+1})=i^{j}_k$, by (\ref{EQB004}) we have $i^{j}_k\in J$ which implies that $n_{i^{j}_k}\in \{ n_j:j\in J\} \cap [1,p_{r+1})$ and, by (\ref{EQB012}), $n_{i^{j}_k}\in \{ n_j:j\in J\} \cap [1,p_r]$. So, by (v), there is $s_0\leq r$ such that $n_{i^{j}_k}=p_{s_0}$ and, by (\ref{EQB014}), $n_i=p_{s_0}\in \{ p_s : 0\leq s<r+1\}$.
Otherwise, there is $m\in {\mathbb N}$ such that $n_{i^{j}_k}=m$, for all $k\in {\mathbb N}$. By Claim \ref{TB031}(c) we have $i^{j}_k\neq i^{j}_l$, whenever $k\neq l$, thus $|I_m|\geq \omega$, and, hence, $m\in K$. So $n_i =n_{i^{j}_1}=m\in K$ and (ii) is true indeed.
(iii) By (\ref{EQB012}) there is $j\in J$ such that $p_{r+1}=n_j>p_r$. Thus $j\in I_{p_{r+1}}\cap J$ and, by (ii) and (\ref{EQB004}), $n_j$ is a sum of at least two integers from $K\cup \{ p_s : 0\leq s\leq r\}$. By (iii) of the induction hypothesis we have $p_s\in \langle K\rangle$, for $0\leq s\leq r$, and, hence, $p_{r+1}\in \langle K\setminus \{p_{r+1} \}\rangle$. Since the set $K$ is independent we have $p_{r+1}\not\in K$.
(iv) Since $p_{r+1}\not\in K$ we have $|I_{p_{r+1}}|<\omega$. Suppose that $f[I_{p_{r+1}}]\subset I_{p_{r+1}}$. Then by (\ref{EQB004}) $f\upharpoonright I_{p_{r+1}}$ is an injection and, since the set $I_{p_{r+1}}$ is finite, $f[I_{p_{r+1}}]= I_{p_{r+1}}$. By (\ref{EQB012}) there is $j\in I_{p_{r+1}}\cap J$ and, since $f[I_{p_{r+1}}]= I_{p_{r+1}}$, $j=f(i)$, for some $i\in I_{p_{r+1}}$, which implies that $n_i=n_j=p_{r+1}$. But this contradicts the fact that $j\in J$. So, there is $i\in I_{p_{r+1}} $ such that $f(i)\not\in I_{p_{r+1}}$ and, hence, $n_{f(i)}> n_i=p_{r+1}$ and $f(i)\in J$.
(v) By (\ref{EQB012}) and the induction hypothesis we have $\{ n_j : j\in J\}\cap [1,p_{r+1}]=\{ p_s : 0\leq s\leq r+1\}$. Thus the recursion works.
$\Box$ \par \vspace*{2mm} Now, by Claim \ref{TB029}(v), (iii) and (i),
$\{ n_j :j\in J\} =\{ p_r :r\in \omega\} \subset \langle K\rangle \setminus K$ and $p_0 <p_1 <\dots < p_r < \dots $, which implies that $| \{ n_i :i\in I \}\cap\langle K \rangle |=\omega$. Since, by Fact \ref{TB032}(c), $\langle K\rangle \subset d{\mathbb N}$, we have $|\{ n_i :i\in I \} \cap d{\mathbb N} | =\omega$ and we obtain a contradiction.
$\Box$ \paragraph{Reversible functions in the Baire space} Each countable sequence of natural numbers $\langle n_i :i\in {\mathbb N}\rangle \in {}^{{\mathbb N}}{\mathbb N}$ can be regarded as a function $\varphi :{\mathbb N} \rightarrow {\mathbb N}$, where $\varphi(i)=n_i$, for $i\in {\mathbb N}$, and, hence, as an element of the Baire space ${\mathbb N}^{\mathbb N}$ with the standard topology (see \cite{Kech}). So we can consider the set of reversible functions belonging to ${\mathbb N}^{\mathbb N}$, $$ (\N ^\N)_{\mathrm{rev}} \!:= \Big\{ \varphi \in {\mathbb N}^{\mathbb N} : \neg \exists f\in \mathop{\rm Sur}\nolimits({\mathbb N})\setminus\mathop{\rm Sym}\nolimits({\mathbb N} ) \;\forall j\in {\mathbb N} \;\;\varphi (j)=\!\sum _{i\in f^{-1}[\{ j \}]} \varphi (i) \Big\} . $$ \begin{te} $(\N ^\N)_{\mathrm{rev}} $ is a dense $F_{\sigma\delta\sigma}(=\Sigma ^0_4)$ subset of ${\mathbb N}^{\mathbb N}$ of size ${\mathfrak c}$. \end{te} \noindent{\bf Proof. } If $B=\bigcap _{k\leq n}\pi ^{-1}_{i_k}[\{ j_k\}]$ is a basic open set, then, since the finite function $p=\{ \langle i_k ,j_k\rangle : k\leq n\}$ can be extended to a finite-to-one function $\varphi \in {\mathbb N}^{\mathbb N}$ and by Proposition \ref{TB037} we have $\varphi\in (\N ^\N)_{\mathrm{rev}}$, it follows that
$B\cap (\N ^\N)_{\mathrm{rev}} \neq \emptyset$ so $(\N ^\N)_{\mathrm{rev}} $ is dense in ${\mathbb N}^{\mathbb N}$. $|(\N ^\N)_{\mathrm{rev}}|={\mathfrak c}$ follows from the fact that ${\mathbb N}^{\mathbb N}$ contains ${\mathfrak c}$-many injections.
Let ${\mathcal I}$ be the set of non-empty independent subsets of ${\mathbb N}$ and, for $K\in {\mathcal I}$, let $d_K:=\gcd (K)$. Then by Proposition \ref{TB037} \begin{equation}\label{EQB023}\textstyle (\N ^\N)_{\mathrm{rev}} = A\cup \bigcup _{K\in {\mathcal I}}\;B_K \cap C_K \cap D_K, \end{equation} where \begin{eqnarray*} A & := & \Big\{ \varphi\in {\mathbb N}^{\mathbb N} : \forall m\in {\mathbb N} \;\; ( \varphi(i)=m \mbox{ for } <\omega\mbox{-many } i\in {\mathbb N})\Big\} , \\
& = &\textstyle \bigcap _{m\in {\mathbb N}} \bigcup _{k\in {\mathbb N}} \bigcap _{i\geq k} \pi ^{-1}_i[{\mathbb N} \setminus \{ m\}] , \\ B_K & := & \Big\{ \varphi\in {\mathbb N}^{\mathbb N} : \forall m\in K \;\; ( \varphi(i)=m \mbox{ for } \omega\mbox{-many } i\in {\mathbb N})\Big\} ,\\
& = &\textstyle \bigcap _{m\in K} \bigcap _{k\in {\mathbb N}} \bigcup _{i\geq k} \pi ^{-1}_i[\{ m\}] , \\ C_K & := & \Big\{ \varphi\in {\mathbb N}^{\mathbb N} : \forall m\in {\mathbb N} \setminus K \;\; ( \varphi(i)=m \mbox{ for } <\omega\mbox{-many } i\in {\mathbb N})\Big\} , \\
& = &\textstyle \bigcap _{m\in {\mathbb N}\setminus K} \bigcup _{k\in {\mathbb N}} \bigcap _{i\geq k} \pi ^{-1}_i[{\mathbb N} \setminus \{ m\}] , \\ D_K & := & \Big\{ \varphi\in {\mathbb N}^{\mathbb N} : \varphi(i)\in d_K{\mathbb N} \mbox{ for } <\omega\mbox{-many } i\in {\mathbb N}\Big\} \\
& = &\textstyle \bigcup _{m\in {\mathbb N}} \bigcap _{i\geq m} \bigcap _{k\in {\mathbb N}} \pi ^{-1}_i[{\mathbb N} \setminus \{ d_K k\}] . \end{eqnarray*} So, for $K\in {\mathcal I}$ we have $B_K \in G_\delta$, $D_K\in F_\sigma$ and $C_K\in F_{\sigma\delta}$, which implies that $B_K \cap C_K \cap D_K \in F_{\sigma\delta}$ and, since by Fact \ref{TB032}(d) we have ${\mathcal I} \subset [{\mathbb N} ]^{<\omega}$, it follows that $\bigcup _{K\in {\mathcal I}}\;B_K \cap C_K \cap D_K \in F_{\sigma\delta\sigma}$. Since $A\in F_{\sigma\delta}\subset F_{\sigma\delta\sigma}$, by (\ref{EQB023}) we have $(\N ^\N)_{\mathrm{rev}} \in F_{\sigma\delta\sigma}=\Sigma ^0_4$.
$\Box$ \begin{rem}\label{RB000}\rm Let the equivalence relation $\sim$ on ${\mathbb N}^{\mathbb N}$ be defined by $\varphi \sim \psi$ iff there is $f\in \mathop{\rm Sym}\nolimits ({\mathbb N})$ such that $\varphi=\psi\circ f$. It is evident that the set $(\N ^\N)_{\mathrm{rev}}$ is $\sim$-invariant, that is $\psi\sim \varphi\in (\N ^\N)_{\mathrm{rev}}$ implies $\psi\in (\N ^\N)_{\mathrm{rev}}$.
But $(\N ^\N)_{\mathrm{rev}}$ is not a subsemigroup of $\langle {\mathbb N} ^{\mathbb N} ,\circ \rangle$ (it is not closed under composition). Let ${\mathbb N} \setminus \{ 2\}=A \cup B$ and ${\mathbb N} =C\cup D \cup E$ be partitions, where $A,B,C,D,E \in [{\mathbb N} ]^\omega$ and $|A\cap (2{\mathbb N} +1)|=|B\cap (2{\mathbb N} +1)|=\omega$. Then, by Proposition \ref{TB037}, $ \varphi =\{ \langle 2,2 \rangle\}\cup (A\times \{ 3 \}) \cup (B\times \{ 5 \})\in (\N ^\N)_{\mathrm{rev}}. $
If $\psi_{DA}:D\rightarrow A\cap (2{\mathbb N} +1)$ and $\psi_{EB}:E\rightarrow B\cap (2{\mathbb N} +1)$ are bijections then, by Proposition \ref{TB037} again, $ \psi = (C\times \{ 2\}) \cup \psi_{DA}\cup\psi_{EB}\in (\N ^\N)_{\mathrm{rev}} . $ But $\varphi \circ \psi \not\in (\N ^\N)_{\mathrm{rev}}$, because the set $\{ 2,3,5\}$ is not independent. \end{rem}
\footnotesize
\end{document} |
\begin{document}
\title{New Numerical Algorithm for Modeling of Boson-Fermion Stars in Dilatonic Gravity\! \thanks{This work was supported by Bulgarian National Scientific Fund, Contr. NoNo F610/99, MM602/96 and by Sofia University Research Fund, Contr. No. 245/99.}} \author{T.L.Boyadjiev\thanks{Faculty of Mathematics and Computer Science, University of Sofia, 5 James Bourchier Blvd., 1164 Sofia, Bulgaria, E-mail: todorlb@fmi.uni-sofia.bg}\hspace{2mm}, M.D.Todorov\thanks{Faculty of Applied Mathematics and Computer Science, Technical University of Sofia, 1756 Sofia, Bulgaria, E-mail: mtod@vmei.acad.bg}\hspace{2mm}, P.P.Fiziev\thanks{Faculty of Physics, University of Sofia, 5, James Bourchier Blvd., 1164 Sofia, Bulgaria, E-mail: fiziev@phys.uni-sofia.bg}\hspace{2mm}, S.S.Yazadjiev\thanks{Faculty of Physics, University of Sofia, 5, James Bourchier Blvd., 1164 Sofia, Bulgaria, E-mail: yazad@phys.uni-sofia.bg}} \date{} \maketitle
\begin{abstract} We investigate numerically models of static spherically symmetric boson-fermion stars in the scalar-tensor theory of gravity with a massive dilaton field. The proper mathematical model of such stars is interpreted as a nonlinear two-parametric eigenvalue problem with unknown internal boundary. We employ the Continuous Analogue of Newton Method (CANM) which leads on each iteration to two separate linear boundary value problems with different dimensions inside and outside the star, respectively. Along with them a nonlinear algebraic system for the spectral parameters---the radius of the star $R_{s}$ and the quantity $\Omega $---is also solved.
In this way we obtain the behaviour of the basic geometric quantities and functions describing dilaton field and matter fields which build the star.
{\bf Keywords.} two-parametric nonlinear eigenvalue problem, Continuous Analog of the Newton Method, mixed fermion-boson stars, scalar-tensor theory of gravity, massive dilaton field.
{\bf AMS subject classifications.} 65C20, 65P20, 65P30, 8308, 83D05 \end{abstract}
\section{Introduction}
In this paper we present a numerical method for solving the equations of the general scalar-tensor theories of gravity including a dilaton potential term for the general case of a mixed boson-fermion star. This method is an improvement of the method for a similar problem proposed in our recent work \cite{tomiplas}. The original domain is split into two domains: an inner one inside the star and an external one outside the star. The solutions in these regions are obtained separately and after that they are matched. Before we go to the substantial part of this paper we will briefly describe the physical problem.
\section{Main model}\label{sec2} Boson stars are gravitationally bound macroscopic quantum states made up of scalar bosons \cite{K,RB,RB1,GW}. They differ from the usual fermionic stars in that they are only prevented from collapsing gravitationally by the Heisenberg uncertainty principle. For a self-interacting boson field the mass of the boson star, even for small values of the coupling constant, turns out to be of the order of Chandrasekhar's mass when the boson mass is similar to a proton mass. Thus, the boson stars arise as possible candidates for non-baryonic dark matter in the universe and consequently as a possible solution of one of the outstanding problems in today's astrophysics---the problem of nonluminous matter in the universe. Most of the stars are of primordial origin, being formed from an original gas of fermions and bosons in the early universe. That is why it should be expected that most stars are a mixture of both fermions and bosons in different proportions.
Boson-fermion stars are also a good model to learn more about the nature of strong gravitational fields not only in general relativity but also in other theories of gravity.
The most natural and promising generalizations of general relativity are the scalar-tensor theories of gravity \cite{BD,D,Will,DEF1}. In these theories the gravity is mediated not only by a tensor field (the metric of space-time) but also by a scalar field (the dilaton). These dilatonic theories of gravity contain arbitrary functions of the scalar field that determine the gravitational ``constant'' as a dynamical variable and the strength of the coupling between the scalar field and matter. It should be stressed that specific scalar-tensor theories of gravity arise naturally as low energy limit of string theory \cite{GSW,CFMP,FT,SS1,MV,MAHS} which is the most promising modern model of unification of all fundamental physical interactions.
Boson stars in scalar-tensor theories of gravity with massless dilaton have been widely investigated recently both numerically and analytically \cite{TX,GJ,T,TLS,CS,BS,Y}. Mixed boson-fermion stars in scalar tensor theories of gravity however have not been investigated so far in contrast to general relativistic case where boson-fermion stars have been investigated \cite{HLM}.
In the present paper we consider boson-fermion stars in the most general scalar-tensor theory of gravity with a massive dilaton.
In the Einstein frame the field equations in presence of fermion and boson matter are: \begin{eqnarray} G_{m }^{n }=\kappa _{*}\left( \stackrel{B}{T_{m }^{n }}+\stackrel{F}{ T_{m }^{n }}\right) +2\partial _{m }\varphi \partial ^{n }\varphi &&\!\!\!\!\!\! -\partial ^{l }\varphi \partial _{l }\varphi \delta _{m }^{n }+ \frac{1}{2}U(\varphi )\delta _{m }^{n }, \nonumber \\ \nabla _{m }\nabla ^{m }\varphi +{\frac{1}{4}}U^{\prime }(\varphi )&&\!\!\!\!\!\! =- \frac{\kappa _{*}}{2}\alpha (\varphi )\left( \stackrel{B}{T}+\stackrel{F}{T} \right), \nonumber \\ [-1.8ex] \label{SFE} \\[-1.8ex] \nabla _{m }\nabla ^{m }\Psi +2\alpha (\varphi )\partial ^{l }\varphi \partial _{l }\Psi &&\!\!\!\!\!\! =-2A^{2}(\varphi ){\frac{\partial {\tilde{ W}}(\Psi ^{+}\Psi )}{\partial \Psi ^{+}}}, \nonumber \\ \nabla _{m }\nabla ^{m }\Psi ^{+}+2\alpha (\varphi )\partial ^{l }\varphi \partial _{l }\Psi ^{+}&&\!\!\!\!\!\! =-2A^{2}(\varphi ){\frac{\partial { \tilde{W}}(\Psi ^{+}\Psi )}{\partial \Psi ^{+}}} \nonumber \end{eqnarray} where $\nabla _{m }$ is the Levi-Civita connection with respect to the metric $g_{m n },(m = 0,...,3;$ $n = 0,...,3)$. The constant $\kappa _{*}$ is given by $\kappa _{*}=8\pi G_{*}$ where $G_{*}$ is the bare Newtonian gravitational constant. The physical gravitational ``constant'' is $ G_{*}A^{2}(\varphi )$ where $A(\varphi )$ is a function of the dilaton field $\varphi $ depending on the concrete scalar-tensor theory of gravity. ${\tilde W}(\Psi ^{+}\Psi )$ is the potential of boson field. The dilaton potential $U(\varphi )$ can be written in the form $U(\varphi )=m_{D}^{2}V(\varphi )$ where $m_{D}$ is the dilaton mass and $V(\varphi )$ is dimensionless function of $\varphi $.
The function $\alpha (\varphi )=\frac{d}{d\varphi }\left[ \ln A(\varphi )\right] $ determines the strength of the coupling between the dilaton field $\varphi $ and the matter. The functions $\stackrel{B}{T}$ and $\stackrel{F}{ T}$ are correspondingly the trace of the energy-momentum tensor of the fermionic matter\footnote{In the present article we consider fermionic matter only in macroscopic approximation, i.e., after averaging quantum fluctuations of the corresponding fermion fields. Thus we actually consider standard classical relativistic matter.} $\stackrel{F}{ T_{m }^{n }}$ and bosonic matter $\stackrel{B}{T_{m }^{n }}$. Their explicit forms are \begin{eqnarray} \stackrel{B}{T_{m }^{n }}=\!\!\!\!\!\!&& \frac{1}{2}A^{2}(\varphi )\left( \partial _{m }\Psi ^{+}\partial ^{n }\Psi +\partial _{m }\Psi \partial ^{n }\Psi ^{+}\right) \nonumber \\ \!\!\!\!\!\!&& \qquad -\frac{1}{2}A^{2}(\varphi )\left[ \partial _{l }\Psi ^{+}\partial ^{l }\Psi -2A^{2}(\varphi ){\tilde{W}}(\Psi ^{+}\Psi )\right] \delta _{m }^{n }\>, \label{eq:2a} \\ \stackrel{F}{T_{m }^{n }}=\!\!\!\!\!\!&& \left( \varepsilon +p\right) u_{m }\,u^{n }-p\,\delta _{m }^{n }\>. \label{eq:2b} \end{eqnarray} Here $\Psi $ is a complex scalar field describing the bosonic matter while $ \Psi ^{+}$ is its complex conjugated function. The energy density and the pressure of the fermionic fluid in the Einstein frame are $\varepsilon =A^{4}(\varphi ){\tilde{\varepsilon}}$ and $p=A^{4}(\varphi )\,\tilde{p}$ where ${\tilde{\varepsilon}}$ and $\tilde{p}$ are the physical energy density and pressure. 
Instead of giving the equation of state of the fermionic matter in the form $\tilde{p}=\tilde{p}({\tilde{\varepsilon}})$ it is more convenient to write it in a parametric form \begin{equation} {\tilde{\varepsilon}}={\tilde{\varepsilon}_{0}}g(\mu ),\qquad \tilde{p}={\tilde{\varepsilon}_{0}\,}f(\mu ) \label{eq:2c} \end{equation} where ${\tilde{\varepsilon}_{0}}$ is a properly chosen dimensional constant and $\mu $ is the dimensionless Fermi momentum.
The physical four-velocity of the fluid is denoted by $u_{\mu }$. The potential for the boson field has the form \[ {\tilde{W}}(\Psi ^{+}\Psi )=-\frac{m_{D}^{2}}{2}\Psi ^{+}\,\Psi -\frac{1}{4 }{\tilde{\Lambda}\,}(\Psi ^{+}\Psi )^{2}. \] Field equations together with the Bianchi identities lead to the local conservation law of the energy-momentum of matter \begin{equation} \nabla _{n }\stackrel{F}{T_{m }^{n }}=\alpha (\varphi )\stackrel{F}{ T} \partial _{m }\varphi \,\,\,. \label{TBIAN} \end{equation}
We will consider a static and spherically symmetric mixed boson-fermion star in asymptotic flat space-time. This means that the metric $g_{m n }$ has the form \begin{equation} ds^{2}= \exp \left[ \nu ({\cal R}) \right] dt^{2}- \exp \left[ \lambda ({\cal R}) \right] d{\cal R}^{2}-{\cal R}^{2}\left( d\theta ^{2}+\sin ^{2}\theta \,d\psi ^{2}\right) \label{eq:gmn} \end{equation} where ${\cal R}, \theta ,\psi $ are usual spherical coordinates. The field configuration is static when the boson field $\Psi $ satisfies the relationship \[ \Psi ={\tilde{\sigma}(}{\cal R}{)\,}\exp(i\omega t). \] Here $\omega $ is a real number and ${\tilde{\sigma}(}{\cal R})$ is a real function. Taking into account the assumption that have been made the system of the field equation is reduced to a system of ordinary differential equations. Before we explicitly write the system we are going to introduce a rescaled (dimensionless) radial coordinate by $r= m_{B}{\cal R}$ where $m_{B}$ is the mass of the bosons. From now on, a prime will denote a differentiation with respect to the dimensionless radial coordinate $r$. After introducing the dimensionless quantities \[ \Omega ={\frac{\omega }{m_{B}}},\quad \sigma =\sqrt{\kappa }_{*}\,{\tilde{ \sigma}},\quad \Lambda ={\frac{{\tilde{\Lambda}}}{\kappa _{*}{m^{2}}_{B}}} ,\quad \gamma ={\frac{m_{D}}{m_{B}}}. \] and defining the dimensionless energy-momentum tensors as $T_{m}^{n}= {\frac {\kappa_{*}} {m_{B}^2}} T_{m}^{n}$ the components of the dimensionless energy-momentum tensor of the fermionic and bosonic matter become correspondingly \begin{eqnarray*} &&\!\!\!\!\!\!\qquad\qquad \stackrel{\mathit{F}}{T_{0}^{0}} =bA^{4}(\varphi )\,g(\mu ),\qquad \stackrel{ \mathit{F}}{T_{1}^{1}}=-bA^{4}(\varphi )\,f(\mu ),\\ \qquad \stackrel{\mathit{B}}{T_{0}^{0}}&&\!\!\!\!\!\! 
=\frac{1}{2}A^{2}(\varphi )\left[ \Omega ^{2}\sigma ^{2}(r)\exp (-\nu(r) )+\left( \frac{d\sigma }{dr}\right) ^{2}\exp (-\lambda(r) )\,\right] -A^{4}(\varphi )W(\sigma ^{2}),\\ \qquad \stackrel{\mathit{B}}{T_{1}^{1}}&&\!\!\!\!\!\! =-\frac{1}{2}A^{2}(\varphi )\left[\Omega ^{2}\sigma ^{2}(r)\exp (-\nu(r) ) + \left( \frac{d\sigma }{dr} \right) ^{2}\exp (-\lambda(r) )\,\right] - A^{4}(\varphi )W(\sigma ^{2}).
\end{eqnarray*} Here the parameter $b=\frac{\kappa _{*}{\tilde{\varepsilon}_{0}}}{m_{B}^{2}} $ describes the relation between the Compton length of dilaton and the usual radius of neutron star in general relativity.
The functions $\stackrel{\mathit{B}}{T}$ and $\stackrel{\mathit{F}}{T}$ describing the traces of the energy-momentum tensors have the form: \[ \stackrel{\mathit{B}}{T}=-A^{2}(\varphi )\,\left[ \Omega ^{2}\sigma ^{2}(r)\exp (-\nu(r) ) - \left( \frac{d\sigma }{dr}\right) ^{2}\exp (-\lambda(r) )\,\right] - 4A^{4}(\varphi )W(\sigma ^{2}), \] \[ \stackrel{\mathit{F}}{T}=bA^{4}(\varphi )\,\left[ g(\mu )-3\,f(\mu )\right] . \]
For the independent (dimensionless) radial coordinate $r$ we have $r\in [0,R_{s}]\cup [R_{s},\infty )$ where $0<R_{s}<\infty $ is the unknown radius of the fermionic part of the mixed boson-fermion star.
With all definitions we have given the main system of differential equations governing the structure of a static and spherically symmetric boson-fermion star can be written in the following form:
1. In the interior of the fermionic part of the star (the functions in this domain are subscripted by~$i$)
\begin{eqnarray} \hspace{1.5cm} \frac{d\lambda }{dr}&&\!\!\!\!\!\! =F_{1,i}\equiv \frac{1-\exp (\lambda )}{r}+r\,\left\{ \exp (\lambda )\left[ \stackrel{F}{T_{0}^{0}}+\stackrel{B}{T_{0}^{0}}+\frac{1 }{2}\gamma ^{2}V(\varphi )\right] +\left( \frac{d\varphi }{dr}\right) ^{2}\right\} , \nonumber \\ \frac{d\nu }{dr}&&\!\!\!\!\!\! =F_{2,i}\equiv -\frac{1-\exp (\lambda )}{r}-r\,\left\{ \exp (\lambda )\left[ \stackrel{F}{T_{1}^{1}}+\stackrel{B}{T_{1}^{1}}+\frac{1}{2} \gamma ^{2}V(\varphi )\right]-\left( \frac{d\varphi }{dr}\right) ^{2}\right\} , \nonumber \\ \frac{d^{2}\varphi }{dr^{2}}&&\!\!\!\!\!\! =F_{3,i}\equiv -\frac{2}{r}\frac{d\varphi }{dr} +\frac{1}{2}\left( F_{1,i}-F_{2,i}\right) \frac{d\varphi }{dr}\nonumber\\ &&\!\!\!\!\!\!\qquad\quad +\frac{1}{2} \exp (\lambda )\,\left[ \alpha (\varphi )\,(\stackrel{F}{T}+\stackrel{B}{T})+ \frac{1}{2}\gamma ^{2}V^{^{\prime }}(\varphi )\right] , \label{eqi} \\ \frac{d^{2}\sigma }{dr^{2}}&&\!\!\!\!\!\! =F_{4,i}\equiv -\frac{2}{r}\frac{d\sigma }{dr} +\left[ \frac{1}{2}\left( F_{1,i}-F_{2,i}\right) -2\alpha (\varphi )\frac{ d\varphi }{dr}\right] \frac{d\sigma }{dr}\nonumber\\ &&\!\!\!\!\!\!\qquad\quad -\sigma \exp (\lambda )\left[ \Omega ^{2}\exp (-\nu )+2A^{2}(\varphi )W^{^{\prime }}(\sigma ^{2})\right] , \nonumber \\ \frac{d\mu }{dr}&&\!\!\!\!\!\!\! =F_{5,i}\equiv -\frac{g(\mu )+f(\mu )}{f^{^{\prime }}(\mu ) }\left[ \frac{1}{2}F_{2}+\alpha (\varphi )\frac{d\varphi }{dr}\right] . \nonumber \end{eqnarray}
Here $\lambda (r),\nu (r),\varphi (r),\sigma (r)$ and $\mu (r)$ are unknown functions of $r$ and $\Omega $ is a unknown parameter. Having in mind the physical assumptions we have to solve the equations (\ref{eqi}) under following boundary conditions: \begin{equation} \lambda (0)=\frac{d\varphi }{dr}(0)=\frac{d\sigma }{dr}(0)=0,\quad \sigma (0)=\sigma _{c},\quad \mu (0)=\mu _{c}, \label{bcil} \end{equation} \begin{equation} \mu (R_{s})=0 \label{bcir} \end{equation}
\noindent where $\sigma _{c}$ and $\mu _{c}$ are the values of density of the bosonic and fermionic matter, respectively at the star's centre. The first three conditions in (\ref{bcil}) guarantee the nonsingularity of the metrics and the functions $\lambda (r),$ $\varphi (r),$ $\sigma (r)$ at the star's centre.
2. In the external domain (subscripted by $e$) there is no fermionic matter, i.e., one can formally suppose that the function $\mu (r)\equiv 0$ for $r\geq R_{s}$. The fermionic part of the energy-momentum tensor also vanishes identically, and thus the differential equations with respect to the remaining four unknown functions $\lambda (r),$ $\nu (r),$ $\varphi (r)$ and $\sigma (r)$ are:
\begin{eqnarray} \hspace{1.5cm}\frac{d\lambda }{dr}&&\!\!\!\!\!\! =F_{1,e}\equiv \frac{1-\exp (\lambda )}{r}+r\,\left\{ \exp (\lambda )\,\left[ \stackrel{B}{T_{0}^{0}}+\frac{1}{2}\gamma ^{2}V(\varphi )\right] +\left( \frac{d\varphi }{dr}\right) ^{2}\right\}, \nonumber \\ \frac{d\nu }{dr}&&\!\!\!\!\!\! =F_{2,e}\equiv -\frac{1-\exp (\lambda )}{r}-r\,\left\{ \exp (\lambda )\,\left[ \stackrel{B}{T_{1}^{1}}+\frac{1}{2}\gamma ^{2}V(\varphi )\right] -\left( \frac{d\varphi }{dr}\right) ^{2}\right\}, \nonumber \\ \frac{d^{2}\varphi }{dr^{2}}&&\!\!\!\!\!\! =F_{3,e}\equiv -\frac{2}{r}\frac{d\varphi }{dr} +\frac{1}{2}\left( F_{1,e}-F_{2,e}\right) \frac{d\varphi }{dr}\nonumber\\ &&\!\!\!\!\!\! \qquad +\frac{1}{2} \exp (\lambda )\,\left[ \alpha (\varphi )\stackrel{B}{T}+\frac{1}{2}\gamma ^{2}V^{^{\prime }}(\varphi )\right], \label{eqe} \\ \frac{d^{2}\sigma }{dr^{2}}&&\!\!\!\!\!\! =F_{4,e}\equiv -\frac{2}{r}\frac{d\sigma }{dr} +\left[ \frac{1}{2}\left( F_{1,e}-F_{2,e}\right) -2\alpha (\varphi )\frac{ d\varphi }{dr}\right] \frac{d\sigma }{dr}\nonumber\\ &&\!\!\!\!\!\! \qquad -\sigma \exp (\lambda )\,\left[ \Omega ^{2}\exp (-\nu )+2A^{2}(\varphi )W^{^{\prime }}(\sigma ^{2})\right]. \nonumber \end{eqnarray}
As it is required by the asymptotic flatness of space-time the boundary conditions at the infinity are \begin{equation} \nu (\infty )=0,\quad \varphi (\infty )=0,\quad \sigma (\infty )=0 \label{bcer} \end{equation} where we denote $(\cdot) (\infty) = \lim_{r \to \infty} (\cdot) (r)$.
We seek a solution $\left[ \lambda (r),\nu (r),\varphi (r),\sigma (r),\mu (r),R_{s},\Omega \right]$ subject to the nonlinear ODEs (\ref{eqi}) and (\ref{eqe}), satisfying the boundary conditions (\ref{bcil}), (\ref{bcir}) and (\ref{bcer}). We assume that the function $\mu (r)$ is continuous in the interval $[0,R_{s}]$, whilst the functions $\lambda (r),$ $\nu (r)$ are continuous and the functions $\varphi (r),$ $\sigma (r)$ are smooth in the whole interval $[0,\infty )$, including the unknown inner boundary $r=R_{s}$.
The so posed BVP is a two-parametric eigenvalue problem with respect to the quantities $R_{s}$ and $\Omega $.
Let us emphasize that a number of methods for solving the free-boundary problems are considered in detail in \cite{Vab,numrec}.
Here we aim at applying a new solution method to the problem formulated above. This method differs from the one proposed in \cite{tomiplas} and, for the governing field equations written in the forms (\ref{eqi}) and (\ref{eqe}), it possesses certain advantages.
\section{Method of solution} At first we scale the variable $r$ using the Landau transformation \cite{Vab} and in this way we obtain a fixed computational domain. Namely \[ x=\frac{r}{R_{s}},x\in [0,1]\cup [1,\infty ). \] For our further considerations it is convenient to present the systems (\ref {eqi}) and (\ref{eqe})\ in following equivalent form as systems of first order ODEs: \begin{eqnarray} -\mathbf{y}_{i}^{\prime }+R_{s}\mathbf{F}_{i}(R_{s}x,\mathbf{y}_{i},\Omega ) &=&0, \label{odei} \\ -\mathbf{y}_{e}^{\prime }+R_{s}\mathbf{F}_{e}(R_{s}x,\mathbf{y}_{e},\Omega ) &=&0 \label{odee} \end{eqnarray}
\noindent with respect to the unknown vector functions $$\mathbf{y} _{i}(x)\equiv (\lambda (x),\nu (x),\varphi (x),\xi (x),\sigma (x),\eta (x),\mu (x))^{T},$$ $$\mathbf{y}_{e}(x)\equiv (\lambda (x),\nu (x),\varphi (x),\xi (x),\sigma (x),\eta (x))^{T}$$ and right hand sides $\mathbf{F} _{i}\equiv (F_{1},F_{2},\xi ,F_{3},\eta ,F_{4},F_{5})^{T}$, $\mathbf{F} _{e}\equiv (F_{1},F_{2},\xi ,F_{3},\eta ,F_{4})^{T}$ where $(.)^{\prime }$ stands for differentiation towards the new variable $x$.
For given values of the parameters $R_{s}$ and $\Omega $ the independent solution of the inner system (\ref{odei}) requires seven boundary conditions. At the same time we have at our disposal only six conditions of the kind (\ref{bcil}) and (\ref{bcir}). In order to complete the problem we additionally set one more parametric condition (the value of one of the functions $\lambda (x),\nu (x),\varphi (x),\xi (x),\sigma (x)$ or $\eta (x)$) at the point $x=1$. Let us set, for example, \begin{equation} \varphi _{i}(1)=\varphi _{s} \label{dopusl} \end{equation} where $\varphi _{s}$ is a parameter. Then the boundary conditions (\ref{bcil}), (\ref{bcir}) and (\ref{dopusl}) of the inner BVP can be presented in the form:
\begin{equation} B_{0,i}\mathbf{y}_{i}(0)-D_{0,i}=0,\quad B_{1,i}\mathbf{y} _{i}(1)-D_{1,i}(\varphi _{s})=0. \label{bci} \end{equation} Here the matrices $B_{0,i}=diag(1,0,0,1,1,1,1)$, $D_{0,i}=diag(0,0,0,0, \sigma _{c},0,\mu _{c})$, $B_{1,i}=diag(0,0,1,0,0,0,1)$, $ D_{1,i}=diag(0,0,\varphi _{s},0,0,0,0)$.
Obviously the solution in the inner domain $x\in [0,1]$ depends not only on the variable $x,$ but it is a function of the three parameters $R_{s},\Omega ,\varphi _{s}$ as well, i.e., $\mathbf{y}_{i}=\mathbf{y}_{i}(x,\Omega ,R_{s},\varphi _{s}).$
In the external domain $x\geq 1$ the vector of solutions \[ \mathbf{y}_{e}(x)\equiv (\lambda (x),\nu (x),\varphi (x),\xi (x),\sigma (x),\eta (x))^{T} \] is six-dimensional. Hence six boundary conditions are indispensable for solving equation (\ref{odee}). At the same time only the three boundary conditions (\ref{bcer}) are known. Let us consider that the solution $\mathbf{y}_{i}(x)$ in the inner domain $x\in [0,1]$ is known. Then we postulate the remaining three deficient conditions to be continuity conditions at the point $x=1$. The first of them is similar to the condition (\ref{dopusl}) and the other two we assign to two arbitrary functions from among $\lambda (x),$ $\nu (x),$ $\xi (x),$ $\sigma (x)$ and $\eta (x)$, for example \[ \lambda _{e}(1)=\lambda _{i}(1),\quad \varphi _{e}(1)=\varphi _{s},\quad \sigma _{e}(1)=\sigma _{i}(1). \] It is convenient to present the boundary conditions in the external domain in matrix form again: \begin{equation} B_{1,e}\mathbf{y}_{e}(1)-D_{1,e}(\varphi _{s})=0,\quad B_{\infty ,e}\mathbf{y}_{e}(\infty )=0 \label{bce} \end{equation} where the matrices $B_{1,e}=diag(1,0,1,0,1,0)$, $D_{1,e}=diag(\lambda _{i}(1),0,\varphi _{s},0,\sigma _{i}(1),0)$, $B_{\infty ,e}=diag(0,1,1,0,1,0) $.
Let the solutions $\mathbf{y}_{i}=\mathbf{y}_{i}(x,\Omega ,R_{s},\varphi _{s})$ and $\mathbf{y}_{e}=\mathbf{y}_{e}(x,\Omega ,R_{s},\varphi _{s})$ be supposed known. Generally speaking for given arbitrary values of the parameters $R_{s},\Omega $ and $\varphi _{s}$ the continuity conditions with respect to the functions $\nu (x),\xi (x)$ and $ \eta (x)$ at the point $x=1$ are not satisfied. We choose the parameters $ R_{s},\Omega $ and $\varphi _{s}$ in such manner that the continuity conditions for the functions $\nu (x)$, $\xi (x)$ and $\eta (x)$ to be held, i.e.,
\begin{eqnarray} \nu _{e}(1,R_{s},\Omega ,\varphi _{s})-\nu _{i}(1,R_{s},\Omega ,\varphi _{s})&&\!\!\!\!\!\! =0, \nonumber \\ \xi _{e}(1,R_{s},\Omega ,\varphi _{s})-\xi _{i}(1,R_{s},\Omega ,\varphi _{s})&&\!\!\!\!\!\! =0, \label{cc} \\ \eta _{e}(1,R_{s},\Omega ,\varphi _{s})-\eta _{i}(1,R_{s},\Omega ,\varphi _{s})&&\!\!\!\!\!\! =0. \nonumber \end{eqnarray}
These conditions should be interpreted as three nonlinear algebraic equations with regard to the unknown quantities $R_{s},\Omega $ and $\varphi _{s}$. The usual way of solving equations of the above-mentioned kind (\ref{cc}) is by means of various iteration methods, for example Newton's method. The traditional technology, similarly to methods like shooting \cite{numrec1}, requires separate treatment of the BVPs and the algebraic continuity equations and brings with itself additional linear ODEs for the elements of the Jacobi matrix corresponding to (\ref{cc}). These elements are functions of the variable $x$ and they actually have to be known only at the point $x=1$. The solving of both nonlinear BVPs (\ref{odei}), (\ref{bci}) and (\ref{odee}), (\ref{bce}) along with the attached linear equations is another hard enough task.
At the present work using the CANM \cite{gavurin}, \cite{jmp} we propose a common treatment of both, differential and algebraic problems.
\mathstrut We suppose that the nonlinear spectral problem (\ref{odei}), (\ref {bci}), (\ref{odee}), (\ref{bce}) and (\ref{cc}) has a ``well separated'' \cite{jmp} exact solution. Let the functions $\mathbf{y}_{i,0}(x),\mathbf{y }_{e,0}(x)$ and the parameters $R_{s,0},\Omega _{0},\varphi _{s,0}$ are initial approximations to this solution. The CANM leads to the following iteration process: \begin{eqnarray} \mathbf{y}_{i,k+1}(x) &&\!\!\!\!\!\!=\mathbf{y}_{i,k}(x)+\tau _{k}\mathbf{z}_{i,k}(x), \label{stepi} \\ \mathbf{y}_{e,k+1}(x) &&\!\!\!\!\!\!=\mathbf{y}_{e,k}(x)+\tau _{k}\mathbf{z}_{e,k}(x), \label{stepe}\\ R_{s,k+1} &&\!\!\!\!\!\!=R_{s,k}+\tau _{k}\rho _{k}, \label{stepro} \\ \Omega _{k+1}&&\!\!\!\!\!\!=\Omega _{k}+\tau _{k}\omega _{k}, \label{stepomega} \\ \varphi _{s,k+1}&&\!\!\!\!\!\!= \varphi _{s,k}+\tau _{k}\phi _{k}. \label{stepfis} \end{eqnarray}
Here $\tau _{k}\in (0,1]$\ is a parameter which can rule the convergence of iteration process. The increments $\mathbf{z}_{i,k}(x),$ $\mathbf{z} _{e,k}(x),\rho _{k},\omega _{k}$ and $\phi _{k}$, $k=0,1,2,...$ satisfy the linear ODEs (for sake of simplicity henceforth we will omit the number of iterations $k$): \begin{eqnarray} -\mathbf{z}_{i}^{\prime }+R_{s}\frac{\partial \mathbf{F}_{i}}{\partial \mathbf{y}_{i}}\mathbf{z}_{i}+\left( R_{s}\frac{\partial \mathbf{F}_{i}}{ \partial R_{s}} + \mathbf{F}_{i}\right) \rho +R_{s}\frac{\partial \mathbf{F} _{i}}{\partial \Omega }\omega &&\!\!\!\!\!\!=\mathbf{y}_{i}^{\prime }-R_{s}\mathbf{F}_{i}, \label{canmi} \\-\mathbf{z}_{e}^{\prime }+R_{s}\frac{\partial \mathbf{F}_{e}}{\partial \mathbf{y}_{e}}\mathbf{z}_{e}+\left( R_{s}\frac{\partial \mathbf{F}_{e}}{ \partial R_{s}}+\mathbf{F}_{e}\right) \rho +R_{s}\frac{\partial \mathbf{F} _{e}}{\partial \Omega }\omega &&\!\!\!\!\!\!=\mathbf{y}_{e}^{\prime }-R_{s}\mathbf{F}_{e}. \label{canme} \end{eqnarray}
\noindent All the coefficients, as well as the right-hand sides, in the above two equations are known functions of the arguments $x$, $R_s$, $\Omega$ by means of the solution from the previous iteration. We seek the unknowns $\mathbf{z}_{i}(x)$ of equation (\ref{canmi}) and $\mathbf{z}_{e}(x)$ of equation (\ref{canme}) as linear combinations with coefficients $\rho ,\omega $ and $\phi $: \begin{eqnarray} \mathbf{z}_{i}(x) &=&\mathbf{s}_{i}(x)+\rho \,\mathbf{u}_{i}(x)+\omega \mathbf{v}_{i}(x)+\phi \mathbf{w}_{i}(x), \label{decompi} \\ \mathbf{z}_{e}(x) &=&\mathbf{s}_{e}(x)+\rho \,\mathbf{u}_{e}(x)+\omega \mathbf{v}_{e}(x)+\phi \mathbf{w}_{e}(x). \label{decompe} \end{eqnarray} Here $\mathbf{s}_{i}(x)$, $\mathbf{u}_{i}(x)$, $\mathbf{v}_{i}(x)$, $\mathbf{w}_{i}(x)$ and $\mathbf{s}_{e}(x)$, $\mathbf{u}_{e}(x)$, $\mathbf{v}_{e}(x)$, $\mathbf{w}_{e}(x)$ are new unknown functions, defined in the internal and the external domain, respectively. Substituting the decomposition (\ref{decompi}) into equation (\ref{canmi}), after reduction we obtain: \begin{eqnarray} -\mathbf{s}_{i}^{\prime }+Q_{i}(x)\,\mathbf{s}_{i}&&\!\!\!\!\!\!=\mathbf{y}_{i}^{\prime}-R_{s}\mathbf{F}_{i}, \nonumber \\ -\mathbf{u}_{i}^{\prime }+Q_{i}(x)\,\mathbf{u}_{i}&&\!\!\!\!\!\!=-\left( \mathbf{F}_{i}+R_{s}\frac{\partial \mathbf{F}_{i}}{\partial R_{s}}\right) , \nonumber \\[-1.8ex] \label{lini}\\[-1.8ex] -\mathbf{v}_{i}^{\prime }+Q_{i}(x)\,\mathbf{v}_{i}&&\!\!\!\!\!\!=-R_{s}\frac{\partial \mathbf{F}_{i}}{\partial \Omega }, \nonumber \\ -\mathbf{w}_{i}^{\prime }+Q_{i}(x)\,\mathbf{w}_{i}&&\!\!\!\!\!\!=0 \nonumber \end{eqnarray} where $Q_{i}\left( x\right) \equiv R_{s}\frac{\partial \mathbf{F}_{i}\left( R_{s}x,\mathbf{y}_{i},\Omega \right) }{\partial \mathbf{y}_{i}}$ stands for a square matrix $(7\times 7)$, which consists of the Fr\'{e}chet derivatives of the operator $\mathbf{F}_{i}$ at the point $\left\{ \mathbf{y}_{i}(x),R_{s},\Omega \right\} $.
Similarly applying the CANM to the boundary conditions (\ref{bci}) and taking into account the dependence of matrix $D_{1,i}$ on the parameter $ \varphi _{s}$ yields: \[ B_{0,i}\mathbf{z}_{i}(0)=D_{0,i}-B_{0,i}\mathbf{y}_{i}(0),\quad B_{1,i} \mathbf{z}_{i}(1)=D_{1,i}-B_{1,i}\mathbf{y}_{i}(1)-D_{1,i}^{\prime }\phi . \] By means of the decomposition (\ref{decompi}) we obtain the following eight boundary conditions (four left + four right) for the equations (\ref{lini}): \begin{eqnarray} &&\!\!\!\!\!\!B_{0,i}\mathbf{s}_{i}(0)=D_{0,i}-B_{0,i}\mathbf{y}_{i}(0),\qquad B_{1,i} \mathbf{s}_{i}(1)=D_{1,i}-B_{1,i}\mathbf{y}_{i}(1),\nonumber \\ &&\!\!\!\!\!\!B_{0,i}\mathbf{u}_{i}(0)=0,\qquad\qquad\qquad\qquad\> B_{1,i}\mathbf{u}_{i}(1)=0,\nonumber \\[-1.8ex] \label{lbci}\\[-1.8ex] &&\!\!\!\!\!\!B_{0,i}\mathbf{v}_{i}(0)=0,\qquad\qquad\qquad\qquad\> B_{1,i}\mathbf{v}_{i}(1)=0,\nonumber \\ &&\!\!\!\!\!\!B_{0,i}\mathbf{w}_{i}(0)=0,\qquad\qquad\qquad\qquad B_{1,i}\mathbf{w}_{i}(1)=-D_{1,i}^{\prime }(\varphi _{s})\nonumber . \end{eqnarray}
Let us now substitute for decomposition (\ref{decompe}) into the linear equations for external domain (\ref{canme}). As result we obtain the following four vector equations with regard to the unknown functions $\mathbf{s} _{e}(x),\mathbf{u}_{e}(x)$, $\mathbf{v}_{e}(x)$ and $\mathbf{w}_{e}(x)$ with eight boundary conditions (four left + four right): \begin{eqnarray} -\mathbf{s}_{e}^{\prime }+Q_{e}(x)\mathbf{s}_{e}&&\!\!\!\!\!\!=\mathbf{y}_{e}^{\prime }-R_{s}\mathbf{F}_{e}, \nonumber \\ -\mathbf{u}_{e}^{\prime }+Q_{e}(x)\,\mathbf{u}_{e}&&\!\!\!\!\!\!=-\left( \mathbf{F} _{e}+R_{s}\frac{\partial \mathbf{F}_{e}}{\partial R_{s}}\right) ,\nonumber\\[-1.8ex] \label{line} \\[-1.8ex] -\mathbf{v}_{e}^{\prime }+Q_{e}(x)\mathbf{v}_{e}&&\!\!\!\!\!\!=-R_{s}\frac{\partial \mathbf{F}_{e}}{\partial \Omega }, \nonumber \\ -\mathbf{w}_{e}^{\prime }+Q_{e}(x)\,\mathbf{w}_{e}&&\!\!\!\!\!\!=0. \nonumber \end{eqnarray} Here $Q_{e}\left( x\right) \equiv R_{s}\frac{\partial \mathbf{F} _{e}\left[ R_{s}x,\mathbf{y}_{e}(x),\Omega \right] }{\partial \mathbf{y}_{e}} $ is a square matrix $(6\times 6)$ whose elements are Frechet's derivatives of the operator $\mathbf{F}_{e}$ at the point $\left\{ \mathbf{y} _{e}(x),R_{s},\Omega \right\} $.
The corresponding linear BC are obtained in the same way as (\ref{lbci})\ and they become: \begin{eqnarray} B_{1,e}\mathbf{s}_{e}(1)&&\!\!\!\!\!\!=D_{1,e}-B_{1,e}\mathbf{y}_{e}(1),\qquad\> B_{\infty ,e} \mathbf{s}_{e}(\infty )=-B_{\infty ,e}\mathbf{y}_{e}(\infty ),\nonumber\\ B_{1,e}\mathbf{u}_{e}(1)&&\!\!\!\!\!\!=0,\qquad\qquad\qquad\qquad\quad B_{\infty ,e}\mathbf{u}_{e}(\infty )=0,\nonumber\\[-1.8ex] \label{lbce}\\[-1.8ex] B_{1,e}\mathbf{v}_{e}(1)&&\!\!\!\!\!\!=0,\qquad\qquad\qquad\qquad\quad B_{\infty ,e}\mathbf{v}_{e}(\infty )=0,\nonumber\\ B_{1,e}\mathbf{w}_{e}(1)&&\!\!\!\!\!\!=-D_{1,e}^{\prime }(\varphi _{s}),\qquad\qquad\quad B_{\infty ,e} \mathbf{w}_{e}(\infty )=0.\nonumber \end{eqnarray}
In the end to compute the increments $\left\{ \rho ,\omega ,\phi \right\} $ of parameters $R_{s},\Omega $ and $\varphi _{s}$ we use the three conditions (\ref{cc}).
Let the solutions of the linear BVPs (\ref{lini}), (\ref{lbci}) and (\ref{line}), (\ref{lbce}) at the $k$th iteration stage be assumed known. For the sake of simplicity we introduce the vector $\mathbf{\tilde{y}}(x)\equiv (\nu(x),\xi(x),\eta(x))^{T}$. For two arbitrary functions $h_{i}(x)$ and $h_{e}(x)$, defined in a left and a right vicinity of the point $x=1$, we set $\Delta h\equiv h_{e}(1)-h_{i}(1)$. Then, applying the CANM to the equations (\ref{cc}) and having in mind the decompositions (\ref{decompi}), (\ref{decompe}), we obtain the vector equation \begin{equation} \Delta \mathbf{\tilde{u}}\,\rho +\Delta \mathbf{\tilde{v}}\,\omega +\Delta \mathbf{\tilde{w}}\,\phi =-\left( \Delta \mathbf{\tilde{y}}+\Delta \mathbf{\tilde{s}}\right) , \label{lcc} \end{equation} which represents an algebraic system consisting of three linear scalar equations with respect to the three unknowns $\rho $, $\omega $ and $\phi $.
The general sequence of the algorithm can be recapitulated in the following way. Let us assume that the functions $\mathbf{y}_{i,k}(x)$, $\mathbf{y} _{e,k}(x)$, and parameters $R_{s,k}$, $\Omega _{k}$, $\varphi _{s,k}$ are given for $k\geq 0$. We solve the linear BVPs (\ref{lini}), (\ref{lbci}) and thus we compute the functions $\mathbf{s}_{i,k}(x),\mathbf{u}_{i,k}(x)$, $ \mathbf{v}_{i,k}(x)$, $\mathbf{w}_{i,k}(x)$ in the inner domain $x\in [0,1]$ . Then we solve the linear BVPs (\ref{line}), (\ref{lbce}) in the external domain $x\in [1,\infty ]$ and compute the functions $\mathbf{s}_{e,k}(x), \mathbf{u}_{e,k}(x)$, $\mathbf{v}_{e,k}(x)$ and $\mathbf{w}_{e,k}(x)$. Next, to obtain the increments ${\rho }_{k}$, ${\omega }_{k}$ and ${\phi }_{k}$ we solve the linear algebraic system (\ref{lcc}). So using the decompositions (\ref {decompi}), (\ref{decompe}) and then the formulae (\ref{stepi}) - (\ref {stepfis}) we calculate the functions $\mathbf{y}_{i,k+1}(x)$, $\mathbf{y} _{e,k+1}(x)$, the radius of the star $R_{s,k+1}$, the quantity $\Omega _{k+1} $ and the parameter boundary condition $\varphi _{s,k+1}$ as well at the new iteration stage $k+1$.
At every iteration $k$ an optimal step $\tau _{opt}$ is determined in accordance with the Ermakov--Kalitkin formula \cite{ermakov} \[ \tau _{opt}\approx \frac{\delta (0)}{\delta (0)+\delta (1)} \] where the residual $\delta (\tau )$ is calculated as follows \[ \delta (\tau _{k})=\max \,\left[ \delta _{f},(R_{s,k}+\tau _{k}\rho _{k})^{2},(\Omega _{k}+\tau _{k}\omega _{k})^{2},(\varphi _{s,k}+\tau _{k}\phi _{k})^{2}\right] \] and $\delta _{f}$ is the Euclidean residual of the right-hand sides of the first equations in the systems (\ref{lini}), (\ref{lbci}) and (\ref{line}), (\ref{lbce}).
The criterion for termination of the iterations is $\delta (\tau _{opt})<\varepsilon $ where $\varepsilon \sim 10^{-8}\div 10^{-12}$ for some $k$.
Taking into account the smoothness of the sought solutions we solve the linear BVPs (\ref{lini}), (\ref{lbci}) and (\ref{line}), (\ref{lbce}) employing Hermitian splines and a spline collocation scheme of fourth order of approximation \cite{zavyal}. In doing so we essentially utilize the important feature that each of the above-mentioned two groups of vector BVPs (inner and external) has one and the same left-hand side.
It is worth noting that the algebraic systems of linear equations, as well as the system (\ref{lcc}), become ill-posed in the vicinity of the ``exact'' solution, i.e., for sufficiently small residuals $\delta $. That is why for small $\delta $, for example if $\delta <10^{-3}$ (then usually $\tau _{opt}\sim 1$), it is expedient to use the Newton--Kantorovich method, in which the respective matrices are kept fixed at some $\delta \geq 10^{-3}$.
\section{Some numerical results}
For the purpose of illustration we will consider and discuss some results obtained from numerical experiments. A detailed description and analysis of the results from a physical point of view will be the object of another paper of ours.
In the present article we consider a concrete scalar-tensor model with the functions (see Section~\ref{sec2}) \begin{eqnarray*} \qquad\qquad &&\!\!\!\!\!\! A(\varphi )=\exp \left(\frac{\varphi }{\sqrt{3}}\right),\qquad V(\varphi )={\frac {3} {2}}(1-A^{2}(\varphi ))^2,\\ f(\mu )&&\!\!\!\!\!\! =\frac{1}{8}\left[ (2\mu -3)\sqrt{\mu +\mu ^{2}}+3\ln \left( \sqrt{\mu }+\sqrt{1+\mu }\right) \right],\\ \;g(\mu )&&\!\!\!\!\!\! =\frac{1}{8}\left[ (6\mu +3)\sqrt{\mu +\mu ^{2}}-3\ln \left( \sqrt{\mu }+\sqrt{1+\mu }\right) \right], \end{eqnarray*} \[ W(\sigma ^{2})=-\frac{1}{2}\left( \sigma ^{2}+\frac{1}{2}\Lambda \sigma ^{4}\right). \] The quantities $b,\Lambda $ are given parameters. For completeness we note that in the concrete case the functions $f(\mu)$ and $g(\mu)$ represent in parametric form the equation of state of a noninteracting neutron gas, while the function $W(\sigma^2)$ describes a boson field with quartic self-interaction.
\begin{figure}
\caption{The function $\sigma(x)$ for $\sigma_c = 0.4$; $\mu_c =1.2$.}
\label{sigma}
\end{figure}
\begin{figure}
\caption{The function $\varphi(x)$ for $\sigma_c = 0.4$; $\mu_c = 1.2$.}
\label{phi}
\end{figure}
The calculated functions $\sigma(x)$, $\varphi(x)$, $\nu(x)$ and $\mu(x)$ are plotted correspondingly in Figs.~\ref{sigma}, \ref{phi}, \ref{nu} and \ref{mu} for the parameter values $\gamma = 0.1$, $\Lambda =10$ and $b=1$. The behaviour of the mentioned functions is typical for a wider range of the parameters, not only for the values presented in the figures. The function $\sigma(x)$ decreases rapidly from its central value $\sigma_c = 0.4$ (in the case under consideration) to zero; for dimensionless coordinate $x > 6$ the function does not exceed $10^{-4}$. Similarly, the function $\nu(x)$ has its largest derivative for $x \in (0,9)$, after which it slowly approaches zero at infinity like $\frac{1}{x}$. For example, when $x \approx 9$ the derivative $\nu^{\prime}(x) \approx 10^{-2}$, while for $x > 27$ $\nu^{\prime}(x) < 10^{-4}$, i.e., the asymptotic behaviour of the calculated grid function and its derivative agrees very well with the theoretical prediction (see \cite{tomiplas}). The function $\varphi(x)$ increases rapidly for $x<4$, after which it tends asymptotically to zero. Obviously the quantitative behaviour of $\varphi(x)$ for the central value $\sigma_c = 0.4$ is determined by the dominance of the term $\stackrel{B}{T}$ over the term $\stackrel{F}{T}$ (see \cite{tomiplas}). Finally, the function $\mu(x)$ is nontrivial in the inner domain $x \in [0,1]$, i.e. inside the star. Here it varies monotonically and continuously from its central value (in the case under consideration) $\mu_c = 1.2$ to zero at $x = 1$, corresponding to the radius of the star.
\begin{figure}
\caption{The function $\nu(x)$ for $\sigma_c = 0.4$; $\mu_c = 1.2$.}
\label{nu}
\end{figure}
\begin{figure}
\caption{The function $\mu(x)$ for $\sigma_c = 0.4$; $\mu_c = 1.2$.}
\label{mu}
\end{figure}
From a physical point of view it is important to know the mass of the boson-fermion star and the total number of particles (bosons and fermions) making up the star.
The dimensionless star mass can be calculated via the formula $$ M= \int_{0}^{\infty} r^2 \left(\stackrel{\mathit{B}}{T_{0}^{0}}+ \stackrel{\mathit{F}}{T_{0}^{0}} + \exp(-\lambda)\left({\frac{d\varphi }{dr}} \right)^2 + {\frac{\gamma^2}{2}}V(\varphi) \right) dr. $$
The dimensionless rest mass of the bosons (total number of bosons times the boson mass) is given by $$ M_{RB}= \Omega \int_{0}^{\infty} r^2 A^2(\varphi) \exp\left({\frac{\lambda - \nu }{ 2}}\right)\sigma^2 dr. $$
The dimensionless rest mass of the fermions is correspondingly \[ M_{RF}=b\int_{0}^{\infty }r^{2}A^{3}(\varphi ) \exp\left({\frac{\lambda }{2}}\right) n(\mu ) dr \] where $n(\mu )$ is the density of the fermions. In the case we consider we have $n(\mu )=\mu ^{\frac{3}{2}}(x)$.
The dependencies of the star mass $M$ (solid line) and the rest mass of the fermions $M_{RF}$ (dashed line) on the central value $\mu _{c}$ of the function $\mu(x)$ are shown in the configuration diagram in Fig.~\ref{mass} for $\lambda =0$, $\gamma =0.1$, $b=1$ and $\sigma _{c}=0.002$. It should be pointed out that for such a small central value $\sigma_{c}$ we have in practice a pure fermionic star. In the figure it is seen that from small values of $\mu_{c}$ up to values just beyond the peak the rest mass is greater than the total mass of the star, which means that the star is potentially stable.
\begin{figure}
\caption{The star mass $M$ and the rest fermion mass $M_{RF}$ as functions of the central value $\mu_c$}
\label{mass}
\end{figure}
\begin{figure}
\caption{The binding energy $E$ versus the rest fermion mass $M_{RF}$}
\label{bind}
\end{figure}
On Fig. \ref{bind} the binding energy of the star $E_{b}=M-M_{RB}-M_{RF}$ is drawn against the rest mass of fermions $M_{RF}$ for $\lambda =0$, $\gamma =0.1$, $ b=1$ and $\sigma _{c}=0.002$. Fig. \ref{bind} is actually a bifurcation diagram. As the central value of the function $\mu(x)$ increases, one meets a cusp. The appearance of a cusp shows that the stability of the star changes---one perturbation mode develops instability. Beyond the cusp the star is unstable and may collapse, eventually forming a black hole. The corresponding physical results for pure boson stars are considered in our recent paper \cite{FYBT2}.
\section*{Acknowledgments} We are grateful to Prof. I.V. Puzynin (JINR, Dubna, Russia) for helpful discussion.
\end{document}
\begin{document}
\setlength{\textbaselineskip}{22pt plus2pt}
\setlength{\frontmatterbaselineskip}{17pt plus1pt minus1pt}
\setlength{\baselineskip}{\textbaselineskip}
\title{Generalization Through the Lens of Learning Dynamics} \thispagestyle{empty} \begin{romanpages} \begin{abstract}
A machine learning (ML) system must learn not only to match the output of a target function on a training set, but also to generalize to novel situations in order to yield accurate predictions at deployment. In most practical applications, the user cannot exhaustively enumerate every possible input to the model; strong generalization performance is therefore crucial to the development of ML systems which are performant and reliable enough to be deployed in the real world. While generalization is well-understood theoretically in a number of hypothesis classes, the impressive generalization performance of deep neural networks has stymied theoreticians. In deep reinforcement learning (RL), our understanding of generalization is further complicated by the conflict between generalization and stability in widely-used RL algorithms. This thesis will provide insight into generalization by studying the learning dynamics of deep neural networks in both supervised and reinforcement learning tasks.
We begin with a study of generalization in supervised learning. We propose new PAC-Bayes generalization bounds for invariant models and for models trained with data augmentation. We go on to consider more general forms of inductive bias, connecting a notion of training speed with Bayesian model selection. This connection yields a family of marginal likelihood estimators which require only sampled losses from an iterative gradient descent trajectory, and analogous performance estimators for neural networks. We then turn our attention to reinforcement learning, laying out the learning dynamics framework for the RL setting which will be leveraged throughout the remainder of the thesis. We identify a new phenomenon which we term capacity loss, whereby neural networks lose their ability to adapt to new target functions over the course of training in deep RL problems, for which we propose a novel regularization approach. Follow-up analysis studying more subtle forms of capacity loss reveals that deep RL agents are prone to memorization due to the unstructured form of early prediction targets, and highlights a solution in the form of distillation. We conclude by calling back to a different notion of invariance to that which started this thesis, presenting a novel representation learning method which promotes invariance to spurious factors of variation in the environment.
\end{abstract}
\section{Learning to generalize}
The ability to generalize a lesson from the classroom to the real world is what separates \textit{learning} from \textit{memorization}. In tasks ranging from mathematics to language, humans are remarkably skilled at identifying abstract patterns, and applying these patterns to novel contexts. The reader is unlikely to have previously encountered the sentence `the armadillo tipped its blue hat and returned to the game of marbles', and yet would likely have no trouble interpreting it, or answering questions concerning the colour of the armadillo's hat. However, the range of domains in which humans exhibit this ability to learn and generalize is limited. A human can easily parse natural language, but will struggle to identify structure in strings of base pairs arising from a genome. In these settings, we can benefit from computational tools. Machine learning approaches, in particular the training of deep neural networks on large datasets, present a promising direction towards the development of general algorithms which can identify patterns in data and solve a range of problems.
In a typical machine learning pipeline, the practitioner collects data (a \textit{training set}) which is then fed into a machine learning algorithm, with the hope that a system which can accurately model this data will have captured the underlying structure of the task. The training set will not encompass the set of all possible inputs a model may receive at deployment; a learned predictor must \textit{extrapolate} from the data it was trained on in order to make useful predictions. Generalization is crucial both to obtain good performance and to ensure the safety and reliability of these systems when they encounter data that was not seen during training. Yet how do we ensure that these highly expressive systems are {learning} and not {memorizing}? This question is a central concern of a long line of literature, and of this thesis.
\subsection{Defining generalization}
The machine learning community broadly distinguishes between two classes of generalization: within-distribution generalization, and out of distribution (OOD) generalization. While both types of generalization concern the performance of a predictor on data not seen during training, they differ in their structural assumptions on how this data is generated. Within-distribution generalization assumes that the process by which the training data was collected will also generate the data on which we will evaluate the trained model. This assumption is used in a rich theoretical literature which provides provable guarantees on the generalization performance of certain classes of learning algorithms. However, it is not reflected in many of the settings found in practice, where the procedure by which the training data is collected differs from how data is generated at evaluation; this difference is widely referred to as \textit{distribution shift}.
The real world is full of nonstationarities which can induce distribution shift: a user's taste in films will evolve as they age, slang terms enter and leave common usage, and interest in advertisements for winter boots may vary with the seasons. Data collected one month may quickly fall out of date and cease to be reflective of the phenomenon being modelled within a matter of weeks. Even the ways in which data is collected may introduce biases into the training set. Many image datasets, for example, include the main subject centred nicely in the middle of the frame, signalling to a learning algorithm that the corners of an image do not contain relevant information. These nonstationarities and biases can result in large {distribution shifts} when the model is deployed. The technical difficulty of overcoming such distribution shifts will depend on their structure and magnitude. Work on developing models which are robust to distribution shifts typically must make explicit assumptions on the type of shift being considered.
\subsection{The importance of generalization}
While we often marvel at great feats of memorization, such as memory champions who can recite the order of a shuffled deck of cards after a minute's concentration or taxi drivers who can recall the street maps of large cities by heart, memorization is discouraged when it occurs in place of understanding. The difference between the two is one of generalization: memorizing multiplication tables enables quick recall, but learning how to multiply numbers together algorithmically enables generalization to previously unseen number pairs. While we hesitate to anthropomorphize a linear regression model in saying that it `understands' the relationship between inputs and outputs, we nonetheless seek out a similar phenomenon in our machine learning systems. It is not sufficient to make perfect predictions on the training data: the model must be able to apply the relationship between input and output to new contexts, and make accurate predictions in these contexts.
This generalization is crucial if we hope to see the power of machine learning deployed in real-world settings, where mistakes on novel inputs can have catastrophic consequences. Unforeseen distribution shifts in medical data, such as replacing an imaging device in a hospital, can result in a decline in performance that has serious ramifications for patients' health. Cross-validation, the canonical approach to evaluating generalization performance, will not identify the model's robustness to distribution shifts that the developer did not already foresee and include in the evaluation set. This presents a particular challenge for practitioners; even if a decline in performance is detected, the best we can hope to do is re-train or fine-tune the model to improve its performance on the new data. This can become much more expensive than if the model had learned relationships that generalized well off the bat.
\subsection{Deep learning} \begin{figure}\label{fig:dnn}
\end{figure}
The deep learning revolution has spurred the growth of several international conferences, the creation of multiple industrial AI labs, and the allocation of three Turing awards. This is due to the success of deep neural networks (DNNs, see Figure~\ref{fig:dnn}) at modelling a wide range of data modalities. The applications of DNNs range from the benign, such as helping people with visual impairments navigate a street and translating text from one language to another, to those with the potential for malicious use, such as identifying human faces in surveillance footage. Strikingly, DNNs often obtain impressive generalization performance and are considered robust enough to be used in many commercial applications.
The success of these models has brought attention to the discipline, but also stymied theoreticians. Compared to the expressivity of deep neural networks, traditional learning algorithms seek to model data by searching over a relatively small class of functions. Theoretical analysis of these algorithms crucially depends on the size of the function class in order to provide guarantees on the expected error of the function found by the algorithm on new data. In contrast, the set of functions expressible by a given neural network architecture is so large as to result in vacuous results when traditional analysis is applied to deep learning. This has opened up a number of exciting approaches to study the generalization of DNNs which often have a more experimental flavour than prior work on learning theory.
\section{Learning to act}
We will be particularly interested in studying machine learning systems which are capable of \textit{doing things} in the world, rather than passively outputting predictions. This is captured by the {reinforcement learning} (RL) framework (see Figure~\ref{fig:rl_formulation}). At its core, a reinforcement learning problem consists of an \textit{agent} which can interact with an \textit{environment} with the goal of maximizing the cumulative \textit{reward} signal that it receives. Just as a trainer can teach a dog to sit by providing suitable rewards to reinforce the desired behaviour, we can apply the power of machine learning algorithms to maximize a prespecified reward function in the RL framework. We use the terminology \textit{behaviour policy} to refer to the distribution over actions that the agent takes in each state of the environment. The \textit{optimal policy} is the action-selection rule which maximizes the expected cumulative reward from each state.
\subsection{The reinforcement learning problem}
Learning how to behave optimally is often aided by learning to predict the expected cumulative reward the agent will receive after it visits a state and then follows some behaviour policy. Our usage of the word `learn' differs from that used in other areas of machine learning, where it refers to the identification of a relationship between inputs and outputs from a data set. An RL agent does not receive explicit information about the optimal policy from the environment; this policy must be deduced from the reward and transition structure via planning. Reinforcement learning is thus closely related to the problem of \textit{optimal control}. Control problems assume an environment, modelled as a Markov Decision Process (MDP), with known transition and reward structure and seek to identify an optimal behaviour policy. Reinforcement learning also seeks to obtain an optimal policy, but does not assume that the structure of the MDP is known in advance. Instead, the agent must interact with the MDP in order to obtain information about the reward and transition structure, and use this information to identify an approximately optimal policy. The number of interactions with the environment needed for an agent to identify an approximately optimal policy, its \textit{sample complexity}, is a key criterion by which RL algorithms are evaluated, and which distinguishes reinforcement learning from optimal control, where the dynamics of the world are known a priori. \begin{figure}
\caption{A figure visualizing the RL problem; featuring the environment (pictured on the left) and agent (right) dichotomy.}
\label{fig:rl_formulation}
\end{figure} \subsection{Generalization in reinforcement learning}
Many problems of interest in reinforcement learning do not require generalization, i.e. it is assumed that the agent will only encounter states at deployment that it encountered during training. Indeed, the field contains a rich literature on the analysis of \textit{tabular} problems, whereby the states of the MDP are simply an enumeration of integers and learning consists of updating a lookup table. In many \textit{rich observation} settings, however, there may be an interesting functional relationship between the state observations emitted by the environment and the reward and transition structure at that state. While lookup tables are sufficient for small state spaces, most applications of RL to real-world data necessitate generalization either due to the magnitude of the state space or in order to be robust to distribution shifts. For example, the angles and torques of a robot's actuators can take on a continuum of values, and the set of possible image inputs far exceeds the size that can be fit into computer memory as a lookup table. Further, even if such a representation of the value function were possible it is not clear whether that would be desirable. Visual similarity between states can provide information about the optimal policy and value function which may be useful to the agent. A function approximator with an appropriate inductive bias will be able to leverage this similarity to accelerate the learning process.
However, a dark side of generalization arises in reinforcement learning problems: instability. Instability is particularly problematic in some of the most popular algorithms in the deep RL literature, where careful hyper-parameter tuning and engineering tricks are needed to prevent the network parameters from diverging to infinite values. Excessive generalization can also slow down learning if the inductive bias encoded by the function approximator is not aligned with the structure of the environment. A bias towards smooth functions might thus result in a network that fails to accurately distinguish between states with large differences in value. The tension between generalization and stability in deep reinforcement learning presents an additional layer of difficulty to deep RL, as compared to supervised deep learning, and is a problem we will explore in later chapters.
\section{Understanding the learning process}
Throughout this thesis, we will seek to understand how a model will generalize by studying the optimization trajectory it took during training. In contrast, most theoretical results in the literature characterize generalization using only properties of the final outputs of a learning algorithm, i.e. the neural network's final trained parameters. Studying the trajectory of a learning algorithm (which we will refer to as its \textit{learning dynamics}) gives us the opportunity to gain insights into a model that cannot be obtained by only considering the final trained parameters. We will leverage these insights in later chapters to obtain novel estimators with significant predictive power over the ranking of a model's final generalization performance.
\subsection{Learning to generalize between data points}
Many neural network training procedures leverage large datasets which cannot fit onto a single GPU. To accelerate training, learning algorithms often partition the data into subsets called \textit{minibatches}. The learner then updates its predictions for each minibatch iteratively. This procedure provides extremely useful information about whether the agent is learning to generalize or to memorize, by revealing whether the learner's update based on one minibatch has \textit{generalized} to improve its predictions on the other minibatches.
A key intuition throughout this thesis is that generalization between disjoint subsets of the training set can be indicative of generalization to the test set. If an update to the network intended to improve its predictions for one minibatch also improves the network's predictions on many other data points, this is likely to result in an improvement to the learner's predictions on novel inputs drawn from the same process that generated the training data. In contrast, if an update does not improve the learner's predictions on the other data points in the training set, it is unlikely to improve the agent's predictions on the data it will see at deployment. This relationship will be explored in greater depth in Chapter \ref{chp:supervised}.
\subsection{Stability vs extrapolation}
Our study of learning dynamics in RL agents will be particularly enlightening, as these dynamics are much more complex than their supervised counterparts. Supervised learning algorithms tend to induce well-behaved dynamics that, even if they correspond to a non-convex loss surface, nonetheless at least come with reasonable guarantees on the convergence of learning algorithms to local minima. Reinforcement learning, in contrast, incorporates nonstationarity as part of the learning process. This nonstationarity has many forms: the distribution of states that the agent visits will change as its policy improves, but so will the target values that the agent is trying to predict. This results in a dynamical system that superficially resembles the stable gradient descent regime of supervised learning algorithms, but lacks convergence guarantees in many problem settings of interest.
Even more pernicious, reinforcement learning agents must also face the challenge that the very properties of a function approximator which are associated with better generalization in the supervised learning setting are precisely those which can cause divergence in RL algorithms, as we will see in later sections. This means that deep RL agents must overcome two distinct hurdles in order to learn an optimal behaviour policy which also generalizes to new settings. First, they must learn to behave optimally in the training environment, while avoiding issues of divergence and other pathologies of function approximation in RL. Second, having achieved a high-performing policy, they must then ensure that the policy is robust to superficial changes to the observations they receive from the environment.
\section{Thesis contributions and structure}
\subsection{Contributions} Broadly speaking, this thesis presents a set of novel empirical and theoretical tools to predict, understand, and improve generalization in deep neural networks in a range of problem settings. Crucial to these results will be an analysis of the \textit{dynamics} of learning algorithms in various settings. The primary contributions of this thesis are enumerated as follows: \begin{enumerate}
\item A characterization of the relationship between invariance, training speed, and generalization, with theoretical results complemented by empirical validation of our main findings in practically relevant settings.
\begin{enumerate}
\item A theoretical analysis of the effect of invariance on generalization via PAC-Bayes bounds, allowing an explicit characterization of the role of symmetries in generalization bounds via a quantity we term the \textit{symmetrization gap}. This theoretical analysis is complemented by an empirical study which highlights the limitations of the types of approximate invariance promoted by data augmentation to generalize to out of distribution inputs.
\item A new estimator of the marginal likelihood which complements the analysis of upper bounds on generalization error to provide useful rankings of models for architecture search and hyperparameter selection. The analysis of this estimator reveals a deep connection between training speed and Bayesian model selection, and yields a novel performance estimator for architecture search in deep neural networks.
\end{enumerate}
\item A theoretical framework for the study of representation dynamics in deep reinforcement learning along with practical insights derived thereof. These insights concern both the ability of a learned feature representation to linearly approximate a prediction target, and its ability to generalize to novel prediction objectives over the course of training.
\begin{enumerate}
\item A theoretical model and analysis of the representation learning dynamics of value-based reinforcement learning algorithms, yielding an analytic characterization of the effect of auxiliary tasks on agents' learned representations.
\item The identification of the `capacity loss' phenomenon in deep RL which characterizes the tendency of neural networks to catastrophically overfit to early prediction targets in sparse-reward environments.
\item A new regularization method based on the insights from this previous analysis that improves generalization to new prediction objectives even after long training periods.
\end{enumerate}
\item Theoretical analysis leveraging the above framework to provide insight into and algorithmic improvements to generalization to novel observations and environments.
\begin{enumerate}
\item A theoretically grounded explanation for prior empirical observations of overfitting in the broader deep RL literature, including dense-reward problems, and a set of recommendations for principled approaches to reduce memorization and improve generalization between observations.
\item A representation-learning objective which goes beyond the single-environment setting to enable zero-shot generalization to novel environments sharing underlying structure with the training environments.
\end{enumerate}
\end{enumerate}
\subsection{Warmup: supervised learning} The first two content chapters of this thesis will lay the groundwork for our understanding of generalization in deep neural networks. Chapter \ref{chp:invariance} presents a novel PAC-Bayes generalization bound for invariant models, characterizing the effect of invariance on generalization through a quantity which we term the \textit{symmetrization gap}. We empirically verify that the symmetrization gap appears in computations of PAC-Bayes bounds for invariant deep neural networks, and further show a strong correlation between the rankings of these upper bounds and the rankings given by final generalization performance. This chapter further contributes new empirical analysis of the optimization trajectories of DNNs trained to exhibit approximate invariances via data augmentation, illustrating the limitations of these trained invariances to {extrapolate} beyond their training distribution. The importance of invariance to generalization in reinforcement learning will be revisited in Chapter~\ref{chp:icp}.
A failing of PAC-Bayesian generalization bounds is that they do not offer predictions about which of a set of models will generalize best. In order to achieve correctness, these bounds pay the price of predictive power. Chapter~\ref{chp:supervised} shifts focus to consider performance estimators which can \textit{predict} the relative performance of different neural network architectures. It presents a novel marginal likelihood estimator which can be used to enable Bayesian model selection for a broader class of models from which we only require accurate posterior samples, overcoming the normative limitations of generalization bounds and computational challenges of the exact marginal likelihood. This estimator has a number of appealing properties, chief among which is that it can be applied to a subset of training losses from a gradient descent trajectory. This illustrates a deep relationship between generalization and learning dynamics, as a model's generalization performance can thus be said to depend on its \textit{training speed}. These chapters will principally be based on the following papers: \nobibliography* \begin{itemize}
\item \bibentry{lyle2020bayesian}
\item \bibentry{lyle2020benefits} \end{itemize} with supporting evidence drawn from \begin{itemize}
\item \bibentry{ru2020revisiting} \end{itemize}
\subsection{Generalization in reinforcement learning} The remaining chapters present an analysis of the learning dynamics of RL algorithms and explore a number of applications of this analysis to representation learning and generalization in deep RL. Chapter~\ref{chp:rl-dynamics} lays down a novel \textit{learning dynamics} framework which will be leveraged throughout the remainder of the thesis, and reveals that the dynamics followed by temporal difference methods can cause the learned representation to reflect aspects of the transition structure of the environment \citep{lyle2021effect}. In Chapter~\ref{chp:rep-learning}, we explore one application of this analysis which yields a novel regularization approach, $\text{InFeR}\xspace$ \citep{lyle2021understanding}. This method enables deep RL agents to attain nontrivial return in the notoriously difficult Montezuma's Revenge game using only a naive $\epsilon$-greedy exploration algorithm. This section is based on the following papers: \begin{itemize}
\item \bibentry{lyle2021effect}
\item \bibentry{lyle2021understanding} \end{itemize}
Chapter~\ref{chp:gen-rl} explores implications of the previous chapters on generalization, providing novel analysis and insight into why value-based deep reinforcement learning often produces highly brittle agents \citep{lyle2022generalization}, as well as identifying principled approaches to remedy the tendency of deep RL methods to overfit. Chapter~\ref{chp:icp} revisits the discussion of invariance from Chapter~\ref{chp:invariance} with a new perspective grounded in causal inference, presenting a novel representation learning method to encourage generalization in some classes of MDPs by promoting invariance to spurious factors of variation in the environment \citep{zhang2020invariant, lyle2021causal}. These chapters are based on the following papers. \begin{itemize}
\item \bibentry{lyle2022generalization}
\item \bibentry{zhang2020invariant} \end{itemize}
A number of papers that I worked on during my PhD did not fit into this thesis, including the following.
\begin{itemize}
\item \bibentry{wang2021provable}
\item \bibentry{filos2021psiphi}
\item \bibentry{kossen2021self}
\item \bibentry{bellemare2019geometric}
\item \bibentry{lyle2019comparative} \end{itemize}
A discussion of contributions to joint work can be found at the end of the thesis.
The key idea driving this thesis is that properties of a network's training trajectory can tell us a great deal about how it will generalize to novel inputs. Chapters 3 and 4 ground this idea in the supervised learning regime by studying generalization between minibatches and validate its utility by developing both novel generalization bounds and practical model selection tools. Chapters 5 and 6 identify key properties of the training dynamics of reinforcement learning agents that differ from the supervised setting, and show how these properties can be both beneficial and detrimental to representation learning. Finally, Chapters 7 and 8 apply the notions of invariance and within-training-set generalization from Chapters 3 and 4 to the RL problem, leveraging the theoretical and empirical tools presented in Chapters 5 and 6 to quantify memorization and improve generalization in reinforcement learning. We will conclude with a discussion of how these ideas can be (and in some cases have already been) further leveraged in the pursuit of learning systems which can effectively learn and generalize across a range of tasks.
\chapter{Background \& literature review} \label{chp:background}
\minitoc This chapter will provide the high-level background and literature necessary to contextualize the contributions of this thesis, and set out a standard set of notation that will be used in the following chapters. Where necessary, individual chapters may also contain a background section; these sections will relay information that pertains only to the contents of the chapter that contains them.
\section{Learning frameworks} \label{bkgd:learning-frameworks} A learning algorithm can be applied to a wide variety of problems, and it is often useful to categorize learning problems based on the information available to the algorithm. We might task a learning system with identifying a mapping between input-label pairs (supervised learning), with constructing an embedding of inputs that captures relevant structure (unsupervised learning), with generating samples from some distribution (generative modelling), or with maximizing a reward signal via interaction with an environment (reinforcement learning). The types of algorithms we can deploy in each of these situations differ not only in terms of the types of outputs they produce, but also in the stability of their learning dynamics. This thesis will focus primarily on the distinction between supervised learning, where learning dynamics of gradient descent algorithms are relatively straightforward to analyze, and reinforcement learning, wherein analogues of even simple methods such as linear regression can suffer from instability and divergence.
\subsection{Supervised learning} \label{bkgd:supervised-learning} Supervised learning is concerned with characterizing a functional relationship between inputs and labels. This framework assumes that the data takes the form of input-label pairs $(\mathbf{x}, y)$ generated by sampling from some distribution $P_{\calD}$. The objective of the learning algorithm is to find a function $f$ such that $f(\mathbf{x}) = y$. The task of finding such a function is nontrivial: one must both propose a suitable class of functions over which to search, and an effective means of identifying functions from this class which are likely to capture the target relationship.
\subsubsection{Empirical risk minimization} \label{bkgd:erm} We first consider the task of identifying a candidate function $f$ with the property that $f(\mathbf{x}) = y$ on $(\mathbf{x}, y)$ pairs sampled from $P_{\calD}$, including those not seen during training. We use a loss function $\ell$ to quantify the quality of $f(\mathbf{x})$ as an approximator to $y$; we will also refer to the expectation of this quantity as the \textit{risk}. When $y$ belongs to a continuous space, we call this a \textit{regression} problem. When $y$ belongs to a finite set, we have a \textit{classification} problem.
For a class of functions $F= \{f : \mathcal{X} \to \mathcal{Y}\}$, a set $\mathcal{D}^n = (\mathbf{x}_i, y_i)_{i=1}^n \sim P_{\calD}$, and a loss function $\ell : \mathcal{Y} \times \mathcal{Y} \to \mathbb{R}$, we define the expected risk $R_{\loss}$ as \begin{align}
R_{\loss}(f) &= \mathbb{E}_{(\mathbf{X},Y)\sim P_{\calD}}[\ell (f(\mathbf{X}), Y)] \; .\label{eq:risk} \\
\intertext{ Similarly, we define the empirical risk $\widehat{R}_{\loss}$ as}
\widehat{R}_{\loss}(f, \mathcal{D}^n) &= \textstyle\frac{1}{n}\textstyle\sum_{i=1}^n \ell(f(\mathbf{x}_i), y_i) \; .\label{eq:eRisk}
\end{align}
In regression problems, $\ell$ is typically set to be the squared error $(f(\mathbf{x}) - y)^2$. In classification, it is usually the cross-entropy loss between a categorical distribution $p(\cdot | \mathbf{x})$ and the Dirac delta distribution at the label $y$. In this case, the hypothesis class $F$ will consist of mappings from inputs to \textit{distributions} over labels in $\mathcal{Y}$.
When the hypothesis class $F$ contains the true functional relationship $f$, the learning problem is realizable. However, most settings of interest are not realizable, and so we seek instead a function $f^*$ which minimizes the expected loss over the data-generating distribution, which can be expressed formally as follows. \begin{equation} f^* = \operatorname*{arg\,min}_{f \in F} R_{\loss}(f) = \operatorname*{arg\,min}_{f \in F} \mathbb{E}_{(\mathbf{X},Y)\sim P_{\calD}}[\ell (f(\mathbf{X}), Y)] \end{equation}
In practice, computing this expectation is impossible and we instead use samples to estimate its true value. The empirical risk minimization framework assumes a finite sample $(\mathbf{x}_i, y_i)_{i=1}^n = \mathcal{D}^n \sim P_{\calD}$, and proposes to find a function $f \in F$ that minimizes the empirical expectation of the loss over this sample, i.e. the empirical risk. Concretely, the function $\hat{f}$ is called the empirical risk minimizer if the following holds:
\begin{equation}
\hat{f} = \operatorname*{arg\,min}_{f \in F} \widehat{R}_{\loss}(f, \mathcal{D}^n) = \operatorname*{arg\,min}_{f \in F} \frac{1}{n} \sum_{i=1}^n \ell(f(\mathbf{x}_i), y_i) \; . \end{equation}
The empirical risk minimization principle has seen widespread application in the machine learning literature \citep{vapnik1991principles, donini2018empirical}. \citet{vapnik1968uniform} characterizes a number of appealing asymptotic properties of the empirical risk minimizer under certain conditions on the function class $F$; this analysis has formed the basis for the work on generalization bounds which will be discussed in Section~\ref{bkgd:generalization-bounds}. This principle is agnostic to the choice of function class, and does not give guidance on how to obtain a minimizer $f^*$ when such classes are too large for exhaustive search. The following discussion will focus on one such class and search procedure: neural networks trained with gradient-based optimization.
\subsubsection{Deep learning} \label{bkgd:deep-learning} Deep neural networks (DNNs) form a powerful and expressive class of function approximators \citep{raghu2017expressive}. A DNN is a parameterized function $f_{\theta}$ which consists of layer-wise computations going from input to output. Some neural architectures include recurrent connections, where the output of the network is fed back into itself as input \citep{hochreiter1997long}; this thesis will focus exclusively on feedforward architectures, where only a single pass through the network is executed in order to obtain the function outputs. Feedforward neural networks constitute a rich and widely-used class of models whose dynamics are more amenable to analysis, including fully-connected multi-layer perceptrons (MLPs), convolutional neural networks, transformers \citep{vaswani2017attention}, and ResNets \citep{he2016deep}. The function $f^k$ computed by each layer $k$ of a feedforward neural network typically consists of a linear transformation of the output of the previous layer, followed by a non-linear activation function. A variety of activations have been used in DNNs; one popular example is the Rectified Linear Unit (ReLU), of the form $\sigma(x) = \max (0, x)$. The output of a neural network can thus be expressed as a composition of layer-wise operations, \begin{equation}
f_\theta(\mathbf{x}) = f_\theta^L \circ \dots \circ f_\theta^1(\mathbf{x}) \; . \end{equation} Deep neural networks are trained using gradient-based optimization algorithms. The most fundamental of these is \textit{stochastic gradient descent}. In this setting, the data $\mathcal{D}^n$ is uniformly at random divided into minibatches of size $k$, $((\mathbf{x}_{b_1}, y_{b_1}), \dots, (\mathbf{x}_{b_k}, y_{b_k}))_{b=1}^{\lfloor n/k \rfloor}$. At each minibatch, we compute the gradient of the loss to obtain an update direction $g$ as follows, \begin{equation*}
g(\theta, \mathcal{D}_b) = -\nabla_\theta \frac{1}{k} \sum_{i=1}^k \ell(f_\theta(\mathbf{x}_{b_i}), y_{b_i})\; . \end{equation*} This yields an iterative algorithm where the parameters $\theta$ are updated according to the gradient for each successive minibatch. In most cases, we use a step-size parameter $\alpha \in (0, 1]$ to improve the stability and convergence properties of the algorithm. The iteration step typically takes the following general form: \begin{equation}
\theta_{t+1} \gets \theta_t + \alpha_t g(\theta_t, \mathcal{D}_t) \; . \end{equation} Many formulations of gradient descent allow the step size to depend on the iteration $t$; such dependence on the iteration is crucial to obtain convergence guarantees \citep{robbins1951stochastic}. Many adaptive optimization schemes \citep{duchi2011adaptive, kingma2014adam} further accumulate parameter-dependent learning rates, and allow the update direction $g(\theta_t, \mathcal{D}_t)$ to also depend on prior gradients. While these optimizers will feature in the empirical analysis that appears later, their precise form is not important to our discussion.
\subsection{Reinforcement learning} \label{bkgd:rl} Whereas the supervised learning framework seeks to model a functional relationship between two variables, the reinforcement learning framework models an agent's interaction with an environment with the goal of identifying a behaviour policy which maximizes some reward signal. We model the environment as a Markov Decision Process (MDP) $\mathcal{M} = (\mathcal{X}, \mathcal{A}, R, P, \gamma)$, where $\mathcal{X}$ denotes the state space, $\mathcal{A}$ the action space, $R:\mathcal{X} \rightarrow \mathbb{R}$ the reward function, $P:\mathcal{X} \times \mathcal{A} \rightarrow \mathscr{P}(\mathcal{X})$ the transition probability function, and $\gamma$ the discount factor. The agent obtains observation $x$ which indicates the environment's state. It may then take an action $a \in \mathcal{A}$, after which the environment outputs a new observation $x'$ and a reward $r$. The agent's objective is to maximize the cumulative discounted reward it receives over time. In \textit{finite-horizon} environments, the agent may only take a finite number of steps; in \textit{continuing} or \textit{infinite-horizon} environments, on which this thesis will predominantly focus, the agent may take an unlimited number of steps in the environment. The discount factor $\gamma$ determines the degree to which near-term rewards are preferred, resulting in the maximization target $\sum_{k=0}^{\infty} \gamma^k R_k$, called the \textit{return}. The return is a random variable that depends on the sequence of states and actions taken by the agent. The value of a state-action pair under some action-selection policy $\pi : \mathcal{X} \rightarrow \mathscr{P}(\mathcal{A})$ is equal to the expected value of the return starting at some state-action pair $(x,a)$ and following the policy $\pi$. This can be expressed by the action-value function $Q^\pi:\mathcal{X} \times \mathcal{A} \rightarrow \mathbb{R}$, defined as \begin{equation}
Q^\pi(x, a) = \mathbb{E}_{\pi, P}[\sum_{k=0}^\infty \gamma^k R(x_k)|x_0=x, a_0=a ] \;. \end{equation}
\textbf{Value-based methods} seek to learn the (resp. action-) value function $V^\pi:\mathcal{X}\rightarrow \mathbb{R}$ (resp. $Q^\pi : \mathcal{X} \times \mathcal{A} \rightarrow \mathbb{R}$) associated with some policy $\pi$ \citep{sutton2018reinforcement}. In particular, we are interested in learning the value function associated with the optimal policy $\pi^*$ which maximizes the expected discounted sum of rewards from any state. Such a value function is then straightforward to translate into an optimal behaviour policy: the agent need only take the action with the highest predicted value at each state.
At the core of value-based RL are the \emph{Bellman operators} \citep{puterman}. The Bellman policy evaluation operator $T^\pi : \mathbb{R}^{\mathcal{X}} \rightarrow \mathbb{R}^{\mathcal{X}}$ is defined with respect to a policy $\pi$ and provides a method to \textit{update} a predicted value function $V$ to more closely resemble the value $V^\pi$ of the policy $\pi$. It is defined as \begin{align*}
(T^\pi V)(x) = \mathbb{E}_{X_1 \sim P(\cdot|x,\pi(x))}[ R(x) + \gamma V(X_1)] \, . \end{align*}
Introducing a matrix notation of the transition operator $P^\pi \in \mathbb{R}^{\mathcal{X}\times\mathcal{X}}$ defined by $P^\pi[x, x'] = \sum_{a \in \mathcal{A}}\pi(a|x)P(x'|x, a)$, and the expected reward vector $R^\pi \in \mathbb{R}^{\mathcal{X}}$ defined by $R^\pi(x) = \mathbb{E}_\pi[R_0|X_0=x]$, this can be expressed even more succinctly as \begin{align*}
T^\pi V = R^\pi + \gamma P^\pi V \, . \end{align*} $T^\pi$ is a contraction on the space of value functions \citep{puterman}, and so repeated application of $T^\pi$ to any initial value function converges to $V^\pi$ \citep{bertsekas1996neuro}. For control problems, where we seek to obtain the value of an unknown optimal policy, learning requires not only estimating the value of a policy but also improving that policy to increase its expected return. In this setting, we leverage an analogous operator termed the Bellman optimality operator $T^* : \mathbb{R}^{\mathcal{X}\times\mathcal{A}} \rightarrow \mathbb{R}^{\mathcal{X}\times\mathcal{A}}$. The action of $T^*$ on value functions is defined by \begin{align*}
(T^* Q)(x,a) \!=\! \mathbb{E}_{x' \sim P(x,a)}[R(x) \!+\! \gamma \max_{a' \in \mathcal{A}} Q(x', a')] \, . \end{align*}
The Bellman optimality operator attains similar convergence guarantees as the policy evaluation operator, and can be shown in tabular state spaces to converge to the value of the optimal policy. In principle both $T^*$ and $T^\pi$ can be defined over action-value or state-value functions, but this thesis will predominantly consider the application of $T^\pi$ to value functions and $T^*$ to action-value functions. We will refer to the value $T^\pi V$ or $T^* Q$ as the \textit{Bellman target} associated with the (resp. action-) value function $V$ (resp. $Q$), where the operator in use will be clear from context.
In most settings of interest, we do not have access to the expected reward vector $R^\pi$ or the environment transition matrix $P$. As a result, value-based RL agents must use sampled transitions of the form ($x_t, a_t, r_t, x_{t+1}, a_{t+1}$) to approximate these updates. In the case of the policy evaluation operator, this sample-based approximation takes the form of the SARSA update, so called due to its use of (\textbf{S}tate-\textbf{A}ction-\textbf{R}eward-\textbf{S}tate-\textbf{A}ction) transitions. The SARSA algorithm assumes that the transition has been sampled from some fixed behaviour policy $\pi$, and estimates $Q^\pi$ by iteratively applying the update rule
\begin{equation}
Q_{t+1}(x_t,a_t) = Q_{t}(x_t,a_t) + \alpha [r_t + \gamma Q_t(x_{t+1}, a_{t+1}) - Q_t(x_t, a_t)] \;. \end{equation}
Sample-based methods will use some step size $0<\alpha < 1$ in order to average out noise in the Bellman targets due to stochasticity in the environment. The seminal Q-learning algorithm \citep{watkins1992q}, which forms the basis of many deep RL agents \citep{mnih2015human}, can similarly be viewed as approximating the iterative application of $T^*$ and related operators \citep{tsitsiklis1994asynchronous,jaakola1994convergence,bertsekas1996neuro}. Q-learning is based on the following update \begin{equation}
Q_{t+1}(x_t,a_t) = Q_{t}(x_t,a_t) + \alpha [r_t + \gamma \max_{a' \in \mathcal{A}} Q_t(x_{t+1}, a') - Q_t(x_t, a_t)] \; . \end{equation}
\textbf{Policy gradient methods} \citep{sutton2000policy} operate directly on a parameterized policy $\pi_\theta$. We let $d^\pi$ denote the stationary distribution induced by a policy $\pi$ over states in the MDP. Rather than first going to the trouble of learning a value function, policy gradient methods directly optimize the parameters $\theta$ of the policy so as to maximize the expected return $J(\pi_\theta) = \mathbb{E}_{x_0 \sim P_{\mathcal{M}}(x_0)} V^{\pi_{\theta}}(x_0)$. The gradient of this loss can be estimated from sampled trajectories when expressed as follows: \begin{equation}
\nabla_\theta J(\pi_\theta) = \mathbb{E}_{x_t, a_t \sim P(\cdot | \pi_\theta)}[\nabla_\theta \log \pi_\theta(a_t|x_t) Q^{\pi_\theta}(x_t, a_t)] \; . \end{equation} Variations on this learning rule include \textit{actor-critic} methods \citep{konda2000actor}, which use a baseline given by a value-based learner to reduce update variance, and trust-region based methods, such as Trust Region Policy Optimization \citep{schulman2015trust} and Proximal Policy Optimization (PPO) \citep{schulman2017proximal}.
\textbf{Function approximation} schemes enable RL agents to generalize their knowledge about the value or policy at one state to other states in the environment. Linear function approximation assumes state-action pairs are embedded as features $\phi(x,a) \in \mathbb{R}^d$, and some linear map $\mathbf{w} \in \mathbb{R}^d$ is used to approximate the value function $Q^\pi(x,a) \approx \langle \phi(x,a), \mathbf{w} \rangle$. This regime has been the subject of a rich literature exploring the stability and sample complexity of RL in the presence of function approximation \citep{jin2020provably, wang2020optimism, precup2001off, tsitsiklis1996analysis}.
A second, more widely-used family of algorithms involves the use of neural networks as function approximators of the form $Q_\theta: \mathcal{X} \times \mathcal{A} \rightarrow \mathbb{R}$. This is the \textit{deep RL} regime. This approach is well-suited to an array of complex tasks, ranging from systems control problems \citep{degrave2022magnetic} to video games \citep{mnih2015human}. At its core, value-based deep RL involves training a neural network to approximate the Bellman targets of a predicted value function using (semi-, see e.g. \citep{sutton2018reinforcement}) gradient descent. This approach computes a semi-gradient update direction $f(\theta)$ of the following form, where $a^* = \operatorname*{arg\,max}_{a} Q_\theta(x_{t+1}, a)$. \begin{equation}
f(\theta) = \nabla_\theta Q_\theta(x_t, a_t) \, [r_t + \gamma Q_\theta(x_{t+1}, a^*) - Q_\theta(x_t, a_t)] \;. \end{equation} Because the final layer of a neural network is usually linear, it is possible to express the output $Q_\theta(x,a)$ in the form $Q_{\theta}(x,a) = \langle \phi_{\theta'}(x,a) , \mathbf{w} \rangle$, and $\theta = \theta' \oplus \mathbf{w}$ where $\oplus$ denotes concatenation. Under this parameterization, the map $\phi_{\theta'}$ is referred to as the \textit{feature map}. This framework has been used to study the learned representations of deep reinforcement learning agents in a number of recent works \citep{kumar2021implicit, lan2022generalization, lyle2019comparative}.
\section{Generalization in supervised learning}
We now turn our attention to the principal object of interest in this thesis: generalization. The study of generalization in supervised learning problems spans decades, from the seminal work of \citet{vapnik1968uniform} to recent exciting developments in the kernel analysis of deep networks \citep{jacot2018neural} and the interpolation regime \citep{bartlett2020benign}. We will begin by presenting classical bounds on the generalization error of a learning algorithm's output. However, these classical approaches, based on quantifying the complexity of an algorithm's hypothesis class, fail to account for the generalization performance of deep neural networks, motivating more recent empirical approaches. We will conclude with an overview of the current state of the art of our understanding of generalization in deep learning.
\subsection{Generalization bounds} \label{bkgd:generalization-bounds}
The empirical risk minimization (ERM) framework puts forward the maxim: `always pick a hypothesis which minimizes the risk on the training set'. This hypothesis will not in general attain the lowest risk on the underlying data-generating distribution, however. A long line of work~\citep{vapnik1968uniform, vapnik1999nature, bousquet2002stability} has characterized upper bounds on the gap between the empirical risk of a hypothesis and its true risk, yielding \textit{generalization bounds} which provide high-probability guarantees on the expected risk of a hypothesis. Of particular interest to us will be the application of such bounds to hypothesis classes generated by neural networks \citep{baum1989size, dziugaite2017nonvacuous, bartlett2017spectrally}. While generalization bounds for any hypothesis class are almost always looser than the upper bound on the expected risk given by computing the model's validation loss on held-out data, the study of generalization bounds continues to thrive as a means of developing theoretical insight into learning algorithms, motivating their inclusion in our discussion.
\subsubsection{Formalism} Most generalization bounds in the literature share a similar structure, consisting of the sum of a hypothesis' empirical risk and a complexity measure scaled by a function (typically the inverse square root) of the number of samples. We use the notation of \Cref{bkgd:supervised-learning}, and let $f$ be a function output by some learning algorithm with access to data $\mathcal{D}^n$ of size $n$, drawn from hypothesis class $F$. We let $R_{\loss}$ and $\widehat{R}_{\loss}$ be defined as in Equations~\ref{eq:risk} and \ref{eq:eRisk} respectively. Letting $C(F)$ denote some complexity measure (for example, the logarithm of the number of functions in the hypothesis class) and $g(\cdot)$ some non-negative function defined over $\mathbb{R}$, usually $g(x) = \log(\frac{1}{x})$ or something similar, we obtain the generic form \begin{equation}
R_{\loss}(f) \leq \widehat{R}_{\loss}(f) + \sqrt{\frac{C(F) + g(\delta)}{n}} \text{ with probability $1-\delta$}. \end{equation}
The magnitude of the complexity measure $C(F)$ is crucial to the tightness of a bound. In some cases the complexity measure which bounds the expected risk may take a value so large that it dwarfs the maximal value the risk can obtain, resulting in bounds that are \textit{vacuous}. For example, a vacuous bound would guarantee that a neural network's probability of making a classification error on new samples from the MNIST dataset will be less than 500\%. In neural networks, bounds based on the margin around the decision boundary \citep{wei2019improved}, the VC dimension \citep{harvey2019nearly}, and the spectral norm of the network weights \citep{bartlett2017spectrally} all become vacuous for network architectures of the scale typically applied to popular benchmarks such as CIFAR-10 or ImageNet \citep{bartlett1997valid, dziugaite2017nonvacuous}. However, recent work has found complexity measures which \textit{can} capture a reasonable notion of simplicity on neural networks -- at least to the point where bounds are non-vacuous \citep{dziugaite2017nonvacuous}.
\subsubsection{Complexity and Occam's razor} The term $C(F)$ can be interpreted as a form of Occam's razor applied to the hypothesis class: given hypotheses drawn from two classes which attain the same empirical risk, we should prefer the hypothesis drawn from the simpler class. This simple idea drives most results in model selection and generalization in machine learning \citep{rasmussen2001occam}. However, while generalization bounds of the flavour shown above characterize the complexity of the entire class of hypotheses, one might also prefer to use Occam's razor as a tool to select hypotheses within a single class. This is the core idea behind PAC-Bayes generalization bounds \citep{mcallester1999, langford2003pac, leveretal2013tighterPACbayes, catoni}, which allow us to assign a complexity penalty that distinguishes between different hypotheses within a function class for randomized predictors. Leveraging a notion of hypothesis-level complexity is a powerful tool \citep{bartlett1997valid}, yielding some of the first non-vacuous generalization bounds for over-parameterized neural networks \citep{dziugaite2017nonvacuous}.
Occam's razor does not give us an out-of-the-box definition of simplicity, however. Two such definitions are favoured by the machine learning community to explain generalization of deep networks. The first of these is flatness of the local minima to which gradient-based optimization tends to converge \citep{hochreiter1997flat}. Much of the folk wisdom surrounding generalization in deep learning hypothesizes that the reason neural networks generalize is because stochastic gradient descent drives the learned weights towards flat regions of the loss landscape \citep{keskar2016large}. This picture is slightly complicated by the observation of \citet{dinh2017sharp} showing that sharp minima can also generalize, but nonetheless demonstrates strong predictive power in empirical evaluations \citep{jiang2020fantastic}. Optimization algorithms which deliberately add noise to the training process, such as entropy-SGD and the direct optimization of a PAC-Bayes bound \citep{chaudhari2016entropy, dziugaite2017entropy, dziugaite2018dependent}, have been shown to increase flatness and improve the tightness of some generalization bounds on neural networks. PAC-Bayes risk bounds are also deeply connected to the Bayesian notion of marginal likelihood \citep{germain2016pac}.
A second widely-used notion of simplicity is model compressibility, whereby models which can be compressed to a smaller length are said to be simpler. This is also referred to as the minimum description length (MDL) principle \citep{Akaike1998, hinton1993keeping}. Compressibility and flatness overlap significantly: parameters in a flat region of the loss landscape can be perturbed, for example by a quantization step of a compression algorithm, without significantly affecting the loss. However, the techniques used to measure the two quantities differ, with compression approaches typically being more compute-intensive \citep{zhou2018nonvacuous, ullrich2017soft}. PAC-Bayes bounds are appealing as either the flatness of minima \citep{neyshabur2018the, neyshabur2017exploring} or the compressibility \citep{zhou2018nonvacuous} notion of simplicity can be used to define the model complexity term in the bound.
\subsubsection{The overparameterized regime} At the core of the challenge of applying results from statistical learning theory to neural networks is the expressiveness of neural network function classes; the overparameterized models of recent years are capable of memorizing even uniform random labels of the data \citep{zhang2016understanding}. Uniform convergence bounds which depend on worst-case analysis are thus limited in the guarantees they can provide these networks. Indeed, \citet{nagarajan2019uniform} argue that certain forms of uniform convergence results may be fundamentally incapable of explaining generalization in deep learning, presenting a simple example of a learning problem where it is impossible to use uniform convergence guarantees to characterize the generalization performance of a neural network function class. While \citet{negrea2020defense} and \citet{bartlett2020benign} show that modified analysis can still yield uniform convergence results for overparameterized predictors in similar contexts, these results require studying either a surrogate predictor, or a restricted problem setting.
Even more intriguing has been the observation that, contrary to the received wisdom in learning theory that overparameterized models will overfit to their training data, increasing overparameterization can lead to \textit{improved} generalization \citep{neyshabur2014search}. These empirical observations have prompted the study of interpolating predictors \citep{bartlett2020benign}, which seeks to outline conditions by which a model which can attain zero empirical risk may still obtain near-optimal risk or robustness properties on the data-generating distribution \citep{bubeck2021universal, koehler2021uniform}. This work is intricately connected to the double descent phenomenon, whereby the risk of a model, when plotted against the number of parameters, exhibits two descent regions: one in the underparameterized regime, and one in the overparameterized regime \citep{advani2020high, belkin2018reconciling, nakkiran2019deep}.
A gap remains, however, in leveraging these insights to attain tight bounds on the generalization error of modern neural network architectures trained on real-world datasets. Even worse, \citet{jiang2020fantastic} show that many of the complexity measures that appear in generalization bounds are \textit{negatively} correlated with generalization. This presents a double blow to the argument that generalization bounds might provide insight into what makes neural networks generalize well: not only are such bounds too loose to give practically relevant information, but they do not even offer an accurate \textit{ranking} of models.
\subsection{Explaining generalization without bounds} Why does a given neural network generalize well or poorly? This is a question that generalization bounds cannot (currently) answer, yet is crucial to the principled development and application of neural networks to real-world datasets. A large value of a complexity measure does not entail that a learning algorithm will not generalize; it simply states that there is not sufficient information to guarantee a small generalization gap. To address questions of \textit{why} particular models generalize well, we must look to the particulars of the network architectures, training procedures, and datasets that arise from the natural world. The works discussed in this section will present such an empirical study. While sometimes similar notions of complexity to those leveraged in generalization bounds may be used, the philosophical underpinnings of this research differ fundamentally from the formal guarantees of learning theory.
\subsubsection{Scientific vs mathematical rigor}\label{sec:background-science} The scientific study of any phenomenon depends on a mixture of empirical experiments, during which data is gathered, and theory-building, where an explanation of the phenomenon is proposed \citep{popper1968logic}. A scientific theory is one that makes falsifiable predictions, which can then be tested by experiments. The generalization bounds of \Cref{bkgd:generalization-bounds} can be viewed as theory-building in a loose sense; however, the goal of a generalization bound is to make a statement that will be \textit{guaranteed} to hold with high probability, accepting that this may result in a large gap between the predicted upper bound and the empirical realization of the generalization error in some settings. This runs counter to the qualities of a good scientific theory, whose goal is to explain a phenomenon with as simple a model as possible, and to expose itself to the risk of falsification in doing so.
One notable step towards this framework of formulating and falsifying hypotheses arises from the work of \citet{jiang2020fantastic} and \citet{dziugaite2020search}, who conduct a large-scale empirical study of the correlation between \textit{complexity measures} and generalization in deep neural networks. This line of work seeks to translate the formal guarantees of generalization bounds into testable scientific theories which make predictions about which models will generalize best. The key to doing so is to treat the ranking over models given by a complexity measure as a \textit{prediction}, rather than an upper bound. If the generalization measure $C$ is higher for network A than for network B, we interpret this as a prediction that network A should generalize worse than network B stemming from the theory that a model which generalizes well must do so \textit{because} $C$ is low.
\citet{dziugaite2020search} focus on identifying experimental settings where a measure \textit{fails} to predict generalization, seeking to identify instances where the theory can be falsified. Both works show that complexity measures based on flatness, such as PAC-Bayes generalization bounds \citep{dziugaite2017nonvacuous}, are highly predictive of generalization across a range of experimental settings. However, none of the generalization measures studied by \citet{dziugaite2020search} robustly predicts generalization in neural networks under all experimental conditions, motivating further investigation of quantities which can predict -- and ideally also \textit{explain} -- generalization in neural networks.
\subsubsection{The loss landscape of neural networks} Much theoretical work studying neural networks has considered questions on the \textit{existence} of parameters that allow the network to represent a function \citep{hornik1989multilayer, raghu2017expressive, dong2020expressivity}, but less attention has been paid to the search process by which such parameters might be found. To this end, recent work studying the loss landscape of neural networks seeks to understand the properties of the optimization problem faced by neural networks in practice. These results have found that the initialization schemes used today \citep{he2015delving, glorot2010understanding} push the parameters towards a well-behaved region of the loss landscape that is relatively convex \citep{fort2019goldilocks} and induces stable learning dynamics in suitable architectures \citep{yang2017mean}. Indeed, many architecture design choices such as residual connections \citep{he2016deep} and batch normalization \citep{ioffe2015batch} that improve performance can be shown to increase the smoothness of the loss landscape \citep{li2018visualizing, santurkar2018does}, leading to minima which are flatter and should thus generalize better.
Much recent work has focused in particular on the geometry of the loss landscape around the minima found by gradient descent \citep{maddox2020rethinking}. One intriguing property identified recently is that of \textit{linear mode connectivity}, whereby the convex combination of two locally optimal parameter vectors attains a similarly low loss as the two minima from which it was derived \citep{benton2021loss}. \citet{frankle2020linear} show that stochasticity in the optimization process \textit{early} in training leads the trajectory towards disconnected regions of parameter space, while stochasticity late in training results in a perturbation within a linearly connected loss basin. This geometric perspective is consistent with prior work studying the importance of the early learning period in determining the network's ability to represent certain functions \citep{achille2018critical}. Flatness properties of local minima have also been studied in the context of Bayesian deep learning \citep{izmailov2018averaging}, highlighting the connection between flat minima and approximations of the Bayesian marginal likelihood \citep{daxberger2021laplace}.
\subsubsection{Correlates of generalization in deep learning}
Much empirical work on generalization in deep learning seeks to identify measurable quantities which correlate with generalization in neural networks \citep{neyshabur2017exploring}. This work has found such quantities in the robustness of minima to perturbations \citep{keskar2016large} and to pruning \citep{bartoldson2020generalization}. Relatedly, \citet{morcos2018importance} identify a relationship between the dependence of a network on single dimensions of activation space and generalization. \citet{neyshabur_norm-based_2015} propose that implicit norm-based regularization may contribute to the generalization performance of deep networks; some evidence suggests, however, that the mechanism by which overparameterization regularizes norm may not apply to fully general problem constructions \citep{hanin2019deep}.
Further attention has been paid to regularization methods which improve generalization such as dropout \citep{srivastava2014dropout}, data augmentation \citep{wu2020on}, batch normalization \citep{ioffe2015batch}, and weight decay \citep{arpit2017closer}. The study of early stopping \citep{duvenaud2016early} has further revealed intriguing connections between variational inference and flatness of the loss landscape. However, the precise relationship between training speed, early stopping, and generalization has yet to be fully understood: \citet{hardt2015train} provide a theoretical and empirical analysis of the relationship between training time and generalization error; in contrast, \citet{hoffer2017train} observe that longer training can improve regularization provided a suitable optimization procedure is used. This is emblematic of a broader lack of granular understanding of generalization in deep neural networks. While many correlates of generalization and techniques to improve generalization are widely used, the precise mechanisms by which they improve generalization have yet to be elucidated.
\subsubsection{Studying the learning trajectory} \label{sec:bkgd-trajectory}
The study of how networks evolve over the course of training has revealed a number of intriguing insights which promise to shed light on some of these mechanisms. A number of works have endeavoured to explain the efficacy of early stopping by arguing that neural networks learn functions of increasing complexity over the course of training \citep{rahaman2019spectral, arpit2017closer, kalimeris2019sgd}, with similar biases towards other notions of simplicity also widely observed in the mapping between parameters and the resulting output functions of neural networks \citep{valle2018deep, de2019random}.
Approximating a discrete gradient descent trajectory as a continuous-time process in the limit of infinite layer width has facilitated closed-form analysis of the trajectory of deep neural networks \citep{jacot2018neural, lee2019wide}, which has sparked a number of intriguing insights into generalization \citep{arora2019fine}, posterior sampling \citep{he2020bayesian}, and feature learning \citep{yang2021tensor}. The analysis of \citet{jacot2018neural} allows the optimization trajectory of a neural network to be modelled by a kernel gradient descent procedure with respect to a specific kernel called the neural tangent kernel (NTK). \citet{smith2020origin} and \citet{barrett2021implicit} analyze a similar continuous-time approximation of gradient descent to identify an explicit regularization term that biases gradient descent algorithms towards flatter regions of the parameter space, providing insight into the relationship between the flatness of the region of the loss landscape traversed by gradient descent and sources of stochasticity such as minibatch sizes and finite learning rates.
\subsection{Summary} As this section has shown, the question of why neural networks generalize has sparked a wide-ranging literature characterizing generalization and optimization dynamics in DNNs. In spite of this rich literature, the answer to this question remains open. Generalization bounds present a domain of great theoretical interest, but fail to \textit{explain} generalization in DNNs. Meanwhile, empirical work has identified many quantities corresponding to model complexity terms in generalization bounds that correlate with generalization \citep{jiang2020fantastic}. However, no single quantity studied thus far has been found to predict generalization performance under \textit{all} possible experimental conditions \citep{dziugaite2020search}. Complementary lines of work have shed significant insight into the optimization landscape and generalization properties of DNNs, but this picture is far from complete. A number of recent theoretical results, such as the analytic form of optimization dynamics in the infinite-width limit of DNNs \citep{jacot2018neural}, the double descent phenomenon \citep{belkin2018reconciling}, and benign overfitting \citep{bartlett2020benign}, hint at a theoretical explanation of the benefits of overparameterization for generalization, but these findings do not currently apply to modern deep learning training regimes. Similarly, empirical investigation into the loss landscapes and gradient structure in DNNs has yielded principled insights into the optimization dynamics of deep networks, but still admits a gap between theory and practice. One contribution of this thesis will be to propose a family of performance estimators based on a network's training speed which are predictive of generalization performance rankings in a number of experimental settings. 
These estimators will be motivated by a theoretical and empirical analysis linking generalization bounds, invariance, and training speed, providing a potential mechanism to explain their success.
\section{Generalization in reinforcement learning} The supervised learning regime has proven to be fertile ground for the study of generalization. However, reinforcement learning agents also benefit from accurate extrapolation to unseen inputs, and moreover existing approaches are particularly prone to overfitting \citep{zhang2018study}. Though many of the subproblems of generalization in reinforcement learning mirror those found in supervised learning \citep{cobbe2019quantifying}, the nonstationarity of the training objectives used in deep RL adds additional challenges \citep{igl2021transient} which we will discuss in this section.
\subsection{Single environment} Generalization is not always necessary for reinforcement learning: in small state spaces, for example, one may assume that all states the agent will encounter are known in advance and that the value function can be represented as a lookup table; the scope for generalization in this setting is limited to the context of learning an optimal policy under new reward functions \citep{dayan1993improving}. However, in large spaces generalization is crucial for sample efficiency, enabling faster convergence to an optimal policy and, in cases where the state space is so large that not all states will be visited during training, ensuring robust performance in previously-unseen states at evaluation time. This thesis will explore two different perspectives on generalization in a single environment: \textit{state abstractions}, which can be leveraged to develop theoretical insights into the complexity of certain classes of environments, and \textit{interference}, which studies how deep neural networks disentangle and distinguish between states.
\begin{figure}\label{fig:state_abstractions}
\end{figure}
\subsubsection{State abstractions} \label{sec:background:stateabstractions} State abstractions are used in reinforcement learning to simplify planning and to enable generalization in large or infinite state spaces; see Figure~\ref{fig:state_abstractions} for an illustration. Mathematically, a state abstraction is a mapping $\phi: \mathcal{X} \rightarrow \bar{\mathcal{X}}$, where $\bar{\mathcal{X}}$ is some simplified space (for example, $\mathbb{R}^d$). A state abstraction is useful for reinforcement learning if it captures the similarity between states in the environment. That is, $\phi(s) = \phi(s')$ if and only if $s$ and $s'$ have similar values or optimal policies. \citet{li2006towards} characterize a hierarchy of such abstractions, with the finest being a bisimulation of the original MDP and the coarsest being a partition of states according to the optimal action at each state. We will be particularly interested in the former, as our study of representation learning in Chapter~\ref{chp:icp} will focus on this class of state abstractions, known as \textit{model-irrelevance} state abstractions. The following definition is for finite state spaces, but can be generalized to continuous state spaces.
\begin{definition}\label{def:misa} A \emph{model-irrelevance state abstraction} on a Markov Decision Process $\mathcal{M} = (\mathcal{X}, \mathcal{A}, R, P, \gamma)$ is a mapping $\phi: \mathcal{X} \rightarrow \bar{\mathcal{X}}$ such that if $\phi(x_1) = \phi(x_2) = \bar{x}$, the following holds for all $x' \in \mathcal{X}$ and $\bar{x}' \in \bar{\mathcal{X}}$: \begin{enumerate}
\item $R(x_1,a) = R(x_2,a)$ $\forall a \in \mathcal{A}$
\item $\sum_{x' \in \phi^{-1}(\bar{x}')} P(x'|x_1, a) =\sum_{x' \in \phi^{-1}(\bar{x}')} P(x'|x_2, a) $, $\forall a \in \mathcal{A}$ \end{enumerate} \end{definition}
There are a range of approaches to discovering and selecting state abstractions: \citet{jong2005state} eliminate irrelevant variables from inputs where $\mathcal{X} \subset \mathbb{R}^n$; other approaches leverage statistical \citep{jiang2015abstraction} and information-theoretic \citep{abel2019state} tools to aggregate states, while \citet{taylor2008bounding} construct state abstractions based on state similarity in the MDP. State aggregation is further leveraged in algorithms for RL in rich-observation settings \citep{misra2020kinematic}.
In deep reinforcement learning, the state abstraction framework begins to merge with the feature-learning and representation-learning frameworks of deep learning. One can interpret the latent feature representation of the neural network as a state abstraction, and the remaining layers as a value function approximator. \citet{gelada2019deepmdp} and \citet{zhang2020learning} use ideas from the state abstraction literature to construct representation-learning objectives for DNNs which allow them to capture the structure of the environment. Such structure can be made explicit in the architecture of the neural network used as a function approximator to obtain guarantees on the equivariance of the learned value function to symmetries that arise in the MDP \citep{van2020mdp, van2020plannable}.
\subsubsection{Representations in deep RL}
A major challenge present in deep reinforcement learning is the sparsity of the reward signal relative to the rich structure of the environment. A broad range of auxiliary tasks have been developed over the years to overcome this sparsity in order to improve generalization and representation learning in deep RL \citep{jaderberg2016reinforcement, veeriah2019discovery, gelada2019deepmdp, machado2017eigenoption}. These contributions have in common the goal of enriching the scalar reward signal to encourage the network to pick up additional structure in the environment, either with the downstream objective of using this information later for policy improvement, or as a more stable learning signal for the network. Additional work has analyzed the geometry \citep{bellemare2019geometric} and stability \citep{ghosh2020representations} of the learned features in deep RL agents, along with their linear algebraic properties \citep{kumar2021implicit, gogianu2021spectral}.
A second challenge in the application of deep neural networks to RL arises from the nonstationarity of the learning objective. The challenge of training a network on a sequence of tasks has been studied in great depth in both reinforcement learning \citep{schaul2019ray, teh2017distral, igl2021transient} and supervised learning settings \citep{sharkey1995analysis, ash2020warm, beck2021effective}. Of particular interest has been the problem of catastrophic forgetting, with prior work proposing novel training algorithms using regularization \citep{kirkpatrick2017overcoming, bengio2013empirical, lopez2017gradient} or distillation \citep{schwarz2018progress, silver2002task, li2017learning} approaches. \citet{benjamin2018measuring} apply a function-space regularization approach, but require saving input-output pairs into a memory bank. However, in reinforcement learning the converse problem must also be considered: forward-interference, whereby networks may see deteriorating ability to solve later tasks encountered during training. Methods which involve re-initializing a new network have seen particular success at reducing interference between tasks in deep reinforcement learning \citep{igl2021transient, teh2017distral, rusu2016policy, fedus2020catastrophic}.
\subsubsection{Interference}\label{sec:bkgd-interference} Even in the absence of an explicit abstraction-learning objective, generalization between states in deep RL will naturally arise from the network's inductive bias. In many problem settings, such as in large observation spaces or procedurally generated environments, some degree of generalization is necessary in order to obtain good performance at test time \citep{kirk2021survey}. However, when an update to the predicted value of one state exerts excessive influence on the network's output at other states, this form of generalization (which we will also refer to as \textit{interference}) can lead to instability \citep{jiang2021emphatic, van2018deep}. The introduction of instability as a result of interference between states is not unique to deep RL; even in the case of \textit{linear} function approximation, off-policy temporal difference learning is not guaranteed to converge to the optimal value function \citep{baird1993advantage, tsitsiklis1996analysis}. Interference has been identified as a barrier to learning progress in both policy-based and value-based problems \citep{schaul2019ray, fedus2020catastrophic}.
There are two principal approaches by which we will quantify this notion of interference. To a first-order approximation, interference is equivalent to \textit{gradient alignment}. To formalize this concept, we let $f$ be a function which takes as input an observation $\mathbf{x}$ and a set of parameters $\theta$ (for example, $f$ might be the loss function of a neural network, or one dimension of its output). We define gradient alignment $\mathrm{I}_\nabla(\mathbf{x}, \mathbf{y}; \theta)$ (where $\nabla$ indicates that we are studying the \textit{gradient} of the loss) with respect to some loss function and network architecture parameterized by $\theta$ and taking inputs $\mathbf{x}$ and $\mathbf{y}$ as follows. \begin{equation}
\mathrm{I}_{\nabla}(\mathbf{x}, \mathbf{y}; \theta) \overset{\text{def}}{=} \langle \nabla_\theta f(\mathbf{x}; \theta), \nabla_\theta f(\mathbf{y}; \theta) \rangle \label{eq:gradint} \end{equation}
We may instead measure the effect of a gradient step computed on data point $(\mathbf{x}_1, y_1)$ on the loss of another data point $\mathbf{x}_2 \neq \mathbf{x}_1$ with corresponding target $y_2$. Letting the targets $y$ be left implicit we obtain the following, where $f$ is some function as before and $\ell$ is the optimization objective (which may or may not be equal to $f$). We let $g_\ell$ be the update computed by some optimizer which may or may not be equal to the gradient $\nabla_\theta \ell(\mathbf{x}_1, y_1; \theta)$, and which may also depend on the optimizer state $\eta$. \begin{equation}
\mathrm{I}_{\Delta}(\mathbf{x}, \mathbf{y}; \theta) \overset{\text{def}}{=} f(\mathbf{y}; \theta) - f(\mathbf{y}; \theta') \text{ where } \theta' = \theta + \alpha g_\ell (\mathbf{x}; \theta, \eta) \label{eq:deltaint} \end{equation}
Interference has been studied under a variety of names. \citet{fort2019stiffness} refer to the gradient alignment between data points (a measure related to $\mathrm{I}_\nabla$) as `stiffness', while \citet{he2019local} call a notion that more closely resembles $\mathrm{I}_\Delta$ `elasticity'.
A number of approaches which specifically reduce the effect of a gradient update for state $s$ on the target $V(s')$ have been shown to improve the stability and robustness of off-policy algorithms \citep{ghassian2020improving, lo2019overcoming}. Prior works have also endeavoured to rigorously define and analyze interference in deep RL \citep{liu2020measuring, liu2020towards, bengio2020interference}, and to study its role in the stability of offline algorithms \citep{kumar2021dr3}. Similarly, some recent methods \citep{shao2020self, pohlen2018observe} include an explicit penalty which discourages gradient updates from affecting the target values used in TD updates, though this is incidental to the principal contribution of these works. Tuning the degree of interference exhibited by the function approximator to maximize performance thus remains an open problem.
\subsection{Multiple environments} \begin{figure}
\caption{Visualization of multi-environment generalization framework with coinrun levels sampled from the ProcGen benchmark \citep{cobbe2020leveraging}.}
\label{fig:multi-environment}
\end{figure} Most standard formulations of generalization in RL do not concern generalization to unseen states within the training environment, but rather generalization to novel environments, MDPs with different observations, transition dynamics, and reward functions than those seen during training \citep{kirk2021survey}. This generalization problem in deep RL can be decomposed into two categories: generalization of the function approximator to novel observations from an MDP which follows the same underlying dynamics as the training environment, and generalization to novel tasks, where some combination of the reward function, observations, and transition dynamics are allowed to vary. Many techniques that benefit one of these categories also benefit the other \citep{packer2018genrl}. \citet{farebrother2018generalization} argue, for instance, that an agent which has learned a policy which generalizes well will be robust to changes in the difficulty of a game in addition to more superficial changes in the observation space.
\subsubsection{Formalism} We will be concerned with the generalization gap incurred by a policy trained on a set of training environments $\mathcal{E}_{{\mathrm{train}}}$ when deployed to a novel \textit{test} environment. \begin{equation}
\mathbb{E}_{\mathcal{E}_{{\mathrm{train}}}, \pi} [ \sum_{t=0}^\infty \gamma^t R(x_t, a_t)] - \mathbb{E}_{\mathcal{E}_{{\mathrm{test}}}, \pi}[ \sum_{t=0}^\infty \gamma^t R(x_t, a_t)] \end{equation} Typically $\mathcal{E}_{{\mathrm{test}}}$ will be assumed to share some structure with $\mathcal{E}_{{\mathrm{train}}}$, but the nature of this shared structure may vary between problem settings. In large observation spaces, $\mathcal{E}_{{\mathrm{test}}}$ may be equal to $\mathcal{E}_{{\mathrm{train}}}$ with a different initial state distribution, while for multi-environment problems $\mathcal{E}_{{\mathrm{train}}}$ may be homomorphic to $\mathcal{E}_{{\mathrm{test}}}$ \citep{zhang2020invariant}. In the worst case, the evaluation environments may be chosen adversarially so as to make generalization difficult; we will not consider this setting, instead focusing predominantly on test environments that share some characteristics with the training environment.
\textit{Procedurally generated} environments \citep{cobbe2020leveraging, kuttler2020nethack, samvelyan2021minihack} enable us to sample an almost-unlimited number of evaluation environments which are visually diverse but which share important structural characteristics. A visualization of frames from different procedurally-generated levels of the \textit{CoinRun} game is given in Figure~\ref{fig:multi-environment}. Being able to generate an unlimited set of environments brings us closer to the setting of supervised learning, where training and evaluation data points are sampled from the same distribution, by allowing us to sample training and evaluation \textit{environments} from the same distribution. This regime opens interesting research questions concerning the optimal sampling of environments during training so as to maximize the agent's learning progress and generalization performance \citep{jiang2021prioritized, parker2022evolving}. However, the number of training environments in these settings is typically orders of magnitude smaller than the number of training data points seen in standard supervised learning benchmarks, making generalization more challenging.
\subsubsection{Regularization} Regularization has been widely shown to improve generalization in deep learning \citep{srivastava2014dropout, krogh1991simple}, and its application to reinforcement learning tasks has a rich history extending back to even before the explosion of the deep RL paradigm \citep{farahmand2011regularization}. More recent works focus specifically on the regularization of neural networks trained on RL tasks \citep{cobbe2019quantifying, farebrother2018generalization}, and show that many regularization methods used to improve generalization in supervised deep learning, such as dropout and $\ell_2$ regularization, are also applicable to generalization in reinforcement learning. For example, \citet{schrittwieser2020mastering} use $\ell_2$ regularization in their model-based algorithm which attains state-of-the-art results in many game environments. Many recent works have further sought to leverage the benefits of data augmentation to improve generalization and robustness to input perturbations in reinforcement learning agents \citep{raileanu2021automatic, laskin2020curl,laskin2020reinforcement, hansen2021generalization}. \citet{li2021functional} use low-frequency learned Fourier features to regularize deep Q-networks towards smooth functions. Trust region-based methods further leverage a form of explicit regularization on a learned policy to stabilize learning \citep{schulman2015trust}.
The stochasticity induced by some regularization methods serves a dual purpose as a tool for exploration. \citet{igl2019generalization} use dropout masks to induce stochasticity into the exploratory policy of an agent, while \citet{fortunato2018noisy} apply a Gaussian perturbation to the network parameters. This approach is also leveraged by the Rainbow \citep{hessel2018rainbow} architecture. \citet{goyal2018transfer} use an information bottleneck architecture both to improve generalization and to generate exploration strategies.
\subsubsection{Transfer learning and meta-RL}
In some problem settings, we seek to train an agent that is capable of quickly adapting to novel environments. In the case where we wish to generalize to novel rewards under the same transition dynamics as we saw during training, this can be achieved via the \textit{successor representation} \citep{dayan1993improving, barreto2017successor}. In deep RL, we approximate the successor representation using \textit{successor features}, which have been shown to improve sample efficiency and generalization in both single-agent RL and multi-agent settings \citep{machado2017eigenoption, filos2021psiphi}. Recently, \citet{abdolshah2021new} present an approach to learning successor features which can generalize across different environments.
However, in many cases the novel environment may feature a novel observation space with different transition dynamics from those seen at training. With the advent of powerful function approximation architectures and the development of effective meta-learning algorithms \citep{finn2017model, zintgraf2019fast}, generalization across multiple environments exhibiting less shared structure may also be approached using transfer- and meta-learning techniques. \citet{teh2017distral} and \citet{schmitt2018kickstarting}, for example, use policy distillation to improve transfer learning to new tasks. Other approaches use context-dependent policies \citep{rakelly2019efficient, sodhani2021multi}, or encourage invariance across training environments \citep{zhang2020learning}.
\subsection{Summary} The study of generalization in the reinforcement learning literature has historically been restricted to the framework of \textit{state abstractions}, which consider arbitrary aggregations of states rather than explicit inductive biases on some input space \citep{li2006towards}. Some attempts have been made to define generalization with respect to input observations in single-environment RL, but these definitions run into limitations due to the nonstationary nature of RL, wherein improving prediction accuracy does not necessarily entail improved performance \citep{bengio2020interference, liu2020measuring}. One contribution of this thesis will be to present explicit definitions of interference and learning capacity in deep RL that are less dependent on the chaotic relationship between performance and prediction error, which will be used to study the evolution of learned representations in deep RL agents. We will explore both the benefits, e.g. improved robustness to input distribution shifts, and the drawbacks, e.g. decreased ability to adapt to new reward signals, of generalization between inputs. This exploration will result in algorithmic tools to accelerate learning and improve generalization in deep RL agents. While recent analysis has studied interference between different regions of the state space as a source of instability and a cause of performance plateaus, this line of work largely ignores the beneficial effects of generalization.
In contrast, the benefits of generalization are widely acknowledged in the context of multi-environment RL, where an agent is evaluated on previously-unseen environments at test time. However, the current state of the art in this regime requires prohibitive sample complexity in order to generalize well outside of their training set \citep{cobbe2019quantifying}, largely due to the weak assumptions placed on the generative process from which environments are sampled. While some approaches from the supervised learning literature, such as weight decay, dropout, and data augmentation, have been shown to improve generalization in deep RL, these approaches do not have as great an effect as in supervised learning \citep{raileanu2021automatic}. Orthogonal approaches from the meta- and transfer-learning literature have seen more success as they allow for fine-tuning on the test set, though these methods encounter similar sample complexity issues when applied to zero-shot generalization. In Chapter~\ref{chp:icp} we will consider a characterization of families of environments in which generalization from a handful of training environments is theoretically possible, and present algorithms for identifying or learning state abstractions for which an optimal policy will generalize zero-shot to any novel environment from this family.
\chapter{The role of invariance in generalization} \label{chp:invariance} \minitoc
\section{Introduction} The first object of interest in our study of learning dynamics and generalization will be \textit{invariance}. Invariance arises naturally as a part of the data generating process in many real-world datasets, such as point clouds (invariant to permutation), speech recognition (invariant to the average pitch of the speaker's voice), natural language (different sentences may be semantically equivalent), and objects in natural images (often rotation- and translation-invariant). In many instances, an invariance can be precisely characterized in terms of the action of a specified group, meaning that the input distribution is partitioned into equivalence classes, for example all rotations of an image or all permutations of elements in a set. This structure yields straightforward desiderata on the generalization properties of a model trained to fit such a data-generating distribution: learning about one element of an equivalence class should be sufficient for the model to correctly identify any other element in the equivalence class. In this sense invariance presents an idealized form of \textit{interference}: updates to an invariant model's output on one element of an equivalence class will necessarily have an identical effect across all inputs in this class.
It is not surprising then that models which capture the invariant structure of a given distribution tend to perform better than those which do not on a variety of tasks \citep{Cohen:Welling:2016, fawzi2016adaptive, salamon2017deep}. Yet while the empirical benefits of invariant models have been widely corroborated, work which seeks to explicitly quantify and understand how invariance can benefit generalization is scarce. Part of this challenge stems from the breadth of approaches by which invariance can be incorporated into a model class. One can build the invariance into the network as a convolution or weight-tying scheme, average network predictions over transformations of the input (feature averaging), or simply train on a dataset augmented with these transformations (data augmentation). Comparison of these different approaches requires different tools, as each incorporates confounding factors into the learning process which are distinct from the effect of the training method on invariance.
This chapter will employ a diverse range of techniques to gain insight into how invariance benefits generalization. The analysis presented here will lay the foundation for our study of more nebulous forms of inductive bias in Chapter~\ref{chp:supervised}, and our study of generalization in reinforcement learning in Chapters~\ref{chp:gen-rl} and \ref{chp:icp}. We will provide both an empirical and theoretical analysis contrasting neural networks trained to exhibit approximate invariance via data augmentation and networks which exhibit exact invariance via architectural design or feature averaging. Our empirical analysis will study the degree to which data augmentation yields models whose invariances generalize outside of the training set, evaluating the effect of these methods on interference between inputs in the same equivalence class. We further characterize conditions under which data augmentation will provably induce convergence to an invariant model parameterization in Section~\ref{sec:da-optim}, and study the differential effects of data augmentation and feature averaging on {interference} in Section~\ref{sec:variance-reduction}.
In our theoretical analysis, we will shed light onto the role of invariant structure in PAC-Bayes generalization bounds. Our main contribution will be a ranking of a set of PAC-Bayes bounds on models trained with data augmentation and with feature averaging, resulting in the practical conclusion that feature averaging at test time improves upon data augmentation, which in turn outperforms training on data sampled independently from $P_{\calD}$. We find that the differential effects of feature averaging and data augmentation on interference mirror those on the model complexity term in our PAC-Bayes bound. This analogy between model simplicity and interference in the setting of group-theoretic invariances grounds our further study of interference in the learning problems of Chapters~\ref{chp:supervised}, \ref{chp:gen-rl}, and \ref{chp:icp} where more nuanced inductive biases must be identified.
\section{Background} \label{sec:background-invar}
``Invariance'' has been used to describe a number of related but distinct phenomena in the machine learning and statistics literature concerning the stability of a function's output under some set of transformations of its input \citep{zou2012deep, raj2017orbit_embeddings, jaderberg2015spatial, van2018learning, chen2019invariance}. We focus on invariance under the action of a group $\mathcal{G}$, a setting shared by several prior works \citep[e.g.,][]{Cohen:Welling:2016,Kondor:Trivedi:2018,invariantdistributions}. The {action} of $\mathcal{G}$ on a set $\mathcal{X}$ is a mapping $\alpha : \mathcal{G} \times \mathcal{X} \to \mathcal{X}$ which is compatible with the group operation, i.e. ${\alpha(e, x) = x}$ and ${\alpha(g_1, \alpha(g_2, x)) = \alpha(g_1g_2, x)}$. We will abbreviate this operation as $\alpha(g,x) = gx$ when the form of $\alpha$ is not important. The {orbit} of any $x\in\mathcal{X}$ is the subset $\mathcal{G}_x$ of $\mathcal{X}$ that can be obtained by applying an element of $\mathcal{G}$ to $x$, $\mathcal{G}_x = \{ gx : g \in \mathcal{G} \}$. We note that the sets $\mathcal{G}_x$ are equivalence classes which partition the underlying set $\mathcal{X}$, and will refer to them as the orbits of $\mathcal{G}$.
For mathematical simplicity, we assume $\mathcal{G}$ to be compact, with (unique) normalized Haar measure denoted by $\lambda$.\footnote{$\lambda$ is analogous to the uniform distribution on $\mathcal{G}$ \citep{haar1933massbegriff}.} Critically, any finite group is compact under the discrete topology. For the purposes of this chapter, the reader may therefore substitute `compact' with `finite' and `Haar measure' with `uniform distribution over $\mathcal{G}$' and preserve the correctness (though not the full generality) of the theoretical results.
We denote a random element of $\mathcal{G}$ by $G$. A mapping $f : \mathcal{X} \to \mathcal{Y}$ is {invariant} under $\mathcal{G}$ (or $\mathcal{G}$-invariant) if \begin{align} \label{eq:invariant:function}
f(gx) = f(x) \;, \quad \forall \, g\in \mathcal{G},\ x \in \mathcal{X} \;. \end{align} The act of transforming an arbitrary function $f: \mathcal{X} \to \mathbb{R}$ to be $\mathcal{G}$-invariant is known as {symmetrization}, and can be performed by averaging the output of $f$ over the orbits of $\mathcal{G}$. We define this symmetrization operator $S_{\mathcal{G}}$ as follows: \begin{align} \label{eq:symmetrization:def}
\invf{f}(x) := S_{\mathcal{G}}f(x) = \mathbb{E}_{G\sim\lambda}[f(G x)] \;, \quad x \in \mathcal{X} \;. \end{align} While other symmetrization methods exist, such as taking the supremum over the group's orbit, we will always use $\invf{f}$ to refer to the \textit{average} of $f$ over the orbit of its input. We consider a typical machine learning scenario, with a training data set $\mathcal{D}^n$ of $n$ observations $(X_i, Y_i)_{i=1}^n\in (\mathcal{X},\mathcal{Y})^n$ sampled i.i.d.\ from some (unknown) probability distribution $P_{\calD}$. Additionally, $P_{\calD}$ \emph{is assumed to be $\mathcal{G}$-invariant}, i.e. \begin{align} \label{eq:invariant:dgd}
P_{\calD}(gX, Y) = P_{\calD}(X ,Y ) \;, \quad g \in \mathcal{G} \;. \end{align} For example, $X$ may be an image of an animal, $Y$ a label of the animal, and $\mathcal{G}$ the group of two-dimensional rotations.
Given a group action on the space $\mathcal{X}$, we can partition it into disjoint \textit{orbits} of the form $
\{g\Phi : g \in \mathcal{G} \}$, where $\Phi \in \mathcal{X}$ denotes an orbit representative. The $\mathcal{G}$-invariance of $P_{\calD}$ enables the decomposition of $P_{\calD}$ into a distribution over orbits $P_{\Phi}$ along with a conditional distribution $P_{X|\Phi} = \lambda(g)$ where $g\Phi = X$ \citep[see, e.g.,][]{invariantdistributions}. Letting $G \sim \lambda(\mathcal{G})$, this entails $(X,Y) \equdist (G\Phi,Y)$ and $P_{\calD} = P_{\Phi} \times P_{X | \Phi} \times P_{Y|X}$. In other words, the distribution $P_{\calD}$ can be interpreted as first sampling an equivalence class $\Phi$, then sampling an element $X$ uniformly at random from the set $\Phi$, and then sampling $Y$ based on the conditional distribution $P_{\calD}(Y|X)$.
This decomposition will play a key role in our analysis in later sections, as it will allow us to iterate expectations over $P_{\calD}$ as
\begin{align} \label{eq:iterated:expectations}
& \mathbb{E}_{(X,Y)\sim P_{\calD}}[f(X,Y)] = \mathbb{E}_{\Phi\sim P_{\Phi}}[ \mathbb{E}_{G\sim\lambda(\mathcal{G})}[\mathbb{E}_{Y\sim P_{Y|X}}[f(G\Phi,Y) \mid \Phi,G] \mid \Phi ] ] \;. \nonumber
\end{align}
This will allow us to marginalize over and condition on orbits of $\mathcal{G}$, making it possible to reason about the expected behaviour of a function within and between orbits.
\subsection{Modes of invariance}
When the data-generating distribution and target function are known to be $\mathcal{G}$-invariant, incorporating this invariance into the model class is an intuitive approach to accelerate convergence and improve generalization. This can be done in a variety of ways.
\textbf{Trained invariance} is implemented as data augmentation (DA) \citep{fawzi2016adaptive,Cubuk2018}: elements $G_{ij}$ of $\mathcal{G}$ are applied to each observation $X_i$ of the training data, with the label $Y_i$ left unchanged. The result is an augmented dataset $\mathcal{D}^n_{\mathcal{G}} = ( (G_{ij} X_i, Y_i)_{j \leq m} )_{i\leq n}$ used to minimize the augmented empirical risk \begin{equation} \label{eq:aug:risk}
\invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) = \frac{1}{n}\sum_{i=1}^n \mathbb{E}_{G\sim\lambda}[\ell(f(G X_i),Y_i) ] \end{equation} which can be approximated using samples as follows \begin{equation}\label{eq:approx:aug:risk} \invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) \approx \frac{1}{nm} \sum_{i=1}^n \sum_{j=1}^m \ell(f(G_{ij} X_i),Y_i) = \widehat{R}_{\loss}^{\widehat{\circ}}(f, \mathcal{D}^n) \; . \end{equation} DA is now a standard method in the practitioner's toolkit \citep{iyyer-etal-2014-neural,zhou2015predicting,salamon2017deep, machinehealth}, and recent theoretical work has further established connections to variance reduction methods \citep{kerneltheory}. A major benefit of data augmentation is its versatility: while our study in this chapter focuses on augmentations that can be expressed as the action of a group, data augmentation can also be used to promote other properties of the target function such as smoothness by leveraging a broader class of transformations.
\textbf{Architectural invariance} restricts the function class being learned to contain only invariant functions, typically through either feature averaging (FA) or symmetric function composition. FA symmetrizes the output of an arbitrary neural network by computing an average over $\mathcal{G}$ at one or more layers, such that the overall network is invariant under the action of $\mathcal{G}$. In practice, averaging is typically done at the penultimate or final layer, resulting in a $\mathcal{G}$-invariant network ${f}^{\circ}$. A network $f$ with $D$ layers is written as the composition of $h_D \circ \dots \circ h_1$, with the shorthand $h_{d}^{d'}$ referring to the composition of layers $d$ through $d'$. The empirical risk of a network with FA at layer $d$ evaluated on $\mathcal{D}^n$ is \begin{align*}
\widehat{R}_{\loss}(\invf{f},\mathcal{D}^n)
= \frac{1}{n}\sum_{i=1}^n \ell\big(h_{d}^D\circ \mathbb{E}_{G\sim\lambda}[ h_{1}^{d-1} (G X_i) ], Y_i \big) \;. \end{align*}
The development of \textit{invariant} or \textit{equivariant} neural network architectures enables users to avoid the potentially costly computation of this expression for a number of different group invariances. A body of literature of varying degrees of generality has developed to propose new architectures which characterize invariant function classes under a variety of group actions, and to study their properties \citep{Wood:ShaweTaylor:1996,ravanbakhsh2017equivariance,Kondor:Trivedi:2018,invariantdistributions,cohenetal2019generaltheory, Cohen:Welling:2016}.
\subsection{PAC-Bayes generalization bounds} \label{sec:pac:bayes} As discussed in Section~\ref{bkgd:generalization-bounds}, PAC-Bayes bounds characterize the risk of a randomized prediction rule; the randomization can be interpreted as a posterior distribution over functions $Q$ that may depend on $\mathcal{D}^n$, though this distribution need not correspond to the exact posterior of a Bayesian model. A PAC-Bayes bound on generalization error is typically expressed in terms of a sum of the empirical risk and the Kullback-Leibler (KL) divergence \citep{kullback1951} between $Q$ and a fixed prior distribution $P$, which as mentioned in Chapter~\ref{chp:background} can be interpreted as characterizing the \textit{complexity} of functions in the support of $P$.
We define the risk of a probability distribution $Q$ defined on a function class $F$ in the natural way, \begin{equation*}
R_{\loss}(Q) = \mathbb{E}_{f \sim Q} R_{\loss}(f) \quad \text{ and analogously } \quad \widehat{R}_{\loss}(Q) = \mathbb{E}_{f \sim Q} \widehat{R}_{\loss}(f) \; . \end{equation*}
PAC-Bayes bounds are highly correlated with generalization performance \citep{jiang2020fantastic}, and in some cases provide non-vacuous results for even highly over-parameterized neural networks \citep{dziugaite2017nonvacuous,dziugaite2018dependent,zhou2018nonvacuous}. The following is a standard bound due to \citet{catoni}, which holds for general data generating distributions $P_{\calD}$ evaluated on the 0-1 loss. \begin{theorem}[\citet{catoni}]\label{thm:catoni:bound}
Let $\mathcal{D}^n$ be sampled i.i.d. from $P_{\calD}$, and let $\ell$ be the binary 0-1 loss.
For any prior $P$ and any $\delta\in (0,1)$, with probability $1-\delta$ over samples $\mathcal{D}^n$, for all posteriors $Q$ and for all $\beta > 0$,
\begin{equation} \label{eq:catoni:bound}
R_{\loss}(Q) \leq \frac{ 1 - e^{-\beta \widehat{R}_{\loss}(Q, \mathcal{D}^n) - \frac{1}{n}(\KL{Q}{P} + \log \frac{1}{\delta})} }{1 - e^{-\beta}} \;.
\end{equation} \end{theorem}
For bounded loss functions, analogous bounds rely on a sum of the empirical risk and a function of the KL divergence \citep[see, e.g.,][]{dziugaite2017nonvacuous,dziugaite2018dependent}. We state all results only for variations of Catoni's bound \eqref{eq:catoni:bound}, but analogous findings hold for bounds of the form proposed by \citet{mcallester1999}.
\section{Invariance and optimization dynamics} \label{sec:invar-optim} We begin with an analysis of the manner in which data augmentation and feature averaging influence the optimization dynamics and resulting function output by gradient descent. This analysis will yield two main insights, both concerning the setting of convex losses: first, that while data augmentation can cause gradient descent to converge to a $\mathcal{G}$-invariant solution in certain realizable settings, in general it leads to networks that are only approximately invariant. In fact, these networks can overfit to the training dataset such that their variance over orbits of out-of-distribution data increases over the course of training. Second, we will show that feature averaging, in addition to ensuring $\mathcal{G}$-invariance, reduces the variance of both the loss and its gradients. We will relate this finding to the notion of gradient interference discussed in Section~\ref{sec:bkgd-interference}, illustrating the effect of trained and architectural invariance on interference within and between orbits of a group $\mathcal{G}$.
\subsection{Data augmentation and trained invariance} \label{sec:da-optim} To begin our study of optimization dynamics and invariance, we lay out some basic properties of the symmetrized risk $\invf{\widehat{R}}_{\loss}$ and its interaction with invariant models. When the loss function is convex, Jensen's inequality can be applied to the augmented risk to compare DA and FA risk estimates. We note that the relevant notion of convexity to this line of argument is with respect to the loss function's first argument, not the parameters of the model. Many (though not all) widely used loss functions are convex in this way (e.g., squared error, cross-entropy). We provide counterexamples highlighting the necessity of this property in Appendix~\ref{appx:counterexamples}.
\begin{restatable}{proposition}{propEmpRisk} \label{prop:empirical:risk:order}
Let $\ell : \mathbb{R} \times \mathbb{R} \to \mathbb{R}_+$ be a loss function that is convex in its first argument.
Then for any $f : \mathcal{X} \to \mathcal{Y}$,
\begin{align*}
\widehat{R}_{\loss}(\invf{f},\mathcal{D}^n) = \invf{\widehat{R}}_{\loss}(\invf{f},\mathcal{D}^n) \leq \invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) \;,
\end{align*}
and therefore analogous inequalities hold for $\widehat{R}_{\loss}(\invf{Q},\mathcal{D}^n)$, $R_{\loss}(f)$, and $R_{\loss}(Q)$.
Furthermore, if $\ell(f({\,\vcenter{\hbox{\tiny$\bullet$}}\,}),{\,\vcenter{\hbox{\tiny$\bullet$}}\,}) \in L_2(P_{\calD})$ (i.e., has finite second moment), then
\begin{align*}
\variance_{\mathcal{D}^n\sim P_{\calD}^n}\big[ \widehat{R}_{\loss}(\invf{f},\mathcal{D}^n) \big] \leq \variance_{\mathcal{D}^n\sim P_{\calD}^n}\big[ \invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) \big] \;.
\end{align*} \end{restatable} The proof of this result can be found in Appendix~\ref{apx:invar-proofs}; it follows from Jensen's inequality and the invariance of the data-generating distribution to augmentations. The beneficial effect of an invariant function on the expected risk stems from its reduced variance over orbits. This observation can be flipped to characterize the detrimental effect of incorporating an invariance in the model which is not present in the data: by mapping all elements of an orbit to the same output, the model's expected risk can be lower bounded by a function of the average variance of the true labels over orbits.
In principle, symmetrizing the risk function via data augmentation should regularize the training procedure towards invariant functions; however, in most settings it is not possible to guarantee that the learned function will be exactly $\mathcal{G}$-invariant, particularly on inputs that differ from those seen during training. Neural networks trained with data augmentation will in general exhibit low but nonzero variance over equivalence classes, and their failure to generalize this low variance to out-of-distribution data is illustrated in Figure~\ref{fig:ood}. One notable setting in which it \textit{is} possible to obtain such guarantees is linear function approximation with groups admitting a linear representation over the input space. To state the result, we define a data-generating distribution $P_{\calD}$ supported on the set $\mathcal{X} \times \mathcal{Y}$, where $\mathcal{Y} \subseteq \mathbb{R}$ and $ \mathcal{X} \subseteq V \simeq \mathbb{R}^d$ is a subset of a $d$-dimensional vector space over $\mathbb{R}$, $V$, with dual vector space $V^*$. We assume that $\mathcal{X}$ spans $V$. Furthermore, let $\mathcal{G}$ admit a linear representation \citep{serre1977linear}, $\rho : \mathcal{G} \to GL(V)$, with corresponding dual $\rho^*_g = \rho_{g^{-1}}^{\top}$. Finally, we assume that $P_{\calD}$ is invariant to the action of $\mathcal{G}$ and that $\mathbb{E}[y | \mathbf{x}] = \langle \mathbf{w}^*, \mathbf{x} \rangle$ for some unique $\mathbf{w}^*$.
\begin{restatable}{proposition}{propSymmGD}\label{theorem:symmgd}
Let $P_{\calD}$ and $\mathbf{w}^*$ be defined as above, and let $\ell(\mathbf{w}) = \mathbb{E}_{\mathbf{x}, y \sim P_{\calD}}[ (\langle \mathbf{w}, \mathbf{x} \rangle - y)^2]$. Then the (global) minimizer $\mathbf{w}^*$ of $\ell$ satisfies $\rho_g^* \mathbf{w}^*= \mathbf{w}^*$ for $\lambda$-almost all $g\in\mathcal{G}$. In particular, the function $f(\mathbf{x}) = \langle \mathbf{w}^*, \mathbf{x} \rangle$ is $\mathcal{G}$-invariant. \end{restatable} In brief, this result shows that in the case of linear models, data augmentation with respect to certain types of symmetry can induce \textit{exact} invariance in the learned function. The proof of Proposition~\ref{theorem:symmgd} can be found in Appendix~\ref{apx:invar-proofs}. It follows straightforwardly from the condition that the group act linearly on the input space along with the linearity of the function $f(\mathbf{x}) = \langle \mathbf{w}, \mathbf{x} \rangle$, as in this case the symmetrization of the parameters $\mathbf{w}$ is equivalent to the symmetrization of the function, so by Jensen's inequality any minimizer of $\ell$ must be invariant to the group action. Under suitable step-size conditions (e.g., the Robbins--Monro conditions) this proposition implies that SGD will converge to an invariant set of weights. Thus, to learn a predictor that exhibits the desired invariance on the entire dataset, it is sufficient to train with SGD on augmented data with a convex loss.
\begin{figure}\label{fig:ood}
\end{figure}
Once non-linearity is introduced into the model architecture, Proposition~\ref{theorem:symmgd} will no longer hold, and in the worst case data augmentation may simply result in the function approximator memorizing an invariance on its training set. The example depicted in Figure~\ref{fig:ood} demonstrates such a failure: the learned function appears to capture the target invariance on the training data, but, having not learned the appropriate symmetry in weight space, fails to generalize this invariance to out-of-distribution data. We measure the generalization of a learned invariance by computing the variance of the network outputs over orbits of data drawn from a dataset distinct from the training distribution. In this case, we train fully connected neural networks using DA on one of two related datasets: MNIST and FashionMNIST ($28 \times 28$ pixel black and white images of handwritten digits and clothing categories respectively), each augmented by rotations of multiples of 90 degrees. We then evaluate the variance of the outputs over orbits (rotations by 0, 90, 180, and 270 degrees) in the test set. Finally, we evaluate the two networks on orbits in the complementary dataset. In one sense the results are predictable: networks attain lower variance over orbits of within-distribution as compared to out-of-distribution data points. Somewhat surprising is the discrepancy in out-of-distribution generalization between models trained on MNIST and FashionMNIST. As the FashionMNIST inputs are more visually diverse than the MNIST inputs, one explanation of this phenomenon is that the learned invariances are capable of interpolating between training points but struggle to extrapolate, for some loose notion of interpolation and extrapolation.
\keyinsight{Data augmentation can lead to invariant parameter configurations in restricted settings, but is prone to memorize invariances on its training distribution, leading to out-of-distribution generalization failures.}
\subsection{Symmetrization} \label{sec:variance-reduction} Unlike the approximate invariance induced by data augmentation, architectural invariance will trivially generalize to any data-generating distribution. Architectural invariance may also improve computational efficiency by reducing the number of parameters needed to fit the training data.
\subsubsection{Feature averaging and Jensen's inequality}
\begin{figure*}\label{fig:acc}
\end{figure*}
Jensen's inequality suggests that incorporating FA at evaluation time will improve performance on convex losses. It does not, however, directly yield insights into whether it is worthwhile to also deploy FA during training. Feature averaging, even when performed approximately, changes the mapping between network parameters and functions, and it is not immediately obvious whether this will benefit the optimization dynamics of the learning process. One potential drawback of training with FA is that the network may not learn parameters which can attain a low loss in its absence.
\hypothesis{H1}{models trained with feature averaging will be more dependent on FA to attain a low loss compared to those trained with data augmentation. \label{hyp:variance}}
To test Hypothesis~\ref{hyp:variance}, we train a series of convolutional neural networks on an augmentation of the FashionMNIST dataset. The labels of this dataset are invariant to rotations: put simply, there is no way of rotating a shoe such that it can be mistaken for a t-shirt. We augment FashionMNIST via the group $\mathcal{G}$ of rotations in the set $\{1^\circ, \dots, 360^\circ\}$, and perform approximate FA with $k$ samples, where $k \ll |\mathcal{G}|$. We see in Figure~\ref{fig:acc} that feature averaging profoundly changes the optimization dynamics of the network. The left plot demonstrates that the model trained with FA becomes increasingly dependent on feature averaging to obtain a low loss: the loss of the network's output in the absence of FA increases during training, particularly for larger $k$ (better approximations of FA); it is only when averaging over orbits that the network attains the lowest loss. The right hand side confirms that the models trained with feature averaging exhibit a variance reduction in their gradients and a reduced test loss. The models trained with approximate FA thus converge to regions of parameter space which are distinct from those learned by data augmentation, and which do not yield accurate predictions in the absence of FA.
\subsubsection{Invariance and interference}
A hallmark of memorization occurs when the network's predictions on its training data match the target function, but its gradients are orthogonal, suggesting it is not able to use information from one input to improve its predictions on others. We therefore study the alignment of gradients, i.e. \textit{interference}, within and between orbits of $\mathcal{G}$; this will amount to generalizing the quantity $\mathrm{I}_{\nabla}$ described in Equation~\ref{eq:gradint} to apply to \textit{sets} of states. Colinearity of gradients is a much stronger notion of invariance than predictive variance over an orbit. While the latter can easily be set to zero by simple memorization, gradient alignment requires that the network correctly generalize changes in its predictions from one input to others in its orbit. The gradients of a network evaluated on inputs from the same equivalence class under FA will by necessity be colinear; we now investigate whether this property also holds in models trained with DA.
We study gradient colinearity by computing the rank of the matrix of gradients of inputs computed on a minibatch $\mathbf{X}$ of independently sampled data points, along with the rank of the matrix of gradients of inputs in a batch which contains the full orbit of a subset of these inputs $\mathbf{X}_{\Phi}$. We subsample $\mathbf{X}$ when generating $\mathbf{X}_{\Phi}$ to ensure that both batches contain the same number of inputs. Concretely, given a batch of data $\mathbf{X}, \mathbf{y}$ of size $n$ and parameters $\theta \in \mathbb{R}^d$ we are interested in the matrix $\mathbf{I}_\nabla(\mathbf{X}) \in \mathbb{R}^{n \times d}$ defined as follows \begin{equation}
[\mathbf{I}_\nabla(\mathbf{X})]_{i,j} \overset{\text{def}}{=} \nabla_{\theta_j} \ell (f_\theta(\mathbf{X}_i), \mathbf{y}_i) \end{equation} where $\ell$ is a loss function and $f_\theta$ is the function computed by the neural network with parameters $\theta$. By considering $\mathbf{X}$ sampled independently and $\mathbf{X}_{\Phi}$ containing entire equivalence classes, we can distinguish between gradient interference over the whole dataset, and gradient interference specifically along an orbit. An invariant neural network architecture will produce the same gradient for every input from a given equivalence class, and thus the rank of $\mathbf{I}_\nabla(\mathbf{X}_{\Phi})$ will be upper bounded by the number of distinct equivalence classes. A non-invariant model might in the worst case assign orthogonal gradients to elements of the same equivalence class; however, such a model could also assign colinear gradients to inputs from different equivalence classes, thus attaining a low rank on $\mathbf{X}_{\Phi}$ despite exhibiting no invariance. In this case, the network would also attain low rank on $\mathbf{X}$, giving us a means of identifying whether a low gradient rank is due to interference across or within orbits.
\begin{figure}\label{fig:cifar-invariances}
\end{figure}
In order to be robust to small perturbations of the matrix $\mathbf{I}_\nabla$, we use the numerical rank~\citep{golub1976rank} and srank~\citep{kumar2021implicit}, which count the number of (resp. normalized by the maximum) singular values of a matrix that lie above some threshold $\epsilon$. In our evaluations we set $\epsilon=0.01$ in accordance with \citet{kumar2021implicit}. Note that the singular values of $\mathbf{I}_\nabla$ are equal to the eigenvalues of the matrix whose entries are given by the dot product $\mathrm{I}_{\nabla}(\mathbf{x}_i, \mathbf{x}_j)$. We plot these values over the course of training in Figure~\ref{fig:cifar-invariances}. Our training data consists of the CIFAR-10 dataset augmented by rotations of 90 degrees and the evaluations are run on both stochastic gradient descent (SGD) and adaptive (Adam \citep{kingma2014adam}) optimizers. For computational tractability, we use a batch size of 32 and subsample parameters in the network to compute the gradient dot products. Despite the batch size being orders of magnitude lower than the number of parameters, we still find that the gradient matrices in most cases do not exhibit full row rank.
We see in Figure~\ref{fig:cifar-invariances} that networks trained with FA naturally have colinear gradients over orbits, while networks trained without FA do not. In particular, the network trained with DA does not appear to distinguish between elements from the same equivalence class and elements from different classes, at least as far as gradient alignment is concerned. Intriguingly, the network trained without data augmentation exhibits \textit{higher} gradient rank on the augmented minibatches than on the independently sampled data, exhibiting the opposite trend of the network trained with FA. In combination with Proposition~\ref{theorem:symmgd}, this suggests that while data augmentation is capable of learning invariant functions over its training set, it does not capture the same degree of invariance in its gradient structure as FA.
\section{Invariance and generalization bounds} \label{sec:data:augmentation} Our previous analysis showed that data augmentation alone is not sufficient to induce invariances in a model that generalize to arbitrary inputs. Instead, models trained with data augmentation have the potential to learn an invariance only on the training distribution, in a sense memorizing the invariant structure. We now explore whether this phenomenon is mirrored in generalization bounds. We will focus in particular on PAC-Bayes bounds, which can be decomposed into a \textit{data fit} and a \textit{model complexity} term. The previous section showed that symmetrization improves the data fit (when this term is given by a convex loss function) of a function class on invariant data-generating distributions. In this section, we will show that invariance can also reduce a notion of model complexity, and combine these two findings to show that invariance leads to lower PAC-Bayes generalization bounds. These results will be complementary to our previous analysis of the training dynamics of neural networks trained with data augmentation and feature averaging.
\subsection{Validity of the augmented risk}
The PAC-Bayes bound in Theorem~\ref{thm:catoni:bound} holds for a binary classification loss. However, the average of the 0-1 loss over an orbit will lie in the continuum, i.e. $\mathbb{E}_{G\sim\lambda}[\ell(f(GX),Y)] \in [0,1]$. Monte Carlo approximations of $\mathbb{E}_{G\sim\lambda}[\ell(f(GX),Y)]$ also violate the assumptions of Theorem~\ref{thm:catoni:bound} because the augmented data set is not identically distributed (note that sampling two elements of the same orbit in the augmented dataset will be guaranteed, whereas this event will have vanishing probability under $P_{\calD}$). Encouragingly, the following theorem shows that neither of these issues prevents us from leveraging the augmented risk in PAC-Bayes bounds.
\begin{restatable}{theorem}{thmpacbayesda}
\label{thm:pac:bayes:da}
Assume that $P_{\calD}$ is $\mathcal{G}$-invariant. Then Theorem~\ref{thm:catoni:bound} holds with $\invf{\widehat{R}}_{\loss}(Q,\mathcal{D}^n)$ as in \eqref{eq:aug:risk} substituted for $\widehat{R}_{\loss}(Q,\mathcal{D}^n)$. \end{restatable}
The proof of Theorem~\ref{thm:pac:bayes:da} can be found in Appendix~\ref{apx:invar-proofs}. The key step in obtaining this result is to define a convex function which measures the difference between $\widehat{R}_{\loss}(f)$ and $R_{\loss}(f)$ (which can be translated to a bound on the generalization gap), and then apply Jensen's inequality to show that substituting $\invf{\widehat{R}}_{\loss}$ for $\widehat{R}_{\loss}$ in this expression will yield a lower bound on this difference, which can then be used to obtain the bound of Theorem~\ref{thm:catoni:bound}. \subsection{Invariance and model simplicity} \label{sec:invar-gap} In the case of architectural invariance, particularly via FA, we can go beyond the validity of the PAC-Bayes bound and show a provable reduction in the model complexity term, resulting in a {symmetrization gap} in the PAC-Bayes bound. Given a function class $F$, we consider the set $\invf{F}$, denoting the class of $\mathcal{G}$-invariant functions obtained by symmetrizing the functions belonging to $F$. Symmetrization is a surjective map from $F$ to $\invf{F}$ by its definition. However, symmetrization is not necessarily injective, meaning that $\invf{F}$ is in a sense smaller than $F$, and so a distribution over this set should be \textit{simpler}. It is then intuitive that symmetrization should also reduce the model complexity term appearing in a generalization bound. This section will characterize these intuitions rigorously. In order to do so, we must first construct an analogous symmetrization operator on probability measures as we saw for functions in \eqref{eq:symmetrization:def}.
This is fortunately straightforward: for any probability measure $P$ on $F$, the induced probability measure on $\invf{F}$ is the image of $P$ under $S_{\mathcal{G}}$, $\invf{P} = P \circ S_{\mathcal{G}}^{-1}$. Intuitively, incorporating invariances into an architecture should reduce its complexity by forcing $P$ and $Q$ to agree on this property; this intuition is quantified in the following lemma.
\begin{restatable}{lemma}{lempushforwardKL} \label{lem:pushforward:KL}
Suppose that $(E_i,\mathcal{E}_i)$, $i=1,2$, are two measurable spaces, the second of which is standard, $\mu$ and $\nu$ are two probability measures on $(E_1,\mathcal{E}_1)$, and $\psi : (E_1,\mathcal{E}_1) \to (E_2,\mathcal{E}_2) $ is a measurable map. Then
\begin{align} \label{eq:kl:inequality:pushforward}
\KL{\mu\circ\psi^{-1}}{\nu\circ\psi^{-1}} \leq \KL{\mu}{\nu} \;.
\end{align}
Furthermore, if $\mu\ll \nu$ with density $m$, then $\mu\circ\psi^{-1} \ll \nu\circ\psi^{-1}$ with density $m_{\psi}$, and the {$\psi$-gap} is
\begin{align}
\Delta_{\psi}(\mu\ ||\ \nu) : & = \KL{\mu}{\nu} - \KL{\mu\circ\psi^{-1}}{\nu\circ\psi^{-1}} \nonumber \\
& = \int_{E_1} \mu(dx) \log \frac{m(x)}{(m_{\psi}\circ\psi)(x)} \;. \label{eq:psi-gap}
\end{align} \end{restatable}
We provide a sketch here for intuition, and defer the full proof of Lemma~\ref{lem:pushforward:KL} to Appendix~\ref{apx:invar-proofs}. The lemma leverages the chain rule of the relative entropy to decompose the KL divergence between two distributions into a component which marginalizes over equivalence classes of $\psi$ in the space $E_1$, and one which conditions on them. We will use the pushforward measure $\mu \circ \psi^{-1}$ (and analogously $\nu \circ \psi^{-1}$), defined over sets in $\mathcal{E}_2$ as $(\mu \circ \psi^{-1})(A) = \mu(\psi^{-1}(A))$, to `marginalize' over equivalence classes. Concretely, for probability measures $P$ and $Q$ on the measurable space $\mathcal{E}_1 \times \mathcal{E}_2$, under mild assumptions to guarantee that the following terms are well-defined, we have \begin{equation} \small
\KL{P(X_1,X_2)}{Q(X_1,X_2)} = \KL{P(X_1)}{Q(X_1)} + \KL{P(X_2|X_1)}{Q(X_2|X_1)} \; .\label{eq:simple-chain-rule} \end{equation}
By constructing the joint space $(\mathcal{E}_1,\mathcal{E}_2)$ to correspond to $(\invf{F}, F)$, we can design a probability measure on this space with the property that the left hand side of the equality in \eqref{eq:simple-chain-rule} will be equal to $\KL{\mu}{\nu}$, while the right hand side will consist of the sum $\KL{\mu \circ \psi^{-1}}{\nu \circ \psi^{-1}}$, the KL divergence of the pushforward measures $\mu \circ \psi^{-1}$ and $\nu \circ \psi^{-1}$, and $\Delta_{\psi}(\mu\ ||\ \nu)$, which measures the degree to which the density of $\mu$ varies over equivalence classes of $\psi$. If the density $m$ is constant wherever it is not zero, then this gap will be zero.
\textbf{The symmetrization gap.} Lemma~\ref{lem:pushforward:KL} simply states that when we compress the input space $E_1$ via a mapping $\psi$, we analogously compress probability distributions over this space and obtain a subsequent reduction in the KL divergence of the corresponding pushforward measures. The precise gap in the KL divergence has the closed form \eqref{eq:psi-gap} which measures the degree to which the measure $\mu$ varies over the equivalence classes induced by $\psi$. This indicates that symmetrization can reduce the KL divergence term in the PAC-Bayes bound \eqref{eq:catoni:bound}, as we show in the following theorem.
\begin{theorem} \label{lemma:KL:gen}
Let $\mathcal{X}$ be a compact metric space and $\mathcal{Y}$ a Polish space, $\mathcal{G}$ a group acting measurably on $\mathcal{X}$, and $F = C(\mathcal{X},\mathcal{Y})$ the class of continuous functions $\mathcal{X}\to \mathcal{Y}$.\footnote{The result can hold for other function classes $F$; the key requirement is that conditioning is properly defined in $F$ and $\invf{F}$.}
Let $Q$ and $P$ be probability measures on $F$ such that $Q \ll P$ with density $q$, and $\invf{Q}\ll\invf{P}$ (density $\invf{q}$) their images under $S_{\mathcal{G}}$ on $\invf{F}$. Then
\begin{equation*}
\KL{\invf{Q}}{\invf{P}} \leq \KL{Q}{P} \;.
\end{equation*}
Furthermore, the {symmetrization gap} is
\begin{align} \label{eq:symm:gap}
\invf{\Delta}(Q \ ||\ P) = \mathbb{E}_{f\sim Q}\bigg[ \log \frac{q(f)}{\invf{q}(S_{\mathcal{G}} f)} \bigg] \;.
\end{align} \end{theorem} \begin{proof}[Proof of Theorem~\ref{lemma:KL:gen}]
The proof of this result follows straightforwardly from Lemma~\ref{lem:pushforward:KL}; we need only show that the conditions of the lemma hold. To see this, we note that for $\mathcal{X}$ a compact metric space and $\mathcal{Y}$ a Polish space, the space $F = C(\mathcal{X},\mathcal{Y})$ of continuous functions $f : \mathcal{X} \to \mathcal{Y}$ is a Polish space, and therefore it (along with its Borel $\sigma$-algebra $\mathcal{B}(C(\mathcal{X},\mathcal{Y}))$) is a standard Borel space.
For a group $\mathcal{G}$ acting measurably on $\mathcal{X}$, the symmetrization operator $S_{\mathcal{G}} : F \to \invf{F}$ is measurable, and the product space $(F \times \invf{F}, \mathcal{B}(F)\otimes\mathcal{B}(\invf{F}))$ is a standard Borel space. Thus, the conditions of Lemma~\ref{lem:pushforward:KL} are satisfied and the result follows. \end{proof}
In practice, computing the expectation over orbits required by exact feature averaging may be intractable. Instead, one may sample a set of $k$ transformations with which to average the function output. While this will not output the exact expectation, it still takes advantage of a simplification of the function space via Lemma~\ref{lem:pushforward:KL}, by aggregating functions that have some probability of being mapped to the same approximately averaged function. To formalize the idea, let $g^k = \{g_1,g_2,\dotsc,g_k\}$ be a set of $k$ elements of $\mathcal{G}$, and $G^k$ a random realization sampled i.i.d.\ from $\lambda$. Let $S_{g^k} f(x) = k^{-1} \sum_{j\leq k}f(g_j x)$ denote the approximate symmetrization of $f$ by $g^k$. Finally, let $\invfMC{Q}_{g^k} = Q \circ S_{g^k}^{-1}$ denote the image of a distribution $Q$ on $F$ under $S_{g^k}$. The following result is a consequence of the fact that Lemma~\ref{lem:pushforward:KL} is true for every $g^k$, and that for $g^{k+1}=g^k\cup\{g_{k+1}\}$, $S_{g^{k+1}}f(x) = \frac{1}{k+1}f(g_{k+1}x) + \frac{k}{k+1}S_{g^k}f(x)$.
\begin{proposition} \label{prop:kl:chain}
Assume the conditions of Theorem~\ref{lemma:KL:gen}. Let $G_s=G_1,G_2,\dotsc$ be a sequence of elements sampled i.i.d.\ from $\lambda$. Then with probability one over $G_s$,
\begin{align*}
\KL{Q}{P} & \geq \KL{\invfMC{Q}_{G^1}}{\invfMC{P}_{G^1}} \geq \dotsb \\
& \geq \KL{\invfMC{Q}_{G^k}}{\invfMC{P}_{G^k}} \geq \dotsb \\
& \geq \KL{\invf{Q}}{\invf{P}} \;.
\end{align*} \end{proposition} As the results of Section~\ref{sec:da-optim} suggest, the interplay between SGD and symmetrization remains an open question. However, Proposition~\ref{prop:kl:chain} makes it clear that at least at evaluation time, feature averaging is preferred. Indeed, an intriguing feature of this result is that the model simplicity improvement in the PAC-Bayes bound holds even if FA was not applied during training, meaning that the final bound benefits from both the variance reduction on the empirical risk and the $\psi$-gap provided by symmetrization.
\keyinsight{Feature averaging, even when only performed approximately, reduces the model complexity term in PAC-Bayes generalization bounds. The \textit{symmetrization gap} quantifies the degree to which feature averaging simplifies the resulting function class.}
Though its exact computation will be intractable for FA in neural networks, the symmetrization gap in the theoretical bounds corresponds to real improvements of generalization, as shown by our empirical analysis in Section~\ref{sec:empirical-pacbayes}. However, the question of causality remains open. Do invariant models generalize better \textit{because} of the reduced KL penalty? Or do they generalize better and obtain better generalization bounds for some other reason? Understanding the answer to this type of question has been the focus of the long line of literature outlined in Section~\ref{sec:background-science}. It is likely that the underlying mechanism relating the two also appears in our analysis from Section~\ref{sec:da-optim}: the symmetrization gap arises because an invariant function's output on one element of an equivalence class uniquely determines its output over the whole class. This makes the class of functions computed by a given network architecture smaller, which is reflected in the reduced KL divergence. A similar notion of simplicity when FA is used during training can be seen in the gradient structure illustrated by Figure~\ref{fig:cifar-invariances}, where an update to one element of an equivalence class automatically generalizes to all other elements in that class.
\subsection{Ordering of PAC-Bayes bounds} \label{sec:ordering}
Combining the previous results yields an ordering of the PAC-Bayes generalization upper bounds. Let $B_0$ be the upper bound on the right-hand side of \eqref{eq:catoni:bound}, with $B_{\text{\rm DA}}$ and $B_{\text{\rm FA}}$ corresponding to the upper bounds for DA (using the augmented empirical risk $\invf{\widehat{R}}_{\loss}(Q,\mathcal{D}^n)$) and FA (using $\KL{\invf{Q}}{\invf{P}}$), respectively. Finally, let $B_{\text{\rm DA *}}$ denote the computationally intractable bound for DA given in Appendix~\ref{appx:tighter:pacbayes:da}.
\begin{theorem} \label{thm:bound:order}
Assume the conditions of Theorem~\ref{thm:catoni:bound}, and also that $P_{\calD}$ is $\mathcal{G}$-invariant. Then $B_{\text{\rm FA}} \leq B_{\text{\rm DA *}} \leq B_{\text{\rm DA}} = B_{0}$. \end{theorem}
The proof of Theorem~\ref{thm:bound:order} follows directly from our previous results. Importantly, this ordering on upper bounds does not imply a strict ordering on generalization error. It is nonetheless encouraging, as prior work has shown that many design choices which reduce PAC-Bayes risk bounds result in a reduced generalization gap. We investigate whether this finding holds for feature averaging in Section~\ref{sec:empirical-pacbayes}.
\subsection{PAC-Bayes bounds for invariant architectures} \label{sec:empirical-pacbayes}
We conclude with a demonstration of the effect of invariance on PAC-Bayes bounds for neural networks. We use the ModelNet10 dataset, which consists of LiDAR point cloud data for 10 classes of household objects. This dataset exhibits permutation invariance: the LiDAR reading is stored as a sequence of points defined by $(x,y,z)$ coordinates, and the order in which the points are listed is irrelevant to the class. We consider three different architectures: a PointNet-like architecture \citep{qi2017pointnet}, which is invariant to permutations; a partitioned version of the PointNet architecture which is invariant to subgroups of the permutation group (details in Appendix~\ref{appx:invar-pb}); and a fully connected model where the invariant pooling operation in the PointNet is replaced by a fully connected layer. The invariance in the network is implemented via a max-pooling layer instead of an averaging layer and so is not a direct application of feature averaging; however, the results of \eqref{eq:kl:inequality:pushforward} still apply due to the injective mapping over functions induced by max-pooling.
\begin{table}
\caption{Generalization performance for a permutation-invariant point cloud classification task (see text for details).}
\label{table:decomp}
\begin{center}
\resizebox{0.75\textwidth}{!}{
\begin{tabular}{ccccc}
\toprule
\textbf{Network} & \textbf{Train} & \textbf{Test} & \textbf{KL} & \textbf{PAC-Bayes} \\
& \textbf{Error} & \textbf{Error} & \textbf{Divergence} & \textbf{Bound} \\
\midrule
Fully connected & 0.002 & 0.65 & 24957 & 1.75 \\
Partial-PointNet & 0.172 & 0.248 & 1992 & 0.67\\
PointNet & 0.24 & 0.245 & 944 & 0.533 \\
\bottomrule
\end{tabular}
}
\end{center} \end{table} We compute the PAC-Bayes bounds following the procedure in \citet{dziugaite2017nonvacuous}: we convert a deterministic network to a stochastic network by adding Gaussian noise to the weights, and then train this stochastic model using a differentiable surrogate loss that bounds the true PAC-Bayes bound. After this training procedure converges, we then compute the PAC-Bayes bound. We attain an ordering consistent with our theoretical results: the invariant architecture attains the lowest bound, followed by the partially invariant architecture, and finally followed by the fully connected network. We provide a decomposition of the distinct terms in the bound in Table~\ref{table:decomp}. While the fully-connected architecture obtains the lowest loss on the training set, it significantly overfits to this data and we see a large generalization gap which is mirrored in the PAC-Bayes bound. In contrast, the invariant architectures exhibit a reduced PAC-Bayes bound and improved accuracy on the test set, despite obtaining a greater loss on the training set.
\section{Conclusions} \label{sec:conclusion}
This chapter has posed a simple question: how do different approaches to incorporating invariance into a model influence its generalization performance? To answer this question, we have proposed a novel PAC-Bayes generalization bound which applies to models trained with or without data augmentation. Our theoretical analysis gives the answer that invariant model classes are simpler, where this change in simplicity can be quantified by the relative `size' of the invariance relative to the size of the data generating distribution. We have further characterized settings under which gradient descent optimization will converge to solutions which satisfy the invariances exhibited by the data generating distribution.
To gain deeper insight into the interplay between invariance and optimization, this chapter further presented an empirical analysis of the effects of data augmentation and feature averaging on the optimization dynamics of neural networks. For non-convex problems, this analysis reveals that networks develop a notion of approximate invariance that holds on held-out points from the data-generating distribution, but can exhibit increasing variance on the orbits of out-of-distribution data points over the course of training, suggesting that neural networks are able to pick up approximate invariance properties that generalize well within distribution but do not correspond to architectural invariance. Our analysis in this setting has also revealed a variance reduction effect of both data augmentation and feature averaging.
The contributions of this chapter exhibit two principal limitations: we consider only model selection problems concerning the selection of exact invariances, and our generalization bounds are not sufficiently tight to be useful for model selection in most settings of interest. The former allowed us to obtain a precise characterization of the benefit of incorporating symmetries into a model class, but limited the scope of inductive bias which we could consider. The latter is a generic failing of current generalization bounds for neural networks, as outlined in Section~\ref{sec:background-science}. While realizations of these bounds were correlated with generalization in our experiments, the bounds are not intended to directly translate to a model ranking. The following chapter will address both of these issues by performing \textit{marginal likelihood estimation} rather than generalization bound computation, and will be applicable to a broad range of hyperparameter and inductive bias selection problems beyond symmetries.
\chapter{Training speed and model selection} \label{chp:supervised} \minitoc
\section{Introduction} \label{sec:introduction-supervised} \begin{figure}\label{fig:sotl-auc}
\end{figure} As we saw in the previous chapter, choosing the right inductive bias for a machine learning model, in particular incorporating {symmetries} exhibited by the data, is critical for good generalization. The problem of \emph{Bayesian model selection} concerns itself with identifying good inductive biases for a given dataset, including but not limited to symmetries. Model selection arises in both Bayesian inference, which seeks to identify a probabilistic model that best captures some distribution $P_{\calD}$, and in deep learning in the form of neural architecture search. This chapter will show that computing a measure of \textit{training speed}, the area under a model's learning curve, provides a principled approach to Bayesian model selection. Further, we will show that an analogue of this measure adapted to deep learning, the area under a network's training curve, produces competitive performance estimators for neural architecture search.
Critical to obtaining these results is our attention to the Bayesian learning paradigm, which is outlined in Section~\ref{sec:background-bayes}. In Bayesian inference, the marginal likelihood (ML) provides a principled tool for model selection. In contrast to cross-validation, for which computing gradients is cumbersome, the ML can be conveniently maximised using gradients when its computation is tractable. Unfortunately, computing the marginal likelihood for complex models such as neural networks is typically \textit{in}tractable. Workarounds such as variational inference suffer from expensive optimization of many parameters in the variational distribution and differ significantly from standard training methods for deep neural networks, which optimize a single parameter sample from initialization. A method for estimating the ML that closely follows standard optimization schemes would pave the way for new practical model selection procedures, yet remains an open problem.
The Bayesian model selection perspective is complementary to the study of PAC-Bayes generalization bounds as a tool with which to rank models \citep{dziugaite2020search}, though the two are deeply connected \citep{germain_pac-bayesian_2016}. In parallel, theoretical analysis has hinted at a connection between training speed and generalization error \citep{hardt2015train}, though the resulting generalization bounds from this analysis are only valid for extremely short training budgets. These results, while providing insight into the stability of stochastic gradient descent, do not present an \textit{explanation} (in the sense of that described in Section~\ref{sec:background-science}) of the empirical observation that models which train faster tend to generalize better. This is because the stability-based generalization bounds of \citet{hardt2015train} implicitly propose a notion of hypothesis class complexity that depends on the number of steps taken in the optimization trajectory, a measure which becomes vacuous after the first training epoch. However, \citet{jiang2020fantastic} show strong empirical performance of some measures of training speed at predicting generalization performance in DNNs, suggesting that this relationship may provide clues towards understanding generalization in deep learning.
The principal contribution of this chapter is to show that Bayesian model selection, training speed, and generalization are in fact deeply connected. To do so, we take a prequential coding perspective on the marginal likelihood, framing the log ML as the sum of predictive log likelihoods of data points, conditioned on preceding data in the dataset. This perspective reveals a family of estimators of the log ML which depend only on predictions sampled from the posterior of an iterative Bayesian updating procedure. We study the proposed estimators in the context of linear models, where we can conclusively analyze their theoretical properties. Leveraging the fact that gradient descent can produce exact posterior samples for linear models \citep{matthews2017} and the infinite-width limit of deep neural networks \citep{matthews2018gaussian,lee2018deep}, we show that this estimator can be viewed as the sum of a subset of the model's training losses in an iterative optimization procedure. This immediately yields an interpretation of marginal likelihood estimation as measuring a notion of training speed in linear models, visualized in Figure~\ref{fig:sotl-auc}.
We demonstrate the utility of this estimator through empirical evaluations on a range of model selection problems, confirming that it can effectively approximate the marginal likelihood of a Bayesian model in a manner useful for model selection. We go on to investigate whether our theoretical results for linear models may have explanatory power over generalization in deep learning. We construct and justify an analogue of our training speed estimator which can be applied to neural networks trained with stochastic gradient descent, and show that this estimator is predictive of final test accuracy in neural architecture search problems. These findings suggest that studying generalization between points on a training set can be informative of generalization to unseen data, an observation that will inspire our study of interference in reinforcement learning in Chapter~\ref{chp:gen-rl}.
\section{Background on Bayesian inference} \label{sec:background-bayes} Philosophically, Bayesian model selection addresses the question \textit{which out of this set of models is most likely to have generated my data?} This superficially resembles empirical risk minimization, which seeks to answer the question \textit{which set of {(hyper-)}parameters minimizes the loss function on my training data?} However, whereas empirical risk minimization is a principle for selecting a single predictor with the goal of minimizing its risk on the data-generating distribution, Bayesian model selection concerns itself with fitting probability distributions over parameters and corresponding functions, without explicit concern for the model's future predictions.
\subsection{Bayesian modelling}
A Bayesian model $\mathcal{M}$ is defined by a prior distribution over parameters $\theta$, $P(\theta | \mathcal{M})$, and a prediction map from parameters $\theta$ to a likelihood over the data $\mathcal{D}$, $P(\mathcal{D}|\theta, \mathcal{M})$. Parameter fitting in the Bayesian framework entails finding the posterior distribution $P(\theta|\mathcal{D})$, which yields robust and principled uncertainty estimates. Though exact inference is possible for certain models such as Gaussian processes (GPs) \citep{rasmussen2003gaussian}, it is intractable for DNNs \citep{neal2012bayesian}. Here approximate methods such as variational inference \citep{blei2017variational, gal2016dropout, blundell2015weight,mackay1992bayesian, graves2011practical, duvenaud2016early} and Laplace approximations \citep{daxberger2021laplace, mackay1998choice} are used to improve robustness and obtain useful uncertainty estimates.
Variational approximations require optimization over the parameters of the approximate posterior distribution. This optimization over distributions changes the loss landscape, and is significantly slower than the pointwise optimization used in standard DNNs. Pointwise optimization methods inspired by Bayesian posterior sampling can produce similar uncertainty estimates as variational inference, while improving computational efficiency \citep{welling2011bayesian,mandt2017stochastic,maddox2019simple}. An appealing example of this is ensembling \citep{lakshminarayanan2017simple}, which works by training a collection of models in the usual pointwise manner, starting from $k$ independently initialized parameter values.
In the case of linear models, marginalization over an ensemble of models trained with gradient descent is exactly equivalent to Bayesian inference, as this sample-then-optimize approach yields exact posterior samples \citep{matthews2017, osband2018randomized}. \citet{he2020bayesian} extend this approach to obtain posterior samples from DNNs in the infinite-width limit.
\subsection{Model selection} \label{sec:bayesian-model-selection}
In addition to finding model parameters, Bayesian inference can also perform \textit{model selection} over different inductive biases, which are specified through both model structure (e.g. convolutional vs fully connected Bayesian neural network architectures) and the prior distribution on parameters. The Bayesian approach relies on finding the posterior over models conditioned on the data $P(\mathcal{M}|\mathcal{D})$, which uses the \emph{marginal likelihood} (ML) as its likelihood function: \begin{equation}\label{eq:marg-lik}
P(\mathcal{D} | \mathcal{M}) = \int_\theta P(\mathcal{D}|\theta)P(\theta|\mathcal{M})d\theta = \mathbb{E}_{P(\theta|\mathcal{M})} [P(\mathcal{D} | \theta)] \,. \end{equation} Instead of computing the full posterior, it is common to select the model with the highest marginal likelihood. This is known as type-II maximum likelihood \citep{mackay1992bayesian,mackay2003information} and is less prone to overfitting than performing maximum likelihood over the parameters and model combined. This is because the marginal likelihood is able to trade off between model fit and model complexity \citep{rasmussen2001occam}, while addressing a problem with fewer degrees of freedom than that of parameter fitting. Maximizing the ML is standard procedure when it is easy to compute. For example, in Gaussian processes it is used to set simple model parameters like smoothness \citep{rasmussen2003gaussian}, while recent work has demonstrated that complex inductive biases in the form of invariances can also be learned \citep{van2018learning}.
For many deep models, computing Equation~\ref{eq:marg-lik} is intractable, and obtaining approximations that are accurate enough for model selection and that scale to complex models is an active area of research \citep{khan2019approximate}. In general, variational lower bounds that scale are too loose when applied to DNNs \citep{blundell2015weight} to provide useful estimates for model selection. Deep Gaussian processes provide a case where these bounds do work \citep{damianou13a,dutordoir20a}, but heavy computational load holds performance several years behind that of deep learning. While ensembling methods provide useful uncertainty estimates and improve the computational efficiency of the variational approach, they have not yet provided a solution for Bayesian model selection.
Further work on generalization in deep learning has examined training dynamics as a tool for understanding how deep neural networks generalize. Many of these works point obliquely at a connection with the marginal likelihood. For example, \citet{arora2019fine} obtain generalization bounds for shallow ReLU neural networks that bear striking resemblance to the marginal likelihood of a Gaussian process with kernel given by the neural tangent kernel \citep{jacot2018neural}. The marginal likelihood also appears in other analyses of stochastic gradient descent. \citet{duvenaud2016early} use a single run of stochastic gradient descent to approximate the marginal likelihood of a Bayesian model, and then use tools for model selection to define a criterion for early stopping. Similarly, \citet{smith2018} use the marginal likelihood to identify the optimal batch size for a learning problem, using the width of the minimum found by stochastic gradient descent as a Laplace approximation for the marginal likelihood of the model.
\section{Marginal likelihood estimation with training statistics}\label{sec:ml-estimation} In this section, we investigate the equivalence between the marginal likelihood (ML) and a notion of training speed in models trained with an exact Bayesian updating procedure. For linear models and infinitely wide neural networks, exact Bayesian updating can be performed using gradient descent optimization. For these cases, we derive an estimator of the marginal likelihood which
\begin{enumerate}
\item is related to how quickly a model learns from data, \item only depends on statistics that can be measured during pointwise gradient-based parameter estimation, and \item becomes tighter for ensembles consisting of multiple parameter samples. \end{enumerate} We also investigate how gradient-based optimization of a linear model combination can implicitly perform approximate Bayesian model selection in Appendix~\ref{sec:optimize-then-prune}.
\subsection{Training speed and the marginal likelihood} \label{sec:decomposing_ML} \label{sec:speed-and-ml}
We begin by developing intuition on the marginal likelihood. Let $\mathcal{D}$ denote a dataset of the form $\mathcal{D} = (\mathcal{D}_i)_{i=1}^n = (x_i, y_i)_{i=1}^n$, and let $\mathcal{D}_{<i}=(\mathcal{D}_j)_{j=1}^{i-1}$ with $\mathcal{D}_{<1}=\emptyset$. We will abbreviate $P(\mathcal{D}|\mathcal{M}) \equiv P(\mathcal{D})$ when considering a single model $\mathcal{M}$. We observe that $P(\mathcal{D}) = \prod_{i=1}^n P(\mathcal{D}_i|\mathcal{D}_{<i})$ to get the following form of the \textit{log} marginal likelihood:
\begin{equation}
\log P(\mathcal{D}) = \log \prod_{i=1}^n P(\mathcal{D}_i|\mathcal{D}_{<i}) = \sum_{i=1}^n \log P(\mathcal{D}_i | \mathcal{D}_{<i}) = \sum_{i=1}^n \log [\mathbb{E}_{P(\theta|\mathcal{D}_{<i})} P(\mathcal{D}_i|\theta) ]. \end{equation}
If we define training speed as the number of data points required by a model to form an accurate posterior, then models which train faster -- i.e. whose posteriors assign high likelihood to the data after conditioning on only a few data points -- will obtain a higher marginal likelihood. Interpreting the negative log posterior predictive probability $\log P(\mathcal{D}_i|\mathcal{D}_{<i})$ of each data point as a loss function, the log ML then takes the form of the sum over the losses incurred by each data point during training, i.e. the area under a training curve defined by a Bayesian updating procedure. Crucially, this notion of area under a curve differs from the number of steps required to attain a pre-specified average conditional likelihood or loss. Under the area-under-curve definition of training speed, if model $\mathcal{M}_1$ trains faster than $\mathcal{M}_2$, then for some accuracy $\epsilon$ the number of steps required for $\mathcal{M}_1$ to attain average conditional log likelihood $\epsilon$ will be lower than for $\mathcal{M}_2$. There is no fixed value of $\epsilon$ under which this statement will hold for all possible model pairs; instead, the area under a training curve can be thought of as averaging out many values of $\epsilon$ to arrive at a ranking.
\keyinsight{The marginal likelihood measures the degree to which a posterior update on each data point increases the likelihood of not-yet-seen data points. It can be computed by taking the area under the loss curve of a particular updating procedure, yielding a notion of \textit{training speed}.} \subsection{Unbiased estimation of a lower bound} \label{sec:LB} \label{sec:unbiased-estimation}
In practice, computing $\log P(\mathcal{D}_i|\mathcal{D}_{<i})$ may be intractable, necessitating approximate methods to estimate the model evidence. In our analysis, we are interested in estimators of $\log P(\mathcal{D})$ computed by drawing $k$ samples of $\theta \sim P(\theta|\mathcal{D}_{<i})$ for each $i=1, \dots, n$. We can directly estimate a lower bound $\mathcal{L}(\mathcal{D}) = \sum_{i=1}^n\mathbb{E}_{P(\theta|\mathcal{D}_{<i})}[\log P(\mathcal{D}_i|\theta)]$ using the log likelihoods of these samples, yielding the estimator \begin{equation}
\hat{\mathcal{L}}(\mathcal{D}) = \sum_{i=1}^n \frac{1}{k}\sum_{j=1}^k\log P(\mathcal{D}_{i}|\theta^i_j). \end{equation}
This will produce a biased estimate of the log marginal likelihood due to Jensen's inequality. We can get a tighter lower bound by first estimating $\mathbb{E}[\log P(\mathcal{D}_i|\theta)]$ using our posterior samples before applying the logarithm, obtaining
\begin{equation}\label{eq:estimators}
\hat{\mathcal{L}}_k(\mathcal{D}) = \sum_{i=1}^n \log \frac{1}{k}\sum_{j=1}^k P(\mathcal{D}_{i}|\theta^i_j). \end{equation} The two estimators coincide when $\mathcal{L}$ is estimated with a single sample and $\hat{\mathcal{L}}_k$ is evaluated at $k=1$; however, we consider them separately in the following proposition due to their disagreement on larger sample sizes. \begin{restatable}{proposition}{PropLk}\label{prop:lk} Both $\hat{\mathcal{L}}$ and $\hat{\mathcal{L}}_k$ as defined in Equation~\ref{eq:estimators} are estimators of lower bounds on the log marginal likelihood; that is \begin{equation}
\mathbb{E}[\hat{\mathcal{L}}(\mathcal{D})] = \mathcal{L}(\mathcal{D}) \leq \log P(\mathcal{D}) \quad \text{ and } \quad
\mathbb{E}[\hat{\mathcal{L}}_k(\mathcal{D})] = \mathcal{L}_k(\mathcal{D}) \leq \log P(\mathcal{D}) \; . \end{equation} Further, the bias term in $\mathcal{L}$ can be quantified as follows.
\begin{equation}\mathcal{L}(\mathcal{D}) = \log P(\mathcal{D}) - \sum_{i=1}^n \KL{P(\theta | \mathcal{D}_{<i})}{P(\theta|\mathcal{D}_{<i+1})} \label{eq:lb-decomp} \end{equation} \end{restatable} The proof of this result follows from a straightforward application of Jensen's inequality and can be found in Appendix~\ref{sec:proofs-supervised}. We observe that both lower bound estimators exhibit decreased variance when using multiple posterior samples; however, $\hat{\mathcal{L}}_k$ also exhibits decreasing bias (with respect to the log ML) as $k$ increases; each $k$ defines a distinct lower bound $\mathcal{L}_k = \mathbb{E}[\hat{\mathcal{L}}_k ]$ on $\log P(\mathcal{D})$. The gap induced by the lower bound $\mathcal{L}(\mathcal{D})$ is characterized by the information gain each data point provides to the model about the posterior, as given by the KL divergence between the posterior at iteration $i$ and the posterior at iteration $i+1$. Thus, while $\mathcal{L}$ has a Bayesian interpretation it is arguably more closely aligned with the minimum description length notion of model complexity \citep{hinton1993keeping}. Increasing $k$ in $\mathcal{L}_k$ thus allows us to interpolate between the minimum description length and marginal likelihood maximization principles.
When the posterior predictive distribution of our model is Gaussian, we consider a third approach which, unlike the previous two methods, also applies to noiseless models\footnote{Note that the log probability $\log P(\mathcal{D}_i | \theta^i_j)$ will not necessarily be well-defined for noiseless models and so the previous estimators cannot be naively applied.} given in Equation~\ref{eq:l-s}. We shift focus slightly to the regression setting, where each data point is in the form of an input-label pair $(X, y)$, such that $\mathcal{D}^n =(X_i, y_i)_{i=1}^n$, and let $(\theta^i_j)_{j=1}^k$ be $k$ parameter samples from $P(\theta|\mathcal{D}_{<i})$. We assume some uniform distribution over inputs $X$ and a structured conditional distribution consisting of a mapping $f: \Theta \times X \rightarrow Y$ which given a set of parameters and an input induces a Gaussian likelihood $P(\cdot | \theta, X) = \mathcal{N}(f(\theta, X), \sigma_N^2)$ for some variance $\sigma_N^2$. The likelihood of a given data point $\mathcal{D}_i = (x_i,y_i)$ under the model thus takes the form \begin{equation*}
P_{\mathcal{M}}(\mathcal{D}_i | \mathcal{D}_{<i}) = \int_{\theta} \mathcal{N}(y_i|f(\theta, X_i), \sigma_N^2) P(\theta|\mathcal{D}_{<i})d\theta \;. \end{equation*} We can then obtain the following proposition, which characterizes an estimator of a lower bound on $\log P(\mathcal{D})$.
\begin{restatable}{proposition}{PropLS}\label{prop:ls}
Let $P(Y_i|\mathcal{D}_{<i}, X_i) = \mathcal{N}(\mu_i, \sigma^2_i)$ for some $\mu_i, \sigma_i^2$. Define the standard mean and variance estimators $\hat{\mu}_i = \frac{1}{N} \sum_{j=1}^N f(\theta^i_j, x_i)$ and $\hat{\sigma}^2_i = \frac{1}{N-1} \sum_{j=1}^N (f(\theta_{j}^i, x_i) - \hat{\mu}_i)^2$. Then the estimator \begin{equation}\label{eq:l-s}
\hat{\mathcal{L}}_S(\mathcal{D}) = \sum_{i=1}^n \log P(Y_i|\hat{\mu}_i, \hat{\sigma}^2_i) \end{equation} is a lower bound on the log ML: i.e. $\mathbb{E}[\hat{\mathcal{L}}_S(\mathcal{D})] \leq \log P(\mathcal{D}) $. \end{restatable} The proof of this result leverages the independence of the sample mean and variance in order to iteratively apply Jensen's inequality to the estimator, and can be found in Appendix~\ref{sec:proofs-supervised}. We provide an empirical evaluation of the rankings provided by the different estimators in Section~\ref{sec:BMS}. We find empirically that $\hat{\mathcal{L}}_S$ exhibits the least bias in the presence of limited samples from the posterior, though we emphasize its limitation to Gaussian posteriors; for more general posterior distributions, $\hat{\mathcal{L}}_k$ minimizes bias for large $k$ while still estimating a lower bound.
\subsubsection{Lower bounds via gradient descent trajectories} \label{sec:lower-bounds-gd}
The bounds on the marginal likelihood we introduced in Section~\ref{sec:unbiased-estimation} required samples from the sequence of posteriors $P(\theta|\mathcal{D}_{<i})$ as data points were incrementally added. However, such bounds were agnostic to the procedure by which the posterior was sampled. We now draw a connection between Bayesian model selection and gradient descent on linear model classes via a result of \citet{matthews2017}: ensembles of linear models trained with gradient descent yield samples from the model posterior. In particular, we show that we can use these samples to estimate the log ML using the estimators introduced in Section~\ref{sec:unbiased-estimation}.
We will consider the Bayesian linear regression problem of modelling data $\mathcal{D} = (x_i, y_i)_{i=1}^n$ assumed to be generated by the process $Y = \theta^\top \Phi(X) + \epsilon$, with $\epsilon \sim \mathcal{N}(0, \sigma_N^2 I)$, for some unknown $\theta$, known $\sigma_N^2$, and feature map $\Phi$. Typically, a Gaussian prior is placed on $\theta$; this prior is then updated as data points are seen to obtain a posterior over parameters. In the overparameterized, noiseless linear regression setting, \citet{matthews2017} show that the distribution over parameters $\theta$ obtained by sampling from the prior on $\theta_0$ and running gradient descent to convergence on the data $\mathcal{D}_{<i}$ is equivalent to sampling from the posterior conditioned on $\mathcal{D}_{<i}$. \citet{osband2018randomized} extend this result to posteriors which include observation noise $\sigma^2_N \neq 0$ under the assumption that the targets $y_i$ are themselves noiseless observations.
\begin{algorithm} \SetAlgoLined \KwIn{A dataset $\mathcal{D} =(x_i, y_i)_{i=1}^n $, parameters $\mu_0, \sigma_0^2, \sigma_N^2$} \KwResult{An estimate of $\mathcal{L}(\mathcal{D})$} $\theta_t \gets \theta_0 \sim \mathcal{N}(\mu_0, \sigma_0^2)$; \quad
$\tilde{Y} \gets Y + \epsilon$, $\epsilon \sim \mathcal{N}(0, \sigma_N^2)$; \quad sumLoss $\gets$ 0 \; $\ell(\mathcal{D}_{\le i}, \theta) \gets \|\tilde{Y}_{\le i} - \theta^\top X_{\le i} \|_2^2 + \frac{\sigma_N^2}{\sigma_0^2}\|\theta - \theta_0\|_2^2 $\;
\For{$\mathcal{D}_i \in \mathcal{D}$}{
sumLoss $ = $ sumLoss $ + \; \frac{(\theta_t^\top x_i - y_i)^2}{2\sigma_N^2}$ \;
$\theta_t \gets$ GradientDescent($ \ell, \theta_t, \mathcal{D}_{\le i}$) \;
}
\KwRet sumLoss
\caption{Marginal likelihood estimation for linear models}
\label{alg:estimate} \end{algorithm}
We can use this procedure to obtain posterior samples for our estimators by iteratively running sample-then-optimize on the sets $\mathcal{D}_{<i}$. Algorithm \ref{alg:estimate} outlines our approach, which executes gradient descent optimization on iterative subsets of the data to obtain the necessary posterior samples for our estimator. We note that the GradientDescent subroutine in Algorithm~\ref{alg:estimate} will in idealized settings output the cluster point of the gradient descent algorithm under decreasing step sizes to give an exact posterior sample; in practice a finite step size may be used and this output will only be an approximate sample from the posterior. Theorem \ref{thm:sto} shows that this procedure yields an unbiased estimate of $\mathcal{L}(\mathcal{D})$ when posterior samples are used to estimate $\mathbb{E} [\log P(\mathcal{D}_i|\theta)]$, and an unbiased estimate of $\mathcal{L}_k(\mathcal{D})$ when an ensemble of $k$ models are trained in parallel to estimate $\mathbb{E}[P(\mathcal{D}_i | \theta)]$.
\begin{restatable}{theorem}{ThmSTO} \label{thm:sto}
Let $\mathcal{D} = (X_i, Y_i)_{i=1}^n$ and let $(\theta_j^i)_{i,j=1}^{n,J}$ be generated by the procedure outlined in Algorithm~\ref{alg:estimate}. Then the estimators $\hat{\mathcal{L}}, \hat{\mathcal{L}}_S,$ and $ \hat{\mathcal{L}}_k$, applied to the collection $(\theta_j^i)$, are lower bounds on $\log P(\mathcal{D})$. Further, expressing $-\log P(\mathcal{D}_i|\theta)$ as the $\ell_2$ regression loss plus a constant, we then obtain \begin{equation}
\log P(\mathcal{D}) \geq \sum_{i=1}^n \mathbb{E}_{\theta_i \sim P(\cdot | \mathcal{D}_{<i})}[\log P(\mathcal{D}_i|\theta_i)] = \mathbb{E}\sum_{i=1}^n -\ell_2 (\mathcal{D}_i, \theta_i) + c = \mathcal{L}(\mathcal{D}) \end{equation} \end{restatable}
We highlight that Theorem \ref{thm:sto} precisely characterizes the lower bound on the marginal likelihood as a sum of training losses based on the regression loss $\ell_2(\mathcal{D}_i, \theta_i)$ when the likelihood $P(\mathcal{D}_i | \theta)$ is a Gaussian.
\subsubsection{Infinite-width neural networks}
\label{sec:ntk-ml} Beyond linear models, our estimators can further perform model selection in the infinite-width limit of neural networks. Using the optimization procedure described by \citet{he2020bayesian}, we can obtain an exact posterior sample from a GP with kernel equal to the NTK. The iterative training procedure described in Algorithm~\ref{alg:estimate} will thus yield a lower bound on the marginal likelihood of this GP using sampled losses from the optimization trajectory of the neural network. We evaluate this bound in Section \ref{sec:BMS}, and formalize this argument in the following corollary. \begin{restatable}{corollary}{CorNTK}\label{cor:ntk}
Let $\mathcal{D}^n$ be a dataset indexed by our standard notation. Let $f_0$ be sampled from an infinitely wide neural network architecture $\mathcal{F}$ under some initialization distribution, and let $f_\infty^i$ be the limiting solution under the training dynamics defined by \citet{he2020bayesian} applied to the initialization $f_0$ and using data $\mathcal{D}^n_{< i}$. Let $K_\infty$ denote the neural tangent kernel for $\mathcal{F}$, and $\mathcal{M}=\mathrm{GP}(\mathbf{0}, K_\infty)$ the induced Gaussian Process. Then $f_\infty^i \sim P(f|\mathcal{D}^n_{< i}, \mathcal{M})$, and in the limit of infinite training time, the iterative sample-then-optimize procedure yields an unbiased estimate of $\mathcal{L}(\mathcal{D}^n |\mathcal{M})$. Letting $\ell_2$ denote the scaled squared $\ell_2$ regression loss and $c$ be a constant, we obtain as a direct corollary of Theorem~\ref{thm:sto} \begin{equation}
\log P(\mathcal{D}) \geq \sum_{i=1}^n \mathbb{E}_{f_\infty^i \sim P(\cdot | \mathcal{D}_{<i})}[\log P(\mathcal{D}_i|f_{\infty}^i)] = \mathbb{E}\sum_{i=1}^n -\ell_2 (\mathcal{D}_i, f^i_{\infty}) + c = \mathcal{L}(\mathcal{D}) \; . \end{equation} \end{restatable}
It is natural to ask if such a Bayesian interpretation of the sum over training losses can be extended to non-linear models trained with stochastic gradient descent. Although SGD lacks the exact posterior sampling interpretation of our algorithm, we conjecture a similar underlying mechanism connecting the sum over training losses and generalization. Just as the marginal likelihood measures how well model updates based on previous data points generalize to a new unseen data point, the sum of training losses in a stochastic gradient descent trajectory measures how well parameter updates based on one minibatch generalize to the rest of the training data. If the update generalizes well, we expect to see a sharper decrease in the training loss, i.e. for the model to train more quickly and exhibit a lower sum over training losses. This intuition can be related to the notion of `stiffness' proposed by \citet{fort2019stiffness}. We provide empirical evidence supporting our hypothesis in Sections \ref{sec:ts-interference} and \ref{sec:DNN_exp}; for now, we focus on the utility of this estimator in Bayesian model selection.
\subsection{Empirical evaluation} \label{sec:BMS} Section \ref{sec:ml-estimation} focused on two key ideas: that online training statistics can be used in an estimator of a Bayesian model's marginal likelihood (or a lower bound thereof), and that gradient-based optimization can produce the samples needed for this estimation problem. We further conjectured that similar phenomena may also hold for deep neural networks. We now illustrate these ideas in a range of settings. Section \ref{sec:BMS} provides confirmation and quantification of our results for linear models, the model class for which we have theoretical guarantees, while Section \ref{sec:DNN_exp} provides preliminary empirical confirmation that the mechanisms at work in linear models also appear in DNNs.
While we have shown that our estimators correspond to lower bounds on the marginal likelihood, in order to be useful for model selection we also need the rankings given by the estimators to agree with those assigned by the marginal likelihood. We first compare the relative rankings given by the true marginal likelihood with those given by our estimators on a simple feature selection task, consisting of 15 informative features $\phi_1, \dots, \phi_{15}$ and 15 features containing random noise $\phi_{16}, \dots, \phi_{30}$. Each model $\mathcal{M}_{i}$ uses features $\phi_1, \dots, \phi_i$ in its regression objective and ignores the rest. Full experiment details, along with a description of two additional model selection problems evaluated in Appendix~\ref{sec:optimize-then-prune}, can be found in Appendix~\ref{sec:ex_ms_blr_synthetic_data}. Naturally, the optimal model is $\mathcal{M}_{15}$, which uses all informative features and no extraneous ones. We compare $\mathcal{L}_S$, $\mathcal{L}$ and $\mathcal{L}_k$ to see whether each can identify the optimal model. We first observe that all methods agree on the optimal model: this is a consistent finding across all of the model selection tasks we consider. While all methods lower bound the log marginal likelihood, $\mathcal{L}_k(\mathcal{D})$ and $\mathcal{L}_S(\mathcal{D})$ exhibit a reduced gap compared to the naive lower bound. In the rightmost plot of Figure~\ref{fig:lse-estimator}, we further quantify the reduction in the bias of the estimator $\mathcal{L}_k(\mathcal{D})$ described in Section~\ref{sec:unbiased-estimation}. We use exact posterior samples (which we denote in the figure simply as posterior samples) and approximate posterior samples generated by the gradient descent procedure outlined in Algorithm~\ref{alg:estimate} using a fixed step size and thus inducing some approximation error. 
We find that both sampling procedures exhibit decreasing bias as the number of samples $k$ is increased, with the exact sampling procedure exhibiting a slightly smaller gap than the approximate sampling procedure.
\begin{figure}\label{fig:lse-estimator}
\end{figure}
\begin{figure}\label{fig:ntk_evaluation}
\end{figure}
We further illustrate how our estimator of $\mathcal{L}(\mathcal{D})$ can select inductive biases in the infinite-width neural network regime in Figure~\ref{fig:ntk_evaluation}. The left hand side of the figure shows the marginal likelihoods of NTK-GP models for a fully-connected MLP architecture given different prior variances, illustrating that the sample-based estimator $\hat{\mathcal{L}}$ does indeed give a lower bound which becomes more accurate as the number of samples grows. In the right hand side of the figure, we evaluate the relative change in the log ML of a Gaussian Process induced by a fully-connected MLP (MLP-NTK-GP) and a convolutional neural network (Conv-NTK-GP) which regress on the MNIST dataset. The fully-connected model sees a consistent decrease in its log ML with each additional data point added to the dataset, whereas the convolutional model sees the incremental change in its log ML become less negative as more data points are added, as well as a less negative incremental decrease in the log ML at the start of training. This leads to the Conv-NTK-GP having a higher value of $\mathcal{L}(\mathcal{D})$, and a higher log marginal likelihood, than the MLP-NTK-GP. This provides reassurance that the estimators we have proposed behave reasonably in simple architecture selection problems.
\section{Training speed and interference} \label{sec:ts-interference} Having revealed that statistics from a particular gradient descent training procedure can yield an accurate estimator of the log marginal likelihood, we now turn our attention towards generalizing these findings to the setting of multi-epoch stochastic gradient descent optimization in deterministic function approximators. We begin by relating the posterior predictive likelihoods used in our estimator to the agreement between the gradients of disjoint minibatches (i.e. interference exhibited by the function approximator) in a dataset. This motivates an analogous estimator for SGD trajectories, which is highly correlated with generalization performance.
\subsection{Generalization and multi-epoch optimization} \label{sec:theory_multi_epoch} We have thus far studied the relationship between training speed and the marginal likelihood in the context of Bayesian models updated iteratively on successive elements of the training data. This has yielded an effective model selection tool based on computing the area under a training curve for specific updating procedures. Translating this approach to the optimization methods used to train deep neural networks faces two major hurdles: first, although it is possible to train neural networks to approximate a Bayesian posterior over weights \citep{neal2012bayesian}, standard training schemes deal only with a deterministic initialization and so do not directly correspond to probabilistic models. It is possible to argue that many stochastic optimization algorithms sample from an approximate Bayesian posterior \citep{welling2011bayesian, mandt2017stochastic}, but this argument only highlights the second hurdle facing our estimator: most optimization schemes used in practice run over a fixed dataset for several epochs. In contrast, our estimator requires an iterative training procedure whereby each data point is sequentially added to the training set, and we never evaluate the loss on revisited data points.
However, independent of the relationship between gradient descent and Bayesian posterior sampling, the same intuition relating training speed to the marginal likelihood hints at a mechanism by which training speed correlates with generalization error in the gradient descent setting. Recall that the marginal likelihood can be written as the sum
\begin{equation}
\log P(\mathcal{D}) = \sum_{i=1}^n \log P(\mathcal{D}_i | \mathcal{D}_{<i}). \end{equation}
Now consider the term $\log P(\mathcal{D}_i | \mathcal{D}_{<i})$. This quantity characterizes how accurately the model predicts $\mathcal{D}_i$ given that it already knows the values of $\mathcal{D}_{<i}$. The change in the log conditional probability of a data point $\mathcal{D}_{i+1}$ after conditioning on data point $\mathcal{D}_i$ thus presents a Bayesian analogue of interference as defined in \eqref{eq:deltaint}. We now investigate whether interference in minibatch optimization might provide a means of leveraging the insights described for Bayesian models in a broader class of learning algorithms.
We first establish notation. We denote data $\mathcal{D}^n = \{(x_i, y_i)\}_{i=1}^n$, let $\ell$ be a loss function, $f_\theta$ the function induced by parameters $\theta$, and $R(\theta) = \mathbb{E}_{P_{\calD}}[\ell(f_\theta(x), y)]$. The sequence $(\theta_t)_{t=0}^T$ denotes the series of parameters obtained over a gradient descent trajectory. Recalling that $\log P(\mathcal{D}^n)$ has the interpretation of an area under a training curve, we therefore consider the area under the training curve of a stochastic gradient descent trajectory. For the moment, we will consider minibatches of size one to simplify our analysis; it is straightforward to extend this reasoning to larger minibatches. We first consider a single training epoch, denoting by $\theta_k$ the value of the parameters at step $k$ after performing gradient steps $\theta_{t+1} = \theta_t - \alpha \nabla_\theta \ell (x_t, y_t , \theta_t)$. With a slight abuse of notation, we repurpose $\hat{\mathcal{L}}$ to apply to SGD trajectories as follows.
\begin{align}
\hat{\mathcal{L}}(\mathcal{D}^n) &= \sum_{k=0}^n \ell(x_k, y_k, \theta_k) \\
&= \sum_{k=0}^n [\ell(x_k, y_k, \theta_0) + \sum_{j=1}^{k} (\ell(x_k, y_k, \theta_j) - \ell(x_k, y_k, \theta_{j-1}) ) ] \\
\intertext{Recall that this is precisely our definition of \textit{interference} from Equation~\ref{eq:deltaint}: $\mathrm{I}_{\Delta}(x_j, x_k)$.}
&= \sum_{k=0}^n[ \ell(x_k, y_k, \theta_0) + \sum_{j=1}^k \mathrm{I}_{\Delta}(x_j, x_k)] \\
&= \widehat{R}_{\loss}(\mathcal{D}^n; \theta_0) + \sum_{j=1}^n \sum_{k>j} \mathrm{I}_{\Delta}(x_j, x_k) \end{align}
The term $\mathrm{I}_{\Delta}(x_j, x_k)$ measures the effect of the gradient step computed on data point $(x_j, y_j)$ on the loss at the data point $(x_k, y_k)$; thus $\sum_{j<k} \mathrm{I}_{\Delta}(x_j, x_k)$ measures the cumulative effect of gradient updates computed on earlier data on the loss at point $k$. This quantity philosophically resembles the $\log P(\mathcal{D}_i | \mathcal{D}_{ <i})$ term appearing in the expression of the log marginal likelihood. Indeed, in the first epoch of training it is an unbiased estimate of the effect of the gradient updates performed up to step $k$ on the expected risk $R$.
\begin{align}
\mathbb{E}_{x, y \sim P_{\calD}}[\ell(x,y; \theta_k)] &= \mathbb{E}_{\mathcal{D}^n \sim P_{\calD}}[\ell(x_k, y_k; \theta_k)] \\
&= \mathbb{E}_{\mathcal{D}^n \sim P_{\calD}}[\ell(x_k, y_k; \theta_0) + \sum_{j< k} \mathrm{I}_{\Delta}(x_j, x_k)] \end{align}
Unbiased estimation of the change in the true risk is only attainable due to the independence of $\theta_k$ and $x_k, y_k$. After the first epoch of training, this property no longer holds as the pair $(x_k, y_k)$ was used to obtain the network's current parameters. Applying similar techniques to those of \citet{hardt2015train} may yield bounds on the bias induced by this dependency, but these tend to be overly pessimistic, resulting in loose upper bounds on the expected risk. Concretely, two problems arise in the case of multiple epochs: first, the loss on the training set will decrease, meaning that the raw change in the loss for any given data point will eventually tend to zero and cease to provide informative updates; second, over the course of many epochs the network may overfit not just its predictions but also its gradient structure to the training set. Of course, neither of these problems is guaranteed to prevent effective performance estimation. In the first case, the losses from earlier in the training trajectory are still likely to be informative, and the low magnitude of the later losses when the network has overfit mean that this later period is unlikely to significantly influence the area under the training curve. In the second case, we note that in general changing the structure of gradients is a much more difficult task than changing predictions (recall the invariance-learning experiments of Figure~\ref{fig:cifar-invariances}), and so we expect the bias induced by these second-order effects to be small. Taking into account the large variance of using a minibatch to estimate the change in loss over the whole dataset, this yields the following practical conjecture.
\hypothesis{Bias-variance trade-off in interference estimation}{the interference between minibatches in the training set $I_{\Delta}(x_i, x_j)$ will exhibit low bias as an estimator of the change in the validation loss relative to its variance due to stochastic optimization. \label{hyp:minibatch}}
We evaluate Hypothesis~\ref{hyp:minibatch} in Figure~\ref{fig:sotl-bias-variance}, where we plot $\mathrm{I}_{\Delta}(x_j, x_k)$, for $(x_k, y_k)$ drawn from the same minibatch as $x_j$ (`same minibatch'), a larger subset of the training set (`training holdout set'), and a minibatch drawn from the validation set (`validation set'). For the training holdout set and the validation set, we use minibatches of size 5000 to isolate the variance in the estimator due only to the optimizer's minibatch sampling. Intriguingly, these results suggest that even after several epochs, $\mathrm{I}_{\Delta}(x_j, x_k)$ will exhibit low bias as an estimator of $R_{\loss}(\theta_t) - R_{\loss}(\theta_{t-1})$ in a range of neural network architectures and optimization schemes. This suggests that smoothing out the variance in the loss estimate induced by stochastic parameter updates will plausibly provide a greater benefit than using an unbiased estimate of performance on the validation set. This is likely to be the case in particular when a large learning rate is used early in training, as is common in practice.
\begin{figure}\label{fig:sotl-bias-variance}
\end{figure}
Motivated by these observations, we propose the following generalization measure which directly computes the area under a network's training curve. We refer to it as the SOTL, due to its interpretation as measuring the sum of training losses from the model's training trajectory. \begin{equation}
\mathrm{SOTL} = \sum^T_{t=1} \left[ \frac{1}{B} \sum^B_{i=1} \ell \left( f_{\theta_{t, i}}(\mathbf{X}_i), \mathbf{y}_i \right) \right] \end{equation}
A more detailed study of the SOTL is outside the scope of this thesis, though validation of its utility in neural architecture search can be found in the related work of \citet{ru2020revisiting}. We perform a small-scale proof of concept in this chapter.
\subsubsection{Training speed in DNNs} \label{sec:sgd_dnn}
\begin{figure}\label{fig:mod_select_dnn}
\end{figure}
Motivated by the previous discussion, we conjecture that just as the sum of the log posterior likelihoods is useful for Bayesian model selection, the sum of minibatch training losses will be useful to predict generalization error. In this section, we evaluate whether this conjecture holds for a simple convolutional neural network trained on the FashionMNIST dataset. Our results provide preliminary evidence in support of this claim, and suggest that further work investigating this relationship may reveal valuable insights into how and why neural networks generalize. We first evaluate whether the sum over training losses (SOTL) obtained over an SGD trajectory correlates with a model's generalization error, and whether SOTL predicts the weight assigned to a model by a linear ensemble. A discussion of the link between linear ensemble weight and training speed is provided in Appendix~\ref{sec:optimize-then-prune}; to summarize, we note that (sub-)models with the best sum of training losses will also exhibit the greatest time-averaged correlation with the target, and so in some sense present the `best' feature for the linear model combination to use. To evaluate the connection between these three concepts, we train a linear combination of DNNs with stochastic gradient descent and evaluate a) the SOTL of each model's training trajectory, b) the final validation loss of each model, and c) the weight assigned to each model by a linear ensembling layer trained concurrently with the networks.
We observe a strong correlation between the sum of training losses (SOTL) and average test cross-entropy (see Figure \ref{fig:mod_select_dnn} middle column), validating that the SOTL provides useful model rankings. Further, we find that architectures with lower test error (when trained individually) are given higher weight by the linear ensembling layer -- as can be seen from the left plot in Figure \ref{fig:mod_select_dnn}. This finding may have intriguing implications on empirical phenomena such as the lottery ticket hypothesis~\citep{frankle2018the}. Further details of the experiment can be found in Appendix \ref{sec:exp_details_sgd_dnn}. Our results are summarized in Figure \ref{fig:mod_select_dnn}. Though the architecture sets in these illustrative experiments are small, these results are replicated in much larger architecture search spaces by \citet{ru2020revisiting}.
\subsection{Bias and variance in performance estimation} \label{sec:DNN_exp} We now investigate \textit{why} the sum of training losses seems to predict generalization despite our departure from the theoretically grounded regime of Bayesian models. We have seen in Figure~\ref{fig:mod_select_dnn} that the SOTL is predictive of generalization performance in simple model selection problems -- indeed, further work \citep{ru2020revisiting} has robustly demonstrated that it is often a \textit{better} predictor of generalization than the early-stopping validation loss. This is quite remarkable as most of the received wisdom in deep learning recommends against using the training set for hyperparameter search in favour of a held-out validation set. We recall Figure~\ref{fig:sotl-bias-variance}, which revealed that the effect of one minibatch gradient step on the loss of a disjoint minibatch exhibited low bias as an estimator of the change in the expected risk, while the variance of this estimator due to stochasticity in the gradient descent trajectory was itself significant. This suggests that using training speed as a performance estimator may be performing a bias-variance trade-off: training speed introduces a small amount of bias in the performance estimator as the model overfits, but by marginalizing over all of the parameters visited during an epoch or a trajectory, significantly reduces the variance in the performance estimation. Concretely, we obtain the following hypothesis. \hypothesis{Bias-variance trade-off in training speed}{training speed, as measured by the sum of training losses, provides a lower-variance estimate of performance than the validation loss in noisy optimization procedures such as stochastic gradient descent.} \begin{figure}\label{fig:tse-variance}
\end{figure}
We validate this hypothesis in Figure~\ref{fig:tse-variance}, where we evaluate a randomly sampled subset of models from the NASBench-201 dataset \citep{dong2020nasbench201}, the dataset in which variants of the SOTL evaluated by \citet{ru2020revisiting} obtained impressive rank correlation performance, outperforming learning curve extrapolation methods and the early stopping validation loss. We see that the curves corresponding to the validation accuracy and validation loss at each epoch are indistinguishable for most architectures, while the training loss, which is averaged over minibatches sampled during an entire epoch, exhibits much lower variance. This suggests that using the early-stopping validation loss would yield an extremely noisy estimator of final performance, whereas the SOTL, even if only averaged over the losses of a single epoch late in training, yields a relatively consistent ranking.
\keyinsight{The sum of training losses yields an analogous performance estimator to $\mathcal{L}(\mathcal{D})$ which can be applied to neural networks. It measures, among other things, \textit{interference}: the degree to which a gradient update on one minibatch generalizes to the rest of the training set. }
\section{Conclusions}
In this chapter, we have proposed a family of estimators of the marginal likelihood which illustrate the connection between training speed and Bayesian model selection. Because gradient descent can produce exact posterior samples in linear models, our result shows that Bayesian model selection can be performed by training a linear model with gradient descent and tracking how quickly it learns. This approach also applies to the infinite-width limit of deep neural networks. The intuition behind this estimator is appealing: a model which can better leverage information from one data point to improve its predictions on other data points will generalize better to new data than one which is not able to do so. We apply similar intuition to propose an analogous performance estimator for deep neural networks, which computes the area under a model's training curve. This estimator obtains high rank correlation with the final generalization error of a neural network, yielding a competitive approach to performance estimation for neural architecture search. We provide evidence that the connections shown in linear models have predictive power towards explaining generalization and training dynamics in DNNs. This empirical analysis highlights the importance of generalization between inputs in the training set, i.e. interference, as a factor influencing both training speed and generalization error.
One limitation of the connection illustrated in this chapter is that it can only be straightforwardly applied to problems of identifying a suitable inductive bias or network architecture. Regularization schemes that influence training dynamics, such as dropout or weight regularization, introduce confounding factors into the training process under which the relationship between training speed and generalization breaks down. Training speed alone may also be insufficient for problems of optimizer or hyperparameter selection, where these quantities may reduce training speed via their influence on the ability of each optimization step to reduce the loss on the current minibatch, rather than by influencing generalization between minibatches.
While generalization between training inputs will play a key role in our analysis of generalization in {reinforcement learning}, the results from this chapter cannot immediately be applied to deep RL agents. This is because the estimators we use require a fixed optimization objective to provide a meaningful notion of training speed. In reinforcement learning, both the optimization objective and the input distribution are constantly changing. A deep RL agent that obtains a low training loss quickly may nonetheless achieve sub-optimal return in the environment. In order to understand the role of interference in deep reinforcement learning, it will first be necessary to understand how the dynamics of RL differ from those of the supervised learning setting. It is only with this understanding that we will be able to leverage the insights developed in this chapter and the previous one to predict and improve generalization in deep reinforcement learning.
\chapter{Dynamics of reinforcement learning} \label{chp:rl-dynamics} \minitoc \section{Introduction} \label{sec:rl-dynamics-introduction} \begin{figure}
\caption{An example of qualitatively different value function dynamics for a two-state MDP for 1-step temporal difference learning and Monte Carlo learning, with fixed point $V^\pi$ in red.}
\label{fig:two-state-example}
\end{figure} The previous chapters have shown that properties of a network's training dynamics can provide useful insights into its generalization performance. The remainder of this thesis will apply similar analysis into the training of \textit{reinforcement learning} agents. As mentioned in the discussion of TD learning in Section~\ref{bkgd:rl}, the dynamics of reinforcement learning agents are less stable than those of supervised learning \citep{baird1993advantage}, leading to divergence even for simple linear function approximation schemes. This complexity necessitates a more detailed study of the learning dynamics of neural networks trained with value-based RL algorithms before it is possible to discuss the implications of these dynamics on generalization in full detail. To this end, the current chapter will provide a novel theoretical framework for our study of representation learning and illustrate its utility by analyzing the effect of various auxiliary tasks on these dynamics; Chapter~\ref{chp:rep-learning} will draw on insights from this framework to motivate an empirical study of the representations learned by deep RL agents, while Chapter~\ref{chp:gen-rl} will explore its implications on memorization and overfitting to training observations.
Our theoretical analysis of representation dynamics in RL is motivated in part by the robust improvements given by auxiliary tasks to deep reinforcement learning agents. While supervised learning often benefits from unsupervised pretraining \citep{radford2018improving, erhan2010does, lee2021pebble}, fitting auxiliary labels that are only tangentially related to the principal task has not been incorporated into standard learning pipelines. In contrast, training a network to predict not just the value function, but other properties of the environment such as the value of a pixel at the next timestep or expected state occupancies under the current policy, consistently helps RL agents improve their performance \citep{jaderberg2016reinforcement,mirowski2017learning, lin2019adaptive}. A commonly-held belief is that these benefits are mediated through improved representation learning. This hypothesis naturally raises a number of questions that, broadly speaking, remain open. What makes a good auxiliary task? Can we predict how an auxiliary task will affect an agent's representation? When should one auxiliary task be used instead of another? More generally, how should this hypothesis about the mechanism of auxiliary tasks itself be tested?
The complex interacting components of large-scale deep reinforcement learning agents make it difficult to extract general insights. In this chapter we aim to shed light on the answers to these questions by distilling the benefits of auxiliary tasks down to their effects on the dynamics of the learned representation. We begin by considering a \emph{learning dynamics} framework for studying the evolution of an agent's predictions and learned representation; see Figure~\ref{fig:two-state-example} for a toy illustration, with full details given in Section~\ref{sec:learning-dynamics}. The central idea behind this framework is that it is not just \emph{what} an agent learns that dictates how its representation is shaped, but also \emph{how} it learns.
This framework provides a model for representation learning in RL. Under this model, even in the case of model-free algorithms, agents can be shown to automatically incorporate the transition structure of the environment into their representations. We characterize the dynamics induced by a number of auxiliary tasks, with particular focus on ensemble predictions and random cumulant functions, and prove convergence of the induced representations to subspaces defined by certain decompositions of the environment's transition operator. These results will play a key role in our analysis in Chapter~\ref{chp:gen-rl}. We then consider the effectiveness of auxiliary tasks in sparse-reward environments, and via the use of the learning dynamics framework, construct a hypothesis as to which auxiliary tasks should be particularly well suited to such environments. We test this conjecture in the Arcade Learning Environment \citep{bellemare2013arcade}, demonstrating strong performance with random cumulant auxiliary tasks. These findings motivate our study of representation learning in sparse-reward environments in Chapter~\ref{chp:rep-learning}.
\section{Mathematical framework} \label{sec:learning-dynamics} This section will present a mathematical framework from which to study the learning dynamics of RL agents, with a particular focus on the dynamics of an agent's representation. A description of the reinforcement learning setting along with the basic notation used in this chapter can be found in Chapter~\ref{chp:background}. While we will formulate the representation in terms of the values attained by some feature output, our analysis will focus on the \textit{evolution} of these features over time. In taking this perspective, we lay the groundwork for our later discussion of \textit{capacity} in Chapter~\ref{chp:rep-learning}, where we frame a representation in terms of not just the network's feature outputs under its current parameters, but also the optimization dynamics that these parameters induce.
\subsection{Features and representations}\label{sec:reps}
In many environments, it is impractical to store a value function as a table indexed by states. Even in settings where this is practical, it may not be desirable, as function approximation is necessary for generalization to new observations that the agent may encounter during or after training. Instead, it is typical to parameterize $V \in \mathbb{R}^{\mathcal{X}}$ through a \emph{feature map} $\phi : \mathcal{X} \rightarrow \mathbb{R}^K$ and \emph{weight vector} $\mathbf{w} \in \mathbb{R}^K$, leading to a factorization of the form \begin{align*}
V(x) = \langle \phi(x), \mathbf{w} \rangle \, . \end{align*} Such a parameterization may be amenable to more efficient learning, for example if $\phi$ abstracts away unimportant information, allowing for generalization between similar states. Even more concisely, writing $\Phi \in \mathbb{R}^{\mathcal{X}\times K}$ for the matrix with rows $\phi(x)$ yields \begin{align}\label{eq:q-phi-w}
V = \Phi \mathbf{w} \, . \end{align} The quantity $\Phi$ is often referred to as the agent's \emph{representation} of the environment \citep{boyan1999least, levine2017shallow, bertsekas2018feature,chung2018two,bellemare2019geometric,dabney2020value}. In many small- and medium-scale applications, the representation is fixed ahead of time, and only $\mathbf{w}$ is updated during learning; this is the linear function approximation regime. Many common choices of features relate to various decompositions of mathematical objects associated with the transition operator $P^\pi$. In deep reinforcement learning, however, $\Phi$ and $\mathbf{w}$ are learned simultaneously. In this chapter we will consider the representation to be the output of the penultimate layer of the network, meaning that the mapping from representation to value is linear. While it is possible to model $\Phi$ as the output of any network layer, the resulting dynamics will not necessarily be analytically tractable as the mapping from features to network outputs will be nonlinear.
\subsection{Subspace distances} \begin{wrapfigure}{l}{0.35\linewidth}
\centering
\includegraphics[width=0.95\linewidth]{figures/rl-dynamics/grassmann.png}
\caption{Visualizing the Grassmann distance between two 2-dimensional subspaces.}
\label{fig:grassmann} \end{wrapfigure} The key results of this chapter will depend on characterizing the convergence of the subspace spanned by a set of feature vectors. To do so, we need a notion of distance between subspaces of $\mathbb{R}^{\mathcal{X}}$. The following definition follows \citet{ye2016schubert}. Intuitively, it can be thought of as generalizing the notion of an angle between vectors to one between subspaces.
\begin{definition}\label{def:grassmann-distance}
For two $K$-dimensional subspaces $Y_1, Y_2 \leq \mathbb{R}^\mathcal{X}$, the \emph{principal angles} $\theta_1, \ldots, \theta_K \in [0, \pi/2]$ between the subspaces are defined by taking orthonormal matrices $\mathbf{Y}_1 \in \mathbb{R}^{\mathcal{X} \times K}$ and $\mathbf{Y}_2 \in \mathbb{R}^{\mathcal{X} \times K}$ the columns of which span $Y_1$ and $Y_2$ respectively, and defining $\theta_k = \cos^{-1}(\sigma_k(\mathbf{Y}_1^\top \mathbf{Y}_2))$, where $\sigma_k(\mathbf{A})$ is the $k$\textsuperscript{th} singular value of the matrix $\mathbf{A}$. One can check that this definition is independent of the matrices $\mathbf{Y}_1$ and $\mathbf{Y}_2$, depending only on the subspaces $Y_1,Y_2$ themselves. The \emph{Grassmann distance} $d(Y_1, Y_2)$ between $Y_1$ and $Y_2$ is then defined as $\| \theta \|_2 = (\sum_{k=1}^K \theta_k^2)^{1/2}$. \end{definition}
With these definitions in hand, we are now ready to give a precise version of the statement alluded to by the discussion and figure in Section~\ref{sec:rl-dynamics-introduction}.
\section{Value function \& representation dynamics} \label{sec:value-function} We begin our study of learning dynamics in RL with a discussion of the dynamics followed by a \textit{value function} when Bellman updates are applied. We make some simplifying assumptions to avoid focusing on technicalities here, and give a discussion of the more general case in Appendix~\ref{sec:more-general-value-function-results}.
\subsection{Value function dynamics} \label{sec:vf-dynamics} We consider a continuous-time analogue of one-step temporal difference (TD) learning dynamics. While in practice TD updates are performed discretely, the continuous-time regime lends itself more readily to analysis and has been leveraged frequently in the literature to provide theoretical guarantees on the corresponding discrete-time process \citep{borkar2000ode}. We begin with the following system: \begin{align*}
\partial_t V_t(x) = \mathbb{E}_\pi[R_0 + \gamma V_t(X_1)|X_0 = x]- V_t(x) \, , \end{align*} for each $x \in \mathcal{X}$, which may also be written \begin{align*}
\partial_t V_t(x) = R^\pi(x) + \gamma (P^\pi V_t)(x) - V_t(x) \, , \end{align*} or in full matrix notation, \begin{align}\label{eq:value-function-ode}
\partial_t V_t = -(I - \gamma P^\pi) V_t + R^\pi \, . \end{align} The differential equation in \eqref{eq:value-function-ode} is an affine autonomous system, and is straightforwardly solvable. \begin{restatable}{lemma}{lemODESoln}\label{lem:ode-soln} If $(V_t)_{t \geq 0}$ satisfies Equation~\eqref{eq:value-function-ode} with initial condition $V_0$ at time $t=0$, then we have \begin{align}\label{eq:value-function-ode-solution}
V_t = \exp( -t (I - \gamma P^\pi ) )(V_0 - V^\pi) + V^\pi \, . \end{align} \end{restatable} We recover as a straightforward corollary the well-known result that $V_t \rightarrow V^\pi$ as $t \rightarrow \infty$, since all eigenvalues of $(I - \gamma P^\pi)$ have strictly positive real part.
However, the solution in Equation~\eqref{eq:value-function-ode-solution} also describes the \emph{trajectory} by which $V_t$ reaches this limiting value. Figure~\ref{fig:two-state-example} provides an illustration of this in a small MDP; the value functions accumulate along a particular affine subspace of $\mathbb{R}^\mathcal{X}$ prior to convergence. This phenomenon can be formalized via the Grassmann distance. \begin{assumption}\label{assume:value-function-conditions}
$P^\pi$ is real-diagonalizable, with strictly decreasing eigenvalue sequence $1=\lambda_1 > \lambda_2 > \cdots > \lambda_{|\mathcal{X}|}$, and corresponding right-eigenvectors $U_1, \ldots, U_{|\mathcal{X}|}$. \end{assumption}
\begin{restatable}{proposition}{propOneValueFunction}\label{prop:one-value-function}
Under Assumption~\ref{assume:value-function-conditions}, and $(V_t)_{t \geq 0}$ the solution to Equation~\eqref{eq:value-function-ode}, for almost every\footnote{In the measure-theoretic sense that the set of excluded initial conditions $V_0$ has Lebesgue measure $0$.} initial condition $V_0$, we have
\begin{align*}
d(\langle V_t - V^\pi \rangle, \langle U_1 \rangle) \rightarrow 0 \, .
\end{align*} \end{restatable} \begin{proof}[Proof sketch]
The full proof of this statement and those that follow can be found in Appendix~\ref{sec:proofs-rl-dynamics}. We provide a sketch as follows: note that we can write the value function trajectory $V_t$ with respect to the eigen-basis $U_1, \dots, U_{|\mathcal{X}|}$, where $V_0 = \sum_{i=1}^{|\mathcal{X}|} \alpha_0^i U_i$, as $V_t = \sum_{i=1}^{|\mathcal{X}|} \alpha_0^i \exp(-\lambda_i t) U_i = \sum_{i=1}^{|\mathcal{X}|} \alpha_t^i U_i$. This then gives the following form of the error $V_t - V^\pi$
\begin{align}
V_t - V^\pi &= \exp(-t(I - \gamma P^\pi)) (V_0 - V^\pi) = \sum_{i=1}^{|\mathcal{X}|} \alpha_i \exp(t(\gamma \lambda_i - 1)) U_i \\
&= \alpha_1 \exp(t(\gamma - 1)) U_1 + o(\exp(t(\gamma \lambda_1 - 1)))\label{eq:suff-grass}
\end{align} whenever $\lambda_2 < \lambda_1$. Via a lemma in Appendix~\ref{sec:aux-results}, we can show that this condition \eqref{eq:suff-grass} is sufficient to guarantee that $V_t - V^\pi$ converges to $U_1$ in Grassmann distance. \end{proof} We can observe this predicted behaviour in Figure~\ref{fig:two-state-example}, where the value function $V_t$ converges to the line $V^\pi + \alpha \mathbbm{1}$, i.e. the value function offset by a constant function under temporal difference learning dynamics. Note that $\mathbbm{1}$ is an eigenvector of any stochastic matrix, corresponding to eigenvalue $1$, the largest eigenvalue (i.e. $U_1$ for any ergodic $P^\pi$ will be the constant function). A more general version of this statement can also be given with an ensemble of $K$ value functions, which indicates that yet more information about the environment is contained in the learned collection. The proofs of these results relate to the {power method} in linear algebra.
\begin{restatable}{proposition}{propManyValueFunctions}\label{prop:many-value-functions}
Under Assumption~\ref{assume:value-function-conditions}, and $(V^{(k)}_t)_{t \geq 0}$ the solution to Equation~\eqref{eq:value-function-ode} for each $k=1,\ldots,K$, for almost every initial condition $(V_0^{(k)})_{k=1}^K$, we have
\begin{align*}
d(\langle V^{(k)}_t - V^\pi \mid k \in [K] \rangle, \langle U_{1:K} \rangle) \rightarrow 0 \, .
\end{align*} \end{restatable} The proof of this statement is analogous to that of Proposition~\ref{prop:one-value-function} and can be found in Appendix~\ref{sec:proofs-rl-dynamics}. It depends on an eigendecomposition of the transition operator, which allows us to characterize the convergence of the value function along each eigenspace. Because the value function converges more slowly along the principal eigenspaces of $P^\pi$, most of the mass of the resulting error term will reside in these spaces as training progresses. The intermediate values of $V_t$ are therefore biased towards the principal subspaces of $P^\pi$, and in this sense give us some information about the transition structure of the environment. \keyinsight{ Even in an environment with no reward signal at all (in which case $V^\pi = 0$), an agent performing TD learning still picks up information about the transition structure of the environment within its value function.}
Due to the importance of the vectors $U_{1:K}$ in this analysis, we introduce the term \emph{eigen-basis functions} (EBFs) to describe them. Intuitively, the eigen-basis functions $U_{1:K}$ provide a basis for the $K$-dimensional subspace to which $V_t$ converges; we illustrate two demonstrative eigen-basis functions of a random walk on a gridworld in Figure~\ref{fig:ensemble_feature0}. For any MDP $\mathcal{M}$ and any policy $\pi$, we will have that $U_1 = \mathbbm{1}$, the constant function. In general, the eigenvectors corresponding to large positive eigenvalues of $P^\pi$ will be `smooth' with respect to the transition dynamics, in the sense that they will not vary much in expected value after an application of $P^\pi$. This property will be discussed further in Chapter~\ref{chp:gen-rl}.
We observe that a similar analysis, indicating similar behaviour, is possible for related learning algorithms such as $n$-step temporal difference learning and TD($\lambda$); see Appendix~\ref{sec:beyond-one-step} for further details. In contrast, Monte Carlo learning dynamics correspond to the differential equation \begin{align*}
\partial_t V_t = (I - \gamma P^\pi)^{-1}R^\pi - V_t \, , \end{align*} which has the solution \begin{align*}
V_t = e^{-t}(V_0 - (I - \gamma P^\pi)^{-1} R^\pi) + (I - \gamma P^\pi)^{-1} R^\pi \, . \end{align*} The trajectory associated with this solution simply linearly interpolates between $V_0$ and $V^\pi$, as illustrated in Figure~\ref{fig:two-state-example}, and does not pick up any additional information about the environment in the value function as learning proceeds. See Appendix~\ref{sec:beyond-one-step} for further details. This example serves to illustrate that it is not just \emph{what} an agent learns ($V^\pi$), but \emph{how} the agent learns (the trajectory $V_t$) that plays a key, measurable role in what environment information is incorporated in its value function. The notion of information used here is characterized by the subspace in which the learned value function lies; this property naturally relates to linear function approximation in determining the set of functions which can be well-approximated using a given set of features, leading us to consider whether similar subspace convergence properties to those shown for value functions arise in an agent's representation.
\subsection{Representation dynamics} \label{sec:rep-dynamics} We now seek to apply similar dynamics analysis to our model of representation learning to better understand the learning dynamics followed by an agent's representation. Recall the parameterization of $V \in \mathbb{R}^{\mathcal{X}}$ from Section~\ref{sec:reps}, taking the form \begin{align*}
V = \Phi \mathbf{w} \, , \end{align*} for $\Phi \in \mathbb{R}^{\mathcal{X} \times K}$, $\mathbf{w} \in \mathbb{R}^{K}$. Central to deep reinforcement learning is the idea that $\Phi$ and $\mathbf{w}$ are simultaneously learned from a single RL loss. As in the value function case, we will focus on the dynamics with single-step temporal difference learning; remarks on other learning algorithms are given in Appendix~\ref{sec:beyond-one-step}. The dynamics associated with single-step TD learning are given by \begin{align}
\partial_t \Phi_t & = -\alpha \frac{1}{2}\nabla_{\Phi_t} \| R^\pi + \square[\gamma P^\pi \Phi_t \mathbf{w}_t] - \Phi_t \mathbf{w}_t \|^2_2 \, , \label{eq:phi-ode} \\
\partial_t \mathbf{w}_t & = -\beta\frac{1}{2}\nabla_{\mathbf{w}_t} \| R^\pi + \square[\gamma P^\pi \Phi_t \mathbf{w}_t] - \Phi_t \mathbf{w}_t \|^2_2 \label{eq:w-ode} \, , \end{align} where $\alpha, \beta \in [0, \infty)$ are learning rates, implying that features and weights may be learned at different rates. Further, $\square[\, \cdot\, ]$ denotes a \emph{stop-gradient} on its argument, indicating that we treat the instances of $\Phi_t$ and $\mathbf{w}_t$ within the expression as constants when computing derivatives; this reflects the fact that temporal difference learning is a \emph{semi-gradient} method \citep{sutton2018reinforcement}.
The use of a single loss to learn both the representation and weights corresponds to the approach taken in deep RL, and we will use these dynamics as an idealized model of the dynamics of deep neural networks. While this model ignores some practicalities of deep RL (such as visitation distributions, implicit bias from the function approximation architecture, and stochasticity introduced by minibatch training), it allows us to obtain valuable insights into representation dynamics which, as we will see in Section \ref{sec:experiments} and in the following chapters, accurately predict the behaviour of deep RL agents.
\begin{restatable}{lemma}{lemCoupledDynamics} Let $\Phi_t$ and $\mathbf{w}_t$ parameterize a value function approximator as defined above. Then \begin{align}
\partial_t \Phi_t & = \alpha (R^\pi + \gamma P^\pi \Phi_t \mathbf{w}_t - \Phi_t \mathbf{w}_t) \mathbf{w}_t^\top \, , \label{eq:phi-flow} \\
\partial_t \mathbf{w}_t & = \beta \Phi_t^\top(R^\pi + \gamma P^\pi \Phi_t \mathbf{w}_t - \Phi_t \mathbf{w}_t) \label{eq:w-flow} \, . \end{align} \end{restatable}
This joint flow on $\Phi_t$ and $\mathbf{w}_t$ leads to much richer behaviour than the flow considered on value functions in the previous section. Without further assumptions, the evolution of the representation $\Phi_t$ may be complex, and will not necessarily incorporate environment information as described for the case of value functions in Proposition~\ref{prop:many-value-functions}. In particular, in sparse-reward environments, the agent may learn to predict a near-zero value function by setting the weights $\mathbf{w}_t$ close to zero, which would effectively prevent any further updating of the features $\Phi_t$, ruling out the possibility of a result analogous to Proposition~\ref{prop:many-value-functions}. We will only be able to obtain the upcoming result of Theorem~\ref{thm:infinite-heads} by restricting the rate at which the weights evolve over the course of training.
\section{Auxiliary tasks}\label{sec:aux-dynamics} Having studied the temporal difference learning dynamics in Equations~\eqref{eq:phi-flow} \& \eqref{eq:w-flow}, we now examine how auxiliary value-prediction tasks influence the behaviour of the agent's representation during the learning process. As described previously, developing a granular description of the joint learning dynamics of the representation and weights of the learner is a complex task, and so we focus on the limiting case in which the number of auxiliary tasks is large relative to the dimensionality of the representation. We conclude that under certain conditions, representations learned in the many-task limit bear a close connection to the \emph{eigen-basis functions} described in Section~\ref{sec:value-function}, and also to the \emph{resolvent singular basis functions}, a new decomposition introduced in Section~\ref{sec:random-cumulants}. The reader may find it useful to refer to Appendix~\ref{sec:feature-selection} for a more detailed discussion of these decompositions.
\begin{table*} \smaller
\centering
\begin{tabular}{c|c|c|c|c}
\toprule
Auxiliary task & Dynamics ($r=0$) & $\Phi_\infty$ ($r=0$) & $\Phi_\infty$ ($r\neq 0$) & $\lim_{t \rightarrow \infty} \langle \Phi_t - \Phi_\infty \rangle$\\
\midrule
Ensemble & $-(I - \gamma P^\pi) \Phi_t$ & $0$ & $(I - \gamma P^\pi)^{-1} r\epsilon^\top$ & EBFs of $P^\pi$\\
Random cumulants & $-(I - \gamma P^\pi) \Phi_t + Z_{\Sigma}$ & $\Psi Z_{\Sigma}$ & $\Psi Z_{\Sigma}$ & EBFs of $P^\pi$\\
Additional policies & $ -(I - \gamma P^{\overline{\pi}}) \Phi_t$ & $0$ & $ (I - \gamma P^{\overline{\pi}})^{-1} R^{\overline{\pi}} \epsilon^\top$ & EBFs of $P^{\bar{\pi}}$\\
Multiple $\gamma$s & $-(I - \overline{\gamma} P^\pi)\Phi_t$ & $0$ & $(I - \overline{\gamma} P^{\pi})^{-1}R^{\pi} \epsilon^\top$ & EBFs of $P^{\pi}$\\
\bottomrule
\end{tabular}
\caption[Summary of dynamics and limiting solutions under some common auxiliary tasks in the limit of infinitely-many prediction outputs.]{Summary of dynamics and limiting solutions under some common auxiliary tasks in the limit of infinitely-many prediction outputs. For additional policies, $\overline{\pi}$ denotes the average of the finite set of policies $\pi_1,\ldots,\pi_L$ under consideration ($L$ fixed and independent of $M$), and for multiple discount factors, $\overline{\gamma}$ denotes the average of the discount factors $\gamma_1,\ldots,\gamma_L$ under consideration. We let $\Psi = (I - \gamma P^\pi)^{-1}.$ See Section~\ref{sec:random-cumulants} and Appendix~\ref{appx:feature_theory} for additional details.
}
\label{tab:theory} \end{table*} \subsection{Ensemble value prediction}
We begin by considering the auxiliary task of \emph{ensemble value prediction} \citep{osband2016deep, anschel2017averaged, agarwal2019striving}. Rather than making a single prediction of the value function $V^\pi$, the learner makes $M \in \mathbb{N}$ separate predictions as linear functions of a common representation $\Phi^{M}$, using $M$ independently initialized weight vectors $\mathbf{w}^{m} \in \mathbb{R}^{K}$ ($m=1,\ldots,M$). We note that while at initialization $\Phi^M_0 \in \mathbb{R}^{\mathcal{X} \times K}$ is independent of $M$, its dynamics do depend on $M$ through the contribution of the weights. Simultaneous temporal difference learning on all predictions leads to the following dynamics: \begin{align}
\partial_t \Phi^{M}_t \label{eq:ensemble-phi-flow}
\!= & \alpha\! \sum_{m=1}^M (R^\pi\! +\! \gamma P^\pi \Phi^{M}_t \mathbf{w}_t^{m}\! -\! \Phi^{M}_t \mathbf{w}_t^{m}) (\mathbf{w}_t^{m} )^\top \, , \\
\partial_t \mathbf{w}_t^{m} = & \beta (\Phi^{M}_t)^\top (R^\pi + \gamma P^\pi \Phi^M_t \mathbf{w}^{m}_t - \Phi_t \mathbf{w}^{m}_t ) \, . \label{eq:ensemble-w-flow} \end{align}
The following result characterizes the representation learned by the agent in the many-tasks limit, again establishing a connection to EBFs; we follow the approach described by \citet{arora2019fine} in fixing the linear weights associated with the value function; this dramatically simplifies our analysis, while still describing practical settings in which the features and weights are trained separately as in \citet{chung2018two}. Analogous results follow when the \textit{learning rate} $\beta$ is scaled appropriately, resembling the results of \citet{jacot2018neural} and \citet{yang2021tensor}.
\begin{restatable}{theorem}{thmInfiniteHeads}\label{thm:infinite-heads}
For $M \in \mathbb{N}$, let $(\Phi^{M}_t)_{t \geq 0}$ be the solution to Equation~\eqref{eq:ensemble-phi-flow}, with each $\mathbf{w}^{m}_t$ for $m=1,\ldots,M$ initialized independently from $N(0, \sigma_M^2)$, and fixed throughout training ($\beta=0$). We consider two settings: first, where the learning rate $\alpha$ is scaled as $\frac{1}{M}$ and $\sigma_M^2 = 1$ for all $M$, and second where $\sigma_M^2 = \frac{1}{M}$ and the learning rate $\alpha$ is equal to $1$. These two settings yield the following dynamics, respectively: \begin{align}
\lim_{M \rightarrow \infty} \partial_t \Phi_t^{M} \overset{P}{=}& -(I - \gamma P^\pi )\Phi_t^{M}\quad \text{, and } \\
\lim_{M \rightarrow \infty} \partial_t \Phi_t^{M} \overset{D}{=}& -(I - \gamma P^\pi )\Phi_t^{M} + R^\pi \epsilon^\top \; \text{, $\epsilon \sim \mathcal{N}(0, I)\,$.} \end{align} The corresponding limiting trajectories for a fixed initialization $\Phi_0 \in \mathbb{R}^{\mathcal{X}\times K}$ are therefore given respectively by \begin{align}
\lim_{M \rightarrow \infty} \Phi_t^{M} \overset{P}{=}& \exp(-t(I - \gamma P^\pi))\Phi_0 \quad \text{, and } \\
\lim_{M \rightarrow \infty} \Phi_t^{M} \overset{D}{=}& \exp(-t(I - \gamma P^\pi))(\Phi_0 - (I - \gamma P^\pi)^{-1} R^\pi \epsilon^\top ) \nonumber \\
& \qquad \ + (I - \gamma P^\pi)^{-1}R^\pi \epsilon^\top \, ,\, \epsilon \sim \mathcal{N}(0, I)\,. \end{align} \end{restatable} \begin{proof}[Proof sketch] The key step in the proof of this result is to show that the limiting dynamics of $\Phi^M_t$ depend on the matrix of outer products $\sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top$, which under the conditions provided in the theorem statement can be shown to converge to the identity matrix. In the case of scaled initializations, we obtain the following, though the derivation in the case of scaled learning rates is similar. \begin{align}
\partial_t \Phi^M_t &= -(I - \gamma P^\pi)\Phi_t^M \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top + \sum_{m=1}^M R^{\pi} (\mathbf{w}^m)^\top \\
\lim_{M \rightarrow \infty} \partial_t \Phi^M_t &= -(I - \gamma P^\pi)\Phi_t^M \lim_{M \rightarrow \infty}\sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top + \lim_{M \rightarrow \infty} R^\pi(\sum_{m=1}^M \mathbf{w}^m)^\top \\
&\overset{D}{=} -(I - \gamma P^\pi )\Phi_t^M I + R^\pi \epsilon^\top, \; \epsilon \sim \mathcal{N}(0, I). \end{align} In the case of zero rewards, we then get analogous dynamics on $\Phi_t^M$ as we did on the value functions in Proposition~\ref{prop:many-value-functions}. \end{proof}
In contrast to the case described in Section~\ref{sec:rep-dynamics}, this result indicates that the introduction of auxiliary tasks leads to useful environment information being incorporated into the representation. Indeed, the dynamics described above imply the following convergence result, analogous to Proposition~\ref{prop:many-value-functions}.
\begin{restatable}{corollary}{propSubspaceConvergence}\label{prop:subspaceconvergence} Under the feature flow \eqref{eq:ensemble-phi-flow} with $\mathbf{w}^m_t$ fixed at initialization for each $m = 1, \dots, M$, under either the scaled initialization or scaled learning rate assumption of Theorem~\ref{thm:infinite-heads}, and under Assumption~\ref{assume:value-function-conditions}, for almost all initializations $\Phi_0$, we have when $R^\pi = 0$
\begin{align*}
d(\langle \Phi_t \rangle, \langle U_{1:K} \rangle) \rightarrow 0 \, ,\quad\text{as } t \rightarrow \infty.
\end{align*} \end{restatable} The proof of this result follows straightforwardly from Theorem~\ref{thm:infinite-heads} and the proof of the convergence of value functions to the subspace spanned by $U_1, \dots, U_K$, and can be found in Appendix~\ref{sec:proofs-rl-dynamics}. \keyinsight{Under the conditions of Theorem~\ref{thm:infinite-heads} and Corollary~\ref{prop:subspaceconvergence}, the ensemble auxiliary tasks cause the agent's representation $\Phi$ to align with EBFs.}
We show in Appendix \ref{sec:ensemble-dynamics} that this behaviour is observed in practice when $M \gg K$ and the value of $\mathbf{w}^m_t$ is fixed at initialization for all $m$. We additionally compare the representations learned when $\mathbf{w}^m_t$ is allowed to vary over training. Here we find empirically that allowing the weights to vary during training induces dynamics that differ from those predicted by Theorem~\ref{thm:infinite-heads} for the fixed-weights setting. To illustrate this, we follow the evolution of a single column of $\Phi_t$, i.e. a single feature vector $\phi_t = \Phi_t[ \; : \,, 1 \; ]$, trained with the ensemble prediction dynamics of Equations~\eqref{eq:ensemble-phi-flow} \& \eqref{eq:ensemble-w-flow} on a simple four-rooms gridworld environment in Figure~\ref{fig:feature-viz}.
\begin{figure}\label{fig:feature-viz}
\end{figure}
We visualize $\phi_t$ along with two illustrative eigenvectors of the transition matrix $P^\pi$, corresponding to one positive and one negative eigenvalue. We observe that while the feature $\phi_t$ quickly evolves to resemble the smooth eigenvector corresponding to the positive eigenvalue for small values of $t$, it later converges to the non-smooth eigenvector corresponding to the most negative eigenvalue of the transition matrix $P^\pi$. While we leave further analysis to future work, this example hints at an intriguing relationship between the EBFs and the joint representation dynamics.
\subsection{Random cumulants}\label{sec:random-cumulants}
In the case of zero rewards, our previous results show that while from the perspective of subspaces the representation approaches the EBF subspace in Grassmann distance, in Euclidean distance the representation is approaching the zero matrix pointwise. This has important implications for the scenario of large-scale sparse-reward environments, in which the agent may not encounter rewards for long periods of time, and indicates that the agent's representation is at risk of collapsing in such cases.
Motivated by this analysis, we consider a means of alleviating this representation collapse, by learning value functions for \emph{randomly generated cumulants} \citep{osband2018randomized,dabney2020value}. Mathematically, the agent again makes many predictions from a common representation, with each prediction indexed by $m=1,\ldots,M$ attempting to learn the value function associated with a randomly drawn reward function $r^m \in \mathbb{R}^{\mathcal{X}}$ under the policy $\pi$. Thus, the agent's parameters are the representation $\Phi$ and a set of weights $\mathbf{w}^m$ for each prediction. The learning dynamics are then given by: \begin{align}
\partial_t \Phi^{M}_t \label{eq:rc-phi-flow}
\!= & \alpha \sum_{m=1}^M (r^m\!\! +\!\! \gamma P^\pi \Phi^{M}_t \mathbf{w}_t^{m}\!\! -\!\! \Phi^{M}_t \mathbf{w}_t^{m}) (\mathbf{w}_t^{m} )^\top , \\
\partial_t \mathbf{w}_t^{m} = & \beta (\Phi^{M}_t)^\top (r^m + \gamma P^\pi \Phi^M_t \mathbf{w}^{m}_t - \Phi_t \mathbf{w}^{m}_t ) \, . \label{eq:rc-w-flow} \end{align}
The main result of this section is to show that, even in the absence of reward, the limiting distribution induced by random cumulant auxiliary tasks dynamics described in Equation~\eqref{eq:rc-phi-flow} is a non-zero subspace which, while dependent on the random reward function, is biased towards principal components of the transition matrix.
\begin{restatable}{theorem}{ThmDistribution}\label{thm:distribution} For fixed $M \in \mathbb{N}$, let the random rewards $(r^m)_{m=1}^M$ and weights $(\mathbf{w}^m)_{m=1}^M$ be as defined above, let $\alpha=1$, and consider the representation dynamics in Equation~\eqref{eq:rc-phi-flow}, with weights fixed throughout training ($\beta=0$). Let $\Sigma$ denote the covariance matrix of the random cumulant distribution. Then \begin{align}
&\lim_{M \rightarrow \infty} \sum_{m=1}^M r^m (\mathbf{w}^m)^\top \overset{D}{=} Z_\Sigma \sim \mathcal{N}(0, \Sigma), \text{ and } \nonumber \\
& \lim_{M \rightarrow \infty} \Phi^M_t \overset{D}{=} \exp(-t(I - \gamma P^\pi ))(\Phi_0 - (I - \gamma P^\pi)^{-1} Z_\Sigma) \nonumber \\
& \qquad\qquad+ (I - \gamma P^\pi)^{-1} Z_\Sigma \nonumber \;. \end{align}
As the columns of $Z_\Sigma$ are mean-zero, uncorrelated, with covariance matrix $\Sigma$, the limiting distribution of each column of $\Phi_\infty = \lim_{t \rightarrow \infty} \lim_{M \rightarrow \infty} \Phi^M_t$ has covariance $\Psi\Sigma \Psi^\top$, where $\Psi$ is the resolvent $(I - \gamma P^\pi)^{-1}$. \end{restatable}
The proof of this result is obtained via a similar argument as in the case of Theorem~\ref{thm:infinite-heads}, though care must be taken to account for the non-zero and non-uniform rewards, and can be found in Appendix~\ref{sec:proofs-rl-dynamics}. Importantly, while $Z_\Sigma$ may have zero \textit{expected value} over its initialization distribution, with probability one it will not be zero. As a result, we obtain convergence to a limiting representation $\Phi_\infty$ whose value depends on the randomly initialized cumulants and the transition structure of the environment. This limiting value will thus be biased towards the lower-index eigen-basis functions, but will also have non-zero dot product with higher-index EBFs, in contrast to our results in the zero-reward setting. However, while this limiting value might result in non-zero mass assigned to lower EBFs, the \textit{error term} of $\Phi_\infty - \Phi^M_t$ will inherit similar convergence properties as in the ensemble prediction setting, becoming dominated by the EBFs over time.
\begin{restatable}{corollary}{propSubspaceConvergenceRC}\label{prop:subspaceconvergence-rc} Under the feature flow \eqref{eq:rc-phi-flow} with $\mathbf{w}^m_t$ fixed at initialization for each $m = 1, \dots, M$ and Assumption~\ref{assume:value-function-conditions}, for almost all initializations $\Phi_0$, we have when $R^\pi = 0$
\begin{align*}
d(\lim_{M \rightarrow \infty} \langle \Phi^M_t - \Phi_\infty \rangle, \langle U_{1:K} \rangle) \rightarrow 0 \, ,\quad\text{as } t \rightarrow \infty.
\end{align*} \end{restatable}
Theorem~\ref{thm:distribution} indicates that the left-singular vectors of $\Psi\Sigma^{1/2}$ (or equivalently, the eigenvectors of $\Psi \Sigma \Psi^\top$) are key to understanding the effects of random cumulants on representations; we introduce the term \emph{resolvent singular basis functions} (RSBFs) to refer to these vectors in the canonical case $\Sigma = I$.
\keyinsight{ With random cumulant auxiliary tasks, under the assumptions of Theorem~\ref{thm:distribution} and Corollary~\ref{prop:subspaceconvergence-rc}, the distribution of the limiting representation does not collapse, and is characterized by the RSBFs of $P^\pi$, while the trajectory it follows to reach this subspace is determined by the EBFs of $P^\pi$. }
These decompositions of $P^\pi$ bear deep connections to prior work on feature learning. EBFs correspond to the eigendecomposition of the successor representation, which can be explicitly related to the proto-value functions described by \citet{mahadevan2009learning} when the transition matrix $P^\pi$ corresponds to that of a random walk policy \citep{machado2017eigenoption}. For symmetric $P^\pi$ we obtain an additional correspondence between EBFs and RSBFs, though we note that when $P^\pi$ is not symmetric the RSBFs may differ from both the EBFs and the singular value decomposition of the transition matrix $P^\pi$. We provide further discussion of RSBFs and comparisons against existing concepts in feature selection in Appendix~\ref{sec:feature-selection}.
The subspace given by the RSBFs has a further appealing connection to General Value Functions (GVFs). We provide additional details in Appendix~\ref{sec:bayes-opt}, where we show that RSBFs can be viewed as Bayes-optimal features in the sense that they minimize the expected value function approximation error given an isotropic Gaussian prior on an unknown reward function.
\subsection{Analysis of additional auxiliary tasks}
The infinite-task limit simplifies the analysis of a broad range of auxiliary tasks, and analogous results to Theorem \ref{thm:infinite-heads} can be easily derived for other tasks that involve value prediction for some discount factor and policy. We provide a summary of these results in Table \ref{tab:theory}, including their full statements and derivations in Appendix~\ref{apx:table-results}. We consider two additional classes of auxiliary task: predicting the values of multiple policies \citep{dabney2020value}, and predicting the value function of the current policy under multiple discount factors \citep{fedus2019hyperbolic}.
Under the \textbf{multiple policies} auxiliary task, the agent's objective is to learn a set of value functions $V^1, \dots, V^M$ such that $V^i(x) = \mathbb{E}_{\pi_i}[R^{\pi_i}(x) + \gamma P^{\pi_i}V^i(x)]$. We consider an ensemble prediction variant of this objective, where given a fixed set of $k$ policies, we train an ensemble of $M$ predictors $V^{1,1}, \dots, V^{m, 1}, \dots, V^{1,k}, \dots, V^{m, k}$, where $m=\frac{M}{k}$ and the value function $V^{i, j}$ is trained on policy $\pi_j$.
Under the \textbf{multiple discount factors} auxiliary task, the agent's objective is analogously to find $V^i(x) = \mathbb{E}_{\pi}[R^{\pi}(x) + \gamma_i P^{\pi}V^i(x)]$ for $\gamma_i \in \{\gamma_1, \dots, \gamma_k\}$. As with the multiple policies auxiliary task, we assign to each discount factor objective multiple prediction heads $V^{1,1}, \dots, V^{m, 1}, \dots, V^{1,k}, \dots, V^{m, k}$, where $m=\frac{M}{k}$ and the value function $V^{i, j}$ is trained on discount factor $\gamma_j$.
In both cases, under the conditions of the previous theorems, the dynamics of the ensemble converge to the dynamics induced by the mean of the set of auxiliary tasks, implying the counter-intuitive result that training with multiple auxiliary tasks does not provide additional utility over the single task setting. This apparent shortcoming stems from the large number of auxiliary prediction objectives all operating on the same `feature subspace' of $\mathbb{R}^d$, essentially forcing any dimension of that subspace to fit the mean target value. It can be modified to more closely resemble practical settings by ensuring that the weights corresponding to each auxiliary task $\pi_i$ or $\gamma_i$ are initialized in \textit{orthogonal subspaces}, so that the vector space $V$ in which the representation evolves can be decomposed as the direct sum of the subspaces $V_i$ associated with each task $i$, $V = \oplus_{i\in[1, k]} V_i$. In this case, we obtain an analogous decomposition of the representation $\Phi$ and its corresponding dynamics, obtaining convergence to a direct sum of the limiting representation of each task. This suggests that the benefits of auxiliary tasks might be maximized by appropriate initialization schemes which encourage the representations learned for each task to be (linearly) independent.
\subsection{Dynamics in the infinite-width limit}
So far, the analysis of this chapter has studied an idealized feature-learning model where the features are represented by an extremely large matrix, and for which an update to the feature vector of one state will not influence the features of other states. This simplifies our analysis, but diverges from the function approximation regimes seen in practice, where a neural network is often used to construct the feature representation. We take a step towards the deep RL setting by considering a limiting case of DNN function approximation.
Recent results on neural networks in the limit of infinite width reveal that their learning dynamics are determined by a kernel, denoted the \emph{neural tangent kernel} (NTK) \citep{jacot2018neural}, described in more detail in Section~\ref{sec:bkgd-trajectory}. This allows us to apply the analysis of the previous sections to the infinite-width limit of neural networks, for which we obtain the following dynamics. \begin{equation}
\partial_t V_t = \Phi_{\Theta^{(L)}_\infty} (\gamma P^\pi - I)V_t \; . \end{equation} We now see that the kernel matrix $\Phi_{\Theta^{(L)}}$ influences the trajectory of the value function, rather than solely $P^\pi$ as previously. We can now directly quantify how the auxiliary losses influence the trajectory of the value function in the infinite-width regime, as we describe in the following theorem. \begin{theorem}\label{thm:ntk-dynamics} Let $f = (f^1, \dots, f^M): \mathcal{X} \rightarrow \mathbb{R}^M$ be computed by an $L$-layer neural network with layer widths $n_1, \dots, n_L=M$, parameters $\theta$, and Lipschitz nonlinearity $\sigma$. Let $f^1_t$ follow the Bellman-error minimizing flow of Eq \eqref{eq:value-function-ode}, and let the joint loss $\delta$ be of the form \begin{equation*}\delta(f) = \delta_1(f^1) + \delta_2(f^2, \dots, f^k)\;. \end{equation*} Under the conditions of Theorem 2 of \citet{jacot2018neural}, with the limiting NTK matrix $\Phi_{\Theta^{(L)}_\infty}$ defined therein, the dynamics $\partial_t f^1_{t}$ are independent of the values of $f^{2}_{t}, \dots, f^{k}_{t}$. Thus, for any set of auxiliary tasks $\delta_2$, number of heads $k > 1$, and auxiliary head values $f^2_t, \dots, f^m_t$, \begin{equation}
\lim_{n_1, \dots n_{L-1}\rightarrow \infty} \partial_t f^1_{t} = \Phi_{\Theta^{(L)}_\infty} \nabla_{f^1} \delta_1 (f_t^1) \;. \end{equation} \end{theorem}
This result highlights the importance of the shared feature dynamics between the network outputs in order for auxiliary tasks to influence value function learning. The initialization and dynamics of the NTK regime result in each head of the network evolving independently; essentially, each of the auxiliary tasks is operating on a mutually-orthogonal subset of the feature space, and as a result incorporating auxiliary tasks will not accelerate learning of the value function. This is in contrast to most DNN architectures used in deep RL, which tend to be relatively small and so exhibit significant interference (both positive and negative) between tasks. Two notable exceptions to this observation are categorical distributional reinforcement learning, where the softmax operator induces dependence between the different heads' loss functions, and random ensemble mixture (REM) \citep{agarwal2019striving}, where a random mixture is taken over an ensemble of value prediction heads before applying the TD loss. The analysis of the effect of this family of auxiliary tasks on representation dynamics in wide neural networks requires additional mathematical tools to account for their nonlinear output layer, however, and falls outside the scope of this chapter. Other parameterizations such as those studied by \citet{yang2021tensor} may further yield more interesting feature-learning dynamics even for the classes of auxiliary tasks studied in this chapter.
\section{Experiments}\label{sec:experiments}
In this section, we complement the previous theoretical results with empirical investigations in both tabular and deep reinforcement learning settings. These empirical results will motivate the directions taken in Chapter~\ref{chp:rep-learning}, where we will further explore both the sparse- and dense-reward setting.
\subsection{Feature generalization across the value-improvement path}\label{sec:feature-generalization}
Having established connections between the representations induced by auxiliary tasks and several decompositions of the environment transition operator, we now turn to the question of how useful these representations are to a reinforcement learning agent. In particular, we address how well representations learned under one policy \textit{generalize} under the policy improvement step to approximate future value functions, with particular attention paid to EBFs and RSBFs, the decompositions that feature in our earlier analysis.
\begin{figure}
\caption{Transfer of EBFs, RSBFs, and RFs across the value-improvement path of a chain MDP, with and without the value function as an additional feature.
}
\label{fig:chain-transfer}
\end{figure}
\begin{figure*}\label{fig:deep-rl}
\end{figure*} \begin{figure}\label{fig:naux}
\end{figure}
To address this question empirically we run tabular policy iteration on a stochastic chain MDP, yielding a sequence of policies $(\pi_j)_{j=1}^J$ and associated value functions $(V_j)_{j=1}^J$. We then compute EBFs and RSBFs associated with $P^\pi$, and compute the acute angle between $V_j$ and the subspace spanned by these features, for each $j \in [J]$; this is in fact equal to a generalization of the Grassmann distance for subspaces of unequal dimension \citep{ye2016schubert}. We also compare against a baseline of isotropic randomly-generated features. Full experimental details are provided in Appendix~\ref{sec:experiment-details}.
Results are given in the top row of Figure~\ref{fig:chain-transfer} for the case of four features; each individual heatmap plots Grassmann distances, with rows indexing the policy that generated the features, and columns indexing the policy yielding the target value function. In general, the RSBFs provide better transfer across policies in the improvement path relative to random features and EBFs. For times $j, j' \in [J]$, we observe that the Grassmann distance between the RSBFs of $P^{\pi_j}$ and the value function of $j'$, $V^{\pi_{j'}}$, increases as $|j - j'|$ does.
We also evaluate transfer when the vector $V^{\pi_j}$ is added to the set of features, in the bottom row of Figure~\ref{fig:chain-transfer}. This contains the subspace to which the value functions described in Proposition~\ref{prop:many-value-functions} converge, as the limiting solutions can be described as being of the form $V^{\pi_j} + u$ for $u \in \langle U_{1:K} \rangle$. This results in a significant improvement to the ability of the EBFs to fit future value functions, suggesting that representation learning objectives which \textit{only} seek to incorporate information about the transition dynamics, without including any reward information, may be sub-optimal for representing value functions of interest. Surprisingly, we find that this addition results in the EBFs for $\pi_j$ outperforming RSBFs specifically in predicting $V^{\pi_{j+1}}$. This can be observed in the upper off-diagonal of the EBF plot in Figure~\ref{fig:chain-transfer}. We conclude that the dynamics induced by TD updates may be particularly beneficial to transfer between policies in the value-improvement path, and further study of this phenomenon is a promising avenue for future work.
\subsection{Auxiliary tasks for large-scale environments with sparse rewards}\label{sec:deep-rl-aux}
We now turn our attention to deep reinforcement learning, particularly in the context of environments with sparse reward structure. Motivated by the theoretical results obtained in earlier sections, we study the effects of a variety of auxiliary tasks in this setting; our analysis indicates that random cumulants may be particularly effective in preventing representation collapse in such environments. Concretely, this section will seek to evaluate whether auxiliary tasks that incorporate non-zero reward signals can improve performance in sparse-reward tasks; Chapter~\ref{chp:rep-learning} will then investigate the mechanisms behind this in more depth. \hypothesis{Representation collapse}{incorporating non-zero reward in auxiliary tasks will improve the learned representation, and thereby improve performance, by preventing representation collapse in deep RL agents.\label{hyp:collapse}}
To evaluate Hypothesis~\ref{hyp:collapse}, we empirically evaluate the performance of a variety of agents on a representative subset of Atari environments. We modify a Double DQN agent \citep{van2016deep} with a variety of auxiliary tasks, including random cumulants (RC) \citep{dabney2020value}, random ensemble mixtures (REM) \citep{agarwal2019striving}, an ensembling approach \citep{anschel2017averaged}, and also compare with QR-DQN, a distributional agent \citep{dabney2018distributional}. Full details of these agents, including specific implementation details for deep RL versions of these auxiliary tasks, are given in Appendix~\ref{sec:experiment-details}.
We evaluate these agents on a series of Atari games from the Arcade Learning Environment \citep{bellemare2013arcade,machado2018revisiting}, comprising Montezuma's Revenge, Pong, MsPacman, Seaquest, and Q$^*$bert. In addition, we evaluate on a more challenging, sparse reward, version of Pong in which the agent does not observe negative rewards.\footnote{We attempted a similar modification of the other three dense reward games, but found no agent or configuration that was able to successfully learn on them. Full details, along with hyperparameters and results on these unsuccessful modifications, are given in Appendix~\ref{sec:experiment-details}.}
Figure~\ref{fig:deep-rl} shows the main results from these experiments. Recall from Section~\ref{sec:aux-dynamics} that the random cumulant auxiliary task causes the agent's representation to converge to the RSBFs of $P^\pi$ in idealized settings. We hypothesize that this auxiliary task will therefore improve agent performance over ensemble-based auxiliary tasks in sparse-reward environments. Our empirical results support our conjecture, with the random cumulant agent (DDQN+RC) generally performing well in the sparse-reward environments. Of particular note is the strong performance in Montezuma's Revenge. We expected reduced performance for DDQN+RC in the dense-reward games, but were surprised to observe improved performance here as well, with the exception of Seaquest. Finally, Figure~\ref{fig:naux} shows the result of a hyperparameter sweep over the number of auxiliary task heads, revealing relevant differences in the three methods considered. Overall, we find that random cumulants are a promising auxiliary task specifically in sparse-reward environments. In the following chapter, we will investigate an approach which adapts the random cumulant task to avoid interference in dense-reward tasks like Seaquest while preserving its ability to prevent representation collapse in sparse-reward environments.
\section{Conclusions}
This chapter has introduced a framework based on learning dynamics to analyze representations in reinforcement learning. This led to a variety of theoretical results concerning learning with and without the presence of auxiliary tasks, as well as several straightforward models for studying representation learning empirically. In particular, we neatly characterized the \textit{trajectory} taken by value functions and, in some cases, representations under TD learning dynamics. With this, we were able to thoroughly test a new hypothesis on the effectiveness of particular auxiliary tasks in sparse-reward environments, which led to improved understanding of representation learning in RL, as well as practical modifications to deep RL algorithms.
There are many natural follow-up directions to this work, some of which we will explore in the following chapters. One direction is to further develop the theory associated with the learning dynamics perspective, in order to (i) understand how additional types of auxiliary tasks, in particular auxiliary tasks that do not correspond to value functions, affect the representations in the learning models developed in this chapter, (ii) extend the learning models themselves to incorporate further aspects of large-scale learning scenarios, such as sample-based learning and state-visitation distribution corrections, and (iii) investigate other common learning dynamics, such as gradient TD methods \citep{sutton2008convergent} or policy gradient updates. In some cases, as with non-uniform state distributions, such extensions are straightforward. However, other less trivial extensions are crucial to improve the applicability of this model to practical settings. The current formulation of the learning dynamics model does not capture several important factors of practical deep RL training paradigms, leaving a gap between the exact, continuous-time representation updates of Theorem~\ref{thm:infinite-heads} and those of finite-width deep neural networks whose architectures endow the representation dynamics with particular inductive biases.
The following chapters will bridge this gap to extract useful methodological implications of this framework. First, Chapter~\ref{chp:rep-learning} will capitalize on our observation that sparse-reward environments encourage feature collapse to study the mechanisms by which networks progress along the value improvement path. It will propose an auxiliary task which avoids the pitfalls of the RCDQN objective, which can hinder progress in some dense-reward environments, and develop a more fine-grained view on the impact of non-stationarity on the plasticity of the learned representation. Next, Chapter~\ref{chp:gen-rl} will study the implications of Theorem~\ref{thm:infinite-heads} and Proposition~\ref{prop:many-value-functions} as they relate to generalization and interference. This discussion will take a complementary view of the presentation in this chapter by considering not the subspace spanned by the predicted value functions, but rather the properties of the approximation error incurred by the agent.
\chapter{Capacity loss} \label{chp:rep-learning} \minitoc
\section{Introduction}
The reinforcement learning problem presents a number of difficulties not present in supervised learning. Chief among these is the \textit{non-stationarity} of the learning objective. RL agents must solve a sequence of similar prediction problems as they iteratively improve their prediction accuracy and their policy \citep{dabney2020value}, and sufficient improvement in each subproblem in this sequence is necessary to progress to the next subproblem. Ideally a solution to one subproblem should \textit{generalize} to the subsequent one, in the sense that the learned representation should enable rapid adaptation. At minimum, fitting one prediction problem should not hinder performance on the next. This challenge was alluded to in Chapter~\ref{chp:rl-dynamics}; we now confront it explicitly, focusing particularly on its effect in RL agents using deep neural networks as function approximators. Prior works have shown that the early training period of a network is critical for its ability to achieve optimal performance on a task \citep{frankle2020the, achille2018critical}, and that early exposure to diverse data is crucial for optimal generalization performance \citep{ash2020warm, berariu2021study}. This suggests that the non-stationary target functions prevalent in reinforcement learning may be particularly ill-suited to function approximation by DNNs trained with gradient-based optimization, particularly in sparse-reward environments where the network receives no learning signal in this critical phase. Indeed, several prior works studying the effect of re-initializing network parameters in reinforcement learning have found that this enables agents to break through plateaus \citep{fedus2020catastrophic} and improve generalization performance \citep{igl2021transient}. This chapter will study one mechanism driving these phenomena, and lay the groundwork for a more thorough empirical analysis of the effect of prediction targets on generalization in Chapter~\ref{chp:gen-rl}.
\subsection{Motivating example: the zero function}
We follow Chapter~\ref{chp:rl-dynamics} in framing representation-learning in terms of a learned feature layer $\phi_{\bm{\theta}}(\mathbf{x})$ which is composed with some simple (usually linear) output function to predict state-action values. Previously, we restricted ourselves to settings where the representation's dynamics did not depend on the particular value of the linear map $\mathbf{w}$, as the ensuing dynamics became intractable to analyze theoretically when $\Phi$ and $\mathbf{w}$ were coupled. We now take an empirical approach to study what happens when the dynamics of $\Phi$ take more complex forms. To do so, we will focus on the location of a given set of parameters in the optimization landscape. This can provide some information not available in the outputs of a feature layer, as there are many different sets of parameters which correspond to the same function outputs, but which may occupy locations in the loss landscape with very different properties, making some easier to use as a starting point for learning than others.
To provide a concrete illustration of this, consider a single hidden layer ReLU network. We let $\mathbf{x} \in \mathbb{R}^d$ denote the observation, and $\bm{\theta} \in \mathbb{R}^{k \times d}$ the network's first layer weights. We then write $\phi_{\bm{\theta}}(\mathbf{x}) = \sigma(\bm{\theta}^\top \mathbf{x})$ where $\sigma(y) = \max(0, y)$ is applied entry-wise to the vector $\bm{\theta} ^\top \mathbf{x}$. The output of the network is then $f_{\bm{\theta}}(\mathbf{x}) = \langle \mathbf{w}, \phi_{\bm{\theta}}(\mathbf{x}) \rangle$. Suppose we have encountered the (unlikely) scenario that all of the parameters $\bm{\theta}$ are initialized to have negative value, the input distribution is only supported on non-negative vectors, and $\mathbf{w}$ is initialized to be exactly zero. Then, recalling the framework of Chapter~\ref{chp:rl-dynamics}, any regression objective with target $\mathbf{y}$ will face the following dynamics.
\begin{align}
\nabla_{\bm{\theta}} (\mathbf{y} - f_{\bm{\theta}}(\mathbf{x}))^2 &= (\mathbf{y} - f_{\bm{\theta}}(\mathbf{x}) ) \cdot ( \mathbf{w} \cdot \mathbf{0}) = \mathbf{0} \\
\nabla_{\mathbf{w}} (\mathbf{y} - f_{\bm{\theta}}(\mathbf{x}))^2 &= (\mathbf{y} - f_{\bm{\theta}}(\mathbf{x}) ) \cdot \phi_{\bm{\theta}}(\mathbf{x}) = \mathbf{0} \end{align}
As a result, the gradient descent trajectory is locked in at the current parameters and will not be able to escape them. In contrast, if $\bm{\theta}^\top \mathbf{x}$ contains non-negative elements, then even if $\mathbf{w}$ is initialized to equal zero and the network still outputs the same function value, the gradients for $\mathbf{w}$ will be non-zero and so the gradient descent trajectory will be able to escape this initialization. Thus, under one parameterization of the zero function the network is never able to update its predictions, while under the other it can do so with relative ease. Relating this back to the analysis of the previous chapter, this condition is equivalent to saying that we would like at least one of $\partial_t \bm{\theta}_t$ and $\partial_t \mathbf{w}_t$ to be non-zero. This failure mode can be considered a special case of \textit{mutually frozen weights} \citep{zilly2021on}, a more general failure mode whereby saturated units prevent networks from making learning progress.
\subsection{Contributions} The principal hypothesis of this chapter is that over the course of training, deep RL agents lose some of their capacity to quickly fit new prediction tasks, and in extreme cases this capacity loss prevents the agent entirely from making learning progress. In other words, capacity loss results in representations which \textit{fail to generalize} to the value functions induced by new policies and new TD targets in the value improvement path. We will show that the ability of deep RL agents to fit new target functions declines over the course of training in several environments from the Atari suite \citep{bellemare2013arcade} and on a non-stationary supervised prediction task. We take a deeper look at the representation collapse phenomenon, where the feature outputs for every state in the environment inhabit a low-dimensional -- or possibly even zero -- subspace, extending the empirical analysis of Chapter~\ref{chp:rl-dynamics}. Finally, we provide evidence that representation collapse is a key factor in agents' failure to make performance improvements in sparse-reward environments. We saw in the previous chapter that predicting \textit{random cumulants} can prevent feature collapse in sparse-reward environments; however, it can also lead to interference in dense-reward environments, harming performance.
To address this limitation we propose a simple regularization technique, Initial Feature Regularization (\text{InFeR}\xspace), to prevent capacity loss in both dense- and sparse-reward environments. In this approach, we regress a set of feature projection heads to their values at initialization. While the regression targets used in \text{InFeR}\xspace can be viewed as an auxiliary task, the method does not incorporate any additional environment information into the learning objective. This allows us to isolate the effect of capacity loss on agent performance without introducing confounding from the interaction between the auxiliary environment information and the agent's representation. We find that this regularization scheme mitigates capacity loss in sequential supervised prediction tasks, and that RL agents trained with \text{InFeR}\xspace avoid egregious cases of representation collapse in sparse reward environments. We further show that \text{InFeR}\xspace works by regularizing the network's learning dynamics, and conduct ablation studies to better understand this mechanism.
One striking take-away from these results is that agents trained on so-called `hard exploration' games such as Montezuma's Revenge can attain significant improvements over existing competitive baselines \textit{without} using smart exploration algorithms. This suggests that the poor performance of deep RL agents in sparse-reward environments is not \textit{solely} due to inadequate exploration, but rather also in part due to poor representation learning as the network `overfits' to predicting the zero function. There are thus two levers one can pull to improve performance: increasing the amount of reward signal the agent encounters via improved exploration, and ensuring that it is able to effectively update its predictions when it does receive reward signal. These two levers produce complex feedback loops in practical environments: more accurate predictions produce more informative behaviours, and more informative behaviours provide information needed to improve the network's predictions. The sparse reward settings studied in this chapter highlight the necessity of network plasticity in enabling this virtuous cycle. Chapter~\ref{chp:gen-rl} will study a more nuanced form of overfitting to early targets that arises even in dense-reward environments, extending the insights from this chapter to a broader range of settings.
\section{Learning capacity in neural networks}
\label{sec:learning-capacity} Each time an RL agent discovers a new source of reward in its environment or improves its ability to obtain this reward, the value function that it seeks to predict changes. Over the course of learning, a value-based RL agent attempts to solve a long sequence of target prediction problems, though in the case of value iteration-style algorithms, such agents may only partially solve each problem before the next is constructed. Studies of neural networks in supervised learning suggest that this sequential fitting of new targets may be harmful to a neural network's ability to adapt to new objectives \citep{ash2020warm}. This presents a concern for deep RL, where networks are trained to fit a constantly-changing target and may need to quickly make significant changes to their predictions even late in the training process. In this section, we show that training on a sequence of prediction targets can indeed lead to a reduced ability to fit new objectives in deep neural networks, a phenomenon that we term \textit{capacity loss}, and confirm that capacity loss can arise in value-based deep RL agents. Further, we provide evidence that an agent's inability to quickly update its value function to distinguish states presents a barrier to performance improvement in deep RL agents trained on environments from the Atari suite.
\subsection{Target-fitting capacity} The parameters of a neural network determine not just the network's current outputs, but also how these outputs will evolve over time via the magnitude and structure of its gradients and the curvature of the loss landscape. This evolution determines the \textit{capacity} of the network to learn to predict new targets by following an optimization trajectory. We therefore view the agent's representation in terms of the optimization dynamics that it induces. In particular, we are interested in identifying when an agent's current parameters are flexible enough to allow it to perform gradient updates that meaningfully change its predictions based on new reward information in the environment or evolving bootstrap targets, a notion formalized in the following definition.
\begin{definition}[Target-fitting capacity]\label{def:tf_cap} Let $P_X \in \mathscr{P}(X)$ be some distribution over inputs $X$ and $P_\mathcal{F}$ a distribution over a family of functions $\mathcal{F}$ with domain $X$. Let $\mathcal{N} = (g_\theta, \mathbf{\theta}_0)$ represent the pairing of a neural network architecture with some initial parameters $\mathbf{\theta}_0$, and $\mathcal{O}$ correspond to an optimization algorithm. We measure the \textit{capacity} of $\mathcal{N}$ under the optimizer $\mathcal{O}$ to fit the data-generating distribution $\mathcal{D}=(P_X, P_\mathcal{F})$ as follows:
\begin{equation}
\mathcal{C}(\mathcal{N}, \mathcal{O}, \mathcal{D}) = \mathbb{E}_{f \sim P_{\mathcal{F}}}[ \mathbb{E}_{x \sim P_X}[ (g_{\mathbf{\theta}'}(x) - f(x))^2 ]] \quad \;\text{where} \; \mathbf{\theta}' = \mathcal{O}(\mathbf{\theta}_0, P_X, f) \, .
\end{equation}
\end{definition}
\begin{figure}
\caption{Deeper analysis on a sequential MNIST prediction setting showing that target-fitting capacity sees the greatest reduction in low capacity networks with ReLU units.}
\label{fig:mnist-cap}
\end{figure}
This definition of capacity measures the ability of a network to reach a new set of targets within a limited optimization budget from its current parameters and optimizer state. The choices of optimization budget and target distribution are left as free variables, and different choices result in different notions of capacity. In reinforcement learning we ultimately care about the network's ability to fit its Bellman targets quickly, particularly to the extent that it allows the greedy policy to obtain high reward. However, only evaluating a network's ability to fit its Bellman targets within a small budget will not necessarily be a useful measure of capacity: for example, a network which can only output the zero function will attain low Bellman error immediately on a sparse-reward environment, but will fail to produce useful updates to improve its policy. Our evaluations of this measure will assign longer training budgets and use target functions that are independent of the current network parameters to avoid these pathologies; the effect of this choice is explored further in Appendix~\ref{appx:cap-loss-supervised}.
The process of training a neural network to fit a set of labels must by necessity change some properties of the network. Works studying the information bottleneck principle \citep{tishby2015deep} for example, identify a compression effect of training on the latent representation, where inputs with similar labels are mapped to similar feature vectors. This compression can benefit generalization on the current task, but may make learning more difficult if in the future the network must distinguish between these inputs. In the face of the rapidly-changing nature of the targets used in value iteration algorithms, this compression has the potential to harm the learning process by impeding the network's ability to fit new targets rather than to help it. This motivates the first hypothesis of this section.
{\hypothesis{H1}{networks trained to iteratively fit a sequence of dissimilar targets will lose their capacity to fit new target functions within a fixed optimization budget compared to their capacity at initialization.}\label{hyp:capacity-loss}}
To evaluate {Hypothesis~\ref{hyp:capacity-loss}}, we construct a series of iterative prediction problems on the MNIST data set, a widely-used computer vision benchmark which consists of images of handwritten digits and corresponding labels. We first fit a series of labels computed by a randomly initialized target neural network $f_\theta$: we transform input-label pairs $(x,y)$ from the canonical MNIST dataset to $(x, f_\theta(x))$, where $f_\theta(x)$ is the network output. To generate a new task, we simply reinitialize the target network. Given a target function, we then train a student neural network for a fixed budget from the parameters obtained at the end of the previous iteration (using a random initialization for the first iteration), and repeat this procedure of target initialization and training thirty times. We use a subset of MNIST inputs of size 1000 to reduce computational cost. In these experiments we focus only on the ability of the network to fit its training data; a study of the effect of prediction targets on generalization is deferred to Chapter~\ref{chp:gen-rl}. We provide additional details in Appendix~\ref{appx:mnist-details}.
In Figure~\ref{fig:mnist-cap} we see that the networks trained on this task exhibit decreasing ability to fit later target functions under a fixed optimization budget. This effect is strongest in the smaller networks, matching the intuition that solving tasks which are more challenging for the network will result in greater capacity loss. We consider two other tasks in Appendix~\ref{appx:cap-loss-supervised}, as well as a wider range of architectures, obtaining similar results. We find in this broader set of evaluations that sufficiently over-parameterized networks (on the order of one million parameters for a task with one thousand data points) exhibit positive forward transfer; however, models which are not over-parameterized relative to the task difficulty consistently exhibit increasing error as the number of targets trained on grows.
This raises a question: are the deep neural networks used by value-based RL agents on popular benchmarks in the over- or under-parameterized regime? The examples here differ in an important way from the value iteration methods common in deep RL: we run our optimizer on each task long enough for the loss to converge before turning to the next task. In RL problems, the network will not have converged to the value of the current policy before the changes to the value function induce a policy improvement step. We therefore turn our attention to reinforcement learning, to evaluate whether the phenomena observed in the idealized supervised learning setting also hold in value-based RL agents.
\hypothesis{H2}{the non-stationary prediction problems in value-based deep RL also result in capacity loss.}
\begin{figure}
\caption{Neural networks exhibit a decline in their ability to fit random network outputs over the course of training in two demonstrative Atari environments.}
\label{fig:atari-cap}
\end{figure} To evaluate this hypothesis, we train network checkpoints stored over the course of training to fit randomly generated target functions. We provide full details of this procedure in Appendix~\ref{appx:atari}. We generate target functions by randomly initializing a neural network with an identical architecture to the agent's, and use the outputs of this network as targets for regression. We then load initial parameters from an agent checkpoint at some time $t$, sample inputs from the replay buffer, and regress on the random target function evaluated on these inputs. We then evaluate the mean squared error after training for fifty thousand steps. We consider a DQN \citep{mnih2015human}, a QR-DQN \citep{dabney2018distributional}, and a Rainbow agent \citep{hessel2018rainbow}. We observe in all three cases that as training progresses agents' checkpoints obtain increasing error after training to fit randomly generated targets; due to space limitations we only show two representative environments where this phenomenon occurs in Figure~\ref{fig:atari-cap}, and defer the full evaluation to Appendix~\ref{appx:tf-capacity-atari}.
\keyinsight{When neural networks are trained to predict a sequence of challenging target functions, they get progressively worse at fitting these targets. At least some RL settings exhibit this property, resulting in \textit{capacity loss} over the course of training.}
\subsection{Representation collapse and performance} The notion of capacity in Definition~\ref{def:tf_cap} measures the ability of a network to \textit{eventually} represent a given target function. This definition reflects a number of intuitions about capacity: that networks which are good at fitting only a narrow range of targets should have lower capacity than more adaptable ones, and that capacity should decrease over the course of training as the network becomes more specialized. However, this quantity is computationally expensive to compute and is dependent on the choice of target function class.
We now introduce an alternative measure of capacity which captures a network's ability to quickly adapt to changes in the target function, while being significantly cheaper to compute than the more precise measure introduced in Definition~\ref{def:tf_cap}. We call this notion of capacity the \text{feature rank}\xspace, as it corresponds to an approximation of the rank of a feature embedding. Intuitively, the \text{feature rank}\xspace measures how easily states can be distinguished by updating only the final layer of the network. This notion of state similarity is particularly relevant to sparse-reward environments, where policy improvement depends on the agent's ability to distinguish a handful of rewarding states from the vast non-rewarding majority. It also bears close resemblance to the implicit underparameterization phenomenon \citep{kumar2021implicit} studied previously, allowing us to evaluate how well these prior notions of capacity capture a network's ability to change its predictions and improve its performance.
\begin{definition}[\text{Feature rank}\xspace] Let $\phi: \mathcal{X} \rightarrow \mathbb{R}^d$ be a feature mapping. Let $\mathbf{X}_n \in \mathcal{X}^n$ be a set of $n$ states in $\mathcal{X}$ sampled from some fixed distribution $P$. Fix $\varepsilon \ge 0$, and let $\phi(\mathbf{X}_n) \in \mathbb{R}^{n \times d}$ denote the matrix whose rows are the feature embeddings of states $x \in \mathbf{X}_n$. Let $\textnormal{SVD}(M)$ denote the multiset of singular values of a matrix $M$. Then the $\text{feature rank}\xspace$ of $\phi$ given input distribution $P$ is defined to be \begin{equation}\label{eq:eff-dim}
\rho(\phi, P, \varepsilon) = \lim_{n \rightarrow \infty} \mathbb{E}_{\mathbf{X}_n \sim P}[|\{\sigma \in \textnormal{SVD} \bigg (\frac{1}{\sqrt{n}}\phi(\mathbf{X}_n) \bigg ) \mid \sigma > \varepsilon \} | ] \, \end{equation} for which a consistent estimator can be constructed as follows \begin{equation}\label{eq:eff-dim-samples}
\hat{\rho}_n(\phi, \mathbf{X}, \varepsilon) = |\{\sigma \in \textnormal{SVD} \bigg (\frac{1}{\sqrt{n}}\phi(\mathbf{X}) \bigg ) \mid \sigma > \varepsilon \} | \, . \end{equation} \end{definition} The $\text{feature rank}\xspace$ is equal to the dimension of the subspace spanned by $\phi(\mathcal{X})= \{\phi(x) \mid x \in \mathcal{X}\}$ when $\varepsilon=0$ and the state space $\mathcal{X}$ is finite. For $\varepsilon > 0$, it throws away small components of the feature matrix. We show that $\rho$ is well-defined and that $\hat{\rho}_n$ is a consistent estimator in Appendix~\ref{appx:consistency}.
Our analysis of the \text{feature rank}\xspace resembles that of \citet{kumar2021implicit}, but differs in two important ways: first, our estimator does not normalize by the maximal singular value. This allows us to more cleanly capture \textit{representation collapse}, where the network features, and thus also their singular values, converge to zero. Second, we are interested in the capacity of agents with unlimited opportunity to interact with the environment, rather than in the data-limited regime. We compare our findings on feature rank against the \textit{srank} used in prior work in Appendix~\ref{appx:cap-loss-supervised}. \begin{figure}\label{fig:effdim_vanilla}
\end{figure}
We proceed to empirically evaluate the feature rank of various deep RL agents trained on games from the Atari suite to study its evolution over the course of training. We train a double DQN (DDQN) agent, a quantile regression (QRDQN) agent, and a double DQN agent with an auxiliary random cumulant prediction task (RC-DQN) \citep{dabney2020value}, on environments from the Atari suite, then evaluate $\hat{\rho}_n$ with $n=5000$ on agent checkpoints obtained during training. We revisit two of the environments studied in Chapter~\ref{chp:rl-dynamics}: Montezuma's Revenge (sparse reward), and Pong (dense reward), deferring two additional environments, a sparsified version of Pong in which the agent does not receive negative reward when its opponent scores, and Seaquest (dense reward, but more challenging than Pong), to Appendix~\ref{appx:feature-rank-atari}. We run 3 random seeds on each environment-agent combination.
We visualize agents' \text{feature rank}\xspace and performance in Figure~\ref{fig:effdim_vanilla}. Our findings confirm that the RC-DQN objective prevents representation collapse, as previously conjectured. More generally, non-trivial prediction tasks, either value prediction in the presence of environment rewards or auxiliary tasks, lead to higher \text{feature rank}\xspace. Unlike in the case of target-fitting capacity, \text{feature rank}\xspace does not decline monotonically, indicating that it measures a subtly different notion of capacity than simply an agent's ability to fit new target functions. This subtlety will be explored further in Chapter~\ref{chp:gen-rl}, where it will be interpreted as incorporating a particular inductive bias into the network structure. In Montezuma's Revenge, the higher \text{feature rank}\xspace induced by RC-DQN corresponds to higher performance, but as we saw in Chapter~\ref{chp:rl-dynamics}, this auxiliary loss can have a detrimental effect on learning progress in complex, dense-reward games such as Seaquest, presumably due to interference between the random rewards and the true learning objective. Unlike in target-fitting capacity, we only see a consistent downward trend in sparse-reward environments, where a number of agents, most dramatically QRDQN, exhibit representation collapse. We discuss potential mechanisms behind this trend in Appendix~\ref{appx:feature_theory}. We conjecture that in practice, the \text{feature rank}\xspace is a better measure of the inductive bias of the network than it is a measure of capacity, but that it can accurately and cheaply detect extreme instances of feature collapse which may impede learning.
\begin{figure}\label{fig:capacity-update}
\end{figure}
While the many moving parts in deep RL algorithms make it difficult to isolate the causal effect of a single representation property on performance, Figure~\ref{fig:capacity-update}a reveals a correlation between learning progress and \text{feature rank}\xspace on challenging games where agents fail to achieve human-level performance. We see this correlation in both a Rainbow \citep{hessel2018rainbow} agent, and an agent trained with the regularizer we will introduce in the coming section whose precise form is not relevant to this discussion. Further, in all of our evaluations we find that agents whose representations have collapsed do not make learning progress. This is best exemplified by the learning curves shown in Figure~\ref{fig:capacity-update}b, which highlights a particularly unlucky random seed that did not observe a point being scored during its initial random exploration period and experienced representation collapse. Eventually, after several million training frames, its $\text{feature rank}\xspace$ increases dramatically, and shortly \textit{after} this occurs the agent solves the task. We conclude that in its extreme form, representation collapse appears to completely prevent learning progress, but that the relationship between learning progress and \text{feature rank}\xspace in less extreme cases is complex, as capacity is one of many factors influencing performance in RL. In other words: capacity is a \textit{necessary} but not \textit{sufficient} condition for agents to make progress.
\keyinsight{Sparse-reward environments induce \textit{feature collapse}, whereby the representation converges to a low-dimensional subspace and `overfits' to the zero function. While it is possible to recover from feature collapse, agents fail to take successful policy improvement steps until this recovery occurs.}
\section{\text{InFeR}\xspace: mitigating capacity loss with feature regularization} \label{sec:pyoi}
The previous section showed that capacity loss can arise in online deep RL algorithms, and in some cases appears to be a bottleneck to performance. We now consider how it might be mitigated, and whether explicitly regularizing the network to preserve its initial capacity improves performance in environments where representation collapse occurs. Our approach involves a function-space perspective on regularization, encouraging networks to preserve their ability to output linear functions of their initial features. It further yields insight into the role of capacity loss in deep RL by isolating the effect of capacity on agent performance independent of the agent's exploration policy or auxiliary learning signals.
\subsection{Feature-space regularization}
Much like parameter regularization schemes seek to keep {parameters} close to their initial values, we wish to keep a network's ability to fit new targets close to its initial value. We motivate our approach with the intuition that a network which has preserved the ability to output functions it could represent at initialization should be better able to adapt to new targets. To this end, we will regress a set of network {outputs} towards the values they took at initialization. Our method, Initial Feature Regularization (\text{InFeR}\xspace), applies an $\ell_2$ regularization penalty on the output-space level by regressing a set of auxiliary network output heads to match their values at initialization. Similar perspectives have been used to prevent catastrophic forgetting in continual learning \citep{benjamin2018measuring}, though in our case we care about the functional form of the network outputs, and not preserving the outputs associated with specific past inputs.
In our approach, illustrated in Figure~\ref{fig:effdim_pyoi}, we begin with a fixed deep Q-network with parameters $\theta$, and modify the network architecture by adding $k$ auxiliary linear prediction heads $g_i$ on top of the feature representation $\phi_\theta$. We take a snapshot of the agent's parameters at initialization $\theta_0$, and use the outputs of the $k$ auxiliary heads under these parameters as auxiliary prediction targets. We then compute the mean squared error between the outputs of the heads under the current parameters $g_i(x; \theta_t)$ and their outputs at initialization $g_i(x; \theta_0)$. This approach has the interpretation of preserving subspaces of the features that were present at initialization. Because a randomly initialized network tends to produce low-magnitude outputs, a scaling factor $\beta$ ensures that the regularizer target value is on a comparable scale to that of the TD targets. This results in the following form of our regularization objective, where we let $\mathcal{B}$ denote the replay buffer sampling scheme used by the agent: \begin{equation}
\mathcal{L}_{\text{InFeR}\xspace}(\theta, \theta_0; \mathcal{B}, \beta) = \mathbb{E}_{x \sim \mathcal{B}} \bigg [\sum_{i=1}^k (g_i(x; \theta) - \beta g_i(x; \theta_0) )^2 \bigg ] \; . \end{equation}
\begin{figure}\label{fig:effdim_pyoi}
\end{figure}
The loss can then be additively combined with any RL or supervised loss function, scaled by a weighting coefficient $\alpha$.
We first investigate whether this regularization scheme does indeed preserve capacity. To do so, we replicate the non-stationary MNIST prediction task studied in the previous section, but now incorporate \text{InFeR}\xspace. We observe in Figure~\ref{fig:effdim_pyoi} that the \text{InFeR}\xspace objective almost entirely eliminates capacity loss in a network whose performance would otherwise degrade to that of random guessing; while it is not a silver bullet, we show in Appendix~\ref{appx:infer-mnist} that this effect consistently occurs across a range of architectures and target classes. We show in Appendix~\ref{appx:feature-rank-atari} that similar phenomena occur in RL: \text{InFeR}\xspace tends to increase the \text{feature rank}\xspace of agents trained on the Atari domain over the entire course of training; we study the early training period in Appendix~\ref{appx:tf-capacity-atari}. Our findings in both settings suggest that this form of regularization may prove fruitful in a variety of continual and reinforcement learning contexts beyond the ALE benchmark studied in the remainder of this chapter.
Our analysis of RL agents trained with \text{InFeR}\xspace is made particularly insightful by the fact that \text{InFeR}\xspace does not incorporate any additional information from the environment or induce any sophisticated exploration behaviour beyond that deployed by the agent it is applied to. As a result, it allows us to isolate the effect of capacity loss on performance. If adding the \text{InFeR}\xspace objective improves performance, it can only be due to its effect on the agent's representation, or some knock-on effect thereof. We evaluate the effect of incorporating this loss in both DDQN \citep{van2016deep} and Rainbow \citep{hessel2018rainbow} agents, and include the relative performance improvement obtained by the \text{InFeR}\xspace agents over Rainbow on 57 games from the Atari 2600 suite in Figure~\ref{fig:effdim_pyoi}, deferring the comparison to DDQN, where the regularizer improved performance slightly on average but only yielded significant improvements on sparse-reward games, to Appendix~\ref{appx:atari}. We observe a net improvement over the Rainbow baseline by incorporating the \text{InFeR}\xspace objective, with significant improvements in games where agents struggle to obtain human performance. The evaluations in Figure~\ref{fig:effdim_pyoi} are for $k=10$ heads with $\beta=100$ and $\alpha=0.1$, and we show the method's robustness to these hyperparameters in Appendix~\ref{appx:hypers}.
The striking improvement obtained in the sparse-reward Montezuma's Revenge environment raises the question of whether such results can be replicated in other RL agents. We follow the same experimental procedure as before, but now use the DDQN agent; see Figure~\ref{fig:effdim_pyoi}. We find that adding \text{InFeR}\xspace to the DDQN objective produces a similar improvement as does adding it to Rainbow, leading the DDQN agent, which only follows an extremely naive $\epsilon$-greedy exploration strategy and obtains zero reward at all points in training, to exceed the performance of the noisy networks approach taken by Rainbow in the last 40 million training frames. This leads to two intriguing conclusions: first, that agents which are explicitly regularized to prevent representation collapse \textit{can} make progress in sparse reward problems without the help of good exploration strategies; and second, that this form of regularization yields significantly larger performance improvements in the presence of additional algorithm design choices that are designed to speed up learning progress.
\subsection{Understanding how \text{InFeR}\xspace works}
Having observed that our regularizer both improves performance and mitigates capacity loss, we now take a closer look into the mechanisms by which it may enable learning progress. While \text{InFeR}\xspace improves performance \textit{on average} across the Atari games, it does not do so uniformly: its improvements are concentrated principally on games where the Rainbow agent performs significantly below the human baseline. It further clearly slows down progress in a subset of environments such as Asteroids and Jamesbond. Without a deeper analysis, it is not immediately obvious what makes games like Jamesbond and Asterix, where \text{InFeR}\xspace reduces performance, different from Seaquest and Berzerk. We now take a closer look at the mechanisms by which \text{InFeR}\xspace is shaping the agent's representation in the hopes of explaining this differential effect. We consider two hypotheses.
\hypothesis{1}{\text{InFeR}\xspace improves performance by preserving a random subspace of the representation that the final linear layer can use to better predict the value function. The effect of the regularizer on other aspects of the representation learning dynamics does not influence performance.\label{hyp:regularizer} }
Hypothesis~\ref{hyp:regularizer} implies that having access to a random feature vector can improve an agent's performance even in the absence of any regularization over deeper network layers. It assumes that having a wider variety of basis functions for the final linear layer to approximate the value function will benefit the network's adaptability. To evaluate Hypothesis~\ref{hyp:regularizer}, we concatenate a single dimension of the output of a randomly initialized network to the feature outputs of the network used to learn the Q-function, and train a linear layer on top of these joint learned and random features. We compare its performance with that of an identical network architecture with a single \text{InFeR}\xspace auxiliary head. If Hypothesis~\ref{hyp:regularizer} were true, then we would expect this architecture to perform comparably to the \text{InFeR}\xspace agent, as the final linear layer has access to a randomly initialized feature subspace of equal dimension to that regularized by \text{InFeR}\xspace. If not, then we would expect the performance of the agents with access to the random features to be comparable to that of the vanilla Rainbow agents. Figure~\ref{fig:double} shows that the latter occurs, confirming that the effect of \text{InFeR}\xspace on earlier layers is crucial to its success.
But what exactly is the effect of \text{InFeR}\xspace on earlier layers? In order to preserve its initial outputs, a network is limited in the extent to which it can modify the outputs of intermediate layers. We conjecture that \text{InFeR}\xspace limits the degrees of freedom with which a network can overfit its representation in response to early target-fitting objectives, which may also reduce the flexibility of the network to make the changes necessary to fit the current value function. In cases where representation collapse is not a concern, this will have the effect of slowing down progress. In such cases, increasing the dimension of the layer to which we apply \text{InFeR}\xspace should give the network more degrees of freedom to fit its targets, and so reduce the performance gap induced by the regularization. \begin{figure}\label{fig:double}
\end{figure}
\hypothesis{2}{\text{InFeR}\xspace slows down the rate at which the learned features at every layer of the network can drift from their initialization in function space, regularizing the learning dynamics of the entire network. The precise subspace spanned by the auxiliary weights is not directly useful.\label{hyp:subspace}}
We test this hypothesis by doubling the width of the penultimate network layer and comparing the performance of \text{InFeR}\xspace and Rainbow on games where \text{InFeR}\xspace hurt performance in the original network. We see in Figure~\ref{fig:double} that increasing the network's size reduces, eliminates, or in some cases reverses the performance gap induced by \text{InFeR}\xspace in the smaller architecture. We therefore conclude that the principal mechanism by which \text{InFeR}\xspace affects performance is by regularizing the entire network's optimization dynamics. This finding has intriguing implications for the optimal network size in reinforcement learning problems, suggesting that given suitable regularization, performance in some environments may be improved simply by increasing the network size.
\section{Conclusions}
This chapter has identified a fundamental challenge facing deep RL agents: loss of the capacity to distinguish states and represent new target functions over the course of training. We have shown that this phenomenon is particularly salient in sparse-reward settings, in some cases leading to complete collapse of the representation and preventing the agent from ever making learning progress. To address this, we proposed a novel regularizer, \text{InFeR}\xspace, with the goal of preserving capacity, yielding improved performance across a number of tasks in which deep RL agents have historically struggled to match human performance. Further investigation into this method suggests that it is performing a form of function-space regularization on the neural network, and that settings where it appears to reduce performance are actually instances of under-parameterization relative to the difficulty of the task. Particularly notable is the effect of incorporating \text{InFeR}\xspace in the hard exploration game of Montezuma's Revenge: its success here suggests that effective representation learning can allow agents to learn good policies in sparse-reward environments even under naive exploration strategies.
Our findings open up a number of exciting avenues for future work in reinforcement learning and beyond to better understand how to preserve plasticity in non-stationary prediction tasks. For example, a deeper study into the mechanisms by which capacity is lost similar to that of \citet{zilly2021on} could provide further practically useful insights for the design of optimization algorithms and regularizers in deep learning. Of particular interest is the development of methods which preserve network plasticity. Since the submission of the paper on which this chapter is based, exciting related work has emerged proposing optimization \citep{dohare2021continual} and resetting \citep{nikishin2022primacy, zaidi2022does} methods to improve network plasticity.
One mysterious finding that we have not yet fully explained is the nuanced relationship between \text{feature rank}\xspace, capacity, and generalization. We observed that \text{feature rank}\xspace and target-fitting capacity are often correlated, but that this is not always the case: in some cases, an increase in the \text{feature rank}\xspace of a network was accompanied by a decreased ability to fit `structured' families of target functions, such as those given by the outputs of randomly initialized neural networks. Recent work has confirmed a similarly nuanced relationship between \text{feature rank}\xspace and performance in offline RL \citep{gulcehre2022empirical}. This observation suggests that the notion of capacity loss we discussed previously is multi-faceted: over the course of training, networks can develop inductive biases which make them better suited to fitting certain function classes than others. But why should the neural network, which is after all trained via bootstrapping on its own outputs, see a \textit{decreased} ability to fit the outputs of other initializations of the same architecture as it improves its ability to linearly disentangle states? Chapter~\ref{chp:gen-rl} will answer this question, bringing to the fore the question of how the learning dynamics discussed thus far come to bear on generalization in deep RL.
\chapter{Interference and generalization}
\label{chp:gen-rl} \minitoc \section{Introduction} Capacity loss can be viewed as an extreme form of overfitting, where a neural network reduces not just its current performance on a set of related targets to the current training task, but also its ability to fit these targets even after many optimization steps. This chapter studies a weaker notion of overfitting, whereby training a neural network on one set of targets results in an inductive bias that is ill-suited to the value function that the network will eventually be tasked with representing. Key to this notion of overfitting is the concept of \textit{interference}, the degree to which an update to a function approximator's output given one input influences its predictions for other input observations. We saw a form of interference presented in Chapter~\ref{chp:supervised}, where we discussed the degree to which a gradient update on one minibatch reduced the loss on other minibatches in a training dataset. Here we will take a more generic view and focus on the magnitude of the change in the predicted value of one state as the result of a semi-gradient update on another. Interference can be viewed as an inductive bias encoding the {similarity} of the value function at different inputs. Function approximation schemes with weaker interference, such as those induced by tabular value functions or tile coding schemes, have been shown empirically to produce more stable behaviour and faster convergence in value-based algorithms on a number of classic control domains \citep{ghassian2020improving}. However, such schemes by construction require treating the value functions for different states independently, limiting the potential for a model’s predictions to generalize to new observations and resulting in \textit{memorization}.
An array of prior empirical works demonstrate that value-based RL algorithms frequently overfit to their training environment's observations and dynamics \citep{lewandowskigeneralization,farebrother2018generalization,cobbe2021phasic, zhang2018study}, essentially `memorizing' the value function in a way that fails to generalize even to minor perturbations of the input. While a diverse set of training methodologies seek to mitigate overfitting \citep{igl2019generalization, raileanu2021automatic}, the {source} of this pathology remains under-explored. This can be attributed in part to the challenge of even defining what type of generalization is desirable in RL: unlike in supervised learning, we typically do not have access to the true optimal value function or policy in the environment, which means we cannot directly compare the performance of a policy or the accuracy of a value function to some ground truth objective \citep{liu2020towards}.
Instead, we consider interference as a proxy for generalization. While networks with a large degree of interference may not always extrapolate correctly, networks with zero interference between states will not extrapolate at all, making generalization impossible. The importance of interference to generalization has been discussed previously in Chapters~\ref{chp:supervised} and~\ref{chp:invariance}, and was hinted at in the analysis of representation dynamics in Chapter~\ref{chp:rep-learning}, where we saw that neural networks tended to evolve representations under which states were easier to disentangle, provided that the environment offered a sufficiently dense reward signal. We now extend this analysis to understand why this phenomenon occurs, and whether excessive disentanglement can harm generalization when the agent is exposed to new observations.
Our primary contributions in this chapter will be twofold: first, to provide a rigorous theoretical and empirical analysis of the relationship between generalization, interference, and the dynamics of temporal difference learning; second, to study the effect of distillation, which avoids the pitfalls of temporal difference learning, on generalization to novel environments. Towards this first contribution, we will extend the analysis of Chapters~\ref{chp:rl-dynamics} and~\ref{chp:rep-learning} to show that the dynamics of temporal difference learning accelerate convergence along non-smooth components of the value function first, resulting in implicit regularization towards learned representations that generalize weakly between states. Our findings present an explanation for the observation noted widely across the literature that TD-learning produces representations that are particularly vulnerable to overfitting \citep{raileanu2021decoupling,zhang2018study}.
We then evaluate whether these findings hold empirically across a range of popular deep RL benchmarks. We measure interference by constructing a summary statistic which evaluates the extent to which optimization steps computed for one state influence predictions on other states, which we call the \textit{update rank}. This metric is similar to the notion of feature rank proposed in Chapter~\ref{chp:rep-learning}, but applies to the updates performed by a network rather than its outputs. We find that value-based agents trained with temporal difference (TD) methods learn representations with weak interference between states, performing updates similar to those of a lookup table, whereas networks trained with policy-gradient losses learn representations for which an update on one state has a larger effect on the policy at other states. Finally, we show that post-training policy distillation is a cheap and simple approach to improve the generalization and robustness of learned policies. We find that distillation is particularly effective at increasing \textit{smoothness} in the student network's output compared to that of the teacher. This property benefits interpolation and robustness to perturbations, however it is not a panacea: we see that increasing smoothness via distillation does not provide significant performance improvements on generalization to new tasks that are dissimilar to the training environment. This limitation will be addressed in Chapter~\ref{chp:icp}, where we will present a new representation-learning objective that targets generalization to novel environments by leveraging a notion of invariance.
\section{Learning dynamics and smoothness}\label{sec:learning-smoothness} This section will explore a tension between learning dynamics in neural networks, which tend to `generalize-then-memorize' \citep{kalimeris2019sgd}, and the dynamics of temporal difference learning with tabular value functions, discussed in Section~\ref{sec:vf_gen}, which tend to pick up information about the value function's global structure only late in training. We go on to study how these learning dynamics may affect the structure of gradient updates in the function approximation setting in Section~\ref{sec:fa_gen}.
\textbf{Eigendecomposition of transition operators.} An important concept in our theoretical analysis will be that of the eigendecomposition of the environment transition matrix. We will follow the precedent of prior work, and that of Chapter~\ref{chp:rl-dynamics}, in considering diagonalizable transition matrices \citep{machado2017laplacian, stachenfeld2017hippocampus, mahadevan2005proto}. The relationship between the smoothness of an eigenvector and its corresponding value has been noted previously \citep{mahadevan2007proto}. However, prior discussion of this connection has defaulted to an intuitive notion of smoothness without providing an explicit definition; this intuitive notion is illustrated by the eigenvectors of the MountainCar environment visualized in Figure~\ref{fig:mdp}. We provide a concrete definition of the smoothness of a function on the state space $\mathcal{X}$ of an MDP $\mathcal{M}$ in order to provide an unambiguous characterization to which we will refer throughout this chapter.
\begin{definition} Given a function $V : \mathcal{X} \rightarrow \mathbb{R}$, MDP $\mathcal{M}$, and policy $\pi$, we define its expected variation $\nu(V)$ as follows.
\begin{equation}\nu(V) = \sum_{x \in \mathcal{X}} |V(x) - \mathbb{E}_{P^\pi(x'|x)}V(x')| \; \end{equation} We say $V$ is \emph{smooth} if $\nu(V)$ is small. \end{definition} This expression reveals a straightforward relationship between the eigenvalue $\lambda_i$ associated with a normalized eigenvector $v_i$ and the smoothness of that eigenvector. \begin{equation}
\nu(v_i) = \sum_{x\in \mathcal{X}}|v_i(x) - \mathbb{E}_{P^\pi(x'|x)} v_i(x') | = \sum_{x \in \mathcal{X}} |(1-\lambda_i) v_i(x) | \end{equation}
In other words, the eigenvalue of an eigenvector precisely determines its smoothness. If $\lambda = 1$, for example, then the eigenvector must be constant over the MDP, whereas if $\lambda = -1$, then we have $\mathbb{E}_{P^\pi(x'|x)}[V(x')] = - V(x)$ and the expected value fluctuates between extremes when stepping from one state to another. The \textit{variance} over next-state values can in principle be large even for functions of low variation by our definition, though in our empirical evaluations (see e.g. Figure~\ref{fig:mc-ff}) smooth eigenvectors tended to also exhibit little variance. For our analysis of the \textit{expected} updates performed by TD learning, we will find the smoothness of the expected updates to be a more useful quantity than the variance. \begin{figure*}\label{fig:mdp}
\end{figure*} \subsection{Tabular dynamics} \label{sec:vf_gen} In light of this more precise definition of smoothness, we now revisit our previous results on the convergence of value functions under temporal difference and Monte Carlo learning. We recall the analysis of Chapter~\ref{chp:rl-dynamics}, which expressed the dynamics of Monte Carlo (MC) updates as a continuous-time differential equation \begin{equation*}
\partial_t V_t = V^\pi - V_t
\end{equation*} where $V_t \in \mathbb{R}^\mathcal{X}$ is a function on the state space $\mathcal{X}$ of the MDP, resulting in the trajectory
\begin{equation*}
V_t = \exp ( -t)(V_0 - V^\pi) + V^\pi \, .
\end{equation*}
Intuitively, this corresponds to a `straight line' trajectory where the estimated value function $V_t$ converges to $V^\pi$ along the shortest path in $\mathbb{R}^{\mathcal{X}}$. In practice, most deep RL algorithms more closely resemble temporal difference updates, which are expressed as \begin{align}
\partial_t V_t &= -(I-\gamma P^\pi)V_t + R^\pi \\
V_t &= \exp ( -t (I-\gamma P^\pi))(V_0 - V^\pi) + V^\pi \, . \label{eq:td_dynamics} \end{align}
Whereas under Monte Carlo learning the value function converges equally quickly in all dimensions, its convergence under temporal difference learning depends on the environment transition matrix. As in Chapter~\ref{chp:rl-dynamics}, we will consider an MDP with a diagonalizable transition operator $P^\pi$ corresponding to some policy $\pi$. We write the decomposition of a function $V : \mathcal{X} \rightarrow \mathbb{R}$ as a sum of the eigen-basis vectors of $P^\pi$, written $\{v_1, \dots v_{|\mathcal{X}|} \}$, obtaining $V = \sum_{i=1}^{|\mathcal{X}|}\alpha_i v_i$ for some (unique) set of coefficients $(\alpha_i)_{i=1}^{|\mathcal{X}|}$. Under this decomposition, we can show that a predicted value function trained via TD learning will converge more slowly along smooth eigenvectors of $P^\pi$.
\begin{restatable}{obs}{convergence}\label{obs:convergence}
Let $P^\pi$ be diagonalizable, with eigenvectors $v_1, \dots, v_{|\mathcal{X}|}$ corresponding to eigenvalues $\lambda_1 > \dots > \lambda_{|\mathcal{X}|}$, and let $V_t$ be defined as in \eqref{eq:td_dynamics}. Write $V_t = \sum_{i=1}^{|\mathcal{X}|} \alpha^t_i v_i$ to express the value function at time $t$ with respect to the eigen-basis $\{v_i\}$. Then the convergence of $V_t$ to the value function $V^\pi = \sum_{i=1}^{|\mathcal{X}|} \alpha^\pi_i v_i$ can be expressed as follows.
\begin{align*}
\alpha^t_i - \alpha^\pi_i &= \exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi)
\end{align*} \end{restatable} \begin{proof}[Proof Sketch]
The full proof of this result can be found in Appendix~\ref{apx:proofs}. The key step lies in expressing the difference between $V_t$ and $V^\pi$ in terms of the eigen-basis $v_1, \dots, v_{|\mathcal{X}|}$, for which we can obtain a closed-form expression based on the eigenvalues $\lambda_1, \dots, \lambda_{|\mathcal{X}|}$. We consider the coefficient of $V_t$ corresponding to the basis vector $v_i$, which can be written out as follows. \begin{align*}
|V_t - V^\pi|[i] &= |\alpha^t_i - \alpha^\pi_i| \\
&= | \exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi) + \alpha_i^\pi - \alpha^\pi_i| \\
&=|\exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi) |\\
&= \exp(-t(1-\gamma \lambda_i)) | (\alpha_i^0 - \alpha_i^\pi) | \end{align*} We note that if the basis vectors are not orthogonal, this coefficient will not be equal to the projection of $V_t$ onto the basis vector $v_i$; however, understanding the evolution of coefficients still gives some insight into the convergence of smooth as opposed to non-smooth components of the value function space. \end{proof} The implications of Observation~\ref{obs:convergence} on the learned value function depend to some extent on the eigendecomposition of $V^\pi$. If $V^\pi$ is equal to the constant function, then we expect the high-frequency components of $V_t$ to quickly converge to zero. If $V^\pi$ puts weight on non-smooth eigenvectors, then early values of $V_t$ may assign disproportionately large weight to these components relative to their contribution to $V^\pi$. In practice, value functions tend to exhibit a mixture of smooth and discontinuous regions. The corresponding expression of $V^\pi$ with respect to the eigen-basis of $P^\pi$ consequently places non-zero coefficients on eigenvectors corresponding to negative eigenvalues in order to fit this discontinuity, though its spectrum is dominated by smooth eigenvectors. We include some illustrative examples of the spectra of value functions under different assumptions on the reward and transition matrix $P^\pi$ in Appendix~\ref{appx:numerical}, finding in most cases that the smooth eigenvectors of the MDP tend to be assigned greater mass by the value function. The following result highlights that non-smooth components of a predicted value function, while contributing relatively little to the Monte Carlo error, contribute disproportionately to the TD error, providing an incentive to fit these components early in training.
\begin{restatable}{thm}{tderror}
Let $P^\pi$ be real diagonalizable with eigenvalues $\lambda_1 \geq \dots \geq \lambda_n$ and $(v_k)_{k=1}^n$ the corresponding (normalized) eigenvectors. Then for any value function $V_t$, the TD error $\mathrm{TD}(V_t) = \|V_t - T^{\pi} V_t\|^2$ can be bounded as \begin{align}
\mathrm{TD}(V_t) &= \| T^\pi V_t - V_t \|^2\\
&= \Big\| \sum_{i=1}^n (1-\gamma \lambda_i)(\alpha^\pi_i -\alpha^t_i) v_i \Big\|^2 \\
&\leq \sum_{i=1}^n (\alpha^\pi_i - \alpha^t_i)^2 (1-\gamma \lambda_i)^2 \; \end{align} with equality when $P^\pi$ has orthogonal eigenvectors. \end{restatable} The proof of this result can be found in Appendix~\ref{apx:proofs}. Monte Carlo updates, which simply regress on the value function, give equal weight to errors along any component of the basis. These incentives provide some intuition for the different trajectories followed by Monte Carlo and TD updates: in order to minimize the TD loss, the predicted value $V_t$ must quickly become accurate along non-smooth components of the value function $V^\pi$; however, its error due to smooth components such as the value function's bias term will have little effect on the loss and so converges more slowly. We provide an illustrative example of the relationship between the eigenvalue associated with a subspace and the convergence rate of the value function in that subspace in Figure~\ref{fig:mdp}.
\subsection{TD learning with function approximation} \label{sec:fa_gen} \begin{figure*}\label{fig:mc-ff}
\end{figure*} Most function approximation schemes leverage the assumption that states which are close together in observation space are likely to have similar values; i.e. they encode a preference towards smooth (with respect to the observations) functions. This pushes against the tendency of temporal difference updates to encourage the learned value function to fit the components of the value function with large variation first.
To investigate this tension, we consider the \textit{kernel gradient descent} regime. \subsubsection{Kernel gradient descent} Formally, a kernel is a positive definite symmetric function $K : \mathcal{X} \times \mathcal{X} \rightarrow \mathbb{R}$. In our case, we will define $\mathcal{X}$ to be the state space of an MDP. Letting $\mathbf{x} \subseteq \mathcal{X}$, we denote by $\tilde{K}$ the (symmetric) matrix $K(\mathbf{x},\mathbf{x})$ with entries $K(\mathbf{x},\mathbf{x})_{i,j} = K(\mathbf{x}_i, \mathbf{x}_j)$. Loosely speaking, a kernel can be thought of as measuring the similarity between two states, allowing us to encode certain forms of inductive bias into the learning dynamics of the agent. Importantly, the similarity of two states under $K$ does not inform us about how similar the states' initial values are, but rather how an update to the value function at one state influences the value of the other; in other words, it is a proxy for the \textit{interference} between two states. Under kernel gradient descent, the trajectory of a function is defined in terms of a kernel $K$ and the function-space gradient of a cost function. We can translate TD semi-gradient updates into the kernel (semi-)gradient descent regime as follows, where we let $R^\pi$ denote the expected reward under policy $\pi$. \begin{equation}
\partial_t V_t = \tilde{K} ((\gamma P^\pi - I)V_t + R^\pi) \end{equation} It is straightforward then to obtain analogous results as before on the convergence of $V_t$ to $V^\pi$ based on the eigendecomposition of the matrix $\tilde{K} (\gamma P^\pi - I)$ in cases where this matrix is positive definite, though in general it may have negative eigenvalues. This decomposition will not in general have a closed form in terms of the eigendecompositions of $\tilde{K}$ and $P^\pi$, but special cases have been studied in the setting of linear regression by \citet{ghosh2020representations} and can be related to kernel gradient descent straightforwardly as discussed in Appendix~\ref{apx:proofs}. This setting also describes the dynamics of neural networks in the limit of infinite width \citep{jacot2018neural, fort2020deep, lee2020finite}, which follow kernel gradient descent with respect to the neural tangent kernel.
A more interesting case occurs when we assume some states in the environment are not updated during training. In this case, we can characterize their evolution (and potential influence on the training set via bootstrapping) using the direct sum of $K(X_{\mathrm{train}}, X_{\mathrm{train}})$ and $K(X_{\mathrm{test}}, X_{\mathrm{train}})$. \begin{restatable}{thm}{ntk}\label{thm:ntk} Let $K$ be a kernel and $\pi$ a fixed policy in an MDP with finite state space $\mathcal{X}$. Let $X_{\mathrm{train}} \subset \mathcal{X}$ be the set of states in the support of $\pi$, $X_{\mathrm{test}} = \mathcal{X} \setminus X_{\mathrm{train}}$, and let $V_t$ be a value trajectory obtained by applying kernel semi-gradient updates to some initial value function $V_0$ with kernel $K$. Let $K_{\mathrm{all}}$ be defined as \begin{equation}
K_{\mathrm{all}} = K(X_{\mathrm{train}}, X_{\mathrm{train}}) \oplus K(X_{\mathrm{test}}, X_{\mathrm{train}}) \end{equation} Then the trajectory of $V_t$ on the entire state space $\mathcal{X}$ will be as follows,
\begin{align}
\partial_t V_t(\mathcal{X}) &= (K_{\mathrm{all}}) [ (T^\pi V_t - V_t) (X_{\mathrm{train}})]\;.
\end{align} \end{restatable} A full derivation is provided in Appendix~\ref{apx:proofs}. These dynamics diverge notably from the standard kernel gradient descent regime in that changes to predictions on the test set have the potential to influence the dynamics of $V_t$ on the training set. This situation can arise when the agent is trained on a collection of sampled transitions which do not correspond to a contiguous trajectory, for example if transitions are removed from a replay buffer via some prioritization scheme, rather than in order of recency. In off-policy learning it can also arise when the policy used to construct the bootstrap target differs from that used to collect the replay buffer data. A large value of $K(X_{\mathrm{test}}, X_{\mathrm{train}})$ implies that updates to the training set hold influence over predictions on the test set, but at the cost of increasing asymmetry in $K_{\mathrm{all}}$ when viewed as an operator on $\mathbb{R}^\mathcal{X}$. In Appendix~\ref{appx:kernel-gd} we illustrate how this asymmetry can harm stability in the case of a simple radial basis function kernel. This can be viewed as a special case of off-policy temporal difference methods where the probability of sampling some states is set to zero \citep{ghosh2020representations}.
Combining insights from Theorem~\ref{thm:ntk} and Observation~\ref{obs:convergence}, we arrive at an intriguing conclusion: in the case of smooth kernels, the components of the value function most suitable to approximation via the kernel $K$ are precisely those which appear in the value estimate of the training set only later in the trajectory. As a result, the kernel does not receive the necessary information to generalize accurately to new observations until late in the training process. This observation runs contrary to the standard kernel regression regime, where one argument in support of early stopping is that kernel gradient descent methods converge along smooth components fastest \citep{jacot2018neural}. At the same time it is an obvious effect of bootstrapping, which requires that the agent update its predictions several times in order to propagate information about the value function through the entire input space. This effect is illustrated in Figure~\ref{fig:kernel-generalization} in Appendix~\ref{appx:kernel-gd}.
\subsubsection{Non-linear function approximation} We saw in Chapter~\ref{chp:rep-learning} that the evolution of a network's representation is crucial to its ability to adapt to new learning signals. We now turn our attention to how the evolution of a network's features can influence its generalization properties. Our primary object of focus will be the gradient structure of a function approximator, whose analysis depends on the second-order effects of TD semi-gradient updates under finite step sizes. We consider the system
\begin{equation} \label{eq:discrete_dynamics}
\theta_{t+1} \gets \theta_t + \alpha \nabla_\theta V(\theta_t) \cdot [(\gamma P^\pi - I)V(\theta_t) + r ]
\end{equation}
which can be viewed as an Euler discretization of the dynamics described in \eqref{eq:td_dynamics}. We will use the notation $f(\theta_t)$ to refer to the semi-gradient update on parameters $\theta_t$ inducing value function $V_{\theta_t}$, and write $\mathrm{TD}(\theta) = \frac{1}{2}\| V_\theta - \square T^\pi V_\theta \|^2$, where the $\square$ denotes a stop-gradient. Using the continuous time system in \eqref{eq:td_dynamics} to approximate these updates for time $t$ will gradually accumulate increasing errors, proportional to $(\alpha n)^2$, as it does not take into account the effect of the discrete step size on higher-order gradients of $V_\theta$. We apply a similar analysis to that of \citet{barrett2021implicit} and \citet{ smith2020origin} to understand the effect of the discrete learning dynamics on the gradient structure of $V_\theta$ itself. We let \begin{equation} \small \label{eq:second-correction}
f_1(\theta) = -\frac{1}{2} \nabla_\theta \| \nabla_\theta \mathrm{TD}(\theta) \|^2 + \gamma (\nabla_\theta ^\top V P^\pi \nabla_\theta V) f(\theta) \end{equation} to obtain a second-order correction describing the effect of gradient descent on the gradient structure of the learned representation. \begin{restatable}[Second-order dynamics]{obs}{theoremsecond} \label{thm:second}
Let $\theta_t$ be defined by the discrete-time system (\ref{eq:discrete_dynamics}) with step size $\alpha$. Let $f_1 (\theta)$ be defined as in (\ref{eq:second-correction}). Let $\tilde{\theta}_t$ denote the trajectory obtained by following the dynamics:
\begin{align}
\partial_t \tilde{\theta}_t = f(\tilde{\theta}_t) + \frac{\alpha}{2} f_1(\tilde{\theta}_t)\;.
\end{align}
Then we have $\theta_{n} = \tilde{\theta}_{n\alpha} + O((n\alpha)^3)$, where $\tilde{\theta}_{n\alpha}$ denotes the value of $\tilde{\theta}_t$ at time $t=n\alpha$. \end{restatable}
The proof of this result follows a similar structure as that of \citet{smith2020origin} and can be found in Appendix~\ref{apx:proofs}; the key difference from the gradient descent regime is that we must now take into account the influence of gradient updates on the loss via the Bellman targets. This distinction presents itself in the form of $f_1$ constructed in \eqref{eq:second-correction}: the first term in the sum consists of a semi-gradient norm penalty term with respect to the instantaneous TD error, while the second term partially offsets this penalty along smooth components of the MDP, accounting for the target drift due to gradient updates. The first term is the standard gradient norm penalty observed in supervised regression. The second term has been studied in the setting of offline RL by \citet{kumar2021dr3}, where it plays a greater role than in the online RL problems in this chapter.
The effect of this penalty term on the evolution of interference in deep neural networks is difficult to analyze in closed form due to the influence of the network Hessian, however our empirical observations from Chapter~\ref{chp:rep-learning} provide an illustrative example. We recall that the previous section showed that early TD targets will typically be less smooth than the true value function. The gradient norm penalty will thus implicitly discourage interference between states in order to fit this relatively discontinuous function in a manner robust to noise in the optimization process. This observation is foreshadowed in the findings of Chapter~\ref{chp:rep-learning}, which found that deep RL agents learn to map nearby states to distinct feature vectors in dense-reward environments. We illustrate an analogous effect on interference in Figure~\ref{fig:qualitative}, where networks trained in dense-reward games (whose early TD targets will exhibit greater variation) exhibit weaker interference after training than they did at initialization. We can further observe the long-term impact of fitting non-smooth targets on the inductive bias of a network in Figure~\ref{fig:mc-ff}, where networks trained to fit high-frequency sinusoid functions exhibit pathological interpolation behaviour when later fine-tuned on the target value function.
In combination, the findings of this section suggest that the dynamics of temporal difference learning work to discourage interference between states in deep RL by fitting high-frequency components of the value function early in training while also encouraging robustness of the loss to noisy optimization steps. While this may result in more stable learning, as highlighted in Theorem~\ref{thm:ntk}, it has the double-edged effect of reducing the degree to which the network may generalize to novel observations. We now leverage these results to gain insight into deep RL agents trained in rich-observation environments.
\section{Generalization and interference in deep RL} \label{sec:rank-exps} The inductive bias of a neural network is influenced heavily by the initial training period \citep{frankle2020the, achille2018critical, golatkar2019time}. Much like in human development, the data a neural network sees in the early stages of optimization sets the network's inductive bias in a way that can be difficult to unlearn later. The distribution of states and the targets that the agent is trained to fit during this period therefore have an outsize influence on what types of targets the network will be able to easily fit down the road. In Chapter~\ref{chp:rep-learning}, we saw that networks which did not receive any reward signal early in training had greater difficulty distinguishing states later in training. We now dive deeper into how dense rewards shape agents' representations: whereas sparse rewards discourage state disentanglement, we conjecture that dense reward environments may encourage excessive disentanglement -- in other words, \textit{discourage} interference -- at the expense of generalization in value-based deep RL algorithms. We begin by presenting a quantitative approach to measure the degree to which interference is occurring between states in the agent's visitation distribution. Armed with this metric, we evaluate two concrete hypotheses. First, that deep neural networks trained with TD updates will exhibit weaker interference between states as training progresses compared to their value at initialization (Hypothesis~\ref{hyp:td}). Second, that networks trained with TD learning will exhibit weaker interference than those trained with policy gradient objectives (Hypothesis~\ref{hyp:distill}).
\subsection{Representation evolution in DQN agents}
We begin by presenting a method to track interference in deep RL agents. Given a set of transitions $\tau_1, \dots, \tau_n$ of the form $\tau_i=(\mathbf{x}_i, a_i, r_i, \mathbf{x}'_i)$ and a value function $V$ with parameters $\theta$, we let $\theta_i$ denote the network parameters after performing an optimization step with respect to the transition $\tau_i$. We then construct a matrix $\mathbf{I}_{\Delta}$ entry-wise by computing the interference $\mathrm{I}_{\Delta}(\mathbf{x}_i, \mathbf{x}_j)$ between each pair of states $\mathbf{x}_i, \mathbf{x}_j$ in the sampled set. We refer to this object as the \textit{update matrix}, as it consists of the changes in the network outputs as a result of gradient updates. Recalling \eqref{eq:deltaint}, we let $Q_\theta(\mathbf{x})$ denote the vectorized action-value function output by a DQN network at state $\mathbf{x}$ given parameters $\theta$, $g_\ell$ the TD semi-gradient update direction, and $\eta$ the optimizer state. \begin{equation}
[\mathbf{I}_{\Delta}]_{i,j} = \mathrm{I}_{\Delta} (\mathbf{x}_i, \mathbf{x}_j) = \|Q_\theta(\mathbf{x}_j, \cdot ) - Q_{\theta'}(\mathbf{x}_j, \cdot)\| \text{ where } \theta' = \theta + \alpha g_\ell (\mathbf{x}_i; \theta, \eta) \end{equation} \begin{figure}\label{fig:update_heatmaps}
\end{figure} Figure~\ref{fig:update_heatmaps} provides an illustration of the update matrix of a radial basis function kernel regression model where the lengthscale of the kernel is set to different values. In DNNs, the properties of this matrix will depend on the optimizer used to perform updates, leading to notable differences from the neural tangent kernel regime studied elsewhere \citep{yang2022overcoming} in the case of non-linear function approximators trained with adaptive optimizers. At one extreme, the update matrix $\mathbf{I}_{\Delta}$ for a tabular value function (demonstrated by the short lengthscale kernel plot on the left hand side of Figure~\ref{fig:update_heatmaps}) will have non-zero entries only along the diagonal and the matrix will have full rank. At the other, if the value function is represented by a single parameter $\theta \in \mathbb{R}$, then every row will be identical up to a scalar multiple and the matrix will have rank one. More generally, the rank of this matrix can be interpreted as a proxy for whether an agent tends to \textit{generalize} updates between states (low rank), or whether it \textit{memorizes} the value of each state-action pair independently from other states (high rank). As in Chapters~\ref{chp:invariance} and \ref{chp:rep-learning}, we use an approximate version of the rank that discards negligible components of the matrix based on the singular value decomposition, analogous to the feature rank quantity computed in the previous chapter. We provide full computation details in Appendix~\ref{appx:update-details}. \begin{equation}
\rho(\mathbf{I}_{\Delta}) = \bigl| \{ \sigma \mid \sigma \in \mathrm{SVD}(\mathbf{I}_{\Delta}) \text{ and } \sigma > \epsilon\} \bigr| \end{equation}
We will refer to $\rho(\mathbf{I}_{\Delta})$ as the \textit{update rank}. Typically, $\mathbf{I}_{\Delta}$ will depend on the optimization state $\theta$ (which describes the network architecture, parameters, and optimizer state) of the agent and so we will sometimes write $\mathbf{I}_\Delta(\theta)$ to denote this dependence when it is not obvious. An alternative approach outlined by \citet{daneshmand2021batch} involves computing the Frobenius norm of the difference between the matrix $\mathbf{I}_\Delta$ and the identity, however this may overestimate interference in optimizers which use momentum due to constant terms in the update matrix. In our case, a change in the rank of $\mathbf{I}_\Delta(\theta_t)$ and $\mathbf{I}_\Delta(\theta_{t+n})$ after $n$ steps of optimization indicates that the inductive bias of the network has shifted to generalize more, in the case of a decrease in rank, or less, in the case of an increase in rank, between states. The update rank thus gives us a means of testing our first empirical hypothesis of this chapter.
\hypothesis{H1}{deep neural networks trained with TD updates exhibit weaker interference between states as training progresses.\label{hyp:td} }
We measure interference by evaluating $\rho(\mathbf{I}_{\Delta}(\theta))$ for states sampled from the replay buffer of a deep RL agent. Hypothesis~\ref{hyp:td} predicts that the rank $\rho(\mathbf{I}_{\Delta}(\theta_t))$ will increase as a function of the training step $t$. To test this we train a standard DQN architecture on environments from the Atari 2600 suite, and save a range of checkpoints throughout training. We illustrate the evolution of agents' update matrices $\mathbf{I}_{\Delta}(\theta_t)$ over the course of training in Figure~\ref{fig:qualitative}. We observe that RL agents trained in dense-reward environments tend to develop update matrices which resemble those of tabular value functions: they tend to have a pronounced diagonal or block-diagonal structure later in training, compared to a more linear structure early in training. Those trained in the absence of reward, i.e. those for which the target value function has no high-frequency components, maintain low-rank update matrices through training as our theory would predict. We find that similar results hold for a range of update rules, including distributional updates performed in the C51 algorithm \citep{bellemare2017distributional}. Quantitatively, we track the update rank of these checkpoints on the right hand side of Figure~\ref{fig:qualitative}. We include further evaluations in Appendix~\ref{apx:more-results}.
\begin{figure}\label{fig:qualitative}
\end{figure}
\subsection{Actor-critic methods}
Hypothesis~\ref{hyp:td} was motivated by the observation that early temporal difference targets predominantly contain information about immediate rewards, implicitly rewarding networks which memorize rather than generalize the learned value function. However, policy gradient methods do not encounter this issue: these methods seek to maximize the probability of selecting an action with large advantage, which is independent of the smoothness of the TD targets. While policy gradient methods encounter their own challenges, in particular high gradient variance, these challenges are largely orthogonal to the particularly adversarial nonstationarity of TD learning. Actor-critic methods, which involve training a policy via policy gradient methods and a value function via temporal difference methods, therefore present an ideal test bed to study whether the decline in generalization observed during training in the previous section is unique to TD updates, or whether it is a more general property of RL agents. A priori, we have no reason to expect the policy gradient losses to exhibit significant increases in structure later in training. We therefore propose the following hypothesis.
\hypothesis{H2}{networks trained with TD learning will exhibit weaker interference than those trained with policy gradient objectives. } \begin{figure*}\label{fig:daac}
\end{figure*} We repeat the analysis of the previous section with actor-critic methods to study whether this is the case, measuring the change in the actor's output policy at a state $\mathbf{x}$ rather than Q-values. We run our evaluations in the ProcGen environment \citep{cobbe2019quantifying}, which consists of 16 games with procedurally generated levels. While the underlying mechanics of each game remain constant across the different levels, the layout of the environment may vary. The agent is given access to a limited subset of the levels during training, in this case 10, and evaluated on novel randomly generated levels. In this section we study interference only on the training environments. We investigate this hypothesis (H2) using two different algorithms on the ProcGen suite: PPO \citep{schulman2017proximal}, which uses a shared representation network for both the actor and critic, and DAAC \citep{raileanu2021decoupling}, where there are no shared parameters between the actor and the critic. We then evaluate the update dimension of the two different methods, and plot the results in Figure~\ref{fig:daac}. Additional details can be found in Appendix~\ref{appx:procgen-details}. Omitting the critic gradients from the actor's representation leads to significantly lower update dimensions early in training in a number of environments, including bigfish, heist, and miner. Further, the critic network in DAAC, which receives only TD gradients, exhibits markedly higher update rank in all environments in at least the early stages of training, and often throughout the entire trajectory, than the other networks which have access to the actor gradients.
\section{Post-training distillation and generalization}
The previous section has demonstrated that training value-based deep RL agents results in a bias towards weaker interference between inputs. Arguably, this weak interference may be beneficial to the stability of these learning algorithms, but it comes at the cost of generalization. This bias towards memorization arises when, during the network's crucial early development stage, it is trained to fit target functions that do not capture the global structure of the value function. This leaves us with an open question: how might we adapt the training procedures followed in deep RL to obtain agents that can adapt to inputs they have not seen before?
One simple solution to this problem is to train a freshly initialized network on the final value function obtained by TD learning. If the teacher network was able to fit the low-frequency components of the value function -- even if this was achieved via memorization -- then the freshly initialized network will be able to benefit from incorporating this structure into its predictions from the start of its optimization procedure. In the case of policy distillation in actor-critic algorithms, it allows us to evade the influence of critic gradients without requiring a complete decoupling of the actor and critic during training time. Such approaches have seen success in prior work \citep{igl2019generalization, nikishin2022primacy}; this section presents a deeper study of a mechanism driving this success.
\subsection{Value distillation} We begin by studying the effect of post-training distillation on robustness of the learned value function in environments from the Atari suite \citep{bellemare2013arcade}. We first consider value distillation as a means of eliminating the counterproductive bias towards memorization induced by early TD targets. We leverage a data collection policy from a pre-trained teacher network $q_{\mathrm{T}}$, and perform distillation of a freshly initialized network $q_{\mathrm{S}}$ on this data. We follow a similar procedure to that of \citet{ostrovski2021the} to perform distillation of the function $q_{\mathrm{S}}$ on data sampled from the teacher's replay buffer $\mathcal{B}_{\mathrm{T}}$, leveraging their insight that distillation on \textit{all} action values, rather than only the value of the action taken by the teacher agent, yields significantly higher performance. We additionally study the effect of behaviour cloning with entropy regularization, obtaining the objectives \begin{align}\label{eq:value_distill}
\ell_{\mathrm{VD}}(q_{\mathrm{S}}, q_{\mathrm{T}}) &= \mathbb{E}_{s \sim \mathcal{B}_{\mathrm{T}}} \bigg [\sum_{a \in \mathcal{A}} (q_{\mathrm{S}}(a) - q_{\mathrm{T}}(a) )^2 \bigg ]
\intertext{and}
\ell_{\mathrm{BC}}(\theta) &= \mathbb{E}_{s,a\sim \mathcal{B}_{\mathrm{T}}}[ \log \pi_\theta(s,a) + \lambda H(\pi_\theta(s))] \label{eq:policy_distill} \end{align} where $H(\cdot)$ denotes the entropy of the policy. We set $\lambda = 0.01$ in our evaluations. We show results for value distillation \eqref{eq:value_distill}, which regresses on the outputs of the frozen Q-network, and behaviour cloning \eqref{eq:policy_distill}, which predicts the action taken by the frozen Q-network. We track three quantities: the performance of the learned policy, the robustness of the learned policy to perturbations, and the consistency of the learned policy when interpolating between observations. The performance is measured by following an $\epsilon$-greedy policy in the training environment, with $\epsilon=0.01$. The robustness to perturbations is measured by tracking whether the network takes the same action under a Gaussian perturbation to its input as in the unperturbed observation. Finally, we investigate the network's interpolation behaviour by evaluating whether, given a convex combination of observations $o_1$ and $o_2$, the network takes the same action under the combination as it does in either of the original observations. Additional evaluation details can be found in Appendix~\ref{appx:atari-details}. We are interested in evaluating the following hypothesis.
\begin{figure}\label{fig:atari-gen}
\end{figure}
\hypothesis{H3}{post-training distillation will reduce overfitting and therefore improve robustness; this will hold more strongly for policy distillation than for value distillation due to the discontinuity of the value distillation targets. } Figure~\ref{fig:atari-gen} shows that the distillation approaches yield policies that exhibit significant improvements in robustness to perturbations and are more consistent under interpolations between observations. We observe that the behaviour cloning method matches or nearly matches the performance of the pretrained agent in three of the four environments, while also obtaining the best robustness. Both behaviour cloning and value distillation improve upon the robustness of the teacher network that was trained online. We conclude that both value and in particular policy distillation increase smoothness in the form of improved robustness to perturbations and greater interpolation consistency. This finding motivates the next section, where we will dig deeper into policy distillation.
\subsection{Policy distillation} We have previously shown that decoupling the policy and value function approximators in an actor-critic architecture resulted in weaker interference in the value approximator, and stronger interference in the policy network. However, it is not clear that this should necessarily lead to better final performance on the test environment, as the interaction between the policy and value function training is complex and difficult to predict -- a policy with a small generalization gap but weak training performance may nonetheless perform worse at evaluation time than a policy with a larger generalization gap. An alternate approach to remove the influence of critic gradients on the learned policy is to train a behaviour cloning agent on the final policy obtained after training. Because we only need to distill the policy, we avoid contaminating the actor with value approximation gradients while (hopefully) preserving the performance of the original agent on the training environments. We return to the ProcGen benchmark, with the hypothesis that post-training distillation of PPO agents should produce policies which improve on the ability of the final trained actor to generalize to new levels. \begin{figure*}\label{fig:procgen-distill}
\end{figure*}
\hypothesis{}{post-training policy distillation will reduce the generalization gap of actor-critic agents trained on procedurally generated environments.\label{hyp:distill}}
Hypothesis~\ref{hyp:distill} brings with it a subtle but crucial caveat: our discussion of generalization thus far has focused implicitly on \textit{within-environment} generalization, but procedurally generated benchmarks inherently evaluate agents on novel environments, inducing a distribution shift. Our primary tool in studying generalization in the previous section, the update rank, carries with it the assumption that a network's generalization behaviour on training observations will be representative of its behaviour on novel states that may be encountered later. However, the ProcGen benchmark requires generalization \textit{out-of-distribution} to novel environments whose observations may have probability zero under the training environments. A neural network will not necessarily exhibit similar generalization behaviour in a novel environment which may be highly visually dissimilar to its training environment. As such, methods such as policy distillation which increase generalization between observations in the training environment should only be expected to benefit test set performance if the test environments are sufficiently `within-distribution'.
With this caveat in mind, we proceed to evaluate Hypothesis~\ref{hyp:distill}. We train a set of PPO agents on the difficult setting of the ProcGen benchmark, using the same experiment configurations as those used by \citet{cobbe2020leveraging}. We then perform behaviour cloning on these agents, using data collected only from the training environments, by training a freshly initialized network (the distillation agent) to minimize an entropy-regularized KL divergence with the teacher's policy on trajectories collected by the teacher. Training was terminated at 50M sampled environment steps, except for Maze, Miner, and Ninja, which were run for 90M sampled steps as the training performance of the student had not yet matched that of the teacher after the initial budget. We then evaluate the distilled agent's performance on the test environments. Results are shown in Figure~\ref{fig:procgen-distill}.
We find a rich heterogeneity in the shape of the learning curves of the student on the train and test environments across the different ProcGen games. Post-training distillation in most settings slightly reduces the generalization gap obtained by the original network, with the notable exception of the `maze' environment. In settings where the gap between train and test performance is larger, we see a more mixed effect from distillation. The naivety of our approach suggests that it is likely that final performance and sample-efficiency of these agents could be improved using tools from the policy distillation literature such as allowing the student to generate training data \citep{czarnecki2019distilling, rusu2016policy, teh2017distral}.
We observe three broad categories of behaviour in the student agent's learning curves. First, in the two environments where PPO achieves a negligible generalization gap, distillation improves not only test performance but also performance on the training environments. This suggests that the memorization behaviour of PPO was limiting not only its test set performance but also its returns on training environments. We further do not observe any evidence of stereotypical overfitting in these environments: the train and test performance increase in lockstep with each other. In environments where there is a moderate gap between train and test environment teacher performance, the test set performance of the student tends to increase more slowly than its training set performance, resulting in slow but fairly consistent convergence of the learned policy to approximately the level of that of the teacher (or slightly exceeding that in the case of bigfish, caveflyer, jumper, and starpilot). In no environments do we see the classic sign of overfitting: a sequence of increasing, saturating, and decreasing training performance. Instead, the final test performance depends more on the relative positive slope of the test and the training performance curves -- in other words, it depends on the degree to which the agent is able to identify shared structure between the training environments that is more sophisticated than can be detected by measuring extrapolation between observations within a single training environment. This notion of shared structure will be explored in greater depth in Chapter~\ref{chp:icp}.
\section{Conclusions}
The analysis of this chapter presents a complementary perspective to that of Chapter~\ref{chp:rep-learning} in studying how learning dynamics can influence generalization, particularly in dense-reward environments. Our key take-away is that the nature of the targets that we ask a neural network to fit early in its training trajectory hold significant influence over the bias of the learning process towards certain types of functions later. In the case of sparse-reward environments, networks which overfit to the task of predicting the zero target struggled to fit any non-trivial target function later in training. In the case of dense rewards, we've shown in this chapter that networks evolve to be more prone to memorization as a result of fitting discontinuous and unstructured targets early in training. Broadly, our analysis suggests that the nature of the learning dynamics of value-based RL discourages generalization in deep neural networks. We have shown that temporal difference learning in the continuous-time dynamics setting of Chapter~\ref{chp:rl-dynamics} fits non-smooth components of the value function first, resulting in an implicit bias towards representations that encode near-tabular updates. In the context of prior work demonstrating that weaker generalization can improve the stability and convergence of RL algorithms, this phenomenon may be beneficial to an agent's stability at the cost of observational overfitting. We further show that post-training distillation improves generalization and robustness, mitigating some of the tendency of value-based RL objectives to encourage overfitting.
The findings presented here are a crucial stepping stone along the path to the principled development of robust and stable deep RL algorithms which are capable of strong generalization performance. Our insights may prove useful in a range of future directions, such as using different architectures during training and distillation, leveraging larger neural network function approximators to minimize harmful interference, and modifying the update rule used in TD learning to adaptively promote or inhibit interference between inputs. Further, the role of the optimizer is fundamental to the phenomena studied in this paper, and RL-specific optimization approaches may benefit from our findings. The notion of update rank that we consider here may also be adapted to other settings to shine a light onto where a network lies on the memorization-generalization spectrum at different points during training, potentially providing a mechanism to trigger early stopping in supervised learning settings, or as a meta-learning objective for hyperparameter tuning and architecture search.
The experiments presented in this chapter have considered both single-environment (in the case of Atari and MountainCar) and multi-environment (ProcGen) problems. While our findings concerning interpolation and robustness to perturbations in single-environment RL are straightforward, analysis of the multi-environment setting is more subtle. The update rank we compute on agents measures the degree of generalization between observations from the training environments. Because the test environments may differ systematically from the training environments, the update rank may not capture properties of a neural network that enable effective generalization to these out-of-distribution test examples. Indeed, based on our observations from Chapter~\ref{chp:supervised} we have no reason to expect that invariance over training inputs will generalize to sufficiently out-of-distribution test environment observations. The following chapter will study the multi-environment problem explicitly. It will outline sufficient assumptions on the structure of the data-generating process to ensure generalization is tractable, and present a novel representation-learning approach that is able to identify the shared structure between the training and test environments.
\chapter{Generalization across environments} \label{chp:icp} \minitoc \section{Introduction}
A robot's LiDAR sensor is knocked askew. An autonomous vehicle encounters hail for the first time. In the distant future, a RoboChef discovers that the kitchen in which it usually works has been remodelled. Reinforcement learning agents will frequently encounter situations at deployment that they did not see during training. Such situations will not necessarily correspond to novel observations from the agent's training environments, but they may share a broader notion of \textit{causal structure}. An agent which has learned to robustly exploit this structure will generalize with ease to the challenges it faces at deployment. However, in such cases good generalization between observations from the training environment will not necessarily be sufficient for an agent to pick up on this causal structure. We saw the limitations of such generalization in Chapter~\ref{chp:gen-rl} on the ProcGen benchmark. Reducing the degree of memorization performed by a neural network improved some types of robustness, but did not always result in better performance on test environments. In the worst case, some training environments may contain spurious correlations that will not be present at test time. An agent which depends on these correlations may then generalize well to new observations in its training environments, but experience catastrophic failure on new environments where the correlation is not present~\citep{azhang2018natrl,Song2020Observational}. Two things are clearly necessary in order to obtain effective generalization outside of the training environment. First, we must look beyond interference to more sophisticated notions of invariance. Second, we must consider families of test environments for which the training environments provide sufficient information for generalization to be possible.
Generalization to new environments has been a topic of great interest to the RL community, but this interest has been concentrated on settings that make few explicit assumptions on the shared structure between training and test environments \citep{kirk2021survey}. Recent works~\citep{amit2018mlpacbayes,yin2019meta} have developed generalization bounds for the multi-task problem, but they depend on the number of tasks seen at training time, which can be prohibitively expensive given the sample complexity of RL even in the single task regime. To obtain stronger generalization results, we consider a multi-environment RL problem: like multi-task RL, the agent seeks to maximize return on a set of environments, only some of which are available to the agent during training. We make the assumption that there exists some latent {causal structure} in the form of a causal graph that is shared among all of the environments, and that the sources of variability between environments can be modelled by interventions on spurious variables in the causal graph. This family of environments, which we show to be equivalent to a \textit{Block MDP}~\citep{du2019pcid}, allows for observations to vary, but fixes the latent states, dynamics, and reward function.
Chapter \ref{chp:gen-rl} studied multi-environment generalization, but with relatively few assumptions on the shared structure between the different environments; we only required that the environments be drawn from the same data-generating process. We show in this chapter that the added assumption of shared structure allows for much stronger generalization results than have been obtained by prior work with significant sample complexity improvements. Our results are enabled by a key insight: we turn the problem of identifying features that will generalize well to novel environments from a learning problem, for which sample complexity may be prohibitive, to a causal identification problem, which will often be more tractable. Indeed, where \citet{cobbe2019quantifying} and \citet{azhang2018genrl} find that agents trained using standard methods must see many thousands of training environments in order to successfully generalize to new environments, in our more restricted setting as few as two or three training environments can be sufficient to identify the correct causal structure.
The main technical contribution of this chapter is the application of tools from {causal inference} to identify (and in some cases learn) representations that will enable an agent to generalize well to new environments. We propose a method motivated by a simple intuition: features which remain {invariant} across the different training environments are likely to be causally related to the reward and transition structure. Policies which depend on these features will therefore be robust to changes in the environment brought about by interventions on spurious state variables. In certain linear function approximation settings, we demonstrate that this method will, with high probability, learn an optimal state abstraction that generalizes correctly to novel environments, requiring many fewer training environments than would be necessary without the block MDP assumption. We then draw a connection between bisimulation and the minimal causal set of variables found by our algorithm, providing bounds on the model error and sample complexity of the method. We further show that using analogous invariant prediction methods for the nonlinear function approximation setting can yield improved generalization performance over multi-task and single-task baselines.
\section{Background on causality}
A desirable property of a learned representation is that it should enable a policy trained on one environment to generalize well to new environments. A promising approach to learn these general representations, particularly in the partially observable setting, comes from training an agent to learn the {causal} structure of the environment \citep{zhang2019causal, de2019causal}. We refer to Section~\ref{sec:background:stateabstractions} for a discussion of state abstractions. This section will provide the relevant background on causality; the two concepts will be related in Section~\ref{sec:causal-abs}. Causal inference \citep{pearl2000causality} is a powerful tool that machine learning researchers are beginning to leverage \citep{johansson2016learning, louizos2017causal}, developing explicit connections to generalization bounds and robustness \citep{shalit2017estimating}. While a great deal of work on causality focuses on learning an explicit causal graph \citep{de2019causal}, an alternative approach involving invariant prediction \citep{Magliacane2018, peters2016causal, rojas2018invariant} is in fact closely aligned with the literature on generalization bounds discussed in Chapter~\ref{chp:background}. \subsection{Causal discovery} \label{sec:causal_inf} Causal inference concerns itself with identifying causal relationships from data \citep{pearl2000causality}. The central object of study is a Structural Causal Model (SCM), which characterizes the data generating distribution as a set of functions on observed and hidden variables. \begin{definition}[\citep{pearl2000causality}] A structural causal model is a tuple $(\mathbf{U}, \mathbf{V}, \mathcal{F}, P )$, where $\mathbf{U}$ is a set of exogenous variables (e.g. the unobserved source of stochasticity in the environment) drawn from the distribution $P$, $\mathbf{V}$ is a set of endogenous variables (e.g. 
the observed state $s$, the reward $r$, and the action $a$ in RL), and $\mathcal{F}$ is the set of functions $f_V: \mathrm{PA}_V \times \mathbf{U} \rightarrow \mathbf{V}$ with $\mathrm{PA}_V \subset \mathbf{V}$, which determine the value of endogenous variable $V$ for each $V \in \mathbf{V}$. \end{definition} An SCM has a representation as a directed acyclic graph, called a causal Bayesian network or simply a causal graph, whose nodes are the variables in $\mathbf{U} \cup \mathbf{V}$ and whose edges are given by $\{(p, v) \mid p \in \mathrm{PA}_V, V \in \mathbf{V} \}$. An edge from variable $V_1$ to $V_2$ indicates that $V_1$ is a causal parent of $V_2$. This mapping is not one-to-one. In general, many SCMs may induce identical causal graphs. Causal models enable reasoning about how changes to the data-generating process, called \textit{interventions}, affect the resulting distribution over variables.
\begin{definition}[Do-Intervention] A do-intervention on a variable $V$, denoted do($V=v$), in a causal model $S=(\mathbf{U}, \mathbf{V}, \mathcal{F}, P )$ is an operation that induces a new SCM $S'=(\mathbf{U}, \mathbf{V}, \mathcal{F}', P )$, where $\mathcal{F}' = \{ f_{W} \in \mathcal{F} \mid W \neq V \} \cup \{ f_{V=v} \}$ and $f_{V=v}(\mathbf{p}, \mathbf{u}) = v \; \forall \mathbf{p} \in \mathrm{PA}_V, \mathbf{u} \in U$. \end{definition} The randomized procedure used to assign patients to the treatment and control groups in medical trials is an example of an intervention. Predicting the effect of a do-intervention requires identifying the direction of the edges in the causal graph, a process known as \textit{causal discovery}. Many recent works have explored how to incorporate causal structure into deep RL by learning disentangled representations of the observation space \citep{bengio2017independently, suter2019robustly}.
\subsection{Invariant prediction} A fundamental property of causal relationships is their invariance under interventions. \textit{Invariant prediction} seeks to identify causal structure from data by evaluating whether the relationships between variables in a prediction problem are invariant across different environments, where each environment corresponds to a different intervention on the data generating process. Relationships which are invariant over these interventions are assumed to be causal, and by applying a straightforward hypothesis-testing approach \citep{peters2016causal}, one can obtain high-probability guarantees on identifying the causal parents of a target variable of interest.
The approach of \citet{peters2016causal} is limited to problems in which the inputs consist of sets of variables whose relationship can be described by a causal graph. A more adaptable framework based on the same principle of invariance has emerged recently: invariant risk minimization (IRM) \citep{arjovsky2019invariant}. This setting assumes that the learner has access to data which is partitioned into different environments, and that these environments share the same causal structure. Differences in the distribution of data in each environment are attributed to non-causal correlations, which the learner should avoid using for prediction. Unlike invariant causal prediction methods, there is no explicit assumption on the structure of the inputs as corresponding to distinct variables in the causal model. Instead, the learner seeks to identify a non-linear feature map which induces a linear classifier whose errors are invariant over the training environments. The mathematical formalism of this objective yields the minimization problem \begin{equation} \min_{\phi, w} \sum_{e \in \mathcal{E}} \ell (w^\top \phi(X_e), Y_e) \text{ s.t. } w \in \operatorname*{argmin}_{v} \ell(v^\top \phi(X_e), Y_e), \; \forall e \in \mathcal{E}\;. \end{equation}
The invariant risk minimization framework provides a compelling intuition, but the IRM objective proposed by \citet{arjovsky2019invariant} requires extensive hyperparameter tuning to obtain competitive results, and in many cases fails to capture the invariances motivating its proposal \citep{kamath2021does}. In spite of these limitations, several works have extended the ideas behind IRM to a variety of settings \citep{ahuja2020invariant, alesiani2021continual}, and a notion of invariance across environments has recently been deployed by \citet{raileanu2021decoupling} to improve generalization to novel environments on the ProcGen suite.
\section{Problem setting} \label{sec:problem_setup} We consider a family of environments $\mathcal{M}_\mathcal{E} = \{(\mathcal{X}_e, \mathcal{A}, \mathcal{R}_e, \mathcal{P}_e, \gamma) \mid \; e \in \mathcal{E}\}$, where $\mathcal{E}$ is some index set. For simplicity of notation, we drop the subscript $e$ when referring to the union over all environments $\mathcal{E}$, e.g. $\mathcal{X} = \cup_{e \in \mathcal{E}} \mathcal{X}_e$. Our goal is to use a subset $\mathcal{E}_{\text{train}} \subset \mathcal{E}$ of these environments to learn a policy $\pi$ which generalizes to {every} environment. Concretely, we seek a parameterized policy $\pi$ which maximizes the quantity
\begin{equation}
\mathbb{E}_{\mathcal{E}_{{\mathrm{test}}}, \pi}[ \sum_{t=0}^\infty \gamma^t R(x_t, a_t)] \; . \end{equation}
We denote the number of training environments by $N=|\mathcal{E}_{\text{train}}|$. We assume that the environments share some structure, and consider different degrees to which this structure may be shared in this section. As we have alluded to previously, it is this presumed structure which will enable significant sample efficiency gains over more naive approaches.
\subsection{Block MDPs}
Block MDPs~\citep{du2019pcid} are described by a tuple $\langle \mathcal{S}, \mathcal{A}, \mathcal{X}, p, q, R \rangle$ with a finite, unobservable state space $\mathcal{S}$, a finite action space $\mathcal{A}$, and a possibly infinite, but observable space $\mathcal{X}$. Here $p$ denotes the latent transition distribution $p(s'|s,a)$ for $s,s'\in\mathcal{S}, a\in\mathcal{A}$, $q$ is the (possibly stochastic) emission function that generates observations from the latent state $q(x|s)$ for $x\in\mathcal{X}, s\in\mathcal{S}$, and $R$ denotes the reward function. \begin{assumption}[Block structure~\citep{du2019pcid}] \label{asmp:block}
Each observation $x$ uniquely determines its generating state $s$. That is, the observation space $\mathcal{X}$ can be partitioned into disjoint blocks $\mathcal{X}_s$, each containing the support of the conditional distribution $q(\cdot|s)$. \end{assumption} This assumption gives us the Markov property in $\mathcal{X}$, and is crucial for our empirical and theoretical results. \citet{zhang2020learning} discuss the partially observable setting.
The definition of a block MDP given by \citet{du2019pcid} defines a single environment; our primary interest in this chapter, however, is in a multi-environment setting. We will therefore concern ourselves with \textit{block MDP families}, which characterize a set of environments with shared dynamics. We define a block MDP family as a collection of environments $\mathcal{M}_{e}$, where each environment $e$ corresponds to an emission function $q_e$. Each environment $\mathcal{M}_e$ thus has the form $\langle \mathcal{S}, \mathcal{A}, \mathcal{X}, p, q_e, R \rangle$, where all terms except the emission function $q_e$ are shared between environments. We will move the potential randomness from $q_e$ into an auxiliary variable $\eta \in \Omega$, where $\Omega$ is some probability space, and write $q_e(s, \eta)$. Crucially, we enforce Assumption~\ref{asmp:block} on the union of the emission functions $q_e$. This entails that whenever $\text{range}(q_e (s, \cdot)) \cap \text{range}(q_{e'}(s', \cdot)) \neq \emptyset$, then $s = s'$. The decomposition of the state into a latent state $s$ and noise term $\eta$, and the corresponding causal structure, can be seen in Figure~\ref{fig:irm_model_irrelevant}.
Without additional assumptions on shared structure between the emission functions, generalization to novel environments can be an impossible task. We will therefore focus on settings where the $q_e$ overlap in structured ways -- for example, where $q_e$ outputs the concatenation of the noise and state variables, $q_e(s, \eta) = s \oplus f(\eta)$ -- such that it is possible to learn a feature map from observations to latent states that will generalize to new emission functions from this class.
Our ultimate goal is to find a policy $\pi$ which maximizes cumulative reward over any novel test environment. This chapter will exclusively consider settings where the policy $\pi$ is a function of some state abstraction (also referred to as a feature map) $\phi(\mathcal{X})$. Under the block MDP assumption, if $\phi$ maps observations $\mathbf{x}$ to the state $s$ that emitted them, then any policy which is optimal on the training environments will trivially be optimal on the test environments. As a result, most of our discussion in the coming sections will focus exclusively on the invariance of a learned state abstraction, with the ensuing optimality of the learned policy left implicit. Section~\ref{sec:relaxations} will characterize environments where learning such an invariant state abstraction is tractable.
\subsection{Relaxations} \label{sec:relaxations} \begin{figure}\label{fig:irm_model_irrelevant}
\end{figure} Figure~\ref{fig:irm_model_irrelevant} illustrates the graphical model representing the transition dynamics of a Markov Decision Process under varying assumptions. This graphical model can be interpreted as a stationary causal graph, with arrows indicating causal dependence, as seen on the right hand side of the figure. Under this interpretation, different assumptions on the structure of the graph correspond to different classes of Markov Decision Process. We will be particularly interested in settings where different environments correspond to different correlation structure in the noise variables, as outlined below.
\textbf{Correlated noise variables.} The standard formulation of the block MDP assumes that the noise variable $\eta$ is sampled independently at random at every time step, which prevents multi-timestep correlations. We therefore also consider a more realistic \textit{relaxed block MDP}, where spurious variables may have different transition dynamics across the different environments so long as these dynamics are independent of the return. We introduce the notation $q_S$ to refer to the projection of the inverse mapping $q_e$ onto the invariant component of the latent state $s$, \begin{equation}\label{eq:inverse-emmision}
q_S(x) = s \text{ where } \exists \eta, e \text{ s.t. } q_e(s, \eta) = x \end{equation} and note that this will be invariant over environments. This formulation is equivalent to augmenting each MDP $\mathcal{M}_e \in \mathcal{M}$ with a noise variable $\eta_e$, such that for any observations $x = q_e(s, \eta_e)$, any observation $x' = q_e(s', \eta_e')$, and any action $a$, we have
\begin{equation*}p(x'|x, a) = p(s'|s, a) \int_{\eta_e':q_e(q_S(x'), \eta_e')=x'} p_e(\eta_e'|s, \eta_e) d \eta_e'\, . \end{equation*}
This dependency can be observed in the arrows between $\eta_{t-1}$ and $\eta_t$ in Figure~\ref{fig:irm_model_irrelevant}. We note that $p(s'|s,a)$ remains independent of $\eta$ (i.e. there is no edge between $\eta_t$ and $s_{t+1}$), while $\eta_{t+1}$ is permitted to depend on $s_t$ and $\eta_t$ (i.e. we permit edges between these nodes in the causal graph). The assumption that for each observation $x$ there is a unique generating latent state $s$ is crucial to ensure that the return will be independent of the variable $\eta$, and its absence results in a partially observable MDP. In principle, we might go further and also allow $\eta_{t+1}$ to causally depend on the reward at the previous timestep (the grey dashed line in Figure~\ref{fig:irm_model_irrelevant}); while the techniques shown in the subsequent sections can be applied in this setting, this chapter will focus on dependencies only between components of the state.
\subsection{Assumptions on causal structure}
Importantly, under Assumption~\ref{asmp:block} or under the correlated noise variables assumption, the spurious variables $\eta_t$ are disjoint from the set of causal ancestors of the reward (i.e. the set of variables $X$ from which there is a directed path in $\mathcal{G}$ from $X$ to $r_t$ for some $t$). The suggestive notation of $s_t$ and $\eta_t$ to describe the environment-specific and environment-invariant components of the state is deliberate: we will show later that identifying the abstraction $(s_t, \eta_t) \rightarrow s_t$ is equivalent to a causal structure identification problem, and that this abstraction guarantees optimal generalization to new environments.
In order to obtain these results, we must enforce the Markov property on the MDP's causal graph. This goes beyond the requirement that the variables in the environment state at time $t$ can only affect the values of the state at time $(t+1)$, and can only affect the reward at time $t$, allowing us to consider the state and action at time $t$ as the only candidate for causal parents of the state at time $(t+1)$ and of the reward at time $t$. We refer the reader to Figure~\ref{fig:statgm} to demonstrate how causal graphical models can be translated to this setting.
\begin{assumption}[Independent Causal Mechanisms]\label{assmpt:causal_mechanisms} Let $x^1$ and $x^2$ be components of the observation $\mathbf{x}$. Let $X^i_{t}$ denote the value of $x^i$ at time $t$ for $i=1,2$. Then when no intervention is performed on the environment, we have the following independence, \begin{equation}
X^1_{t+1} \perp X^2_{t+1} \mid \mathbf{x}_t \; . \end{equation} \end{assumption}
The assumption of independent causal mechanisms is standard in the causal inference literature \citep{peters2017elements} but is stronger than the assumption of Markovian transition dynamics in the MDP. For example, an MDP whose state space consists of two variables $x_1$ and $x_2$ which are both set at each timestep by the same coin flip will have Markov transition dynamics, but its causal graph will not satisfy the Markov condition. Intuitively, Assumption~\ref{assmpt:causal_mechanisms} requires that the variables in the observation space reflect the environment's underlying data generating process. This assumption is crucial for our results in linear function approximation, though our results on the rich observation setting allow us to relax independence of causal mechanisms in the observation space.
While many of the most challenging technical problems in causal inference stem from more complex causal structures which do not satisfy the Markov property, these structures often arise in the first place because the data collector is not able to see the step-by-step evolution of the system. Put simply: an RL agent can observe that rain always occurs {before} a person opens their umbrella, while a supervised predictor will have access to only an instantaneous snapshot of the world with binary values of rain and umbrella. As a result, RL agents have access to much richer temporal information about the data-generating distribution, and this removes many, but not all, of the ambiguities that causal inference methods seek to resolve.
We have thus far required that the dynamics of the latent states in the environment be independent of the spurious variables; however, the spurious variables may still correlate with the return to an extent that a function approximator may use them to predict the value function or to select an action. For example, the spurious variables may be a copy of the latent state at the previous time step. In order to identify the relevant and irrelevant components of the state, the agent must receive suitable information from the training environments. In this chapter we will assume that the training environments are generated by interventions on the data-generating process shown on the right hand side of Figure~\ref{fig:irm_model_irrelevant}.
\begin{assumption}[Environment Interventions]\label{assmpt:envs} Let $\mathcal{X} = X_1 \times \dots \times X_n$, and $\mathcal{S} = X_{i_1} \times \dots \times X_{i_k}$. Each environment $e \in \mathcal{E}$ corresponds to a do-intervention or soft intervention \citep{eberhardt2007interventions} on a single variable $x_i$ in the observation space. \end{assumption}
We note that in order for the training environments to constitute a block MDP family, these interventions may only be applied to the spurious variables in the state if they are applied over multiple timesteps. An intervention on the causal variables -- for example, fixing the angle of a joint in a robotic simulator -- will change the dynamics on the shared latent states $\mathcal{S}$. While such interventions may provide valuable information about the structure of the world, they will also change the value function that the agent is trying to predict. Interventions which only influence the initial state distribution do not encounter this issue, but face the limitation of providing only a single transition where the target variable takes the desired value. As a result, we assume that the test environments will only contain interventions on spurious variables, but allow for potential interventions on causal ancestors of the reward in the training environments.
\section{State abstractions and causal feature sets} \label{sec:causal-abs} We begin our analysis with a simplified setting: we assume that the observation space $\mathcal{X}$ of the block MDP is a direct sum of variables $X_1, \dots, X_n$, and that each of these variables can be represented as a node in a causal graph corresponding to the dynamics of the MDP which satisfies Assumption~\ref{assmpt:causal_mechanisms}. Let $I \subset [n]$ be an index set of the variables which correspond to the latent state $s$ in the block MDP. Then the state can be decomposed into $\mathbf{x} = (\mathbf{x}_I) \oplus (\mathbf{x}_{I^C}) = s \oplus \eta$, where $s = \mathbf{x}_I$ denotes the latent state and $\eta = \mathbf{x}_{I^C}$ denotes the spurious variables. This setting admits a natural formulation of invariant causal prediction in order to identify the state abstraction $\phi(\mathbf{x}) = \mathbf{x}_I$. It is also straightforward to alternate between the causal graph formulation of the MDP and the block MDP formalism, noting that the emission function $q_e$ for an environment $e$ will simply be of the form $q_e(s, \eta) = s \oplus \eta = \mathbf{x}_I \oplus \mathbf{x}_{I^C}$. We can further show that the state abstraction $\mathbf{x} \mapsto \mathbf{x}_I$ is a model-irrelevance state abstraction (c.f. Definition~\ref{def:misa}) for all $\mathcal{M}_{e} \in \mathcal{M}$.
\begin{theorem}[Existence of model-irrelevance state abstractions]\label{thm:existence} Let $\mathcal{M}_\mathcal{E}$ denote a block MDP family with joint observation space $\mathcal{X}_\mathcal{E} = \cup_{e \in \mathcal{E}} X_e$, where the observation space $X_e$ is the image of some emission function $q_e(s,\eta)$ s.t. $s \in \mathcal{S}, \eta \in \Omega$ as before. Let $f_e = q_S \mid_{\mathcal{X}_e}$ as defined in \eqref{eq:inverse-emmision}. Then $\phi = \cup_{e \in \mathcal{E}} f_e$ is a model-irrelevance state abstraction for each $\mathcal{M}_{e} \in \mathcal{M}_{\mathcal{E}}$. \end{theorem} \begin{proof} First, note that $\cup_{e \in \mathcal{E}} f_e$ is well-defined by the block MDP assumption. By assumption we have that the reward function satisfies $R(x) = R(q_S(x))=R(\phi(x))$ for all observations $x$ and environments $e$. We further have that for any $x_1, x_2 $ s.t. $\phi(x_1) = \phi(x_2) = s$, and for any $s'$: \begin{align}
\int_{x' \in \phi^{-1}(s')}P(x'|x_1, a) &= P(s'|s,a) \int_{x' \in \phi^{-1}(s')} \int_{\eta': q_e(s', \eta') = x'} P(\eta'|s, \eta_1) \\
&= P(s'|s,a) \int_{x' \in \phi^{-1}(s')} \int_{\eta': q_e(s', \eta') = x'} P(\eta'|s, \eta_2) \\
&= \int_{x' \in \phi^{-1}(s')}P(x'|x_2, a) \end{align} Noting that $\phi( q_e(s, \eta') ) = s$ and $\phi^{-1}(s) = \{ q_e(s, \eta) \mid \eta \in \Omega\}$, we obtain the desired result. \end{proof}
We now consider whether, under Assumptions \ref{asmp:block}, \ref{assmpt:causal_mechanisms}, and \ref{assmpt:envs}, a model-irrelevance state abstraction can be obtained by causal inference methods. Intuitively, one would then expect that the \emph{causal variables} (in this case, the causal \textit{ancestors} of the return, rather than the parents of the prediction target as in the case of regression problems) should have nice properties as a state abstraction: in particular, they should enable generalization to new environments. In what follows, we will generalize our definition of a model-irrelevance state abstraction to refer to a block MDP family for which the dynamics of the induced abstract MDP on each environment in the family are equivalent.
\begin{definition}\label{def:block-misa} A model-irrelevance state abstraction over a block MDP family $\mathcal{M}_{\mathcal{E}}$ is one for which the following two conditions hold. First, $\phi$ is consistent with the reward function $R$, i.e. \begin{equation}
R(\mathbf{x}) = R(\mathbf{y}) \quad \forall \mathbf{x}, \mathbf{y}
\in \cup_{e \in \mathcal{E}} \mathcal{X}_e \text{ such that } \phi(\mathbf{x}) = \phi(\mathbf{y}) \; . \end{equation} Further, for all $e_1, e_2 \in \mathcal{E}, \mathbf{x}_1 \in \mathcal{X}_{e_1}, \mathbf{x}_2 \in \mathcal{X}_{e_2}, \mathbf{y} \in \mathcal{X}_{e_1} \cup \mathcal{X}_{e_2}$ such that $\phi(\mathbf{x}_1) = \phi(\mathbf{x}_2) $ we have: \begin{align}
\sum_{\mathbf{x}' \in \phi^{-1}(\phi(\mathbf{y}))} P_{e_1}(\mathbf{x}'|\mathbf{x}_1) = \sum_{\mathbf{x}' \in \phi^{-1}(\phi(\mathbf{y}))} P_{e_2}(\mathbf{x}'|\mathbf{x}_2) \end{align} \end{definition} The following result highlights the connection between the causal ancestor set and model irrelevance: a state abstraction that selects the set of causal variables from the observation space of a block MDP will be a model-irrelevance abstraction for every environment $e \in \mathcal{E}$ in this stronger sense. \begin{figure}\label{fig:statgm}
\end{figure}
\begin{theorem} \label{thm:causalstate_modelirrelevance}
Consider a family of MDPs $\mathcal{M}_\mathcal{E} = \{(\mathcal{X}, A, \mathcal{R}, P_e, \gamma)|e \in \mathcal{E} \}$, with $\mathcal{X} = X_1 \times \dots \times X_k$ where each $X_i, i \in 1, \dots, k$ is some collection of sets, for which there exists an SCM $\mathcal{C} = ((x^1_t, \dots, x^k_t, A_t, R_t)_{t=1}^\infty, \{ \eta_1, \dots, \eta_k\}, \mathcal{F}, P)$ such that each $\mathcal{M}_e \in \mathcal{M}_{\mathcal{E}}$ corresponds to an intervention on a single variable $X_i$ in $\mathcal{C}$, where $X_i$ is not a causal ancestor of the reward. Let $\mathcal{M}_\mathcal{E}$ satisfy Assumptions \ref{asmp:block}-\ref{assmpt:envs}. Let $S = \textbf{AN}(R)$ denote the ancestors of the reward $R$ in $\mathcal{C}$. Then the state abstraction $\phi_S(x) = [x]_S = (x_i \mid i \in S)$ is a \textit{model-irrelevance} abstraction over the block MDP family $\{\mathcal{M}_e \mid e \in \mathcal{E} \}$. \end{theorem}
\begin{proof} This result follows as a special case of Theorem~\ref{thm:existence} after showing that $\mathcal{M}_{\mathcal{E}}$ is a block MDP family. To do so, we construct each component of such a block MDP family as follows: we set $\mathcal{S} = (X_i)_{i \in S}$, $\mathcal{A}=A$, $R = \mathcal{R}$ and $\mathcal{X}=\mathcal{X}$. We define the transition operator $p = P_{S}$, where $P_S$ denotes the marginal transition probability $P(X^{t+1}_S \mid X^{t}_S)$ which is well-defined as all causal parents of nodes in $X_S$ are contained in $X_S$. The emission function $q$ is defined on the joint space $X_S$ and $X_{S^C}$ via concatenation and re-ordering such that $q(s, \eta_e) \rightarrow x$ whenever $s = [x]_S$ and $\eta_e = [x]_{S^C}$. Crucially, the criterion that each environment correspond to an intervention requires that $\eta_e$ evolve over time according to the dynamics induced by $P_e$ for each environment $e$. However, because $S$ is closed under taking causal parents and because Assumptions 1 and 2 apply, this correlation of the noise variable $\eta_e^t$ over time will not interfere with the dynamics on $\mathcal{S}$. We thus obtain a (relaxed) block MDP family which precisely corresponds to $\mathcal{M}_{\mathcal{E}}$. This then implies that the map $\phi(x) = [x]_S$ is a model-irrelevance state abstraction. \end{proof}
An important detail in the previous result is that the model-irrelevance state abstraction incorporates not just the parents of the reward, but also its ancestors. This is because in RL, we seek to model \textit{return} rather than solely rewards, and a variable which is a causal parent of the return may exert influence over several timesteps. We provide an illustration of such a state abstraction in the rightmost column of Figure~\ref{fig:statgm}. As a concrete example from the CartPole environment, only the position $x$ and pole angle $\theta$ are necessary to predict the reward. However, predicting the return requires knowledge of $\dot{\theta}$ and $\dot{x}$, their respective velocities.
\keyinsight{To obtain a state abstraction which will be a model-irrelevance abstraction on all environments $e$ in a block MDP, it suffices to identify the variables which are causal ancestors of the reward in the MDP's causal model. }
Strictly speaking, other subsets of variables which include the set of causal ancestors $S$ will be a model-irrelevance abstraction for each individual environment. What makes the set of causal ancestors $S$ special is that it applies to all environments in the block MDP. This property is crucial for generalization: under $\phi: \mathbf{x} \rightarrow [\mathbf{x}]_S$, we have that the union over environment transition operators acting on the latent space $\phi(\mathcal{X})$ is well-defined. In other words, $\phi$ maps each environment to an abstract MDP whose dynamics and state space are identical across the block MDP family.
Under this definition a model-irrelevance state abstraction must remove all spurious variables from the input. Doing so is a non-trivial task, and requires observing the effects of their variation. It is not in general possible to identify spurious variables from only a single environment, particularly when spurious variables are highly correlated with their causal counterparts. The following proposition highlights the importance of Assumption~\ref{assmpt:envs} to the {identifiability} of the causal feature set. This result will require either that we have at least one environment corresponding to an intervention on each variable in the causal graph, or that we have a particular class of intervention applied to all variables simultaneously \citep{peters2016causal}. \begin{proposition}[Identifiability of causal state abstractions]\label{prop:identifiability} Consider a block MDP family whose corresponding SCM and set of training environments $\mathcal{E}$ satisfy the conditions of Theorem 2 of \citet{peters2016causal}. Then the causal feature set $\phi_S$ is identifiable and corresponds to a model-irrelevance state abstraction for all test environments consisting of interventions on non-ancestors of the reward. Conversely, if these conditions are not satisfied then there may exist a state abstraction $\phi : \cup_{e \in \mathcal{E}} \mathcal{X}_e \rightarrow \bar{\mathcal{X}}$ such that $\phi$ is a model-irrelevance abstraction over $\mathcal{E}_{\text{train}}$, but not over $\mathcal{E}$ globally. \end{proposition}
\begin{proof} The proof of the first statement follows immediately from the iterative application of the identifiability result of \citet{peters2016causal} to each variable in the causal variables set. These conditions require that we also show the agent interventions on the \textit{causal} variables; the resulting training environments will not necessarily satisfy the block MDP assumption, but may still be used for identification. Once a causal variable set has been identified, it is straightforward to use this variable set to train a value function on environments in the block MDP family. Alternatively, it is possible to show that for environments satisfying Assumption~\ref{assmpt:causal_mechanisms}, so long as there is a do-intervention on each variable in $\textbf{AN}(R)^C$ in the training set $\mathcal{E}$, we preserve identifiability of the causal variable set. Constructing this set requires a slight modification of the aggregation procedure of \citet{peters2016causal} to return the largest set $S$ whose induced predictor is invariant.
For the converse, we consider a simple counterexample in which one non-ancestor of the return $\mathbf{x}_m$ is constant in every training environment with value $v_m$, where $\mathbf{x} = (\mathbf{x}_i)_{i=1}^d \in \mathcal{X}$. Then letting $S = \textbf{AN}(R)$, we observe that ${\phi = \mathbf{x} \mapsto \mathbf{x}_{S \cup \{m\}} }$ is also a model-irrelevance state abstraction for each training environment $\mathcal{M}_e$. However, it is straightforward to show that if $x_m$ is perturbed by a Bernoulli noise variable $Z \sim \mathrm{Ber}(0.5)$ in a new test environment, then these transition dynamics will not satisfy the conditions for $\phi$ to be a model-irrelevance state abstraction in the new environment. Indeed, we now have that the state abstraction $\phi$ violates the second condition of Definition~\ref{def:block-misa}. In particular, let $\mathbf{x}$ be a state occurring in both a training environment $e_{\mathrm{train}}$ and the test environment $e_{\mathrm{test}}$. Let $\mathbf{y} \in \mathcal{X}_{e_{\mathrm{train}}}$ be such that $\mathbf{y}_m = v_m$ , $P_{e_{\mathrm{train}}}(\mathbf{y} \mid \mathbf{x}) > 0$, and $P_{e_{\mathrm{test}}}(\mathbf{y} \mid \mathbf{x}) > 0$. Then \begin{equation}
\sum_{\mathbf{x}' : \phi(\mathbf{x}') = \phi(\mathbf{y})}P_{e_{\mathrm{train}}}(\mathbf{x}'|\mathbf{x})
= \sum_{\mathbf{x}' : \phi(\mathbf{x}') = \phi(\mathbf{y})} 2 P_{e_{\mathrm{test}}}(\mathbf{x}'|\mathbf{x}) \; , \end{equation} since $P_{e_\mathrm{test}}$ spreads probability mass equally over states where $\mathbf{x}_m = v_m$ and $\mathbf{x}_m = v_m + 1$. This violates our requirement that the union over transition operators $P_{e_{\mathrm{train}}}$ and $P_{e_{\mathrm{test}}}$ yield a well-defined function. \end{proof}
\subsection{Variable selection for linear predictors} We have shown that identifying a model-irrelevance state abstraction that will generalize to any environment consistent with the block MDP is possible by identifying the causal feature set. The following \textit{Linear MISA} algorithm (Algorithm~\ref{alg:linear_misa}) provides a concrete approach to do this. We require the presence of a replay buffer $\mathcal{D}$, in which transitions are stored and tagged with the environment from which they came. The algorithm then applies ICP to find all causal ancestors of the reward iteratively. This approach has the benefit of inheriting many desirable properties from ICP -- under suitable identifiability conditions, it will return the exact causal variable set to a specified degree of confidence.
It also inherits inconvenient properties: the ICP algorithm is exponential in the number of variables, and so this method is not efficient for high-dimensional observation spaces. We are further restricted to considering linear relationships of the observation to the reward and next state. Additionally, because we take the union over iterative applications of ICP, the confidence parameter $\alpha$ used in each call must be adjusted accordingly. Given $n$ observation variables, we use a conservative value of $\frac{\alpha}{n}$ to get an overall confidence of $\alpha$ from the procedure.
\begin{algorithm}[t] \SetAlgoLined \hspace*{\algorithmicindent} \textbf{Input:} $\alpha$, a confidence parameter; $\mathcal{D}$, a replay buffer with observations $\mathcal{X}$. \\% \\ \hspace*{\algorithmicindent} \textbf{Output:} {$S \subset \{1, \dots, k\}$, the causal state variables}. \\
$S \gets \emptyset$\;
stack $\gets$ $r$ \;
\While{stack is not empty}{
$v$ = stack.pop() \;
\If{$v \not \in S$}{
$S' \gets$ \texttt{ICP}($v$, $\mathcal{D}$, $\frac{\alpha}{\text{dim}(\mathcal{X})}$) \;
$S \gets S \; \cup S'$\;
stack.push($S'$)}
} \caption{Linear MISA: Model-irrelevance State Abstractions} \label{alg:linear_misa} \end{algorithm}
\subsubsection{Evaluation} \label{sec:model_linear} We illustrate the benefits of explicitly removing spurious features from the state input in Figure~\ref{fig:icp_result}. We consider a simple family of MDPs with state space $\mathcal{X} = \{ (x_1, x_2, x_3)\}$, with a transition dynamics structure such that $x_1^{t+1} = x_1^t + \epsilon_1^e$, $x_2^{t+1} = x_2^t + \epsilon_2^e$, and $x_3^{t+1} = x_2^t + \epsilon_3^e$. The reward $r(x)$ is set to be a linear function of $x_1$ and $x_2$. We train on 3 environments with soft interventions on each noise variable. We run the linear MISA algorithm on batch data from these 3 environments to get a state abstraction $\phi(x) = \{x_1, x_2\}$, then train 2 linear predictors on $\phi(x)$ and $x$. We evaluate the {generalization error} on novel environments that correspond to different hard interventions on the value of the $x_3$ variable. We observe that the predictor trained on $\phi(x)$ attains zero generalization error because it zeros out $x_3$ automatically. However, any nonzero weight on $x_3$ in the least-squares predictor will lead to arbitrarily large generalization error, which is precisely what we observe in Figure \ref{fig:icp_result}. \begin{figure}\label{fig:icp_result}
\end{figure}
\section{State abstractions for rich observations}
The variable selection problem has been an enlightening regime in which to ground our intuition about the relationship between invariant prediction and generalization. However, most RL problems of interest involve more complex observation spaces where simply identifying an input mask is not sufficient to guarantee generalization to new environments. This section leverages the intuition discussed previously to present a method motivated by invariant prediction which learns approximate model-irrelevance state abstractions in rich observation settings.
\subsection{Block MDP generalization bounds}
Our goal in this section is to \textit{learn} representations that will generalize from the training environments to a novel test environment, as opposed to identifying input masks as we did in the previous section. However, normal PAC generalization bounds require a much larger number of environments than one could expect to obtain in the reinforcement learning setting. The appeal of an invariant representation is that it may allow for theoretical guarantees on learning the right state abstraction with many fewer training environments, as discussed by \citet{peters2016causal}. If the learned state abstraction is close to capturing the true latent dynamics of the block MDP family, then the model error, and by extension performance, in the test environment can be bounded by a function of the distance between the test environment's abstract state distribution and that of the training environments. In particular, if the state abstraction is {invariant} and always maps equivalent states in any pair of environments to the same latent representation, then the learned model error on the test environment will be equal to that on the training environments.
A more interesting case arises when the state abstraction $\phi$ is \emph{approximately invariant}: such a state abstraction might map equivalent states from the training environment $M$ and test environment $M'$ to similar but non-identical feature vectors. For simplicity we will consider only deterministic environments, but similar reasoning can be applied to stochastic MDPs. In this case, a looser requirement on $\phi$ can still yield reasonable generalization error bounds for a learned transition dynamics model. Intuitively, the requirement we set is that the training environment dynamics function $T_M$ be Lipschitz with respect to $\phi$, and that the union of the training and test environment dynamics $T_M \cup T_{M'}$ be well-defined and Lipschitz. This is analogous to the requirement on the induced dynamics of a model-irrelevance state abstraction: if states from $M$ and $M'$ map to the same feature vector, then the successor states to each must also map to identical feature vectors. The Lipschitz requirement introduces a geometric notion of \textit{similarity}: if states from $M$ and $M'$ map to nearby feature vectors, then their successors must also be relatively close in feature space. With this requirement, it is possible to bound the generalization error of a learned latent dynamics model trained on $M$ in terms of the Lipschitz constant of $T_{M} \cup T_{M'}$ and the Wasserstein distance between the state visitation distributions of the two environments in feature space. Intuitively, the Lipschitz constant quantifies the smoothness of the environment dynamics with respect to $\phi$, while the Wasserstein distance between the state visitation distributions of the two environments in feature space measures how well $\phi$ has been able to discard irrelevant information. The following result assumes a fixed policy and a state-valued input to the transition function, but can be easily adapted to the control setting.
\begin{restatable}{theorem}{thmModelErr}[Model error bound] \label{thm:model_error}
Let $M = \langle \mathcal{X}, A, R, P, \gamma \rangle$ and $M' = \langle \mathcal{X}', A, R', P', \gamma \rangle$ be two environments from a block MDP family, and let $\phi: \mathcal{X} \rightarrow \mathbb{R}^d$ denote a model-irrelevance state abstraction that maps states from both MDPs to feature vectors. Set $T_M: \mathcal{X} \rightarrow \mathcal{X}$ as the (deterministic) transition function of $M$. Suppose that the union of the dynamics of $M$ and $M'$ is $L$-Lipschitz with respect to the embedding $\phi$ and that $T:\mathbb{R}^d \rightarrow \mathbb{R}^d$ is some approximate transition model satisfying ${\max_{x} \mathbb{E}\|T(\phi(x)) - \phi(T_M(x)) \| < \delta}$, for some $\delta > 0$. Let $W_1(\pi_1, \pi_2)$ denote the 1-Wasserstein distance between distributions $\pi_1, \pi_2$ on $\mathbb{R}^d$. Then \begin{equation}
\mathbb{E}_{x \sim M'}[\|T(\phi(x)) - \phi(T_{M'}(x)) \|] \leq \delta + 2LW_1(\pi_{\phi(M)}, \pi_{\phi(M')}). \end{equation} \end{restatable} \begin{proof} The proof follows from a straightforward decomposition of the error into three components: one which depends on the similarity between each input $x \in X_{M'}$ and the nearest neighbour $y$ in $X_{M}$, the model error on $y$, and the similarity between the representations of the successor states $T_M(x)$ and $T_M(y)$: \begin{align*}
& \mathbb{E}_{x \sim M'} [\|T(\phi(x)) - \phi(T_{M'}(x)) \|] \\&= \mathbb{E}_{x \sim M'} \bigg[\min_{y \in X_M} \|T(\phi(x)) - T(\phi(y)) + T(\phi(y)) -\phi( T_{M}(y)) + \phi(T_M(y)) - \phi(T_{M'}(x)) \| \bigg ] \\
&\leq \mathbb{E}_{x \sim M'} \bigg [\min_{y \in X_M} \|T(\phi(x)) - T(\phi(y))\| + \|T(\phi(y)) - \phi(T_M(y))\|+\| \phi(T_M(y)) - \phi(T_{M'}(x))\| \bigg ]\\
\intertext{Letting $\gamma$ be a coupling over the distributions of $\phi(\mathcal{X}')$ and $\phi(\mathcal{X})$ which minimizes the objective of the Wasserstein distance, i.e. ${\mathbb{E}_{\gamma(\phi(x),\phi(y))} \|\phi(x)-\phi(y)\|= W_1(\pi, \pi')}$}
&\leq \mathbb{E}_{\gamma(\phi(x), \phi(y))} \bigg [\|T(\phi(x)) - T(\phi(y))\| + \delta + L\|\phi(x)-\phi(y)\| \bigg ]\\
&\leq \mathbb{E}_{\gamma(\phi(x), \phi(y))} \bigg [L\|\phi(x) - \phi(y)\|+ \delta + L\|\phi(x)-\phi(y)\| \bigg ] \\
&= 2LW_1(\pi, \pi') + \delta \end{align*}
\end{proof}
Analogous results can be obtained for value error \citep{zhang2020invariant}, which provide a bound on generalization performance that depends on the supremum of the dynamics and reward errors obtained by a learned model, along with the degree of invariance captured by the representation. The former corresponds to the empirical risk in a PAC bound, while the latter corresponds to the \textit{generalization gap}. A state abstraction which perfectly captures the equivalence classes of the block MDP family will attain error on new environments equal to that of its error on the training environments and exhibit no generalization gap. Meanwhile, a state abstraction equal to the identity map can induce arbitrarily large errors on test environments.
\subsection{Learning a model-irrelevance state abstraction}
\label{sec:model_irrelevant}
It is clearly desirable to learn an invariant representation across environments. How to do so is less clear. In this section, we propose one such approach based on a model-learning objective, but note that the principles guiding this method can easily be applied to other representation-learning methods. Our approach aims to learn a representation ${\phi : \mathcal{X} \rightarrow \mathbb{R}^d}$ which preserves the reward- and dynamics-relevant information of the state, while discarding environment-specific information in settings where such information is encoded non-linearly in rich observations. Ideally, a representation which discards environment-specific information in the training environments should also do so in new test environments, though guaranteeing this property is more challenging than in the variable selection setting.
We now present an objective to learn a dynamics preserving state abstraction $\mathcal{Z}$. This requires disentangling the state space into a minimal representation that causally relates to the reward $s_t:=\phi(x_t)$ and all other features of the observation $\eta_t:=\varphi(x_t)$.
We decompose our learning objective into two parts: \begin{itemize}
\item \textbf{Invariance}: we train a {\color{PineGreen}\textbf{task classifier}} on the shared latent representation ${C:\mathcal{Z}\mapsto [0,1]^N}$ with cross-entropy loss and employ an adversarial loss~\citep{tzeng2017ada} on $\phi$ to maximize the entropy of the classifier output to ensure task specific information is not passing through to $\mathcal{Z}$.
\item \textbf{Dynamics preservation:} we train a reward model and a latent dynamics model on the state abstraction $\phi$ to encourage the preservation of this information.
\subitem The {\color{BrickRed}\textbf{reward model}} is an MLP $R$ trained to predict the sample reward $r$ given input transition $(\phi(\mathbf{x}), a, \phi(\mathbf{x}'))$, using the objective
\begin{equation*}
J_R(\phi,R)=\sum_i \mathbb{E}_{\pi_{b_i}}\big[(R(\phi(x_i), a,\phi(x_i')) - r_i')^2\big]\;. \end{equation*}
\subitem The {\color{Cerulean} \textbf{transition model}} is an MLP $f_s$ acting on the latent space $\mathcal{Z}$. The transition model is trained via a reconstruction loss, using a learned decoder $\phi^{-1}$ which takes as input a task-dependent state embedding $\psi(\mathbf{x})$ along with the latent-space model output $f_s(a, \phi(\mathbf{x}))$, and outputs a predicted next observation.
\begin{align*}
J_D(\phi,\psi, f_s,f_\eta)=\sum_i \mathbb{E}_{\pi_{b_i}}\big[&(\phi^{-1} (f_s(a,\phi(x_i)),
f_\eta(a,\psi(x_i))) - x_i')^2\big], \end{align*} \end{itemize}
\noindent This gives us a final objective \begin{equation}
J_{\text{ALL}}(\phi, \psi, f_s, f_\eta, R) = {\color{Cerulean}J_D(\phi,\psi, f_s,f_\eta)} + {\color{BrickRed}\alpha_R J_R(\phi,R)} - {\color{PineGreen}\alpha_C H(C(\phi))}, \end{equation} where $\alpha_R$ and $\alpha_C$ are hyperparameters and $H$ denotes entropy (Algorithm~\ref{alg:nonlinear_misa}).
\begin{algorithm}[t] \SetCustomAlgoRuledWidth{0.6\textwidth} \SetAlgoLined \hspace*{\algorithmicindent} \textbf{Input:} $\mathcal{E}$, a set of environments; $\pi_0$, an initial policy; $\phi_0$, $f_0$ an initial invariant feature map and transition model; $\{\psi_0^e \mid e \in \mathcal{E} \}$ and $f^e_{\eta, 0}$, initial task-specific feature maps and transition models.\\ \hspace*{\algorithmicindent} \textbf{Output:} \text{$\phi$, an invariant state encoder.} \\
$\pi \gets \pi_0$\;
$\phi, f_s \gets \phi_0, f_{s,0}$ \;
$\psi^e, f_\eta^e \gets \psi^e_0,f_{\eta,0}^e $ for $e \in \mathcal{E}$ \;
$\mathcal{D}_e \gets \emptyset$ for $e \in \mathcal{E}$ \;
\While{forever}
{
\For{$e \in \mathcal{E}$}
{
$a \gets \pi(x_e)$\;
$x'_e, r \gets $ \texttt{step}$(x_e,a)$ \;
\texttt{store}$(x_e,a,r,x'_e)$ \;
}
\For{$e \in \mathcal{E}$}
{
Sample batch $X_e$ from $\mathcal{D}_e$ \;
$f_\eta^e,\psi^e \gets \nabla_{f_{\eta}^e,\psi^e} [J_D(X_e) ]$ \;
}
$f_s, \phi, r \gets \sum_{X_e}\nabla_{f_s,\phi} [J_{\text{ALL}}(X_e) ]$\;
$C \gets \nabla_C$ \texttt{CE\_loss}$(C(\phi(\{x_e\}_{e\in\mathcal{E}})), \{e\}_{e\in\mathcal{E}})$ \;
}
\caption{Nonlinear Model-irrelevance State Abstraction (MISA) Learning}
\label{alg:nonlinear_misa} \end{algorithm}
\subsection{Results} We first evaluate MISA in a rich-observation model-learning task to evaluate whether the learned representation and abstract model do indeed generalize to complex dynamics structures where the factors of variation correspond to non-linear functions of the observation. We next look to imitation learning (Section~\ref{sec:imitation_learning}), where we show that the state abstractions learned by MISA allow the agent to zero-shot generalize to observations generated by new camera angles. Finally, we explore end-to-end reinforcement learning in the low-dimensional observation setting with correlated noise (Section~\ref{sec:reinforcement_learning}) and again show generalization capabilities where single task and multi-task methods fail.
\subsubsection{Model learning: rich observation setting} \label{sec:model_nonlinear} We evaluate the gradient-based MISA method (Algorithm~\ref{alg:nonlinear_misa}) in a setting with nonlinear dynamics and rich observations. We set the agent the task of learning a model which can generalize to new background colours on pixel-valued control tasks. We randomly initialize the background colour of two training environments from Deepmind Control~\citep{deepmindcontrolsuite2018} from the range $[0, 255]^3$. We also randomly initialize another two backgrounds for evaluation. The orange line in Figure~\ref{fig:imitation_learning} shows performance on the evaluation environments in comparison to three baselines. In the first, we train on a single environment and test on another with our method, (\texttt{MISA - 1 env}). Without more than a single experiment to observe at training time, there is no way to identify whether or not the background can safely be ignored, and indeed we observe that the method appears to include information about the background in its representation. In the second baseline, we combine data from the two environments and train a model over all data (\texttt{Baseline - 1 decoder}), but without explicitly encouraging invariance across the two environments. The third is another invariance-based method which uses a gradient penalty, IRM~\citep{arjovsky2019invariant}. In the second case the error is tempered by seeing variance in the two environments at training time, but it is not as effective as MISA with two environments. In the case of IRM, the loss starts much higher but very slowly decreases, and we find it is very brittle to tune in practice. Implementation details are deferred to Appendix~\ref{app:model_nonlinear_implementation}.
\begin{figure}\label{fig:imitation_learning}
\end{figure}
\subsection{Imitation learning} \label{sec:imitation_learning} Model-learning is often a useful auxiliary task in RL problems, but ultimately the objective in RL is to learn an effective policy. We now evaluate how well a policy trained on top of the invariant representation found by MISA is able to generalize to new environments. We focus on imitation learning, as this setting involves fewer moving parts than an online learning setting, while preserving the policy-learning challenge of the RL problem. In this setup, we first train an expert policy using the proprioceptive state of Cheetah Run from the DeepMind Control suite \citep{deepmindcontrolsuite2018}. We then use this policy to collect a dataset for imitation learning in each of two training environments. When rendering these low dimensional images, we alter the camera angles in the different environments (Figure~\ref{fig:imitation_learning_envs}). We report the generalization performance as the test error when predicting actions in Figure~\ref{fig:imitation_learning}. Model error on the evaluation environment increases significantly over the course of training in all baselines; while we see that test error does increase with MISA as well, the error growth is significantly slower compared to single task and multi-task baselines.
\begin{figure}
\caption{The Cheetah Run environment from Deepmind Control with different camera angles. The first two images are from the training environments and the last image is from evaluation environment.}
\label{fig:imitation_learning_envs}
\end{figure}
\subsection{Reinforcement learning} \label{sec:reinforcement_learning} Finally, we evaluate Algorithm~\ref{alg:nonlinear_misa} on the full RL loop. This presents a greater challenge than was observed in the offline learning settings, as the invariant prediction component of the model must now face the possibility that the learned policy will visit different state distributions in different environments, particularly early in training. We therefore go back to the proprioceptive state in the \texttt{cartpole\_swingup} environment in DeepMind Control~\citep{deepmindcontrolsuite2018} to show that we can in some cases still learn a model-irrelevance state abstraction while training a policy. We use the Soft Actor Critic (SAC) algorithm~\citep{haarnoja2018sac} with an additional linear encoder, and add spurious correlated dimensions which are a multiplicative factor of the original state space. We also add an additional environment identifier to the observation. This factor varies across environments. We train on two environments with multiplicative factors $1\times$ and $2\times$, and test on $3\times$. Like \citet{arjovsky2019invariant}, we also incorporate noise on the causal state to make the task harder, specifically Gaussian noise $\mathcal{N}(0, 0.01)$ to the true state dimension. This incentivizes the agent to attend to the spuriously correlated dimension instead, which has no noise. In Figure~\ref{fig:cartpole_swingup_rl} we see the generalization gap drastically improves with our method in comparison to training SAC with data over all environments in aggregate and with the IRM objective~\citep{arjovsky2019invariant} implemented on the critic loss. Intriguingly, we find that even without noise on the ground truth states, with only two environments, baseline SAC fails (Figure~\ref{fig:cartpole_swingup_rl}). Implementation details and more information about SAC can be found in Appendix~\ref{app:rl_implementation}.
\begin{figure}\label{fig:cartpole_swingup_rl}
\end{figure}
\section{Conclusions} The findings presented in this chapter illustrate the importance of leveraging explicit assumptions on the causal structure of the environment for sample-efficient multi-environment generalization. We have shown that the application of invariant prediction methods in RL allows agents to learn policies that robustly generalize across environments with a shared causal structure after experiencing only a handful of training environments. We have further provided methods for both the low dimensional linear value function approximation setting and the deep RL setting which leverage invariant prediction to extract a causal representation of the state.
The block MDP family we defined in this chapter provides an explicit characterization of a rich class of MDPs in which generalization from a training task to a novel environment is tractable; however, promising future directions include extending these results to apply to an even broader class of problems. The proposed method in this chapter has not solved the generic problem of generalization to new environments in RL, but it has clearly demonstrated the benefits of applying the principle of invariant prediction to identify features that are useful for generalization in this expressive family of environments. Future work on this problem setting may find variations on invariant prediction which are more robust and which can handle the non-stationary rich-observation input distributions found in many deep RL problems. It may also extend these ideas to related problems including third-person imitation learning and sim-to-real transfer.
If the reader is to come away with one insight, it is this: just as representations which enable generalization between training inputs enable better generalization in the single task setting (as we saw in Chapters~\ref{chp:supervised}, \ref{chp:invariance}, and \ref{chp:gen-rl}), a representation which enables generalization between different training \textit{environments} is likely to do the same in the multi-environment setting. Translating invariant prediction methods to RL is non-trivial; in large part this is due to the non-stationarities described in Chapters~\ref{chp:rl-dynamics} and \ref{chp:rep-learning}. However, our findings here present a striking proof-of-concept of the utility of these ideas in reinforcement learning.
\chapter{Conclusion}
It is easy to ensure that a deep neural network learns what we tell it to learn: doing so simply requires ensuring sufficient expressivity for the size of the training set at hand. It is an unsolved problem to ensure that it learns what we \textit{want} it to learn, particularly in the context of deep reinforcement learning, where what we want a network to learn may evolve over the course of training. This thesis has argued that understanding how a network learns, by studying the properties of the learning trajectory taken over the course of training, is crucial to ensure that the learned function captures the underlying structure of the data it was trained on. We have applied this principle to a vast array of application areas, from Bayesian model selection to neural architecture search, from reinforcement learning to computer vision problems. Throughout these domains, a consistent trend has emerged: networks which are better able to use an update to their prediction on one data point to improve their accuracy on other data points train faster, generalize better, and have higher marginal likelihoods.
\section*{Discussion} Our primary quantity of interest has been interference, a notion of intra-dataset generalization. The intuition motivating this study is simple: a network which can leverage information from a gradient update on one data point to improve its performance on other data points should generalize better than one which treats the data as being independent or, worse, faces competing interests wherein improving a prediction on one data point reduces performance on others. A model which experiences positive interference will converge more quickly and in principle generalize better than one which exhibits weak or conflicting interference. This intuition, inspired by a connection to the Bayesian marginal likelihood, motivated our study of training speed and invariance in Chapters~\ref{chp:invariance} and \ref{chp:supervised}. Many implications of this relationship have been leveraged in follow-up work to develop efficient performance estimators for neural architecture search and hyperparameter selection.
Whereas supervised learning involves fitting a single target function that remains fixed over the course of training, the targets that value-based RL objectives seek to fit are more mercurial, evolving alongside the network's predictions. This complicates the story relating training speed and generalization, and requires additional analytical and empirical tools. We developed a notion of subspace convergence to study the representations learned by an idealized model of deep RL agents in the theoretical results of Chapter~\ref{chp:rl-dynamics}. We went on to present a characterization of an agent's representation in terms of the types of functions it is biased towards fitting quickly in Chapter~\ref{chp:rep-learning}. These tools provided us with a number of fruitful insights into the learning dynamics of reinforcement learning agents, setting the stage for our study of generalization in Chapters~\ref{chp:gen-rl} and \ref{chp:icp}.
Our study of generalization in deep RL leveraged these tools to understand the nuanced trade-offs between generalization and stability in value-based RL algorithms. This analysis hinged on a key insight outlined in Chapter~\ref{chp:gen-rl}: that the targets which deep RL agents seek to fit initially contain relatively little information about environment structure, and only accumulate this structure later in training. By asking networks to fit unstructured, discontinuous targets in the critical early phases of training, we bias them towards regions of the parameter space where interference between observations is weaker, reducing their potential for generalization. This analysis presented some obvious work-arounds in the form of post-training distillation which we showed improve generalization and extrapolation, but the benefits of this approach were largely limited to generalization within a single environment. Chapter~\ref{chp:icp} concluded with discussion of generalization via invariant prediction, establishing connections to causality to address these limitations. We showed that by assuming a large degree of shared structure between environments, we can significantly improve upon the sample-efficiency of standard RL methods via an application of invariant prediction algorithms from causal inference.
The initial motivation of this thesis was to understand why neural networks generalize, and how this picture can become complicated in the setting of reinforcement learning. Looking back, it is clear that this was the wrong question: firstly, because as we have seen in this thesis neural networks often \textit{fail} to generalize, particularly in the case of out-of-distribution data and in RL problems; secondly, because there is not in general a single straightforward explanation as to why a given neural network generalizes well or poorly. Instead, a variety of properties of a network architecture and training procedure come into play, and to understand the relationship between these properties and generalization it is crucial to understand the learning dynamics of the training procedure. This thesis has demonstrated the utility of this perspective in both supervised and reinforcement learning settings, and provided a number of theoretical and empirical tools for further analysis.
\section*{Further work}
While there are many straightforward extensions of the results presented in the preceding chapters, a number of deeper questions also emerge concerning how to apply these ideas to modern training regimes. Chief among these is the recent success of pre-training gargantuan models via self-supervised learning on vast swathes of data. How should we evaluate interference in such cases? As opposed to the supervised learning regime of Chapters 3 and 4, state of the art training procedures involve showing the network a vast and diverse array of data and then fine-tuning its predictions on some downstream tasks. In these cases the line between a model's inductive bias and its learned outputs becomes blurred, making the application of the tools connecting training speed and model selection presented here nontrivial. Some interesting connections might arise between the analysis of Chapter~\ref{chp:rl-dynamics} and the self-supervised learning objectives used in pre-training large models: temporal difference learning can be viewed as implicitly training the network to be self-predictive before any 'fine-tuning' reward signal is given. It also raises an interesting question of whether the ideas behind invariance and interference might be used to shape pre-training objectives which build even better inductive biases than what is used by autoregressive models.
A second cluster of intriguing ideas concern trade-offs between plasticity and generalization, and the precise relationship between loss of plasticity and declining generalization performance. Chapter~\ref{chp:rep-learning} suggests that in many cases the correlation between the rank of a feature embedding and the plasticity of the network which parameterizes it is weak. The degree to which the network disentangles inputs can measure not just its ability to linearly distinguish observations, but also the degree to which its predictions have needed to change in order to fit its previous targets. This presents a conflict between the intuitions of prior literature in reinforcement learning, that higher-dimensional embeddings should correspond to networks with greater flexibility, and that from deep learning, where significant changes to a network's outputs tend to reduce its plasticity. This tension raises an interesting question into the existence of trade-offs between plasticity and the network's ability to represent particular function classes. It may be the case that in order to solve complex prediction tasks, a network must necessarily reduce its ability to quickly fit unrelated targets. Analogously, it may be the case that excessive generalization too early in training could destabilize learning, making memorization a feature rather than a bug of the learning process. If such trade-offs exist, they will carry significant implications for the application of RL to increasingly complex tasks, where training trajectories are long and require a persistent ability to adapt to new information and generalize from old experiences. It may be the case that new architectures and training procedures are needed in order to avoid the pitfalls identified in this thesis, which take into account the continual nature of reinforcement learning problems.
Finally, the role of generalization between data points can likely be leveraged more explicitly in the training process to encourage the network to develop the correct inductive biases for the data set it is trained on. To some degree, the implicit bias of gradient descent appears to already push large models towards solutions which generalize well after the training accuracy has saturated in many natural language tasks, a phenomenon commonly referred to as `grokking'. By more directly encouraging networks to pick up on features which generalize between many training inputs, it may be possible to accelerate this phenomenon to obtain solutions which generalize well earlier in training.
Beyond these concrete research directions, the findings presented in this thesis raise a deeper philosophical question. We discussed in Section~\ref{sec:background-science} the distinction between mathematical truths and scientific theories, and have seen examples of each throughout the thesis. Our formal guarantees have consistently been forced to trade off between generality of the problem setting (e.g. the exact gradient updates of Theorem~\ref{thm:infinite-heads}, the linear group action in Theorem~\ref{theorem:symmgd}, and the Lipschitz continuity of the transition operator in Theorem~\ref{thm:model_error}), and precision of the resulting guarantee. In contrast, many chapters have gone on to distill the intuition behind each theoretical result into a principle which yields empirically testable predictions, such as the training speed estimator of Chapter~\ref{chp:supervised} and the nonlinear invariant prediction methods of Chapter~\ref{chp:icp}. Arguably, it is the empirical test of the intuition which has provided the greatest practical insight into the phenomena of interest to this thesis. But is this empirical approach capable of truly giving an explanation of generalization?
On its face, the answer would appear to be negative. Our empirical approach to generalization is almost tautological. A network will generalize well to test data, we have shown, if it generalizes well between data points it encounters during learning. However, the ultimate reason why a network generalizes on a given dataset is because its inductive bias is a good fit for the data. Developing a detailed understanding of the training trajectory is crucial to be able to obtain a measure of the fit between a network’s inductive bias and the data generating distribution. This detailed understanding of a network’s training trajectory has also been shown to bring useful insights into more nuanced notions of generalization and capacity in reinforcement learning, where a network must not only generalize between observations drawn from some distribution, but also to new observations and new prediction targets. The learning dynamics framework has thus already born fruit in both providing insight into generalization under the classical i.i.d. assumption on the training and evaluation data, and broadening this notion of generalization to apply to a richer class of prediction problems arising in reinforcement learning. Whether or not the precise empirical methods and theoretical tools presented in this thesis are leveraged directly in future work, it is clear that the study of learning dynamics promises to provide deep insights into some of the most fundamental questions about generalization in the years to come.
\startappendices \let\svaddcontentsline\addcontentsline \renewcommand\addcontentsline[3]{
\ifthenelse{\equal{#1}{lof}}{}
{\ifthenelse{\equal{#1}{lot}}{}{\svaddcontentsline{#1}{#2}{#3}}}} \chapter{The role of invariance in generalization}
\section{Proofs} \label{apx:invar-proofs} \propEmpRisk*
\begin{proof}[Proof of Proposition~\ref{prop:empirical:risk:order}]
Let $\mathcal{G}$ be a group with some probability measure $\lambda$, and $F$ a class of functions $f : \mathcal{X} \to \mathbb{R}$. Let $\ell : \mathbb{R} \times \mathbb{R} \to \mathbb{R}_+$ be a loss function such that $\ell(f({\,\vcenter{\hbox{\tiny$\bullet$}}\,}),{\,\vcenter{\hbox{\tiny$\bullet$}}\,})\in L_2(P_{\calD})$ for every $f \in F$. Then the augmented risk of any function $f \in F$ is
\begin{align*}
\invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) = \frac{1}{n}\sum_{i=1}^n \mathbb{E}_{G\sim\lambda}[\ell(f(G X_i),Y_i)] \;.
\end{align*}
If $\ell$ is convex in the first argument, then by Jensen's inequality,
\begin{align} \label{eq:jensens}
\mathbb{E}_{G\sim\lambda}[\ell(f(G X_i),Y_i)] \geq \ell(\mathbb{E}_{G\sim\lambda}[f(G X_i)],Y_i) \;, \quad i = 1,2,\dotsc,n \;.
\end{align}
On the other hand, the $\mathcal{G}$-symmetrization of $f(X)$ is $\invf{f}(X) = \mathbb{E}_{G\sim\lambda}[f(G X)]$, with augmented risk
\begin{align*}
\invf{\widehat{R}}_{\loss}(\invf{f},\mathcal{D}^n) & =
\frac{1}{n}\sum_{i=1}^n \mathbb{E}_{G\sim\lambda}[\ell(\mathbb{E}_{G\sim\lambda}[f(G X_i)],Y_i)] \\
& =\frac{1}{n}\sum_{i=1}^n \ell(\mathbb{E}_{G\sim\lambda}[f(G X_i)],Y_i) = \widehat{R}_{\loss}(\invf{f},\mathcal{D}^n) \;.
\end{align*}
Combined with \eqref{eq:jensens}, the reduction in empirical augmented risk
follows. The reduction in $\invf{\widehat{R}}_{\loss}(Q,\mathcal{D}^n)$ follows trivially.
The variance-reduction
is established by extending the argument in the proof of \citet{chen2019invariance}. Specifically, by the conditional Jensen's inequality,
\begin{align*}
\variance_{\mathcal{D}^n\simP_{\calD}^n}\big[ \invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) \big]& = \variance[\mathbb{E}[\invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) \mid \Phi^n]] \geq \variance[ \mathbb{E}[ \widehat{R}_{\loss}(\invf{f},\mathcal{D}^n) \mid \Phi^n] ] \\
&= \variance_{\mathcal{D}^n\simP_{\calD}^n}\big[ \widehat{R}_{\loss}(\invf{f},\mathcal{D}^n) \big] \;.
\end{align*} \end{proof}
\propSymmGD* \begin{proof} Let $w \in V^*$, and suppose that $w$ is not invariant under the action of $\mathcal{G}$. Let $\invf{w} = \mathbb{E}_{G\sim\lambda}[\rho^*_G w]$, which is $\mathcal{G}$-invariant by construction. Because $\mathcal{X}$ spans $V$, $w - \invf{w} \neq 0$ implies that $w^\top(\mathcal{X}) \neq {\invf{w}}^\top(\mathcal{X})$.
Consider the minimizer \begin{align*}
\hat{\mathbf{w}} = \operatorname*{arg\,min}_{\mathbf{w} \in V^*} \invf{\widehat{R}}_{\loss}(f_\mathbf{w},\mathcal{D}^n) = \operatorname*{arg\,min}_{\mathbf{w} \in V^*} \frac{1}{n}\sum_{i=1}^n \mathbb{E}_{G\sim\lambda}[\ell(\mathbf{w}^{\top}\rho_G X_i,Y_i)] \;, \end{align*} which is unique because $\ell$ is strictly convex by assumption. Assume that $\hat{\mathbf{w}}$ is not $\mathcal{G}$-invariant. Applying Jensen's inequality, we have \begin{align*}
\invf{\widehat{R}}_{\loss}(f_{\hat{\mathbf{w}}},\mathcal{D}^n) & = \frac{1}{n}\sum_{i=1}^n \mathbb{E}_{G\sim\lambda}[\ell(\hat{\mathbf{w}}^{\top} \rho_G X_i,Y_i)] \\
& > \frac{1}{n}\sum_{i=1}^n \ell(\mathbb{E}_{G\sim\lambda}[\hat{\mathbf{w}}^{\top}\rho_G X_i],Y_i) \\
& = \frac{1}{n}\sum_{i=1}^n \ell(\mathbb{E}_{G\sim\lambda}[\rho_{G^{-1}}^* \hat{\mathbf{w}}]^{\top} X_i,Y_i) \\
& = \frac{1}{n}\sum_{i=1}^n \ell(\invf{\hat{\mathbf{w}}}^{\top} X_i, Y_i) = \invf{\widehat{R}}_{\loss}(f_{\invf{\hat{\mathbf{w}}}},\mathcal{D}^n) \;, \end{align*} which cannot be the case because $\hat{\mathbf{w}}$ minimizes $\invf{\widehat{R}}_{\loss}$. Therefore, $\hat{\mathbf{w}}$ must be $\mathcal{G}$-invariant.
\end{proof}
\lempushforwardKL* The proof of Lemma~\ref{lem:pushforward:KL} relies on the chain rule of relative entropy.
Let two probability measures, $\tilde{\mu} \ll \tilde{\nu}$ defined on the product space $(E_1 \times E_2, \mathcal{E}_1 \otimes \mathcal{E}_2)$, have marginal measures $\tilde{\mu}_1\ll \tilde{\nu}_1$ on $(E_1,\mathcal{E}_1)$ (respectively, $\tilde{\mu}_2 \ll \tilde{\nu}_2$ on $(E_2,\mathcal{E}_2)$) and regular conditional probability measures $\tilde{\mu}_{2|1}\ll\tilde{\nu}_{2|1}$ (resp.\ $\tilde{\mu}_{1|2}\ll\tilde{\nu}_{1|2}$). Recall the chain rule of relative entropy is \begin{align} \label{eq:chain:rule}
\KL{\tilde{\mu}}{\tilde{\nu}} = \KL{\tilde{\mu}_1}{\tilde{\nu}_1} + \mathbb{E}_{\tilde{\mu}}\bigg[ \log \frac{d\tilde{\mu}_{2|1}}{d\tilde{\nu}_{2|1}} \bigg] = \KL{\tilde{\mu}_2}{\tilde{\nu}_2} + \mathbb{E}_{\tilde{\mu}}\bigg[ \log \frac{d\tilde{\mu}_{1|2}}{d\tilde{\nu}_{1|2}} \bigg] \;. \end{align} Observe that each of the terms in the equalities is non-negative. In particular, when $\psi$ is non-injective, points of $(E_1,\mathcal{E}_1)$ become equivalent; $(E_2,\mathcal{E}_2)$ is a compressed version, and the probability measures $\mu$ and $\nu$ are similarly compressed.
\begin{proof}[Proof of Lemma~\ref{lem:pushforward:KL}]
Given probability measures on $(E_1,\mathcal{E}_1)$ $\mu \ll \nu$ (with density $m$ such that $\mu = m\cdot \nu$) and a measurable map $\psi : (E_1,\mathcal{E}_1) \to (E_2,\mathcal{E}_2)$, construct the probability measure $\tilde{\mu}$ on $(E_1 \times E_2, \mathcal{E}_1 \otimes \mathcal{E}_2)$ as
\begin{align*}
\tilde{\mu}(A \times B) = \mu(A \cap \psi^{-1}B) = \int_A \mu(dx_1) \int_{B} \delta_{\psi(x_1)}(dx_2) \;, \quad A \in \mathcal{E}_1,\ B \in \mathcal{E}_2 \;,
\end{align*}
and likewise for $\tilde{\nu}$. Then in the notation of \eqref{eq:chain:rule}, $\tilde{\mu}_1 = \mu\ll \nu = \tilde{\nu}_1$, and $\tilde{\mu}_{2|1} = \delta_{\psi(x_1)} = \tilde{\nu}_{2|1}$. Therefore,
\begin{align}
\KL{\tilde{\mu}}{\tilde{\nu}} = \KL{\tilde{\mu}_1}{\tilde{\nu}_1} = \KL{\mu}{\nu} \;.
\end{align}
Alternatively, $\tilde{\mu}_2 = \mu\circ\psi^{-1}$, $\tilde{\nu}_2 = \nu\circ\psi^{-1}$, and it is straightforward to show that
\begin{align}
\mathbb{E}_{\tilde{\mu}}\bigg[ \log \frac{d\tilde{\mu}_{1|2}}{d\tilde{\nu}_{1|2}} \bigg] = \mathbb{E}_{\tilde{\mu}}\bigg[ \log \frac{d\tilde{\mu}_{1}}{d\tilde{\nu}_{1}} \bigg] - \mathbb{E}_{\tilde{\mu}}\bigg[ \log \frac{d\tilde{\mu}_{2}}{d\tilde{\nu}_{2}} \bigg] = \mathbb{E}_{\mu}\bigg[ \log \frac{m}{m\circ\psi} \bigg] = \Delta_{\psi}(\mu\ ||\ \nu) \geq 0 \;.
\end{align}
Therefore,
\begin{align}
\KL{\tilde{\mu}}{\tilde{\nu}} = \KL{\mu}{\nu} = \KL{\mu\circ\psi^{-1}}{\nu\circ\psi^{-1}} + \Delta_{\psi}(\mu\ ||\ \nu) \;.
\end{align} \end{proof} \subsection{Proof of \texorpdfstring{Theorem~\ref{thm:pac:bayes:da}}{Theorem 4}}
\thmpacbayesda* The proof of our PAC-Bayes bound for data augmentation makes use of the following result due to \citet{leveretal2013tighterPACbayes}.
\begin{theorem}[\citet{leveretal2013tighterPACbayes}, Theorem 1] \label{lem:lever:bound}
For any functions $A(f, \mathcal{D}^n)$, $B(f)$ over $F$, either of which may be a statistic of the training data $\mathcal{D}^n$, any distribution $P$ over $F$, any $\delta \in (0,1]$, any $t > 0$, and a convex function $\mathscr{D} : \mathbb{R} \times \mathbb{R} \to \mathbb{R}$, with $P_{\calD}^n$-probability at least $1 - \delta$, for all distributions $Q$ on $F$,
\begin{align} \label{eq:lever:bound}
\mathscr{D}\big( \mathbb{E}_{f\sim Q}[A(f, \mathcal{D}^n)],\mathbb{E}_{f\sim Q}[B(f)] \big) \leq \frac{1}{t} \bigg( \KL{Q}{P} + \log \frac{\mathcal{L}_P}{\delta} \bigg) \;,
\end{align}
where $\mathcal{L}_P := \mathbb{E}_{\mathcal{D}^n\sim P_{\calD}, f\sim P}[e^{t\mathscr{D}(A(f, \mathcal{D}^n),B(f))}]$ is the Laplace transform of $\mathscr{D}(A(f,\mathcal{D}^n),B(f))$. \end{theorem}
As \citet{leveretal2013tighterPACbayes} discuss, many PAC-Bayes bounds in the literature can be obtained as special cases of Lemma~\ref{lem:lever:bound}, including Catoni's bound in Theorem~\ref{thm:catoni:bound}. In that case, which applies to 0-1 loss, $t=n$, $A(f) = \widehat{R}_{\loss}(f,\mathcal{D}^n)$, $B(f) = R_{\loss}(f)$, $C=n^{-1}$ and \begin{align}
\mathscr{D}_C(q,p) & := -\log (1-p(1-e^{-C})) - C q \;, \quad q,p \in (0,1), \ C > 0 \\
& = -\log \mathbb{E}_{Z \sim \text{Bern}(p)}[e^{-CZ}] - Cq \;. \end{align} Basic calculations show that with these quantities, $\mathcal{L}_P=1$. \begin{align}
\mathcal{L}_P = \mathbb{E}_{\mathcal{D}^n, f}[e^{t \mathscr{D}(A(f; \mathcal{D}^n), B(f))}] &= \mathbb{E}_{\mathcal{D}^n, f}[e^{t\mathscr{D}(\widehat{R}_{\loss}(f; \mathcal{D}^n), R_{\loss}(f))}] \\
&= \mathbb{E}_{f, \mathcal{D}^n}[e^{t(-\log \mathbb{E}[e^{-CZ}] - C \widehat{R}_{\loss}(f; \mathcal{D}^n))} ]\\
&= \mathbb{E}_{f}[e^{-t\log \mathbb{E}[e^{-CZ}]} \mathbb{E}_{\mathcal{D}^n}[e^{-tC \widehat{R}_{\loss}(f; \mathcal{D}^n)}]] \\
&= \mathbb{E}_{f}[e^{-t\log \mathbb{E}[e^{-CZ}]} \mathbb{E}_{Z_{1:n} \sim \text{Bern}(R_{\loss}(f))}[e^{-tC\frac{1}{n} \sum_{i=1}^n Z_i}]] \\
&= \mathbb{E}_{f}[e^{-t\log \mathbb{E}[e^{-CZ}]} \prod_{i=1}^n \mathbb{E}_{Z_i \sim \text{Bern}(R_{\loss}(f))}[e^{-CZ_i}]] \\
&= \mathbb{E}_{f}[e^{-t\log \mathbb{E}[e^{-CZ}]} (\mathbb{E}_{Z \sim \text{Bern}(R_{\loss}(f))}[e^{-CZ}])^n] \\
&= \mathbb{E}_{f}[e^{-t\log \mathbb{E}_{Z \sim \text{Bern}(R_{\loss}(f))}[e^{-CZ}]} e^{t \log \mathbb{E}_{Z \sim \text{Bern}(R_{\loss}(f))}[e^{-CZ}]}] \\
&= \mathbb{E}_{f} [1] = 1 \end{align} Recall that \begin{align}
\widehat{R}_{\loss}(f,\mathcal{D}^n) &:= \frac{1}{n} \sum_{i=1}^n \ell(f(X_i),Y_i) \label{eq:risk:1} \\
\invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n) &:= \frac{1}{n} \sum_{i=1}^n \mathbb{E}_{G\sim\lambda}[\ell(f(G X_i),Y_i)] \label{eq:risk:2} \\
\widehat{R}_{\loss}^{\widehat{\circ}}(f,\mathcal{D}^n) &:= \frac{1}{nm} \sum_{i=1}^n \sum_{j=1}^m \ell(f(G_{ij} X_i),Y_i) \label{eq:risk:3} \;. \end{align}
Let $(G_{ij})$ denote the collection of $m\cdot n$ random augmentation transformations sampled i.i.d.\ from $\lambda$. \begin{lemma} \label{lem:div:bounds}
Let $\ell$ be the binary loss, $P$ any distribution on $F$, and assume that $P_{\calD}$ is $\mathcal{G}$-invariant. Then
\begin{align} \label{eq:div:bound:aug}
\mathbb{E}_{f\sim P}\big[\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}\big[ e^{n\mathscr{D}_C(\invf{\widehat{R}}_{\loss}(f,\mathcal{D}^n),R_{\loss}(f))} \big] \big] & \leq \mathbb{E}_{f\sim P}\big[\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}[e^{n\mathscr{D}_C(\widehat{R}_{\loss}(f,\mathcal{D}^n),R_{\loss}(f))}] \big] = 1
\end{align}
and
\begin{align} \label{eq:div:bound:aug:mc}
\mathbb{E}_{f\sim P}\big[\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}\big[ e^{n\mathscr{D}_C(\widehat{R}_{\loss}^{\widehat{\circ}}(f,\mathcal{D}^n),R_{\loss}(f))} \big] \big] & \leq \mathbb{E}_{f\sim P}\big[\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}[e^{n\mathscr{D}_C(\widehat{R}_{\loss}(f,\mathcal{D}^n),R_{\loss}(f))}] \big] = 1 \;.
\end{align} \end{lemma} \begin{proof}
Since the observations $(X_i,Y_i)$ are i.i.d., the expectation over $\mathcal{D}^n$ on the left-hand side of \eqref{eq:div:bound:aug} requires evaluating $\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}\big[ e^{-C\mathbb{E}_{G\sim\lambda}[ \ell(f(G X_i),Y_i)]} \big]$. Using the convexity of $e^{-x}$, Jensen's inequality and Fubini's theorem yield
\begin{align} \label{eq:augrisk:laplace}
\mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\big[ e^{-C\mathbb{E}_{G\sim\lambda}[ \ell(f(G X_i),Y_i)]} \big]
& \leq \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\big[ \mathbb{E}_{G\sim\lambda} \big[ e^{-C \ell(f(G X_i),Y_i)} \big] \big] \\
& = \mathbb{E}_{G\sim\lambda} \big[ \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\big[ e^{-C \ell(f(G X_i),Y_i)} \big] \big] \;. \nonumber
\end{align}
Now, $\mathcal{G}$-invariance of $P_{\calD}$ implies that $\mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}[h(gX_i,Y_i)] = \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}[h(X_i,Y_i)]$ for all measurable functions $h : \mathcal{X} \times \mathcal{Y} \to \mathbb{R}_+$ and all $g\in\mathcal{G}$, which extends to {independent} random $G$ by Fubini's theorem. Therefore,
\begin{align*}
\mathbb{E}_{G\sim\lambda} \big[ \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\big[ e^{-C \ell(f(G X_i),Y_i)} \big] \big]
= \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\big[ e^{-C \ell(f(X_i),Y_i)} \big] = \mathbb{E}_{Z\sim\text{Bern}(R_{\loss}(f))}[e^{-CZ}] \;,
\end{align*}
which implies \eqref{eq:div:bound:aug}.
For the second inequality \eqref{eq:div:bound:aug:mc}, observe that by Jensen's inequality,
\begin{align*}
\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}\big[ e^{-nC\widehat{R}_{\loss}^{\widehat{\circ}}(f,\mathcal{D}^n)} \big]
& = \prod_{i=1}^n \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\bigg[ \mathbb{E}_{(G_{ij})_{j=1}^m\sim\lambda}\bigg[ \exp\bigg(-\frac{C}{m}\sum_{j=1}^m \ell(f(G_{ij}X_i),Y_i) \bigg) \bigg] \bigg] \\
& \leq \prod_{i=1}^n \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\bigg[ \mathbb{E}_{(G_{ij})_{j=1}^m\sim\lambda}\bigg[ \frac{1}{m} \sum_{j=1}^m e^{-C \ell(f(G_{ij}X_i),Y_i) } \bigg] \bigg] \\
& = \prod_{i=1}^n \mathbb{E}_{(X_i,Y_i)\sim P_{\calD}}\big[ \mathbb{E}_{G\sim\lambda} \big[ e^{-C \ell(f(G X_i),Y_i) } \big] \big] \;.
\end{align*}
Using the $\mathcal{G}$-invariance of $P_{\calD}$ once again, we have
\begin{align*}
\mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}\big[ e^{-nC\widehat{R}_{\loss}^{\widehat{\circ}}(f,\mathcal{D}^n)} \big] \leq \mathbb{E}_{\mathcal{D}^n\sim P_{\calD}}\big[ e^{-nC\widehat{R}_{\loss}(f,\mathcal{D}^n)} \big] = \big(\mathbb{E}_{Z\sim\text{Bern}(R_{\loss}(f))}[e^{-CZ}] \big)^n \;,
\end{align*}
which implies \eqref{eq:div:bound:aug:mc}. \end{proof}
Note that Lemma~\ref{lem:div:bounds} depends on the observation that for any distribution over functions, the symmetrized risk will be a lower-variance estimator than the empirical risk but will remain unbiased. This is only the case for \textit{invariant} data-generating distributions -- symmetrizing the risk of a non-symmetric $P_{\calD}$ will not necessarily result in a valid PAC-Bayes bound.
\section{Examples, counterexamples, tighter bounds}
\subsection{Counterexamples} \label{appx:counterexamples}
\textbf{Feature averaging and non-convex losses.} We consider the binary classification setting with the zero-one loss and some function class $F$ of functions bounded in $[0,1]$ -- that is, $\ell(f(x), y) = \mathbbm{1}[|f(x) - y| > \frac{1}{2}]$. Suppose that there exists some invariance $\mathcal{G}$ in the data such that $y(x) = y(gx)$ for all $x, g$. Then consider a function which, for some small $\epsilon$, outputs $f(x) = \frac{1}{2} + y\epsilon$ on a $1 - 2\epsilon$ fraction of each equivalence class of the inputs, and $1 - y$ on $2\epsilon$ of the inputs in each equivalence class. Then $\mathbb{E}[f(gx)] = (1 - 2\epsilon) (\frac{1}{2} + y\epsilon) + 2\epsilon (1-y)$. When $y=0$, this expectation is $\frac{1}{2} + \epsilon$, and when $y=1$ it is $\frac{1}{2} - 2\epsilon^2 < \frac{1}{2}$, so the feature-averaged model would have risk 1 whereas the original model had risk $2\epsilon$.
\textbf{Non-uniform data-generating distributions.} When the data-generating distribution is not uniform over the set $\mathcal{T}$, then performing data augmentation with $\mathcal{T}$ will not necessarily lead to a more accurate estimate of the model's empirical risk. For example, consider the task of learning a function $g$ satisfying $g(x) = g(-x)$, bounded in magnitude by some constant $A$. Suppose, however, that positive numbers are much more likely under the data generating distribution, with $p(\mathbb{R}^+) = 1 - \epsilon$ for small $\epsilon$. Then the function $f(x) = \mathbbm{1}[x>0]g(x)$ will satisfy $\mathbb{E}[\|f(X_S) - g(X_S)\|] \neq \mathbb{E}[ \|f(X_{S^\text{aug}}) - g(X_{S^\text{aug}})\|]$. So the augmented risk is no longer an unbiased estimator of the empirical risk. Further, in this particular case its variance is also higher, as it will be equal to $\frac{1}{2}\text{Var}(g(x))$, in contrast to $\epsilon \text{Var}(g(x))$.
\subsection{Tighter PAC-Bayes bound for data augmentation} \label{appx:tighter:pacbayes:da}
Although Theorem~\ref{thm:pac:bayes:da} establishes that the i.i.d.\ PAC-Bayes bound \eqref{eq:catoni:bound} is valid for exact DA, the proof of Theorem~\ref{thm:pac:bayes:da} indicates that a tighter bound is possible. In particular, recall that when $P_{\calD}$ is $\mathcal{G}$-invariant \citep{invariantdistributions,chen2019invariance}, \begin{align*}
\mathbb{E}_{G\sim\lambda}[\ell(f(GX),Y)] = \mathbb{E}_{(X,Y)\simP_{\calD}}[\ell(f(X),Y) \mid \Phi] := \invf{\ell}_f(\Phi) \;. \end{align*} $\invf{\ell}_f(\Phi)$ is a random variable, the average loss on the random orbit with representative $\Phi$, whose distribution is induced by $P_{\calD}$. Therefore, we can write $\mathcal{L}_P$ in \eqref{eq:lever:bound} as \begin{align*}
\mathcal{L}_P = \mathbb{E}_{f\sim P}\bigg[ \bigg(\frac{ \mathbb{E}_{\Phi\simP_{\calD}} \big[ e^{-C\invf{\ell}_f(\Phi)} \big]}{\mathbb{E}_{Z\sim\text{Bern}(R_{\loss}(f))}[e^{-CZ}]} \bigg)^n \bigg] \leq 1 \;. \end{align*} In general, this cannot be computed in closed form. However, it might be possible to estimate using the data (with appropriate modifications to the resulting bound) and samples $f \sim P$.
\section{Computation details for PAC-Bayes bounds} \label{appx:invar-pb} PAC-Bayes bounds for neural networks are computed via the following procedure: a deterministic neural network is trained to minimize the cross-entropy loss on the dataset. After it has reached a suitable training accuracy, we use these parameters as the initialization for the means and variances of the stochastic neural network weights used for the PAC-Bayes bounds. We directly optimize a surrogate of the PAC-Bayes bound (using the cross-entropy loss instead of the zero-one accuracy and using the reparameterization trick to get the derivatives of the variance parameters). The exact computation of the PAC-Bayes bound uses the union bound and discretization of the PAC-Bayes prior as described in \citep{dziugaite2017nonvacuous}. Reported values are at optimization convergence.
The experiment code is provided with the paper submission, but we describe here at a high level the different models used in our empirical evaluations.
\textbf{FashionMNIST CNN:} the convolutional network used for FashionMNIST consists of two convolutional layers (with batch norm and max pooling) followed by a single fully connected layer.
\textbf{LiDAR Permutation-Invariant Network:} we use a scaled-down version of the PointNet architecture \citep{qi2017pointnet}. We include two layers of 1D convolutions followed by a max-pooling layer that selects the maximum over input points for each channel. This layer is followed by two fully-connected layers leading into the final output.
\textbf{Partially-Invariant Network:} we alter the previous architecture slightly so that it is only invariant to \textit{subgroups} of the permutation group on its inputs. Specifically, we partition the input into 8 disjoint subsets, and apply the previous model's permutation-invariant embedding layers to each partition. The result is a feature representation that is invariant to permutations within each partition of the input, but not between partitions. This representation is then fed through the same architecture. We note that we keep the number of convolutional filters per layer constant, which results in a larger feature embedding by a factor of 8 that is fed into the first fully connected layer. As a result, this model has significantly more parameters than the fully permutation-invariant model.
\textbf{Fully Connected Network:} the max-pooling operator of the previous two architectures is omitted. This network has many more parameters than either of the first two models, and is not invariant to any subgroup of the permutation group.
\chapter{Training speed and model selection} \section{Linear model combination}
\label{sec:optimize-then-prune}
The estimator $\mathcal{L}(\mathcal{D})$ reveals an intriguing connection between pruning in linear model combinations and Bayesian model selection. This connection arises when we consider the weight assigned to a model in a linear model combination when models are fit on data points iteratively, as in the marginal likelihood estimation algorithms discussed previously.
We study a setting that presents a bridge between Bayesian updating and optimization by considering the limiting behaviour of a linear regressor trained on predictions output by Bayesian models fitted iteratively as seen in Algorithm~\ref{alg:estimate}. Concretely, we treat samples from the model posterior $P(Y_i | \mathcal{D}_{<i}, X_i, \mathcal{M}_j)$ as entries $\Phi[i, j]$ in a design matrix $\Phi$, and then consider properties of an optimal linear predictor trained on $\Phi$. While this does not perfectly replicate the setting of deep neural networks trained with gradient descent, it highlights an important property of gradient descent on non-stationary features: the feature which contributes the most to the output of a linear ensemble will often be the one which was most predictive of the target \textit{on average} over the course of training, rather than the one which is most correlated with the target at the end of training.
Our analysis requires a number of assumptions to be formalized. We assume a data set $\mathcal{D} = (X_i, Y_i)_{i=1}^n$ and a collection of $k$ models $\mathcal{M}_1, \dots, \mathcal{M}_k$. We train a linear regressor $w$ to fit the posterior predictive distributions of the models to the target $Y_i$; i.e. to regress on the dataset \begin{equation}
(\Phi, Y) = \bigg (\phi_i=(\hat{Y}^i_1, \dots, \hat{Y}^i_k), Y_i\bigg)_{i=1}^n \text{ with } \hat{Y}_j^i \sim P(\hat{Y}|\mathcal{D}_{<i}, X_i, \mathcal{M}_j). \end{equation} The following result shows that the optimal linear regressor on this data generating distribution assigns the highest weight to the model with the highest $\mathcal{L}(\mathcal{D})$ whenever the model errors are independent. This shows that magnitude pruning in a linear model combination is equivalent to approximate Bayesian model selection, under certain assumptions on the models.
\begin{restatable}{proposition}{PropBMS}\label{prop:modelselect}
Let $\mathcal{M}_1, \dots, \mathcal{M}_k$ be Bayesian linear regression models with fixed noise variance $\sigma_N^2$ and Gaussian likelihoods. Let $\Phi$ be a (random) matrix of posterior prediction samples, of the form $\Phi[i, j] = \hat{y}_i^j \sim P(y_i|\mathcal{D}_{<i}, x_i, \mathcal{M}_j)$. Suppose the following two conditions on the columns of $\Phi$ are satisfied: $\mathbb{E}\langle \Phi[:, i], y \rangle = \mathbb{E}\langle \Phi[:, j], y \rangle$ for all $i, j$, and $\mathbb{E}\langle \Pi_{y^\perp} \phi_i, \Pi_{y^\perp} \phi_j \rangle = 0$. Let $w^*$ denote the least-squares solution to the regression problem $\min_w \mathbb{E}_{\Phi}\|\Phi w - y\|^2$. Then the following holds
\begin{equation} \mathrm{arg max}_i w^*_i = \mathrm{arg max}_i \mathcal{L}(\mathcal{D} | \mathcal{M}_i) \qquad \forall w^* = \mathrm{arg min}_w \mathbb{E} \|\Phi w - y\|^2\;. \end{equation} \end{restatable}
The assumption on the independence of model errors is crucial in the proof of this result: families of models with large and complementary systematic biases may not exhibit this behaviour. We observe in Section \ref{sec:BMS} that the conditions of Proposition 1 are approximately satisfied in a variety of model comparison problems, and running SGD on a linear combination of Bayesian models still leads to solutions that approximate Bayesian model selection. We conjecture that analogous phenomena occur during training within a neural network. The proof of Proposition~\ref{prop:modelselect} depends on the observation that, given a collection of features, the best least-squares predictor will assign the greatest weight to the feature that best predicts the training data. While neural networks are not linear ensembles of fixed models, we conjecture that, especially for later layers of the network, a similar phenomenon will occur wherein weights from nodes that are more predictive of the target values over the course of training will be assigned higher magnitudes. We empirically investigate this hypothesis in Section \ref{sec:DNN_exp}.
We consider three model selection problems in our empirical evaluations. In \textbf{prior variance selection} we evaluate a set of BLR models on a synthetic linear regression data set. Each model $\mathcal{M}_i$ has a prior distribution over the $d$ parameters of the form $w \sim \mathcal{N}(0, \sigma_i^2 I_d)$ for some $\sigma_i^2$, and the goal is to select the optimal prior variance (in other words, the optimal regularization coefficient). We additionally evaluate an analogous initialization variance selection method on an NTK network trained on a toy regression dataset. In \textbf{frequency (lengthscale) selection} we use as input a subset of the handwritten digits dataset MNIST given by all inputs labeled with a 0 or a 1. We compute random Fourier features (RFF) of the input to obtain the features for a Bayesian linear regression model, and perform model selection over the frequency of the features (full details on this in the appendix). This is equivalent to obtaining the lengthscale of an approximate radial basis function kernel. In \textbf{feature dimension selection}, we use a synthetic dataset \citep{wilson2020bayesian} of the form $(\textbf{X}, \textbf{y})$, where $x_i = (y_i + \epsilon_1, y_i + \dots, y_i + \epsilon_{15}, \epsilon_{16}, \dots, \epsilon_{30})$ with $\epsilon_i$ i.i.d. noise variables. We then consider a set of models $\{\mathcal{M}_k\}$ with feature embeddings $\phi_k(x_i) = x_i[1, \dots, k]$. The optimal model in this setting is the one which uses exactly the set of `informative' features $x[1, \dots, 15]$.
\begin{figure}
\caption{Relative rankings given by optimize-then-prune, ML, and estimated $\mathcal{L}(\mathcal{D})$. Left: feature selection. Middle: prior variance selection. Right: RFF frequency selection. Rankings are consistent with what our theoretical results predict. Results are averaged over $5$ runs.
}
\label{fig:app_ml_v_weight}
\end{figure}
We empirically evaluate the claims of Proposition~\ref{prop:modelselect} in settings where the assumptions only approximately hold. Concretely, we consider learning problems where the outputs of the models have roughly equal norm and approximately independent errors. We compare the ranking given by the true log marginal likelihood, the estimated $\mathcal{L}(\mathcal{D})$, and the weight assigned to each model by the trained linear regressor. We consider three variations on how sampled predictions from each model are drawn to generate the features $\phi_i$: sampling the prediction for point $\hat{Y}_i$ from $P(\hat{Y}_i | \mathcal{D}_{<i})$ (`concurrent sampling' -- this is the setting of Proposition \ref{prop:modelselect}), as well as two baselines: the posterior $P(\hat{Y}_i |\mathcal{D})$ (`posterior sampling'), and the prior $P(\hat{Y}_i)$ (`prior sampling'). The baselines illustrate the importance of \textit{concurrently} fitting the models and the linear weights; fitting linear weights over a set of trained models simply identifies the one with the best training set performance, and fitting linear weights to the untrained models tends to favour those with lower-magnitude predictions. Their inclusion further highlights that in the model selection problems on which we evaluate the methods, it is not the case that either prior or posterior predictive likelihood is sufficient to correctly identify the best model.
Concretely, we find that the rankings of the marginal likelihood, its lower bound, and of the ranking given by concurrent optimization agree on the best model in all three of the model selection problems outlined previously. The prior and posterior sampling procedure baselines do not exhibit a consistent ranking with the log ML, and indeed exhibit opposite trends in the prior variance selection task. We visualize these results for the feature dimension selection problem in Figure \ref{fig:app_ml_v_weight}.
\subsubsection{Subnetwork selection in neural networks} \label{sec:sgd_submodel} Finally, we evaluate whether our previous insights apply to submodels within a neural network, suggesting a potential mechanism which may bias SGD towards parameters with better generalization performance. Based on the previous experiments, we expect that nodes that have a lower sum over training errors (if evaluated as a classifier on their own) are favoured by gradient descent and therefore have a larger final weight than those which are less predictive of the data. If so, we can then view SGD followed by pruning (in the final linear layer of the network) as performing an approximation of a Bayesian model selection procedure. We replicate the model selection problem of the previous setting, but replace the individual models with the activations of the penultimate layer of a neural network, and replace the linear ensemble with the final linear layer of the network. Full details on the experimental set-up can be found in Appendix \ref{sec:exp_details_sgd_submodels}. We find that our hypotheses hold here: SGD assigns larger weights to subnetworks that perform well, as can be seen in Figures~ \ref{fig:sgd_submodel_full} and \ref{fig:sgd_submodel_full_cifar}. This suggests that SGD is biased towards functions that generalize well, even within a single neural network. We find the same trend holds for CIFAR-10, which is shown in Appendix \ref{sec:exp_details_sgd_submodels}.
\begin{figure}
\caption{Weight assigned to subnetwork by SGD in a deep neural network (x-axis) versus the subnetwork performance (estimated by the sum of cross-entropy, on the y-axis) for different FashionMNIST classes. The light blue ovals depict $95\%$ confidence intervals, estimated over 10 seeds (i.e. 2$\sigma$ for both the weight and SOTL). The orange line depicts the general trend.}
\label{fig:sgd_submodel_full}
\end{figure}
\begin{figure}
\caption{Weight assigned to subnetwork by SGD in a deep neural network (x-axis) versus the subnetwork performance (estimated by the sum of cross-entropy, on the y-axis) for different CIFAR-10 classes. The light blue ovals depict $95\%$ confidence intervals, estimated over 10 seeds (i.e. 2$\sigma$ for both the weight and SOTL). The orange line depicts the general trend.}
\label{fig:sgd_submodel_full_cifar}
\end{figure}
\section{Proofs of theoretical results} \label{sec:proofs-supervised}
\PropLk* \begin{proof} The result for $\mathcal{L}$ follows from a straightforward derivation: \begin{align}
\mathcal{L}(\mathcal{D}) &= \sum \int \log P(\mathcal{D}_i|\theta) dP(\theta|\mathcal{D}_{<i}) \\
&= \sum \int \log [\frac{ P(\mathcal{D}_i|\theta) P(\theta | \mathcal{D}_{<i}) P(\mathcal{D}_i|\mathcal{D}_{<i})}{P(\theta | \mathcal{D}_{<i}) P(\mathcal{D}_i|\mathcal{D}_{<i})}] dP(\theta|\mathcal{D}_{<i})\\
&= \sum \int \log\frac{ P(\theta|\mathcal{D}_{\leq i})}{P(\theta|\mathcal{D}_{<i})} dP(\theta|\mathcal{D}_{<i}) + \sum \log P(\mathcal{D}_i| \mathcal{D}_{<i}) \\
&= \sum \bigg ( \log P(\mathcal{D}_i|\mathcal{D}_{<i})-\KL(P(\theta|\mathcal{D}_{<i})|| P(\theta|\mathcal{D}_{\leq i})) \bigg ) \\
&= \log P(\mathcal{D}) - \sum_{i=1}^n \KL(P(\theta|\mathcal{D}_{<i})||P(\theta|\mathcal{D}_{\leq i})). \end{align} The result for $\hat{\mathcal{L}}_k$ follows immediately from Jensen's inequality, yielding \begin{equation}
\sum \mathbb{E}[\log \sum_{j=1}^k \frac{1}{k} p(\mathcal{D}_i|\theta_j)] \leq \sum \log \mathbb{E}[ \sum_{j=1}^k \frac{1}{k} p(\mathcal{D}_i|\theta_j)] =\sum \log \mathbb{E}[ p(\mathcal{D}_i|\theta_j)] = \log P(\mathcal{D}) \; . \end{equation} Because $\mathcal{L}_k$ applies Jensen's inequality to a random variable with decreasing variance as a function of $k$, we expect the bias of $\mathcal{L}_k$ to decrease as $k$ grows, an observation characterized in Section \ref{sec:BMS}. \end{proof} \PropLS* \begin{proof}
To show that the sum of the estimated log likelihoods is a lower bound on the log marginal likelihood, it suffices to show that each term in the sum of the estimates is a lower bound on the corresponding term in log marginal likelihood expression. Thus, without loss of generality we consider a single data point $\mathcal{D}_i = (x, y)$ and posterior distribution $p(y|x, \mathcal{D}_{<i})=\mathcal{N}(\mu, \sigma^2)$.
Let $y \in \mathbb{R}$, $\hat{\mu}, \hat{\sigma}$ the standard estimators for sample mean and variance given sample $\hat{Y} \in \mathbb{R}^k$ sampled from $\mathcal{N}(\mu, \sigma^2)$. We want to show
\begin{equation}
\mathbb{E}_{\hat{Y} \sim \mathcal{N}(\mu, \sigma^2)}[\ln p(y|\hat{\mu}, \hat{\sigma}^2)] \leq \ln p(y|\mu, \sigma^2). \end{equation} We first note that $\hat{\mu}(\hat{Y}) \perp \hat{\sigma}(\hat{Y})$ for $\hat{Y}$ a collection of i.i.d. Gaussian random variables \citep{basu1955}. We also take advantage of the fact that the log likelihood of a Gaussian is concave with respect to its $\mu$ parameter and its $\sigma^2$ parameter. Notably, the log likelihood is \textit{not} concave w.r.t. the joint pair $(\mu, \sigma^2)$, but because our estimators are independent, this will not be a problem for us. We proceed as follows by first decomposing the expectation over the samples $\hat{Y}$ into an expectation over $\hat{\mu}$ and $\widehat{\sigma^2}$ \begin{align}
\mathbb{E}_{\hat{Y} \sim \mathcal{N}(\mu, \sigma^2)}[\ln p(y| \hat{\mu}, \hat{\sigma}^2)] &= \mathbb{E}_{\hat{\mu}, Y_2, \dots, Y_N} \ln p(y|\hat{\mu}, \hat{\sigma}^2) \\
&= \mathbb{E}_{\hat{\mu}} \mathbb{E}_{\hat{\sigma}^2} \ln p(y|\hat{\mu}, \hat{\sigma}^2) \intertext{We apply Jensen's inequality first to the inner expectation, then to the outer.}
&\leq \mathbb{E}_{\hat{\mu}} \ln p(y|\hat{\mu}, \mathbb{E}[\hat{\sigma}^2]) = \mathbb{E}_{\hat{\mu} }\ln p(y|\hat{\mu}, \sigma^2) \\
&\leq \ln p(y|\mu, \sigma^2) \end{align} So we obtain our lower bound. \end{proof}
\ThmSTO*
\begin{proof} The heavy lifting for this result has largely been achieved by Propositions \ref{prop:lk} and \ref{prop:ls}, which state that provided the samples $\theta^{i}_j$ are distributed according to the posterior, the inequalities will hold. It therefore remains only to show that the sample-then-optimize procedure yields samples from the posterior. The proof of this result can be found in Lemma 3.8 of \citet{osband2018randomized}, who show that the optimum for the gradient descent procedure described in Algorithm \ref{alg:estimate} does indeed correspond to the posterior distribution for each subset $\mathcal{D}_{<i}$.
Finally, it is straightforward to express the lower bound estimator $\hat{\mathcal{L}}$ as the sum of regression losses. We obtain this result by showing that the inequality holds for each term $\log P(\mathcal{D}_i|\theta_i)$ in the summation.
\begin{align}
\log P(\mathcal{D}_i|\theta) &= \log[ \exp \bigg (-\frac{(\theta^\top x_i - y_i)^2 }{2 \sigma^2} \bigg )\frac{1}{\sqrt{2\pi}\sigma} ] \\
&= -\frac{(\theta^\top x_i - y_i)^2 }{2 \sigma^2} -\frac{1}{2} \log (2 \pi \sigma^2) \\
&= c_1 \ell_2(\mathcal{D}_i, \theta) + c_2 \end{align}
We note that in practice, the solutions found by gradient descent for finite step size and finite number of steps will not necessarily correspond to the exact local optimum. However, it is straightforward to bound the error obtained from this approximate sampling in terms of the distance of $\theta$ from the optimum $\theta^*$. Denoting the difference $|\theta - \theta^*|$ by $\delta$, we get \begin{align}
| \log P(\mathcal{D}_i|\theta^*) - \log P(\mathcal{D}_i|\theta)| &= |
\frac{((\theta^*)^\top x_i - y_i)^2 }{2 \sigma^2} - \frac{((\theta)^\top x_i - y_i)^2 }{2 \sigma^2}| \\
& \leq
\frac{1}{2 \sigma^2} | (\theta^*)^\top x_i - \theta^\top x_i|^2 \\
&\leq |( (\theta^*)^\top x_i)^2 - (\theta^\top x_i)^2| + |2y||\theta^\top x - (\theta^*)^\top x| \\
&\leq |(\theta^* - \theta)^\top x + 2((\theta^*)^\top x)((\theta^*-\theta)^\top x)| + |2y||\theta^\top x - (\theta^*)^\top x| \\
&\leq |\theta^* - \theta||x| + 2|\theta^* x||\theta^* - \theta||x| + |2y||x||\theta - \theta^*| \end{align}
and so the error in the estimate of $\log P(\mathcal{D} | \theta)$ will be proportional to the distance $|\theta - \theta^*|$ induced by the approximate optimization procedure. \end{proof}
\CorNTK* \begin{proof}
Follows immediately from the results of \citet{he2020bayesian} stating that the the limiting distribution of $f^k_\infty$ is precisely $P(f|\mathcal{D}^n_{\le k}, \mathcal{M})$. We therefore obtain the same result as for Theorem \ref{thm:sto}, plugging in the kernel gradient descent procedure on $f$ for the parameter-space gradient descent procedure on $\theta$. \end{proof} The following Lemma will be useful in order to prove Proposition~\ref{prop:modelselect}. Intuitively, this result states that in a linear regression problem in which each feature $\phi_i$ is `normalized' (the dot product $\langle \phi_i, y \rangle = \langle \phi_j, y \rangle = \alpha$ for some $\alpha$ and all $i, j$) and `independent' (i.e. $\langle \Pi_{y^\perp} \phi_i, \Pi_{y^\perp} \phi_j \rangle = 0$), then the optimal linear regression solution assigns highest weight to the feature which obtains the least error in predicting $y$ on its own. \begin{lemma} Let $y \in \mathbb{R}^n$, and $\Phi \in \mathbb{R}^{n \times k}$ be a design matrix such that $\Phi[:, j] = \alpha y + \epsilon_j\ \forall j$ for some fixed $\alpha \geq 0$, with $\epsilon \in y^\perp$, and $\epsilon_i^\top \epsilon_j = 0$ for all $i \neq j$. Let $w^*$ be the solution to the least squares regression problem on $\Phi$ and $y$. Then \begin{equation}
\mathrm{arg max}_i w_i = \mathrm{arg min}_i \|f_i(x) - y\|^2 = \mathrm{arg max}_i \mathcal{L}(\mathcal{M}_i) \end{equation} \end{lemma} \begin{proof}
We express the minimization problem as follows. We let $\phi(x)$ = $( f_1(x), \dots, f_k(x))$, where $f_i(x) = \alpha y + \epsilon_i$, with $\epsilon_i \perp \epsilon_j $. We denote by $\mathbbm{1}$ the vector containing all ones (of length $k$). We observe that we can decompose the design matrix $\Phi$ into one component whose columns are parallel to $y$, denoted $\Phi_y$, and one component whose columns are orthogonal to $y$, denoted $\Phi_\perp$. Let $\sigma^2_i = \|\epsilon_i\|^2$. By assumption, $\Phi_y = \alpha y \mathbbm{1}^\top$, and $\Phi_\perp^\top \Phi_\perp = \text{diag}(\sigma^2_1, \dots, \sigma^2_n) = \Sigma$. We then observe the following decomposition of the squared error loss of a weight vector $w$, denoted $\ell(w)$. \begin{align*}
\ell(w) &= \| \Phi w - y\|^2 = (\Phi w - y)^\top (\Phi w - y) \\ &= ((\Phi_y + \Phi_\perp) w - y)^\top ((\Phi_y + \Phi_\perp)w - y)\\ &=(\Phi_y w - y)^\top (\Phi_y w - y) + w^\top \Phi_\perp^\top \Phi_\perp w \\
&= \|y\|^2 \|1 - \alpha \mathbbm{1}^\top w \|^2 + \sum \sigma_i^2 w_i^2 \\ \end{align*} In particular, the loss decomposes into a term which depends on the sum of the $w_i$, and another term which will depend on the norm of the component of each model's predictions orthogonal to the targets $y$.
As this is a quadratic optimization problem, it is clear that an optimal $w$ exists, and so $w^\top \mathbbm{1}$ will take some finite value, say $\beta$. It is straightforward to conclude that for any fixed $\beta$, the solution to the minimization problem \begin{equation}
\min_w \sum w_i^2 \sigma_i^2 : w^\top \mathbbm{1} = \beta \end{equation} is such that the argmax over $i$ of $w_i$ is equal to the index of the predictor with the minimal error. \end{proof} \PropBMS* \begin{proof} We begin by emphasizing the necessity of the two conditions: the first condition ensures that all of the predictions are of the same scale. This avoids situations where one feature is equal to a small multiple of the targets $y$ (i.e. $\Phi[:, i] = \beta y$ for small $\beta$) resulting in a disproportionately large weight on that model's predictions in order to normalize their scale. The second condition ensures that the models do not have complementary systematic errors, such that the optimal predictor might assign one model's predictions a higher weight in order to cancel out errors in other models' predictions. This is satisfied when we require that $\epsilon_i$ and $\epsilon_j$ be orthogonal, and that $\zeta_i^j$ be sampled independently for all $i$ and $j$. This assumption is crucial: in the case where the errors are linearly independent but not orthogonal, we can arrive at situations where greater weight may be assigned to a model whose error is complementary to those of other models, despite this model not being the best fit for the data.
We note that our lower bound for each model in the linear regression setting is equal to $\mathbb{E} \sum_{i=1}^N \|f_k(x_i) + \zeta_i - y_i\|^2 + c$ where $c$ is a fixed normalizing constant. By the previous Lemma, we know that the linear regression solution $w^*$ based on the posterior means satisfies $\mathrm{arg\,max}_i w^*_i = \mathrm{arg\,max}_i \mathcal{L}(\mathcal{M}_i)$. It is then straightforward to extend this result to the noisy setting. \begin{align}
\mathbb{E}[ \|\Phi w - y\|^2] &= \mathbb{E}[\|(\Phi_y + \Phi_\perp + \zeta)w - y\|^2] \\
&= \mathbb{E}[((\Phi_y + \Phi_\perp + \zeta)w - y)^\top ((\Phi_y + \Phi_\perp + \zeta)w - y)] \\
&= \|\Phi_y w - y\|^2 + w^\top \Phi_\perp ^\top \Phi_\perp w + \mathbb{E}[w^\top \zeta^\top \zeta w] \\
&= (\alpha w^\top \mathbbm{1} - 1)^2\|y\|^2 + w^\top \Phi_\perp ^\top \Phi_\perp w + \mathbb{E}[w^\top \zeta^\top \zeta w] \\
&= (\alpha w^\top \mathbbm{1} - 1)^2\|y\|^2 + \sum w_i^2( \|\Phi_\perp[:, i] \|^2 + \|\zeta_i\|^2) \end{align}
We again note via the same reasoning as in the previous Lemma that the model with the greatest lower bound will be the one which minimizes $\|\Phi_\perp[:, i]\|^2 + \|\zeta_i\|^2$, and that the weight given to index $i$ will be inversely proportional to this term.
It only remains to show that for each model $i$, the model which maximizes $\mathcal{L}(M_i)$ will also minimize $\|\Phi_\perp[:, i]\|^2 + \|\zeta_i\|^2$. This follows precisely from the Gaussian likelihood assumption. As we showed previously \begin{align}
\mathcal{L}(\mathcal{D} | \mathcal{M}_i) = \mathbb{E}[\sum \log P(y_i | \mathcal{D}_{<i})] &\propto - \sum \mathbb{E}[\ell_2(y_i - \hat{y}_i)] \\
&= \| y - \mu\|^2 + \mathbb{E}[\|\hat{y} - \mu \|^2] \\
&= \alpha\|y\|^2 + \|\Phi_\perp[:, i]\|^2 + \mathbb{E}[\|\zeta_i\|^2] \end{align} and so finding the model $\mathcal{M}_i$ which maximizes $\mathcal{L}(\mathcal{D}, \mathcal{M}_i)$ is equivalent to picking the maximal index $i$ of $w^*$ which optimizes the expected loss of the least squares regression problem. \end{proof}
\section{Experiments}
\subsection{Experimental details: model selection using trajectory statistics} \label{sec:ex_ms_blr_synthetic_data}
We consider 3 model selection settings in which to evaluate the practical performance of our estimators. In \textbf{prior variance selection} we evaluate a set of BLR models on a synthetic linear regression data set. Each model $\mathcal{M}_i$ has a prior distribution over the $d$ parameters of the form $w \sim \mathcal{N}(0, \sigma_i^2 I_d)$ for some $\sigma_i^2$, and the goal is to select the optimal prior variance (in other words, the optimal regularization coefficient). We additionally evaluate an analogous initialization variance selection method on an NTK network trained on a toy regression dataset. In \textbf{frequency (lengthscale) selection} we use as input a subset of the handwritten digits dataset MNIST given by all inputs labeled with a 0 or a 1. We compute random Fourier features (RFF) of the input to obtain the features for a Bayesian linear regression model, and perform model selection over the frequency of the features (full details on this in the appendix). This is equivalent to obtaining the lengthscale of an approximate radial basis function kernel. In \textbf{feature dimension selection}, we use a synthetic dataset \citep{wilson2020bayesian} of the form $(\textbf{X}, \textbf{y})$, where $x_i = (y_i + \epsilon_1, \dots, y_i + \epsilon_{15}, \epsilon_{16}, \dots, \epsilon_{30})$. We then consider a set of models $\{\mathcal{M}_k\}$ with feature embeddings $\phi_k(x_i) = x_i[1, \dots, k]$. The optimal model in this setting is the one which uses exactly the set of `informative' features $x[1, \dots, 15]$.
The synthetic data simulation used in this experiment is identical to that used in \citep{wilson2020bayesian}. Below, we provide the details.
Let $k$ be the number of informative features and $d$ the total number of features. We generate a datapoint $\mathcal{D}_i = \{x_i,y_i\}$ as follows: \begin{enumerate}
\item {Sample $y_i$}: $y_i \sim U([0, 1])$
\item {Sample $k$ informative features}: $x_{i,j} \sim N(y_i, \sigma_0) \quad \forall j \in 1, \dots k$
\item {Sample $\max(d-k,0)$ noise features}: $x_{i,k+j} \sim N(0, \sigma_1) \quad \forall j \in 1, \dots d-k$
\item {Concatenate the features}: $X_i= [x_{i,1}, \dots x_{i,d}]$ \end{enumerate}
We set $\sigma_0= \sigma_1=1$, $k = 15$, $n = 30$, and let $d$ vary from $5$ to $n$. We then run our estimators on the Bayesian linear regression problem for each feature dimension, and find that all estimators agree on the optimal number of features, $k$.
To compute the random Fourier features used for MNIST classification, we vectorize the MNIST input images and follow the procedure outlined by \citet{rahimi2008random} (Algorithm 1) to produce RFF features, which are then used for standard Bayesian linear regression against the binarized labels. The frequency parameter (which can also be interpreted as a transformation of the lengthscale of the RBF kernel approximated by the RFF model) is the parameter of interest for model selection.
\subsection{Experimental details: Bayesian model comparison} \label{sec:exp_details_sgd_dnn}
Here we provide further detail of the experiment in Section 4.2.1. The goal of the experiment is to determine whether the connection between sum-over-training losses (SOTL) and model evidence observed in the linear regression setting extends to DNNs. In particular, the two sub-questions are: \begin{enumerate}
\item Do models with a lower SOTL generalize better?
\item Are these models favoured by SGD? \end{enumerate}
To answer these questions, we train a linear combination of NNs. We can answer subquestion [1] by plotting the correlation between SOTL and test performance of an individual model. Further, we address subquestion [2] by considering the correlation between test loss and linear weights assigned to each model.
Below we explain the set-up of the linear combination in more detail. We train a variety of deep neural networks along with a linear `ensemble' layer that performs a linear transformation of the concatenated logit outputs\footnote{These are pre-softmax outputs. To obtain the predicted probability of a class, they are fed through a softmax function.} of the classification models. Let $h_m(x_i)$ be logit output of model $m$ for input $x_i$, $\ell(y_i, h_i)$ be the loss for point $i$ (where $h_i$ is a logit) and $w_{m,t}$ be the weight corresponding to model $m$ at time step $t$.
We consider two training strategies: we first train models individually using the cross-entropy loss between each model's prediction and the true label, using only the cross-entropy loss of the final ensemble prediction to train the linear weights. Mathematically, we update the models using the gradients \begin{equation}
\frac{\partial}{\partial \theta_m} \ell(y_i, h_m(x_i)), \end{equation} and the `ensemble' weights using \begin{equation}
\frac{\partial}{\partial w_m} \ell( y_i, \sum_m w_m h_m(x_i)). \end{equation} We refer to this training scheme as \textit{Parallel Training} as the models are trained in parallel. We also consider the setting in which the models are trained using the cross entropy loss from the ensemble prediction backpropagated through the linear ensemble layer, i.e. the model parameters are now updated using: \begin{equation}
\frac{\partial}{\partial \theta_m} \ell(y_i, \sum_m w_m h_m(x_i)). \end{equation} We refer to this scheme as the \textit{Concurrent Training}.
We train a variety of different MLPs (with varying layers and nodes) and convolutional neural networks (with varying layers, nodes and kernels) on FashionMNIST using SGD until convergence.
\subsection{Experimental details: SGD upweights submodels that perform well} \label{sec:exp_details_sgd_submodels} Below we provide further details of the experiment in Section 4.2.2. The goal of the experiment is to determine whether SGD upweights sub-models that fit the data better.
We train an MLP network (with units $200, 200, 10$) on FashionMNIST using SGD until convergence. After training is completed, for every class of $y$, we rank all nodes in the penultimate layer by the norm of their absolute weight (in the final dense layer). We group the points into submodels according to their ranking -- the $k$ nodes with the highest weights are grouped together, next the $k+1, \dots, 2k$ ranked nodes are grouped, etc. We set $k=10$.
We determine the performance of a submodel by training a simple logistic classifier to predict the class of an input, based on the output of the submodel. To measure the performance of the classifier, we use the cross-entropy loss. To capture the equivalent notion of the AUC, we estimate the performance of the sub-models throughout training, and sum over the estimated cross-entropy losses.
Below, we show additional plots for the \textit{parallel} and \textit{concurrent} training schemes. The results are the same as those presented in the main text, and we observe [1] a negative correlation between test performance and ensemble weights and [2] a strong correlation between SOTL and average test cross-entropy.
\begin{figure}
\caption{\textbf{Linear combinations of DNNs on FashionMNIST.} Left: ensemble weights versus the test loss for parallel training; we observe a negative correlation. Middle: SOTL (standardized by the number of training samples) versus test loss for concurrent and concurrent training. We observe a strong correlation indicating that the SOTL generalizes well. Right: training curves for the different models in concurrent training schemes. All results are averaged over $10$ runs, and standard deviations are shown by the shaded regions around each observation. The model parameters, given in the parentheses, are the number of layers ($l$), nodes per layer ($n$) and kernel size ($k$), respectively. }
\label{fig:mod_select_dnn_parallel}
\end{figure}
However, similarly to the linear setting, the difference in assigned weights is magnified in the concurrent training scheme. Here we find that in the concurrent training scheme, the ensemble focuses on training the CNNs (as can be seen from the training curve in Figure \ref{fig:mod_select_dnn} in the main text). This is likely because CNNs are able to learn more easily, leading to larger weights earlier on.
\chapter{Dynamics of reinforcement learning}
\section{Auxiliary results} \label{sec:aux-results} In this section, we state and prove some additional lemmas that are useful in proving the results stated in this chapter.
\begin{lemma}\label{lem:grassmann1}
Let $x \in \mathbb{R}^d$, and let $(v_t)_{t \geq 0}$ be a sequence of vectors in $\mathbb{R}^d$ satisfying $v_t = f(t) x + o(f(t))$, for some function $f : [0, \infty) \rightarrow (0, \infty)$. Then $d(\langle v_t\rangle , \langle x\rangle ) \rightarrow 0$ as $t \rightarrow \infty$. \end{lemma} \begin{proof}
The Grassmann distance $d(\langle v_t\rangle , \langle x\rangle )$ between two one-dimensional subspaces has a particular simple form, given by
\begin{align*}
d(\langle v_t \rangle , \langle x \rangle ) = \min\left( \arccos\left( \frac{\langle v_t, x \rangle}{\|v_t\| \|x\|} \right) , \arccos\left( \frac{\langle -v_t, x \rangle}{\|v_t\| \|x\|} \right) \right) \, .
\end{align*}
In our case, for sufficiently large $t$ this yields
\begin{align*}
d(\langle v_t\rangle , \langle x\rangle ) & = \arccos\left( \frac{\langle f(t) x + o(f(t)), x \rangle}{\| f(t) x + o(|f(t)|) \| \|x\|} \right) \\
& = \arccos\left( \frac{\langle x + o(1), x \rangle}{\| x + o(1) \| \|x\|} \right)\\
& \rightarrow \arccos\left( \frac{\langle x , x \rangle}{\| x \| \|x\|} \right)\\
& = 0 \, .
\end{align*} \end{proof}
\begin{lemma}\label{lem:grassmann2}
Let $U_1,\ldots,U_{|\mathcal{X}|}$ be a basis for $\mathbb{R}^{\mathcal{X}}$, let $K < |\mathcal{X}|$, and let $(a_{ij} |i \in [K], j \in [|\mathcal{X}|])$ be real coefficients.
Let $0 < \beta_1 < \cdots < \beta_{|\mathcal{X}|}$
, and consider time-dependent vectors $W_1(t),\ldots,W_d(t)$ defined by
\begin{align*}
W_i(t) = \sum_{j=1}^{|\mathcal{X}|} a_{ij} e^{-\beta_j t} U_j \, , \quad t \geq 0 \, .
\end{align*}
Then for almost all sets of coefficients $(a_{ij} |i \in [K], j \in [|\mathcal{X}|])$, we have
\begin{align*}
d(W_{1:K}(t) , U_{1:K}) \rightarrow 0 \, .
\end{align*} \end{lemma}
\begin{proof}
Without loss of generality, we may take the vectors $U_1,\ldots,U_{|\mathcal{X}|}$ to be the canonical basis vectors. Under the assumptions of the theorem, we exclude initial conditions for which the matrix $A$ with $(i,j)$\textsuperscript{th} element $a_{ij}$ is not full rank. Note that under this condition, the matrix $A_t$ with $(k,i)$\textsuperscript{th} element $a_{ki}e^{\beta_i t}$ is also full rank for all but finitely many $t$.
By performing row reduction operations and scaling rows, for all such $t$ we may pass from $(W_{k}(t) \mid k \in [K])$ to an alternative spanning set $(\widetilde{W}_{k}(t) \mid k \in [ K])$ of the same subspace such that $\widetilde{W}_{k}(t) - U_k \in \langle U_{K+1:|\mathcal{X}|}\rangle$, and $\|\widetilde{W}_k(t) - U_k\| = O(e^{-t(\beta_{K+1} - \beta_k)}) = o(1)$. We therefore obtain an orthonormal basis for this subspace of the form $U_1 + o(1),\ldots, U_K +o(1)$.
We now use the singular value decomposition characterization of Grassmann distance in Definition~\ref{def:grassmann-distance}. Since we have obtained an orthonormal basis for the subspace $\langle W_k(t) \mid k \in [K] \rangle$, the top-$K$ singular values of the matrix $(\sum_{k=1}^K U_k U_k^\top)(\sum_{k=1}^K (U_k + o(1))( U_k + o(1))^\top )$ determine the Grassmann distance. However, this matrix is equal to $\text{diag}(1,\ldots,1,0,\ldots,0) + o(1)$, with $K$ entries of $1$ in the diagonal matrix.
But the top-$K$ singular values of this matrix are $1+o(1)$, and so the principal angles between the subspaces are $o(1)$, and hence the Grassmann distance between the subspaces is $o(1)$, as required. \end{proof}
\begin{restatable}{lemma}{lemmaRewardMatrix}\label{lem:reward-matrix} For $M \in \mathbb{N}$, let $(r^m)_{m=1}^M$ be independent random variables drawn from some fixed mean-zero distribution in $\mathscr{P}(\mathbb{R}^{\mathcal{X}\times\mathcal{A}})$ such that the covariance between coordinates $(x, a), (y, a)$ is $\Sigma_{xy}$, independent of $a \in \mathcal{A}$. Let $(\mathbf{w}^m)_{m=1}^M$ be independent random variables taking values in $\mathbb{R}^{K \times \mathcal{A}}$, with columns drawn independently from $\mathcal{N}(0, (1/M)I)$. Then $\sum_{m=1}^M r^m (\mathbf{w}^m)^\top$ converges (in distribution) to a mean-zero Gaussian distribution over $\mathbb{R}^{\mathcal{X} \times K}$, with independent columns, and individual columns having covariance matrix $\Sigma$. \end{restatable}
\begin{proof}
The proof simply follows by noting that $\sum_{m=1}^M r^m (\mathbf{w}^m)^\top$ may be written $1/\sqrt{M} \sum_{m=1}^M r^m (\varepsilon^m)^\top$, with $(\varepsilon^m)_{m=1}^\infty$ i.i.d.~$N(0,I)$ random variables. The individual terms have the desired mean and variance, and the resulting convergence in distribution now follows from the central limit theorem. \end{proof}
\begin{restatable}{lemma}{lemmaWLimit}\label{lem:w-limit} For fixed $M$, let $(\mathbf{w}^m)_{m=1}^M$, $\mathbf{w}^m \in \mathbb{R}^d$, be sampled i.i.d. according to $\mathcal{N}(0, \frac{1}{M}I)$. Then the following hold. \begin{equation}
\lim_{M \rightarrow \infty} \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top = I \text{ and } \lim_{M \rightarrow \infty} \sum_{m=1}^M \mathbf{w}^m \overset{D}{=} \epsilon \sim \mathcal{N}(0, I) \end{equation} \end{restatable} \begin{proof} We prove two results on the limit of $W = \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top$ as $M \rightarrow \infty$. First \begin{align*}
\lim_{M \rightarrow \infty} \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top &\overset{P}{=} I \, , \\
\intertext{which we observe by evaluating an arbitrary diagonal and off-diagonal element of $\sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top$. For the diagonal terms, note that}
\left(\sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top\right) [j, j] &= \sum_{m=1}^M (\mathbf{w}^m_{j})^2 \end{align*} Now observe that \begin{align*}
\mathbb{E}\left\lbrack \sum_{m=1}^M (\mathbf{w}^m_j)^2 \right\rbrack &= M \frac{1}{M} = 1 \, , \text{ and } \quad
\text{Var} \left(\sum_{m=1}^M (\mathbf{w}^m_j)^2\right) = M \frac{1}{M^2} \rightarrow 0 \\ \end{align*} Similarly, for the off-diagonal terms, let $j \not= \ell$. Then we have \begin{align*}
\left( \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top\right) [j, \ell] &= \sum_{m=1}^M \mathbf{w}^m_j \mathbf{w}^m_\ell \, , \end{align*} and further \begin{align*}
\mathbb{E}\left\lbrack \sum_{m=1}^M \mathbf{w}^m_j \mathbf{w}^m_\ell \right\rbrack = 0 \, , \text{ and } \quad
\text{Var}\left(\sum_{m=1}^M \mathbf{w}^m_\ell \mathbf{w}^m_j\right) &= M \frac{1}{M^2} \rightarrow 0 \, . \end{align*} The limit in probability is immediately implied by Chebyshev's inequality. The result on $\sum_{m=1}^M \mathbf{w}^m$ follows immediately from part 1 and the fact that a sum of Gaussian random variables is another Gaussian random variable, whose mean and variance in this case are those of a standard normal. \end{proof} \section{Proofs} \label{sec:proofs-rl-dynamics} \lemODESoln*
\begin{proof}
Equation~\eqref{eq:value-function-ode-solution} can be verified as a solution to Equation~\eqref{eq:value-function-ode} by direct differentiation. Uniqueness of the solution follows since this is an autonomous initial value problem that satisfies the Lipschitz condition, and so the Picard-Lindelh\"of theorem applies. \end{proof}
\propOneValueFunction*
\begin{proof}
By Assumption~\ref{assume:value-function-conditions}, $P^\pi$ is diagonalisable, with eigenbasis $U_1,\ldots,U_{|\mathcal{X}|}$, with corresponding eigenvalues $\lambda_{1:|\mathcal{X}|}$ with strictly decreasing magnitudes $|\lambda_1| > \cdots > |\lambda_{|\mathcal{X}|}|$. We note then that $\exp(-t(I - \gamma P^\pi))$ is also diagonalisable under the same basis, with eigenvalues $\exp(t (\gamma \lambda_i - 1))$, for $i=1,\ldots,|\mathcal{X}|$. We may therefore expand $V_0$ with respect to this eigenbasis, and write
\begin{align*}
V_0 - V^\pi = \sum_{i=1}^{|\mathcal{X}|} \alpha_i U_i \, ,
\end{align*}
for some $\alpha_{1:|\mathcal{X}|} \in \mathbb{R}^{|\mathcal{X}|}$. Now note from the differential equation \eqref{eq:value-function-ode-solution}, we have
\begin{align*}
V_t - V^\pi = \exp(-t(I - \gamma P^\pi)) (V_0 - V^\pi) = \sum_{i=1}^{|\mathcal{X}|} \alpha_i \exp(t(\gamma \lambda_i - 1)) U_i \, .
\end{align*}
Note that as $P^\pi$ is a stochastic matrix, we have $|\lambda_i| \leq 1$ for all $i=1,\ldots,|\mathcal{X}|$, and hence $\exp(t(\gamma \lambda_i - 1)) \rightarrow 0$ for all $i=1,\ldots,|\mathcal{X}|$. Further, $\exp(t(\gamma \lambda_i - 1)) = o(\exp(t(\gamma \lambda_1 - 1)))$ for all $i=2,\ldots,|\mathcal{X}|$.
We make the additional assumption that $\alpha_1 \not= 0$, which makes the `almost every initial condition' assumption in the statement precise. Under this assumption, we therefore have
\begin{align*}
V_t - V^\pi &= \alpha_1 \exp(t(\gamma \lambda_1 - 1)) U_1 + \sum_{i=2}^{|\mathcal{X}|} \alpha_i \exp(t(\gamma \lambda_i - 1)) U_i\\
&= \alpha_1 \exp(t(\gamma \lambda_1 - 1)) U_1 + o(\exp(t(\gamma \lambda_1 - 1))) \, .
\end{align*}
Then Lemma~\ref{lem:grassmann1} applies to give $d(\langle V_t - V^\pi \rangle, \langle U_1 \rangle) \rightarrow 0$, as required. \end{proof}
\propManyValueFunctions*
\begin{proof}
Expanding $V^{(k)}_0 - V^\pi$ with respect to $U_1,\ldots,U_{|\mathcal{X}|}$ for each $k=1,\ldots,|\mathcal{X}|$, we obtain expressions of the form
\begin{align*}
V^{(k)}_0 - V^\pi = \sum_{i=1}^{|\mathcal{X}|} a_{k i} U_i \, .
\end{align*}
By the ODE solution in Lemma~\ref{lem:ode-soln}, we then have
\begin{align*}
V^{(k)}_t - V^\pi = \sum_{i=1}^{|\mathcal{X}|} a_{k i} e^{-t(1-\gamma\lambda_i)} U_i \, .
\end{align*}
We may now apply Lemma~\ref{lem:grassmann2} to obtain the desired result. \end{proof}
\lemCoupledDynamics*
\begin{proof}
This follows immediately by computing the derivatives in Equations~\eqref{eq:phi-ode} \& \eqref{eq:w-ode}, and so we omit the direct calculations. \end{proof}
\thmInfiniteHeads*
\begin{proof} We write the dynamics on $\Phi^M_t$ as follows and apply the results of Lemma~\ref{lem:w-limit}. We first consider the scaled initialization setting (implicitly setting the learning rate $\alpha=1$), where we find \begin{align}
\partial_t \Phi^M_t &= (I - \gamma P^\pi)\Phi_t^M \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top + \sum_{m=1}^M R^{\pi} (\mathbf{w}^m)^\top \\
\lim_{M \rightarrow \infty} \partial_t \Phi^M_t &= (I - \gamma P^\pi)\Phi_t^M \lim_{M \rightarrow \infty}\sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top + \lim_{M \rightarrow \infty} R^\pi(\sum_{m=1}^M \mathbf{w}^m)^\top \\
&\overset{D}{=} (I - \gamma P^\pi )\Phi_t^M I + R^\pi \epsilon^\top, \; \epsilon \sim \mathcal{N}(0, I). \end{align}
We further observe that, for any finite interval, in the setting of zero reward we obtain \textit{uniform} convergence of the induced trajectory $\Phi_t^M$ to the trajectory of the limiting dynamics. We first observe that for a fixed initialization, we have that the induced dynamics are linear (in the zero-reward setting, affine otherwise) function of $\Phi^M_t$, and so \begin{align*}
\partial_t \Phi^M_t &= (I - \gamma P^\pi) \Phi^M_t \sum_{m=1}^M w^m (w^m)^\top = \mathcal{L}^M \Phi^M_t \\
&\text{ where $\mathcal{L}^M(A) = (I - \gamma P^\pi) A \sum_{m=1}^M w^m (w^m)^\top$} \\
\implies \Phi^M_t &= \exp(t \mathcal{L}^M)\Phi^M_0\; . \intertext{Because the function $t \mapsto \exp (t A)$ is Lipschitz on a bounded interval for any $A$, this implies that for any finite interval $[0, T]$, the functions $t \mapsto \Phi^M_t$, as well as limiting solution, are $L$-Lipschitz for some $L$. Further, since the exponential is continuous, } \lim_{M \rightarrow \infty} \Phi^M_t &= \lim_{M \rightarrow \infty} \exp(t \mathcal{L}^M) \Phi^M_0 = \exp(t \lim_{M \rightarrow \infty} \mathcal{L}^M) \Phi_0 \\ &= \exp (-t(I - \gamma P^\pi)) \Phi_0 = \Phi^\infty_t \;. \intertext{Therefore, the functions $t \mapsto \Phi^M_t$ are $L$-Lipschitz and converge to the limit $\Phi^\infty_t$ on the interval $[0, T]$, which implies that they converge uniformly. } \end{align*}
To evaluate the scaled learning rate setting, we observe that we now have \begin{align}
\partial_t \Phi^M_t &= \frac{1}{M} (I - \gamma P^\pi)\Phi_t^M \sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top + \sum_{m=1}^M R^{\pi} (\mathbf{w}^m)^\top \\
\lim_{M \rightarrow \infty} \partial_t \Phi^M_t &= (I - \gamma P^\pi)\Phi_t^M \lim_{M \rightarrow \infty} \frac{1}{M}\sum_{m=1}^M \mathbf{w}^m (\mathbf{w}^m)^\top + \lim_{M \rightarrow \infty} \frac{1}{M}R^\pi(\sum_{m=1}^M \mathbf{w}^m)^\top \\
& = (I - \gamma P^\pi )\Phi_t^M I . \\
\implies \lim_{M \rightarrow \infty} \Phi^M_t & = \exp(-t(I - \gamma P^\pi)) \Phi_0\, , \end{align} almost surely. The principal difference between this and the scaled initialization setting is that here we divide the $R^\pi \mathbf{w}^\top$ term by $\frac{1}{M}$, whereas the scaled initialization is equivalent to scaling by $\frac{1}{\sqrt{M}}$. Therefore the scaled learning rate limit can be computed by the law of large numbers and converges in probability to its mean (zero), whereas under the scaled initialization it converges via the central limit theorem to a Gaussian distribution. \end{proof}
\propSubspaceConvergence*
\begin{proof}
As described in the proof of Theorem~\ref{thm:infinite-heads}, we have $\Phi_t = \exp(-t(I - \gamma P^\pi))(\Phi_0 - \Phi_\infty) + \Phi_\infty$. Under Assumption~\ref{assume:value-function-conditions}, we may now apply an analogous argument as in Proposition~\ref{prop:many-value-functions} to the columns of $\Phi_t - \Phi_\infty$, and apply Lemma~\ref{lem:grassmann2} to obtain the desired result. \end{proof}
\ThmDistribution*
\begin{proof} We recall from Theorem~\ref{thm:infinite-heads} that the limiting dynamics follow the distribution \begin{align}
\lim_{t \rightarrow \infty} \lim_{M \rightarrow \infty} \Phi_t &\overset{D}{=} \lim_{t \rightarrow \infty} \exp(-t(I - \gamma P^\pi)) (\Phi_0 - (I -\gamma P^\pi)^{-1} Z_\Sigma) + (I - \gamma P^\pi)^{-1} Z_\Sigma\\
& \overset{D}{=} (I - \gamma P^\pi) ^{-1} Z_\Sigma \end{align} for which we can straightforwardly apply known properties of Gaussian distributions: namely, that the distribution of a linear transformation $A$ of a Gaussian random variable with parameters $\mu, \Sigma$ is also Gaussian with mean $A\mu$ and covariance $A \Sigma A^\top$. Letting $A=(I - \gamma P^\pi)$ therefore gives the desired result. \end{proof}
\propSubspaceConvergenceRC*
\begin{proof}
As described in the proof of Theorem~\ref{thm:distribution}, we have $\Phi_t = \exp(-t(I - \gamma P^\pi))(\Phi_0 - (I - \gamma P^\pi)^{-1} Z_\Sigma ) + (I - \gamma P^\pi)^{-1} Z_\Sigma $. Under Assumption~\ref{assume:value-function-conditions}, we may now apply an analogous argument as in Proposition~\ref{prop:many-value-functions} to the columns of $\Phi_t - (I - \gamma P^\pi)^{-1} Z_\Sigma$, and apply Lemma~\ref{lem:grassmann2} to obtain the desired result. \end{proof}
\section{Additional results from Table~\ref{tab:theory}} \label{apx:table-results} We begin this section by noting the following property of systems following linear dynamics. \begin{lemma}\label{lem:dynamics-aux} Let $\Phi_t \in \mathbb{R}^{\mathcal{X} \times M}$ follow the dynamics $\partial_t \Phi_t \overset{D}{=} A \Phi_t + B$, where $A$ is a linear operator for which all eigenvalues have negative real part, and $B$ is a vector. Then \begin{align}
\lim_{t \rightarrow \infty} \Phi_t = -A^{-1}B \, . \end{align} Further, if $A$ is diagonalisable, with all eigenvalues of different magnitudes, \begin{align}
\lim_{t \rightarrow \infty} d( \langle \Phi_t - \Phi_\infty \rangle, \langle U_{1:K}(A) \rangle) = 0 \, , \end{align} where $U_i(A)$ is the eigenvector of $A$ corresponding to the eigenvalue with $i$\textsuperscript{th} largest magnitude. \end{lemma} \begin{proof}
We observe that the dynamics $\partial_t \Phi_t = A \Phi_t$ induce the trajectory
\begin{equation}
\Phi_t = \exp(tA)\Phi_0 + (I - \exp(tA))(-A^{-1}B)\; ,
\end{equation}
with limit $\Phi_\infty = - A^{-1}B$. When $A$ is diagonalizable, we can therefore straightforwardly apply the results of Lemma~\ref{lem:grassmann2} to get that the limiting subspace will be characterized by the top $k$ eigenvectors of $A$. In the settings we are interested in, $A = - ( I - \gamma P^\pi)$ for some $\pi$ and some $\gamma$, and so the principal eigenvectors of $A$ will be the principal eigenvectors of $P^\pi$. \end{proof}
The following two theorems characterize the learning dynamics under the past policies and multiple timescale auxiliary tasks listed in Table~\ref{tab:theory}. With these characterizations, it becomes straightforward to deduce $\Phi_\infty$ and the limiting subspace error as a direct consequence of the previous lemma.
\begin{theorem} \label{thm:pastpolicies-aux} Let $\pi_1, \dots, \pi_L$ be a fixed set of policies. Given fixed $M$ and $L$, we define the indexing function $i_m = \lceil\frac{L}{m} \rceil$ for $m \in [1, M]$. Let $\Phi^M_t$ follow the dynamics \begin{align}
\partial_t \Phi_t^M &= \sum_{m=1}^M -( (I - \gamma P^{\pi_{i_m}})\Phi_t^M \mathbf{w}^m_t + R^{\pi_{i_m}})(\mathbf{w}^m_t)^\top \end{align} Then $\Phi_t^M$ satisfies the following dynamics and trajectory in the limit as $M \rightarrow \infty$, where $\bar{\pi} = \sum_{i=1}^L \pi_i$ and $\epsilon_i \in \mathbb{R}^d$ is an isotropic Gaussian with variance $\frac{1}{L}$. Note that we cannot naively average the rewards without changing the variance of the induced distribution unless $R^{\pi_i} = R^{\pi_j}$ for all $i,j$. \begin{align}
\lim_{M \rightarrow \infty} \partial_t \Phi_t^M &\overset{D}{=} -(I - \gamma P^{\bar{\pi}})\Phi_t + \sum_{i=1}^L R^{\pi_{i}} \epsilon_{i}\\
\lim_{M \rightarrow \infty} \Phi_t^M &\overset{D}{=} \exp (-t(I - \gamma P^{\bar{\pi}} )) (\Phi_0 - \Phi_\infty ) + (I - \gamma P^{\bar{\pi}} )^{-1} \bigg ( \sum_{i=1}^L R^{\pi_{i}} \epsilon_{i}^\top \bigg ) \end{align}
\end{theorem} \begin{proof} The result on the trajectories follows immediately from the result on the dynamics, so it suffices to prove convergence of the dynamics. We approach this problem by decomposing the dynamics of $\Phi^M_t$ as follows. \begin{equation}
\partial_t \Phi_t^M = \sum_{m=1}^M - (I - \gamma P^{\pi_{i_m}})\Phi_t^M \mathbf{w}^m_t (\mathbf{w}^m_t)^\top - \sum_{m=1}^M R^{\pi_{i_m}}(\mathbf{w}^m_t)^\top \, . \end{equation}
We first consider the random variables in the term which includes the rewards $R^\pi$. For this, we can directly apply the results from the previous theorems to the random variables $\epsilon_j = \sum_{m : i_m = j} w^m$, whose limiting variance is easily computed to be \begin{equation}
\lim_{M \rightarrow \infty} \text{Var}\left(\sum_{m : i_m = j} \mathbf{w}^m\right) = \lim_{M \rightarrow \infty} \sum_{m=\lfloor \frac{j}{L}M \rfloor}^{\lfloor \frac{j+1}{L}M \rfloor} \frac{1}{M}I = \frac{1}{L}I \, . \end{equation} For the term which depends on $\Phi_t$, we see \begin{align}
\sum_{m=1}^M - (I - \gamma P^{\pi_{i_m}} )\Phi^M_t \mathbf{w}^m_t (\mathbf{w}^m_t)^\top &= \sum_{i=1}^L \sum_{m : i_m = i}^M - (I - \gamma P^{\pi_{i_m}} )\Phi^M_t \mathbf{w}^m_t (\mathbf{w}^m_t)^\top \\
&= \sum_{i=1}^L - (I - \gamma P^{\pi_{i}} )\Phi^M_t \sum_{m : i_m = i}^M \mathbf{w}^m_t (\mathbf{w}^m_t)^\top \, . \\
\intertext{Since $L$ is finite and fixed, $\sum_{m:i_m=i}^M\mathbf{w}^m_t(\mathbf{w}^m_t)^\top$ converges to $\frac{1}{L}I$}
& \underset{M \rightarrow \infty}{\longrightarrow} \sum_{i=1}^L - (I - \gamma P^{\pi_{i}} )\Phi^M_t \frac{1}{L}I \\
&= -( I - \gamma \frac{1}{L}\sum_{i=1}^L P^{\pi_i})\Phi_t^M \\
&= -(I - \gamma P^{\bar{\pi}} )\Phi_t^M \, . \end{align} And so the limiting distribution becomes \begin{equation}
\lim_{M \rightarrow \infty } \partial_t \Phi_t^M = -(I - \gamma P^{\bar{\pi}} )\Phi_t^M - \bigg ( \sum_{i=1}^L R^{\pi_{i}} \epsilon_{i}^\top \bigg ) \, . \end{equation} \end{proof} \begin{corollary}
The above result can be readily adapted to the setting in which each head predicts a randomly selected (deterministic) policy in MDPs with finite state and action spaces. Let $L = |\mathcal{A}| ^{|\mathcal{X}|}$, $\{\pi_1, \dots, \pi_L\}$ be an enumeration of $\mathcal{A} ^\mathcal{X}$, and $i_m$ denote the index of the policy randomly assigned to head $m$; then the above result still holds, and $\bar{\pi}$ is the uniform policy. \end{corollary} \begin{theorem} \label{thm:multiple-timescales} We consider the task of predicting the value functions of a fixed policy under multiple discount rates $\gamma_1, \dots, \gamma_L$. For fixed $M$, $L$, let $i_m$ denote the indexing function defined in Theorem~\ref{thm:pastpolicies-aux}. Let $\Phi_t$ follow the dynamics \begin{align}
\partial_t \Phi_t^M &= \sum_{m=1}^M -( (I - \gamma_{i_m} P^{\pi})\Phi_t^M \mathbf{w}^m_t + R^{\pi})(\mathbf{w}^m_t)^\top \, . \end{align} Then the limiting dynamics as $M\rightarrow \infty$ of $\Phi_t^M$ are as follows, where $\bar{\gamma} = \sum \frac{1}{L} \gamma_i$ \begin{align}
\lim_{M \rightarrow \infty} \partial_t \Phi_t^M &\overset{D}{=} -(I - \bar{\gamma} P^{\pi})\Phi_t + R^{\pi} \epsilon^\top\\
\intertext{and}
\lim_{M \rightarrow \infty} \Phi_t^M &\overset{D}{=} \exp (-t(I - \bar{\gamma} P^{{\pi}} )) (\Phi_0 - \Phi_\infty ) + (I - \bar{\gamma} P^{\pi} )^{-1} R^{\pi} \epsilon^\top \; . \end{align} \end{theorem}
\begin{proof} We follow a similar derivation as for Theorem~\ref{thm:pastpolicies-aux} in deriving the component of the dynamics which depends on $\Phi^M_t$. The result of Theorem~\ref{thm:infinite-heads} immediately applies to the $\sum R^\pi (\mathbf{w}^m_t)^\top$ term: \begin{align}
\sum_{m=1}^M - (I - \gamma_{i_m} P^\pi )\Phi^M_t \mathbf{w}^m_t (\mathbf{w}^m_t)^\top &= \sum_{i=1}^L \sum_{m : i_m = i}^M - (I - \gamma_i P^\pi )\Phi^M_t \mathbf{w}^m_t (\mathbf{w}^m_t)^\top \\
&= \sum_{i=1}^L - (I - \gamma_i P^{\pi} )\Phi^M_t \sum_{m : i_m = i}^M \mathbf{w}^m_t (\mathbf{w}^m_t)^\top \, . \\
\intertext{Since $L$ is finite and fixed, $\sum_{m:i_m=i}^M\mathbf{w}^m_t(\mathbf{w}^m_t)^\top$ converges to $\frac{1}{L}I$ as before:}
& \underset{M \rightarrow \infty}{\longrightarrow} \sum_{i=1}^L - (I - \gamma_i P^{\pi} )\Phi^M_t \frac{1}{L}I \\
&= -( I -\sum_{i=1}^L \frac{\gamma_i}{L} P^{\pi})\Phi_t^M \\
&= -(I - \bar{\gamma} P^{{\pi}} )\Phi_t^M \, . \end{align} \end{proof}
\section{Beyond diagonalisability assumptions}\label{sec:more-general-value-function-results}
In this section, we briefly describe extensions of the results of Chapter~\ref{chp:rl-dynamics} in scenarios where Assumption~\ref{assume:value-function-conditions} does not hold. There are two main cases we consider: (i) those in which $P^\pi$ is still diagonalisable, but does not have all eigenvalues with distinct magnitudes; and (ii) those in which $P^\pi$ is not diagonalisable.
In the former case, we do not have the different convergence rates of coefficients of different eigenvectors as in the proof of Proposition~\ref{prop:many-value-functions}. By similar arguments we can still deduce convergence of $V_t$ to the span of the eigenspaces with highest magnitude eigenvalues, but we can no longer deduce convergence to individual eigenspaces if there are several other eigenvalues with the same magnitude as the eigenvalue concerned. Note also that this includes the case where the matrix $P^\pi$ is complex- but not real-diagonalisable, since in such case non-real eigenvalues must come in conjugate pairs (which are necessarily of the same absolute value).
In the latter case, we no longer have an eigenbasis for $\mathbb{R}^{\mathcal{X}}$ based on $P^\pi$. However, we can consider the Jordan normal decomposition, and may still recover analogous results to those in Chapter~\ref{chp:rl-dynamics}, where convergence is now to the subspaces generated by \emph{Jordan blocks} with high absolute value eigenvalues. See \citet{parr2008analysis} for further commentary on Jordan normal decompositions in feature analysis.
\section{Extensions beyond one-step temporal difference learning}\label{sec:beyond-one-step}
Our analysis in Chapter~\ref{chp:rl-dynamics} has focused on the case of learning dynamics under one-step temporal difference learning. This choice is largely because one-step temporal difference learning is such a popular algorithm, not because the results do not hold more generally. In this section, we describe the elements of analogous results for $n$-step learning and TD($\lambda$) for interested readers. We focus on the case of value function dynamics, and believe extensions of the representation dynamics analysis in Chapter~\ref{chp:rl-dynamics} along these lines will be interesting directions for future work.
\subsection{Temporal difference learning with $n$-step returns}
In the case of $n$-step returns, the dynamics on the value function $(V_t)_{t \geq 0}$ are given by \begin{align*}
\partial_t V_t(x) = \mathbb{E}_\pi\left\lbrack \sum_{k=0}^{n-1} \gamma^k R_k + \gamma^n V_t(X_n)\middle| X_0 = x \right\rbrack - V_t(x) \, . \end{align*} In full vector notation, we have \begin{align*}
\partial_t V_t = -(I - \gamma^n (P^\pi)^n) V_t + \left\lbrack \sum_{k=0}^{n-1} (\gamma P^\pi)^k \right\rbrack R^\pi \, . \end{align*} The solution to this differential equation is \begin{align*}
V_t = \exp( -t (I - (\gamma P^\pi)^n ) )(V_0 - V^\pi) + V^\pi \, . \end{align*} This bears a close relationship with the result obtained for $1$-step temporal difference learning in Chapter~\ref{chp:rl-dynamics}. As expected, we obtain the same limit point. Further, under Assumption~\ref{assume:value-function-conditions}, $(P^\pi)^n$ has the same eigenvectors as $P^\pi$, and so results analogous to Propositions~\ref{prop:one-value-function} \& \ref{prop:many-value-functions} hold for $n$-step temporal difference learning too under these conditions.
\subsection{Temporal difference learning with $\lambda$-returns}
In the case of temporal difference learning with $\lambda$-returns (for $\lambda \in [0,1)$), the dynamics on the value function $(V_t)_{t \geq 0}$ are given by \begin{align*}
\partial_t V_t(x) = \mathbb{E}_\pi\left\lbrack \sum_{k=0}^{\infty} (\lambda\gamma)^k (P^\pi)^k ( R^\pi + \gamma P^\pi V_t(X_{k+1}) - V_t(X_k))\middle| X_0 = x \right\rbrack - V_t(x) \, . \end{align*} In full vector notation, we have \begin{align*}
\partial_t V_t = \sum_{k=0}^{\infty} (\lambda\gamma)^k (P^\pi)^k ( R^\pi + \gamma P^\pi V_t - V_t) \end{align*} The solution to this differential equation is \begin{align*}
V_t = \exp\left(t\left((1-\lambda) \sum_{k=1}^\infty \lambda^{k-1}\gamma^k (P^\pi)^k - I\right)\right) (V_0 - V^\pi) + V^\pi \, . \end{align*} As with $n$-step temporal difference learning, this bears a close relationship with the result obtained for $1$-step temporal difference learning in Chapter~\ref{chp:rl-dynamics}. As expected, we obtain the same limit point. Further, under Assumption~\ref{assume:value-function-conditions}, each $(P^\pi)^k$ has the same eigenvectors as $P^\pi$, and so results analogous to Propositions~\ref{prop:one-value-function} \& \ref{prop:many-value-functions} hold for temporal difference learning with $\lambda$-returns too under these conditions.
\section{Bayes-optimality of RSBFs} \label{sec:bayes-opt}
We can develop the discussion of RSBFs beyond their properties as a matrix decomposition described in Section~\ref{sec:feature-selection} to observe that the RSBFs characterize the Bayes-optimal features for predicting an unknown value function given an isotropic Gaussian prior distribution on the reward, and further characterize a Bayesian posterior over value functions given by conditioning on the known dynamics of the MDP. We will denote by $V_K(\Psi)$ the top $K$ eigenvectors of the matrix $\Psi \Psi^\top$, i.e. the top $K$ left singular vectors of $\Psi$. \begin{restatable}{corollary}{corrBayesOpt} Under an isotropic Gaussian prior on reward function $r \in \mathbb{R}^{\mathcal{X}}$, the subspace $V_K(\Psi)$ corresponds to the optimal subspace with respect to the following regression problem. \begin{equation}
\min_{\Phi \in \mathbb{R}^{\mathcal{X} \times K}} \mathbb{E}_{r \sim \mathcal{N}(0, I)} \left\lbrack \| \Pi_{\Phi^\perp} (I - \gamma P^\pi)^{-1} r \|^2 \right\rbrack \, , \end{equation} where $\Pi_{\Phi^\perp}$ denotes orthogonal projection onto the orthogonal complement of $\Phi$. \end{restatable}
\begin{proof} Let $S$ denote some subspace $S \subset V$.
\begin{align}
\mathbb{E}[\|\Pi_S \Psi r\|^2] &= \mathbb{E}[r^\top \Psi^\top \Pi_S^\top \Pi_S \Psi r]
\intertext{We note that for any real symmetric matrix $A$ we can rewrite $A = \sum \alpha_i v_i v_i^\top$.}
\mathbb{E}[r^\top \Psi^\top \Pi_S^\top \Pi_S \Psi r] &= \mathbb{E}[ r^\top (\sum \alpha_i v_i v_i^\top) r] = \mathbb{E}[\sum \alpha_i (r^\top v_i) (v_i^\top r)] \\
&= \mathbb{E}[\sum \alpha_i v_i^\top r r^\top v_i] = \sum \alpha_i v_i^\top \mathbb{E}[r r^\top] v_i \\
&= \sum \alpha_i v_i^\top v_i = \text{Tr}(\Psi^\top \Pi_S^\top \Pi_S \Psi) =\text{Tr}(\Psi^\top \Pi_S \Psi)
\intertext{Finally, we can re-express the minimization problem as follows}
\text{argmin}_{S: \text{Dim}(S) = k} \text{Tr}(\Psi^\top(\Pi_{S^\perp})\Psi) &= \text{argmax}_{S:\text{Dim}(S) = k} \text{Tr}(\Psi^\top \Pi_S \Psi)
\intertext{Now, because the subspace spanned by the top $k$ left-singular vectors $\{u_1, \dots, u_k\}$ of $\Psi$ is known to be the maximizer of the above equation, we finally obtain}
&= \langle u_1, \dots, u_k \rangle = V_K(\Psi) \; .
\end{align} \end{proof}
\begin{corollary} The limiting distribution of $\Phi^M_t$ under the random cumulant auxiliary task described in Theorem~\ref{thm:distribution} is equivalent to the Bayesian posterior over value functions obtained by conditioning on the dynamics $P^\pi$, and given a prior distribution on the reward function equal to $\mathcal{N}(0, \Sigma)$. \end{corollary} \begin{proof} Each column of $Z_\Sigma$ is sampled from an isotropic Gaussian distribution, and therefore each feature $\phi_i \overset{D}{=} (I - \gamma P^\pi)^{-1} \epsilon_i$. It therefore suffices to show that under a suitable prior distribution, the distribution of $\phi_i$ is equal to a Bayesian posterior.
For this, it suffices to show that such a posterior can be obtained by conditioning on the transition dynamics $P^\pi$, and looking at the induced pushforward measure on the reward distribution. Noting that $(I - \gamma P^\pi)$ is invertible, we then obtain the following prior over $V^\pi$, assuming an isotropic Gaussian prior on $p_r(r)$ and any arbitrary distribution over potential transition dynamics $p_\pi(P^\pi)$ which covers $\mathbb{R}^{|S| \times d}$. \begin{align}
P(V^\pi) &= \int_{(r, P^\pi)} \mathbbm{1}[(I - \gamma P^\pi)^{-1}r = V^\pi] dp_r(r)dp_{\pi}( P^\pi)
\intertext{We observe that the random variable $V^\pi$ has conditional distribution $P(V^\pi|P^\pi) = P((I - \gamma P^\pi)^{-1}r)$, whose density is proportional to $p_r( (I - \gamma P^\pi)V)$ by the change of variables formula.}
P(V^\pi | P^\pi) &= c p_r(r = (I - \gamma P^\pi)V^\pi) \\
\intertext{ Because our prior over $r$ is equal to the initialization distribution of $\epsilon_i$, we obtain}
&= c p_{\text{init}}(\epsilon_i = (I - \gamma P^\pi) V^\pi) \\
\intertext{ which is precisely the limiting distribution $p_\infty$ of $\phi_i$ (again applying the change of variables formula).}
&= p_\infty (\phi_i = (I - \gamma P^\pi)^{-1} \epsilon_i = V^\pi) \end{align} So we see that the limiting distribution of $\phi_i$ is equal to the prior over value functions conditioned on the transition dynamics. \end{proof}
\section{Further discussion of features and operator decompositions}\label{sec:feature-selection}
Proto-value functions (PVFs), were first defined by \citet{mahadevan2007proto} as the eigenvectors of the \textit{incidence matrix} induced by the environment transition matrix $P$. In the ensuing years, the term PVF has been used to refer to a number of related but not necessarily equivalent concepts. To clarify our use of the term and the relationship of our decompositions of the resolvent and transition matrices of an MDP, we provide a brief discussion here; a summary is provided in Table~\ref{table:features}.
We will use $A$ to refer to the adjacency matrix of the unweighted, undirected graph induced by the matrix $P$ (i.e. $A[i,j]$ is 1 if there exists some action with nonzero probability of taking the agent from state $i$ to state $j$ or from state $j$ to state $i$, and 0 otherwise). $L_G$ will refer to the graph Laplacian based on this matrix $A$.
We can additionally consider the Laplacian of the weighted, directed graph defined by $P^\pi$; we will refer to this matrix as $L_{P^\pi}$, in reference to its dependence on the probability of transitioning. $T$ denotes the matrix defined by a collection of sampled transitions indexed by $t$, with entries $T_{it} = -1$ if the transition $t$ leaves $i$ and $+1$ if it enters state $i$.
Our first observation is that eigendecomposition and SVD are equivalent for symmetric matrices because any real symmetric matrix has an orthogonal eigenbasis; this means that performing either decomposition yields the same eigenvectors and easily related eigenvalues. Our second observation is that when $P^\pi$ is \textit{not} symmetric, its singular value decomposition and eigendecomposition may diverge; further, the relationship between the SVD of the resolvent matrix $\Psi = (I - \gamma P^\pi)^{-1}$ and of $P^\pi$ is no longer straightforward, despite the eigenspaces of the two matrices being analogous. This means that analysis of the singular value decomposition of $P^\pi$ does not immediately imply any results about the resolvent matrix.
\begin{table}[!ht]
\centering
\begin{tabular}{c|c|c}
Matrix & SVD & Eigendecomposition (ED) \\
\hline
$L_G$ & PVFs \citep{mahadevan2007proto} & Equivalent to SVD \\
$T$ & sometimes $\equiv$ ED($L_G$) \citep{machado2017laplacian} & not discussed \\
$L_{P^\pi}$ & $\neq$ ED($L_{P^\pi}$) & \citet{stachenfeld2014design} \\
$(I - \gamma P^\pi)^{-1}$ & RSBFs & $ \equiv L_{P^\pi}$ \\
$P^\pi$ & \citet{behzadian2018feature} & $\equiv L_{P^\pi}$ \\
\end{tabular}
\caption{Summary of decompositions of various matrices associated with MDP transition operators, and associated features.}
\label{table:features} \end{table}
Finally, we note that applying a uniform random walk policy may not be sufficient to guarantee that $P^\pi$ will be symmetric, and that in general it will not be possible to obtain a policy which will symmetrize the transition matrix. For example: when $G$ is a connected, non-regular graph (as is the case in many environments such as chains), there must be a node $v$ of degree $d$ adjacent to a node $v'$ of degree $d' \neq d$. A random walk policy will assign $p(v, v') = \frac{1}{d}$, while $p(v', v)$ will receive probability $\frac{1}{d'}$; thus, $P^\pi$ will not be symmetric. Fortunately, this is not a barrier to spectral analysis; the eigenvectors and eigenvalues of $P^\pi$ will still be real, as their transition matrix will be \textit{similar} to a symmetric matrix. We defer to \citet{machado2017laplacian} for a more detailed discussion of this relationship.
\section{Learning dynamics for ensemble prediction} \label{sec:ensemble-dynamics}
We provide some visualizations of the induced behaviour on features as a result of training an ensemble with multiple heads and zero reward, replicating the analysis of Section \ref{sec:reps}, to highlight how the eigendecomposition of $P^\pi$ affects the learned representations. We run our evaluations on the Four-Rooms Gridworld by initializing $\Phi \in \mathbb{R}^{105 \times 10}$ (i.e. $|\mathcal{X}| = 105$ and the number of features $d=10$) and simulating the ODE defined in Equation~\ref{eq:ensemble-phi-flow} for time $t=100$ with transition matrix $P^\pi$ defined by the uniform random policy on this Gridworld. In some cases, the features converged to zero quickly and so we show a final $t < 100$ to highlight the behaviour of the representation before it reaches zero.
We consider three variables which we permit to vary: the initialization scheme of features, in one case sampled from an isotropic Gaussian \texttt{rand} or from a randomly initialized 2-layer MLP \texttt{nn}); whether the weight matrix is fixed at initialization \texttt{fix} or permitted to follow the flow defined by Equation~\ref{eq:w-ode} \texttt{train}; and finally the number of `heads', \texttt{M}=1, 20, and 200.
In Figure \ref{fig:ensemble_predictions}, we plot the output of an arbitrary head $\mathbf{w}^m$ of the ensemble. In Figure \ref{fig:ensemble_feature0} we visualize the value of a single feature (i.e. a single column of $\Phi$).
We observe, as predicted, that for fixed heads in the overparameterized regime, the features (and the value functions they induce) converge to smooth eigenfunctions. We do not see meaningful convergence of the features trained in conjunction with a single weight vector. In contrast, the value functions and features trained in conjunction with ensembles with more heads than the feature dimension consistently resemble the eigenfunctions of $P^\pi$. When $(\mathbf{w}^m)$ are held fixed, we see convergence to smooth eigenfunctions as predicted by our theory; when $(\mathbf{w}^m)$ are permitted to vary according to the flow in Equation~\ref{eq:ensemble-w-flow}, we see convergence to the eigenfunction corresponding to the most negative eigenvalue of $P^\pi$.
\begin{figure}
\caption{Value functions learned by the ensemble head at index 0 for different training regimes. Plot titles of form (feature initialization scheme, train/fix weight matrix, number of heads in ensemble). Observe that the representation learned with fixed weights tends to converge to smoother eigenfunctions than those learned with weights that are also allowed to train.}
\label{fig:ensemble_predictions}
\end{figure} \begin{figure}
\caption{Values of ensemble feature at index 0 for different training regimes. Plot titles of form (feature initialization scheme, train/fix weight matrix, number of heads in ensemble). Observe that the representation learned with fixed weights tends to converge to smoother eigenfunctions than those learned with weights that are also allowed to train.}
\label{fig:ensemble_feature0}
\end{figure}
\section{Experimental details} \label{sec:experiment-details}
\subsection{Experimental details for Figure~\ref{fig:feature-viz}}
In our evaluations of the evolution of single feature vectors, we compute the continuous-time feature evolution defined in Equation~\eqref{eq:phi-ode}, using $P^\pi$ defined by a random walk on a simple Four-Rooms Gridworld with no reward. We use a randomly initialized representation $\Phi \in \mathbb{R}^{ |\mathcal{X}| \times 10}$, and use a single column of this matrix in our feature visualization (we observed similar behaviour in each feature). To compute trajectories, we use the SciPy ODE solver \texttt{solve\_ivp} \citep{2020SciPy}.
\subsection{Experimental details for Section~\ref{sec:feature-generalization}}
Here, we provide details of the environment used in producing the results of Section~\ref{sec:feature-generalization}. The environment is a 30-state chain, with two actions, \texttt{left} and \texttt{right}, which move the agent one state to the left or right, respectively. When the agent cannot move further left or right (due to being at an end state of the chain), the result of the corresponding action keeps the agent in the same state. There is additionally environment stochasticity of $0.01$, meaning that with this probability, a uniformly random action is executed instead. This stochasticity ensures that $P^\pi$ satisfies the conditions of Assumption~\ref{assume:value-function-conditions}. Taking the action \texttt{left} in the left-most state incurs a reward of $+2$, and taking the action \texttt{right} in the right-most state incurs a reward of $+1$; all other rewards are zero.
\subsection{Experimental details for Section~\ref{sec:deep-rl-aux}}
We modify a base Double DQN agent \citep{van2016deep} and evaluate on the ALE without sticky actions \citep{bellemare2013arcade}. Our agents are implemented in Jax \citep{jax2018github}, and are based on the DQN Zoo \citep{dqnzoo2020github}. Unless otherwise mentioned, all hyperparameters are as for the default Double DQN agent, with the exception of the epsilon parameter in the evaluation policy, which is set to 0.001 in all agents, and the optimizer, which for agents using auxiliary tasks CV, REM and Ensemble is Adam with epsilon $0.1/32^2$, and a lightly tuned learning rate; see below for further details.
Experimental results shown in bar plots, such as Figures~\ref{fig:naux} and~\ref{fig:rc_sweep}, report a ``relative score'' which is the per-game score normalized by the maximum average score achieved by any agent or configuration. The same, per-game, normalization values are used for all such figures.
\textbf{Auxiliary task details.} In this section, we describe the implementations of all auxiliary tasks considered in the main text. \begin{itemize}
\item \emph{QR-DQN.} The implementation and hyperparameters match QR-DQN-1 in \citet{dabney2018distributional}.
\item \emph{DDQN+RC.} We use a many-head DQN network which is identical to the standard neural network used for DQN, except that the output dimension is $(M+1) \times |\mathcal{A}|$ instead of $|\mathcal{A}|$, where $M$ is the number of auxiliary heads. Random cumulants are generated using a separate neural network with the same architecture as a standard DQN, but with output dimension equal to the number of auxiliary heads. The width of the Huber loss for each auxiliary head is equal to the number of auxiliary tasks. Let $\phi(x) \in \mathbb{R}^M$ be the output of the cumulant network given input observation $x$, with $M$ the number of auxiliary heads. Then the cumulant for auxiliary head $m$, at time step $t$, is given by $c_t = s \times (\phi(x_{t+1}) - \phi(x_t))$, where $s \in \mathbb{R}$ is a scaling factor. We performed a small hyperparameter sweep over scaling factors in $\{1, 10, 100, 500\}$, finding $s = 100$ to provide the best performance and use this value for all reported experiments. Note that this auxiliary task and the details are nearly identical to the \emph{CumulantValues} auxiliary task of \citet{dabney2020value}, except that we do not pass the values through a tanh non-linearity as this did not appear to have any impact in practice. We performed a hyperparameter sweep over learning rates and gradient norm clipping for this agent, considering learning rates $\{0.00025, 0.0001, 0.00005\}$ and gradient clipping in $\{10, 40\}$. We found that a learning rate of $0.00005$ and gradient norm clipping of $40$ to work best and use these values for all experiments.
\item \emph{DDQN+REM.} We use a many-head variant of Double DQN, with heads trained according to the REM loss of \citet{agarwal2019striving}. For the agent's policy, an argmax over a uniform average of the heads is used. We swept over learning rates of $0.0001$ and $0.00005$, generally finding $0.00005$ to perform best.
\item \emph{DDQN+Ensemble.} As for the REM auxiliary task, we use a many-head variant of Double DQN. Each head is trained using its own double DQN loss, and the resulting losses are averaged. For the agent's policy, an argmax over a uniform average of the heads is used. We swept over learning rates of $0.0001$ and $0.00005$, generally finding $0.00005$ to perform best. \end{itemize}
\textbf{Modified dense-reward games.} We modified four Atari games (Pong, MsPacman, Seaquest, and Q*bert) to obtain sparse, harder versions of these games to test the performance of random cumulants and other auxiliary tasks. The details of these games are given below. In each case a low-valued, commonly encountered reward is `censored', which means that during training the agent observes a reward of $0$ instead of the targeted reward. When evaluated, and thus for all empirical results reported, the standard uncensored rewards are reported. \begin{itemize}
\item \emph{Sparse Pong.} All negative rewards are censored (i.e. set to 0 before being fed to the agent), so the agent receives a reward of +1 for scoring against the opponent, but no reward when it concedes a point to the opponent. As $0$, $1$, and $-1$ are the only rewards in Pong, this modification makes Pong significantly harder. The agent can no longer learn to `avoid losing points`, but can only improve by learning to score points directly.
\item \emph{Sparse MsPacman.} All rewards less than or equal to 10 are censored. This corresponds to rewards for the numerous small pellets that MsPacman eats, but not the larger pellets or ghosts. Each level ends when all of the small pellets are consumed, thus, by hiding these from the agent we may have significantly changed the primary incentive for the agent to advance the game.
\item \emph{Sparse Seaquest.} All rewards less than or equal to 20 are censored. This corresponds to the rewards for shooting the sharks underwater, but not the rewards for picking up divers or surfacing. Additionally, even the rewards for sharks increase beyond this level, and thus become visible, once the agent has surfaced and collected enough divers.
\item \emph{Sparse Q*bert.} All rewards less than or equal to 25 are censored. These are the rewards for flipping the colour of a tile, which is the primary source of reward and the mechanism for advancing to the next level of the game. Once all tiles are flipped, the agent will go to the next level. However, the agent can still observe rewards for going to the next level and for dispatching the enemies. \end{itemize}
As described in the main text, we found that the sparse versions of MsPacman, Seaquest, and Q*bert were too difficult for any agent we tested to achieve a reasonable level of performance. In Figure~\ref{fig:sparse-learning-curves}, we display the performance of several auxiliary tasks on these games, noting that the performance achieved is extremely low in comparison to the agents trained on the standard versions of these games (see Section~\ref{sec:deep-rl-aux}).
\begin{figure}
\caption{Learning curves on sparsified MsPacman (left), sparsified Seaquest (centre), and sparsified Q*bert (right).}
\label{fig:sparse-learning-curves}
\end{figure}
\textbf{Hyperparameter sweeps.} In Figure~\ref{fig:rc_sweep} we vary the weight of the auxiliary loss for the random cumulants agents, with the aim of understanding how this hyperparameter affects each method's performance. Next, in Figures~\ref{fig:ens_sweep} and~\ref{fig:rem_sweep} we present the results of a hyperparameter sweep for Ensemble and REM respectively. For these two, since there is no separate auxiliary loss as in RC, we vary number of heads and the learning rate. Results presented in the main text use the best settings for each algorithm found from these sweeps.
\begin{figure}
\caption{Results of hyper-parameter sweep for Random Cumulant (RC) method, where each row is for a different value of multiplicative scale applied to the auxiliary losses and each bar corresponds to the number of auxiliary heads ($M$). Note that the first row of results corresponds to initializing a network with the auxiliary heads, but setting the weight to zero, effectively disabling the auxiliary task.}
\label{fig:rc_sweep}
\end{figure}
\begin{figure}
\caption{Results of hyper-parameter sweep for the Ensemble method, where each row is for a different learning rate and each bar corresponds to the number of auxiliary heads ($M$).}
\label{fig:ens_sweep}
\end{figure}
\begin{figure}
\caption{Results of hyper-parameter sweep for the REM method, where each row is for a different learning rate and each bar corresponds to the number of auxiliary heads ($M$).}
\label{fig:rem_sweep}
\end{figure}
\chapter{Capacity loss}
\section{Proofs} \subsection{Estimator consistency} \label{appx:consistency} We here show that our estimator of the agent's \text{feature rank}\xspace is consistent. First recall
\begin{align} \left(\frac{1}{\sqrt{n}} \Phi_n\right)^\top \left(\frac{1}{\sqrt{n}} \Phi_n\right) &= \frac{1}{n} \sum_{i=1}^n \phi(x_i) \phi(x_i)^\top \, . \\ \intertext{The following property of the expected value holds} \mathbb{E}_{x \sim P} [ \phi(x)\phi(x)^\top ] &= \mathbb{E}\left\lbrack \frac{1}{n} \sum_{i=1}^n \phi(x_i)\phi(x_i)^\top \right\rbrack \, . \\ \intertext{It is then straightforward to apply the strong law of large numbers. To be explicit, we consider an element of $M = \mathbb{E}[\phi \phi^\top] $, $M_{ij}$. } \mathbb{E}[(\phi(x) \phi(x)^\top)_{ij}] &= M_{ij} = \mathbb{E}[\phi_i(x) \phi_j(x)]
\implies \sum_{k=1}^n \frac{1}{n} \phi_i(x_k)\phi_j(x_k) \overset{a.s.}{\rightarrow} M_{ij} \, . \end{align}
Since we have convergence for any $M_{ij}$, we get convergence of the resulting matrix to $M$. Because the squared singular values of $\frac{1}{\sqrt{n}}\Phi_n$ are the eigenvalues of $M_n$, and the eigenvalues are continuous functions of the matrix, the eigenvalues of $M_n$ converge to those of $M$ almost surely. Then for almost all values of $\epsilon$, the threshold estimator $N(\lambda_1, \dots, \lambda_k; \epsilon) = | \{\lambda_i > \epsilon\} |$ will converge to $N(\text{spec}(M); \epsilon )$. Specifically, the estimator will be convergent for all values of $\epsilon$ which are not eigenvalues of $M$ itself.
\subsection{Feature collapse case study: quantile regression} \label{appx:feature_theory} We apply similar analysis to that of Chapter~\ref{chp:rl-dynamics} to better understand the effect of sparse-reward environments on representation collapse. To do so, we return to the setting where $\Phi_t$ are non-parametric features and $w_t$ a linear function approximator which jointly parameterize a value function $V_t = \langle \Phi_t(x), w_t \rangle$. We recall the dynamics \begin{equation}
\partial_t \Phi_t = \alpha (\gamma P^\pi - I) \Phi_t (w_t w_t^\top) + R^\pi w_t^\top \end{equation} and \begin{equation}
\partial_t w_t = \beta \Phi_t^\top [(\gamma P^\pi - I)\Phi_t w_t + R^\pi] \, , \end{equation} where $P^\pi \in \mathbb{R}^{\mathcal{X} \times \mathcal{X}}$ is the matrix of state-transition probabilities under $\pi$, and $R^\pi \in \mathbb{R}^{\mathcal{X}}$ is the vector of expected rewards.
In value-based deep RL, we model a Q-function which takes as input an observation $\mathbf{x}$ and outputs a vector $Q(s, a_i)_{i=1}^{n_a} \in \mathbb{R}^{n_a}$. This resembles the setting of \textit{ensemble prediction}, whose dynamics we also recall here. \begin{align}
\partial_t \Phi^{M}_t
\!= & \alpha\! \sum_{m=1}^M (R^\pi\! +\! \gamma P^\pi \Phi^{M}_t w_t^{m}\! -\! \Phi^{M}_t w_t^{m}) (w_t^{m} )^\top \, , \\
\partial_t w_t^{m} = & \beta (\Phi^{M}_t)^\top (R^\pi + \gamma P^\pi \Phi^M_t w^{m}_t - \Phi^M_t w^{m}_t ) \, . \end{align}
The ensemble learning regime also bears similarity to the quantile regression DQN (QR-DQN) objective \citep{dabney2018distributional}, which learns to fit a set of quantiles to the distribution taken by the return when treated as a random variable. Each quantile $\tau_i \in (0, 1)$ is trained using a quantile regression loss, of the form \begin{equation*}
\mathcal{L}^{\tau}_{\mathrm{QR}}(\theta) := \mathbb{E}_{\hat{Z} \sim Z} [ \rho_\tau(\hat{Z} - \theta)] \quad \text{ where } \rho_\tau(u) = u(\tau - \delta_{u < 0}), \; \forall u \in \mathbb{R}. \end{equation*} This loss is known to yield unbiased gradients; however, it has the undesirable property that the gradients of $\mathcal{L}^{\tau}_{\mathrm{QR}}$ remain constant even as $\theta \rightarrow \tau$, which can result in pathological convergence properties. The QR-DQN objective addresses this by implementing a Huber loss \citep{huber64robust}, whose gradients tend to zero at its minimum. \begin{equation} \mathcal{L}_{\kappa}(u) = \begin{cases}
\frac{1}{2}u^2 & \text{if }|u| \leq \kappa \\
\kappa(|u| - \frac{1}{2}\kappa) & \text{otherwise}. \end{cases} \end{equation}
For a sufficiently large number of quantiles and under similar conditions as in Theorem~\ref{thm:infinite-heads} the dynamics induced by this objective bear marked resemblance to those of the ensemble prediction setting. In particular, when no reward is observed in the environment, we again obtain that the features will converge to the zero vector. The setting of this result is distinct from that of deep neural network representation dynamics, as neural networks use discrete optimization steps, finite learning rates, and typically do not leverage linear ensembles. However, we emphasize two crucial observations that suggest the intuition developed in this setting may be relevant: first, in sparse reward environments the representation will be pushed to zero along dimensions spanned by the linear weights used to compute outputs. Once sufficiently many independent weight vectors are being used to make predictions, this effectively forces \textit{every} dimension of the representation to fit the zero vector output. We would therefore expect representation collapse to be particularly pronounced in the QR-DQN agents trained on sparse-reward environments, as in this setting we obtain many independently initialized heads all identically trying to fit the zero target.
\begin{restatable}{theorem}{thmQrdqn}\label{thm:qr-dqn} Let $\bm{\tau}=(\tau_i)_{i=1}^M$ be a set of quantile functions $\tau_i : \mathcal{X} \rightarrow \mathbb{R}$ with corresponding estimators $(\hat{\tau}^i)_{i=1}^M$ of the form $\hat{\bm{\tau}} = \hat{\tau}^i(\mathbf{x}) = \langle \phi(\mathbf{x}), \mathbf{w}^i \rangle$ for $\mathbf{w}^i \in \mathbb{R}^d$, $\phi: \mathcal{X} \rightarrow \mathbb{R}^d$. Let $\pi$ be some policy in an MDP $\mathcal{M}$, with $R^\pi = \mathbf{0}$, and $P^\pi$ be the corresponding transition function. Let $\Phi^M_t$, $\mathbf{w}^i_t$ follow the dynamics \begin{align}
\partial_t \Phi_t &= \beta^M \nabla_\Phi\mathcal{L}_{\kappa}(\hat{\bm{\tau}})\\
\partial_t \mathbf{w}_t &= \alpha^M \nabla_{\mathbf{w}}\mathcal{L}_{\kappa}(\hat{\bm{\tau}})\;. \end{align} Then if $\alpha^M = 0$ or $\alpha^M = O(\frac{1}{M})$, letting $\Phi_\infty = \lim_{t \rightarrow \infty} \Phi_t$, we obtain \begin{equation}
\lim_{M \rightarrow \infty} \Phi_{\infty} \rightarrow \bm{0}. \end{equation} \end{restatable}
\begin{proof}
We break the derivation of this result into two cases. We first observe that, when $\alpha=0$ and $\beta=1$, the QR-DQN update reduces $\|V_t\|_\infty$ by either $\kappa$ if $\|(\gamma P^\pi - I) V_t\|_\infty > \kappa$ or $\gamma \|V_t\|_\infty$ if $\|(\gamma P^\pi - I ) V_t\|_\infty < \kappa$. In the latter case, the QR-DQN objective is identical to the TD updates studied in Chapter~\ref{chp:rl-dynamics}, and we obtain convergence of $\Phi$ to zero in the case of infinitely many heads, and in the case of $M=d$ heads with orthogonal initialization aligned with the features.
In the case where $M \rightarrow \infty$, when $w^i$ are initialized to scale with $\frac{1}{\sqrt{M}}$, we obtain straightforwardly that $\|\Phi_0 w^i\| \overset{P}{\rightarrow} 0$. Thus, the limiting probability that any error falls outside the $\kappa$ cutoff of the Huber loss will be zero, and the dynamics on $\Phi$ will be identical to the TD dynamics studied previously, allowing us to apply the result of Theorem~\ref{thm:infinite-heads}.
In the case where $M=d$ and the $w^i$ are initialized such that $\langle \phi^i, w^j \rangle = \delta_{i,j} \phi^i$, we need only show that the dynamics followed by quantile regression are sufficiently well-behaved to push $\|V_t\|_\infty < \kappa$ for some finite $t$.
We note that in this case we can express the dynamics as follows, and for the rest of this discussion assume without loss of generality that $\phi^i_t(x) > 0 \; \forall x \in \mathcal{X}$. \begin{align} \partial_t \Phi_t &= - \sum_{i=1}^M \min ( (I - \gamma P^\pi)\Phi_t w^i, \kappa) (w^i)^\top \\ &= - \sum_{i=1}^M \min ( (I - \gamma P^\pi)\phi^i_t w^i, \kappa) (w^i)^\top \\ \implies \partial_t \phi^k_t &= -\min( (I - \gamma P^\pi) \phi^k_t w^k, \kappa) (w^k)^\top \\ &= -\min ((I - \gamma P^\pi) \phi^k_t, \kappa) \end{align}
Let $\phi^i_t$ be a feature vector at some time $t$. Let $x_{\max}$ maximize $\{|\phi^i_t(x)| :x \in \mathcal{X} \}$. Then we know that the time-derivative on $\partial_t \phi^i_t(x_{\max})$ must have the opposite sign to $\phi^i_t(x_{\max})$, as it takes the following form. \begin{align}
\partial_t \phi^i_t(x_{\max}) &= - \min( (I - \gamma P^\pi)\phi^i_t(x_{\max}) , \kappa) \\
\mathrm{sign}(\partial_t \phi^i_t(x_{\max})) &= - \mathrm{sign}(\phi^i_t(x_{\max})) \end{align}
Now, because this applies to the maximal value of $\phi^i_t$ over all states, we get that the function $\tilde{\phi}^{i}(t) = \max_{x \in \mathcal{X}} \phi^i_t(x)$ is decreasing. The rate of this decrease is either constant, in which case it is equal to $\kappa$, or we get the standard exponential convergence described in Equation~\ref{eq:td_dynamics}.
\end{proof}
\keyinsight{In the presence of many prediction targets, such as those induced by a quantile regression objective, sparse-reward environments will induce low dimensional feature representations in deep RL agents.}
\section{Sequential supervised learning} \label{appx:supervised}
\subsection{Details: target-fitting capacity on non-stationary MNIST} \label{appx:mnist-details}
In addition to our evaluations in the Atari domain, we also consider a variant of the MNIST dataset in which the labels change over the course of training. \begin{itemize}[leftmargin=0.5cm]
\item \textbf{Inputs and Labels:} We use 1000 randomly sampled input digits from the MNIST dataset and assign either binary or random targets.
\item \textbf{Distribution Shift:} We divide training into $N=30$ or $N=10$ iterations depending on the structure of the target function. In each iteration, a target function is randomly sampled, and the network's parameters obtained at the end of the previous iteration are used as the initial values for a new optimization run. We use the Adam \citep{kingma2014adam} optimizer with learning rate \texttt{1e-3}, and train to minimize the mean squared error between the network outputs and the targets for either 3000 or 5000 steps depending on the nature of the target function.
\item \textbf{Architecture:} we use a standard fully-connected architecture with ReLU activations, and vary the width and depth of the network. The parameters at the start of the procedure are initialized following the defaults in the Jax Haiku library. \end{itemize}
We note that the dataset sizes, training budgets, and network sizes in the following experiments are all relatively small. This was chosen to enable short training times and decrease the computational budget necessary to replicate the experiments. The particular experiment parameters were selected to be the fastest and cheapest settings in which we could observe the capacity loss phenomenon, while still being nontrivial tasks. In general, we found that capacity loss is easiest to measure in a `sweet spot' where the task for a given architecture is simple enough for a freshly-initialized network to attain low loss, but complex enough that the network cannot trivially solve the task. In the findings of the following section, we see how some of the larger architectures do not exhibit capacity loss on `easier' target functions, but do on more challenging ones that exhibit less structure. This suggests that replicating these results in larger networks will be achievable, but will require re-tuning the task difficulty to the larger network's capacity.
\subsection{Additional evaluations} \label{appx:cap-loss-supervised} \begin{figure}
\caption{Mean squared error at the end of training on each iteration of the \textbf{hash-MNIST} task. Target-fitting error increases over time in smaller networks, but increasing the depth or width of the network slows down capacity loss, enabling positive transfer in the largest networks we studied.}
\label{fig:hash-mnist}
\end{figure} We expand on the MNIST target-fitting task shown in the main text by considering how network size and target function structure influences capacity loss.
\begin{itemize}
\item \textbf{Random-MNIST} (smooth) this task uses the images from the MNIST dataset as inputs. The goal is to perform regression on the outputs of a randomly initialized, fixed neural network. We use a small network for this task, consisting of two width-30 fully connected hidden layers with ReLU activations which feed into a final linear layer which outputs a scalar. Because the network outputs are small, we scale them by 10 so that it is not possible to get a low loss by simply predicting the network's bias term. This task, while randomly generated, has some structure: neural networks tend to map similar inputs to similar outputs, and so the inductive bias of the targets will match that of the function approximator we train on them.
\item \textbf{Hash-MNIST} (non-smooth) uses the same neural network architecture as the previous task to generate targets, however rather than using the scaled network output as the target, we multiply the output by 1e3 and feed it into a sine function. The resulting targets no longer have the structure induced by the neural network. This task amounts to memorizing a set of labels for the input points.
\item \textbf{Threshold-MNIST} (sparse) replaces the label of an image with a binary indicator variable indicating whether the label is smaller than some threshold. To construct a sequence of tasks, we set the threshold at iteration $i$ to be equal to $i$. This means that at the first iteration, the labels are of the form $(x,0)$ for all inputs $x$. At the second iteration, they are of the form $(x, \delta(y<1) )$, where $y$ is the digit in the image $x$, and so on. \end{itemize} We consider MLP networks of varying widths and depths, noting that the network architecture used to generate the random targets is fixed and independent of the approximating architecture. We are interested in evaluating whether factors such as target function difficulty, network parameterization, and number of target functions previously fit influence the network's ability to fit future target functions. Our results are shown in Figure~\ref{fig:hash-mnist},~\ref{fig:random-mnist}, and \ref{fig:threshold-mnist}. We visualize srank and $\text{feature rank}\xspace$ of the features output at the network's penultimate layer, in addition to the loss obtained at the end of each iteration.
\begin{figure}
\caption{Mean squared error after 2e3 training steps on the \textbf{random-MNIST} task. Target-fitting error increases over time in under-parameterized networks, but increasing the depth or width of the network slows down capacity loss, enabling positive transfer in the largest network we studied.}
\label{fig:random-mnist}
\end{figure} \begin{figure}
\caption{Mean squared error after 2e3 training steps on the \textbf{threshold-MNIST} task. Target-fitting error increases over time in under-parameterized networks, but increasing the depth or width of the network slows down capacity loss, enabling positive transfer in the largest network we studied.}
\label{fig:threshold-mnist}
\end{figure} \subsection{Effect of \text{InFeR}\xspace on target-fitting capacity in MNIST} \label{appx:infer-mnist} In addition to our study of the Atari suite, we also study the effect of \text{InFeR}\xspace on the non-stationary MNIST reward prediction task with a fully-connected architecture; see Figure~\ref{fig:pyoi_on_mnist}. We find that it significantly mitigates the decline in target-fitting capacity demonstrated in Figure~\ref{fig:mnist-cap}.
\begin{figure}
\caption{Effect of adding \text{InFeR}\xspace to the regression objective in a random reward prediction problem on the non-stationary MNIST environment studied previously. We see that the \text{InFeR}\xspace objective produces networks that can consistently outperform those trained with a standard regression objective, exhibiting minimal capacity loss in comparison to the same network architecture trained on the same sequence of targets. }
\label{fig:pyoi_on_mnist}
\end{figure} \section{Atari evaluations} \label{appx:atari} We now present full evaluations of many of the quantities described in the corresponding chapter, along with a study of the sensitivity of InFeR to its hyperparameters. We use the same training procedure for all of the figures in this section, loading agent parameters from checkpoints to compute the quantities shown.
\subsection{Hyperparameter sensitivity of InFeR in deep reinforcement learning agents} \label{appx:hypers} We report results of hyperparameter sweeps over the salient hyperparameters relating to InFeR, so as to assess the robustness of the method. For both the DDQN and Rainbow agents augmented with InFeR, we sweep over the number of auxiliary predictions (1, 5, 10, 20), the cumulant scale used in the predictions (10, 100, 200), and the scale of the auxiliary loss (0.01, 0.05, 0.1, 0.2). We consider the capped human-normalized return across four games (Montezuma's Revenge, Hero, James Bond, and MsPacman), and run each hyperparameter configuration with 3 seeds. Results are shown in Figure~\ref{fig:ddqn-sweep} for the DDQN agent; we compare performance as each pair of hyperparameters varies (averaging across the other hyperparameter, games, and seeds, and the last five evaluation runs of each agent). Corresponding results for Rainbow are given in Figure~\ref{fig:rainbow-sweep}.
\begin{figure}
\caption{Hyperparameter sweeps for the DDQN+InFeR agent. Each contour plot shows average capped human-normalized score at the end of training marginalized over all hyperparameters not shown on its axes.}
\label{fig:ddqn-sweep}
\end{figure}
\begin{figure}
\caption{Hyperparameter sweeps for the Rainbow+InFeR agent. Each contour plot shows average capped human-normalized score at the end of training marginalized over all hyperparameters not shown on its axes.}
\label{fig:rainbow-sweep}
\end{figure} \label{appx:evals}
\begin{itemize}[leftmargin=0.5cm]
\item \textbf{Agent:} We train a Rainbow agent \citep{hessel2018rainbow} with the same architecture and hyperparameters as are described in the open-source implementation made available by \citet{dqnzoo2020github}. We additionally add InFeR, as described in Section~\ref{sec:pyoi}, with 10 heads, gradient weight 0.1 and scale 100.
\item \textbf{Training:} We follow the training procedure found in the Rainbow implementation mentioned above. We train for 200 million frames, with 500K evaluation frames interspersed every 1M training frames. We save the agent parameters and replay buffer every 10M frames to estimate feature dimension and target-fitting capacity. \end{itemize}
\subsection{Feature rank} \label{appx:feature-rank-atari}
We first extend the results shown in Figure~\ref{fig:effdim_vanilla} to two additional games: Seaquest, and a sparsified version of Pong in which the agent does not receive negative rewards when the opponent scores. In these settings, we stored agent checkpoints once every 10M frames in each 200M frame trajectory, and used 5000 sampled inputs from the agent's replay buffer to estimate the feature rank, using the cutoff $\epsilon=0.01$. Results are shown in Figure~\ref{fig:full-effdim-perf-apx}. \begin{figure}
\caption{Feature rank and performance of RL agents on demonstrative Atari environments.}
\label{fig:full-effdim-perf-apx}
\end{figure}
We further evaluate the evolution of feature rank in agents trained on all 57 games in the arcade learning environment in Figure~\ref{fig:effdim_all}. We find that the decline in dimension after the first checkpoint at 10M frames shown across the different agents in the selected games also occurs more generally in Rainbow agents across most environments in the Atari benchmark. We also show that in most cases adding InFeR mitigates this phenomenon. Our observations here do not show a uniform decrease in feature rank or a uniformly beneficial effect of InFeR. The waters become particularly muddied in settings where neither the Rainbow nor Rainbow+InFeR agent consistently makes learning progress, such as in Tennis, Solaris, and Private Eye. It is outside the scope of this work to identify precisely why the agents do not make learning progress in these settings, but it does not appear to be due to the type of representation collapse that can be effectively prevented by InFeR.
\textbf{Procedure.} We compute the feature rank by sampling $n=50000$ transitions from the replay buffer and take the set of origin states as the input set. We then compute a $n \times d$ matrix whose row $i$ is given by the output of the penultimate layer of the neural network given input $S_i$. We then take the singular value decomposition of this matrix and count the number of singular values greater than $0.01$ to get an estimate of the dimension of the network's representation layer.
In most games, we see a decline in feature rank after the first checkpoint at 10M frames. Strikingly, this decline in dimension holds even in the online RL setting where the agent's improving policy presumably leads it to observe a more diverse set of states over time, which under a fixed representation would tend to increase the numerical rank of the feature matrix. This indicates that even in the face of increasing state diversity, agents' representations face strong pressure towards degeneracy. It is worth noting, however, that the agents in dense-reward games do tend to see their feature rank increase significantly early in training; this is presumably due to the network initially learning to disentangle the visually similar states that yield different bootstrap targets.
\begin{figure}
\caption{feature rank of agent representations over the course of training on all 57 games in the Atari benchmark. We compare Rainbow against Rainbow+InFeR. }
\label{fig:effdim_all}
\end{figure}
\subsection{Target-fitting capacity early in training} \label{appx:tf-capacity-atari}
In this section we examine the target-fitting capacity of neural networks trained with DQN, QR-DQN, and Rainbow over the course of $50$ million environment frames on five games in the Atari benchmark (Amidar, Montezuma's Revenge, Pong, Bowling, and Hero). Every $1$ million training frames we save a checkpoint of the neural network weights and replay buffer. For each checkpoint, we generate a random target network by initializing network weights with a new random seed. We then train the checkpoint network to predict the output of this random target network for $10000$ mini-batch updates (batch size of $32$) under a mean squared error loss, for states sampled from the first $100,000$ frames in the checkpoint's replay buffer. Furthermore, we repeat this for $10$ seeds used to initialize the random target network weights.
The results of this experiment are shown in Figure~\ref{fig:atari_target_fit_cap_comb} (in orange), where the solid lines show means and shaded regions indicate standard deviations over all seeds (both agent seeds ($5$) and target fitting seeds ($10$), for a total of $50$ trials). We also show srank and $\text{feature rank}\xspace$ of the features output at the network's penultimate layer for each of the checkpointed networks used for target fitting. These are computed using the network features generated from $1000$ states sampled randomly from that checkpoint's replay buffer. For feature rank, averages and standard deviations are only over the $5$ agent seeds.
\begin{figure}
\caption{Mean squared error, after $10000$ training steps for the target-fitting on random network targets. We also show the corresponding feature rank of the pre-trained neural network (before target-fitting).}
\label{fig:atari_target_fit_cap_comb}
\end{figure}
\subsection{Performance}
We provide full training curves for both Rainbow and Rainbow+InFeR on all games in Figures~\ref{fig:pyoirainbowhncap} \& \ref{fig:pyoirainbowhncap-double} (capped human-normalized performance), and \ref{fig:pyoirainboweval} \& \ref{fig:pyoirainboweval-double} (raw evaluation score). We also provide evaluation performance curves for DDQN and DDQN+InFeR agents in Figure~\ref{fig:ddqn-eval}.
\begin{figure}
\caption{Full evaluation of capped human-normalized performance on Atari benchmarks for the default Rainbow architecture.}
\label{fig:pyoirainbowhncap}
\end{figure}
\begin{figure}
\caption{Full evaluation of capped human-normalized performance on Atari benchmarks in the double-width Rainbow architecture.}
\label{fig:pyoirainbowhncap-double}
\end{figure}
\begin{figure}
\caption{Full evaluation of raw scores on Atari benchmarks for the default Rainbow architecture.}
\label{fig:pyoirainboweval}
\end{figure}
\begin{figure}
\caption{Full evaluation of raw scores on Atari benchmarks for the double-width Rainbow architecture.}
\label{fig:pyoirainboweval-double}
\end{figure}
\begin{figure}
\caption{Evaluations of the effect of \text{InFeR}\xspace on performance of a Double DQN agent. Overall we see a slight improvement in average performance over all games, alongside a significant improvement in Montezuma's Revenge.}
\label{fig:ddqn-eval}
\end{figure} \chapter{Interference and generalization}
\section{Proofs}
\subsection{Proofs of main results} \label{apx:proofs} \convergence*
\begin{proof} Recall we assume the following dynamical system
\begin{align*}
\partial_t V_t &= -(I - \gamma P^\pi)V_t + R
\intertext{Inducing the trajectory}
V_t &= \exp( - t(I - \gamma P^\pi)) (V_0 - V^\pi) + V^\pi
\intertext{As we assume $P^\pi$ is diagonalizable, this implies that $(I - \gamma P^\pi)$ is also diagonalizable. Let $u_1, \dots, u_n$ denote the right eigenvectors of $P^\pi$ with corresponding eigenvalues $\lambda_1 \ge \dots \ge \lambda_n$. Let $V_0 = \sum \alpha^0_i u_i$. }
V_t &= \sum \alpha_i^t u_i \\
&= \exp (-t(I - \gamma P^\pi)) \Big( \sum (\alpha_i^0 - \alpha^\pi_i) u_i \Big) + \sum \alpha^\pi_i u_i \\
&= \sum \exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi ) u_i + \sum \alpha_i^\pi u_i
\intertext{Now, we consider the value of $V_t - V^\pi$ along each coordinate. Note that we have not assumed an orthogonal eigenbasis, thus cannot speak directly to the norm of the projection of this difference onto the eigenspace corresponding to each eigenvector $\lambda_k$. However, treating the eigendecomposition as a basis, we can discuss how the coordinates $\alpha^t_i$ of the value function $V_t$ converge with respect to this basis.}
|V_t - V^\pi|[i] = |\alpha^t_i - \alpha^\pi_i| &= | \exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi) + \alpha_i^\pi - \alpha^\pi_i| \\
&=|\exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi) | = \exp(-t(1-\gamma \lambda_i)) | (\alpha_i^0 - \alpha_i^\pi) | \end{align*} We conclude by noting that for large values of $\lambda_i$, the exponential term $\exp ( - t(1 - \gamma \lambda_i)) $ will decay more slowly as a function of $t$ than for smaller values of $\lambda_i$. Thus, these coordinates (which correspond to non-smooth functions over the state space) will converge fastest. When the eigenvectors form an orthogonal basis, as is the case for symmetric $P^\pi$, we can go further and observe that this convergence will apply to the norm of the projection of the value function into the corresponding eigenspace. Thus for symmetric $P^\pi$, we obtain the following stronger convergence result, where $U_k$ denotes the eigenspace corresponding to the eigenvalue $\lambda_k$. \begin{equation}
\| \Pi_{U_k} ( V_t - V^\pi ) \| = \exp(-t (1 - \gamma \lambda_k)) \| \Pi_{U_k} (V_0 - V^\pi ) \| \end{equation} \end{proof} \tderror* Let $V_0 = \sum \alpha_i v_i$. Then, letting $V_t$ be defined as in Equation~\ref{eq:td_dynamics}. \begin{equation} \mathrm{TD}(V_t) \leq \sum_{i=1}^n \exp(-2t(1-\gamma \lambda_i))( \alpha^\pi_i - \alpha_i^0)^2 (1-\gamma \lambda_i)^2 \; . \end{equation} \begin{proof}
By our assumption on the diagonalizability of $P^\pi$, we can leverage the previous result on the coordinates of $V_t$. \begin{align*}
V_t - V^\pi &= \sum \exp(-t(1-\gamma \lambda_i)) (\alpha_i^0 - \alpha_i^\pi ) u_i \\
\intertext{We then bound the TD error as follows.}
\|V_t - \gamma P^\pi V_t - R \|^2 &= \| V_t - \gamma P^\pi V_t + \gamma P^\pi V^\pi -\gamma P^\pi V^\pi -R \|^2 \\
&= \| V_t - \gamma P^\pi V^\pi - R - \gamma P^\pi (V_t - V^\pi) \|^2 \\
\intertext{Since $V^\pi = R + \gamma P^\pi V^\pi$, we obtain the following.}
&= \| (I - \gamma P^\pi) (V_t - V^\pi )\|^2 \\
&= \| \sum (1 - \gamma \lambda_i) (\alpha_i^t - \alpha_i^\pi) u_i \|^2 \\
&\leq \sum (\alpha^\pi_i - \alpha^t_i)^2(1-\gamma \lambda_i)^2 \end{align*} The remainder follows a straightforward substitution. \end{proof}
\theoremsecond*
\begin{proof} While our prior analysis has considered the continuous time system $\tilde{\theta}_t$, this does not perfectly approximate the discrete system $\theta_t$. When a fixed step size is used, the first-order continuous-time approximation accrues error roughly proportional to $\alpha t$. We then follow the procedure of \citet{barrett2021implicit}, applying a Taylor expansion to the evolution of $\tilde{\theta}_t$ with respect to time. We will use the notation $\tilde{\theta}(t)$ to denote the explicit dependence of $\tilde{\theta}$ as a function of time. \begin{align}
\tilde{\theta}(\alpha t) &= \tilde{\theta}(0) + \sum \frac{(\alpha t)^n}{n!} \theta^{(n)}(0) \\
&= \tilde{\theta}(0) + \alpha t f(\tilde{\theta}(0)) + \frac{(\alpha t)^2}{2} \nabla_\theta f \cdot f(\tilde{\theta}(0)) +O(\alpha^3)\\
&= \tilde{\theta}(0) + \alpha t f(\tilde{\theta}(0)) + \frac{(\alpha t)^2}{2} f_1(\theta(0)) +O(\alpha^3) \\
\intertext{Relating this back to the discrete system $\theta_t$}
\theta_{1} &= \theta_0 + \alpha f(\theta_0) = \tilde{\theta}(0) + \alpha f(\tilde{\theta}(0)) \\
\theta_1 &= \tilde{\theta}(1) - \frac{\alpha^2}{2}f_1(\tilde{\theta}(0)) + O(\alpha^3)
\intertext{Thus, the system $\partial_t \check{\theta}_t = f(\check{\theta}_t) + \alpha^2/2 f_1( \check{\theta}_t)$ satisfies}
\theta_1 &= \check{\theta}(1) + O(\alpha^3) \end{align}
We begin by observing that $\nabla_\theta \| V_\theta - \square T V_\theta \|^2 = (V_\theta - T V_\theta) \cdot \nabla_\theta V_\theta = f(\theta)$. \begin{align}
\theta_n &= \theta_0 + \alpha n f(\theta_0) + (\alpha n) ^2/2 \nabla_\theta f (\theta_0) \cdot f(\theta_0) + O( (\alpha n)^3) \\
&= \theta_0 + \alpha n f(\theta_0) + \frac{(\alpha n)^2}{2} f_1(\theta_0) + O((n\alpha)^3)\\
\intertext{We then express $f_1(\theta)$ as follows.}
f_1(\theta_0) &= \nabla_\theta[ f (\theta_0)] \cdot [f(\theta_0)] \\
&= [\nabla^2_w V_\theta \cdot ((\gamma P^\pi - I)V_\theta + r) + \nabla_\theta V_\theta \cdot ((\gamma P^\pi - I) \nabla_\theta V_\theta)][f(\theta)] \\
&= [\nabla_\theta^2 V_\theta \cdot ( (\gamma P^\pi - I)V_\theta + r) + \nabla_\theta V_\theta \cdot \nabla_\theta V_\theta][f(\theta)] \\ & \quad + \gamma [\nabla_\theta V_\theta P^\pi \nabla_\theta V_\theta][f(\theta)] \\
\intertext{Noting that the left hand side term is equal to the gradient of the gradient norm penalty for the stop-gradient version of the TD regression problem, we simplify as follows:}
&= \frac{1}{2}\nabla_\theta \| \nabla_\theta \frac{1}{2}\|V_\theta - \square T^\pi V_\theta\|^2 \|^2 + \gamma [\nabla_\theta V_\theta \cdot P^\pi \cdot \nabla_\theta V_\theta][f(\theta)] \end{align} We note that, unlike in the stochastic gradient descent setting, $f_1$ does not correspond to a gradient of any function. Instead, it corresponds to the second-order we would get for a frozen target, which corresponds to a gradient norm penalty, plus a term that measures the alignment of the gradients at each state and its expected successor. Intuitively, both of these terms minimize the `variance' in the loss induced by noisy, discrete gradient steps. The flatter loss surfaces induced by the gradient norm penalty will naturally lead to greater robustness to parameter perturbations. The gradient alignment term reflects the observation previously that non-smooth functions contribute the most to the TD error, and so encourages the first-order gradient effects on successive states to move in a similar direction.
We note that this final observation seems to be at odds with the tendency for TD learning to encourage more tabular updates. Why would a second-order correction term which promotes flat minima and gradient alignment result in tabular updates? To answer this, we point to the tendency of TD targets to converge along the non-smooth components of the value function first. We are therefore faced with finding a flat region of parameter space to fit a discontinuous function. A representation which succeeds at this will benefit from minimizing interference between states, as the gradients for one transition will be on average uncorrelated with even nearby other states. The gradient alignment penalty suggests that, while the implicit regularization will prefer flat minima, smooth interference patterns which move other states in a similar direction to the current state will be penalized less than non-smooth directions. \end{proof}
\begin{restatable}{cor}{corrsecond}
The second-order dynamics push features towards precisely the worst direction w.r.t. stability. In particular, looking at the set of positive definite representations introduced by \citet{ghosh2020representations} we see
\begin{equation}
\{v : v^\top P^\pi v < \gamma^{-1} \|v\|_{\Xi} \}
\end{equation}
whereas the optimal gradients for the second order term implicitly solve the following optimization problem
\begin{equation}
\min \mathbb{E}_{x \sim \eta(x)}[g(x)^\top g(x) - \gamma g(x)^\top (P^\pi g)(x)]
\end{equation}
\end{restatable}
\ntk* \begin{proof} We leverage the dynamics $\partial_t V_t = K(X,X) ((\gamma P^\pi - I)V_t + r)$ and follow the derivation of Section 5 of \citet{jacot2018neural}. \end{proof}
We can develop intuitions for the kernel gradient descent setting by considering the special case of linear function approximation, where $K(x_1, x_2) = \langle \phi(x_1), \phi(x_2) \rangle$ for some feature map $\phi$. For the moment, we will define $\Phi$ to be a matrix consisting of features for every state in the state space $X$ (i.e. we update all states in the mdp at once). We then obtain \begin{align}
\partial_t \mathbf{w}_t & = \alpha \Phi^\top (R^\pi + \gamma P^\pi \Phi \mathbf{w}_t - \Phi \mathbf{w}_t) \, . \end{align} We can express the evolution of the value function constructed by multiplication of $\Phi$ and $w$ as follows. \begin{align}
\partial_t V_t &= (\partial_w V_t)^\top \partial_t w_t = \Phi \partial_t w_t \\
&= -\Phi (\Phi^\top (I - \gamma P^\pi) \Phi) w \\
&= - \Phi \Phi^\top (I - \gamma P^\pi) V_t \\
&= - K (I - \gamma P^\pi) V_t
\intertext{We further consider the dynamics of the value function on inputs outside of the set of states on which the Bellman updates are computed as follows.}
\partial_t V_t(x_{\mathrm{test}}) &= (\partial_w V_t(x_{\mathrm{test}}))^\top \partial_t w_t \\
&= - \phi(x_{\mathrm{test}})^\top \Phi^\top (I - \gamma P^\pi) V_t \\
&= - K(x_{\mathrm{test}}, X_{\mathrm{train}}) K(X_{\mathrm{train}}, X_{\mathrm{train}})^{-1} \partial_t V_t \end{align}
We now lift the assumption that all states are updated. In this more general kernel gradient descent setting, we let $K$ be a kernel as before, with $\tilde{K} = K(X_{\mathrm{train}}, X_{\mathrm{train}})$ and $\kappa_{x_{\mathrm{test}}} = K(x_{\mathrm{test}}, X_{\mathrm{train}})$. We then obtain the following dynamics \begin{align}
\partial_t V_t(x_{\mathrm{test}}) &= \kappa_{x_{\mathrm{test}}} \tilde{K}^{-1} \partial_t V_t(X_{\mathrm{train}}) \\
\intertext{In particular, this results in the following trajectory.}
V_t(x_{\mathrm{test}}) &= V_0(x_{\mathrm{test}}) + \kappa_{x_{\mathrm{test}}} \tilde{K}^{-1} [ V_t(X_{\mathrm{train}}) - V_0(X_{\mathrm{train}})] \end{align}
An interesting case study occurs when we consider, e.g., off-policy evaluation where the bootstrap targets used in the TD updates may not have been visited by the agent during training. This will be the case in many offline RL problems, where the action that would be selected by the policy we seek to evaluate was not taken by the behaviour policy, and so the agent leverages bootstrap targets which are not updated as part of the training set of states. In such cases, we will decompose the state space as $X = X_{\mathrm{train}} \oplus X_{\mathrm{test}}$. The dynamics we get in this case look quite different from standard kernel regression, as the dynamics of the training states will depend on the predictions on the `test' states. To condense notation, we will use $T^\pi V_t$ to refer to an application of the Bellman operator $V_t \mapsto \gamma P^\pi V_t + R^\pi$.
\begin{align}
\partial_t V_t(X_{\mathrm{train}}) &= \Phi_{{\mathrm{train}}} \Phi_{\mathrm{train}}^\top ( (T^\pi V_t)(X_{\mathrm{train}}) - V_t(X_{\mathrm{train}})) \\
\partial_t V_t(X_{\mathrm{test}}) &= \Phi_{{\mathrm{test}}} \Phi_{\mathrm{train}}^\top ((T^\pi V_t)(X_{\mathrm{train}}) - V_t(X_{\mathrm{train}}))
\intertext{We note that $(T^\pi V_t)(X_{\mathrm{train}})$ depends on both $V(X_{\mathrm{train}})$ and $V(X_{\mathrm{test}})$ due to the application of the Bellman operator $T^\pi$. We thus end up with the following joint system.}
\partial_t V_t(X_{\mathrm{train}} \oplus X_{\mathrm{test}}) &= \Phi_{{\mathrm{train}}} \Phi_{\mathrm{train}}^\top ( (T^\pi V_t)(X_{\mathrm{train}}) - V_t(X_{\mathrm{train}})) \oplus \Phi_{{\mathrm{test}}} \Phi_{\mathrm{train}}^\top ((T^\pi V_t)(X_{\mathrm{train}}) - V_t(X_{\mathrm{train}})) \\
\partial_t V_t(X_{\mathrm{train}} \oplus X_{\mathrm{test}}) &= (\Phi_{{\mathrm{train}}} \oplus \Phi_{{\mathrm{test}}} ) \Phi_{\mathrm{train}}^\top ( (T^\pi V_t)(X_{\mathrm{train}}) - V_t(X_{\mathrm{train}}))
\intertext{Using a non-standard notation of $K_1 \oplus K_2:= X \mapsto K_1(X) \oplus K_2(X)$, we can then rewrite the above in terms of the dot product kernel $K(x,x')$ as follows.}
\partial_t V_t(X_{\mathrm{all}}) &= (\tilde{K} \oplus \kappa_{x_{\mathrm{test}}}) [ (T^\pi V_t - V_t) (X_{\mathrm{train}})] \end{align} We emphasize that while this at first looks as though the dynamics are independent of the value $V_t(X_{\mathrm{test}})$, this is an artefact of the Bellman operator notation $(T^\pi V_t) (X_t)$, which hides the dependence of the Bellman targets $ T^\pi V_t$ on $X_{\mathrm{test}}$. In particular, we can write $(T^\pi V_t)(X_t) = \Pi_{X_{\mathrm{train}}}[\gamma P^\pi V_t (X_{\mathrm{train}} \oplus X_{\mathrm{test}}) + R^\pi]$, which makes this dependence more explicit but is less succinct. \section{Experiment details} \label{apx:details} \subsection{Estimation of update rank} \label{appx:update-details} To estimate the update rank of an agent, we sample $k$ transitions from the agent's replay buffer and compute the matrix $A(\theta)$ as described in Section~\ref{sec:rank-exps}. We use the agent's current optimizer state and its current parameters in this computation. We then take the singular value decomposition of $A$ to obtain $k$ singular values $S = \{\sigma_1, \dots, \sigma_k\}$. We then threshold using the numerical approach taken in prior works \citep{maddox2020rethinking}, and compute the size of the set $S_{\epsilon} = \{ \sigma \in S : \sigma > \epsilon \max(S) \}$. This allows us to ignore directions of near-zero variation in the update matrix. In practice, we use $\epsilon = 0.1$.
Because the Q-functions learned by value-based deep RL agents are vector- rather than scalar-valued functions of state, and our estimator depends on a 2-dimensional update matrix, we must make a choice on how to represent the change in the state-value function. We considered taking the maximum over actions, the mean over actions, selecting a fixed action index, and selecting the action taken in the transition on which the update was computed, and found that all of these choices produced similar trends. In all evaluations in this paper, Q-functions are reduced using the max operator. We apply the same approach for distributional agents by taking the expectation over the distribution associated with each state-action pair.
To evaluate the policy-based agents, whose outputs correspond to distributions over actions, we compute the norm of the difference in the output probability distributions for each state in lieu of taking the difference of output values. I.e., the entry $A_{i,j} = \| p_\theta(x_j) - p_{\theta_i}(x_j) \|$, where the discrete probability distribution $p_\theta$ is taken as a vector. \subsection{ProcGen}\label{appx:procgen-details} \begin{figure}
\caption{Example levels from the dodgeball environment.}
\label{fig:procgen-viz}
\end{figure} The ProcGen benchmark consists of sixteen procedurally generated environments. Each environment consists of a set of randomly generated levels, of which a fixed subset are used for training and a disjoint subset are used for evaluation. Levels differ superficially in their observations and initial sprite layouts but retain the same underlying structure, as can be seen in Figure~\ref{fig:procgen-viz}. The observation space is a box space with the RGB pixels the agent sees in a numpy array of shape (64, 64, 3).
Our PPO and DAAC agents use the same hyperparameters and implementation as is provided by \citet{raileanu2021decoupling}. Our behaviour cloning objective minimizes the KL divergence between the distillation agent's and the pretrained agent's policies, with an entropy bonus equal to that used to train the original PPO agent. \subsection{Atari}\label{appx:atari-details}
We additionally perform evaluations on environments from the Atari benchmarks. Due to computational constraints, we consider only a subset of the entire benchmark. We obtain a mixture of easy games, such as pong and boxing, and more challenging games like seaquest, where we measure difficulty by the time it takes for the agent to meet human performance. For some experiments, we used the sparse-reward environment Montezuma's Revenge.
In our distillation experiments, we train the original agent for 50M frames using $\epsilon$-greedy exploration with $\epsilon = 0.1$, and train the distillation agents for a number of updates equivalent to 10M frames of data collected online. We base our implementation off of the open-source implementations in \citet{ostrovski2021the}.
For our behaviour cloning objective, we use the same architecture as is used for DQN, but feed the final layer of actions into a softmax to obtain a probability distribution over actions, which we denote as $P_\theta(a|x)$. Given a state-action pair taken by the target agent, we implement the following behaviour cloning loss for distillation \begin{equation}
\ell(\theta, x_i, a_i) = -\log P_\theta(a_i | x_i) -0.1 H(P_\theta(\cdot | x_i)) \end{equation} where $H$ denotes the entropy of a distribution. We use a replay capacity of 1e6 and allow the pre-trained agent to collect additional data during distillation to further increase the training set size of the distilled agents.
\section{Additional numerical evaluations} \label{appx:numerical} We provide additional numerical evaluations to provide insight into the theoretical results of Section~\ref{sec:learning-smoothness}. \subsection{Fourier analysis} We begin by studying the Fourier decomposition of value and reward functions in popular Atari domains by treating the value function as a function of \textit{time} rather than as a function of \textit{observations}. In this sense, the Fourier decomposition is measuring the continuity of the value function with respect to time and so is a closer approximation of the notion of smoothness we focus on in Section~\ref{sec:vf_gen}. We show our evaluations in Figure~\ref{fig:atari_fourier}. \begin{figure}
\caption{Fourier decomposition of Atari value functions when viewed as a function of time. We sample $k$ consecutive states from the replay buffer and compute the predicted value on each state (fixing an arbitrary action) to get a function $V: \{1, \dots, k \} \rightarrow \mathbb{R}$. We then compute the Fourier decomposition of this function. The top row shows indices $k=0 \dots 50$, while the bottom row omits the $k=0$ index (the constant function) to better illustrate the rate of decay of the spectrum of each function.}
\label{fig:atari_fourier}
\end{figure}
\subsection{Kernel gradient descent} \label{appx:kernel-gd} We include an illustration of the kernel gradient descent dynamics described in Section~\ref{sec:fa_gen} in Figure~\ref{fig:kernel-dynamics}. We run our evaluations using a radial basis function (RBF) kernel of varying lengthscale, with shorter lengthscales corresponding to weaker generalization between states. While the shorter lengthscale corresponds to more stable learning dynamics and better fitting of the value function on the training set, it also induces greater value approximation error on the test states. In contrast, the longer lengthscales result in better generalization to novel test states under Monte Carlo dynamics, but result in divergence for large values of $\gamma$. \begin{figure}
\caption{Numerical evaluations of kernel gradient descent with an RBF kernel. The MDP in question is a ``circle MDP'' whose states are integers $n \in \{1, \dots, 50\}$. We assume the agent is `trained' on states 1 to 40, and does not perform value function updates on the final ten states; we use the policy which always takes the agent from state $n$ to $n+1 \mod 50$, and set a single reward at state 25. Each row corresponds to a different value of the discount factor $\gamma$: the top corresponds to $\gamma = 0.5$, and the bottom to $\gamma = 0.99$. Each column corresponds to the lengthscale which parameterizes the kernel, going left to right: 0.01, 1.0, and 100. The left hand side and right hand side are distinguished by the number of update steps which the TD dynamics are evaluated for. The LHS runs TD for only 20 steps, while the RHS runs it for 100 steps. MC updates are run for 1500 steps on both figures. We see that for $\gamma = 0.99$, the larger-lengthscale kernel predictions diverge under TD dynamics, though not Monte Carlo. The Monte Carlo dynamics further nicely illustrate the trade-off between generalizing out of the training set and ability to fit the discontinuities of the value function on the training set. The larger lengthscale has lower MSE from the value function on the test set, but fails to fit the discontinuity of the value function at the reward state. Meanwhile, the smaller lengthscales easily fit the value function on the training set but predict zero for all other states. }
\label{fig:kernel-dynamics}
\end{figure}
Additionally, as promised in Section~\ref{sec:fa_gen}, we illustrate the role of smooth eigenfunctions in generalization in Figure~\ref{fig:kernel-generalization}. To produce this figure, we randomly generate an unweighted graph and then construct an MDP whose dynamics correspond to a random walk on this graph. We consider the generalization error of a kernel regression process where the kernel $K_S$ is of the form $ K_S(x,y) = \sum_{i \in S} v_{\lambda_i}(x) v_{\lambda_i}(y)$ for some $S \subseteq \mathrm{spec}(P^\pi)$. In the right-hand-side plot of Figure~\ref{fig:kernel-generalization}, we set $S=\{1, \dots, 20\}$, so that our analysis concentrates on smooth eigenfunctions. We then consider the generalization error of this smooth kernel when we only regress on a subset of the state space selected uniformly at random\footnote{Because the MDP-generating process is invariant to permutations of the state indices, we sample the indices $\{1, \dots, \lfloor |\mathcal{X}| \times \text{training fraction} \rfloor \}$, and average over randomly generated MDPs. }. We study the effect of varying the size of this set, i.e. the fraction of states in the training set, in Figure~\ref{fig:kernel-generalization}, in order to quantify the degree to which additional information about the value function translates to improved generalization. We consider three regression problems: regression on $V^\pi$, regression on the projection of $V^\pi$ onto the span of $T = \{v_1, \dots, v_{20} \}$, and regression on its projection onto the span of $B = \{v_{n-19}, \dots, v_{n} \}$. Unsurprisingly, we see that the smooth kernel is able to improve its generalization performance as the size of the training set increases when it is set to regress $V^\pi$ or $\Pi_{T} V^\pi = V^\pi_T$. 
However, when the kernel regresses only on the projection of $V^\pi$ onto the non-smooth eigenvectors, we do not see a benefit of adding additional training points: because there is no information about the smooth components of the function in the targets, adding additional data points will not help to improve regression accuracy. The left hand side of the figure shows similarly that fitting local information in the form of $n$-step returns for small $n$ also does not provide the kernel with sufficient information for it to be able to extrapolate and improve its generalization error as the size of the training set increases.
\begin{figure}
\caption{Generalization of predicted function under kernel regression using $n$-step return targets evaluated on a random subset of states (left), and projecting value function onto top or bottom eigenvectors of $P^\pi$ (right). We see a similar trend where for larger $n$ (corresponding to smoother targets), the kernel regression method generalizes better with increasing dataset sizes. For smaller $n$ and for the projection of $V^\pi$ onto non-smooth eigenvectors, adding additional data points does not improve generalization performance.}
\label{fig:kernel-generalization}
\end{figure} \section{Additional empirical results} \label{apx:more-results} \subsection{Additional value distillation results} We consider three different types of regression to the outputs of the pre-trained network, along with two more traditional bootstrapping methods for offline RL. \texttt{Q-regression} regresses the outputs of the distilled network to those of the pre-trained network for every action. \texttt{qa-regression} only does q-value regression on the action taken by the pre-trained agent. \texttt{adv-regression} regresses on the advantage function (computed as the q-value minus the mean over all actions) given by the pre-trained agent; \texttt{qr} does quantile regression q-learning on the offline data; \texttt{double-q} performs a standard double q-learning update on the offline data.
We find that all of these methods obtain an initial update rank significantly below that of the pre-trained network when they begin training, which increases over time. Regression to the advantages obtains a significantly lower update rank than any other method, suggesting that the advantage function may be much smoother than the action-value function. With respect to performance on the original environment, we see that the methods which use all action values at every update obtain significantly higher performance than those which only update a single action at a time. This improvement in performance is not mediated by an auxiliary task effect or an increase in the network's ability to distinguish states: the advantage regression network attains low update rank but high performance, while the qr-regression task provides a great deal of information to the representation but is not competitive with the q-regression network. \begin{figure}
\caption{Results from post-training distillation on a variety of objectives. We note that advantage regression tends to exhibit the lowest update rank, with the qr agent tending to exhibit the highest update rank and the q-regression objectives falling somewhere in between. Because the behaviour cloning objective minimizes a cross-entropy loss rather than a regression loss, further investigation is required to understand how the trajectory of its update dimension differs from those of the regression objectives.}
\label{fig:tandem-apx}
\end{figure} \subsection{More detailed update trajectories}
We include a more detailed view of the update matrices obtained by DQN and C51 agents during the first 7 million frames of training, roughly 5\% of the training budget, in Figure~\ref{fig:updates-long}. We see that even early in training, the DQN and C51 agents both exhibit significant overfitting behaviour. Note that states are sampled uniformly at random from the replay buffer, and then assigned an index based on the output of a clustering algorithm to improve readability of the figures.
\begin{figure}
\caption{Update matrices for distributional and DQN agents on four games from the Atari suite, chosen to represent a range of reward densities and difficulties. Each iteration corresponds to 1e5 training frames.}
\label{fig:updates-long}
\end{figure}
\chapter{Generalization across environments}
\section{Implementation details} \subsection{Model learning: rich observations} \label{app:model_nonlinear_implementation} For the model learning experiments we use an almost identical encoder architecture as in~\citet{deepmindcontrolsuite2018}, with two more convolutional layers added to the convnet trunk. Secondly, we use \texttt{ReLU} activations after each convolutional layer, instead of \texttt{ELU}. We use kernels of size $3 \times 3$ with $32$ channels for all the convolutional layers and set stride to $1$ everywhere, except for the first convolutional layer, which has stride $2$. We then take the output of the convolutional net and feed it into a single fully-connected layer normalized by \texttt{LayerNorm}~\citep{ba2016layernorm}. Finally, we add a \texttt{tanh} nonlinearity to the $50$ dimensional output of the fully-connected layer.
The decoder consists of one fully-connected layer that is then followed by four deconvolutional layers. We use \texttt{ReLU} activations after each layer, except the final deconvolutional layer that produces the pixel representation. Each deconvolutional layer has kernels of size $3 \times 3$ with $32$ channels and stride $1$, except for the last layer, where stride is $2$.
The dynamics and reward models are all MLPs with two hidden layers with 200 neurons each and \texttt{ReLU} activations.
\subsection{Reinforcement learning} \label{app:rl_implementation} For the reinforcement learning experiments we modify the Soft Actor-Critic PyTorch implementation by \citet{pytorch_sac} and augment it with a shared encoder between the actor and critic, the general model $f_s$ and task-specific models $f_{\eta}^e$. The forward models are multi-layer perceptrons with ReLU non-linearities and two hidden layers of 200 neurons each. The encoder is a linear layer that maps to a 50-dim hidden representation. We also use L1 regularization on the $S$ latent representation. We add two additional dimensions to the state space, a spurious correlation dimension that is a multiplicative factor of the last dimension of the ground truth state, as well as an environment id. We add Gaussian noise $\mathcal{N}(0, 0.01)$ to the original state dimension, similar to how \citet{arjovsky2019invariant} incorporate noise in the label to make the task harder for the baseline.
Soft Actor Critic (SAC)~\cite{haarnoja2018sac} is an off-policy actor-critic method that uses the maximum entropy framework to derive soft policy iteration. At each iteration, SAC performs soft policy evaluation and improvement steps. The policy evaluation step fits a parametric soft Q-function $Q(x_t, a_t)$ using transitions sampled from the replay buffer $\mathcal{D}$ by minimizing the soft Bellman residual, \begin{equation*}
J(Q) = \mathbb{E}_{(x_t, a_t, r_t, x_{t+1}) \sim \mathcal{D}} \bigg[ \bigg(Q(x_t, a_t) - r_t - \gamma \Bar{V}(x_{t+1})\bigg)^2 \bigg]. \end{equation*} The target value function $\Bar{V}$ is approximated via a Monte-Carlo estimate of the following expectation, \begin{equation*}
\Bar{V}(x_{t+1}) = \mathbb{E}_{a_{t+1} \sim \pi} \big[\Bar{Q}(x_{t+1}, a_{t+1}) - \alpha \log \pi(a_{t+1}|x_{t+1}) \big], \end{equation*}
where $\bar{Q}$ is the target soft Q-function parameterized by a weight vector obtained from an exponentially moving average of the Q-function weights to stabilize training. The policy improvement step then attempts to project a parametric policy $\pi(a_t|x_t)$ by minimizing KL divergence between the policy and a Boltzmann distribution induced by the Q-function, producing the following objective, \begin{equation*}
J(\pi)= \mathbb{E}_{x_t \sim \mathcal{D}} \bigg[ \mathbb{E}_{a_t \sim \pi} [\alpha \log (\pi(a_t | x_t)) - Q(x_t, a_t)] \bigg]. \end{equation*}
We provide the hyperparameters used for the RL experiments in \cref{table:rl_hyper_params}.
\begin{table}[hb!] \centering
\begin{tabular}{|l|c|} \hline Parameter name & Value \\ \hline Replay buffer capacity & $1000000$ \\ Batch size & $1024$ \\ Discount $\gamma$ & $0.99$ \\ Optimizer & Adam \\ Critic learning rate & $10^{-5}$ \\ Critic target update frequency & $2$ \\ Critic Q-function soft-update rate $\tau_{\textrm{Q}}$ & 0.005 \\ Critic encoder soft-update rate $\tau_{\textrm{enc}}$ & 0.005 \\ Actor learning rate & $10^{-5}$ \\ Actor update frequency & $2$ \\ Actor log stddev bounds & $[-5, 2]$ \\ Encoder learning rate & $10^{-5}$ \\ Decoder learning rate & $10^{-5}$ \\ Decoder weight decay & $10^{-7}$ \\ L1 regularization weight & $10^{-5}$ \\ Temperature learning rate & $10^{-4}$ \\ Temperature Adam's $\beta_1$ & $0.9$ \\ Init temperature & $0.1$ \\ \hline \end{tabular}\\ \caption{\label{table:rl_hyper_params} A complete overview of used hyper parameters.} \end{table}
\chapter*{Contributions to joint-authored work} The research in this thesis is the product of a number of valuable collaborations.
\textbf{Chapter 3} is based on work presented at the NeurIPS 2019 workshop on machine learning with guarantees \citep{lyle2020benefits}. All empirical results in this section were proposed and implemented by me. While I produced an initial analysis of PAC-Bayes bounds under symmetries and showed a preliminary version of Theorem~\ref{lemma:KL:gen} for finite groups independently, I worked closely with Benjamin Bloem-Reddy to generalize these initial results to the form presented in this thesis.
\textbf{Chapter 4} is principally built on the NeurIPS 2020 paper \citep{lyle2020bayesian}, but also draws on some of the insights developed in a tandem work led by Binxin Ru and myself which was presented at NeurIPS 2021 \citep{ru2020revisiting}. I was responsible for all theoretical and empirical results concerning linear models and gradient alignment, while Lisa Schut extended and improved on my initial toy experiments on neural networks to generate Figures~\ref{fig:mod_select_dnn} and \ref{fig:mod_select_dnn_parallel}. Figure~\ref{fig:tse-variance} was produced independently by me, but was inspired by analysis initially performed by Binxin Ru.
\textbf{Chapter 5} was written in close collaboration with Mark Rowland and Will Dabney, and the initial research question sparking this project was brainstormed during joint meetings involving all authors. I ran the evaluations for Figure~\ref{fig:feature-viz} and performed the evaluations on linear value function evolution in Appendix~\ref{sec:ensemble-dynamics}. Mark proposed the continuous-time formalism and proved the initial subspace convergence result for a single value function, while I proposed the `exact feature update' framework and proved the results concerning auxiliary tasks and ensemble prediction.
\textbf{Chapter 6} is based on work I led during an internship at DeepMind \citep{lyle2021understanding}. I conducted additional analysis into the learned representations of the agents used for the empirical results of that work, and eventually formulated the main hypotheses of this chapter based on that analysis. I wrote and ran the experiment code for the supervised and Atari benchmarks. My co-authors ran code I had written during my internship after I returned to Oxford to generate final versions of several figures, and provided helpful discussion throughout the project.
\textbf{Chapter 7} is based on \citep{lyle2022generalization}, for which I was the lead author. I was responsible for the implementations in all empirical results and for the proposal and initial proofs of the theoretical results. The other authors on the paper provided feedback on early drafts of the paper and recommendations for interesting experimental conditions to pursue.
\textbf{Chapter 8} is based on a paper co-first-authored by myself and Amy Zhang. I stated and proved all theoretical results included in the thesis, proposed the algorithm for linear state abstractions, and implemented the linear model experiments. Both Amy and I worked together to propose the nonlinear MISA architecture and training objective.
\end{document} |
\begin{document}
\begin{abstract} We study regularity of solutions $u$ to $\overline\partial u=f$ on a relatively compact $C^2$ domain $D$ in a complex manifold of dimension $n$, where $f$ is a $(0,q)$ form. Assume that there are either $(q+1)$ negative or $(n-q)$ positive Levi eigenvalues at each point of boundary $\partial D$. Under the necessary condition that a locally $L^2$ solution exists on the domain, we show the existence of the solutions on the closure of the domain that gain $1/2$ derivative when $q=1$ and $f$ is in the H\"older-Zygmund space $\Lambda^r(\overline D)$ with $r>1$. For $q>1$, the same regularity for the solutions is achieved when $\partial D$ is either sufficiently smooth or of $(n-q)$ positive Levi eigenvalues everywhere on $\partial D$. \end{abstract}
\maketitle
\setcounter{thm}{0}\setcounter{equation}{0}
\section{Introduction}\label{sec1} Let $D$ be a relatively compact domain in a complex manifold $X$ of dimension $n$. We say that $D$ satisfies the condition $a_q$ if $\partial D\in C^2$ and its Levi form has either $(q+1)$ negative or $(n-q)$ positive eigenvalues at each point on $\partial D$. We are interested in the regularity of solutions $u$ to the $\overline\partial$-equation $\overline\partial u=f$ on $D$. We will study the case where $f$ is a $V$-valued $(0,q)$ form for a holomorphic vector bundle $V$ on $X$. Denote by $\Lambda_{(0,q)}^r(D, V)$ the space of $V$-valued $(0,q)$ forms whose coefficients are in H\"older-Zygmund space $\Lambda^r(D)$ (see definition in Section~\ref{h-space}). To ensure the existence of solutions, we impose a minimum requirement that there is an $L_{loc}^2$ solution $u_0$ on $D$ and seek a solution of better regularity.
Our main results are the following. \th{regsol} Let $r\in(1,\infty)$ and $q\geq 1$. Let $D $ be a relatively compact domain with $C^3$ boundary in a complex manifold $X$ satisfying the condition $a_q$. Let $V$ be a holomorphic vector bundle on $X$.
Then there exists a bounded linear $\overline\partial$-solution operator $H_q\colon \Lambda_{(0,q)}^r(D, V)\cap\overline\partial L^2_{loc}(D)\to \Lambda_{(0,q-1)}^{r+1/2}(D, V)$,
provided $(a)$ $q=1$ or $\partial D$ has $(n-q)$ positive Levi eigenvalues at each point on $\partial D$; or $(b)$ $\partial D\in\Lambda^{r+\f{5}{2}}$. \end{thm}
Note that $H_q$ is independent of $r$ and it provides a smooth ($C^\infty$) linear $\overline\partial$-solution operator for smooth forms in the two cases. When $\partial D\in C^2$, \rt{regsol} (a) provides a satisfactory regularity result for $\overline\partial$-solutions in the H\"older-Zygmund spaces for $q=1$. For $q>1$, we have the following. \th{regsol+} Let $q\geq 2$ and keep the notation of \rta{regsol} with $\partial D\in C^2$. Suppose $4 \leq r< \infty$. Then there exists a bounded linear $\overline\partial$-solution operator $H_q^{r}\colon \Lambda_{(0,q)}^r(\overline D, V)\cap \overline\partial L^2_{loc}(D)\to \Lambda_{(0,q-1)}^{r-3}(\overline D)$. Furthermore, $H_q^{r}$ maps $C_{(0,q)}^\infty(\overline D, V)\cap \overline\partial L^2_{loc}(D) $ into $ C_{(0,q-1)}^{\infty}(\overline D)$. \end{thm}
We first state some closely related results on $\overline\partial$-solutions $u$ to $\overline\partial u=f$ on strictly pseudoconvex domains $D$ in ${\bf C}^n$: After work of Lieb-Grauert~\cite{MR273057} and Kerzman~\cite{MR0281944}, Henkin-Romanov~\cite{MR0293121} achieved the sharp $C^{1/2}$ solutions for $f\in L^\infty$ by integral formulas. The $C^{k+1/2}$ solutions for $f\in C^k$ $(k\in{\bf N})$ were obtained by Siu~\cite{MR330515} for $(0,1)$ forms and by Lieb-Range~\cite{MR597825} for forms with $q\geq1$ when $\partial D$ is sufficiently smooth.
For $\partial D\in C^2$, Theorem~\ref{regsol} and an analogous result for a homotopy formula were proved in~\cite{MR4289246} through the construction of a homotopy formula. These results were extended by Shi~\cite{MR4244873} to weighted Sobolev spaces with a gain less than $1/2$ derivative and by Shi-Yao~\cites{https://doi.org/10.48550/arxiv.2107.08913, https://doi.org/10.48550/arxiv.2111.09245} to $H^{s+1/2,p}$ space gaining $1/2$ derivative for $s>1/p$ when $\partial D\in C^2$ and for $s\in{\bf R}$ when $\partial D$ is sufficiently smooth. It is worth pointing out that Shi-Yao achieved the first regularity result for negative order $s$. Furthermore, Gong-Lanzani~\cite{MR4289246} obtained $\Lambda^{r+1/2}$ (with $r>1$) regularity gaining $1/2$ derivative on strongly ${\bf C}$-linear convex $C^{1,1}$ domains $D$.
Next, we mention results on $\overline\partial$ solutions on $a_q$ domains in complex manifolds:
The basic estimates for the Cauchy-Riemann operator were proved by Morrey~\cite{MR0099060} for $(0,1)$-forms and by Kohn~\cites{MR0153030,MR0208200} for forms of any type on strongly pseudoconvex manifolds with smooth boundary. These results led to the regularity of the $\overline\partial$-Neumann operator for strictly pseudoconvex manifolds and more generally for compact manifolds whose boundary satisfies property $Z(q)$. Kohn proved that property $Z(q)$ is satisfied by strongly pseudoconvex manifolds with smooth boundary and H\"ormander proved that the $Z(q)$ condition is satisfied by the condition $a_q$ on $D$ when $\partial D\in C^3$. When $\partial D\in C^\infty$, sharp regularity results for $\overline\partial$ solutions were obtained by Greiner-Stein~\cite{MR0499319} for $(0,1)$ forms and Beals-Greiner-Stanton~\cite{MR886418} for $(0,q)$ forms under condition $Z(q)$ through the study of the regularity of $\overline\partial$-Neumann operator in $L^{k,p}$ and $\Lambda^r$ spaces. Condition $a_q$ also ensures the stability of the solvability of the $\overline\partial$-equation on $(0,q)$ forms; namely if $f=\overline\partial u_0+\tilde f$ on $D$ while $\tilde f$ is a $\overline\partial$-closed form on a larger domain and $u_0$ has the regularity in the sought-after class, then $\tilde f=\overline\partial\tilde u$ for some $L^2$ form $\tilde u$. This stability is useful to obtain regularity for $\overline\partial$ solutions as shown by Kerzman~\cite{MR0281944}; first one seeks regularity for $u_0$ without solving the $\overline\partial$-equation. Then $u_0+\tilde u$ provides a desired solution based on regularity of $u_0$ and the interior regularity of $\tilde u$ from the elliptic theory on systems of partial differential equations.
To prove our results, we will use integral formulas to obtain local solutions near each boundary point of $D$. We then use the Grauert bumping method as in~\cite{MR0281944} to construct $\tilde f$. To provide background for our results, let us mention regularity results for $\overline\partial$-solutions on the transversal intersection of domains in ${\bf C}^n$. Range-Siu~\cite{MR338450} obtained $C^{1/2-\epsilon}$ estimate with any positive $\epsilon$ for a real transversal intersection of sufficiently smooth strictly pseudoconvex domains. For higher order derivatives, J.~Michel~\cite{MR928297} obtained $C^{k+1/2-\epsilon}$ estimate for $\overline\partial$ solutions on a certain intersection of smooth strictly pseudoconvex domains. J.~Michel and Perotti~\cite{MR1038709} extended the result to real transversal intersection of strictly pseudoconvex domains with sufficiently smooth boundary. We should also mention that the local version of \rt{regsol} was proved by Laurent-Thi\'ebaut and Leiterer~\cite{MR1207871} when $\partial D\in C^\infty$ and $k\in {\bf N}$.
Ricard~~\cite{MR1992543} obtained regularity for concave wedge with $C^{k+2}$ boundary and convex wedges with $C^2$ boundary. The reader is referred to
Barkatou~~\cite{MR1888228} and Barkatou-Khidr~~\cite{MR2844676} for further results in this direction.
However, all existing integral formulas for $\overline\partial$ solutions, including ours, require the boundary to be sufficiently smooth when concavity is present. On the other hand, it is well-known that concavity of the domains is useful; the classical Hartogs' theorem says that a holomorphic function on a $1$-concave domain extends to a holomorphic function across the boundary. Therefore, on a $1$-concave domain in a complex manifold, all $\overline\partial$-solutions for $(0,1)$ forms must have the same regularity regardless of the smoothness of the boundary of the domain. In Section 7 we will successfully implement this idea to prove \rt{regsol} (i) with $q=1$.
When $q>1$, not all $\overline\partial$ solutions have the same regularity.
To prove \rt{regsol+}, we first derive an estimate for the solution operator when $\partial D$ is sufficiently smooth. Then we apply the Nash-Moser iteration methods by solving the $\overline\partial$-equation on the subdomains $D_k\subset D_{k+1}$ that have smooth boundary and in the limit, we obtain a desired solution on the closure of $D=\cup D_k$.
We organize the paper as follows.
In Section 2, we formulate an approximate local homotopy formula on a suitable neighborhood of a boundary point $\zeta_0\in\partial \Omega$. In Sections 3 and 4, we derive (genuine) local homotopy formulas for $\overline\partial$-closed $(0,q)$ forms near $(n-q)$ convex and $(q+1)$ concave boundary points of an $a_q$ domain. There we follow approaches developed in Lieb-Range~~\cite{MR597825} and Henkin-Leiterer~~\cite{MR986248}. While a homotopy formula for forms that are not necessarily $\overline\partial$-closed can be derived for $(n-q)$ convex points without extra conditions, such a formula on the concave side of the boundary turns out to be subtle. We will need an extra negative Levi eigenvalue, i.e.\ $(q+2)$ negative Levi eigenvalues. It is not clear if a local homotopy formula exists without this stronger negativity condition. Such a phenomenon already occurs for strictly pseudoconvex hypersurfaces of dimension $5$ in work of Webster~~\cite{MR995504} on the local CR embeddings and concave compact CR manifolds in work of Polyakov~~\cite{MR2088929} on global CR embeddings.
Section 5 contains some elementary facts on H\"older-Zygmund spaces, where we derive an equivalent characterization of the H\"older-Zygmund norms relying solely on a version of the Hardy-Littlewood lemma. This allows us to simplify previous work.
Section 6 contains the main local estimates for the homotopy operator that appears in the local homotopy formula. One of the main purposes of the section is to derive precise estimates that reflect the convexity of the H\"older-Zygmund norms; see \re{hqfr12-c} for strictly $(n-q)$ convex $C^2$ domains and \re{hqfr12} for strictly $(q+1)$-concave domains. We emphasize that the estimates do not require the forms to be $\overline\partial$-closed; in fact, the estimates hold for $(q+1)$ concave boundary points, although we don't know if a homotopy formula exists. These estimates immediately give us the desired regularity stated in \rt{regsol} for local solutions.
In Section 7 we show how Hartogs' extension can be used to study the regularity of $\overline\partial$ solutions for $(0,1)$ forms. A local version of \rt{regsol} (a) for $q=1$ is proved in this section.
In Section 8 we show the existence of global solutions with the desired regularity by using local solutions in Sections 6 and 7 and the interior regularity of elliptic systems. We also derive a global estimate for $\overline\partial$-solutions. Using this global estimate, we employ the Nash-Moser smoothing operator to prove a detailed version of \rt{regsol+} in Section 9.
The paper has two appendices. In Appendix A, we recall the existing regularity on the signed distance function near a $C^2$ hypersurface in a Riemannian manifold. In Appendix B, we describe a stability result of solvability of $\overline\partial$-equation on $a_q$ domains with $C^2$ boundary using results in H\"ormander~~\cite{MR0179443}.
\setcounter{equation}{0}
\section{A local approximate
homotopy formula}
Let $X$ be a complex manifold of dimension $n$. Let $D$ be a relatively compact domain in $X$ defined by $\rho<0$, where $\rho$ is a $C^2$ defining function with $d\rho(\zeta)\neq0$ when $\rho(\zeta)=0$. Following Henkin-Leiterer~~\cite{MR986248}, we say that $\partial D$ is \emph{strictly $q$-convex} at $\zeta\in\partial D$, if the Levi-form $L_\zeta\rho$, i.e. the restriction of the complex Hessian $H_\zeta\rho$ on $T_\zeta^{1,0}(\partial D)$, has at least $q$ positive eigenvalues. We say that $\partial D$ is \emph{strictly $q$-concave} at $\zeta\in\partial D$, if $L_\zeta\rho$ has at least $q$ negative eigenvalues. Thus a domain is strictly pseudoconvex if and only if it is strictly $(n-1)$-convex. Following H\"ormander~~\cite{MR0179443}, we say that $D$ satisfies the \emph{condition $a_q$} if the Levi-form $L_\zeta\rho$ has at least either $(q+1)$ negative eigenvalues or $(n-q)$ positive eigenvalues for every $\zeta\in\partial D$.
To see a domain satisfying the condition $a_q$, let ${\bf P}^n$ be the complex projective space. Let $B_{q}^r\subset{\bf P}^n$ be defined by \begin{gather}{}
B_{q}^r\colon |z^{q}|^2+\cdots+ |z^n|^2< r|z^0|^2+\cdots+ r|z^{q-1}|^2 \end{gather} with $1\leq q\leq n$ and $r>0$. Then $B^r_{q}$ is both strictly $(n-q)$-convex and $(q-1)$-concave. In general, when $\partial D$ is Levi non-degenerate, the condition $a_q$ for $\partial D$ is equivalent to the number of negative Levi eigenvalue not being $q$ at every point $\zeta\in\partial D$. Thus $B^{r_2}_{q_1} \setminus\overline{ B^{r_1}_{q_2}}$ satisfies the condition $a_q$ if $r_2>r_1$, $q_2\geq q_1$, and \begin{gather}{} q\neq q_1-1, q_2-1. \end{gather}
\begin{defn}\label{pck} Let $k\geq1$ and $r>1$. A relatively compact domain $D$ in a $C^k$ manifold $X$ is {\it piecewise smooth} of class $C^k$
(resp. $\Lambda^r$), if for each $\zeta\in\partial D$, there are $C^k$ (resp. $\Lambda^r$) functions $\rho_1,\dots, \rho_\ell$ defined on a neighborhood $U$ of $\zeta$ such that $D\cap U=\{z\in U\colon\rho_j(z)<0, j=1,\dots, \ell\}$, $\rho_j(\zeta)=0$ for all $j$, and $$ d\rho_{1}\wedge\cdots\wedge d\rho_{\ell} \neq0. $$ \end{defn}
The main purpose of this section is to construct an approximate homotopy formula on a subdomain $D'$ of $D$, where $\partial D'$ and $\partial D$ share a piece of boundary containing a given point $\zeta_0\in\partial D$. The $D'$ will have the form $D^{12}=D^1\cap D^2$ where $D^1=D$ and $D^2$ is a ball in suitable coordinates for $\partial D$. Since the construction of $D'$ is local, we assume that $D$ is contained in ${\bf C}^n$.
We will need Leray maps, following notation in ~\cite{MR986248}. \begin{defn} Let $D$ be a domain in ${\bf C}^n$. Let $S\subset {\bf C}^n\setminus D$ be a $C^1$ submanifold or an open subset in ${\bf C}^n$. We say that $g( z,\zeta )$ is a \emph{Leray map} on $D\times S$ if $g\in C^1(D\times S)$ and \eq{leray-map} g( z,\zeta )\cdot(\zeta-z)\neq 0, \quad \zeta\in S, \quad z\in D. \end{equation}
Throughout the paper, we use \eq{g0} g_0( z,\zeta )=\overline\zeta-\overline z. \end{equation} \end{defn}
Let $g^j \colon D\times S^j\to{\bf C}^n$ be $C^1$ Leray mappings for $j=1,\dots,\ell$. Let $w=\zeta-z$. Define \begin{gather*} \omega^i=\f{1}{2\pi i}\f{g^i\cdot dw}{g^i\cdot w}, \quad \Omega^i=\omega^i\wedge(\overline\partial\omega^i)^{n-1},\\ \Omega^{01}=\omega^0\wedge\omega^1\wedge\sum_{\alpha+\beta=n-2} (\overline\partial\omega^0)^{\alpha}\wedge(\overline\partial\omega^1)^{\beta}. \end{gather*} Here both differentials $d$ and $\overline\partial$ are in $z,\zeta$ variables.
In general, define \begin{gather*} \Omega^{1\cdots \ell}=\omega^{g_1}\wedge\cdots\wedge\omega^{g_\ell}\wedge\sum_{\alpha_1+\cdots+\alpha_\ell=n-\ell} (\overline\partial\omega^{g_1})^{\alpha_1}\wedge\cdots(\overline\partial\omega^{g_\ell})^{\alpha_\ell}. \end{gather*} Decompose $\Omega^{\bigcdot}=\sum\Omega_{0,q}^{\bigcdot}$, where $\Omega_{0,q}^{\bigcdot}$
has type $(0,q)$ in $z$. Hence $\Omega^{i_1,\dots, i_\ell}_{0,q}$ has type $(n,n-\ell-q)$ in $\zeta$. Set
$\Omega^{\bigcdot}_{0,-1}=0$. The Koppelman lemma says that
$$
\overline\partial_z\Omega^{1\cdots \ell}_{0,q-1}+\overline\partial_\zeta\Omega^{1\cdots \ell}_{0,q}=\sum(-1)^{j}\Omega_{0,q}^{1\cdots\hat j\cdots\ell}.
$$ See Chen-Shaw~\cite{MR1800297}*{p.~263} for a proof. We will use special cases \begin{gather} \label{kop1}\overline\partial_\zeta\Omega_{0,q}^1+\overline\partial_z\Omega_{0,q-1}^1=0, \\ \label{kop2}\overline\partial_\zeta\Omega^{01}_{0,q}+\overline\partial_z\Omega_{0,q-1}^{01}=-\Omega_{0,q}^1+\Omega_{0,q}^0, \\ \label{kop3}\overline\partial_\zeta\Omega^{012}_{0,q}+\overline\partial_z\Omega_{0,q-1}^{012}=-\Omega_{0,q}^{12}+\Omega_{0,q}^{02}-\Omega^{01}_{0,q}.
\end{gather} Here each identity holds in the sense of distributions on the set where the kernels are non-singular.
To integrate on submanifolds of ${\bf C}^n$, let us see how a sign changes when an exterior differentiation interchanges with integration. Following~\cite{MR1800297}*{p.~263}, define $$
\int_{y\in M} u(x,y)dy^J\wedge dx^I = \left \{\int_{y\in M}u (x,y)dy^J\right\}dx^I $$ for a function $u$ on a manifold $M$ with boundary. For the exterior differential $d_x$, we have \begin{gather}\label{checksign} d_x\int_{y\in M}\phi(x,y) =(-1)^{\dim M}\int_{y\in M}d_x\phi(x,y). \end{gather} Stokes' formula has the form \begin{gather} \int_{y\in\partial M}\phi(x,y)\wedge \psi(y)=\int_{y\in M}\Bigl\{ d_y \phi(x,y)\wedge \psi(y)+ (-1)^{\deg \phi} \phi(x,y)\wedge d\psi(y)\Bigr\}, \label{checksign+} \end{gather} where $\deg\phi$ is the total degree of $\phi$ in $(x,y)$. Throughout the paper, we use notation $$D^{1\dots\ell}:=D^1\cap\cdots\cap D^\ell,$$ which is a relatively compact piecewise $C^1$ domain by \rd{pck}. We choose orientations so that $$ \int_{D^{1\dots\ell}}\, df=\sum_{i=1}^\ell\int_{\overline D^{1\dots\ell}\cap\partial D^i}f. $$
Then we define \eq{d12s}
\partial{D^{12}}=S^1\cup S^2,\quad S^i=\overline{D^{12}}\cap \partial D^i, \quad \partial S^1=S^{12}, \quad \partial S^2=S^{21}. \end{equation} Thus Stokes' formula has the following special cases $$ \int_{D^1\cap D^2}df=\int_{S^1}f+\int_{S^2}f,\quad \int_{S^1}df=\int_{S^{12}}f, \quad \int_{S^2}df=\int_{S^{21}}f. $$
We introduce integrals on domains and lower-dimensional sets: \begin{gather} \label{defnLR} R_{D, q}^{i_1\dots i_\ell}f(z)=\int_{D}\Omega^{i_1\dots i_\ell}_{0,q}(z,\zeta)\wedge f(\zeta), \quad L_{i_1\cdots i_\mu, q}^{j_1\dots j_\nu}f=\int_{S^{i_1\cdots i_\mu}}\Omega_{0,q}^{j_1\cdots j_\nu}\wedge f. \end{gather} When $f$ is said to have type $(0,q)$, we write $R_{D,q}f$, $L_{\bigcdot,q}^{\bigcdot}f$ as $R_{D}f$, $L_{\bigcdot}^{\bigcdot}f$, respectively.
Let $E_D\colon C^0(\overline D)\to C^0_0({\bf C}^n)$ be the Stein extension operator such that \eq{prop-E}
|Eu|_{{\bf C}^n,r}\leq C_r(D)|u|_{D,r} \end{equation} where the H\"older-Zygmund norm is defined in Section~\ref{h-space}. Note that the extension exists for any bounded Lipschitz domain $D$. See~~\cite{MR3961327} for a proof of the extension property and references therein.
The main purpose of this section is to
derive the following approximate homotopy formula on a piecewise $C^2$ domain. \pr{hf} Let $D^{12}\subset{\bf C}^n$ be a bounded piecewise $C^2$ domain. Let $S^1,S^2$ be given by \rea{d12s}. Let $U^1\subset{\bf C}^n\setminus D^{1}$ be a bounded piecewise $C^1$ domain such that $\partial U^1=S^1\cup S^1_+$ with $S^1_+=\partial U^1\setminus S^1$. Suppose that $g^1(z,\zeta)$ is a $C^1$ Leray map on $D^{12}\times \overline{U^1}$ and $g^2$ is a $C^1$ Leray map on $D^{12}\times S^2$.
Let $ f$ be a $(0,q)$-form such that $ f$ and $\overline\partial f$ are in $C^1(\overline{ D^{12}})$. Then on $D^{12}$ we have \begin{gather}\label{tsqf}
f=L_{1,q}^1f+L_{2,q}^2f+L_{12, q}^{12}f+\overline\partial H_q f+H_{q+1}\overline\partial f,\quad \text{if $q\geq1$},\\ \label{tsqf+}
f=L_{1,0}^1f+L_{2,0}^2f+L_{12,0}^{12}f+H_1\overline\partial f, \quad \text{if $q=0$} \end{gather}
where \begin{align}{}
\label{hqf}
H_q f&:=H^{(1)}_q f+ H_q^{(2)}f, \\ \label{hq1} H^{(1)}_q f&:=R_{U_1\cup D^{12}, q-1}^0 E f+R_{U_1,q-1 }^{01}[\overline\partial,E] f, \quad q>0,
\\
\label{hq2}
H^{(2)}_qf&:=-R^1_{U_1,q-1}Ef+L_{1^+,q-1}^{01} Ef +L_{2,q-1}^{02} f+L_{12,q-1}^{012}f,\\ L^{01}_{1^+, q-1}Ef&:=\int_{S^1_+}\Omega^{01}_{0,q-1}\wedge Ef, \\ H_0 f&:=\int_{\partial D^{12}}\Omega_{0,0}^1 f-\int_{U_1}\Omega_{0,0}^1\wedge E\overline\partial f=\int_{U_1}\Omega_{0,0}^1\wedge [\overline\partial, E] f.
\label{H0f} \end{align} \end{prop} \begin{rem}Formula \re{tsqf} is called an \emph{approximate homotopy formula} due to the presence of the boundary integrals $L^1_1,L^{2}_2,L^{12}_{12}$. We will get rid of these boundary integrals under further conditions on the Levi-form of $\partial D^1$. \end{rem} \begin{proof}
We first consider the case $q\geq1$. We recall the Bochner-Martinelli-Koppelman formula~\cite{MR1800297}*{p.~273} and a version for piecewise $C^1$ domains~~\cite{MR986248}*{Thm 3.12, p.~53}:
\begin{align}\label{BM}
f(z)&=\overline\partial_z\int_{D^{12}}\Omega_{0,q-1}^0(z,\zeta)\wedge f(\zeta)+\int_{D^{12}}\Omega_{0,q}^0(z,\zeta)
\wedge\overline\partial f\\
\nonumber &\quad +\int_{\partial{D^{12}}}\Omega_{0,q}^0(z,\zeta)\wedge f(\zeta).
\end{align}
Here and in what follows, we assume $z\in D^{12}$. To apply Stokes' formula for piecewise smooth set $\partial D^{12}$, we use notation in \re{kop1}-\re{kop2} and rewrite the last term as \begin{align*} \int_{\partial {D^{12}}}\Omega_{0,q}^0(z,\zeta)\wedge f(\zeta) &=\int_{S^1}\Omega_{0,q}^0(z,\zeta)\wedge f(\zeta)+\int_{S^2}\Omega_{0,q}^0(z,\zeta)\wedge f(\zeta). \end{align*} Using Koppelman's lemma and Stokes' formula for $S^1$ with $\partial S^1=S^{12}$, we obtain \begin{align*} \int_{S^1}&\Omega_{0,q}^0(z,\zeta)\wedge f(\zeta)= L_1^1f(z) +\int_{S^1}\left(\overline\partial_\zeta\Omega_{(0,q)}^{01}(z,\zeta)+\overline\partial_z\Omega^{01}_{0,q-1}\right)\wedge f(\zeta) \\ &=L_1^1f(z) +L^{01}_{12}f(z)-\int_{S^1}\Omega_{(0,q-1)}^{01}(z,\zeta)\wedge\overline\partial_\zeta f(\zeta)- \overline\partial_z\int_{S^1}\Omega_{(0,q-1)}^{01}(z,\zeta)\wedge f(\zeta). \end{align*} Analogously, we get \begin{align} L_2^0f=L_2^2f+L_{21}^{01}f -L_{2}^{02}\overline\partial f - \overline\partial_zL_2^{02} f. \nonumber \end{align} Using $L^{02}_{21}f=-L^{02}_{12}f$, we get $$ L^{01}_{12}f+L_{21}^{02}f=-L_{12}^{12}f+\int_{S^{12}}\overline\partial_z\Omega^{012}_{0,q-1}\wedge f+\int_{S^{12}}\overline\partial_\zeta\Omega^{012}_{0,q}\wedge f. $$ Applying Stokes' theorem to last term and using $\partial(S^1\cap S^2)=\emptyset$, we obtain from \re{checksign+} $$ L^{01}_{12}f+L_{21}^{02}f=-L_{12}^{12}f+\overline\partial L_{12}^{012}f +L_{12}^{012}\overline\partial f. $$ This shows that \begin{align}\label{BM+}
f(z)&=- \overline\partial_z\int_{S^1}\Omega_{0,q-1}^{01}(z,\zeta)\wedge f(\zeta)+\overline\partial_z\int_{D^{12}}\Omega_{0,q-1}^0(z,\zeta)\wedge f(\zeta)\\ \nonumber&\quad-\int_{S^1}\Omega_{0,q}^{01}(z,\zeta)\wedge\overline\partial_\zeta f(\zeta) +\int_{D^{12}}\Omega_{0,q}^0(z,\zeta)
\wedge\overline\partial f\\ \nonumber &\quad-L_{12}^{12}f + \overline\partial L_{12}^{012}f+L_{12}^{012}\overline\partial f
-L_2^{02}\overline\partial f -\overline\partial L_2^{02}f.
\end{align} Next, we transform both integrals on $S^1$ into volume integrals using Stokes' formula. Here we need to modify the methods in Lieb-Range~\cite{MR597825} and~~\cite{MR3961327}, since $S^1$ is not boundary free.
With orientations, we have $\partial U_+= S^1_+-S^1$.
By Stokes' formula and
~\cite{MR3961327}*{(2.12)}, we have \begin{align}\label{keyidsim0} &-\int_{\zeta\in S^1}\Omega_{(0,q-1)}^{01}( z,\zeta )\wedge f(\zeta)+ L^{01}_{1^+}E f(z)=\\
&
\nonumber\qquad \qquad\int_{U_1 } \Omega_{(0,q-1)}^{01}(z,\zeta)\wedge\overline\partial E f(\zeta) + \int_{U_1 } \Omega_{0,q-1}^{0}(z,\zeta)\wedge E f(\zeta)\\ &\nonumber\qquad \qquad-\int_{U_1 } \Omega_{0,q-1}^{1}(z,\zeta)\wedge E f(\zeta)+ \int_{U_1 } \overline\partial_z\Omega_{(0,q-2)}^{01}(z,\zeta)\wedge E f(\zeta). \end{align} This shows that \begin{align}\label{keyidsim} R^0_{D^{12}}f-L_{1}^{01} f&=- L^{01}_{1^+}E f(z)+ R_{U_1 } ^{01}\overline\partial E f\\ &\quad + R_{U_1\cup D^{12} } ^{0} E f-R_{U_1 }^ {1} E f+\overline\partial R_{U_1 } ^{01} E f.\nonumber \end{align} After applying $\overline\partial$, the last term will be dropped. This shows that \begin{align}\label{keyidsim+} \overline\partial R^0_{D^{12}}f- \overline\partial L_1^{01}f &=-\overline\partial L^{01}_{1^+}Ef- \overline\partial R_{U_1}^{1} E f\\ &\quad+\overline\partial R_{U_1 } ^{01}\overline\partial E f+ \overline\partial R_{U_1\cup D^{12} } ^{0} E f(\zeta). \nonumber\end{align} We apply \re{keyidsim} in which $f$ is replaced by $\overline\partial f=E\overline\partial f$ to obtain \begin{align}\label{keyidsimb} R_{D^{12}}^0\overline\partial f-L_{1}^{01}\overline\partial f&=-L_{1+}^{01}E\overline\partial f
+R_{U_1 } ^{01}\overline\partial E\overline\partial f + R_{U_1\cup D^{12} } ^{0} E\overline\partial f\\ &\nonumber \quad-R_{U_1 } ^{1} E\overline\partial f + \overline\partial R_{U_1 } ^{01} E\overline\partial f(\zeta). \end{align} We can pair the last term with the second last term in \re{keyidsim+} to form the desired commutator $[E,\overline\partial] f$. Finally, we write $\overline\partial E\overline\partial f=[E,\overline\partial]\overline\partial f$. This completes the proof of \eqref{tsqf}. The above proof is still valid for \eqref{tsqf+} (case $q=0$), as the Koppelman lemma holds with $\Omega_{0,-1}=0$.
Strictly speaking, the above computation is only valid when $\partial D\in C^3$, since the Koppelman lemma can be verified easily when all Leray maps $g^j\in C^2$. When $\partial D^i\in C^2$, one can still verify the integral formula on the domain $D^1\cap D^2$ by smoothing $g^j$. For instance, see \cites{MR3961327, MR4289246} for details. \end{proof}
\setcounter{equation}{0} \section{A local homotopy formula for
$(n-q)$ convex configuration}
The main purpose of this section is to construct a local homotopy formula near a strictly $(n-q)$ convex boundary point.
In \rp{hf}, we have derived a local approximate homotopy formula \re{tsqf} for a $(0,q)$ form $f$: $$ f=L_1^1f+L_2^2f+L_{12}^{12}f+\overline\partial H_q f+H_{q+1}\overline\partial f.
$$
To obtain a genuine local homotopy formula, we will show that the boundary integrals $L^1_1f, L^2_2f, L^{12}_{12}f$ vanish when the boundary is $(n-q)$ convex and the Leray mappings $g^1,g^2$ are chosen appropriately.
The constructions in this section and the next are inspired by Henkin-Leiterer~~\cite{MR986248}.
Throughout the paper let $B_r=\{z\in{\bf C}^n\colon|z|<r\}$ be the ball of radius $r$. We first transform an $(n-q)$-convex domain $D$ into a new form $D^1$.
\le{convex-rho}Let $D\subset U$ be a domain defined by a $C^2$ function $\rho^0<0$ satisfying $\nabla\rho^0\neq0$ at each point of $U\cap\partial D$. Suppose that $\partial D$ is $(n-q)$-convex at $\zeta\in U$. \bpp \item There is a local biholomorphic mapping $\psi$ defined in an open set $U$ containing $\zeta$ such that $\psi(\zeta)=0$ while $D^1:=\psi(U\cap D)$ is defined by \eq{qconv-nf}
\rho^1(z)=-y_{n}+\lambda_1|z_1|^2+\cdots+\lambda_{q-1}|z_{q-1}|^2+|z_{q}|^2+
\cdots+|z_{n}|^2+R(z)<0,
\end{equation} where $|\lambda_j|\leq1$ and $R(z)=o(|z|^2)$. There exists $r_1>0$ such that the boundary $\partial\psi(U\cap D)$ intersects the sphere $\partial B_r$ transversally when $0<r<r_1$. Furthermore, the function $R$ in \rea{qconv-nf} is in $C^a(B_{r_1})$ $($resp. $\Lambda^a(B_{r_1}))$,
when $\rho^0\in C^a(U)$ with $a\geq2$ $($resp. $\Lambda^a(U)$ with $a>2)$.
\item Let $\psi$ be as above. There exists $\delta(D)>0$ such that if $\tilde D$ is defined by $\tilde\rho^0<0$ and $\|\tilde\rho^0-\rho^0\|_2<\delta(D)$, then $\psi(U\cap\tilde D)$ is given by \eq{qconv-nf-t}
\tilde\rho^1(z)=-y_{n}+\lambda_1|z_1|^2+\cdots+\lambda_{q-1}|z_{q-1}|^2+|z_{q}|^2+
\cdots+|z_{n}|^2+\tilde R(z)<0
\end{equation} with $|\tilde R-R|_{B_{r_1},a}\leq C_a|\tilde\rho^0-\rho^0|_{U,a}$ for $a>2$ and $\|\tilde R-R\|_{B_{r_1},a}\leq C_a\|\tilde\rho^0-\rho^0\|_{U,a}$ for $a\geq2$.
There exists $r_1>0$ such that the boundary $\partial\psi(U\cap \tilde D)$ intersects the sphere $\partial B_{r_2}$ transversally when $r_1/2<r_2<r_1$. \end{list} Here $\delta(D)$ depends on the modulus of continuity of $\partial^2\rho^0$. \end{lemma}
\begin{rem} For the rest of paper, we will refer to $(\tilde D^1,\tilde\rho^1)$ as $(D^1, \rho^1)$ indicating the various estimates are \emph{uniform} in $\tilde\rho$ or \emph{stable} under small $C^2$ perturbations of $\partial D$. We refer to $B_{r_2}$ as $D_{r_2}^2$ or $D^2$ and set $\rho^2=|\zeta|^2-r_2^2$ with restriction $r_1>r_2>r_{1}/2$. We still refer to $\psi(U)$ as $U$. \end{rem} \begin{proof} (a) Let $D$ be defined by $\rho^0<0$. We may assume that $\zeta=0$. Permuting coordinates yields $\rho^0_{z_n}\neq0$. Let $\tilde z_n=2\rho^0_\zeta\cdot(\zeta-z)-\sum \rho^0_{\zeta_j\zeta_k}(\zeta_j-z_j)(z_k-\zeta_k)$ and $\tilde z'=z'$. Then $\rho^0(z)=\rho_1(\tilde z)$, where the new domain has a defining function $$
\rho_1(z)=-y_n+\sum a_{j\overline k}z_j\overline z_k+o(|z|^2). $$
Choose a nonsingular matrix $U$ with $U\tilde z=z$ and $\tilde z_n=z_n$. The new defining function $\rho_2(\tilde z)=\rho_1(z)$ has the form $$\rho_2(z)=-y_n+\sum_{j<q}\lambda_j|z_j|^2+\sum_{j=q}^{n-1}|z_{j}|^2 +\sum_{j=1}^{n}\operatorname{Re}\{a_jz_j\overline z_n\}+o(|z|^2), $$ where $a_n$ is a real constant. Setting $\rho_3=\rho_2+\mu\rho_2^2$ with $\mu=a_{n}+1$, we get $$
\rho_3(z)=-y_n+\sum_{j<q}\lambda_j|z_j|^2+\sum_{j=q}^{n}|z_{j}|^2+\operatorname{Re}\Bigl\{\mu z_n^2+\sum_{j=1}^{n-1}a_jz_j\overline z_n\Bigr\}+o(|z|^2). $$ Using new coordinates $\tilde z_n=z_n-i\mu z_n^2+iz_n\sum_{j=1}^{n-1}a_j$ and $\tilde z'=z'$, we get $$
\rho_4(z)=-y_n+\lambda_1|z_1|^2+\cdots+\lambda_{q-1}|z_{q-1}|^2+|z_{q}|^2+
\cdots+|z_{n}|^2+y_n\operatorname{Re}\{\sum_{j=1}^{n}b_jz_j\}+o(|z|^2). $$
We get $|\lambda_j|<1$ after a dilation. Then $\rho_4+\rho_4\operatorname{Re}\{\sum b_jz_j\}$, renamed as $\rho^1$, has the form \re{qconv-nf}. As usual, the implicit function theorem can be proved by using the inverse mapping theorem~~\cite{MR0385023}*{pp.~224-225}. Then the inverse mapping theorem for Zygmund spaces~~\cite{GG} yields the desired smoothness of $R$.
The details are left to the reader. The transversality of $\partial D^1$ and $\partial D^2_{r_2}$ also follows from the computation below.
(b) The above construction of $\psi$ is explicit in $\rho^0$ with the exception of the linear change of coordinates $z\to Uz$ that is fixed for all small perturbations $\tilde\rho^0$ of $\rho^0$. Thus, it is easy to check that $\|\tilde R-R\|_a\leq C_a\|\tilde\rho^0-\rho^0\|_a$ for $a\geq2$ and $|\tilde R-R|_a\leq C_a|\tilde\rho^0-\rho^0|_a$ for $a>2$.
We want to show that $\nabla \tilde\rho^1$ is not proportional to $\nabla\rho^2$ on the common zero set of $\tilde\rho^1,\rho^2$. Suppose that $\nabla\rho^2=\mu\nabla\tilde\rho^1$
when $\tilde\rho^1(z)=\rho^2(z)=0$. We get $2y_n=\mu (-1+2 y_n+\tilde R_{y_n})$. When $r<1/4$ and $\delta(D)$ are sufficiently small, by $|z|=r$ we obtain $|-1+2 y_n+\tilde R_{y_n}|<1/2$. Hence $-\mu^{-1}y_n\in(1/4,3/4)$ as \eq{}
\|\tilde R\|_{2}<1/C. \end{equation} For $j<n$, we have
$2y_j=2\mu\tilde\rho_{y_j}$. This shows that $|y_j|\leq C|y_n|$. Also, $|x_k|\leq C|y_n|$. Thus $\tilde\rho^1(z)=0$ implies $|y_n|\leq C'|y_n|^2+|\tilde R(z)|$. In view of $C'|y_n|<1/2$, we get $$
|z|\leq C\|R\|_2|z|^2+C\delta(D). $$
By choosing $\delta(D)$ depending on $r_1$, we get $|z|<r_1^2/C$. The latter contradicts the vanishing of $\rho^2(z)=|z|^2-r^2$ since $r>r_1/2$.
\end{proof}
Recall that our original domain $D$ is normalized as $D^1$. We now fix notation. Let $(D^1,U,\phi,\rho^1)$ be as in \rl{convex-rho}. Thus $\rho^1$ is given by \re{qconv-nf} (or \re{qconv-nf-t}). Recall that \eq{rho2}
\rho^2=|z|^2-r^2_2 \end{equation}
where $0<r_2<r_1$ and $r_1/2<r_2<r_1$ for \rl{convex-rho} (a), (b). Let us define \begin{gather}{}\label{d12} D^1\colon\rho^1<0, \quad D^2\colon\rho^2<0, \quad D^{12}=D^1\cap D^2,\\ \label{s12} \partial D^{12}=S^1\cup S^2, \quad S^i\subset\partial D^i,\\ \label{g02}
g^2( z,\zeta )=\partial_{\zeta}\rho^2=\overline\zeta. \end{gather} It is well-known that \eq{W2}
| g^2(z,\zeta)\cdot(\zeta-z)|>0, \quad (z,\zeta)\in D^2\times\partial D^2. \end{equation}
\le{}Let $(D^1,U,\phi,\rho^1)$ be as in \rla{convex-rho}. Define \eq{}\label{g1} g^{1}_j( z,\zeta )=\begin{cases} \DD{\rho^1}{\zeta_j},&q\leq j\leq n,\\ \DD{\rho^1}{\zeta_j}+(\overline\zeta_j-\overline z_j),& 1\leq j<q. \end{cases} \end{equation} Then for $\zeta,z\in U$ and by shrinking $U$ if necessary, we have \begin{gather}\label{W1-dist}
2\operatorname{Re}\{ g^1( z,\zeta )\cdot(\zeta-z)\}\geq \rho^1(\zeta)-\rho^1(z)+\f{1}{2}|\zeta-z|^2, \\
|g^1(z,\zeta)\cdot(\zeta-z)|\geq 1/C_*, \quad \forall\zeta\in S^{12}, |z|<\delta_0(D).\label{g1z}\end{gather}
\end{lemma} \begin{proof} We have $
\operatorname{Re}\{ g^1( z,\zeta )\cdot(\zeta-z)\}=\operatorname{Re}\{ \rho^1( z,\zeta )\cdot(\zeta-z)\}+\sum_{j<q}|\zeta_j-z_j|^2. $
By Taylor theorem, we have \eq{taylor} \rho^1(z)-\rho^1(\zeta)=2\operatorname{Re}\{\rho^1_{\zeta}\cdot(z-\zeta)\}+ H_\zeta\rho^1(z-\zeta)+\operatorname{Re}\{\rho^1_{\zeta_j\zeta_k}(z_j-\zeta_j)(z_k-\zeta_k)\}+R( z,\zeta ). \end{equation} Note that $H_\zeta\rho$, restricted to $(z_q,\cdots, z_n)$, is a positive definite quadratic form. Also, $\rho_{\zeta_j\zeta_k}$ and second-order derivatives of $R$ are small. We can show that \begin{align*}\rho^1(z)-\rho^1(\zeta)
&\geq2\operatorname{Re}\{\rho^1_{\zeta}\cdot(z-\zeta)\}+\sum_{j\geq q}|\zeta_j-z_j|^2-c|\zeta-z|^2 \end{align*} where $c<1/2$. Now \re{W1-dist} follows from \re{qconv-nf}. By \re{taylor} and \re{W1-dist} for case \re{qconv-nf}, we also get \re{W1-dist} for the $\tilde\rho^1$ in
\re{qconv-nf-t} when $\|\tilde\rho^1-\rho^1\|_2\leq C\delta(D)$ is small. Note that \re{g1z} follows from \re{W1-dist}. \end{proof}
\begin{defn}\bpp \item As in~~\cite{MR986248}, the $(U,D^1,\psi,\rho^1, \rho^2)$ in \rl{convex-rho} (a) and \re{rho2} is called an {\it $(n-q)$-convex configuration}. \rl{convex-rho} (b) is referred to as the {\it stability} of the configuration. For brevity, we call $(D^1,D^2_{r_2})$ an $(n-q)$-convex configuration. \item The $g^1,g^2$ given by \re{g02} and \re{g1} are called the {\it canonical Leray maps} for the $(n-q)$-convex configuration $(D^1,D^2_{r_2})$.
\end{list} \end{defn}
Note that $g^2(z,\zeta)$ is holomorphic in $z$ and $g^1(z,\zeta)$ is anti-holomorphic in merely $q-1$ variables of $z$. Checking the types, we have \begin{gather} \label{type-2}
\Omega_{0,k}^{2}( z,\zeta )=0, \quad k\geq1,\quad \overline\partial_z\Omega^2_{0,0}(z,\zeta)=0;\\ \label{type-1}
\Omega_{0,k}^{1}( z,\zeta )=0, \quad k\geq q;\quad \overline\partial_z\Omega_{0,q-1}^1( z,\zeta )=0;
\\ \label{type-12} \Omega_{0,k}^{12}( z,\zeta )=0;\quad k\geq q;\quad \overline\partial_z\Omega_{0,q-1}^{12}( z,\zeta )=0. \end{gather} By \re{type-2}-\re{type-12}, we have \begin{gather}\label{L110} L_i^if(z)=\int_{S^i}\Omega_{0,q}^i(z,\zeta)\wedge f(\zeta)=0, \ i=1,2; \quad L_{12}^{12}f=0;\\
\overline\partial_z\int_{U_1}\Omega_{0,q-1}^1(z,\zeta)\wedge Ef=0, \quad \int_{U_1}\Omega_{0,q}^1(z,\zeta)\wedge E\overline\partial f=0. \end{gather}
This shows that $H^{(2)}_q$, given by \re{hq2}, can be written as \eq{nhq2} H^{(2)}_qf=L_{1^+}^{01} Ef +L_{2}^{02} f+L_{12}^{012}f. \end{equation} Therefore, we have obtained the following local homotopy formula. \begin{thm}\label{hf-c} Let $ 0<q\leq n$. Let $(D^1,D_{r_2}^2)$ be an $(n-q)$-convex configuration with Leray maps \rea{g02}-\rea{g1}.
Suppose that $U^1\subset{\bf C}^n\setminus\overline{D^1}$, $\partial U^1=S^1\cup S^1_+$ and $S_+^1\cap D^{12}=\emptyset$.
Suppose that $ f$ is a $(0,q)$ form such that $f$ and $\overline\partial f$ are in $C^1(\overline {D^{12}})$. Then on $D^{12}$ \begin{gather}\label{tsqf-c}
f= \overline\partial H_q f+ H_{q+1}\overline\partial f
\end{gather} with $H_q=H_q^{(1)}+H_q^{(2)}$. Here $H^{(1)}_q$ is defined by \rea{hq1} and \begin{align}{} \label{Hqv}
H_q^{(2)} f&:= \int_{S^1_+}\Omega_{0,q}^{01}\wedge Ef+\int_{ S^2}\Omega^{02}_{0,q-1}\wedge f+\int_{S^{1}\cap S^{2}}\Omega_{0,q}^{012}\wedge f.
\end{align}
\end{thm} To be used later we remark that the integral kernel in $H^{(2)}_q$ is smooth, since $S^1_{+}, S^2$ and $ S^{12}$ do not intersect small neighborhoods of the origin in $\partial D^1$. Therefore, terms in $H^{(2)}_q$ can be estimated easily, while the main term $H_q^{(1)}$ in $H_q$ will be estimated in section~\ref{sec:1form}.
\setcounter{equation}{0} \section{A local $\overline\partial$ solution operator for
$(q+1)$-concave configuration}
We recall again from \rp{hf} the approximate local homotopy formula for a $(0,q)$ form $f$: \eq{f=l11} f=L_1^1f+L_2^2f+L_{12}^{12}f+\overline\partial H_q f+H_{q+1}\overline\partial f.
\end{equation}
As in the strictly $(n-q)$-convex case, we will show that $L^1_1f, L^2_2f$ vanish when the boundary is $(q+1)$-concave and the Leray mappings $g^1,g^2$ are chosen appropriately. However, the boundary integral $L^{12}_{12}f$ may not vanish. We will show that this term is $\overline\partial$-closed for a $\overline\partial$-closed $f$, and this allows us to use a third Leray mapping to transform it into a genuine $\overline\partial$ solution operator $f=\overline\partial H_qf$ for possibly different $H_q$. Thus, the $(q+1)$ concavity is sufficient to construct a $\overline\partial$ solution operator.
The presence of $L_{12}^{12}f$ will lead to a subtlety. For a local homotopy formula for forms which are not necessarily $\overline\partial$-closed, we however need an {\it extra} negative Levi eigenvalue, which will be assumed at the end of the section.
The following is a restatement of \rl{convex-rho}, by considering the complement $(D^1)^c$ and $-\rho_0$ where $\rho_0$ defines $D^1$. We include the last assertion on $D^1_*$, which holds obviously. The $D^1_*$ has $C^\infty$ boundary and it will be useful to obtain a sharp regularity for $\overline\partial u=f$ when $f$ is a $(0,1)$ form and $\partial D$ is merely $C^2$.
\le{concave-rho}Let $D\subset U$ be a $C^2$ domain.
Suppose that $U\cap\partial D$ is strictly $(q+1)$-concave at $\zeta\in U\cap\partial D$. Let $D,\tilde D$ be domains in $U$ defined by $\rho^0<0,\tilde\rho^0<0$ respectively. There is a biholomorphic mapping $\psi$ with $\psi(\zeta)=0$ such that $D^1=\psi(D\cap U)$ and $\tilde D^1=\psi(\tilde D\cap U)$ are defined by $\rho^1<0$ and $\tilde\rho^1<0$ with \begin{gather}\label{rho1-v}
\rho^1(z)=-y_{q+2}-|z_1|^2-\cdots-|z_{q+2}|^2+\lambda_{q+3}|z_{q+3}|^2+
\cdots+\lambda_n|z_n|^2+R(z),\\ \label{rho1-t-v}
\tilde\rho^1(z)=-y_{q+2}-|z_1|^2-\cdots-|z_{q+2}|^2+\lambda_{q+3}|z_{q+3}|^2+
\cdots+\lambda_n|z_n|^2+\tilde R(z). \end{gather}
Here $|\lambda_j|<1$ for $j>q+2$. Also assertions $(a),(b)$ in \rla{convex-rho} are valid. Furthermore, $D^1\subset\psi(U)\colon\rho^1<0$ contains $D^1_*\subset B_r\colon\rho^1_*<0$ for \eq{rho1*}
\rho^1_*= -y_{q+2}-\f{1}{2}|z_1|^2-\cdots-\f{1}{2}|z_{q+2}|^2+2|z_{q+3}|^2+
\cdots+2|z_n|^2. \end{equation} \end{lemma} As in \rl{convex-rho}, we rename $\psi(U)$ as $U$.
When $\rho^1$ has the form \re{rho1-v}, as in~~\cite{MR986248} define
\eq{}\label{HL2pg81} g^1_{j}( z,\zeta )=\begin{cases} \DD{\rho^1}{z_j},&1\leq j\leq q+2,\\ \DD{\rho^1}{z_j}+\overline z_j-\overline \zeta_j,& q+3\leq j\leq n. \end{cases} \end{equation} Note that this kind of Leray mapping was first used by Hortmann~~\cite{MR422688, MR627759} for strictly concave domains. Then we have \eq{W1s-dist}
-2\operatorname{Re}\{g^1( z,\zeta )\cdot(\zeta-z)\}\geq \rho(\zeta)-\rho(z)+|\zeta-z|^2/C. \end{equation} An essential difference between the $q$-convex and $(q+1)$ concave cases is that $g^1( z,\zeta )$ is no longer $C^\infty$ in $z$ when $\partial D$ is only finitely smooth. A useful feature is that $g^1(z,\zeta)$ is holomorphic in $\zeta_1,\dots, \zeta_{q+2}$.
As in the $(n-q)$-convex case, we use $ D^2_{r_2}\colon
\rho^2(z):=|z|^2-r_2^2<0. $ We still take Leray maps $g^0(z,\zeta)=\overline\zeta-\overline z$ and $ g^2=(\f{\partial\rho^2}{\partial\zeta_1},\dots, \f{\partial\rho^2}{\partial\zeta_n})=\overline \zeta. $ Denote by $\text{deg}_\zeta$ the degree of a form in $\zeta$. We get \begin{gather} \label{Ca-type-1} \text{deg}_\zeta\, \Omega_{0,*}^{1}\leq 2n-q-2. \end{gather} Therefore, we still have \re{L110}. Thus \re{f=l11} becomes \eq{f=l11+} f=L_{12}^{12}f+\overline\partial H_q f+H_{q+1}\overline\partial f.
\end{equation}
However, unlike the $(n-q)$ convex case, $ L_{12}^{12}f=\int_{S^{12}} \Omega^{12}_{0,q}\wedge f $ may not be identically zero. Let us try to transform this integral via Stokes' formula. We intersect $D^1\cap D^2$ with a third domain \eq{defD3} D^3\colon \rho^3<0, \quad 0\in D^3 \end{equation} where \eq{rho3}
\rho^3=-y_{q+2}+\sum_{j=q+3}^n3|z_j|^2-r^2_3 \end{equation}{} with $r_3>0$.
Define \begin{gather} \label{defW-3} g^{3}_j( z,\zeta )=\begin{cases} 0,&1\leq j<q+2,\\ \f{i}{2},&j= q+2,\\ 3(\overline\zeta_j+\overline z_j),& q+3\leq j\leq n. \end{cases} \end{gather} We can verify \eq{HH} \operatorname{Re}\{g^3(z,\zeta)\cdot(\zeta-z)\}=\rho^3(\zeta)-\rho^3(z). \end{equation}
Then we have the following. \le{y123} Let $\rho^i, g^i$ be defined by \rea{rho1-v}, \rea{HL2pg81}, \rea{rho2}, \rea{g02}, \rea{rho3}, \rea{defW-3} for $i=1,2,3$. \bpp \item There exists $r_1\in(0,1/6)$ such that $\partial D^1,\partial D^2_{r_2},\partial D^3_{r_3}$ are pairwise in the transversal position when $0<C_nr_3<r_2<r_1$ and $r_1<2r_2$.
\item Let $\tilde\rho^0,\tilde\rho^1$ be as in \rla{concave-rho}. If $\delta(D)$ is sufficiently small, $\|\tilde \rho^0-\rho^0\|_2<\delta(D)$, and $1/{C_n'}<C_nr_3<r_2<r_1$ and $r_2<r_1/2$, then $\partial\tilde D^1,\partial D^2_{r_2},\partial D^3_{r_3}$ are also pairwise in general position.
\item Let $r_1,r_2,r_3$ be as in $(b)$. Then
\begin{gather}\label{gizC}\partial\tilde D^1\cap\partial D^2_{r_2}\cap D^3_{r_3}=\emptyset,\quad
\partial\tilde D^1\cap\partial D^3_{r_3}\cap\partial D^2_{r_2}=\emptyset,\\
|g^i(z,\zeta)\cdot(\zeta-z)|\geq 1/C, \quad \forall(z,\zeta)\in B_{r_4} \times \overline{D^2_{r_2}\setminus(D^1\cup D^3_{r_3})}, \ i=0,1,2,3,
\label{gicZ+}\\ S^{12}\subset \overline{D^2_{r_2}\setminus(D^1\cup D^3_{r_3})}. \label{gicZ=}
\end{gather}
\end{list} \end{lemma} \begin{proof}
$(a)$ Suppose $\nabla\rho^1(z)=\mu\nabla\rho^3(z)$, and $\rho^1(z)=\rho^3(z)=0$. We have $-1+R_{y_{q+2}}=\mu$. This shows that $-3/4<\mu<-1/2$. We also have $z_j+o(|z|)=0$ for $j=1,\dots, q+1$,
$x_{q+2}+o(|z|)=0$, and
$\lambda_jz_j+o(|z|)=3\mu z_j$
for $j>q+2$. The latter implies that $|z_j|=o(|z|)$ since $|3\mu|-|\lambda_j|>1/4$. Hence, $\rho^1(z)=0$ yields $y_{q+2}=o(|z|)$. This shows that $z=0$, which contradicts $\rho^3(z)=0$ since $\rho^3(0)=-r_3^2<0$.
To show $\partial D^2_{r_2}$ and $\partial D^3_{r_3}$ intersect transversally, suppose that at an intersection point $z$ we have $\nabla\rho^2=\mu\nabla\rho^3$. We first get $2y_{q+2}=\mu(-1+R_{y_{q+2}})$. Hence $1/4<-\mu^{-1}y_{q+2}<3/4$ and $|\mu|<4|y_{q+2}|$. Then $\nabla\rho^2=\mu\nabla\rho^3$ implies that $|z|\leq C_n|y_{q+2}|$. We get $|y_{q+2}|\geq r_2/{C_n}$ and $|y_{q+2}|\leq r_3+|z|^2\leq r_3+C_n^2|y_{q+2}|^2$. Then $2r_3\geq|y_{q+2}|\geq r_2/{C_n}$, a contradiction to the assumption $r_3<r_2/C$.
That $\partial D_{r}^1,\partial D_{r_2}^2$ intersect transversally is proved in \rl{convex-rho}.
$(b)$ We leave the details to the reader.
$(c)$ Suppose $\tilde\rho^1(z)=0$ and $\rho^3(z)<0$. Then we have
$$
|z_1|^2+\cdots+|z_{q+2}|^2+\sum_{j>q+2}(\lambda_j+3)|z_j|^2-\tilde R(z)<r_3^2.
$$
This shows that $|z|<2r_3$. We obtain the first identity in \re{gizC}. The proof of the second identity in \re{gizC} is similar.
We now verify \re{gicZ+}. The cases for $i=0,2$ are trivial. Case $i=1$ is proved in \re{g1z}. To verify \re{gicZ+} for case $i=3$, by \re{HH} we have $$ \operatorname{Re}\{g^3(z,\zeta)\cdot(\zeta-z)\}=\rho^3(\zeta)-\rho^3(z). $$
Note that $D^3$ is defined by $\rho^3<0$. Thus for $\zeta\not\in D^3_{r_3}$ and $z\in D^3_{r_3}$, we have $\rho^3(\zeta)-\rho^3(z)>0$. When $r_4<r_3/C$, $B_{r_4}$ is contained in $D^3_{r_3}$. This shows that when $z\in B_{r_4}$ and $\zeta\not\in D^3_{r_3}$ we have $\rho^3(\zeta)-\rho^3(z)>1/C$. Now it is easy to see that $\operatorname{Re}(g^3(z,\zeta)\cdot(\zeta-z))>1/C$ when $|z|<1/C$.
Finally, \re{gicZ=} follows from \re{gizC}. \end{proof}
\begin{defn}\label{ccav} The $(D^1, U,\phi,\rho^1)$ in \rla{concave-rho} $(a)$ is called a {\it $(q+1)$-concave configuration}, while \rl{concave-rho} $(b)$ is referred to as the {\it stability} of the configuration. For brevity, we call $(D^1,D^2_{r_2},D^3_{r_3})$ in \rla{concave-rho}, a $(q+1)$-concave configuration, in which $\partial D^1,\partial D^2_{r_2},\partial D^3_{r_3}$ intersect pairwise transversally when $1/{C_n'}<C_nr_3<r_2<r_1$ and $r_2<r_1/2$. The $g^1,g^2,g^3$ in \rea{HL2pg81}, \rea{g02} and \rea{defW-3} are called the \emph{standard Leray maps} of the configuration. \end{defn}
So far, we have been following Henkin-Leiterer~~\cite{MR986248}. We could derive a homotopy formula on $D^1\cap D^2_{r_2}\cap D^3_{r_3}$ as in~~\cite{MR986248}. However, since we only need a local homotopy formula near a boundary point, we now depart from the approach in~~\cite{MR986248}. Let us still use the approximate homotopy formula on $D^1\cap D^2_{r_2}$. Modify it only for $z\in D^1\cap D^2_{r_2}\cap B_{r_4}$ using mainly \re{gizC}-\re{gicZ+} to derive a homotopy formula on this smaller domain. Thus our starting point is still the approximate homotopy formula \re{f=l11}. We will however use Koppelman's lemma for $g^1,g^2,g^3$ on the set $S^{12}$.
Note that the anti-holomorphic differentials $d\overline\zeta_j,d\overline z_k$ appear in $\Omega^3$ as a wedge product in some of $$ d(\overline\zeta_j+\overline z_j), \quad q+3\leq j\leq n. $$ Consequently, $d\overline z_j$ and $d\overline\zeta_j$, having the \emph{same} index, cannot appear in $\Omega_{0,q}^3$ simultaneously. Therefore \begin{gather}\label{type-3} \text{deg}_\zeta\, \Omega^3_{0,\ell}( z,\zeta )\leq n+([n-(q+3)+1]-\ell)= 2n-q-\ell-2, \quad\forall\ell.
\end{gather}
We now derive a result analogous to ~\cite{MR986248}*{p. 122, Lemma 13.6 $(iii)$} but for different boundary integrals $L^{\bigcdot}_{12}$.
\begin{lemma} Let $0<q\leq n-2$. Let $(D^1,D^2_{r_2},D^3_{r_3})$ be a $(q+1)$ concave configuration with Leray maps $(g^1,g^2,g^3)$. Then \begin{gather} \label{db13=0} \Omega^{13}_{0,\ell}( z,\zeta )=0,\quad\ell<q;\quad \overline\partial_\zeta\Omega^{13}_{0,q}( z,\zeta )=0. \end{gather} \bpp\item Suppose that $f$ is a $\overline\partial$-closed $(0,q)$ form on $D^1$ and $f$ is in $C^1(\overline{D^1})$. Then on $D^1\cap D_{r_2}^2\cap D_{r_3}^3$, \begin{gather}
\label{dL2312}L_{12}^{13}f(z)=0,\\
L_{12}^{12}f=-L_{12}^{23}f+\overline\partial L_{12}^{123}f+L_{12}^{123}\overline\partial f. \label{L1212} \end{gather} \item If $(D^1,D_{r_2}^2,D_{r_3}^3)$ is a $(q+2)$-concave configuration and $0<q\leq n-3$, then \rea{dL2312} and \rea{L1212} are valid for any $(0,q)$ forms $f\in C^1(\overline D_1)$. \end{list} \end{lemma} \begin{proof}To verify \re{db13=0}, we note that $\Omega^{13}_{0,q}( z,\zeta )$ has type $(0,q)$ in $z$. It has type $(n,n-2-q)$ in $\zeta$ and it is holomorphic in $\zeta_1,\dots, \zeta_{q+2}$. After taking $\overline\partial_\zeta$, it has type $(n,n-q-1)$ in $\zeta$ for anti-holomorphic variables $\zeta_{q+3}, \dots, \zeta_n$. However, the number of these anti-holomorphic variables is $<n-(q+3)+2=n-q-1$. We have verified \re{db13=0}.
$(a)$ To verify \re{dL2312}, we need an approximation theorem by Henkin-Leiterer~~\cite{MR986248}*{Lemma 12.5 (iii), p. 122}. Fix $z\in B_{r_4}$. By \re{gicZ+}, we know that $\Omega^{13}$ is a continuous $(n,n-q-2)$ form in $\zeta\in K:=\overline{D^2\setminus(D^1\cup D^3)}$. Consequently, we can find a sequence $\omega^\nu_z$ of $\overline\partial_\zeta$-closed continuous $(n,n-q-2)$-forms on $U\supset D^2$ that converges to $\Omega^{13}(z,\cdot)$ uniformly on $K$. Using a standard smoothing, we may assume that $\omega^\nu_z$ are $C^1$ in $\zeta\in D^2$, $\overline\partial$-closed and approximate $\Omega^{13}(z,\cdot)$ uniformly on $K$. By \re{gicZ=}, $S^{12}$ is contained in $K$. Applying Stokes' formula to $\omega^\nu_z$ on $K$, we obtain for $z\in D^1\cap D^2\cap B_{r_4}$, $$ L_{12}^{13}f=\lim_{\nu\to\infty}\int_{S^{12}}\omega^\nu_z\wedge f=\lim_{\nu\to\infty}\int_{S^1}\omega^\nu_z\wedge \overline\partial_\zeta f=0. $$ Now \re{L1212} follows from
Koppelman's lemma: $$ \Omega_{0,q}^{12}=\Omega_{0,q}^{13}-\Omega_{0,q}^{23}+\overline\partial_\zeta \Omega_{0,q}^{123}+\overline\partial_z \Omega_{0,q-1}^{123}. $$
$(b)$ Note that when $(D^1,D^2,D^3)$ is a $(q+2)$ concave configuration, we have $L^{13}_{12}f=0$ for any $(0,q)$ forms $f$ that are not necessarily $\overline\partial$-closed by \re{db13=0} that now holds for $\ell<q+1$. We get the desired conclusion immediately.
\end{proof} \begin{remark}\label{no-control} In the proof for case $(a)$, we do not have any control on $\omega^\nu$ outside $\overline {D_{r_2}^2\setminus(D^1\cup D_{r_3}^3)}$ other than the uniform convergence. Therefore, in $(a)$ it is crucial that $f$ is $\overline\partial$-closed.\end{remark} We recall the homotopy formula $$ f=\overline\partial T_{q,B_{r_4}}f+T_{q+1,B_{r_4}}\overline\partial f $$
on the ball $B_{r_4}$ centered at the origin with radius $r_4$.
We now transform $L_{12}^{23}f$ in \re{L1212}.
The following is analogous to \cite{MR986248}*{Lemma 13.7, p. 125} for $L_{23}^{23}$. \begin{lemma} Let $1\leq q\leq n-2$. Let $(D^1,D^2_{r_2},D^3_{r_3})$ be a $(q+1)$-concave configuration. Let $L_{\bigcdot}^{\bigcdot}$ be defined by \rea{defnLR}. For a $(0,q)$ form $f$, we have \begin{gather}{}\overline\partial L^{23}_{12}f=\int_{S^{12}}\Omega^{23}_{0,q+1}\wedge \overline\partial f,\\ L^{23}_{12}f=\overline\partial T_{B_{r_4},q}L^{23}_{12}f+T_{B_{r_4}, q+1}L_{12}^{23}\overline\partial f, \quad \text{on $B_{r_4}$},\\ \label{MqL} L_{12}^{23}\colon Z_{0,q}(C^0(\overline D))\to Z_{0,q}(C^0(\overline D^2\cap \overline D^3)\cap C^\infty(D^2\cap D^3)). \end{gather} \end{lemma} \begin{proof}By \re{gicZ+} the form $\Omega^{23}$ is smooth in $z\in B_{r_4}$ and $\zeta\in S^{12}$. We have $$ \overline\partial_\zeta\Omega_{0,q+1}^{23}+\overline\partial_z\Omega_{0,q}^{23}=\Omega_{0,q+1}^2-\Omega_{0,q+1}^3. $$ By \re{type-2}, $\Omega_{0,q+1}^2=0$. Thus $\int_{S^{12}}\Omega_{0,q+1}^2( z,\zeta )\wedge f=0$.
By \re{type-3}, the degree of $f(\zeta)\wedge \Omega^3_{0,q+1}( z,\zeta )$ is less than $ 2n-3$, which is less than $\dim (S^1\cap S^2)$. This shows that \eq{} \int_{S^{12}}\Omega_{q+1}^3( z,\zeta )\wedge f(\zeta)=0, \quad q>0. \end{equation} By Stokes' formula and $\partial(S^{12})=\emptyset$, we obtain $$ \overline\partial L^{23}_{12}f=-\int_{S^{12}}\overline\partial_\zeta\Omega_{0,q+1}^{23}\wedge f=\int_{S^{12}} \Omega_{0,q+1}^{23}\wedge\overline\partial f. \qedhere $$ \end{proof} In summary, we have the following local $\overline\partial$-solution operator for the concave case. \th{cchf-closed}Let $1\leq q\leq n-2$. Let $(D^1,D^2_{r_2},D^3_{r_3})$ be a $(q+1)$-concave configuration. Let $f$ be a $\overline\partial$-closed $(0,q)$ form on $D^{12}$. On $D^{12}\cap B_{r_4}$, we have \begin{gather}{} f=\overline\partial H_qf \label{tsqf+-cv-closed}
\end{gather} with $H_q=H_q^{(1)}+H_q^{(2)}+H_q^{(3)}$. Here $ H^{(1)}_q$ and $H^{(2)}_q$ are given by \rea{hq1}, \rea{nhq2} and \begin{align} H_{q}^{(3)}f= L_{12}^{123}f+T_{B_{r_4}, q}L^{23}_{12} f. \end{align} \end{thm}
Although it is not used in this paper, for potential applications, it is worthwhile to state the following local homotopy formula if we have an extra negative Levi eigenvalue: if $\partial D^1$ is strictly $(q+2)$ concave, then $
L_{12}^{23}f=0 $ by \re{db13=0} for a $(0,q)$-form $f$. Therefore, we have the following. \th{cchf} Let $1\leq q\leq n-3$. Let $(D^1,D^2_{r_2},D^3_{r_3})$ be a $(q+2)$-concave configuration. Let $D^{12}=D^1\cap D^2_{r_2}$. Let $f$ be a $(0,q)$ form on $D^{12}$. On $D^{12}\cap B_{r_4}$, we have \begin{gather}{} f=\overline\partial H_qf+ H_{q+1}\overline\partial f
\end{gather} with $H_q=H_q^{(1)}+H_q^{(2)}+H_q^{(3)}$. Here $ H^{(1)}_q$ and $H^{(2)}_q$ are given by \rea{hq1}, \rea{nhq2} and \begin{align} H_{q}^{(3)}f= L_{12}^{123}f+T_{B_{r_4},q}L^{23}_{12} f. \end{align} \end{thm}
As in the convex case, the integral operators $H^{(2)}_q, H^{(3)}_q$ have smooth kernels, since $S^1_{+}, S^2, S^{12}$ do not intersect a small neighborhood of the origin in $\partial D^1$. There is another main difference between the convex and concave cases. The kernel for the latter is only $\Lambda^{m-1}$ away from the singularity when $\partial D\in \Lambda^{m}$. Anyway $H_q^{(1)}$ is the main term to be estimated.
\setcounter{equation}{0}
\section{H\"{o}lder-Zygmund spaces and a Hardy-Littlewood lemma}\label{h-space} In this section, we recall the H\"{o}lder-Zygmund norms and indicate how a Hardy-Littlewood lemma can be used to derive the estimates. This version of the Hardy-Littlewood lemma allows us to simplify some estimates in~~\cite{MR3961327}.
Let $\Omega$ be a domain in ${\bf R}^n$. Denote by $\|u\|_{D,a}$ the H\"older norm on a domain $D$ for $a\geq0$. Define $\Delta_hf(x)=f(x+h)-f(x)$ and $\Delta^2_h f(x)=f(x+2h)+f(x)-2f(x+h)$.
Following~~\cite{MR2250142}*{Defn. 1.120, p.~76}, define the space ${\Lambda}^r(\Omega)$ for $r>0$, to be the set of functions $f$ with finite norm \eq{firstLambdar}
|f|_{{\Lambda}^r(\Omega)}=\sup_{
x, x-h,x+h\in\Omega, h\neq0;|\alpha|<r}\left\{|D^\alpha f(x)|+ \f{|\Delta^2_hD^\alpha f(x)|}{|h|^{r-|\alpha|}}\right\}. \end{equation} Denote by $\Lambda_{loc}^r(\Omega)$ the space of functions $f$ such that $f\in\Lambda^r(\Omega')$ for any relatively compact subdomains $\Omega'$ of $\Omega$.
\le{HL}Let $0\leq\beta<1$. Let $ D\subset{\bf R}^n$ be a bounded and connected Lipschitz domain. Suppose that $f$ is in $C_{loc}^{2}( D)$ and $$
|\partial^{2}f(x)|\leq A\operatorname{dist}(x,\partial D)^{\beta-1}. $$ Fix $x_0\in D$. Then $
|f|_{ D;{1+\beta}}\leq C_0(|f(x_0)|+|\nabla f(x_0)|)+C_\beta A, $ where constants $C_0,C_\beta$ depend on a finite set of Lipschitz graph defining functions of $\partial D$. \end{lemma} \begin{proof}The proof is standard for $0<\beta<1$. Suppose $\beta=0$. The assumption implies that the second-order derivatives of $f$ are bounded on each compact subset of $ D$ and it suffices to estimate $\Delta_h^2f(x)$ when $x$ is close to the boundary and $h\in{\bf R}^n$ is small. Take a boundary point of $\partial D$. We may assume that this boundary point is the origin. By definition, we may assume that $ D$ is defined by $x_n>g(x')$ where $g$ is a Lipschitz function satisfying
$|g(\tilde x')-g(x')|\leq L|\tilde x'-x'|$ with $L>1$. Then $$ \operatorname{dist}(y,\partial D)\geq (y_n-g(x'))/L. $$ Suppose that $x,x-h, x+h$ are in $ D$ and close to the origin.
We first consider the special case when $x,h$ satisfy \eq{spcs}
x+th\in D, \quad \operatorname{dist}(x+th,\partial D)\geq |h|/L, \quad \forall t\in[0,2]. \end{equation}
Set $u(s)=f(x+h+sh)+f(x+h-sh)-2f(x+h)$. Thus $u(0)=u'(0)=0$ and $|u''(s)|\leq C_nLA|h|$. This shows that $|u(1)|\leq C_nLA|h|$.
For the general case, set $\tilde h=(0',3L|h|)$. When $y\in D$ is close to the origin and $t\in[0,2]$, we have \begin{gather}\label{yavn} y+t\tilde h\in D,\quad y+\tilde h+th\in D,\\
\operatorname{dist}(y+\tilde h+th,\partial D)\geq |h|.\label{yavn+} \end{gather}
Decompose \begin{align}\label{thedecom} f(x+2h)&+f(x)-2f(x+h)\\ \nonumber &=2f(x+2h+\tilde h)+2f(x+\tilde h)-4f(x+h+\tilde h)\\ \nonumber &\quad - f(x+2\tilde h)-f(x+2h+2\tilde h)+2f(x+h+2\tilde h)\\ \nonumber &\quad + f(x)+f(x+2\tilde h)-2f(x+\tilde h)\\ \nonumber &\quad+ f(x+2h)+f(x+2h+2\tilde h)-2f(x+2h+\tilde h)\\ \nonumber &\quad - 2f(x+h)-2f(x+h+2\tilde h)+4f(x+h+\tilde h). \end{align} We estimate each row on the right-hand side of \re{thedecom}. Let us denote by $[a,b]$ the line segment connecting two points $a,b$ in ${\bf R}^n$. By \re{yavn} and \re{yavn+}, we have $$ [x+\tilde h,x+\tilde h+2h]\subset D, \quad
\operatorname{dist}(x+\tilde h+t h,\partial D)\geq|h|, $$ for $t\in[0,2]$. Therefore, we can estimate the first row using the estimation for the special case \re{spcs} in which $x$ is replaced by $x+\tilde h$. The second row is estimated similarly. For the third row, by \re{yavn+} and $x\in D$, we get $$
\operatorname{dist}(x+\tilde h+s\tilde h,\partial D)\geq (1+s)|h|\geq (1-|s|)| h|, \quad s\in[-1,1]. $$
Take $u(s)=f(x+\tilde h+s\tilde h)+f(x+\tilde h-s\tilde h)-2f(x+\tilde h)$. This yields $|u''(s)|\leq C_n AL| h|/{(1-s)}$ for $s\in[0,1]$ and
$|u(1)|\leq C_nAL| h|$ by
$ u(1)=\int_{0}^1(1-s)u''(s)\, ds$.
This gives us the desired estimate for the third row. The last two rows in \re{thedecom} can be estimated similarly.
\end{proof}
We remark that the decomposition \re{thedecom} shows the following. \begin{prop}\label{equiv-norms}
Let $ D$ be a bounded Lipschitz-graph domain in ${\bf R}^n$. Set $$ D_{h}=\{x\in D\colon [x,x+2h]\subset D\}, \quad \forall h\in{\bf R}^n. $$ The norm ~\rea{firstLambdar} is equivalent to $$
|f|_{\widetilde{\Lambda}^r( D)}=\sup_{x\in D}|f(x)|+\sum_{|\alpha|<r}\sup_{h\in{\bf R}^n\setminus\{0\}}\sup_{x\in D_{h}}\f{|\Delta^2_hD^\alpha f(x)|}{|h|^{r-|\alpha|}}. $$
In other words, $c|f|_{\widetilde{\Lambda}^r( D)}\leq
|f|_{{\Lambda}^r( D)}\leq C|f|_{\widetilde{\Lambda}^r( D)}$. Here one can take $c=1$ and $C$ a constant depending on finitely many graph Lipschitz functions defining $\partial D$. \end{prop} For other equivalent norms of $\Lambda^r$, see
~\cite{MR0487423}*{Thm. 1} and ~\cite{MR3961327}.
\setcounter{thm}{0}\setcounter{equation}{0} \section{$\f{1}{2}$-gain estimates for local homotopy operators}\label{sec:1form}
In this section, we derive the estimates for homotopy operators. We will give precise estimates which are potentially useful for applications. We remark that the local estimates do not require the forms to be $\overline\partial$ closed.
We first consider the $(n-q)$ convex case. In this case the result is essentially in~~\cite{MR3961327}. We simplify the proof by using \rl{HL}, which is applicable for $C^2$ domains. \begin{thm}\label{conv-est} Let $r\in(1,\infty)$ and $1\leq q\leq n-1$. Let $(D^1,D^2)$ be an $(n-q)$-convex configuration. The homotopy operator $H_q$ in \rta{hf-c} satisfies \eq{hqfr12-c}
|H_q\varphi|_{D^{1}\cap D^2_{r_3},r+1/2}\leq C( \partial\rho^1, \partial^2\rho^1)|\varphi|_{D^{12},r}, \quad r_1/2<r_3<3r_2/4. \end{equation} \end{thm} \begin{proof} We now derive our main estimate. We will also simplify the proof in~~\cite{MR3961327} by using~\rl{HL}. Recall the homotopy operator $$ H_{q}\varphi=(H^{(1)}_q+H^{(2)}_q)\varphi $$ with $$ H^{(1)}_q\varphi=R_{U_1\cup D^{12}}^0 E \varphi+R_{U_1 }^{01}[\overline\partial,E] \varphi. $$ Near the origin, each term in $H^{(2)}_q\varphi$ given by \re{Hqv} is a linear combination of integrals of the form
$$ Kf(z):=\int_{S^I}\f{A(\partial\rho^1_\zeta,\partial^2_\zeta\rho^1,\zeta, z) f(\zeta )}{(g^1\cdot(\zeta-z))^a(g^2\cdot(\zeta-z))^b |\zeta-z|^{2c}}\, dV $$ where $f$ is the coefficients of $\varphi$, $a,b,c$ are integers. Note that $S^I$ is one of $S^1_+, S^{12}, S^2$. Therefore, the kernel of $K$ is smooth for $z$ close to the origin. We have $
| Kf|_{r+1/2}\leq C_r\|f\|_{0}. $ Here and in what follows $C_r$ denotes a constant depending on $\rho,\partial\rho,\partial^2\rho$.
We now estimate the main term $H^{(1)}\varphi$. Decompose it as
\eq{dbECV} H^{(1)}\varphi=\int_{D^{12}\cup U^1}\Omega_{0,q}^{0}(z,\zeta)\wedge E\varphi(\zeta)+ \int_{\mathcal U\setminus D}\Omega_{0,q}^{01}(z,\zeta)\wedge[\overline\partial,E]\varphi(\zeta).
\end{equation} Denote the first integral by $K_1\varphi$. By estimates on Newtonian potential~~\cite{MR999729}, we have $$
|K_1\varphi|_{r+1/2}\leq C|\varphi|_{r-1/2}. $$ Note that the above is proved in~~\cite{MR999729} when $r$ is not an integer. When it is an integer, the estimate follows from interpolation for the Zygmund spaces. The last integral in \re{dbECV} can be written as a linear combination of
\begin{gather}\label{defnKfCV}
K_2f:=\int_{\mathcal U\setminus D} f(\zeta )\f{A(\partial\rho^1_\zeta,\partial^2_\zeta\rho^1,\zeta, z)N_{1}(\zeta-z )}
{\Phi^{n-j}(z,\zeta)|\zeta -z |^{2j}}\, dV(\zeta), \quad 1\leq j<n,\\
\Phi(z,\zeta)=g^1(z,\zeta)\cdot(\zeta-z) \end{gather} where $f$ is a coefficient of the form $[\overline\partial,E]\varphi$ and hence \eq{}
|f|_{r-1}\leq C|\varphi|_r. \end{equation}
Here and in what follows, $N_m(\zeta)$ denotes a monomial of $\zeta$ with degree $m\geq0$. Note that $f$ vanishes on $\overline D$.
Fix $\zeta_0\in\partial D_1$. We first choose local coordinates such that $s_1(\zeta),s_t(\zeta),t(\zeta)=(t_3,\dots, t_{2n})(\zeta)$ vanishing at $\zeta_0$, $D^1$ is defined by $s_1<0$, and
\begin{gather}\label{LbPhi}
|\Phi(z,\zeta)|\geq c_*(d(z)+ s_1(\zeta)+|s_2(\zeta)|+|t(\zeta)|^2),\\
|\Phi(z,\zeta)|\geq c_*|\zeta-z|^2,\quad |\zeta-z|\geq c_* |t(\zeta)|.
\label{LbPhi+}
\end{gather}
Let $r=k+\alpha$ with integer $k\geq1$ and $0<\alpha\leq1$.
Consider first the case that $0<\alpha<1/2$. By fundamental theorem of calculus, we have $|f(\zeta)|\leq C|f|_{r}\operatorname{dist}(\zeta,\partial D_1)^{r-1}$. We have
$|\partial^{k+1}Kf(z)|\leq C_r|f|_{r-1}I(z)$, where \eq{dk+2}
I(z):=\int_{[0,1]\times[-1,1]^{2n-1}}\f{s_1^{r-1}d s_1ds_2dt}{(d(z)+ s_1+|s_2|+|t|^2)^{(n-j)+a}(s_1+|s_2|+|t|)^{2j+b-1}} \end{equation} with $a+b=k+1$ and $1\leq j<2n$. The worst term occurs when $j=n-1$ and $a=k+1$. Therefore, using polar coordinates for $t(\zeta)$ we obtain $I(z)\leq C \tilde I(z)$ for $$
\tilde I(z):= \int_{[0,1]^3}\f{s_1^{r-1}d s_1ds_2dt}{(d(z)+ s_1+s_2+t^2)^{k+2}}. $$
We have $\tilde I(z)\leq \hat I_\alpha(z)$ for \begin{align}
\hat I_\alpha(z):= \int_{t=0}^1\int_{s_1=0}^1\int_{s_2=0}^1 \f{s_1^{\alpha-1}\, ds_1ds_2dt}{(d(z)+ s_1+s_2+t^2)^2} \leq Cd(z)^{(\alpha+1/2)-1} \end{align} because $\alpha-1/2<0$. Here we leave the proof of last inequality to the reader, or see similar arguments in the proof of \rl{henint} below.
When $\alpha+1/2\geq1$, we use $|\partial^{k+2}Kf(z)|\leq |f|_{r-1} I(z)$ defined by \re{dk+2} in which $a+b=k+2$. Then $I(z)\leq C\tilde I _1 (z)$ for \eq{}
\tilde I _1 (z):= \int\f{s_1^{r-1}d s_1ds_2dt}{(d(z)+ s_1(\zeta)+|s_2(\zeta)|+|t(\zeta)|^2)^{k+3}}. \end{equation} We now have $\tilde I_1(z)\leq\hat I_{\alpha-1}(z)\leq d(z)^{(\alpha+1/2)-2}$ because $(\alpha+1/2)<2$. \end{proof}
\le{henint}Let $\beta,\mu_1\in[0,\infty)$, $\lambda\in{\bf R}$, and $0<\delta<1$. Suppose \eq{betap} \beta':=\beta-\lambda+1+\min\{ 0, (\lambda-\mu_1+1)/2-\epsilon\}<0 \end{equation} and $\epsilon>0$. Then \eq{the-int}
\int_0^1\int_0^1\int_{t\in[0,1]^{m}}\f{ s_1^\beta(\delta+s_1+s_2+|t|^2)^{-1-\mu_1}}{(\delta+s_1+s_2+|t|)^{m-1+\lambda-\mu_1} }\, ds_1ds_2dt < \begin{cases} C\delta^{\beta'},& \beta'<0;\\ C, &\beta'>0. \end{cases} \end{equation}
\end{lemma} \begin{proof} We consider the integral in the following regions.
(i) $s_2>\max\{s_1,\delta,|t|\}$. On this region the integral is less than $$
\int_{s_2>\delta}\int_{0<s_1<s_2}\int_{|t|<s_2}\frac{s_1^{\beta}}{s_2^{m+\lambda }}\, dt ds_1ds_2\leq C\delta^{\beta-\lambda +2} $$
if $\beta<\lambda -2$. Also the integral is bounded by a constant if $\beta>\lambda-2$. The same bounds can be obtained for the integral on regions $(ii)$ $s_1>\max\{\delta,s_2,|t|\}$ and $(iii)$ $\delta>\max\{s_1,s_2,|t|\}$.
$(iv).$ $|t|^2>\min\{\delta,s_1,s_2\}$. On this region, the integral is less than $$ \int_{\rho>\sqrt\delta}\int_{s_1<{\rho}^2}\int_{s_2<{\rho}^2}\frac{s_1^{\beta}\, ds_1ds_2d\rho}{\rho^{2(1+\mu_1)+\lambda -\mu_1}}\leq \int_{\rho>\sqrt\delta}\rho^{2\beta-\mu_1-\lambda }\, d\rho<C\delta^{\beta-(\mu_1+\lambda -1)/2}. $$
$(v). \ |t|^2<\delta+s_1+s_2<|t|$. On this region, the integral is less than \eq{triple} \int_{(s_1,s_2)\in[0,1]^2}\int_{ \delta+s_1+s_2}^{\sqrt{\delta+s_1+s_2}} \frac{s_1^{\beta}\, d\rho ds_1ds_2}{(\delta+s_1+s_2)^{1+\mu_1}\rho^{\lambda -\mu_1}}. \end{equation} Suppose $\lambda-\mu_1>1$. The latter is less than $$ \int_{(s_1,s_2)\in[0,1]^2} \frac{s_1^{\beta}\, ds_1ds_2}{(\delta+s_1+s_2)^{\lambda }}<C\delta^{\beta-\lambda +1}, $$ for $\beta-\lambda +1<0$. When $\beta-\lambda +1\geq0$, the integral \re{triple} is less than $$
\int_{(s_1,s_2)\in[0,1]^2} \frac{s_1^{\beta}(|\ln (\delta+s_1+s_2)|+(\delta+s_1+s_2)^{\f{\mu_1-\lambda +1}{2}})}{(\delta+s_1+s_2)^{1+\mu_1}}\, ds_1ds_2, $$ which is less than $C_\epsilon\delta^{\beta-(\mu_1+\lambda -3)/2-\epsilon}$ if $$ \beta-(\mu_1+\lambda -3)/2-\epsilon<0. $$
Thus we can take $\beta'=\min\{\beta-\lambda +1, \beta-(\mu_1+\lambda -3)/2-\epsilon\}$, which is \re{betap}. We leave it to the reader to check that when $\beta'>0$ the integral \re{the-int} is bounded above by a constant. \end{proof}
We need the following \begin{gather}{} \label{Zcov}
|uv|_a\leq C(|u|_a\|v\|_0+\|u\|_0|v|_a),\quad a\in(0,\infty);\\ \label{Hcov}
\|u\|_{a+b}\|v\|_{c+d}\leq C_{a,b,c,d}(\|u\|_{a+b+d}\|v\|_d+\|u\|_a\|v\|_{c+b+d}) \end{gather} for $a,b,c,d\in[0,\infty)$. For \re{Zcov}, see ~\cite[Thm 2.86, p. 104]{MR2768550}; for \re{Hcov}, see~\cite{MR2829316}. \begin{thm}\label{concave-est} Let $r\in(1,\infty)$ and $1\leq q\leq n-2$. Let $(D^1,D^2,D^3)$ be a $(q+1)$-concave configuration. The homotopy operators $H_q$ in Theorems~$\ref{cchf-closed}$ and $\ref{cchf}$ satisfy \begin{gather}\label{hqfr12}
|H_q\varphi|_{D^{1}\cap D^2_{r_4}, r+1/2}\leq C_r(\partial\rho,\partial^2\rho) (|\rho^1|_{r+5/2}\|\varphi\|_{1}+ |\varphi|_{D^{12},r})
\end{gather} for $r_1/2<r_4<3r_3/4.$ Moreover, $C_r(\partial\rho,\partial^2\rho)$ is upper-stable under a small $C^2$ perturbation of $\rho$. \end{thm} \begin{proof}
We now derive our main estimates. Recall the homotopy operator $$ H_{q}\varphi=(H^{(1)}_q+H^{(2)}_q+H^{(3)}_q)\varphi $$ where $\varphi$ has type $(0,q)$ and $$ H^{(1)}_q\varphi=R_{U_1\cup D^{12}}^0 E \varphi+R_{U_1 }^{01}[\overline\partial,E] \varphi. $$ Near the origin, each term in $H^{(2)}_q\varphi, H^{(3)}_q\varphi$ is a linear combination of integrals of the form $$ K_0f_0(z)=A(\partial^2_z\rho^1)\tilde K_0f_0 $$ with $f_0$ being a coefficient of $\varphi$, and $A(\partial^2_z\rho)$ being a polynomial in derivatives of $\rho$ of order at most two. Here $\tilde K_0$, which involves only $\partial\rho^1$, is defined by
$$\tilde K_0f_0(z):=\int_{S^I} f_0(\zeta )(g^1\cdot(\zeta-z))^a(g^2\cdot(\zeta-z))^b(g^3\cdot(\zeta-z))^c|\zeta-z|^{2d}(\operatorname{Re}\zeta,\operatorname{Im}\zeta)^e\, dV $$ where $a,b,c,d$ are negative integers and $S^I$ is one of $S^1_+, S^{12}, S^2$. Therefore, for the $g^i$ that appear in the kernel, we have $$
|g^i\cdot(\zeta-z)|\geq c_0 $$
when $|z|$ is sufficiently small and $\zeta\in S^I$. By \re{Zcov}, we have $
|\tilde K_0f_0|_{r+1/2}\leq C_r|\rho^1|_{r+3/2}\|f_0\|_{0} $ and \begin{gather}{}
|K_0f_0|_{r+1/2}\leq C|\rho^1|_{r+5/2}\|\tilde K_0f_0\|_{0}+C |\tilde K_0f_0|_{r+1/2}. \end{gather} Here and in what follows $C_r$ denotes a constant depending on $\rho,\partial\rho,\partial^2\rho$. Therefore, we have $$
|K_0f_0|_{r+1/2}\leq C_r|\rho^1|_{r+5/2}\|f_0\|_0. $$
We now estimate the main term $H^{(1)}\varphi$. Decompose it as
\eq{dbE} H^{(1)}\varphi=\int_{D^{12}\cup U^1}\Omega_{0,q}^{0}(z,\zeta)\wedge E\varphi(\zeta)+ \int_{\mathcal U\setminus D}\Omega_{0,q}^{01}(z,\zeta)\wedge[\overline\partial,E]\varphi(\zeta).
\end{equation} Denote the first integral by $K_1\varphi$. By estimates on Newtonian potential, we have $$
|K_1\varphi|_{r+1/2}\leq C|\varphi|_{r-1/2}. $$ The last integral in \re{dbE} can be written as a linear combination of
\begin{gather}\label{defnKf} K_2f(z):=\partial_z^2\rho^1\tilde K_2f \end{gather} where $f$ is a coefficient of the form $[\overline\partial,E]\varphi$ and hence \eq{}
|f|_{r-1}\leq C|\varphi|_r. \end{equation} Also $\partial^2\rho^1$ indicates a derivative of order at most two; and \begin{gather}{} \tilde K_2f:=\int_{\mathcal U\setminus D} f(\zeta )\f{A( z,\zeta )N_{1}(\zeta-z )}
{\Phi^{n-j}(z,\zeta)|\zeta -z |^{2j}}\, dV(\zeta), \quad 1\leq j<n,\\
\Phi(z,\zeta)=g^1(z,\zeta)\cdot(\zeta-z).
\label{defnKf+}
\end{gather}
Here and in what follows, $N_m(\zeta)$ denotes a monomial of $\zeta$ with degree $m\geq0$. Also, $A( z,\zeta )$ is a monomial in $ z,\zeta $. Note that $f$ vanishes on $\overline D$.
We have $$
|K_2f|_{r+1/2}\leq C_r(|\rho^1|_{r+5/2}\|\tilde K_2f\|_0+ |\tilde K_2f|_{r+1/2}). $$
We have $\|\tilde K_2f\|_{1/2}\leq C\|f\|_0$, by the estimate in \rt{conv-est}. For later purpose we note that this also gives us \eq{C0est}
\|K_2f\|_0\leq C\|f\|_0. \end{equation}
The rest of the proof is devoted to the proof of \begin{gather}\label{tK2f}
|\tilde K_2f|_{r+1/2}\leq C_r( \|\rho^1\|_{r+2}\|f\|_0+ |f|_{r-1}), \quad r>1. \end{gather} Then combining above estimates yields the proof for \re{hqfr12}.
A technical difficulty to prove \re{tK2f} is that we do not have a version of \re{Hcov} for Zygmund norms. Therefore, we must use \re{Hcov} as a substitute to treat Zygmund norms. The computation is tedious and our main observation is that the kernel of $\tilde K$ involves only $\partial^1\rho$ instead of $\partial^2\rho$. Therefore, \re{Hcov} is still good enough to derive \re{tK2f} which is a crude estimate.
Let $r=k+\alpha$ with integer $k\geq1$ and $0<\alpha\leq1$. In the following cases, we will apply \rl{henint} several times. For clarity, we will call values $(\beta,\beta',\lambda)$ as $(\beta_i,\beta_i',\lambda_i)$ when we use the lemma.
(i) $0<\alpha<1/2$. Recall that the kernel of $K_2$ involves only first-order derivatives of $\rho^1$ in $z$-variables and it does not involve $\zeta$-derivatives of $\rho^1$. The expansion of $\Phi^1$ contains some $\zeta_j-z_j$. Since $\rho^1\in \Lambda^{k+\alpha+5/2}\subset C^{k+2}$, we therefore express $\partial_z^{k+1}\tilde K_2f$ as a sum of \eq{rhotimesK} K_{\mu,\nu}^{(k+1)}f:= \partial^{1+\nu'_1}_z\rho^1\cdots\partial_z^{1+\nu'_{\mu_1}}\rho^1K_\mu f
\end{equation}
with
\eq{Kmuf}
K_\mu f(z):=\int_{U^1}\f{f(\zeta)N_{1-\mu_0+\mu_1-\nu_1''-\cdots-\nu_{\mu_1}''+\mu_2}(\zeta-z)}
{(\Phi^1(z,\zeta))^{n-j+\mu_1}|\zeta-z|^{2j+2\mu_2}}dV. \end{equation} Here $1\leq j<n$, $\nu_i''=0,1$, and \eq{sumk1} \mu_0+\mu_1+\mu_2+\sum(\nu'_i+\nu''_i)\leq k+1. \end{equation} To estimate \re{rhotimesK}, we use the facts that $z\in D$ and $\zeta\in U$ and $$
C|\zeta-z|\geq |\Phi^1(\zeta,z)|\geq \operatorname{dist}(z,\partial D)+\operatorname{dist}(\zeta,\partial D)+|\operatorname{Im}\Phi^1(\zeta,z)|+|\zeta-z|^2. $$ Consequently, the worst term for $K_\mu f$ occurs when $j=n-1$, which we now assume.
We want to show that \eq{Kmuest}
|K_\mu f(z)|\leq |f|_{\beta_1}\operatorname{dist}(z)^{\beta_1'}, \quad\beta_1':=\alpha-1/2 \end{equation} where $\beta_1\geq0$ is to be specified. The case all $\nu'_i=0,1$ can be estimated as in \rt{conv-est}, for $\beta_1=\mu_0+\mu_1+\mu_2-1+\alpha\leq r-1$.
Thus we may assume that all $\nu'_i\geq2$ for $1\leq i\leq\mu_1'$, $\nu'_i=0,1$ for $i>\mu_1'$, and $\mu_1'>0$. Define
\eq{defla}
\lambda:=\mu_0+\mu_2+\sum\nu_i''.
\end{equation}
We have $\mu_1\geq\mu_1'\geq1$. By \rl{henint} with $ \beta_1'=\alpha-1/2<0, $ we need to find $\beta_1\geq0$ satisfying \eq{defbp} \beta_1'\leq\beta_1-\lambda+1+\min\{0,(\lambda-\mu_1+1)/2-\epsilon\}<0. \end{equation} Thus we take \eq{defbet} \beta_1=\max\{0,\alpha-3/2+\lambda -\min\{0,(\lambda -\mu_1+1)/2-\epsilon\}\}. \end{equation}
We obtain \begin{align*}{}
|K_{\mu,\nu}^{(k+1)} f(z)|&\leq C\|\rho^1\|_{1+\nu_1}\cdots\|\rho^1\|_{1+\nu_{\mu_1}}\|f\|_{\beta_1}
\operatorname{dist}(z,\partial\Omega)^{\alpha-1/2}. \end{align*}
For $i>\mu_1'$, we use $\|\rho^1\|_{1+\nu'_i}\leq \|\rho^1\|_2$.
By \re{Hcov}, we have
\eq{s52f}\|\rho^1\|_{1+\nu'_1}\cdots\|\rho^1\|_{1+\nu'_{\mu_1'}}\|f\|_{\beta_1}\leq C(\|\rho^1\|_2)\{ \|\rho^1\|_{s_1+2}\|f\|_0+ \|f\|_{s_1}\} \end{equation} for $s_1:=\beta_1+\sum_{i\leq\mu_1'}(\nu'_i-1)=\beta_1-\mu_1'+\sum\nu'_i$.
Recall that $\mu_1\geq\mu_1'\geq1$. We have \eq{beta=0} s_1=\beta_1-\mu_1'+\sum\nu_i'\leq k+1+\beta_1-\mu_1'-\mu_1. \end{equation} Thus $s_1<r-1$ when $\beta_1=0$. Suppose $\beta_1>0$. Then $s_1=\alpha-3/2+\lambda -\mu'_1 +\sum\nu_i'-\min\{0,(\lambda -\mu_1+1)/2-\epsilon\}$. When $\lambda >\mu_1-1$, we have \eq{s1La-} s_1\leq \alpha-3/2+\lambda -\mu_1'+\sum_{i\leq\mu_1'}\nu'_i <r-1. \end{equation} When $0\leq\lambda \leq\mu_1-1$, in view of $\mu_1\geq\mu'_1\geq1$ we get \begin{align}\label{s1La} s_1&\leq \alpha-3/2+\lambda -\mu'_1-(\lambda -\mu_1+1)/2+\sum\nu'_i+\epsilon\\ &\leq r-\mu_1'-\mu_1/2-\lambda/2 +\epsilon< r-1.\nonumber \end{align}
$(iii)\ 1/2<\alpha\leq1$. In this case we can take an extra derivative since $\rho^1\in\Lambda^{k+\alpha+5/2}\subset C^{k+3}$. Write $\partial_z^{k+2}\tilde K_2f$ as a sum of $$
K_{\mu,\nu}^{(k+2)}f:=\partial^{1+\nu'_1}_z\rho^1\cdots\partial_z^{1+\nu'_{\mu_1}}\rho^1K_\mu f
$$ where $K_\mu f$ is defined by \re{Kmuf}
with $1\leq j<n$ and \eq{mu12} \mu_0+\mu_1+\mu_2+\sum(\nu'_i+\nu''_i)\leq k+2. \end{equation}
As before, the worst term occurs for $j=n-1$ in $K_\mu$, which is assumed now. We need to show that \eq{}
| K_{\mu,\nu}^{(k+2)}f(z)|\leq C |f|_{\beta_2} \operatorname{dist}(z,\partial\Omega)^{\beta_2'}, \quad\beta_2'=\alpha-3/2 \end{equation} for $\beta_2$ to be specified. The case that all $\nu_i'\leq1$ can be estimated as in the proof of \rt{conv-est}, for $\beta_2=r-1$. Suppose now $\nu_i'\geq2$ for $i\leq \mu_1'$ with $\mu_1'\geq1$, and $\nu_i'>0$ for $i>\mu_1'$. Recall that $\lambda$ is defined by \re{defla}. In particular, $ \lambda\leq k+1$.
With $\beta_2'=\alpha-3/2$, we take \eq{defbet+} \beta_2=\max\{0,\alpha-5/2+\lambda-\min\{0,(\lambda-\mu_1+1)/2-\epsilon\}\}. \end{equation} Set $s_2:=\beta_2-1+\sum_{i\leq\mu_1'}(\nu'_i-1)$. When $\beta_2=0$, we have $s_2<r-1$ by analogue of \re{beta=0}. Suppose $\beta_2>0$. We have $s_2=\alpha-7/2+\lambda-\mu_1-\min\{0,(\lambda-\mu_1+1)/2-\epsilon\} +\sum_{i\leq\mu_1'}\nu'_i$. Using $\mu_1\geq\mu'_1\geq1$, we can verify \re{s52f} where $s_1$ is replaced by $s_2< r-1$.
$(iv)\ \alpha=1/2$. We need to estimate $|K_1f|_{r+1/2}$ with $r+1/2=k+1$. Recall that $\rho^1\in\Lambda^{k+3}$. We write $\partial^kK_1f$ as a sum of $K_{\mu, \nu}^{(k)}f$ defined by \re{Kmuf} with $1\leq j<n$ and \eq{sumk1half} \mu_0+\mu_1+\mu_2+\sum(\nu'_i+\nu''_i)\leq k. \end{equation} As before, the worst term occurs for $j=n-1$ in $K_\mu$, which is assumed now.
Consider case $\mu_1\geq1$. Then $\lambda\leq k-1$. By \re{Zcov}, we have \begin{align}\label{halfcase}
|K_{\mu, \nu}^{(k)}f|_1
&\leq
|\partial^{1+\nu'_1}\rho^1\cdots\partial^{1+\nu'_{\mu'_1}}\rho^1|_1\|K_\mu f\|_0\\
&\quad
+\|\partial^{1+\nu'_1}\rho^1\cdots\partial^{1+\nu'_{\mu'_1}}\rho^1\|_0|K_\mu f|_1=:I+II.
\nonumber
\end{align}
We first estimate $I$. We have $\|K_\mu f\|_0\leq\|K_\mu f\|_{\beta_3'}\leq C\|f\|_{\beta_3}$ for $\beta_3'=\epsilon$, $\beta_3\geq0$ and
$$
\beta_3'\leq\beta_3-\lambda+1+\min\{ 0, (\lambda-\mu_1+1)/2-\epsilon\}>0.
$$
We take $$ \beta_3=\max\{0,\lambda-1-\min\{0,(\lambda-\mu_1+1)/2+\epsilon\}+\epsilon\}. $$
We have $\|\partial^{1+\nu'_1}\rho^1\cdots\partial^{1+\nu'_{\mu'_1}}\rho^1\|_1\|f\|_{\beta_3}\leq C\|\rho^1\|_{s_3+2}\|f\|_0+ C\|f\|_{s_3}$ with
$$
s_3:=\beta_3-\mu_1'+\sum_{i\leq\mu_1'}\nu_i'.
$$
By analogue of \re{beta=0}, we have $s_3<r-1$ when $\beta_3=0$. Suppose $\beta_3>0$. Then
$$
s_3= \lambda-1-\mu_1'+\sum\nu_i'-\min\{0,(\lambda-\mu_1+1)/2-\epsilon\}+\epsilon <r-1,
$$
which is verified by analogue of \re{s1La-}-\re{s1La}. To estimate $II$, by \rl{henint}, we have $|K_\mu f|_1\leq \|f\|_{\beta_4}$ for $\beta_4\geq0$ and $$ -1\leq \beta_4-\lambda+1+\min\{ 0, (\lambda-\mu_1+1)/2\}. $$ Then $\beta_4=\max\{0,\lambda-2-\min\{ 0, (\lambda-\mu_1+1)/2-\epsilon\}\}$. We obtain $$
\|\partial^{1+\nu'_1}\rho^1\cdots\partial^{1+\nu'_{\mu'_1}}\rho^1\|_1\|f\|_{\beta_4}\leq \|\rho^1\|_{s_4+2}\|f\|_0+ \|f\|_{s_4} $$ with $
s_4:=\beta_4-\mu_1'+\sum_{i\leq\mu_1'}\nu_i'+\epsilon. $ When $\beta_4=0$, we get $s_4<r-1$. Suppose $\beta_4>0$. Then $$ s_4=\lambda-2-\min\{ 0, (\lambda-\mu_1+1)/2-\epsilon\} -\mu_1'+\sum_{i\leq\mu_1'}\nu_i'+\epsilon.
$$ We can verify that $s_4<r-1$ when $\lambda<\mu_1-1$. When $\lambda\geq\mu_1-1$, we also have $$ s_4=\lambda-2- (\lambda-\mu_1+1)/2 -\mu_1'+\sum_{i\leq\mu_1'}\nu_i'+2\epsilon\leq k-5/2-\mu_1/2-\mu_1'+2\epsilon<r-1. $$
Consider the case $\mu_1=0$. Write $\partial_z^2K_\mu f$ as a sum of $$ \partial_z^3\rho^1K_{\tilde\mu+\mu}f,\quad \quad \partial^2\rho^1K_{\tilde\mu+\mu}f. $$ For the last term, by interpolation, we have $$
|K_{\tilde\mu+\mu}f(z)|\leq |f|_{r-1}d(z)^{-1}. $$ This gives us the estimate for $\partial^2\rho^1K_{\tilde\mu+\mu}f$.
For the first term, \rl{henint} implies that $
|K_{\tilde\mu+\mu}f(z)|\leq C\|f\|_{\beta_5} d(z)^{-1} $ for $\beta_5\geq0$ and $$ \beta_5:=\max\{0, (\lambda+\tilde\lambda)+\tilde\mu_2-2-\min\{ 0, (\lambda+\tilde\lambda-\mu_1-\tilde\mu_1+1)/2+\epsilon\}\}. $$
We have $\tilde\mu_1=1$, $\tilde\mu_0=\tilde\mu_2=\tilde\nu_1''=0$, $\tilde\lambda=0$. Thus ${\beta_5}=\max\{0,\lambda-2-\min\{ 0, \lambda/2-\epsilon\}\}$. When $\beta_5=0$, we obtain $|\partial_z^3\rho^1 K_{\tilde\mu+\mu}(z)|\leq C\|\rho^1\|_3\|f\|_0d(z)^{-1}$, while $\|\rho^1\|_3\|f\|_0\leq C |\rho^1|_{r+5/2}\|f\|_0.$ When $\beta_5>0$,
we have $\lambda>2$ and $\beta_5 \leq \lambda-2\leq k-2<r-5/2+\epsilon'$ for a small $\epsilon'>0$. Therefore,
\begin{align*}
\|\rho^1\|_3\|f\|_{r-5/2+\epsilon'}&\leq C\|\rho^1\|_{r+1/2+\epsilon'}\|f\|_0+C\|\rho^1\|_{2}\|f\|_{r-3/2+\epsilon'}\\
&\leq C|\rho^1|_{r+5/2}\|f\|_0+ C|f|_{r-1}. \qedhere
\end{align*} \end{proof}
\setcounter{thm}{0}\setcounter{equation}{0} \section{An estimate of $\overline\partial$ solution for $(0,1)$ forms via Hartogs's theorem}\label{sec:1form}
In this section we show that the regularity of functional $\overline\partial$-solutions for $(0,1)$-forms can be achieved via Hartogs' theorem for concave domains that require merely $C^2$ boundary. Here we need $2$-concavity. We will only prove the regularity for local solutions.
Let $D\subset U$ be defined by $\rho<0$. Suppose that $U\cap \partial D$ is strictly $2$-concave. For each $\zeta\in\partial D$, we can apply local biholomorphic map $\psi_\zeta$ such that $\rho\circ\psi_\zeta=a_\zeta \rho_\zeta$ has the form \eq{}
\rho_\zeta(z)=-y_n-3|z_1|^2-3|z_{2}|^2+\sum_{j>2}\lambda_j|z_j|^2+o(|z|^2). \end{equation}
Then $D_\zeta:=\psi_\zeta (D)$ contains $ D_\epsilon=\{z\in\Delta_\epsilon^n\colon\tilde\rho(z)<0\}$, where \eq{}
\tilde\rho(z)=-y_n-2|z_1|^2-2|z_{2}|^2+2\sum_{j=3}^n|z_j|^2. \end{equation} Both $D_\zeta, D_\epsilon$ share a Hartogs's subdomain $H_\epsilon=\{z\in\Delta_\epsilon^n\colon\hat\rho(z)<0\}$, where $$
\hat \rho=-y_n- |z_1|^2+\sum_{j=2}^n (1+\lambda_j)|z_j|^2. $$
Note that $\partial D\cap\partial H_\epsilon=\{0\}$. We want to show that if $f\in\Lambda_r(\overline D)$, any solution $u$ to $\overline\partial u=f$ on $ D$ is in $\Lambda_{r+1/2}$ on $D\cap B_\epsilon(\zeta)$ for $\zeta\in\partial D$ and small $\epsilon$.
We remark that the $C^{1/2}$ estimate in~\cite{MR986248}*{Thm. 14.1} seems to require $\partial D\in C^{5/2}$ to repeat the proof of ~\cite{MR986248}*{Thm. 9.1}, while the latter needs $\rho\in C^2$ for the estimates. Let us first produce a solution $u_0\in C^0(\overline D)$ for $\partial D\in C^2$. For instance, we can take the solution in~\cite{MR986248}*{Thm. 13.10, p. 127} or our solutions with the $C^0$ estimate for $C^2$ boundary given by \re{C0est}.
Since $\partial\tilde D_\epsilon$ is smooth, we have a solution $u$ on $\tilde D_\epsilon$ such that $\overline\partial u=f$ and $ u\in\Lambda_{r+1/2}$. Then $u_0- u$ admits a holomorphic extension $h$ to \eq{}
\hat D_\epsilon:= D_\epsilon\cup\Delta_\delta^n. \end{equation} On $D_\epsilon$, we now know that $u_0\in \Lambda^{r+1/2}(D_\epsilon)$. We can write $$\partial_{x_j}u_0=\partial_{x_j} u+\partial_{x_j}h. $$ We have $C^0$ estimates for $u_0, u$ and $h$. By Cauchy formula, we have $$
h(z)=\f{1}{2\pi}\int_{|\zeta_1|=1}\frac{h(\zeta_1,z')}{\zeta_1-z_1}\, d\zeta_1.$$ This gives estimate for higher order derivatives of $\partial_{z_1}^k h$. Similarly, we can obtain higher order derivatives of $\partial_{v}h$ for any directions $v$ that are small perturbations of the unit vector $(1,0,\dots, 0)$. Now these small perturbations span all unit vectors. Therefore, we can get the desired estimates for partial derivatives of order $m <r+1/2$, where $m=[r+1/2]$ if $r+1/2$ is not an integer, or $m=r-1/2$. Thus $\alpha=r+1/2-m\in(0,1]$.
Next we need to estimate the $\Lambda^\alpha$ norms of $m$-th derivatives of $u_0$. Set $v=\partial^mu_0$. It remains to estimate $ v(z+w)+v(z-w)-2v(z)$ when $ z,z\pm w\in D. $
We may assume that $z$ is sufficiently close to the origin and $|w|$ is small. Let $z^*\in\partial D$ be the closest point to $z$.
Suppose $\delta=|w|$ is small. Let $\tilde w=\delta(z-z^*)/{|z-z^*|}$. Since $\partial D_{z^*}$ is tangent to $\partial D$ at $z^*$, then $z+t\tilde w$ and $z\pm w+t'\tilde w\in D_{z^*}$ for $t\in(0,2)$ and $t'\in[1,2]$. We now use decomposition \re{thedecom} and get \begin{align}\label{thedecom+} v(z+w)&+v(z-w)-2v(z)\\ \nonumber &=2v(z+w+ \tilde w )+2v(z-w+\tilde w)-4v(z+ \tilde w )\\ \nonumber &\quad - v(z-w+2 \tilde w )-v(z+w+2 \tilde w )+2v(z+2 \tilde w )\\ \nonumber &\quad + v(z-w)+v(z-w+2 \tilde w )-2v(z-w+ \tilde w )\\ \nonumber &\quad+ v(z+w)+v(z+w+2\tilde w)-2v(z+w+ \tilde w )\\ \nonumber &\quad - 2v(z)-2v(z+2\tilde w)+4v(z+ \tilde w ). \end{align} We can estimate each row because the triple points in each row are in some smooth domain $D_\zeta$ for some $\zeta$. We have obtained the H\"older ratio estimate for $v$. This finishes the proof of \rt{regsol} $(a)$ for its local version.
\setcounter{thm}{0}\setcounter{equation}{0} \section{Proof of \rt{regsol} via canonical solutions}\label{sec1} The proof of regularity of the solutions from local to global uses some standard approaches. See Kerzman~~\cite{MR0281944} for the case when $D$ is a domain in ${\bf C}^n$. We will also derive a global estimate reflecting the norm convexity and this estimate will be used in the next section to prove \rt{regsol+}.
We start with the following. \le{}Let $ D\subset X$ be a domain defined by a $C^2$ function $\rho<0$ and let $D_a$ be defined by $\rho<a$. Let $\rho_t=S_t\rho$, where $S_t$ is the Moser smoothing operator. Suppose that $\partial D$ is an $a_q$ domain. Let $ D^t_a$ be defined by $\rho_t<a$. There exists $t_0=t_0(\partial^2\rho)>0$ and $C>1>c>0$ such that if $0\leq t<t_0$, then \begin{gather} \partial D^t_{-t}\subset D_{-ct}\setminus D_{-Ct}, \quad \partial D^t_{t}\subset D_{Ct}\setminus D_{ct}
\end{gather} while $ D_{b}$ and $ D^t_{b}$ still satisfy the condition $a_q$ for $b\in(-t_0,t_0)$. \end{lemma} \begin{proof}Let $\rho_t=S_t\rho$. We have $
\|\rho_t-\rho\|_0\leq Ct^2\|\rho\|_2. $ This shows that \eq{C2t2} \operatorname{dist}(\partial D^t_s,\partial D_s)<C_2t^2. \end{equation}
When $|s|,|s'|$ are sufficiently small and $s'>s$, we also have $$ c_1(s'-s)\leq \operatorname{dist}( D_s,\partial D_{s'})\leq C_1(s'-s),\quad c_1(s'-s)\leq \operatorname{dist}( D^t_s,\partial D^t_{s'})\leq C_1(s'-s). $$ Thus \re{C2t2} implies that \begin{align} \operatorname{dist}(\partial D^t_{t}, D)&\geq \operatorname{dist}(\partial D_{t}, D)-\operatorname{dist}(\partial D^t_{t},\partial D_{t})\\ &\geq c_1t-C_2t^2>c_1t/2. \nonumber \end{align}
Suppose $L_\zeta\rho$ has $(q+1)$ negative Levi eigenvalues bounded above by $-\lambda$ or $(n-q)$ positive Levi eigenvalues bounded below by $\lambda$ for $\zeta\in U$, where $U$ is a neighborhood of $\partial D$ and $\lambda$ is a positive number. We have $\|\rho_t-\rho\|_{C^2(U)}\leq \epsilon$ when $t<t_0$ and $\zeta\in U$. We find a subspace $W$ of $T_\zeta^{1,0}\partial D$ of dimension $q+1$ such that $L\rho(\zeta,v)\leq-\lambda$ for $v$ in the unit sphere of $W$. We project $W$ onto $\tilde W\subset T_{\tilde \zeta}^{(1,0)}\partial D^t_a$ when $\tilde\zeta\in \partial D^t_a$ is sufficiently close to $\zeta$ and $t$ is close to zero. Then $\dim \tilde W\geq q+1$ and $L\rho(\tilde\zeta,\tilde v)\leq-\lambda/2$ for $\tilde v$ in the unit sphere of $\tilde W$. One can also verify that if $L_\zeta\rho$ has at least $(n-q)$ positive eigenvalues, so does $L_{\tilde\zeta}\rho^t$ when $\tilde\zeta$ is sufficiently close to $\zeta$. Therefore, $D^t_a$ still satisfies the condition $a_q$.\end{proof}
We now formulate the main result of this paper in detail.
\th{regsol-full} Let $r\in(0,\infty]$ and $q\geq 1$. Let $D\colon\rho<0 $ be a relatively compact domain with $C^2$ boundary in a complex manifold $X$ satisfying the condition $a_q$. Let $V$ be a holomorphic vector bundle of finite rank over $X$.
Then there exists a linear $\overline\partial$ solution operator $H_q\colon \Lambda_{(0,q)}^r(D, V)\cap\overline\partial L^2_{loc}(D,V)\to \Lambda_{(0,q-1)}^{r'}(D, V)$ satisfying the following \bppp \item When
$q=1$ or $\partial D$ is strictly $(n-q)$ convex, we have $|H_qf|_{r+1/2}\leq C_r(\rho,\partial\rho,
\partial^2\rho)|f|_r$ \item When $q>1$ and $r'=r+1/2$ and $\partial D\in\Lambda^{r+\f{5}{2}}$, we have
\eq{}|H_qf|_{r+1/2}\leq C_r(\rho,\partial\rho,
\partial^2\rho) ( |\rho|_{r+5/2}\|f\|_1+|f|_r).\end{equation}
\item In both cases, $H_qf\in C^\infty(\overline D)$ when $f\in C^\infty(\overline D)$. \end{list}
Furthermore, the constant $C_r(\rho,\partial\rho,\partial^2\rho)$ is upper-stable under small $C^2$ perturbations of $\rho$; more precisely there exists $\epsilon>0$ such that if $\|\tilde\rho-\rho\|_2<\epsilon$, then
\eq{defupst}
C_r(\tilde\rho,\partial\tilde\rho,\partial^2\tilde\rho)<C_rC_r(\rho,\partial\rho,\partial^2\rho).
\end{equation} We emphasize that $C_r(\rho,\partial\rho,\partial^2\rho)$ involves an unknown constant that is $C_*$ from \rta{3.4.6}. \end{thm} \begin{rem} The stability of estimates on $\overline\partial$ solutions has been discussed extensively in the literature; see Greene-Krantz~~\cite{MR644667} for strictly pseudoconvex domains in ${\bf C}^n$, Lieb-Michel~~\cite{MR1900133} for strictly pseudoconvex domains with smooth boundary in a complex manifold. The stability in terms of \re{defupst} is called upper-stability in Gan-Gong~~\cite{GG} where the reader can find a version of lower stability and its use. \end{rem}
\begin{proof}The proof is a combination of the following: the local regularity results obtained, Grauert's bumping method, the stability of solvability of the $\overline\partial$-equation after the bumping is applied ~\cite{MR0179443}*{Thm.~3.4.1} (see \rt{3.4.6} for the vector bundle version), and the interior estimates of $\overline\partial$-solutions on Kohn's canonical solution.
We will complete the proof in three steps.
\noindent{\em Step 1. Reduction to interior regularity.}
Let $D$ be a relatively compact subset of $ \mathcal U$, defined by $\rho<0$ in $\mathcal U$ in ${\bf C}^n$ with $|\rho|_{C^{5/2}(\mathcal U)}<\infty$ and $\nabla \rho\neq0$ on $\partial D$. For each $p\in\partial D$, we have a configuration $(D,U_p,\psi_p,\rho^1_p)$. Consider the domain \begin{gather}{}
\omega_p':=\psi_p^{-1}(D^1_p\cap D^2_{r_2/2}). \end{gather} Let $\chi\geq0$ be a smooth cut-off function with compact support in $B_{r_2}$ such that $\chi$ equals $1$ on $B_{r_2/2}$. Thus $\omega_p'\cap D$ is contained in $\psi_p^{-1}(D^{12}_p)$. On $D^{1}_p\cap D^2_{r_2}$, we solve the $\overline\partial$ equation $\overline\partial u=(\psi_p^{-1})^*f$ with $u\in \Lambda^{r}(D^{1}_p)$. Then $f_1=f-\psi_p^*\overline\partial(\chi u_p)$ is still in $\Lambda^r$ as $$ (\psi_p^{-1})^*f-\overline\partial(\chi u_p)=(1-\chi)(\psi_p^{-1})^*f-\overline\partial\chi\wedge u_p. $$
In fact, setting $f_1=0$ on $X\setminus D$, we have $f_1\in \Lambda^r(D\cup \tilde B_{r_2/2}(p))$ for $\tilde B_{r_2/2}(p)=\psi_p^{-1}(B_{r_2/2})$. Let $D_p$ be defined by $\rho_p:=\rho-\epsilon\chi\circ\psi_p<0$. When $\epsilon$ is sufficiently small, $D_p$ satisfies the condition $a_q$, while $$ D\cup\omega_p\subset D_p\subset D\cup \tilde B_{r_2/2}(p). $$
Here $\omega_p$ is an open set containing $p$. Note that the size of $\omega_p$ can be chosen uniformly in $\tilde\rho^0$ when $\|\tilde\rho^0-\rho^0\|_2<\delta$, which depends on the modulus of continuity of $\partial^2\rho^0$ and the $\epsilon$.
As in~~\cite{MR1900133,MR3848426}, we find finitely many $p_1,\dots, p_m\in\partial D$ so that $\{\omega_{p_1},\dots, \omega_{p_m}\}$ covers $\partial \Omega$ and $\sum\chi\circ\psi_{p_j}>0$ on $\partial\Omega$. With $\rho_0=\rho$, $D_0=D$ and $\epsilon>0$, set \eq{rhoj=} \rho_j=\rho_{j-1}-\epsilon\chi\circ\psi_{p_j} \end{equation}
and $D_j:=(D_{j-1})_{p_j}\colon\rho_j<0$ for $j\geq1$. We have $D_j\setminus D_{j-1}\subset \tilde B_{r_2}(p_j)$. Also, $D_{j}$ contains $ D\cup \omega_{p_j}$ and $D_j\subset D_{j+1}$. Hence $D_{m}$ contains $\overline D$. Finally, we should choose a small $\epsilon$ that $\|\rho_j-\rho\|_2$ are sufficiently small for $j=1,\dots, m$ in order to apply the stability results in Lemmas~\ref{convex-rho} and \ref{concave-rho}. Using the $\overline\partial$-solution operator $T_j$ for the configuration $(\psi_{p_j}D_j,D_j^2)$ with $\overline\partial T_j(\psi_{p_j}^{-1})^*f_{j}=f_{j}$, we define $$ f_{j+1}=f_j-\overline\partial\{\psi_{p_j}^*(\chi ( T_j\psi_{p_j}^{-1})^*f_{j})\}. $$ Then $f_{m}\in \Lambda^r(D_m)$ is $\overline\partial$ closed on $D_{m}$. We remark that $f\mapsto u_m$ is a linear operator $\mathcal G_D\colon \Lambda^r(D)\cap \ker\overline\partial\to \Lambda^r(D_m)\cap\ker\overline\partial$, and $\mathcal G_D$ is independent of $r$.
We write \eq{ftilde-f} f=\overline\partial u_m+f_m, \quad u_m=\sum \psi_{p_j}^*(\chi ( T_j\psi_{p_j}^{-1})^*f_{j}). \end{equation}
Therefore, we focus on the $\overline\partial$ equation for a \emph{fixed} $a_q$ domain $\Omega$ such that \eq{Om*} \Omega_{-c_*}\subset\tilde D\subset\Omega=D_{c_*}\subset\tilde D_m \end{equation} where $\tilde D$ is any small $C^2$ perturbation of $D$ depending on $c_*$, which is a small positive number. We will apply \rt{3.4.6} to this domain $\Omega$ with $c_*$ being as in \rt{3.4.6}. In what follows, $\tilde D, \tilde D_m$ are denoted by $D, D_m$ respectively.
We also need to estimate the norms for $f_m$. We have \begin{align*}
\|u_{j+1}\|_0&\leq C(D_j)\|f_j\|_0,\\
|u_{j+1}|_{r+1/2}&\leq C(D_j)(|f_j|_r+|\rho_j|_{r+5/2}\|f_j\|_1),\\
|f_{j+1}|_{r}&\leq C(D_j)(|f_j|_{r}+ |\rho_j|_{r+5/2}\|f_j\|_1). \end{align*}
By \re{rhoj=}, we have $|\rho_j|_{r+5/2}\leq C_{j,r}(1+|\rho|_{r+5/2})\leq 2C_{j,r}|\rho|_{r+5/2}$. Thus, \begin{gather}\label{f_test}
\|f_m\|_0\leq C\|f\|_0, \quad |f_{m}|_{r}\leq C(D)|f|_{r}+C(D_j)|\rho|_{r+5/2}\|f\|_1,\\
\|u_m\|_0\leq \|f\|_0, \quad |u_{m}|_{r}\leq C(D)|f|_{r}+C(D_j)|\rho|_{r+5/2}\|f\|_1. \end{gather}
\noindent{\em Step 2. Smoothing for interior regularity.} To obtain the interior regularity, we will use regularity in Sobolev spaces. We need to avoid the loss in H\"older exponent from the Sobolev embedding. To this end, we will again use a partition of unity to overcome the loss. We can make $f_m$ to be $C^\infty$ on any relatively compact subdomain $U'$ of $U$ via local solutions as follows. Fix $x_0\in D$. We solve $\overline\partial u=f_m$ on an open set $ D$ containing $x_0$. Let $\chi$ be a smooth function with compact support in $\omega$ such that $\chi=1$ on a neighborhood $\omega'$ of $x_0$. Then $\tilde f=f_m-\overline\partial(\chi u)=(1-\chi)f_m+\overline\partial\chi\wedge u$ is still in $\Lambda^r$, while $\tilde f=0$ on $\omega'$. In particular, $\tilde f\in C^\infty(\omega')$. Repeating this finitely many times, we can find $\tilde u\in\Lambda^{r+1}( D)$ with compact support in $ D$ such that \eq{tffm} \tilde f=f_m-\overline\partial \tilde u\in C^\infty( D'). \end{equation}
We can also obtain $$
|\tilde f|_{\Lambda^{r'}( D')}\leq C_{r}|f_m|_{\Lambda^r}, \quad |\tilde u|_{\Lambda^{r+1}( D')}\leq C_{r'}|f_m|_{\Lambda^r} $$
for any relatively compact subset $ D'$ of $ D$ and any $r'>r$. We may assume that $ D'$ is a smooth domain satisfying the condition $a_q$. Furthermore, the defining function $\rho'$ of $D'$ satisfies $\|\rho'\|_a\leq C_a\|\rho\|_2$. Rename $ D', \rho'$ as $ D,\rho$.
\noindent{\em Step 3. Interior regularity with estimates.} Let $\varphi=e^{\tau\rho}$, where $\rho=-\operatorname{dist}_{\partial\Omega}$. Let $L_{p,q}( \Omega,V,\varphi)$ be the space of $V$-valued $(p,q)$ forms $f$ on $ \Omega$ such that $$
\|f\|_{\varphi}^2:=\int_ \Omega |f(x)|^2e^{-\varphi(x)}\, d\upsilon(x)<\infty, $$
where $d\upsilon$ is a volume form on $X$. Write $\|f\|_\varphi$ as $\|f\|$ when $\varphi=0$.
As mentioned earlier, we want to apply ~\cite{MR0179443}*{Thm.~3.4.6}, and in fact \rt{3.4.6} for the vector bundle version to $ \Omega=D_{c_*}. $
Then $\varphi$ satisfies condition $a_q$ on $\Omega_{c_*}\setminus\Omega_{-c*}$. Let $\varphi_k=\chi_k(\varphi)$. Fix $k$ to be sufficiently large and let $\tilde\varphi=\varphi_k$.
We now consider $T_{q}=\overline\partial$ as densely defined from $ L_{(0,q-1)}^2(\Omega,\tilde\varphi)$ into $ L_{(0,q)}^2( \Omega,\tilde\varphi)$ and $T_{q}^*$ its adjoint. Let $f_m$ be the $(0,q)$ form derived in Step 1. By \rt{3.4.6}, we find a solution $u_0$ satisfying
$
\overline\partial u_0=f_m
$ on $\Omega$ and
$$
\|u_0\|_{\varphi_{k_*}}\leq C_*\|f_m\|_{\varphi_{k_*}}.
$$
For the estimate, we need Kohn's canonical solutions. By ~\cite{MR0179443}*{Thm.~1.1.1}, $R_{T_{q}^*}$ is also closed and $R_{T_q^*}=N_{T_q}^\perp$. We now apply the decomposition \begin{gather} u_0=u+h, \quad u\in N_{T_q}^\perp, \quad h\in N_{T_q}.
\end{gather}
Thus, $u=(T^\varphi_q)^*v$ and
\eq{uvark}
\|u\|_{\varphi_{k_*}}\leq\|u_0\|_{\varphi_{k_*}}\leq C_*\|f_m\|_{\varphi_{k_*}}.
\end{equation}
In particular, in the sense of distributions, we have
$
\vartheta_q^\varphi v=u$ on $D$. Here $\vartheta_q^\varphi$ is the formal adjoint (acting on test forms) of $\overline\partial_q$ in the $L^2$ spaces with weight $\varphi$. Since $\vartheta^\varphi_{q-1}\vartheta^\varphi_q=0$, we get in the sense of distributions
\eq{keyid}
\vartheta^\varphi_{q-1}u=0.
\end{equation} We now use the system of elliptic equations for $u$ to derive the \emph{interior} estimates, using \re{uvark}.
Let $\|\cdot\|:=\|\cdot \|_{\Omega;k,p}$ denote the norm for space $W^{k,p}(\Omega)$. In local holomorphic coordinates $z$, we have for $u=\sum u_{J}d\overline z^J$ \begin{gather} \overline\partial u=\sum\f{\partial u_J}{\partial \overline z_k}d\overline z_k\wedge d\overline z^J,\quad \vartheta^\varphi_{q-1} u=-\sum \Bigl(\f{\partial u_{jK}}{\partial z_j}+c_{jJK}(\partial\varphi)u_J\Bigr)\, d\overline z^K. \end{gather} Write $\vartheta^\varphi$ as $\vartheta$ when $\varphi=0$. Let $\chi$ be a smooth function with support in a ball $B_R$ of radius $R$ centered at a point in $x_0\in\Omega$. Let $\tilde u=\chi u$. By~\cite{MR1045639}*{Lemma 4.2.3, p.~86}, we obtain $$
\|\overline\partial\tilde u\|_{0,2}+\|\vartheta \tilde u\|_{0,2}\leq C(\|f\|_{0,2}+\|u\|_{0,2})\leq 2C'C_*\|f\|_{0,2}. $$ This shows that for any relatively compact subset $\Omega'$ of $\Omega$, we have $$
\|u\|_{\Omega';{1,2}}\leq C(\Omega',\Omega)C_*\|f\|_{0,2}. $$
Recall the Sobolev compact embedding of $L^{j}_q$ in $L^{j+1}_p$ for $q=\f{p}{1-\f{p}{2n}}$ when $1\leq p<2n$. For the following, we take $p_0=2$ and fix any $2<q<\infty$. We then fix $p_1,\dots, p_{n^*}$ such that $p_{n^*-1}<2n$, $p_{j+1}\leq \f{p_j}{1-\f{p}{2n}}$ and $p_{n^*}>q$.
Let $\Box_{\tilde\varphi}= \overline\partial_{q-1}\vartheta^\varphi_{q-1}+\vartheta_q^\varphi\overline\partial_{q}$. Here $\Delta_g=-\sum g^{jk}\f{\partial^2}{\partial z^j\partial \overline z^k}$ has smooth coefficients and is independent of $\varphi$. The principal part $\Delta_g$ of $\Box^\varphi$ is diagonal and elliptic, where $g$ is the smooth hermitian metric $M$ (see~~\cite{MR2109686}*{pp. 154, 160}). In the sense of distributions, we have \begin{gather}{} \Delta_gu= b(\partial\tilde\varphi)\partial f+c_1(\partial\tilde\varphi)\partial u+ c_0(\partial^2\tilde\varphi)u. \end{gather} Let $\chi$ be a smooth function with support in $B_R$ of radius $R$ centered at a point $x_0\in\Omega$. Without loss of generality, we may assume $x_0=0$.
Let $\tilde u=\chi u$. Then as a weak solution, $\tilde u$ satisfies \begin{gather}\label{Dgtu} \Delta_g\tilde u= \tilde f, \quad \tilde f:=\chi f+\chi b(\partial\tilde\varphi)\partial f+(\chi c_1(\partial\tilde\varphi)+\chi_1)\partial u+ (\chi c_0(\partial^2\tilde\varphi)+\chi_0)u. \end{gather} Here we recall two interior estimates on systems of elliptic equations from Morrey~~\cite{MR0202511}*{Thm 6.4.4., p. 246}: \begin{gather}{}\label{sobe}
\|\tilde u\|_{k+2,p}\leq C \|\Delta_g\tilde u\|_{k,q}+C_R\|\tilde u\|_{0,1},\quad 1<p<\infty;\\
\|\tilde u\|_{k+2+\alpha}\leq C\|\Delta_g\tilde u\|_{k+\alpha}+C_R\|\tilde u\|_{Lip}, \quad \operatorname{supp}\tilde u\subset B_R \label{holde} \end{gather} provided the right-hand sides are finite.
By Sobolev inequality, $\|\tilde u\|_{1,p_1}\leq C_1\|\tilde u\|_{2,2} \leq C_2\|f\|_{1,2}\leq C_3\|f\|_{1,p_1}$. Repeating this bootstrapping argument, we can show that for any $q<\infty$, we have $\|\tilde u\|_{\Omega';1,q}\leq \|f\|_{1,q}$. By \re{sobe}, we get $\|\tilde u\|_{\Omega';2,q}\leq C \|f\|_{1,q}$.
Recall Sobolev inequality $C^{k,\alpha}\subset L^{k+1, q}$ for $\alpha=1-\f{2n}{q}>0$. Thus we have \eq{est1}
\|\tilde u\|_{\Omega';1+\alpha}\leq C(\Omega',\Omega)\|f\|_{\Omega;1} \end{equation} for any $\alpha<1$. Using \re{holde}, we get \begin{gather}{}
\|\tilde u\|_{\Omega';k+2+\alpha}\leq CC_*(\|\tilde f\|_{\Omega';k+\alpha}+|f|_{\Omega;0,2}). \end{gather}
Next, we prove by induction that \eq{uk1}
\|u\|_{\Omega';k+1+\alpha}\leq CC_*(\|\varphi\|_{k+1+\alpha}\|f\|_0+\|f\|_{k+\alpha}). \end{equation} By \re{est1}, the above holds for $k=1$. Suppose the above holds and we want to verify it when $k$ is replaced by $k+1$. We have for $\tilde f$ in \re{Dgtu} \begin{align*}{}
\|\tilde f\|_{\Omega';k+\alpha}&\leq C\|f\|_{\Omega';k+1+\alpha}
+CC_*\|\varphi\|_{\Omega';k+2+\alpha}(\|f\|_{\Omega';0}+\|u\|_{\Omega';0} ) +C\|u\|_{\Omega';k+1+\alpha}. \end{align*}
By \re{uk1}, we get $\|\tilde f\|_{\Omega';k+\alpha}\leq C\|f\|_{\Omega';k+1+\alpha}
+C\|\varphi\|_{\Omega';k+2+\alpha}\|f\|_{\Omega';0}$. Then \re{uk1} yields
$$\|\tilde u\|_{\Omega';k+2+\alpha}\leq CC_*\|\varphi\|_{k+2+\alpha}\|f\|_0+C\|f\|_{k+1+\alpha}. $$ This gives us \re{uk1} with $k,\Omega'$ being replaced by $k+1$ and any open set $\Omega''$ on which $\chi=1$.
\end{proof}
We conclude this section with an isomorphism theorem on cohomology groups with bounds. Let $Z^r_{(0,q)}(\Omega,V)$ be the space of $\overline\partial$-closed forms on $\Omega$ of class $\Lambda^r$. Define $B_{(0,q)}^{r,r'}(\Omega,V)=\Lambda_{(0,q)}^r(\Omega,V)\cap\overline\partial\Lambda_{(0,q-1)}^{r'}(\Omega,V)$ and $B_{(0,q)}^{r,loc}(\Omega,V)=\Lambda_{(0,q)}^r(\Omega,V)\cap\overline\partial L^{2}_{loc}(\Omega,V)$.
Define $\overline H^{r,r'}_{(0,q)}(\Omega, V)=Z^{r}_{(0,q)}(\Omega,V)/{B^{r,r'}_{(0,q)}(\Omega,V)}$ and $\overline H^{r,loc}_{(0,q)}(\Omega, V)=Z^{r}_{(0,q)}(\Omega,V)/{B^{r,loc}_{(0,q)}(\Omega,V)}$. \begin{thm}Let $\Omega$ be relatively compact $a_q$ domain in $X$ and let $V$ be a holomorphic vector bundle on $X$. There exists $c>0$ such that the restriction $\overline H_{(0,q)}^{r,r'}(\Omega, V)\to\overline H^{r,loc}_{(0,q)}(\Omega_c, V)$ is an isomorphism for the following cases. \bppp \item $r\in(1,\infty)$, $r'=r+1/2$ and $q=1$. \item $r=r'=\infty$. \item $r\in(1,\infty)$, $r'=r+1/2$ and $\partial \Omega$ is strictly $(n-q)$ convex. \item $\partial \Omega\in \Lambda^{r+5/2}$, $r>1$ and $r'=r+1/2$. \item $\partial D\in \Lambda^s$ with $s\geq7/2$, $r>s+5/2$, and $r'=r+1/{50}$. \end{list} \end{thm} \begin{proof}The injectivity follows from the stability result proved in the appendix and our regularity results including \rt{nash-moser-c12} $(iii)$ below for case $(v)$. The surjectivity can be obtained by the Grauert bumping method which is valid as $r'\geq r$ and the same regularity results. \end{proof} We remark that the isomorphism of the restriction $\overline H_{(0,q)}^{0,1/2}(\Omega, V)\to\overline H^{0,0}_{(0,q)}(\Omega_c, V)$ is proved in \cite{MR986248}*{Thm. 12.14, Thm. 15.12}, provided $\Omega$ is either $(n-q)$ strictly convex with $\partial\Omega\in C^2$ or it is $(q+1)$ concave with $\partial\Omega$ in $C^{5/2}$.
\setcounter{thm}{0}\setcounter{equation}{0}
\section{Proof of \rt{regsol+} via a Nash-Moser method}\label{sec:NM} In this section, we will prove \rt{regsol+} for $q\geq2$ when the domains have negative Levi eigenvalues by using the Nash-Moser smoothing operators. Our approach was inspired by a method of
Dufresnoy~~\cite{MR526786} for the $\overline\partial$-equation on a compact set that can be approximated from outside by strictly pseudoconvex domains of which the Levi eigenvalues are well controlled. It is interesting that V. Michel~~\cite{MR1198845} showed that if the number of \emph{non-negative} Levi eigenvalues of $\partial \Omega$ is \emph{exactly} $n-q'$ near $z_0\in\partial\Omega$, then $\overline\partial u=f_{0,q}$ has a solution in $C^\infty(U\cap\Omega)$ when $\partial\Omega\in C^2$ for all $q\geq q'$. When $\partial\Omega\in C^4$, there is (a possibly different) solution $u$ in $C^\infty(\overline\Omega\cap U)$. For pseudoconvex domains with $C^2$ boundary in ${\bf C}^n$, the $C^\infty$ regularity of $\overline\partial$ solutions under suitable assumptions on the Levi-form has been proved by Zampieri~\cites{MR1757879, MR1749685} and Baracco-Zampieri~\cites{MR2145559, MR2178735}. The reader is referred to the thesis of Yie~~\cite{MR2693230} for the global regularity of $\overline\partial$ solutions with $\partial D\in C^4$. When $\partial D\in C^\infty$ additionally, the existence of $u$ was proved by Kohn~~\cite{MR344703}. Michel-Shaw~\cite{MR1675218}
obtained smooth regularity of the $\overline\partial$ solutions on annulus domain $D_1\setminus \overline{D_2}$ where $D_1$ is a pseudoconvex domain with piecewise smooth boundary and $D_2$ is the intersection of bounded pseudoconvex domains.
The $H^{s}$ solutions were obtained by Harrington~~\cite{MR2491606} for pseudoconvex domains $D$ with
$\partial D\in C^{k-1,1}$, $k>s+1/2, k\geq2$, and $s\geq0$.
As observed in~~\cite{MR3961327}, to the author's best knowledge it remains an open problem if $C^\infty(\overline D)$ solutions $u$ to $\overline\partial u=f$ exist on a bounded pseudoconvex domain $D$ in ${\bf C}^n$ with $C^2$ boundary.
We now state a detailed version of \rt{regsol+}. \begin{thm}\label{nash-moser-c12}Let $q>1$. Let $D$ be a relatively compact domain with $C^s$ boundary in a complex manifold $X$ satisfying the condition $a_q$. Let $V$ be a holomorphic vector bundle on $X$. Suppose $r>s+5/2$. Define $\hat r$ as follows. \bppp \item When $s=2$ and $r>26/7$, set $\hat r>r-{19}/{7}$. \item When $2< s< 7/2$, set $$\hat r=r+\f{1}{2}-\f{r}{r-1}\f{r+5/2-s}{(s-1)r-5/2}.$$ \item When $s\geq 7/2$, set $$\hat r= r+\f{1}{2}-\f{1}{s-1}\cdot\f{1}{1-\f{5}{2(s-1)r}}\geq r+\f{1}{50}. $$ \end{list} For $r'<\hat r$, there exists a linear $\overline\partial$ solution operator $$ H_{q}^{r,r'}\colon \Lambda_{(p,q)}^r(D, V)\cap\overline\partial L^2_{loc}(D,V)\to \Lambda_{(p,q-1)}^{r'}(D, V) $$ satisfying $
|H_q^{r,r'}f|_{r'}\leq C_{r,r'}(D)|f|_r. $
Furthermore, $C_{r,r'}(D)$ is stable under small $C^2$ perturbations of $D$, and $H_{q}^{r,r'}f\in C^\infty(\overline D)$ if $f\in \Lambda_{(p,q)}^\infty(D, V)\cap\overline\partial L^2_{loc}(D,V)$ additionally. \end{thm} \begin{proof} It suffices to prove the theorem when $r,r+1/2,r',r'+1/2$ are not integers, by replacing $r$ by a smaller number and $r'$ by a larger number. This allows us to identify the Zygmund spaces with the H\"older spaces for these orders. This also allows us to use the Taylor theorem and the global estimates for smooth domains.
Let $D$ be a $(q+1)$ concave domain with $C^{2}$ boundary. Suppose that $f\in C^r$. Using Moser smoothing operator $S_t$, we define \GA{\tilde S_tu=S_tE_{D^\epsilon}u. } Since $t_1$ is larger than $\epsilon_1$, in what follows we will identify $f$ with $Ef$ to define $S_{t_1}f$. We have \GA{\label{tildeS}
\|\tilde S_tu-u\|_{D^\epsilon,a}=\|S_tE_{D^\epsilon}u-E_{D^\epsilon}u\|_{D^\epsilon,a}\leq C_{a,b}t^{b-a}\|u\|_{D^\epsilon,b},\\ \label{tildeS+}
|\tilde S_tu|{b}\leq C_{a,b}t^{a-b}\|u\|_{D^\epsilon,a}. }
Assume that $\partial\Omega\in C^{s}$ with $$ s\geq 2, \quad r\in(s+5/2,\infty). $$ As in Yie~~\cite{MR2693230}, we apply the above to the defining function $\rho$ of $D$ by setting $$ \rho_{t_1}=\rho \ast\chi_{t_1}, \quad t_1=c_*\epsilon_1^{1/s}, \quad \tilde D^{\epsilon_1}=\{\rho _{t_1}<-\epsilon_1\}. $$ We now have \begin{gather}
\|\rho_{t_i}-\rho\|_{0}\leq C_s t^s_i\|\rho\|_{s},\label{rho1-1-25} \\
\|\rho_{t_i}-\rho\|_{2}\leq \epsilon_*t_i^{s-2}\|\rho\|_2,\label{rho1-2c-25} \quad t_i<t^*(\partial^s\rho),\\
\|\rho_{t_i}\|_b\leq C_{b,s}t_i^{s-b}\|\rho\|_{s}, \quad b\geq s. \label{rho1-3-25} \end{gather} We also have $t^{s}_1=c^{s}_*\epsilon_1$. When $c_*$ is sufficiently small, we have by \re{rho1-1-25} $$ c\epsilon_1<\operatorname{dist}(D^{\epsilon_1},\partial D)<c'\epsilon_1, \quad c'<1, $$ where $c,c'$ are independent of $c_*$. We emphasize that $t^*(\partial^2\rho)$ in \re{rho1-2c-25} depends on the {\it modulus of continuity} of $\partial^2\rho$. This however does not cause any difficulty for domains satisfying the condition $a_q$, because the Levi eigenvalues do not decay towards the boundary. This is decisive for the Nash-Moser iteration to succeed in our proof for $C^2$ domains.
By \re{rho1-3-25}, we have a solution operator satisfying $$
|H_q\varphi|_{D^{1}\cap D^2_{\rho_4}, r+1/2}\leq C_r(\partial\rho,\partial^2\rho) ( |\varphi|_{D^{12},r}+|\rho^1|_{r+5/2}\|\varphi\|_{1}).
$$ Therefore, we obtain \begin{gather}\label{v1t}
|v_1|_{D^{\epsilon_1},r+1/2}\leq C_r |f_1|_{D^{\epsilon_1},r}+C_rt_1^{-\f{1}{2}-r}\|f_1\|_{D^{\epsilon_1},1}, \quad r\in(1,\infty). \end{gather}
Smoothing with a different parameter $\epsilon$, we define $w_1=S_{\epsilon_1}\tilde v_1$ on ${\bf C}^n$. On $D$ define $$ f_2=f_1-\overline\partial w_1. $$ We iterate this. Set $\epsilon_i=\epsilon_{i-1}^d$ with $d>1$. We also find a solution $v_i$ on $D^{\epsilon_i}$ so that $$ f_i=\overline\partial v_i,\quad \text{on $D^{\epsilon_i}$}. $$ Define $\tilde v_i=E_{D^{\epsilon_i}}v_i$, $w_i=S_{t_i}\tilde v_i$ and $f_{i+1}=f_i-\overline\partial w_i$. Then $u:=\sum w_i$ is the desired solution to $\overline\partial u=f_1$ on $D$, provided $f_j$ tends to zero on $D$ as $j\to\infty$.
We have $$ \overline\partial v_2= f_1-\overline\partial w_1, \quad \text{on $D^{\epsilon_2}$}. $$
On $D^{\epsilon_1}$, $\tilde v_1=E_{D^{\epsilon_1}}v_1=v_1$ and hence $\overline\partial v_1=\overline\partial\tilde v_1$. Therefore $$
f_2= f_1-\overline\partial w_1=\overline\partial(\tilde v_1-w_1)=\overline\partial(\tilde v_1-S_{\epsilon_1}\tilde v_1),\quad \text{on $D^{\epsilon_1}$}. $$ We have
\AL{\|w_1-\tilde v_1\|_{D^{\epsilon_1},a}=\|S_{\epsilon_1}\tilde v_1-\tilde v_1\|_{D^{\epsilon_1},a}\leq C_{b,a}\epsilon_1^{b-a}\|\tilde v_1\|_{b}. } Hence by Taylor's theorem, we get \AL{\label{b2t}
\| f_2\|_{D,1}&\leq C_j\sum_{k=1}^{[b]-1}\epsilon_1^{k-1}\| f_2\|_{D^{\epsilon_1},k}+C_j\epsilon_1^{b-1}\| f_2\|_{D,b}\\
&= C_j\sum_{k=1}^{[b]-1}\epsilon_1^{k-1}\|\overline\partial\tilde v_1-\overline\partial w_1\|_{D^{\epsilon_1},k}+C_b\epsilon_1^{b-1}\| f_2\|_{D,b} \nonumber\\
&\leq C_j\sum_{k=1}^{[b]-1}\epsilon_1^{k-1}\epsilon_1^{b-k-\f{1}{2}}\|v_1\|_{D^{\epsilon_1},b+\f{1}{2}}+C_b\epsilon_1^{b-1}\| f_2\|_{D,[b]}\nonumber\\
&= C'_b \epsilon_1^{b-\f{3}{2}}\|v_1\|_{D^{\epsilon_1},b+\f{1}{2}}+C_b\epsilon_1^{b-1}\| f_2\|_{D,b}.\nonumber }
By analogy of \re{v1t} and \re{b2t}, we have \AL{\label{vii-25}
\|v_{i}\|_{D^{\epsilon_{i}},r+1/2}&\leq C_r \| f_i\|_{D^{\epsilon_i},r}+C_rt_i^{s-\f{5}{2}-r}\| f_i\|_{D^{\epsilon_i},1}, \quad r\in(1,\infty)\\ \label{bi+-25}
\| f_{i+1}\|_{D,1}
&\leq C_r \epsilon_i^{r-\f{3}{2}}\|v_i\|_{D^{\epsilon_i},r+\f{1}{2}}+C_r\epsilon_i^{r-1}\| f_{i+1}\|_{D,r}. }
Thus the $r$-norm is estimated by \AL{\label{m-norm--25}
\| f_{i+1}\|_r\leq \| f_i\|_r+C_r\|\tilde v_i\|_{r+1}\leq \| f_i\|_r+C_r\epsilon_i^{-1/2}\|v_i\|_{r+1/2}. } Therefore, by \re{vii-25}, we obtain \AL{\label{m-norm-25}
\| f_{i+1}\|_r\leq 2 C_r \epsilon_i^{-1/2}( \| f_i\|_{D^{\epsilon_i},r}+ t_i^{s-\f{5}{2}-r}\| f_i\|_{D^{\epsilon_i},1}), \quad r\in(1,\infty)^*. } Here $(1,\infty)^*=(1,\infty)\setminus({\bf N}\cup\f{1}{2}{\bf N})$. We now define \eq{Bi1-25} B_{i+1}=\hat C^2_rt_{i}^{-s/2}B_{i}, \end{equation} where $B_{0}$ is fixed, depending on $r$, so that \AL{
\max\left\{\| f_0\|_r, t_0^{s-5/2-r}\| f_0\|_{1}, \|v_0\|_{r+1/2}\right\}\leq B_{0}. }
By induction, let us show that when $r\in(1,\infty)^*$, \AL{\label{bi-25}
\| f_i\|_r&\leq B_{i},\\ \label{bi0-25}
t_i^{s-5/2-r}\| f_i\|_1&\leq\hat C_r B_i,\\ \label{vi-25}
\|v_i\|_{r+\frac{1}{2}}&\leq \hat C^2_rB_i. } Suppose that the three inequalities hold. We want to verify them when $i$ is replaced by $i+1$. Clearly, $\re{bi-25}_{i+1}$ follows from \re{m-norm-25}, \re{Bi1-25}, \re{bi-25}, and $\hat C_r\geq 4C_r$. By \re{bi+-25}, \re{vi-25} and $\re{bi-25}_{i+1}$, we obtain \AL{
t_{i+1}^{s-5/2-r}\| f_{i+1}\|_1&\leq C_r t_{i+1}^{s-5/2- r}\epsilon_i^{r-\f{3}{2}}\|v_i\|_{r+\f{1}{2}}+C_r
t_{i+1}^{s-5/2- r}\epsilon_i^{r-1}\| f_{i+1}\|_r\\ &\leq C''_rt_{i+1}^{ s-5/2-r}t_i^{sr-\f{3s}{2}}\hat C_r^2B_i+C_rt_{i+1}^{ s-5/2 -r}\epsilon_i^{r-1}B_{i+1} \nonumber\\ \nonumber&= C'_r c_*^{-2r}t_{i+1}^{ s-5/2- r}t_i^{sr-s} B_{i+1}+C_rc_*^{-2r}t_{i+1}^{ s-5/2- r}t_i^{sr-s}B_{i+1}, } which gives us $\re{bi0-25}_{i+1}$, provided $\hat C_r>(C_r'+C_r)c_*^{-2r}$ and \eq{condd-25} -d(r+5/2-s)+sr-s\geq0. \end{equation} The latter is assumed now. Then $\re{vi-25}_{i+1}$ follows from $\re{vii-25}_{i+1}$, $\re{bi-25}_{i+1}$ and $\re{bi0-25}_{i+1}$ and $\hat C_r>2C_r$.
By interpolation, we get $$
| f_i|_{1-\theta+\theta r}\leq C_{r,\theta}| f_i|_1^{1-\theta}| f_i|_r^\theta\leq C_{r,\theta}t_i^{(1-\theta)(r+5/2-s)}B_i. $$ We have $$ t_{i}=t_{1}^{d^{i-1}}, \quad B_i=\hat C_r^{i-1}(t_1\cdots t_{i-1})B_1=\hat C_r^{i-1}t_1^{-\f{d^{i-1}-1}{d-1}}B_1\leq \hat C_r^{i-1}t_i^{-\f{1}{d-1}}B_1. $$
Therefore, $| f_i|_{1-\theta+\theta r}\leq C_r^{i-1}B_1t_i^{\lambda}$ converges rapidly if \re{condd-25} holds and \eq{la1--25} \lambda:=(1-\theta)(r+5/2-s)-\f{1}{d-1}>0. \end{equation} By \re{vii-25}, we have for $\theta r\in(0,\infty)^*$ \begin{align}\label{vii-+-25}
|v_{i}|_{D^{\epsilon_{i}},3/2-\theta+\theta r}&\leq C_r | f_i|_{D^{\epsilon_i},1-\theta+\theta r}+C_rt_i^{s-7/2+\theta-\theta r}| f_i|_{D^{\epsilon_i},1}\\ &\leq B_1C_r\hat C_r^{i-1} t_i^{\lambda}+C_r\hat C_r^it_i^{r-1+\theta-\theta r-\f{1}{d-1}}B_1. \nonumber\end{align}
This shows that $|v_i|_{\f{3}{2}-\theta+\theta r}\leq \hat C_r^{i}t_i^{\lambda_*}$ converges rapidly if \eq{mula} \lambda_*:= (1-\theta)r_* -\f{1}{d-1}>0, \quad r_*:=r-1+\min\{0, 7/2-s\}. \end{equation}
We want to maximize $\theta r$ or $\theta$. Now, $\lambda_*>0$ implies that
$$
\theta <1 -\f{1}{(d-1)r_*}.
$$ The latter is an increasing function of $d$. Assume
\eq{m1d1-25} 1 -\f{1}{(d-1)r_*}>0.
\end{equation} We now specify the parameters. Note that \re{condd-25} and \re{m1d1-25} are equivalent to \eq{existd*} 1+\f{1}{r_*}<d<d_*,\quad d_*:=\f{s(r-1)}{r+5/2-s}. \end{equation} Under the above restriction on $d$, $\f{3}{2}-\theta+\theta r$ has maximum value \eq{hatrval}
\hat r=r+\f{1}{2}-\f{r}{(d_*-1)r_* }.
\end{equation} Then $|v_i|_{r'}<C^i_{r'}t_i^{\lambda'}$ with $\lambda'>0$ where $\lambda'$ depends on $r'$ and $r'<\hat r$. Consequently the solution $u=\sum S_{t_i}E_{D^{\epsilon_i}}v_i$ to $\overline\partial u=f$ is also in $\Lambda^{r'}(\overline D)$.
We now compute the value of $\hat r$. When $2\leq s<7/2$, we have $r_*=r-1$ and there exists $d$ satisfying \re{existd*}. We have $$ \hat r=r+\f{1}{2}-\f{r}{r-1}\f{r+5/2-s}{(s-1)r-5/2}. $$ For $s=2$ and $r>s+5/2$, we get $\hat r>r-\f{19}{7}.$
When $s\geq7/2$ and $r> s+5/2\geq 6$, we have $r_*=r-s+5/2$. One can check that \re{existd*} is satisfied. Then $$ \hat r= r+\f{1}{2}-\f{1}{s-1}\f{1}{1-\f{5}{2(s-1)r}}\geq r+\f{1}{2}-\f{1}{\f{5}{2}}\f{1}{1-\f{1}{r}}> r+\f{1}{50}. $$
Finally, we show that if $f\in C^\infty$, then there is a solution $u\in C^\infty$ on $\omega\cap\overline D$. For notations, we fix $r,r'$ and rename them as $r_0,r_0'$. Thus we have found solutions $\sum w_i$ with $$
|w_i|_0\leq t_i^{r_0+\f{1}{2}}B_i, \quad i=0,1,\dots. $$ Here we have fixed $d\in(1,2),\theta,t_0$ so that $$ (1-\theta_0)r_0 -\f{1}{d-1}>0, \quad -d(r_0+\f{1}{2})+2r_0\geq0, \quad r_0-\f{1}{d-1}\geq0. $$
For any $r>r_0$ satisfying $r,r+\f{1}{2}\not\in{\bf N}$, we choose $B_0$ which depends on $r$ so that \re{bi-25}-\re{vi-25} hold for $i=0$. We define $B_{i+1}$ by \re{Bi1-25}. For the above choice of $\hat C_r$, the proof shows that \re{bi-25}-\re{vi-25} hold for all $i$ since \re{condd-25} and \re{mula}-\re{m1d1-25} hold for the fixed $\theta,d$ (depending on $r_0,r_0'$) and the $r$. This shows that $|v_i|_{1-\theta+\theta r+1/2}$ converges rapidly. Therefore, $u:=\sum w_i\in\Lambda^{1-\theta+\theta r+1/2}$. We conclude that $u\in C^\infty(\overline D)$. \end{proof}
\appendix
\setcounter{thm}{0}\setcounter{equation}{0} \section{Distance function to $C^2$ boundary}\label{sec:vb}
To relax the boundary condition $\partial\Omega\in C^3$ to $\partial\Omega\in C^2$, we need regularity for the distance functions to the boundary of the domains. The following is proved in Li-Nirenberg~~\cite{MR2305073} for $\partial\Omega\in C^{k,\alpha}$ for $k\geq 2$ and $0<\alpha\leq1$ and stated in Spruck~~\cite{MR2351645} for the $C^2$ case and proved in Crasta-Malusa~~\cite{MR2336304} for $C^2$ boundary. We provide a proof here, including a stability property for our purpose. See Gilbarg-Trudinger~~\cite{MR1814364} and Krantz-Park~~\cite{MR614221} when domains are in ${\bf R}^N$. \begin{prop}[Li-Nirenberg~~\cite{MR2305073}, Spruck~~\cite{MR2351645}]\label{LNS} Let $s\in[2,\infty]$. Let $M$ be a smooth Riemannian manifold. Let $\Omega\subset M$ be a bounded domain with $C^s$ (resp. $\Lambda^s$ with $s>2$) boundary. Let $\rho$ be the signed distance function of $\partial\Omega$. There is $\delta=\delta(\Omega)>0$ so that $\rho\in C^s$ (resp. $\Lambda^s$ with $s>2$) in $B_\delta(\partial\Omega)=\{x\in M\colon\operatorname{dist}(x,\partial\Omega)<\delta\}$. Furthermore, $\delta(\Omega)$ is upper-stable under small $C^2$ perturbations of $\partial\Omega$. \end{prop} \begin{proof}
We are given a smooth Riemannian metric on $M$. Let $N$ be a subset of $M$. Let $\gamma_p\colon [0,d]\to M$ be a geodesic connecting $\gamma_p(0)=p\in M\setminus\overline N$ and $p^*=\gamma_p(d)\in\overline N$. Suppose that $\gamma_p$ is normal, i.e. $|\gamma_p'|=1$ and the length $|\gamma_p|$ of $\gamma_p$ equals $\operatorname{dist}(p,N)$. Then $$|\gamma_p([t,t'])|=t'-t, \quad \operatorname{dist}(\gamma_p(t),N)=d-t. $$ Thus if $N$ is a $C^1$ hypersurface near $p^*\in M$, then $\gamma_p$ is orthogonal to $N$ at $p^*$.
We recall some facts about geodesic balls; see~~\cite{MR1138207}*{Chap.~3, Sect.~4}:
a) For $p\in M$, there is $0<r(p)\leq\infty$ so that the geodesic ball $B:=B_r(p)$, centered at $p$ with radius $r$, is strictly geodesic convex for $0<r<r(p)$. Specifically, any two points $p_0,p_1\in B$ are connected by a unique shortest normal geodesic in $M$ and the geodesic is contained in $B$. Here the uniqueness is up to a reparameterization $t\to \pm t+c$.
b) $\partial B$ is a smooth compact hypersurface.
Let $K$ be a compact set in $M$. By a), we have $r(K):=\inf_{p\in K}r(p)>0$. We can cover $K$ by finitely many open sets $U_i$ and choose coordinate chart $x_i$ on $U_i$ such that if a normal geodesic $\gamma$ is contained in $K$ then $|(x_i\circ\gamma)^{(j)}(t)|\leq C_j(K)$ wherever $x_i\circ\gamma(t)$ is defined.
This implies that if $p\in N$, and $N\cap B$ is closed in $B$, then any point $q\in B_{r/2}(p)$ is connected to a point $q^*\in N\cap B$ via a geodesic $\gamma_q$ in $B$ with length $\operatorname{dist}(q,N)$. However, $q^*$ may not be unique.
Let us use the above facts for $N=\partial\Omega$. Set $r_0=r(\partial\Omega)$. Let $\nu$ be the unit inward normal of $\partial\Omega$ with respect to the Riemann metric and choose sign so that $\rho<0$ in $\Omega$. Then $\nu$ is $C^{s-1}$ on $\partial\Omega$. Let $\gamma(t,p)$ be the geodesic through $p\in\partial\Omega$ with $\partial_t\gamma(0,p)=\nu(p)$ and $|t|<r_0$. Then $\gamma$ is $C^{s-1}$ on $(-r_0,r_0)\times\partial \Omega$. Since $s\geq2$, $\partial_t\gamma(0,p)$ is non zero, normal to $\partial\Omega$, and $\gamma$ fixes $\{0\}\times\partial\Omega$ pointwise, then the Jacobian of $\gamma$ is non-singular. By the inverse mapping theorem, there exists a unique solution $( \tilde\rho,P)\in{\bf R}\times\partial\Omega$ satisfying \eq{gPx}
\gamma( \tilde\rho(x),P(x))=x, \quad |\tilde\rho(x)|<r_1 \end{equation} for $x\in B_{r_2}(\partial\Omega)$. Here $0<r_2<r_1<r_0$ and $r_1,r_2$ are sufficiently small. Furthermore, $\tilde\rho,P$ are in $C^{s-1}(B_{r_2}(\partial\Omega))$.
We want to show that $\tilde\rho=\rho$ on $B_{r_2}(\partial\Omega)$. Fix $x\in B_{r_2}(\partial\Omega)$ and take $x^*\in\partial\Omega$ with $\operatorname{dist}(x,x^*)=|\rho(x)|(=\operatorname{dist}(x,\partial\Omega))$. Since $r_2<r(\partial\Omega)$, then $x$ is connected to the center $x^*$ by a geodesic $\tilde\gamma$ in the geodesic ball $B_{r_2}(x^*)$. Since $\operatorname{dist}(x,x^*)=|\rho(x)|$, then $\tilde\gamma$ is orthogonal to $\partial\Omega$ at $x^*$. Then $\tilde\gamma$ must be contained in the normal geodesic $\gamma(\cdot,x^*)$ with tangent vector $\nu(x^*)$. Next we choose the parametrization of $\tilde \gamma$ so that $\tilde\gamma'(0)=\nu(x^*)$. We get $$ \gamma(\rho( x),x^*)=\tilde \gamma(\rho(x))=x, \quad\tilde\gamma(0)=x^*=\gamma(0,x^*),\quad
|\rho(x)|=\operatorname{dist}(x,x^*)<r_2. $$
By the uniqueness of solution to \re{gPx}, we conclude that $\tilde\rho(x)=\rho(x)$ (and $x^*=P(x)$, $|\tilde\rho(x)|<r_2$).
Next we verify $\rho\in C^s$. Recall that the vector field $X_1(x):=\partial_t\gamma(\rho(x),P(x))$ is $C^{s-1}$ in $x=\gamma(\rho(x),P(x))$. Fix $x_0\in B_{r_2}(\partial\Omega)$. In a small neighborhood $U$ of $x_0$ in $B_{r_2}(\partial\Omega)$, we use Gram-Schmidt orthogonalization to find pointwise linearly independent vector fields $X_2,\dots, X_n$ of class $C^{s-1}$ that are orthogonal to $X_1$. We already know that $\rho=\tilde\rho\in C^{s-1}\subset C^{1}$. This allows us to compute a directional derivative of $\rho$ via any $C^1$ curve that is tangent to the direction. Since $\gamma$ is normal, then $X_1\rho=1$. Let $j>1$ and we want to show that $X_j\rho=0$. At $x\in U\setminus\partial\Omega$, Gauss lemma says that $X_2,\dots, X_n$ are tangent to the smooth geodesic sphere $\partial B_{|\rho(x)|}(P(x))$. We have $|\rho(x)|=\operatorname{dist}(x,\partial\Omega)>0$ and $$
|\rho(q)|=\operatorname{dist}(q,\partial\Omega)\leq \operatorname{dist}(q,P(x))=|\rho(x)|, \quad \forall q\in\partial B_{|\rho(x)|}(P(x)). $$ Hence on this geodesic sphere, the $C^{s-1}$ function $\rho$ attains a local extremum at $x$. This shows that $X_j\rho=0$ on $U\setminus\partial\Omega$. By continuity, $X_j\rho(x)=0$ on $U$. Therefore all $X_i\rho$ are $C^\infty$ functions on $U$. As observed by Spruck~~\cite{MR2351645}, since all $X_i$ are $C^{s-1}$, then $\rho\in C^{s}(U)$. Therefore, $\rho\in C^{s}(B_{r_2}(\partial\Omega))$.
Finally, the stability of $\operatorname{dist}(\cdot, \partial\Omega)$ near $\partial\Omega$ is a consequence of the geodesic equations of the second-order ODE system. We leave the details to the reader. \end{proof}
\setcounter{thm}{0}\setcounter{equation}{0} \section{Stability of $L^2$ solutions on pseudoconvex manifolds with $C^2$ boundary satisfying condition $a_q$}\label{sec:vb}
Let $\Omega$ be a relatively compact domain in a complex manifold $X$. Let $V$ be a holomorphic vector bundle on $X$. Let $f$ be a $V$-valued $(0,q)$ form on $\Omega$. Suppose that $\overline\partial u=f$ can be solved on $\Omega$ for some $u\in L^2_{loc}(\Omega)$ and $f=\tilde f+\overline\partial v$ for $v\in L_{loc}^2(\Omega)$ and $\tilde f$ is a $\overline\partial$ closed form on a larger domain $\Omega'$ containing $\overline\Omega$. We want to know if there exists a neighborhood $\tilde\Omega$ of $\overline\Omega$, that is independent of $f$
such that $\tilde f=\overline\partial \tilde u$ for some $\tilde u\in L^2_{loc}(\tilde\Omega)$. If such a domain $\tilde \Omega$ exists, we say the solvability of the $\overline\partial$-equation on $\Omega$ is \emph{stable}. This stability is proved by H\"ormander~~\cite{MR0179443} when $\Omega$ is an $a_q$ domain and $V$ is the trivial bundle and by Andreotti-Vesentini~~\cite{MR0175148}*{Lemma 29, p. 122} for vector bundles on domains that are strictly $(n-q)$ convex with smooth boundary. For completeness, we sketch a proof for the case of vector bundle. We also take this opportunity to relax the boundary condition $\partial\Omega\in C^3$ to the minimum $C^2$ smoothness and we also formulate a stability for the $L^2$ bounds of the $\overline\partial$ equation on $a_q$ domains.
For the reader's convenience, we will give our statements for the vector bundle case with references to~~\cite{MR0179443} so that the reader can locate them easily.
We fix smooth hermitian metrics on $X$ and $V$. Cover $\overline \Omega $ by finitely many open sets $U_j$ of $X$. We assume that each $U_j$ is biholomorphic to the unit ball in ${\bf C}^n$
by a coordinate map $z_j$ which is biholomorphic on $\overline{U_j}$.
In what follows, we will denote by $U$ one of $U_j'$s or their subdomains.
Let $\{e_1,\dots, e_m\}$ be a smooth unitary basis of $V$ on $U$. Let $u=\sum u^\mu e_\mu\in C^1_{(0,q-1)}(\Omega,V,loc)$. We have \eq{} \overline\partial u=Au +Ru, \quad A(u^\nu e_\nu)=(\overline\partial u^\nu) e_\nu, \end{equation} where $Au^\nu=\overline\partial u^\nu$ is as in~~\cite{MR0179443} for the scalar case, and $Ru$ involves no derivatives of $u$, i.e. $Ru$ is of {\it order zero} in $u$ with smooth coefficients. Therefore, the principal part $A$ of $\overline\partial$
is locally {\it diagonal}.
This is an important property so that the proofs for the scalar case can be carried out to the vector bundles case without difficulty.
Let $\omega_1,\dots, \omega_n$ be unitary smooth $(1,0)$ forms on $X$. For a $V$-valued $(p,q)$-forms $f=\sum f^\nu e_\nu$, define $$
\left|\sum f^\nu e_\nu(x)\right|^2=\sum_\nu{\sum_{I,J}}^{\prime}|f^\nu_{I,J}(x)|^2, \quad f^\nu= {\sum_{I,J}}^{\prime}f^\nu_{I,J}\omega^I\wedge\overline\omega^J. $$ We take the volume form on $X$ as $$ d\upsilon=
(\sqrt{-1})^n\omega^1\wedge\cdots\wedge \omega^n\wedge\overline\omega^1\wedge\cdots\wedge\overline\omega^n. $$
For $q\geq0$, let $L_{(p,q)}^2(\Omega ,V,loc)$ and $\Lambda^r_{(p,q)}(\Omega ,V)$ be the spaces of $V$-valued $(p,q)$ forms of which the coefficients on $U$ are in $L^2_{loc}(\Omega\cap U)$, $\Lambda^r({ \Omega\cap U})$, respectively. Let $\mathcal D^{(p,q)}(\Omega ,V)$ be the space of smooth $V$-valued $(p,q)$ forms of which the coefficients are in $\mathcal D(\Omega )$, i.e. smooth functions with compact support. Let $\mathcal D'_{(p,q)}(\Omega ,V)$ be the space of $V$-valued $(p,q)$ forms of which the coefficients are distributions in $\Omega $.
If $\varphi$ is a real $L^\infty$ function in $\Omega$, let $L_{(p,q)}^2(\Omega ,V,\varphi)$ be the space of sections of $V$-valued $(p,q)$ forms satisfying $$
\|f\|_{\varphi}^2=\int_\Omega |f(x)|^2e^{-\varphi(x)}\, d\upsilon(x)<\infty. $$
We will write $\jq{\cdot,\cdot}_\varphi$ for the induced hermitian product and $\|\cdot\|_\varphi$ for the norm on $L^2_{(p,q)}(\Omega ,V, \varphi)$. The latter space is independent of $\varphi$ as a set, and the norms are equivalent for all weights $\varphi\in L^\infty$.
Throughout the appendix, we assume $q\geq1$. The operator $\overline\partial$ defines a linear, closed, densely defined operator $$ T\colon L^2_{(p,q-1)}(\Omega, V ,\varphi)\to L_{(p,q)}^2(\Omega ,V,\varphi) $$ while $Tu=f$ holds if $\overline\partial u=f$ in the sense of distributions. We abbreviate $ T=T_{q}, S=T_{q+1}. $ We will write $T_q$ for $T$ if needed. Note that the domain $D_T$ and range $R_T$ are independent of $\varphi\in L^\infty$.
For $f\in L_{0,q}^2(\Omega,V,\varphi)$, write $v=T^* f$ if $ \jq{u,v}_\varphi=\jq{\overline\partial u, f}_\varphi $ for all $u\in D_{T}$.
Throughout the section, we assume that $\partial\Omega\in C^2$. By \rp{LNS}, $\Omega$ has a $C^2$ defining function $\rho$ in $X$ satisfying
\eq{2dr=1}
2|\overline\partial\rho|=1 \quad \text{on $\partial\Omega$}. \end{equation} We also assume that $\varphi\in Lip(\Omega)$. Then $D_{T^*}$ is independent of $\varphi$, while $R_{T^*}$ depends on $\varphi$.
\begin{rem}
As in~~\cite{MR0179443}, $|T^*f|_\psi$ is {\it always} referred to as the dual with respect to $\psi$, where $\psi$ will be chosen appropriately. For clarity, we write $T_\psi^*$ for $T^*$ when $\psi$ needs to be specified. \end{rem}
Using integration by parts, we can verify that if $f\in C^1_{(p,q)}(\overline \Omega,V )$ has compact support in $U\cap\overline \Omega $, then $f\in D_{T^*}$ if and only if \eq{bvc} \sum_{j=1}^nf^\nu_{I,jK}\DD{\rho}{\omega^j}=0, \quad\text{on $U\cap\partial \Omega $}, \quad \nu=1,\dots, m. \end{equation} Define $ \mathcal D^1_{T^*}(\overline\Omega):=C^1(\overline\Omega,V)\cap D_{T^*}.$ We have from~~\cite{MR0179443}*{p. 148} $$ T^* f=B f +R^*f, \quad B f:=(B f^\nu)e_\nu, \quad\text{on $U$} $$ with $$ B f^\nu=-\sum_j{\sum_{J,K}}'\delta_j f^\nu_{I,jK}\omega^I\wedge\overline\omega^K. $$ Thus $R^*$, {\it independent} of $\varphi$, is an operator of the zero-th order with smooth coefficients, and the $B$ is {\it diagonal} and its principal part is also independent of $\varphi$. Thus the boundary condition is principal and of zero-th order.
In summary, we have \pr{density}Let $\Omega$ be a relatively compact $C^2$ domain in $X$ and let $\varphi\in Lip(\Omega)$.
Then $D_{T^*}$ is independent of $\varphi$. Let $\psi\in L^\infty(\Omega)$ be a real function. \bppp \item For all $f=\sum f^\nu e_\nu\in C_{(p,q)}^1(U\cap\overline\Omega,V )$, \begin{gather}\label{deco}
\left|\|Sf\|_\psi^2-\|Af\|_\psi^2\right|\leq C(\Omega)\|f\|_\psi^2. \end{gather} \item For all $f=\sum f^\nu e_\nu\in C_{(p,q)}^1(\overline \Omega,V )\cap D_{T^*}$ which have compact support in $U\cap\overline \Omega $,
\begin{gather}\label{deco+} \left|\|T^*f\|_\psi^2- \|B f\|_\psi^2\right|\leq C(\Omega)\|f\|_\psi^2. \end{gather}
\item $\mathcal D^1_{T^*}(\overline\Omega)$ is dense in $D_{T^*}\cap D_{S}$ w.r.t. the graph norm $f\to |f|_\psi+|Sf|_\psi+|T^*f|_\psi$.
\end{list} Furthermore, the constants $C(\Omega)$, independent of $\varphi,\psi$, depend only on the diameter of $\Omega$. \end{prop} Here the last assertion follows from \cite{MR0179443}*{p.~121}.
We also have \begin{prop}[\cite{MR0179443}*{eq.~(3.1.9)}]\label{MKH} Let $\Omega$ be a relatively compact $C^2$ domain in $X$. Let $\rho$ be the signed distance function of $\partial\Omega$. Let $\varphi\in C^{1,1}(\Omega)$. For all $f\in C^1_{p,q}(\overline \Omega,V )$ with compact support in $U\cap\overline\Omega$, we have \begin{align}\label{t4}
\|Af\|_\varphi^2+\|B f\|_\varphi^2&=\sum_{\nu=1}^{m}\|Af^\nu \|_\varphi^2+\|B f^\nu \|_\varphi^2\\ &=\sum_{\nu=1}^{m}(Q_1+Q_2+t_1+t_2+t_3+t_4) (f^\nu,f^\nu)\nonumber\\ &=:(Q_1+Q_2+t_1+t_2+t_3+t_4) (f,f), \nonumber \\
Q_1(f,f)&:=\sum_{I,J}\sum_j\int_{U\cap \Omega }\left|\DD{f_{I,J}}{\overline\omega^j}\right|^2e^{-\varphi}\, d\upsilon,\\ Q_2(f,f)&:= \sum_{I,K}\sum_{k,j}\int_{U\cap \Omega }\varphi_{j\overline k}f_{I,jK}\overline{ f_{I,kK}}e^{-\varphi}\, d\upsilon, \end{align} \begin{align} t_1(f,f)&:=\sum_{I,K}\sum_{k,j}\int_{U\cap\partial \Omega }\left(f_{I,jK}\DD{\rho}{\omega^j}\overline{\delta _kf_{I,kK}}- f_{I,jK}\DD{\rho}{\overline\omega^k}\overline{\DD{f_{I,kK}}{\overline\omega^j}}\right)e^{-\varphi}\, d\sigma,\\ t_2(f,f)&:=\sum_{I,K}\sum_{k,j}\int_{U\cap \Omega }\left(f_{I,jK}\overline\sigma_j\overline{\delta _kf_{I,kK}}- f_{I,jK}\sigma_k\overline{\DD{f_{I,kK}}{\overline\omega^j}}\right)e^{-\varphi}\, d\upsilon,\\ t_3(f,f)&:=\sum_{I,K}\sum_{i,j,k}\int_{U\cap \Omega } f_{I,jK}\overline c_{jk}^i\overline{\delta_if_{I,kK}} \, d\upsilon,\\ t_4(f,f)&:=-\sum_{I,K}\sum_{i,j,k}\int_{U\cap\partial \Omega } f_{I,jK} c_{jk}^i\overline{\delta_if_{I,kK}} \, d\upsilon. \end{align} \end{prop} \begin{proof} The proof for $f\in C^2$ and $\varphi\in C^2$ is in~~\cite{MR0179443}. The case for $f\in C^1$ can be obtained by $C^1$ approximation of $C^2$ forms as in~~\cite{MR0179443}*{p.~101}.
We remark that computation from $C^2$ forms does not require the forms to be in $D_{T^*}$ and this allows us to apply the formulas or estimates to $C^1$ forms that are in the domain of $T^*$. \end{proof}
Let $\varphi$ be a $C^2$ real function defined in a neighborhood of $z_0\in X$. Let $1\leq q<n$. Recall from ~\cite{MR0179443}*{Def.~3.3.4} that $\varphi$ satisfies {\it the condition $a_q$} at $z_0$,
if \eq{dpsi0} \nabla\varphi(z_0)\neq0\end{equation} and the Levi-form $L_{z_0}\varphi$, the restriction of $H_{z_0}\varphi(t):=\sum\DD{^2\varphi}{\omega_j\overline\omega_k}(z_0)t_j\overline t_k$ on $T_{z_0}'\varphi:=\{t\in{\bf C}^n\colon\sum t_j\DD{\varphi}{\omega_j}(z_0)=0\}$, has at least $q+1$ negative or at least $n-q$ positive eigenvalues. Let $\mu_1(z)\leq\mu_2(z)\leq\cdots\leq\mu_{n-1}(z)$ be the eigenvalues of the Levi form $L_{z}\varphi$ and let $\lambda_1(z)\leq\lambda_2(z)\leq\cdots\leq\lambda_n(z)$ be the eigenvalues of the hermitian form $H_z\varphi(t):=\sum\DD{^2\varphi}{\omega_j\overline\omega_k}t_j\overline t_k$. Recall that the minimum-maximum principle for the eigenvalues says that $$
\lambda_j(z)=\min_{\dim V=j}\max_{v\in V, |v|=1}\{H_z\varphi (v)\}. $$ Thus $\lambda_1(z)\leq\mu_1(z)\leq\cdots\leq\mu_{n-1}(z)\leq \lambda_n(z)$.
Let $r^-=\max(-r,0)$ for a real $r$. Then at $z_0$, condition $a_q$ is valid if and only if
$$
\mu_1+\cdots+\mu_q+\sum_{j=1}^{n-1}\mu_j^->0.
$$
If $\psi<\psi(z_0)$ is strictly pseudoconvex at $z_0$ then $\psi$ satisfies the $a_q$ condition for $q=0,\dots, n-1$.
Recall from~~\cite{MR0179443}*{Def. ~3.3.2} that $\psi$ satisfies {\it the condition $A_q$} at $z_0$, if \re{dpsi0} holds
and
$$
\lambda_1(z_0)+\cdots+\lambda_q(z_0)+\sum_{j=1}^{n-1}\mu_j^-(z_0)>0.
$$ When needed, we denote the above eigenvalues $\lambda_j,\mu_k$ by $\lambda_j(z_0,\varphi),\mu_k(z_0,\varphi)$. Let us prove the following estimate for \emph{weighted} eigenvalues. \begin{lemma}[\cite{MR0179443}*{Lem.~3.3.3}]\label{3.3.3} Suppose that $\varphi$ satisfies the condition $a_q$ at $\zeta$. Then $e^{\tau\varphi}$ satisfies the condition $A_q$. More specifically, there exists $c(\varphi)>0$ and $\tau_0(\varphi)$ such that for $ \tau>\tau_0$ \eq{newAq}e^{-\tau\varphi(\zeta)}\left \{ \lambda_1(\zeta,e^{\tau\varphi})+\cdots+\lambda_q(\zeta,e^{\tau\varphi})+\sum_{j=1}^{n-1}\mu_j^-(\zeta,e^{\tau\varphi})\right\} >c(\varphi)\tau. \end{equation} Furthermore, $c(\varphi), \tau_0(\varphi)$ are stable under small $C^2$ perturbation of $\varphi$. \end{lemma} \begin{proof}The proof in~\cite{MR0179443} uses a proof-by-contradiction argument. For stability, we need a direct proof. We have $$\lambda_1(z_0)+\cdots+\lambda_q(z_0)+\sum_{j=1}^{n-1}\mu_j^-(z_0)\geq \lambda_1(z_0)+\cdots+\lambda_q(z_0)+\sum_{j=1}^{n-1}\mu_j^-(z_0). $$ For $t$, decompose $t\cdot\f{\partial}{\partial \zeta}=t'+t''$ where $t'$ is in the complex tangent space $T'_\zeta \varphi$ and $t''$ is in its orthogonal complement. We have $$ \tilde H^\tau_\zeta(t):=\tau^{-1} e^{-\tau\varphi}He^{\tau\varphi}_\zeta(t)= H_\zeta\varphi(t)+
\tau|\partial\varphi(\zeta)|^2|t''|^2. $$ Restricted on $T_\zeta'$, the above is still the Levi form $L_\zeta\varphi$ of which the eigenvalues are $\mu_1\leq\cdots\leq\mu_{n-1}$. Let $\lambda_1(\tau),\dots,\lambda_n(\tau)$ be the eigenvalues of the above quadratic form. We still have $\lambda_1(\tau)\leq\mu_1\leq\cdots\leq\mu_{n-1}\leq\lambda_n(\tau)$. For any $\delta>0$, we choose $\tau_0$ so that $$
\tau|\partial\varphi(\zeta)|^2\delta^2>\lambda_{n}(0)+1, \quad \forall\tau>\tau_0. $$ Then $\lambda_1(\tau)\geq H^\tau_\zeta\varphi(t)\geq \mu_1-\epsilon$ when $\delta$ is sufficiently small. Analogously, we get $\lambda_j(\tau)\geq \mu_j-\epsilon$ for $j=1,\dots, n-1$ when $\delta$ is sufficiently small. We can choose $\epsilon$ depending on $ \mu_1+\cdots+\mu_q+\sum_{j=1}^{n-1}\mu_j^-$
and modulus of continuity of $\partial^2\varphi$ to obtain \re{newAq}. \end{proof}
\begin{thm}[\cite{MR0179443}*{Thm.~3.3.1}]\label{3.3.1} Let $\Omega$ be a relatively compact $C^2$ domain in $X$. Let $z_0\in\Omega$. Suppose that $\varphi\in C^{2}(\overline\Omega)$. Then \eq{eq3.3.1}
\tau \|f\|^2_{\tau\varphi}\leq C_\varphi^*\left\{ \|T_{\tau\varphi}^*f\|_{\tau\varphi}^2+\|S f\|^2_{\tau\varphi} + |f|_{\tau\varphi}^2\right\} \end{equation} holds for some neighborhood $U\subset\Omega$ of $z_0$, some $C_\varphi^*,\tau_\varphi$, and all $\tau>\tau_\varphi$ and all $f\in C_{(p,q)}^1(\overline \Omega,V ) $ with compact support in $U\cap\overline\Omega$, if and only if the hermitian form $\sum\varphi_{jk}(z_0)t_j\overline t_k$ on ${\bf C}^n$ has either at least $q+1$ negative or at least $n-q+1$ positive eigenvalues. Furthermore, we can take $$ C_\varphi^*=\f{C(\Omega)}{\min_{z_0\in \Omega\setminus\Omega_c}(\sum_1^{n-1}\mu^-_j(z_0,\varphi)+\sum_{j=1}^q\mu_j(z_0,\varphi))} $$ where $\mu_1(z_0,\varphi)\leq \cdots\leq \mu_{n-1}(z_0,\varphi)$ are eigenvalues of $L_{z_0}\varphi$ with respect to the hermitian metric on $X$, while $U$ depends on the modulus of continuity of $\partial^2 \varphi$. The constants $C_\varphi^*, \tau_\varphi$ are stable under $C^2$ perturbation of $\partial\Omega$. \end{thm} \begin{proof}Take any $g\in C^1_{(p,q)}(\overline\Omega)\cap D_{T^*}$ with compact support in $U\cap\overline\Omega$. Apply \re{eq3.3.1} to $f=ge_1$, which is actually proved in~\cite{MR0179443} for the $g$; see (3.3.4), (3.3.5), (3.3.6) in~~\cite{MR0179443}. By \re{deco} we get \begin{align*}
\tau \|g\|^2_{\tau\varphi}&\leq C_\varphi(\|T^*(ge_1)\|_{\tau\varphi}^2+\|\overline\partial (ge_1)\|^2_{\tau\varphi})\\
&\leq C_\varphi(\|T^*g\|_{\tau\varphi}^2+\|\overline\partial g\|^2_{\tau\varphi}+C_1\|g\|^2_{\tau\varphi}), \end{align*} where $C_\varphi$ depends on the eigenvalues of $\varphi$ and $C_1$ is independent of $\tau$ and $\varphi$. Assume further that $\tau>2C_\varphi C_1$. Then we get \re{eq3.3.1} in which $f,C_\varphi$ are replaced by $g, 2C_0$. Note that the constant $C$ in \re{deco} is independent of $\tau$. By \cite{MR0179443}*{Thm 3.3.1}, we get the eigenvalue condition. Assume that the eigenvalue condition holds. Then \re{eq3.3.1} holds when $f$ is replaced by $f^\nu$ for each $\nu$. By \re{deco} again, we get \re{eq3.3.1} by adjusting $\tau_0$ and $C_0$. \end{proof}
\begin{thm}[\cite{MR0179443}, Thm.~$3.3.5$]\label{3.3.5}Let $\Omega$ be a relatively compact $C^2$ domain in $X$. Let $\varphi$ satisfy condition $A_q$ at $z_0\in\overline\Omega$. If $z_0\in\partial\Omega$ assume further that $\varphi<\varphi|_{\partial\Omega}=\varphi(z_0)$ in $\Omega$. Then there are a neighborhood $U$ of $z_0$ and a constant $C_\varphi^*$ such that for every convex increasing $C^2$ function $\chi$ on ${\bf R}$ we have \eq{eq3.3.11}
\int\chi'(\varphi)|f|^2e^{-\chi(\varphi)}\, d\upsilon\leq C^*_\varphi(\|T^*f\|_{\chi(\varphi)}^2+
\|\overline\partial f\|_{\chi(\varphi)}^2+\|f\|_{\chi(\varphi)}^2) \end{equation} for all $f\in C_{(p,q+1)}^1(\overline \Omega,V )\cap D_{T^*}$ with compact support in $U\cap\overline\Omega$. \end{thm} \begin{proof}We apply the scalar version of the result as in the proof of \rt{3.3.1}. The proof in \cite{MR0179443} is valid via $C^1$ density. \end{proof}
By partition of unity, the above yields the following.
\begin{prop}[\cite{MR0179443}*{Prop.~3.4.4}]\label{3.4.4} Let $\Omega$ be a relatively compact $C^2$ domain in $X$.
Let $\varphi<0$ in $\Omega$ and vanish on $\partial\Omega$ with $\varphi\in C^2(\overline\Omega)$. Let $\Omega_a=\{z\in\Omega\colon\varphi(z)<a\}$. Suppose that $\varphi$ satisfies condition $A_q$ in $\overline\Omega\setminus\Omega_{-c}$ for some $c>0$. Then there are a compact subset $K$ of $\Omega_{-c}$ and a constant $C_\varphi^*$ such that for every convex increasing function $\chi\in C^2({\bf R})$ \eq{eq3.4.2}
\int_{\Omega\setminus K}\chi'(\varphi)|f|^2e^{-\chi(\varphi)}\, d\upsilon\leq C^*_\varphi(\|T^*f\|_{\chi(\varphi)}^2+
\|S f\|_{\chi(\varphi)}^2+\|f\|_{\chi(\varphi)}^2) \end{equation}
holds for all $f\in C^1_{(p,q)}(\overline\Omega, V)\cap D_{T^*}$. \end{prop}
\begin{thm}[\cite{MR0179443}*{Thm.~3.4.1}]\label{3.4.1} Let $\Omega$ be a relatively compact $C^2$ domain in $X$. Suppose that $\partial\Omega$ satisfies the condition $a_q$. Fix a $C^2$ defining function $\rho$ of $\Omega$ such that $\rho$ is the signed distance function to $\partial\Omega$ and fix
$\varphi=e^{\lambda\rho}$ with $\lambda$ sufficiently large. Then there exist a compact subset $K$ of $\Omega$ and a constant $\tau_\varphi$
such that if $\tau>\tau_\varphi$
and $ f\in D_{S}\cap D_{T^*}\cap L^2_{p,q}(\Omega,V)$ we have \eq{eq3.4.1}
\int_{\Omega\setminus K}|f|^2e^{-\tau\varphi}\, d\upsilon\leq\|T^*f\|_{\tau\varphi}^2+\|S f\|_{\tau\varphi}^2+\int_K|f|^2e^{-\tau\varphi}\, d\upsilon.
\end{equation} The latter implies that
$R_{T}$ is closed and finite codimensional in $N_{S}$.
\end{thm} \begin{proof}
Here we need to go through the proof of \cite{MR0179443}*{Thm.~3.4.1}. There is a compact set $K$ in $\Omega$ such that \eq{tOms-}
\tau\int_{\Omega\setminus K}|f|^2e^{-\tau\varphi}\, d\upsilon\leq C^*_\varphi(\|T^*f\|^2_{\tau\varphi}+\|Sf\|^2_{\tau\varphi}+\|f\|^2_{\tau\varphi}), \end{equation} where $ C^*_\varphi$ is independent of $\tau$. The above is proved in \cite{MR0179443}*{Thm.~3.4.1} when $V$ is trivial. Thus it also holds for any $V$ by \re{deco} and \re{deco+}.
We get \re{eq3.4.1} for $f\in C^1(\overline\Omega, V)\cap D_{T^*}$ when $\tau>2 C^*_\varphi$. By the density theorem, it holds for $f\in D_{T^*}\cap D_S$. The proof for the other direction in \cite{MR0179443}*{Thm.~3.4.1} is valid without any change.
\end{proof}
So far, all the constants in the estimates are stable under $C^2$ perturbations of the domain $\Omega$ and these constants are explicit to some extent. The next constant is however not explicit since it comes from a proof by contradiction. Nevertheless, it leads to no essential difficulty in our applications.
Fix $\gamma>2C_\varphi^*$, where $C_\varphi^*$ is in \re{eq3.4.1}. Let $\chi_k\in C^2$ be an increasing sequence of convex increasing functions such that \eq{chik} \chi_k(\tau)=\gamma\tau,\quad \text{when $\tau<c$}; \qquad \chi'_k(\tau)\to\infty, \quad \text{as $k\to\infty, \ \tau>c$}. \end{equation} Set $\varphi_k=\chi_k(\varphi)$. Note that $\varphi_k\in C^2(\overline\Omega)$. We have the following. \begin{prop}[\cite{MR0179443}*{Prop.~3.4.5}]\label{3.4.5}Let $\Omega, \Omega_{-c}, \varphi$ satisfy the hypotheses in \rpa{3.4.4}. In particular, $\varphi$ satisfies condition $A_q$ in $\overline\Omega\setminus\Omega_{-c}$.
There exist constants $C_*$ and $k_*$, depending on $\varphi,c,\gamma$, and the sequence $\chi_k$ such that for $k>k_*$
\eq{eq3.4.4a} \|f\|^2_{\varphi_k}\leq C_*(\|T^*f\|_{\varphi_k}^2+\|S f\|^2_{\varphi_k}) \end{equation} provided $f\in D_{T^*}\cap D_{S}\cap L_{(p,q)}^2(\Omega,V)$ with $q\geq1$ and \eq{eq3.4.4b} \int_{\Omega_{-c}}\ip{f,g}e^{-\gamma\varphi}\, d\upsilon=0, \quad \forall g\in N_{(p,q)}(\Omega_{-c}, V,\gamma\varphi). \end{equation} Furthermore, $k_*, c, C_*$ are stable under $C^2$ perturbation of $\partial\Omega$ in the sense defined in \rta{3.4.6} $(ii)$ below. \end{prop} \begin{proof}Fix $\gamma>2C^*_\varphi$ for the constant $C^*_\varphi$ in \rea{eq3.4.2}. Define \eq{Npq} N_{(p,q)}(\Omega_{-c}, V,\gamma\varphi):=N_{S_c}\cap N_{T_c^*}, \end{equation} where $T_c^*$ is the adjoint of $T_c=\overline\partial\colon L^2_{(p,q-1)}(\Omega_{-c},V,\gamma\varphi)\to L^2_{(p,q)}(\Omega_{-c},V,\gamma\varphi)$, while $S_c$ is the operator $\overline\partial\colon L^2_{(p,q)}(\Omega_{-c},V,\gamma\varphi) \to L^2_{(p,q+1)}(\Omega_{-c},V,\gamma\varphi)$.
Assume that estimate \re{eq3.4.4a} is false. Then
we can find $f_k\in D_{T^*}\cap D_{S}$ such that \begin{gather}\label{eq3.4.5a}
\|f_k\|_{\varphi_k}=1, \quad \|T^*f_k\|_{\varphi_k}+\| Sf_k\|_{\varphi_k}<1/k,\\ \label{eq4.4.5aa} \int_{\Omega_{-c}}\ip{f_k,g}e^{-\gamma \varphi}\, d\upsilon=0, \quad \forall g\in N_{(p,q)}(\Omega_{-c}, V,\gamma\varphi). \end{gather} By the density theorem, we may assume that $f_k\in C_{(p,q)}^1(\overline \Omega,V )\cap D_{T^*}$, while \re{eq3.4.5a} still holds and \re{eq4.4.5aa} is, however, weakened to \eq{eq3.4.5b}
\left|\int_{\Omega_{-c}}\ip{f_k,g}e^{-\gamma\varphi}\, d\upsilon\right|<1/k, \quad \forall g\in N_{(p,q)}(\Omega_{-c}, V,\gamma\varphi), \quad |g|_{\Omega_{-c},\gamma\varphi}\leq1. \end{equation} (Compare \re{eq3.4.5a} and \re{eq3.4.5b} with \cite{MR0179443}*{eq.~(3.4.5)}.) Here we used $\varphi_k=\gamma\varphi$ on $\Omega_{-c}$ and Cauchy-Schwarz inequality $$
\left|\int_{\Omega_{-c}}\ip{f_k,g}e^{-\gamma\varphi}\, d\upsilon\right|=\left|\int_{\Omega_{-c}}\ip{f_k-f,g}e^{-\gamma\varphi}\, d\upsilon\right| \leq C\left\{\int_{\Omega}|f_k-f|^2e^{-\varphi_k}\, d\upsilon\right\}^{1/2}. $$
The rest of the proof in~\cite{MR0179443} was stated for $f_k\in C^2$. However, the arguments are valid for $f_k\in C^1$ without any change. We will not repeat it here. \end{proof}
\begin{thm}[\cite{MR0179443}*{Thm.~3.4.6}]\label{3.4.6}Let $\Omega$ be a relatively compact $C^2$ domain in $X$. Let $\Omega_{-c}, \varphi, k,\varphi_k$ be as in \rpa{3.4.4}. Let $V$ be a holomorphic vector bundle in $X$. Assume that $\varphi$ satisfies the condition $a_q$ in $\overline \Omega\setminus\Omega_{-c}$. There exist $k_*$ and $C_*$ satisfying the following. \bppp\item If $f\in L^2_{(p,q)}(\Omega,V)$
and the equation
$\overline\partial u_0=f$ has a solution $u_0$ in $L^2(\Omega_{-c},V)$, then
it has a solution $u$ in $L^2_{(p,q-1)}(\Omega,V)$. In other words, the restriction $\overline H_{(p,q)}(\Omega, V)\to\overline H_{(p,q)}(\Omega_{-c}, V)$ is injective. Moreover,
\eq{uCs}
\|u\|_{\varphi_k}\leq C_*\|f\|_{\varphi_k}, \quad k\geq k_*,
\end{equation}
where $C_*, c, k_*$ are the constants in \rea{eq3.4.4a}.
\item Furthermore, $C_*,c, k_*$ are stable under $C^2$ perturbations of $\partial\Omega$ in the following sense: Fix $c_*:=c$. If $\Omega,\tilde\Omega$ have $C^2$ defining functions $\rho,\tilde \rho$ such that $\|\tilde\rho-\rho\|_2<\delta$, then we have
\eq{Omcs}
\Omega_{-c_*}\subset \tilde\Omega_{-c_*/2}\subset\Omega
\end{equation}
for some $c_*>0$. If $\overline\partial u=f$ with $f\in L^2(\Omega)$ admits a solution $u_0\in L^2_{loc}(\tilde\Omega_{-c_*/2})$ then there is a solution $u\in L^2(\Omega_{c_*})$ such that $\overline\partial u=f$ on $\tilde\Omega$.
Furthermore,
$$
|u|_{\Omega,\varphi_k}\leq C_k^*|f|_{\Omega_{c_*},\varphi_k}, \quad k\geq k_*.
$$
\end{list}
Therefore, $C_k^*, c_*, k_*$ are independent of $\tilde\Omega$ and $\tilde\rho$, and $\delta$ depends on $c_*$ and $\varphi$. \end{thm} \begin{proof}The proof is identical to that of \cite{MR0179443}*{Thm.~3.4.6}, using \rp{3.4.5}. \end{proof} We should mention that there is a detailed study in Lieb-Michel~\cite{MR1900133}*{Chapt.~VIII, Sect.~8} on the stability of estimates for the $\overline\partial$-Neumann operator on $\Omega_c$, when $\Omega$ is a strictly pseudoconvex manifold with smooth boundary. In our case, we must treat a slightly more general situation where $\tilde \Omega$ can be any $C^2$ perturbation of $\Omega$. We emphasize that we do not know if $C_k^*, k_*$ are stable under $C^2$ perturbations in the sense of \re{defupst}. Nevertheless, using Grauert's bumping method for $a_q$ domains, \rt{3.4.6} suffices for our purposes.
\newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{doi:#1}} \newcommand{\arxiv}[1]{\href{https://arxiv.org/pdf/#1}{arXiv:#1}}
\def\MR#1{\relax\ifhmode\unskip\spacefactor3000 \space\fi
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{MR#1}}
\nocite{}
\begin{bibdiv} \begin{biblist}
\bib{MR0175148}{article}{
author={Andreotti, A.},
author={Vesentini, E.},
title={Carleman estimates for the {L}aplace-{B}eltrami equation on
complex manifolds},
date={1965},
ISSN={0073-8301},
journal={Inst. Hautes \'Etudes Sci. Publ. Math.},
number={25},
pages={81\ndash 130},
url={http://www.numdam.org/item?id=PMIHES_1965__25__81_0},
review={\MR{0175148}}, }
\bib{MR2768550}{book}{
author={Bahouri, H.},
author={Chemin, J.-Y.},
author={Danchin, R.},
title={Fourier analysis and nonlinear partial differential equations},
series={Grundlehren der mathematischen Wissenschaften [Fundamental
Principles of Mathematical Sciences]},
publisher={Springer, Heidelberg},
date={2011},
volume={343},
ISBN={978-3-642-16829-1},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/978-3-642-16830-7},
review={\MR{2768550}}, }
\bib{MR2145559}{article}{
author={Baracco, L.},
author={Zampieri, G.},
title={Regularity at the boundary for {$\overline\partial$} on
{$Q$}-pseudoconvex domains},
date={2005},
ISSN={0021-7670},
journal={J. Anal. Math.},
volume={95},
pages={45\ndash 61},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF02791496},
review={\MR{2145559}}, }
\bib{MR2178735}{article}{
author={Baracco, L.},
author={Zampieri, G.},
title={Boundary regularity for {$\overline\partial$} on
{$q$}-pseudoconvex wedges of {$\Bbb C^N$}},
date={2006},
ISSN={0022-247X},
journal={J. Math. Anal. Appl.},
volume={313},
number={1},
pages={262\ndash 272},
url={https://doi.org/10.1016/j.jmaa.2005.03.091},
review={\MR{2178735}}, }
\bib{MR1888228}{article}{
author={Barkatou, M.-Y.},
title={{$\scr C^k$} estimates for {$\overline\partial$} on {$q$}-convex
wedges},
date={2002},
ISSN={0025-5874},
journal={Math. Z.},
volume={239},
number={2},
pages={335\ndash 352},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/s002090100299},
review={\MR{1888228}}, }
\bib{MR2844676}{article}{
author={Barkatou, M.-Y.},
author={Khidr, S.},
title={Global solution with {$\scr C^k$}-estimates for
{$\overline\partial$}-equation on {$q$}-convex intersections},
date={2011},
ISSN={0025-584X},
journal={Math. Nachr.},
volume={284},
number={16},
pages={2024\ndash 2031},
url={https://doi-org.ezproxy.library.wisc.edu/10.1002/mana.200910063},
review={\MR{2844676}}, }
\bib{MR886418}{article}{
author={Beals, R.},
author={Greiner, P.~C.},
author={Stanton, N.~K.},
title={{$L^p$} and {L}ipschitz estimates for the
{$\overline\partial$}-equation and the {$\overline\partial$}-{N}eumann
problem},
date={1987},
ISSN={0025-5831},
journal={Math. Ann.},
volume={277},
number={2},
pages={185\ndash 196},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01457358},
review={\MR{886418}}, }
\bib{MR1800297}{book}{
author={Chen, S.-C.},
author={Shaw, M.-C.},
title={Partial differential equations in several complex variables},
series={AMS/IP Studies in Advanced Mathematics},
publisher={American Mathematical Society, Providence, RI; International
Press, Boston, MA},
date={2001},
volume={19},
ISBN={0-8218-1062-6},
review={\MR{1800297}}, }
\bib{MR2336304}{article}{
author={Crasta, G.},
author={Malusa, A.},
title={The distance function from the boundary in a {M}inkowski space},
date={2007},
ISSN={0002-9947},
journal={Trans. Amer. Math. Soc.},
volume={359},
number={12},
pages={5725\ndash 5759},
url={https://doi-org.ezproxy.library.wisc.edu/10.1090/S0002-9947-07-04260-2},
review={\MR{2336304}}, }
\bib{MR1138207}{book}{
author={do~Carmo, M.~P.},
title={Riemannian geometry},
series={Mathematics: Theory \& Applications},
publisher={Birkh\"{a}user Boston, Inc., Boston, MA},
date={1992},
ISBN={0-8176-3490-8},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/978-1-4757-2201-7},
note={Translated from the second Portuguese edition by Francis
Flaherty},
review={\MR{1138207}}, }
\bib{MR526786}{article}{
author={Dufresnoy, A.},
title={Sur l'op\'{e}rateur {$d^{\prime\prime}$} et les fonctions
diff\'{e}rentiables au sens de {W}hitney},
date={1979},
ISSN={0373-0956},
journal={Ann. Inst. Fourier (Grenoble)},
volume={29},
number={1},
pages={xvi, 229\ndash 238},
url={http://www.numdam.org/item?id=AIF_1979__29_1_229_0},
review={\MR{526786}}, }
\bib{MR2597943}{book}{
author={Evans, L.~C.},
title={Partial differential equations},
edition={Second},
series={Graduate Studies in Mathematics},
publisher={American Mathematical Society, Providence, RI},
date={2010},
volume={19},
ISBN={978-0-8218-4974-3},
url={https://doi-org.ezproxy.library.wisc.edu/10.1090/gsm/019},
review={\MR{2597943}}, }
\bib{GG}{article}{
author={Gan, C.},
author={Gong, X.},
title={Global {N}ewlander-{N}irenberg theorem for domains with {$C^2$}
boundary},
date={2020},
journal={submitted},
pages={41 pages},
note={\url{https://arxiv.org/abs/2005.07679}}, }
\bib{MR1814364}{book}{
author={Gilbarg, D.},
author={Trudinger, N.S.},
title={Elliptic partial differential equations of second order},
series={Classics in Mathematics},
publisher={Springer-Verlag, Berlin},
date={2001},
ISBN={3-540-41160-7},
note={Reprint of the 1998 edition},
review={\MR{1814364}}, }
\bib{MR3961327}{article}{
author={Gong, X.},
title={H\"{o}lder estimates for homotopy operators on strictly
pseudoconvex domains with {$C^2$} boundary},
date={2019},
ISSN={0025-5831},
journal={Math. Ann.},
volume={374},
number={1-2},
pages={841\ndash 880},
note={\doi{10.1007/s00208-018-1693-9}},
review={\MR{3961327}}, }
\bib{MR3848426}{article}{
author={Gong, X.},
author={Kim, K.-T.},
title={The {$\overline{\partial}$}-equation on variable strictly
pseudoconvex domains},
date={2018},
ISSN={0025-5874},
journal={Math. Z.},
volume={290},
number={1-2},
pages={111\ndash 144},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/s00209-017-2011-z},
note={\doi{10.1007/s00209-017-2011-z}},
review={\MR{3848426}}, }
\bib{MR4289246}{article}{
author={Gong, X.},
author={Lanzani, L.},
title={Regularity of a {$\overline\partial$}-solution operator for
strongly {$\bf C$}-linearly convex domains with minimal smoothness},
date={2021},
ISSN={1050-6926},
journal={J. Geom. Anal.},
volume={31},
number={7},
pages={6796\ndash 6818},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/s12220-020-00443-w},
review={\MR{4289246}}, }
\bib{MR2829316}{article}{
author={Gong, X.},
author={Webster, S.~M.},
title={Regularity for the {CR} vector bundle problem {II}},
date={2011},
ISSN={0391-173X},
journal={Ann. Sc. Norm. Super. Pisa Cl. Sci. (5)},
volume={10},
number={1},
pages={129\ndash 191},
review={\MR{2829316}}, }
\bib{MR273057}{article}{
author={Grauert, Hans},
author={Lieb, I.},
title={Das {R}amirezsche {I}ntegral und die {L}\"{o}sung der {G}leichung
{$\bar \partial f=\alpha $} im {B}ereich der beschr\"{a}nkten {F}ormen},
date={1970},
ISSN={0035-4996},
journal={Rice Univ. Stud.},
volume={56},
number={2},
pages={29\ndash 50 (1971)},
review={\MR{273057}}, }
\bib{MR644667}{article}{
author={Greene, R.~E.},
author={Krantz, S.~G.},
title={Deformation of complex structures, estimates for the {$\bar
\partial $}\ equation, and stability of the {B}ergman kernel},
date={1982},
ISSN={0001-8708},
journal={Adv. in Math.},
volume={43},
number={1},
pages={1\ndash 86},
url={https://doi-org.ezproxy.library.wisc.edu/10.1016/0001-8708(82)90028-7},
review={\MR{644667}}, }
\bib{MR0499319}{book}{
author={Greiner, P.~C.},
author={Stein, E.~M.},
title={Estimates for the {$\overline \partial $}-{N}eumann problem},
series={Mathematical Notes, No. 19},
publisher={Princeton University Press, Princeton, N.J.},
date={1977},
ISBN={0-691-08013-5},
review={\MR{0499319}}, }
\bib{MR2491606}{article}{
author={Harrington, P.~S.},
title={Sobolev estimates for the {C}auchy-{R}iemann complex on {$C^1$}
pseudoconvex domains},
date={2009},
ISSN={0025-5874},
journal={Math. Z.},
volume={262},
number={1},
pages={199\ndash 217},
url={https://doi.org/10.1007/s00209-008-0369-7},
review={\MR{2491606}}, }
\bib{MR986248}{book}{
author={Henkin, G.~M.},
author={Leiterer, J.},
title={Andreotti-{G}rauert theory by integral formulas},
series={Progress in Mathematics},
publisher={Birkh\"{a}user Boston, Inc., Boston, MA},
date={1988},
volume={74},
ISBN={0-8176-3413-4},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/978-1-4899-6724-4},
review={\MR{986248}}, }
\bib{MR0293121}{article}{
author={Henkin, G.~M.},
author={Romanov, A.~V.},
title={Exact {H}\"{o}lder estimates of the solutions of the {$\bar
\delta $}-equation},
date={1971},
ISSN={0373-2436},
journal={Izv. Akad. Nauk SSSR Ser. Mat.},
volume={35},
pages={1171\ndash 1183},
review={\MR{0293121}}, }
\bib{MR0161012}{book}{
author={H\"{o}rmander, L.},
title={Linear partial differential operators},
series={Die Grundlehren der mathematischen Wissenschaften, Band 116},
publisher={Academic Press, Inc., Publishers, New York; Springer-Verlag,
Berlin-G\"{o}ttingen-Heidelberg},
date={1963},
review={\MR{0161012}}, }
\bib{MR0179443}{article}{
author={H\"ormander, L.},
title={{$L^{2}$} estimates and existence theorems for the {$\bar
\partial $}\ operator},
date={1965},
ISSN={0001-5962},
journal={Acta Math.},
volume={113},
pages={89\ndash 152},
url={https://doi.org/10.1007/BF02391775},
review={\MR{0179443}}, }
\bib{MR1045639}{book}{
author={H\"ormander, L.},
title={An introduction to complex analysis in several variables},
edition={Third},
series={North-Holland Mathematical Library},
publisher={North-Holland Publishing Co., Amsterdam},
date={1990},
volume={7},
ISBN={0-444-88446-7},
review={\MR{1045639}}, }
\bib{MR422688}{article}{
author={Hortmann, M.},
title={\"{U}ber die {L}\"{o}sbarkeit der {$\bar \partial $}-{G}leichung
auf {R}inggebieten mit {H}ilfe von {$L^{p}$}-, {${\mathcal C}^{k}$}- und
{${\mathcal D}$}-stetigen {I}ntegraloperatoren},
date={1976},
ISSN={0025-5831},
journal={Math. Ann.},
volume={223},
number={2},
pages={139\ndash 156},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01360878},
review={\MR{422688}}, }
\bib{MR627759}{inproceedings}{
author={Hortmann, M.},
title={Globale holomorphe {K}erne zur {L}\"{o}sung der
{C}auchy-{R}iemannschen {D}ifferentialgleichungen},
date={1981},
booktitle={Recent developments in several complex variables ({P}roc.
{C}onf., {P}rinceton {U}niv., {P}rinceton, {N}. {J}., 1979)},
series={Ann. of Math. Stud.},
volume={100},
publisher={Princeton Univ. Press, Princeton, N.J.},
pages={199\ndash 226},
review={\MR{627759}}, }
\bib{MR0487423}{article}{
author={Johnen, H.},
author={Scherer, K.},
title={On the equivalence of the {$K$}-functional and moduli of
continuity and some applications},
date={1977},
pages={119\ndash 140. Lecture Notes in Math., Vol. 571},
review={\MR{0487423}}, }
\bib{MR0281944}{article}{
author={Kerzman, N.},
title={H\"older and {$L^{p}$} estimates for solutions of {$\bar \partial
u=f$} in strongly pseudoconvex domains},
date={1971},
ISSN={0010-3640},
journal={Comm. Pure Appl. Math.},
volume={24},
pages={301\ndash 379},
url={https://doi-org.ezproxy.library.wisc.edu/10.1002/cpa.3160240303},
review={\MR{0281944}}, }
\bib{MR2109686}{book}{
author={Kodaira, K.},
title={Complex manifolds and deformation of complex structures},
edition={English},
series={Classics in Mathematics},
publisher={Springer-Verlag, Berlin},
date={2005},
ISBN={3-540-22614-1},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/b138372},
note={Translated from the 1981 Japanese original by Kazuo Akao},
review={\MR{2109686}}, }
\bib{MR0153030}{article}{
author={Kohn, J.~J.},
title={Harmonic integrals on strongly pseudo-convex manifolds. {I}},
date={1963},
ISSN={0003-486X},
journal={Ann. of Math. (2)},
volume={78},
pages={112\ndash 148},
url={https://doi-org.ezproxy.library.wisc.edu/10.2307/1970506},
review={\MR{0153030}}, }
\bib{MR0208200}{article}{
author={Kohn, J.~J.},
title={Harmonic integrals on strongly pseudo-convex manifolds. {II}},
date={1964},
ISSN={0003-486X},
journal={Ann. of Math. (2)},
volume={79},
pages={450\ndash 472},
url={https://doi-org.ezproxy.library.wisc.edu/10.2307/1970404},
review={\MR{0208200}}, }
\bib{MR344703}{article}{
author={Kohn, J.~J.},
title={Global regularity for {$\bar \partial $} on weakly pseudo-convex
manifolds},
date={1973},
ISSN={0002-9947},
journal={Trans. Amer. Math. Soc.},
volume={181},
pages={273\ndash 292},
url={https://doi-org.ezproxy.library.wisc.edu/10.2307/1996633},
review={\MR{344703}}, }
\bib{MR614221}{article}{
author={Krantz, S.~G.},
author={Parks, H.~R.},
title={Distance to {$C^{k}$} hypersurfaces},
date={1981},
ISSN={0022-0396},
journal={J. Differential Equations},
volume={40},
number={1},
pages={116\ndash 120},
url={https://doi-org.ezproxy.library.wisc.edu/10.1016/0022-0396(81)90013-9},
review={\MR{614221}}, }
\bib{MR1207871}{incollection}{
author={Laurent-Thi\'{e}baut, C.},
author={Leiterer, J.},
title={The {A}ndreotti-{V}esentini separation theorem with {$C^k$}
estimates and extension of {CR}-forms},
date={1993},
booktitle={Several complex variables ({S}tockholm, 1987/1988)},
series={Math. Notes},
volume={38},
publisher={Princeton Univ. Press, Princeton, NJ},
pages={416\ndash 439},
review={\MR{1207871}}, }
\bib{MR2305073}{article}{
author={Li, Y.},
author={Nirenberg, L.},
title={Regularity of the distance function to the boundary},
date={2005},
ISSN={0392-4106},
journal={Rend. Accad. Naz. Sci. XL Mem. Mat. Appl. (5)},
volume={29},
pages={257\ndash 264},
review={\MR{2305073}}, }
\bib{MR1900133}{book}{
author={Lieb, I.},
author={Michel, J.},
title={The {C}auchy-{R}iemann complex},
series={Aspects of Mathematics, E34},
publisher={Friedr. Vieweg \& Sohn, Braunschweig},
date={2002},
ISBN={3-528-06954-6},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/978-3-322-91608-2},
note={Integral formulae and Neumann problem},
review={\MR{1900133}}, }
\bib{MR597825}{article}{
author={Lieb, I.},
author={Range, R.~M.},
title={L\"{o}sungsoperatoren f\"{u}r den {C}auchy-{R}iemann-{K}omplex
mit {${\mathcal C}^{k}$}-{A}bsch\"{a}tzungen},
date={1980},
ISSN={0025-5831},
journal={Math. Ann.},
volume={253},
number={2},
pages={145\ndash 164},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01578911},
review={\MR{597825}}, }
\bib{MR928297}{article}{
author={Michel, J.},
title={Randregularit\"{a}t des {$\overline\partial$}-{P}roblems f\"{u}r
st\"{u}ckweise streng pseudokonvexe {G}ebiete in {${\bf C}^n$}},
date={1988},
ISSN={0025-5831},
journal={Math. Ann.},
volume={280},
number={1},
pages={45\ndash 68},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01474180},
review={\MR{928297}}, }
\bib{MR1038709}{article}{
author={Michel, J.},
author={Perotti, A.},
title={{$C^k$}-regularity for the {$\overline\partial$}-equation on
strictly pseudoconvex domains with piecewise smooth boundaries},
date={1990},
ISSN={0025-5874},
journal={Math. Z.},
volume={203},
number={3},
pages={415\ndash 427},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF02570747},
review={\MR{1038709}}, }
\bib{MR1675218}{article}{
author={Michel, J.},
author={Shaw, M.-C.},
title={The {$\overline\partial$} problem on domains with piecewise
smooth boundaries with applications},
date={1999},
ISSN={0002-9947},
journal={Trans. Amer. Math. Soc.},
volume={351},
number={11},
pages={4365\ndash 4380},
url={https://doi.org/10.1090/S0002-9947-99-02519-2},
review={\MR{1675218}}, }
\bib{MR1198845}{article}{
author={Michel, V.},
title={Sur la r\'{e}gularit\'{e} {$C^\infty$} du {$\overline\partial$}
au bord d'un domaine de {$\bold C^n$} dont la forme de {L}evi a exactement
{$s$} valeurs propres strictement n\'{e}gatives},
date={1993},
ISSN={0025-5831},
journal={Math. Ann.},
volume={295},
number={1},
pages={135\ndash 161},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01444880},
review={\MR{1198845}}, }
\bib{MR0099060}{article}{
author={Morrey, C.~B., Jr.},
title={The analytic embedding of abstract real-analytic manifolds},
date={1958},
ISSN={0003-486X},
journal={Ann. of Math. (2)},
volume={68},
pages={159\ndash 201},
url={https://doi-org.ezproxy.library.wisc.edu/10.2307/1970048},
review={\MR{0099060}}, }
\bib{MR0202511}{book}{
author={Morrey, C.~B., Jr.},
title={Multiple integrals in the calculus of variations},
series={Die Grundlehren der mathematischen Wissenschaften, Band 130},
publisher={Springer-Verlag New York, Inc., New York},
date={1966},
review={\MR{0202511}}, }
\bib{MR2088929}{article}{
author={Polyakov, Peter~L.},
title={Versal embeddings of compact 3-pseudoconcave {CR} submanifolds},
date={2004},
ISSN={0025-5874},
journal={Math. Z.},
volume={248},
number={2},
pages={267\ndash 312},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/s00209-004-0598-3},
review={\MR{2088929}}, }
\bib{MR338450}{article}{
author={Range, R.~M.},
author={Siu, Y.-T.},
title={Uniform estimates for the {$\bar \partial $}-equation on domains
with piecewise smooth strictly pseudoconvex boundaries},
date={1973},
ISSN={0025-5831},
journal={Math. Ann.},
volume={206},
pages={325\ndash 354},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01355986},
review={\MR{338450}}, }
\bib{MR1992543}{article}{
author={Ricard, H.},
title={Estimations {$\scr C^k$} pour l'op\'{e}rateur de
{C}auchy-{R}iemann sur des domaines \`a coins {$q$}-convexes et
{$q$}-concaves},
date={2003},
ISSN={0025-5874},
journal={Math. Z.},
volume={244},
number={2},
pages={349\ndash 398},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/s00209-003-0504-4},
review={\MR{1992543}}, }
\bib{MR0385023}{book}{
author={Rudin, W.},
title={Principles of mathematical analysis},
edition={Third},
series={International Series in Pure and Applied Mathematics},
publisher={McGraw-Hill Book Co., New York-Auckland-D\"{u}sseldorf},
date={1976},
review={\MR{0385023}}, }
\bib{MR4244873}{article}{
author={Shi, Z.},
title={Weighted {S}obolev {$L^p$} estimates for homotopy operators on
strictly pseudoconvex domains with {$C^2$} boundary},
date={2021},
ISSN={1050-6926},
journal={J. Geom. Anal.},
volume={31},
number={5},
pages={4398\ndash 4446},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/s12220-020-00438-7},
review={\MR{4244873}}, }
\bib{https://doi.org/10.48550/arxiv.2111.09245}{misc}{
author={Shi, Z.},
author={Yao, L.},
title={Existence of solutions for $\overline\partial$ equation in
{S}obolev spaces of negative index},
publisher={arXiv},
date={2021},
url={https://arxiv.org/abs/2111.09245}, }
\bib{https://doi.org/10.48550/arxiv.2107.08913}{misc}{
author={Shi, Z.},
author={Yao, L.},
title={Sobolev $\frac{1}{2}$ estimates for $\overline{\partial}$
equations on strictly pseudoconvex domains with $C^2$ boundary},
publisher={arXiv},
date={2021},
url={https://arxiv.org/abs/2107.08913}, }
\bib{MR330515}{article}{
author={Siu, Y.-T.},
title={The {$\bar \partial $} problem with uniform bounds on
derivatives},
date={1974},
ISSN={0025-5831},
journal={Math. Ann.},
volume={207},
pages={163\ndash 176},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01362154},
review={\MR{330515}}, }
\bib{MR2351645}{article}{
author={Spruck, J.},
title={Interior gradient estimates and existence theorems for constant
mean curvature graphs in {$M^n\times\bold R$}},
date={2007},
ISSN={1558-8599},
journal={Pure Appl. Math. Q.},
volume={3},
number={3, Special Issue: In honor of Leon Simon. Part 2},
pages={785\ndash 800},
url={https://doi-org.ezproxy.library.wisc.edu/10.4310/PAMQ.2007.v3.n3.a6},
review={\MR{2351645}}, }
\bib{MR2250142}{book}{
author={Triebel, H.},
title={Theory of function spaces. {III}},
series={Monographs in Mathematics},
publisher={Birkh\"auser Verlag, Basel},
date={2006},
volume={100},
ISBN={978-3-7643-7581-2; 3-7643-7581-7},
review={\MR{2250142}}, }
\bib{MR999729}{article}{
author={Webster, S.~M.},
title={A new proof of the {N}ewlander-{N}irenberg theorem},
date={1989},
ISSN={0025-5874},
journal={Math. Z.},
volume={201},
number={3},
pages={303\ndash 316},
url={https://doi-org.ezproxy.library.wisc.edu/10.1007/BF01214897},
review={\MR{999729}}, }
\bib{MR995504}{article}{
author={Webster, S.~M.},
title={On the proof of {K}uranishi's embedding theorem},
date={1989},
ISSN={0294-1449},
journal={Ann. Inst. H. Poincar\'{e} Anal. Non Lin\'{e}aire},
volume={6},
number={3},
pages={183\ndash 207},
url={http://www.numdam.org/item?id=AIHPC_1989__6_3_183_0},
review={\MR{995504}}, }
\bib{MR2693230}{book}{
author={Yie, S.~L.},
title={Solutions of {C}auchy-{R}iemann equations on pseudoconvex domain
with nonsmooth boundary},
publisher={ProQuest LLC, Ann Arbor, MI},
date={1995},
url={http://gateway.proquest.com.ezproxy.library.wisc.edu/openurl?url_ver=Z39.88-2004&rft_val_fmt=info:ofi/fmt:kev:mtx:dissertation&res_dat=xri:pqdiss&rft_dat=xri:pqdiss:9601610},
note={Thesis (Ph.D.)--Purdue University},
review={\MR{2693230}}, }
\bib{MR1757879}{article}{
author={Zampieri, G.},
title={{$q$}-pseudoconvexity and regularity at the boundary for
solutions of the {$\overline\partial$}-problem},
date={2000},
ISSN={0010-437X},
journal={Compositio Math.},
volume={121},
number={2},
pages={155\ndash 162},
url={https://doi-org.ezproxy.library.wisc.edu/10.1023/A:1001811318865},
review={\MR{1757879}}, }
\bib{MR1749685}{article}{
author={Zampieri, G.},
title={Solvability of the {$\overline\partial$} problem with
{$C^\infty$} regularity up to the boundary on wedges of {${\bf C}^N$}},
date={2000},
ISSN={0021-2172},
journal={Israel J. Math.},
volume={115},
pages={321\ndash 331},
url={https://doi.org/10.1007/BF02810593},
review={\MR{1749685}}, }
\end{biblist} \end{bibdiv}
\end{document} |
\begin{document}
\title{Classifying Financial Markets up to Isomorphism}
\author{ John Armstrong} \date{}
\maketitle
\begin{abstract} Two markets should be considered isomorphic if they are financially indistinguishable. We define a notion of isomorphism for financial markets in both discrete and continuous time. We then seek to identify the distinct isomorphism classes, that is to classify markets.
We classify complete one-period markets. We define an invariant of continuous time complete markets which we call the absolute market price of risk. This invariant plays a role analogous to the curvature in Riemannian geometry. We classify markets when the absolute market price of risk is deterministic.
We show that, in general, markets with non-trivial automorphism groups admit mutual fund theorems. We prove a number of such theorems. \end{abstract}
\section*{Introduction}
Two financial markets should be considered equivalent if there is a bijective correspondence between the investment strategies in each market which preserves both the costs and the payoff distributions of these strategies. This intuition allows us to define a formal notion of isomorphism for financial markets. In the language of category theory \cite{eilenbergmaclane}, we shall define the category of financial markets.
We shall then demonstrate that one can prove financially interesting classification theorems. We classify Gaussian markets and complete one-period markets. We also prove a partial classification theorem for complete continuous-time markets with a fixed risk-free rate which we will now describe.
The minimum number of assets required to replicate an arbitrary contingent claim gives one basic invariant of such markets, the dimension. The next useful invariant we identify is the length of the market-price-of-risk vector, which we call the {\em absolute market price of risk}. While it is easy to define other invariants, this has the advantage of being a {\em local invariant}, by which we mean that it can be calculated from the coefficients of an SDE defining the asset price dynamics by simple algebra and differentiation. The absolute market price of risk gives a basic invariant of markets up to isomorphism. In this sense it is analogous to the Riemannian curvature, which gives a basic invariant of Riemannian manifolds up to isometry.
We classify continuous-time complete markets whose absolute market price of risk is deterministic. Markets with constant absolute market price of risk are determined up to isomorphism by just their dimension, the risk-free rate and the absolute market price of risk and are isomorphic to Black--Scholes--Merton markets.
Our classification theorems have a number of interesting financial applications.
Firstly, one can often use a classification theorem to illuminate a mathematical proof using without-loss-of-generality arguments. We will see that one can specify an $n$-dimensional Black--Scholes--Merton market up to isomorphism using only the parameters of dimension, risk-free rate and absolute market price of risk. This allows one to prove financial results for these markets by considering only markets with particularly simple forms. Our classification of one-period complete markets admits similar applications.
Secondly, we establish a connection between the automorphisms of a market and mutual-fund theorems. We prove that investment strategies solving invariant convex optimization problems in a market can be assumed to be invariant under automorphisms. For markets with large symmetry groups such as Black--Scholes--Merton markets, this imposes strong limitations on the form of optimal investment strategies, giving a significant generalization of the classical mutual-fund theorems.
Thirdly, we will see that a surprisingly large number of markets are isomorphic to a Black--Scholes--Merton market, and so financial results proved for such markets can be applied more widely than one might expect. In particular given any diffusion model, one can obtain a related Black--Scholes--Merton market by making an appropriate choice of drift. This is significant since the drift is difficult to estimate from statistical evidence and its functional form is usually chosen for parsimony. This result implies that one can find, for example, stochastic-volatility models which are isomorphic to Black--Scholes--Merton markets.
The effect of transformations on a market has been considered by many authors. If one considers the asset prices as stochastic trajectories in $\mathbb{R}^n$ one can ask how the dynamics change under diffeomorphisms of $\mathbb{R}^n$. Stochastic differential equations (SDEs) on manifolds have been studied extensively and this has led to a variety of geometric formulations \cite{armstrongBrigoJets, belopolskaya2012stochastic,elworthy, emery, gliklikh2010global, itoManifold, hsu}. The diffusion term of a non-degenerate SDE on a manifold can be interpreted as defining a Riemannian metric and this yields a connection between Riemannian curvature and SDEs. The geometric theory of SDEs on manifolds has been successfully applied to finance in, for example, \cite{henryLabordere}. However, the maps induced by diffeomorphisms of $\mathbb{R}^n$ are hard to interpret financially since financially important properties, such as whether a process is a martingale, are not preserved by diffeomorphisms.
In this paper, the transformations we consider are those that do preserve financially important properties. They are given by maps between the underlying probability spaces defining the markets rather than on the space $\mathbb{R}^n$. The objects in our categories are given by filtered probability spaces equipped with cost functionals. This probabilistic definition of a market is influenced by the work of Pennanen \cite{pennanen, pennanenDuality}. The morphisms we define are built upon the theory of probability-space homomorphisms developed by Rokhlin in \cite{rokhlin} (who extended the work of von Neumann in \cite{vonNeumann}).
Rokhlin's classifications of standard probability spaces and their homomorphisms are key ingredients in our classification of one-period complete markets. While Rokhlin's results are all we need for this paper, we note that the category theory of probability has been developed by other authors, for example the theory of stochastic processes is explored from a categorical viewpoint in \cite{giryCategorical}.
Category theory was applied to financial markets in \cite{armstrongMarkowitz}, which classifies Markowitz markets and relates the classical mutual-fund theorems to the symmetries of the market. The formulation of this earlier paper is purely algebraic. The probabilistic formulation we will develop is more fundamental and more general.
The structure of the paper is as follows.
In Section \ref{sec:markowitz} we define the category of discrete-time markets. We prove a general mutual-fund theorem for markets with automorphisms. We prove the equivalence (or more precisely the duality) between the formulation of categories given in this paper and the algebraic approach of \cite{armstrongMarkowitz}. We illustrate our mutual-fund theorem with the example of Gaussian markets.
In Section \ref{sec:completeOnePeriod} we classify complete one-period markets. We first give a simplified classification by assuming that one can additionally invest in a ``casino'', which is a complete market where the $\mathbb P$ and $\mathbb Q$ measures coincide. This form of the classification is sufficient for most applications. We give a full classification for markets without using the casino. Our general mutual-fund theorem only applies to convex optimization problems, but for complete one-period markets we are also able to prove a mutual-fund theorem for problems where all agents have monotonic preferences. This generalization is useful for solving problems involving investors with S-shaped utility, as motivated by the theory of Kahneman and Tversky \cite{kahnemanTversky}.
In Section \ref{sec:ctstime} we extend our category to multi-period and continuous-time markets. We classify complete continuous-time markets with constant absolute market price of risk.
Since the primary novelty of this paper is our definitions of financial categories, the resulting classification results and their financial applications, the proofs of our results have been placed in an online appendix. If the reader is unfamiliar with category theory, a short review of the basic terminology we require can be found in a second appendix. The appendices may be found on the journal website or in the arXiv version of this manuscript \cite{classificationArxiv}.
\section{Finite-dimensional linear markets} \label{sec:markowitz}
In this section we give a coordinate-free definition of a one-period financial market and relate this to the elementary, coordinate-based approach of defining a market in $n$-assets using a probability distribution on $\mathbb{R}^n$. We will illustrate with the example of the Markowitz model. We will use this to demonstrate the relationship between invariant investment strategies and mutual-fund theorems.
We begin by recalling a number of definitions due to Rokhlin \cite{rokhlin} for morphisms between probability spaces.
\begin{definition}
Let $(\Omega_1, {\cal F}_1, \mathbb P_1)$ and $(\Omega_2, {\cal F}_2, \mathbb P_2)$ be two
probability spaces. A map $\phi:\Omega_1 \to \Omega_2$ is called a {\em homomorphism}
if $\phi$ is measurable and if $\mathbb P_1(\phi^{-1}U)=\mathbb P_2(U)$ for all $U \in {\cal F}_2$.
A homomorphism $\phi$ is called an {\em isomorphism} if it is bijective and its inverse is a homomorphism. We call $\phi$ a {\em mod 0 isomorphism} if there are subspaces $\Omega_1^\prime \subseteq \Omega_1$ and $\Omega_2^\prime \subseteq \Omega_2$ both of full measure such that $\phi$ restricted to $\Omega_1^\prime$ is an isomorphism to $\Omega_2^\prime$. \end{definition}
\noindent From the point of view of probability theory, two probability spaces should be considered as equivalent if they are mod 0 isomorphic. We define the category ${\mathrm{Prob}}$ to have objects given by probability spaces and morphisms given by almost-sure equivalence classes of homomorphisms. Rokhlin's definition of a mod 0 isomorphism does not coincide exactly with the set of isomorphisms in ${\mathrm{Prob}}$. The next lemma explains how the two notions are related.
\begin{lemma}
A measurable function is a mod 0 isomorphism if and only if its almost-sure equivalence class is a ${\mathrm{Prob}}$ isomorphism.
\label{lemma:mod0Lemma} \end{lemma}
An important functor is the contravariant functor $L^0$ which maps the category ${\mathrm{Prob}}$ to the category ${\mathrm{Vec}}$ of vector spaces. $L^0$ acts on the objects of ${\mathrm{Prob}}$ by mapping a probability space to its vector space of almost-sure equivalence classes of measurable functions. Given a ${\mathrm{Prob}}$ morphism $f:\Omega_1 \to \Omega_2$ and $X \in L^0(\Omega_2)$ we define a linear transformation $L^0(f):L^0(\Omega_2)\to L^0(\Omega_1)$ by $L^0(f)(X)=X \circ f$.
\begin{definition}
A {\em one-period financial market} $((\Omega, {\cal F},\mathbb P),c)$ consists of: a probability space $(\Omega, {\cal F},\mathbb P)$; a function $c:L^0(\Omega; \mathbb{R}) \to \mathbb{R} \cup \{\pm \infty\}$.
We call $c^{-1}(\mathbb{R} \cup \{-\infty\})$ the {\em domain} of $c$, denoted $\dom c$. \end{definition}
We interpret a real random variable $X$ on $\Omega$ as an investment strategy with payoff $X(\omega)$ in scenario $\omega \in \Omega$. $c(X)$ denotes the up front cost of strategy $X$ and is equal to $\infty$ if one cannot pursue a strategy. A strategy with $c(X)=-\infty$ results in liabilities so bad that the market is willing to pay arbitrarily large incentives to encourage someone to take these liabilities on. A typical investment strategy is the purchase of an asset or of a portfolio of assets which are then sold at a final time $T$. In this case $c(X)$ would be the cost of purchasing the asset. However, one can also model a commitment to pursue a continuous-time trading strategy as yielding a single payoff at the final time $T$ and our definition of a market is flexible enough to include such strategies.
This definition is deliberately minimal. To obtain interesting markets one would typically want to impose additional conditions, such as that the market should be arbitrage free. This condition can be expressed as: for random variables $X$, if $X \geq 0$ and $X \neq 0$ then $c(X)>0$.
In this section we will be interested only in one-period markets, so we will refer to them simply as markets. \begin{definition}
\label{def:morphism}
A {\em morphism} of markets $M_1=((\Omega_1, {\cal F}_1,\mathbb P_1),c_1)$ and $M_2=((\Omega_2, {\cal F}_2,\mathbb P_2),c_2)$ is a
${\mathrm{Prob}}$ morphism $\phi:\Omega_1 \to \Omega_2$ satisfying
$c_2(X) \geq c_1(X\circ \phi)$ for all $X \in L^0(\Omega_2; \mathbb{R})$. \end{definition}
Financially, a market morphism $\psi:M_1 \to M_2$ represents an inclusion of the market $M_2$ in $M_1$: given an investment strategy represented by the random variable $X$ in $M_2$, we have the investment strategy $X \circ \psi$ in $M_1$ which has identical payoff distribution but which has lower up-front cost. So if one can afford to pursue the strategy $X$, one can also afford to pursue $X \circ \psi$. The contravariance between $\psi$ and the financial notion of inclusion stems from the contravariance of the functor $L^0$.
Our primary interest in this paper is in market isomorphisms. We may describe them as follows. \begin{lemma}
\label{lemma:isomorphism}
An isomorphism of markets $((\Omega_1, {\cal F}_1,\mathbb P_1),c_1)$ and $((\Omega_2, {\cal F}_2,\mathbb P_2),c_2)$ is the
almost-sure equivalence class of a mod 0 isomorphism $\phi:\Omega_1 \to \Omega_2$ satisfying
$c_2(X)=c_1(X\circ \phi)$ for all $X \in L^0(\Omega_2; \mathbb{R})$. \end{lemma}
In finance, optimal investment problems are often convex optimization problems (see for example \cite{pennanen}). A convex optimization problem is a problem requiring finding the set of minimizers of a convex objective function over a convex domain. For example, a risk-averse agent will have a concave utility function, and so the objective in expected utility maximization problems can be expressed as the minimization of their convex expected disutility function. Cost constraints are typically linear, and hence define a convex domain. Additional constraints imposed by a risk manager will further restrict the domain, but if one uses expected-shortfall constraints, or any other coherent, or simply convex, risk measure (see \cite{follmerSchied}), this too will yield a convex domain.
The solution set of a convex optimization problem is itself a convex set. We also expect that if the solution set is financially meaningful, it will be invariant under the automorphism group of the market. Our next result will show that one may then find an element of the solution set which is itself invariant.
To state our result, let us define the necessary terminology. A measurable group $G$ has a {\em left-invariant} probability measure $\mathbb{G}$ if for all measurable sets $A \subseteq G$ and elements $h \in G$ we have $\mathbb{G}(A)=\mathbb{G}(h A )$. A {\em representation} of such a group on a Banach space $V$ is a group homomorphism $\rho:G \to \Aut V$, where $\Aut V$ is the group of linear isometries of $V$. We think of $\rho$ as defining an action of $G$ on $V$ on the left, given by $g v = \rho(g) v$.
\begin{theorem}
Let $G$ be a measurable group with a left-invariant probability measure $\mathbb{G}$. Let $\rho:G \to \Aut V$ be a representation. Suppose that for all $v$ in $V$ the map $g \to \rho(g) v$ is measurable.
If $S$ is a non-empty $G$-invariant convex subset of $V$,
then $S$ contains a $G$-invariant element.
\label{thm:genericMutualFundTheorem}
If $G$ is a finite group, we only need require that $V$ is a vector space and $G$ acts by linear automorphisms. \end{theorem}
The theorem is proved by taking an arbitrary element of the set and then averaging over the action of the group.
For financial applications, we may take $G$ to be a subgroup of the automorphism group of the market which admits a left-invariant density and $\rho$ to be the standard action of $G$ on $L^1(\Omega; \mathbb{R})$. This allows us to simplify invariant convex optimization problems by restricting attention to invariant investment strategies.
We will see a number of applications of this general result throughout this paper. In this section we will use this result to prove the classical two-mutual-fund theorem of \cite{mertonMutualFund}. A similar argument was used in \cite{armstrongMarkowitz} to prove the classical two-mutual-fund theorem but the notion of isomorphism was different. Before proving the two-mutual-fund theorem we will show how the notion of isomorphism in \cite{armstrongMarkowitz} relates to our new definition. We will do this by defining a general notion of a ``finite-dimensional linear market'' and giving a classification result for such markets and their isomorphisms.
\begin{definition}
A one-period financial market $M=((\Omega, {\cal F},\mathbb P),c)$ is {\em separated} if there is a subset $\mathring{\Omega} \subset \Omega$ of full measure such that for any distinct
$\omega_1$, $\omega_2 \in \mathring{\Omega}$ there exists $X \in \dom c$ with $X(\omega_1)\neq X(\omega_2)$.
A one-period financial market is {\em linear} if $\dom c$ is a linear subspace
of $L^0(\Omega; \mathbb{R})$ and $c$ is linear on $\dom c$. The {\em dimension} of a linear market is the dimension of $\dom c$.
On a linear market, we may define a map $\pi$ from $\Omega$ to $(\dom c)^*$, the algebraic dual space of $\dom c$, by
\begin{equation}
\pi(\omega)(X)=X(\omega)
\label{eq:definitionOfPi}
\end{equation}
for $X \in \dom c$ and $\omega \in \Omega$. One checks that
$\pi(\omega)(\alpha X_1+X_2)=(\alpha X_1+X_2)(\omega) = \alpha X_1(\omega)+X_2(\omega)=\alpha \pi(\omega)(X_1) + \pi(\omega)(X_2)$,
so $\pi(\omega) \in (\dom c)^*$ as claimed. The map
$\pi$ induces a sigma algebra and measure on $(\dom c)^*$. We write $d_M$ for this measure, which we call the {\em distribution}
of the market. If $M$ is separated, then $\pi$ is a mod 0 isomorphism. \end{definition}
Financially, a market is linear if all traded assets can be bought and sold in unlimited quantities at a fixed price per unit. A market is separated if the probability space contains no information other than that captured by asset prices.
A finite-dimensional real vector space has a natural topology defined by the requirement that linear isomorphisms to $\mathbb{R}^n$ are homeomorphisms. We would like to require that the measure $d_M$ is in some sense compatible with this topology. To be precise we recall the following definition. \begin{definition}(see \cite{itoIntroduction})
A {\em regular} probability measure is a probability
measure arising as the Lebesgue extension of a Borel probability measure on a topological space. \end{definition} We would like to be able to ensure that $d_M$ is a regular probability measure. To do this we require an additional condition on the probability space $(\Omega,\cal F,\mathbb P)$.
\begin{definition}(see \cite{rokhlin} and \cite{itoIntroduction})
A probability space $(\Omega,\cal F,\mathbb P)$ is {\em standard} if
it is isomorphic mod $0$ to either: the Lebesgue measure on $[0,1]$;
a probability space on a finite or countable number of atoms; a convex combination of both. \end{definition}
The study of standard probability spaces was started by \cite{vonNeumann}. Although it may appear to be a highly restrictive condition, it is in fact a very mild assumption. It\^o summarised the situation in \cite{itoIntroduction} as ``all probability spaces appearing in practical applications are standard''. We note a number of important examples that justify this claim. All regular probability measures on a complete separable metric space are standard. This includes all regular measures on $\mathbb{R}^n$ and the Wiener measure on $C^0[0,\infty)$. Finite and countable products of standard spaces are standard. A non-null measurable subset of a standard probability space becomes a standard probability space when endowed with the conditional measure. For proofs of these assertions see \cite{rokhlin} or \cite{itoIntroduction}.
\begin{lemma}
If $M$ is a finite-dimensional linear market based on a standard probability space,
then $d_M \in \mathbb P((\dom c)^*)$ where $\mathbb P(S)$ denotes the set of regular probability measures on $S$.
\label{lemma:regularity} \end{lemma}
\begin{definition}
A regular probability measure on a finite-dimensional vector space, $V$, is said
to be {\em non-degenerate} if for any $X,Y\in V^*$, $X=Y$ almost everywhere implies $X=Y$. \end{definition} Degenerate probability measures arise when the measure is concentrated on a vector subspace. \begin{definition}
${\mathrm{VecM}}$ is defined to be the category with objects consisting of triples
$(V,d,c)$ with $V$ a finite-dimensional vector space, $d \in \mathbb P(V)$ with $d$ non-degenerate and $c \in V$. ${\mathrm{VecM}}$ is equipped with a notion of morphism given by linear transformations $T:(V_1,d_1,c_1)\to(V_2,d_2,c_2)$ satisfying:
\begin{enumerate}[nosep,label=(\roman*)]
\item for any Borel measurable set $A \subseteq V_2$
\begin{equation}
d_2(A) = d_1(T^{-1}(A))
\label{eq:distributionConditionForVecM};
\end{equation}
\item the vectors $c_1$ and $c_2$ are related by
\begin{equation}
c_2 = T(c_1).
\label{eq:costConditionForVecM}
\end{equation}
\end{enumerate} \end{definition} \begin{definition}
${\mathrm{DualM}}$ is defined to be the category with objects consisting of triples $(V,d^*,c^*)$ with $V$ a finite-dimensional vector space, $d^* \in \mathbb P(V^*)$ with $d^*$ non-degenerate and $c^* \in V^*$.
Morphisms $T:(V_1,d^*_1,c^*_1)\to (V_2,d^*_2,c^*_2)$ in ${\mathrm{DualM}}$ are given by a linear transformation $T:V_1 \to V_2$
whose vector space dual $T^*$ is a ${\mathrm{VecM}}$ morphism $T^*:(V_1^*,d^*_1,c^*_1)\to
(V_2^*,d^*_2,c^*_2)$. \end{definition} \begin{definition}
${\mathrm{FinM}}$ is defined to be the category with objects given by separated finite-dimensional linear markets whose probability space is standard, and morphisms given by market morphisms. \end{definition}
For any element $M$ of ${\mathrm{FinM}}$ define \[{\mathrm{Vec}}(M)=((\dom c)^*,d_M,c).\] In the opposite direction, for any element $(V,d,c)$ of ${\mathrm{VecM}}$ we define \[ {\mathrm{Fin}}((V,d,c))=((V,{\cal F},d),{\underline c}) \] where ${\cal F}$ is the sigma algebra associated with $d$ and the map $\underline{c}:L^0(V;\mathbb{R}) \to \mathbb{R}$ satisfies \[ \underline{c}(X)= \begin{cases} X(c) & \text{if $X$ is equal to a linear map almost everywhere,} \\ \infty & \text{otherwise}. \\ \end{cases} \]
\begin{theorem}[Equivalence of vector space and probabilistic categories of market]
${\mathrm{Vec}}(M)$ lies in ${\mathrm{VecM}}$ and the map ${\mathrm{Vec}}:{\mathrm{FinM}} \to {\mathrm{VecM}}$ defines a bijection on isomorphism classes. ${\mathrm{Fin}}((V,d,c))$ lies
in ${\mathrm{FinM}}$. We may extend ${\mathrm{Vec}}$ and ${\mathrm{Fin}}$ to functors by defining their action on morphisms such that ${\mathrm{Vec}}$ and ${\mathrm{Fin}}$ define an equivalence of categories. Similarly the map
${\mathrm{Dual}}:\ob({\mathrm{FinM}})\to\ob({\mathrm{DualM}})$
given by
${\mathrm{Dual}}(M)=(\dom c,d_M,c)$
may be extended to give a duality of the categories ${\mathrm{FinM}}$ and ${\mathrm{DualM}}$.
\label{thm:linearmarkets} \end{theorem}
To interpret this result financially, we suppose that we have a market of $n$ assets. The space of portfolios in these assets is an $n$-dimensional vector space $V$. The cost of a portfolio defines a linear functional $c^*$ on this vector space. The eventual payoff of a portfolio gives rise to a random linear functional acting on the space of portfolios. The distribution of this payoff functional is given by $d^*$. Together this data defines an element $(V,d^*,c^*) \in \ob({\mathrm{DualM}})$. Thinking of the space of portfolios as a vector space with no preferred basis represents the financial idea that a portfolio of assets can be viewed as an asset in its own right. The category ${\mathrm{DualM}}$ is therefore the appropriate category to use if one believes that the distinction between an asset traded on the market and a portfolio of assets is not financially significant.
The significance of Theorem \ref{thm:linearmarkets} is that it shows the notion of equivalence of markets obtained by treating all portfolios as equally valid investment strategies is the same as the notion of equivalence given in Definition \ref{def:morphism}. This relates the definitions of \cite{armstrongMarkowitz} to the definitions in this paper. The advantage of our new Definition \ref{def:morphism} is that it can be applied to infinite markets, as we shall see when we discuss complete markets later, and to non-linear markets.
The proof of Theorem \ref{thm:linearmarkets} shows that morphisms in ${\mathrm{VecM}}$ are surjective linear transformations. It follows that the morphisms of ${\mathrm{DualM}}$ are injective. This backs up the claim we made earlier that market morphisms are a contravariant representation of market inclusion.
We now apply this general theory to the case of assets following a multivariate normal distribution, as considered by Markowitz \cite{markowitz}.
Let $g_{\mu}$ be the multivariate normal distribution with mean $\mu \in \mathbb{R}^n$ and covariance matrix given by the identity $\mathrm{id}_n$. We say that a market is Gaussian if it is isomorphic to a market on $\mathbb{R}^n$ with density $g_{\mu}$. Trivially any Gaussian market is isomorphic to a market of the form ${\mathrm{Fin}}(\mathbb{R}^n,g_{\mu},c)$ for some $\mu, c \in \mathbb{R}^n$. Let $\{e_i\}$ be the standard basis for $\mathbb{R}^n$. Since isometries of $\mathbb{R}^n$ preserve the Gaussian measure, we may apply a rotation so that $\mu$ lies in the span of $e_1$ and $c$ lies in the span of $e_1$ and $e_2$. This shows that any Gaussian market can be written in the form \begin{equation} {\mathrm{Fin}}( \mathbb{R}^n, g_{\alpha\, e_1}, \beta \, e_1 + \gamma \, e_2 ), \quad \alpha,\beta,\gamma \in \mathbb{R}. \label{eq:genericMarkowitz} \end{equation}
We now have the following classification theorem. \begin{theorem}[Classification of Markowitz markets]
Let $M \in {\mathrm{FinM}}$ be a market and suppose that $\{X_i\}$ is a basis for $\dom c$
given by assets following a multivariate normal distribution. Then $M$ is Gaussian,
and hence is isomorphic to a market of the form \eqref{eq:genericMarkowitz}.
\label{thm:markowitzClassification} \end{theorem} \noindent This theorem is essentially a restatement of the main classification result of \cite{armstrongMarkowitz} in the language of one-period markets. \begin{corollary}
All invariant investment strategies $X \in \dom c$ in a Gaussian market
lie in a two-dimensional vector subspace of $\dom c$.
\label{cor:generalMutualFund} \end{corollary} \begin{corollary}
(Two-mutual-fund theorem \cite{mertonMutualFund})
Suppose we have $n$ assets of a given cost whose payoffs follow a multivariate normal distribution. We wish to find the portfolio of assets with minimum variance but with a given expected payoff $C_1$ and cost $C_2$. There are two portfolios $X_1$ and $X_2$ independent of $C_1$ and $C_2$ such that we can solve these mean--variance optimization problems for any $C_1$ and $C_2$ simply by considering linear combinations of $X_1$ and $X_2$. \end{corollary} \noindent The portfolios $X_1$ and $X_2$ are the two ``mutual funds'' that give this theorem its name.
We remark that Corollary \ref{cor:generalMutualFund} is a much stronger result than the classical two-mutual-fund theorem. The paper \cite{armstrongMarkowitz} gives numerous concrete examples of financially interesting results arising from invariance arguments other than just the two-mutual-fund-theorem.
We also remark that the concrete isomorphism found in Theorem \ref{thm:markowitzClassification} makes it extremely easy to solve the classical mean-variance optimization problem directly, thereby recovering the full set of results found in \cite{mertonMutualFund}. This approach is pursued in \cite{armstrongMarkowitz}.
\section{One-period complete markets} \label{sec:completeOnePeriod}
\begin{definition}
A one-period market $M=((\Omega,{\cal F},\mathbb P),c)$ is {\em complete} if there exists
a measure $\mathbb Q$ on $\Omega$ equivalent to $\mathbb P$, and $C > 0$ such that
\begin{equation}
c(X) = \begin{cases}
C (\mathbb{E}_\mathbb Q(X^+) - \mathbb{E}_\mathbb Q(X^-) ) & \text{if one of $\mathbb{E}_\mathbb Q(X^{\pm})$ is finite} \\
\infty & \text{otherwise}. \\
\end{cases}
\label{eq:defQ}
\end{equation}
In this formula $X^+$ and $X^-$ denote the positive and negative parts of the random variable $X$.
We note that $c(1)=C$, so we interpret $(C-1)$ as a deterministic interest rate. \end{definition}
\begin{example}
Let $I$ be the market given by taking the $\mathbb P$ and $\mathbb Q$ measure to both be equal to the Lebesgue measure on $[0,1)$ and with cost of the constant function with value $1$, equal to $1$. In this market prices are given by expectations, so we call $I$ a {\em casino}. (Our casino is of course an idealized one, in which the profits and losses of a typical client form a martingale rather than a supermartingale.) \end{example}
Given a complete market $M$ we may define a new complete market $M \times I$ by taking the product measures for both the $\mathbb P$ and the $\mathbb Q$ measures and taking the constant $C$ to be that given by the market $M$.
From a financial point of view the market $M \times I$ represents the market obtained by considering investment strategies where one first invests in the market $M$ and then places a bet at the casino.
In applications it is not unreasonable to assume that there is a casino available should a trader wish to use it. So classifying complete markets of the form $M \times I$ should be just as useful in practice as a full classification. The theorem below gives a classification for markets of this form. \begin{theorem}[Classification of complete markets up to a casino]
Let $M$ be a complete market on a standard probability space. Then $M \times I$ is isomorphic to $\tilde{M} \times I$,
where $\tilde{M}$ is the market with probability space given by
$\tilde{\Omega}=[0,1]$ equipped with the Lebesgue measure and
with pricing function
\[
\tilde{c}(X) = C \int_0^1 F^{-1}_{\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}}(x) \, X(x) \, \mathrm d x.
\]
\label{thm:simpleCompleteMarket1}
Here $F^{-1}_{\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}}$ is the inverse distribution function
of $\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}$ on $M$. \end{theorem}
The first step toward proving this is to observe that we may recover $\mathbb Q$ from $c$ since for any measurable set $A \subset \Omega$ we have \[ \mathbb Q(A) = \mathbb{E}_\mathbb Q(1_A) = \frac{c(1_A)}{c(1)}. \] It follows that two one-period complete markets $((\Omega_i,{\cal F}_i,\mathbb P_i),c_i)$ ($i=1,2$) are isomorphic if and only if (a) there is a mod 0 isomorphism for the $\mathbb P_i$ measures which is also a mod 0 isomorphism for the $\mathbb Q_i$ measures; and (b) the cost of the constant function with value $1$ is equal in both markets.
There may be more than just $2$ measures on the market which are of financial interest. A trader with views about the market represented by a measure $\mathbb P$ may be constrained by a risk manager or regulator with different views about the market. These can be represented by alternative measures. Let us state a classification result similar to Theorem \ref{thm:simpleCompleteMarket1} that applies to this situation.
\begin{theorem}[Classification of complete markets with multiple views]
Let $I$ denote the interval $[0,1)$ with the Lebesgue measure. We suppose that $\mathbb P_0, \mathbb P_1, \ldots, \mathbb P_n$
are equivalent probability measures on $(\Omega, {\cal F})$. We assume $\mathbb P_0$ is standard. Then there is a unique Lebesgue measure $\mathbb P_0^\prime$ on
$\Omega^\prime=(0,\infty)^n$ such that $\mathbb P_0 \times I$ and $\mathbb P_0^\prime \times I$ are mod 0 isomorphic via an isomorphism which also acts as a mod 0 isomorphism between the measures $\mathbb P_i \times I$ and $\mathbb P_i^\prime \times I$ where $\mathbb P^\prime_i$ is the Lebesgue measure given by
\[
\mathbb P^\prime_i(A)=\int_{(0,\infty)^n} \omega_i 1_A( \omega) \, \mathrm d \mathbb P_0^\prime.
\]
In this formula, $A$ is a measurable set, $1_A$ is the indicator function of $A$ and $\omega_i$ is the $i$-th coordinate function on $\mathbb{R}^n$. Note that we must have $\mathbb{E}_{\mathbb P_0^\prime}(\omega_i)=1$ for these $\mathbb P^\prime_i$ to be probability measures.
\label{thm:simpleCompleteMarketN} \end{theorem}
We note the following financial implication (using the notation of Theorem \ref{thm:simpleCompleteMarketN}).
\begin{corollary}[Convex mutual-fund theorem for complete markets]
Let $A$ be a non-empty convex subset of the space
of $\mathbb P_0$-integrable random variables on $\Omega$. Suppose that $A$ is also invariant under mod 0 isomorphisms that preserve all the $\mathbb P_i$. Then $A$ contains an element which can be written as a function of the Radon--Nikodym derivatives $\frac{\mathrm d \mathbb P_i}{\mathrm d \mathbb P_0}$. \label{cor:convexMutualFund} \end{corollary}
For example, $A$ might arise as the optimal investment strategies in a convex optimization problem with a cost constraint and risk-management constraints imposed by a number of regulators and risk managers given in terms of the $\mathbb P_i$.
A special case of the result above is the problem of expected-utility optimisation in a complete market subject to a single cost constraint for a concave, increasing utility function. In this case it is well-known that the optimal investment has a payoff function given as a function of the Radon--Nikodym derivative (see \cite{follmerSchiedBook}).
Let us now give the definitions needed to state a full classification for complete one-period markets. Write ${\cal S}$ for the set of mod 0 isomorphism classes of standard probability spaces. We call ${\cal S}$ the moduli space of standard probability spaces.
Given $m \in {\cal S}$, we define $m_0$ to be the measure of the continuous component of $m$ (or zero if it has no continuous component) and we define $m_i$ for $i>0$ to be the measure of the $i$-th largest atom in our probability space (or $0$ if there are fewer than $i$ atoms). Thus we have identified a correspondence between ${\cal S}$ and sets of numbers $m_i$ ($i \in \mathbb{N}$) which satisfy \begin{equation} m_i \in [0,1]; \qquad \forall i \in \mathbb{N}^+, \, m_i \geq m_{i+1} ; \qquad \text{and } m_0 = 1-\sum_{i=1}^\infty m_i. \label{eq:defCalS} \end{equation} We give ${\cal S}$ the topology induced by thinking of it as a subset of $\mathbb{R}^\infty$ in this way. Thus we may talk about measurable maps to ${\cal S}$, or ${\cal S}$-valued random variables.
The theory of disintegration of measure tells us that for a complete market $M$ based on a standard probability space, there is a $\mu_M$-almost-surely unique measurable function \[ m_M:(0,\infty) \to {\cal S} \] with $m_M(x)$ given by the mod 0 isomorphism class of the $\mathbb P$ conditional measure conditioned on the value of $\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}=x$ and where $\mu_M$ denotes the measure on $(0,\infty)$ induced by $\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}$.
\begin{definition}
Let ${\mathrm{Measures}}(n)$ be the set consisting of pairs $(\mu,m)$ where:
\begin{enumerate}[nosep,label=(\roman*)]
\item $\mu$ is a regular probability
measure on $(0,\infty)^n$ satisfying
$\mathbb{E}_{\mu}( \omega_i ) = 1$
for the $i$th coordinate function $\omega_i$ on $\mathbb{R}^n$;
\item $m$ is an ${\cal S}$-valued $\mu$-random variable.
\end{enumerate} \end{definition}
\begin{theorem}[Generalised classification of complete markets]
Standard probability spaces $(\Omega, {\cal F}, \mathbb P_0)$ equipped with $n$ additional
equivalent measures $\mathbb P_1$, \ldots, $\mathbb P_n$ are classified up to joint
$\mathbb P_0$-, \ldots, $\mathbb P_n$- mod 0 isomorphism by elements $(\mu_q,m_q) \in {\mathrm{Measures}}(n)$.
Here $\mu_q$ is the measure on $(0,\infty)^n$ induced by the $\mathbb{R}^n$ vector valued function $q$
with $i$-th component given by the Radon--Nikodym derivative $\frac{\mathrm d \mathbb P_i}{\mathrm d \mathbb P_0}$. \label{thm:classificationComplete} \end{theorem}
The proof uses Rokhlin's theory of the decomposition of measure.
\subsection{Non-convex problems and rearrangement} \label{sec:rearrangement}
We show in this section that Theorem \ref{thm:simpleCompleteMarketN} allows us to identify a mutual-fund theorem that applies to optimization in complete markets when we assume that the problem is ``monotonic'' rather than convex.
We have in mind applications to behavioural economics based on the observations of Kahneman and Tversky in \cite{kahnemanTversky}. For examples of applications of Kahneman and Tversky's ideas to mathematical finance and risk management, see, for example, \cite{xyzFirst}, the review \cite{xyzReview}, and \cite{armstrongBrigoSShaped} which contains numerous further references.
It has been observed in this literature (see for example \cite{xyzPortfolioChoiceViaQuantiles}) that the solution to optimal investment problems in complete markets involving S-shaped utility functions can be obtained by considering monotonic functions of the Radon--Nikodym derivative $\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}$. The aim of this section is to show how these results arise from general monotonicity properties, automorphism invariance and our classification theorems. We take the opportunity to show how these results can be generalized to situations where there are more than two measures $\mathbb P$ and $\mathbb Q$, for example, to the case where risk managers and traders have different beliefs about the future evolution of the market.
Given two random variables $X$, $Y$ on a probability space $(\Omega, {\cal F}, \mathbb P)$ we write \[ d^\mathbb P(X) \preceq d^\mathbb P(Y) \] if $F_X(k):=\mathbb P(X \leq k) \geq \mathbb P(Y \leq k)=:F_Y(k)$ for all $k$. The notation $d^{\mathbb P}(X)$ is intended to suggest ``the $\mathbb P$-distribution of $X$''. Given a third random variable $Z$ we write \[ d^\mathbb P(X \mid Z) \preceq d^\mathbb P(Y \mid Z) \] if $\mathbb P(X \leq k \mid Z) \geq \mathbb P(Y \leq k \mid Z)$ almost surely for all $k$.
We suppose that market participants such as traders and risk managers impose some form of relation $\preceq^\prime$ on random variables to express their preferences between different investment opportunities. One might reasonably expect that \begin{equation} X \preceq Y \implies X \preceq^\prime Y. \end{equation} If this condition holds, we will say that $\preceq^\prime$ is {\em increasing}. We say that $\preceq^\prime$ is {\em decreasing} if the reversed relation is increasing. We say that a relation on random variables is {\em monotonic} if it is either increasing or decreasing. We say that the {\em sign} of a monotonic relation is $1$ if it is increasing or $-1$ if it is decreasing.
\begin{definition}[Rearrangement]
\label{def:rearrangement}
Let $m$ be a Lebesgue probability measure on $(0,\infty)$. Let $F_m$ denote
the cumulative distribution function of $m$. Write $x, y$ for the coordinate functions on $(0,\infty)\times[0,1)$. Define $U_m:(0,\infty)\times[0,1)\to [0,1]$ by
\[
U_m(\omega) = (1-y(\omega)) \lim_{x^\prime\to x(\omega)-} F_m(x^\prime) + y(\omega)\lim_{x^\prime\to x(\omega)+} F_m(x^\prime).
\]
$U_m$ is well-defined since $F_m$ is c\`adl\`ag.
We write $\mathbb P_m$ for the product measure on $(0,\infty)\times[0,1)$. If
\begin{equation}
\mathbb{E}_{\mathbb P_m}( x(\omega) ) = 1
\label{eq:conditionOnM}
\end{equation}
then $x$ is the Radon--Nikodym derivative of an equivalent measure we call $\mathbb Q_m$.
Given $X \in L^0_{\mathbb P_m}((0,\infty) \times [0,1); \mathbb{R})$ we define
the {\em increasing and decreasing rearrangements} of $X$ by
\[
R^+_m(X) = F_X^{-1}(U_m), \quad R^-_m(X) = -F_{-X}^{-1}(U_m)
\]
respectively, where $F^{-1}_X$ is the $\mathbb P_m$ inverse distribution function of $X$. \end{definition}
Our next theorem shows that the notion of rearrangement can be generalized to situations when there are more than two probability measures under consideration.
\begin{theorem}[Monotone mutual-fund theorem for complete markets]
Let $(\Omega,{\cal F},\mathbb P_0)$ be a standard probability space equipped with
$n$ equivalent measures $\mathbb P_i$ ($1 \leq i \leq n$). Let $I=[0,1)$. Let $\preceq_i$ ($1 \leq i \leq n$) be monotonic relations on the set of probability distributions on $\mathbb{R}$. Write $\sign i$ for the sign of $\preceq_i$. There exists a mapping $R:L^0(\Omega \times I)\to L^0(\Omega \times I)$, which we call {\em rearrangement}, with the following properties.
\begin{enumerate}[nosep,label=(\roman*)]
\item Rearrangement does not change $\mathbb P_0$ distributions:
\[
d^{\mathbb P_0}(X) = d^{\mathbb P_0}(R(X)).
\]
\item Rearrangement increases or decreases $\mathbb P_i$ distributions according to the sign of $\preceq_i$:
\[
d^{\mathbb P_i}((\sign i) X) \preceq d^{\mathbb P_i}((\sign i) R(X)), \quad 1 \leq i \leq n.
\]
\item Let $q$ denote the vector of $n$ Radon--Nikodym derivatives $\frac{\mathrm d \mathbb P_i}{\mathrm d \mathbb P_0}$.
Define $\preceq$ on $\mathbb{R}^n$ by $x \preceq y$ if $(\sign i)x_i \leq (\sign i)y_i$ for all components $i$, and hence
define $\prec$ on $\mathbb{R}^n$. Then $R(X)$ satisfies
\[
R(X)(\omega) \leq R(X)(\omega^\prime)
\quad \text{ if } \quad q(\omega) \prec
q(\omega^\prime).
\]
\end{enumerate}
\label{thm:rearrangement} \end{theorem}
This theorem gives a general structural theorem about optimal investments in complete markets containing a casino. So long as the optimality criterion and any pricing or risk constraints are monotonic in some measures $\mathbb P_i$, we can restrict our attention to strategies that lie in the image of $R$. We interpret this as a mutual-fund theorem since it says that, for a general class of optimization problems, we can safely restrict attention to a subset of the random variables available in the market.
The assumption that there is a casino can be dropped in many cases since, as one might intuitively expect, one often doesn't take any real advantage of the casino. This is formalized in the next corollary.
\begin{corollary}
Let $(\Omega, {\cal F}, \mathbb P_i)$ ($1 \leq i \leq n$) be as in
Theorem \ref{thm:rearrangement}.
We can find a map $\tilde{R}:L^0(\Omega)\to L^0(\Omega)$ which shares properties (i), (ii) and (iii) described in Theorem \ref{thm:rearrangement} so long as either:
(a) $\mathbb P_0$ is atomless and $n=1$;
or (b) for some $j$, the distribution of $\frac{\mathrm d \mathbb P_j}{\mathrm d \mathbb P_0}$
conditioned on the value of all the other Radon--Nikodym derivatives
is almost surely continuous. In case (b)
$\tilde{R}$ can be assumed to depend only on the
value of $q$.
\label{cor:rearrangement} \end{corollary} Note that the theory of conditional distributions detailed in \cite{itoIntroduction} ensures that the conditional distribution exists in case (b).
\section{Continuous-Time Markets} \label{sec:ctstime}
Let us extend our definitions of markets to the multi-period setting.
\begin{definition} A multi-period market consists of the following. \begin{enumerate}[nosep,label=(\roman*)]
\item A filtered probability space
$(\Omega, {\cal F}_t, \mathbb P)$ where $t \in {\cal T} \subseteq [0,T]$ for
some index set ${\cal T}$ containing both $0$ and $T$.
We write ${\cal F}={\cal F}_T$. We require ${\cal F}_0=\{\emptyset,\Omega\}.$
\item For each $X \in L^0(\Omega;\mathbb{R})$, an ${\cal F}_t$-adapted process $c_t(X)$ defined for $t$ in ${\cal T}\setminus \{T\}$. \end{enumerate} \end{definition}
Random variables $X \in L^0(\Omega, {\cal F}_T; \mathbb{R})$ are interpreted as contracts which have payoff $X$ at time $T$. The cost of this contract at time $t$ is $c_t(X)$.
We note that this is a deliberately bare-bones definition of a market. In practice one would want to impose additional conditions on the $c_t$. For example, one would normally wish to forbid arbitrage and to impose ``the usual conditions'' on the filtered probability space.
\begin{definition}
A {\em filtration isomorphism} of filtered spaces $(\Omega, {\cal F}, {\cal F}_t, \mathbb P)$ where $t \in {\cal T}$ for some index set ${\cal T}$ is a mod $0$ isomorphism for ${\cal F}$ which is also a mod $0$ isomorphism for each ${\cal F}_t$, $t \in {\cal T}$.
An {\em isomorphism} of multi-period markets is a filtration isomorphism that preserves the cost functions. \end{definition}
Given a one-period market $((\Omega, {\cal F}, \mathbb P),c)$ we can trivially define a filtration ${\cal F}_0=\{\emptyset, \Omega\}$, ${\cal F}_1={\cal F}$ indexed by $\{0,1\}$ and we may define $c_0=c$. Hence we can define a multi-period market in a canonical fashion from a one-period market. The notion of isomorphism is preserved. In this sense, our definition of multi-period markets and their isomorphisms is a generalization of the corresponding notions for one-period market.
\begin{definition}[Exchange market]
\label{def:exchangeMarket}
Let $(\Omega, {\cal F}_t, \mathbb P)$ be $n$-dimensional Wiener space, that is the probability space generated by the $n$-dimensional
Brownian motion $\bm{W}_t$. Let $\bm{X}_t$ be an $n$-dimensional
stochastic process defined by a stochastic differential equation of the form
\begin{equation}
\mathrm d {\bm{X}}_t = \bm{\mu}(\bm{X}_t,t) \, \mathrm d t +
\bm{\sigma}(\bm{X}_t,t) \, \mathrm d {\bm{W}}_t.
\label{eq:nDDiffusion}
\end{equation}
Here $\bm{\mu}$ is an $\mathbb{R}^n$-vector valued function and $\bm{\sigma}$
is an invertible-matrix valued function. We assume the coefficients $\bm{\mu}$ and $\bm{\sigma}$ are sufficiently well-behaved for the solution of the equation to be well-defined on $[0,T]$. The components, $X^i_t$, of the vector $\bm{X}_t$ are intended to model the prices of $n$ assets.
The {\em exchange market} for \eqref{eq:nDDiffusion} with risk-free rate $r$
over a time period $[0,T]$ is given by defining $c_t:L^0(\Omega;\mathbb{R})\to \mathbb{R}$ for $t \in [0,T)$ by
\begin{subnumcases}{c_t( X ) = }
\alpha_0 \, e^{-r(T-t)} + \textstyle \sum_{i=1}^n \alpha_i \, X^i_t &
\text{if $X=\alpha_0 + \sum_{i=1}^n \alpha_i \, X^i_T$}, \label{eqn:bsmCommon} \\
\infty & \text{otherwise}.
\label{eqn:bsmBuyAndHold}
\end{subnumcases}
This is well-defined so long as we assume that $X^i_T$ are linearly independent random variables. This will be the case in all situations of interest. \end{definition}
The market defined above is called an exchange market because it models the basic assets that can be purchased directly on an exchange, but does not take into account the possibility of replicating payoffs via hedging. The next definition does take this into account.
\begin{definition}[Superhedging market]
\label{defn:superhedgingMarket}
The {\em superhedging market} for \eqref{eq:nDDiffusion} with risk-free rate $r$
over a time period $[0,T]$ is given by defining $c_t(X)$ to be the infimum of the
cost at time $t$ of self-financing trading strategies that superhedge $X$.
See \cite{harrisonPliska} for a definition of a self-financing trading strategy.
A self-financing trading strategy superhedges $X \in L^0(\Omega;{\cal F}_T)$ if the final payoff of the strategy is always greater than or equal to $X$. \end{definition}
Thus the superhedging market represents the effective market of derivatives that a trader can achieve given the exchange market. The cost function $c_t$ for such a market is the superhedging price. Of particular interest are complete markets where any contingent claim may be both superhedged and subhedged. One expects that the price in an arbitrage-free market can be expressed as a risk-neutral probability. These remarks motivate the next definition.
\begin{definition}
A continuous-time market $((\Omega, {\cal F}_t, \mathbb P),c_t)$ on $[0,T]$ is called a {\em continuous-time complete market with risk-free rate $r$} if there exists a measure $\mathbb Q$ equivalent to $\mathbb P$ with
\begin{equation}
c_t(X) = e^{-r(T-t)} \mathbb{E}_\mathbb Q( X \mid {\cal F}_t)
\label{eq:costFunctionInQ}
\end{equation}
for $\mathbb Q$-integrable random variables $X$ and equal to $\infty$ otherwise. We follow our usual conventions on expectations to allow $-\infty$ when the positive part of an expectation is finite and the negative part is infinite. \end{definition}
Using our new terminology, the theory of Harrison and Pliska \cite{harrisonPliska} shows how the superhedging market associated with the SDE \eqref{eq:nDDiffusion} gives rise to a continuous-time complete market, subject to sufficient regularity assumptions on the coefficients.
\begin{definition}
The continuous-time complete market with risk-free rate and cost function given
by the superhedging market is called the {\em complete market associated with the SDE \eqref{eq:nDDiffusion}} (subject to the required regularity assumptions for
$q_t$ to be a well-defined $\mathbb P$-martingale). \end{definition}
We differ slightly in our presentation from Harrison and Pliska \cite{harrisonPliska} in that they discuss replication and we consider superhedging. This is why we are willing to ascribe a cost of $-\infty$ to some $X \in L^0(\Omega,{\cal F}_T)$, whereas if one insists on replication, $X$ must be absolutely integrable. The definition of the superhedging market associated to a given market can be applied equally well to incomplete markets where there is a more meaningful difference between replication and superhedging. This is why we prefer to think in terms of superhedging, and in this we are influenced by the presentation of \cite{pennanenDuality}.
\begin{definition}
The {\em absolute market price of risk} in the complete market associated with the SDE \eqref{eq:nDDiffusion} is
the element of $L^0(\Omega \times [0,T], \mathbb P)$ defined by
\[
\mathrm{AMPR}_t = | \bm{\sigma}^{-1}(r \bm{X}_t - \bm{\mu}) |.
\] \end{definition}
\begin{theorem}
Let $F$ be the contravariant functor mapping a continuous-time market, $M$ with underlying probability space $\Omega$ to
the vector space $L^0(\Omega \times [0,T], \mathbb P \times \lambda)$ where $[0,T]$ is equipped with the Lebesgue measure $\lambda$, and where $F$ acts on morphisms
$\phi:\Omega_1 \to \Omega_2$ by
$
F(\phi)(X)=X \circ( \phi \times \mathrm{id})
$
for $X \in L^0(\Omega_2 \times [0,T], \mathbb P \times \lambda)$. We recall that elements of $L^0(\Omega \times [0,T], \mathbb P \times \lambda)$
are defined to be almost-sure equivalence classes. Write
$\mathrm{AMPR}(M) \in L^0(\Omega \times [0,T], \mathbb P \times \lambda)$ for the absolute market price of risk of $M$. Then for any
market isomorphism $\phi$
\[
\mathrm{AMPR}(\phi(M))=F(\phi^{-1}) \mathrm{AMPR}(M).
\]
We summarize this by saying that the absolute market price of risk is an invariantly-defined element for $F$.
\label{thm:marketPriceOfRiskInvariant} \end{theorem}
Theorem \ref{thm:marketPriceOfRiskInvariant} can be viewed as an analogue of Gauss's Theorema Egregium for the category of continuous-time complete markets. Of course, we are only claiming that this is an analogy. We have not established any relationship between markets and Gaussian curvature. If one is interested in direct relationships between curvature and finance, one can consider the theory of SDEs on manifolds, the Riemannian metric defined by a non-degenerate volatility term and the corresponding curvature tensor (see, for example, \cite{henryLabordere}). Note that the Riemannian metric arising in this way is independent of the choice of drift term, and so one may have non-zero curvature even when $\mathbb P=\mathbb Q$.
The proof of Theorem \ref{thm:marketPriceOfRiskInvariant} suggests we extend the definition of $\mathrm{AMPR}_t$ to all complete markets as follows. \begin{definition}
In a continuous-time complete market we define $\mathrm{AMPR}_t \in L^0_{\geq 0}(\Omega \times [0,T])$ (if it exists)
to be the solution of
\begin{equation}
\int_0^t \frac{1}{Q^2_s} \, \mathrm d [Q, Q]_s := \int_0^t \mathrm{AMPR}^2_s \, \mathrm d s,
\label{eq:amprDefGeneral}
\end{equation}
where
\begin{equation}
Q_t:= \frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P} \Big|_{{\cal F}_t}.
\label{eq:qtDefGeneral}
\end{equation} \end{definition}
An additional invariant we need to consider is the dimension of our market. This is given by the number of independent Brownian motions $n$. Our next result shows that this is an invariant of the market; indeed it is an invariant of the filtered probability space $(\Omega, {\cal F}_t,\mathbb P)$.
\begin{definition}
The $n$-dimensional {\em Wiener space} on $[0,T]$ is the filtered probability space generated by $n$ independent standard Brownian motions on $[0,T]$. A
filtered probability space is called a Wiener space if it is filtration isomorphic to an $n$-dimensional Wiener space. \end{definition}
\begin{theorem}
The dimension of a Wiener space is invariant under filtration isomorphisms. \label{thm:invarianceDimension} \end{theorem}
We are now ready to state a classification theorem for complete markets with deterministic absolute market price of risk. We recall that $\{e_i\}$ is the standard basis for $\mathbb{R}^n$ and $\mathrm{id}_n$ is the identity matrix.
\begin{theorem}[The test case]
Let $M$ be a continuous-time complete market with risk-free rate $r$, time period $T$
based on a Wiener space of dimension $n$ and with $\mathrm{AMPR}$ given by
\[
\mathrm{AMPR}_t = A(t) \geq 0
\]
for a bounded measurable function of time $A(t)$. Suppose
that the process $q_t$ is continuous.
In these circumstances $M$ is isomorphic
to the complete market associated with the SDE \eqref{eq:nDDiffusion} with
\[
\bm{\mu}=r \bm{X}_t + A(t) \, e_1, \quad \text{and }
\bm{\sigma}=\mathrm{id}_n
\]
and $\bm{X}_0=0$.
\label{thm:testcase} \end{theorem} We call markets of this form {\em canonical Bachelier markets}.
The key step in the proof of this theorem is to invariantly define a Brownian motion corresponding to the first component of ${\bm W}_t$. To do this, one shows that \[ -\int_0^t \frac{1}{A(s)} \mathrm d (\log Q)_s \] is a Brownian motion using L\'evy's characterisation of Brownian motion.
We have called Theorem \ref{thm:testcase} ``the test case'' as it is an analogous result to the theorem in differential geometry that a Riemannian manifold with vanishing curvature is flat. This latter result is called ``the test case'' in \cite{spivak}.
\begin{example}
The $n$-dimensional Black--Scholes--Merton market is isomorphic to a Bachelier market,
since the market price of risk in the Black--Scholes--Merton market is a deterministic
constant vector. \end{example}
\begin{example}
Given a positive real number $A$, an invertible matrix $\bm{\sigma}$ and a vector $\bm{X}$, the set of vectors $\bm{\mu}$ satisfying $|\bm{\sigma}^{-1}(r \bm{X} - \bm{\mu})|=A$ is non-empty; indeed, it is an ellipsoid. Hence given a complete continuous-time market modelled by an SDE, we may modify the drift to obtain a market
isomorphic to a Black--Scholes--Merton market with market price of risk $A$. \end{example}
It is difficult to estimate the drift of a volatile asset. As a result, the functional form of the drift is usually chosen for parsimony; one then uses long-term data to calibrate this functional form. If one is following this approach, in the absence of statistical evidence to the contrary it might be reasonable to choose the functional form of the drift to ensure that the resulting model has a constant market price of risk, and hence is isomorphic to a Black--Scholes--Merton model.
Our result shows that the many financial results that have been proved for the Black--Scholes--Merton model can be applied to a far wider range of markets than one might at first sight expect. Even markets which seem superficially very different from the Black--Scholes--Merton market, such as stochastic-volatility models, may still provide isomorphic investment opportunities.
These observations suggest that one should separate optimal investment problems into two components. One has the {\em strategic} problem of optimal investment for a particular isomorphism class of market. Additionally one has the {\em tactical} problem of finding a concrete realisation (or approximate realisation) of the strategy, which can be interpreted as the task of finding a concrete morphism. This division of investment problems into strategic and tactical problems is already widely used in practice (see \cite{campbellViceira}).
Although we have restricted ourselves to considering markets with deterministic absolute market price of risk, this approach can be generalized. Rather than attempt to model asset price dynamics directly, one may choose a market model by attempting to model invariantly-defined quantities. For example, if one has a view on the dynamics of the absolute market price of risk, one may develop a market model to reflect this. We expect this approach to yield a systematic method for developing low-dimensional (and hence numerically tractable) market models which still capture the essential features of the market. We will explore this in future research.
As an application of our classification theorem, we may now prove a mutual-fund theorem. \begin{theorem}[Continuous-time one-mutual-fund theorem]
Let $M$ be a complete continuous-time market with continuous $q_t$
and with deterministic, bounded absolute market price of risk. Let $X^i_t$ for
($1 \leq i \leq n$) be a collection of square integrable stochastic
processes representing $n$ basic assets, then there exist $n$
predictable real valued processes $\alpha^i_t$ such that any invariant,
non-empty, convex
set of martingales contains an element which can be replicated by a continuous-time trading strategy using only the risk-free asset and the portfolio consisting of $\alpha^i_t$ units of asset $X^i_t$.
In complete markets arising from SDEs of the form \eqref{eq:nDDiffusion}
which also have a deterministic absolute market price of risk,
we may take the portfolio $\bm{\alpha}$ with components $\alpha_i$ to be given by the vector
\[
(\bm{\sigma \sigma^\top})^{-1}(r \bm{X}_t - \bm{\mu}).
\]
\label{thm:ctsTimeMutualFundTheorem} \end{theorem} We note that a convex set of martingales can be interpreted as a convex set of self-financing trading strategies or as a convex set of derivative securities.
We call this result a one-mutual-fund theorem because it shows that a fund manager can create a single mutual fund that can be used to implement these trading strategies. A key difference between our result and the classical one-mutual-fund theorem is that an investor needs to trade in our mutual fund in continuous time.
This result explains the general form of the solution to the portfolio optimization problem studied by Merton in \cite{mertonPortfolio}. However, it goes considerably beyond this.
As an example, consider the problem of managing the investment and pension payments for a collective pension. Suppose that the fund is heterogeneous, so each individual may have a distinct mortality distribution, initial wealth and risk appetite. Assume that the fund may invest in a Black--Scholes--Merton market, and that the individuals' preferences and mortality are independent of this market. Assume that the investors' preferences are convex. Our theorem now shows that one need only consider investments in the risk-free asset and the mutual fund we have identified when deciding how to manage the pension. We can say this without actually formulating an optimal investment problem describing how such a heterogeneous fund should be managed.
In summary, our classification theorems have identified interesting isomorphisms between markets that are not obviously related. We have found large families of automorphisms for the classical markets of Markowitz and Black--Scholes--Merton. We have shown that considering these automorphisms allows one to prove very general mutual-fund theorems.
\section{Funding}
I received no funding for this study.
\begin{appendices}
\appendix
\section{Proofs}
\renewcommand{\thesubsection}{\thesection.\arabic{subsection}}
\subsection{Proofs for Section \ref{sec:markowitz}}
\begin{proof}[Proof of Lemma \ref{lemma:mod0Lemma}]
Let $f:\Omega_1 \to \Omega_2$ be a ${\mathrm{Prob}}$ isomorphism. We can then find a homomorphism $g:\Omega_2 \to \Omega_1$ such
that $g \circ f = \mathrm{id}_1$ almost surely and $f \circ g = \mathrm{id}_2$ almost surely. Define $\Omega_1^\prime$ to be the
set of points where $gf(x)=x$ and $\Omega_2^\prime$ to be the set of points where $fg(y) = y$. $\Omega_1^\prime$ and $\Omega_2^\prime$
will be of full measure.
If $x_1, x_2 \in \Omega_1^\prime$ and $f(x_1)=f(x_2)$, then
$gf(x_1)=gf(x_2)$, hence $x_1=x_2$. Thus $f$ is injective on $\Omega^\prime_1$. If $y \in \Omega^\prime_2$ then $fg(y)=y$, so
$gfg(y)=g(y)$ and hence $g(y)\in \Omega_1^\prime$ with $y=fg(y)$. Thus $f$ maps $\Omega_1^\prime$ onto $\Omega_2^\prime$. Hence $f$
is a mod 0 isomorphism.
The converse follows trivially from the definitions. \end{proof}
\begin{proof}[Proof of Lemma \ref{lemma:isomorphism}]
Let $\phi:\Omega_1 \to \Omega_2$ be a ${\mathrm{Prob}}$ morphism with two-sided inverse $\phi^{-1}$.
Suppose $\phi$ is, moreover, a market isomorphism.
Using the fact that $\phi$ and $\phi^{-1}$ are both market morphisms, we have that for any $X\in L^0(\Omega_2; \mathbb{R})$ we have
\[
c_2(X) = c_2(X \circ \phi \circ \phi^{-1}) \leq c_1(X \circ \phi) \leq c_2(X).
\]
Hence we must have equality throughout. Hence $c_2(X)=c_1(X\circ \phi)$.
The result now follows from Lemma \ref{lemma:mod0Lemma}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:genericMutualFundTheorem}]
Given $h \in G$, define $\phi_h:G \to G$ by left multiplication, so $\phi_h(g)=hg$. Let $A$ be a measurable set and let $1_A$ denote
the indicator function of $A$ then
\[
1_A \circ \phi_h = 1_{h^{-1} A}.
\]
We deduce that
\begin{equation}
\mathbb{E}(X \circ \phi_h)=\mathbb{E}(X)
\label{eq:rvinvariance}
\end{equation}
if $X$ is an indicator function of a set, and hence this holds for all integrable random variables $X$.
By assumption $S$ is non-empty, so we may choose an element $s^\prime \in S$. We define a random variable $X:G \to V$ by
\begin{equation}
X(g)=\rho(g) s^\prime.
\label{eq:defnOfX}
\end{equation}
Because $G$ acts by isometries on $V$, $\|X(g)\|=\|\rho(g) s^\prime\|=\|s^\prime\|$ for all $g$. Hence by the dominated convergence theorem
we may define an element $s$ by
\begin{equation}
s:=\mathbb{E}_\mathbb{G}( X ).
\label{eq:defnOfS}
\end{equation}
By the convexity of $S$, $s \in S$. Given $h\in G$, we now compute that
\begin{equation*}
s = \mathbb{E}_{\mathbb{G}}(X)
= \mathbb{E}_{\mathbb{G}}(X \circ \phi_{h})
= \mathbb{E}_{\mathbb{G}}(\rho( h g) s^\prime)
= \mathbb{E}_{\mathbb{G}}(\rho( h) \rho(g) s^\prime)
= \rho( h) \mathbb{E}_{\mathbb{G}}( \rho(g) s^\prime)
= \rho( h) s,
\end{equation*}
using \eqref{eq:rvinvariance}, \eqref{eq:defnOfX}, that $\rho$ is a
homomorphism, linearity of expectation, and finally \eqref{eq:defnOfX}
and \eqref{eq:defnOfS}.
So $s$ is invariant under $G$.
If $G$ is finite, the expectation is a finite sum, so we do not need the dominated convergence theorem. \end{proof}
\begin{proof}[Proof of Lemma \ref{lemma:regularity}]
We recall that a {\em perfect} probability measure
is a complete probability measure, $\mu$ on a set $S$ such that for every measurable map $f:S \to \mathbb{R}$
the image measure is a regular measure on $\mathbb{R}$. Lemma 2.4.3.\ of \cite{itoIntroduction} proves
that all standard probability spaces are perfect. Let $S$ be a perfect probability space and
let $V$ be a finite-dimensional real vector space, then Exercise
3.1(iii) of \cite{itoIntroduction} shows that any measurable map $f:S \to V$ induces
a regular measure on $V$. Thus it suffices to show that $\pi$ defined by \eqref{eq:definitionOfPi} is
measurable.
Choose a basis $\{X_i\}$ for $\dom c$. Define a map $X:\Omega \to \mathbb{R}^n$ by requiring that the $i$-th component of $X(\omega)$
is given by $X(\omega)_i = X_i(\omega)$. This map is measurable since each $X_i$ is measurable. Define a map $X^{**}:(\dom c)^* \to \mathbb{R}^n$
by requiring that the $i$-th component of $X^{**}(f)$ is given by $X^{**}(f)_i = f(X_i)$. $(X^{**})^{-1}$ is a linear isomorphism and so
is measurable by the definition of the topology on $\dom c$. Since $\pi=(X^{**})^{-1}\circ X$, $\pi$ is measurable. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:linearmarkets}]
We first show that ${\mathrm{Vec}}(M)$ lies in ${\mathrm{VecM}}$.
We have already seen in Lemma \ref{lemma:regularity} that $d_M$ is regular.
We must also show that $d_M$ is non-degenerate. Given $X \in \dom c$
we may define a linear functional $X^{**} \in (\dom c)^{**}$ by $X^{**}(f)=f(X)$. Double duality is an isomorphism, so given distinct $\tilde{X}, \tilde{Y} \in (\dom c)^{**}$ we may find distinct $X, Y \in (\dom c)$ with $X^{**}=\tilde{X}$ and $Y^{**}=\tilde{Y}$.
For any $Z \in (\dom c)$, $Z^{**}\circ \pi=Z$. $M$ is separated, so $\pi$ is a mod 0 isomorphism. Since $X$ and $Y$ are not equal, it then follows that $\tilde{X}=X^{**}$ and $\tilde{Y}=Y^{**}$ are not equal almost everywhere. So $d_M$ is non-degenerate, as claimed.
This completes the proof that ${\mathrm{Vec}}(M)$ lies in ${\mathrm{VecM}}$.
We now define an additional map, also denoted ${\mathrm{Vec}}$, which sends morphisms of ${\mathrm{FinM}}$ to morphisms of ${\mathrm{VecM}}$. Given a market morphism $T$ between two such markets $M_i=((\Omega_i, {\cal F}_i,\mathbb P_i),c_i)\in {\mathrm{FinM}}$ ($i=1,2$) we define $T^*:\dom c_2 \to \dom c_1$ by
$
T^*(f)=f\circ T.
$
We define ${\mathrm{Vec}}(T)=T^{**}:(\dom c_1)^* \to (\dom c_2)^*$ to be the ordinary vector space dual of $T^*$.
We wish to show that ${\mathrm{Vec}}(T)$ is a morphism in ${\mathrm{VecM}}$. Since
$T$ is a market morphism we compute that for any $v \in (\dom c_1)^*$
\[
({\mathrm{Vec}}(T) c_1) (v) = T^{**}(c_1)(v) = c_1 (T^* v) = c_1( v \circ T ) \leq c_2(v).
\]
Applying the same calculation to $-v$ and using linearity, we also have
$
({\mathrm{Vec}}(T) c_1) (v) \leq -c_2(v).
$
Hence
\begin{equation}
({\mathrm{Vec}}(T) c_1) (v) = c_2(v).
\label{eq:morphismCondition1}
\end{equation}
We note that
\begin{equation*}
T^{**}(v) = w \iff \forall f \in V_2^*, \, T^*f(v) = f(w)
\iff \forall f \in V_2^*, \, f T(v) = f(w)
\iff T(v) = w.
\end{equation*}
It follows that given a set $A \subseteq V_2$
\[
(T^{**})^{-1}(A) = T^{-1}(A).
\]
So if $A$ is Borel measurable we have
\begin{equation}
d_1({\mathrm{Vec}}(T)^{-1} A) = d_1((T^{**})^{-1}(A)) = d_1(T^{-1}(A))= d_2(A).
\label{eq:morphismCondition2}
\end{equation}
Together \eqref{eq:morphismCondition1}
and \eqref{eq:morphismCondition2} show that ${\mathrm{Vec}}(T)$ is a morphism in ${\mathrm{VecM}}$, as claimed.
We must show that ${\mathrm{Fin}}((V,d,c))$ is
an element of ${\mathrm{FinM}}$.
We first note that the probability space underlying ${\mathrm{Fin}}((V,d,c))$ is standard,
since a regular distribution on a real vector space always defines a standard probability distribution.
Since all elements of ${\mathrm{VecM}}$ have non-degenerate distributions, $\dom \underline{c} \subset L^0(V;\mathbb{R})$ is equal to $V^*$ (rather than a non-trivial quotient space of $V^*$ by equivalence almost everywhere). The dual space of a finite-dimensional vector space separates the points of the vector space, so ${\mathrm{Fin}}((V,d,c))$ is separated. It is now clear that ${\mathrm{Fin}}((V,d,c))$ lies in ${\mathrm{FinM}}$.
We define a mapping on morphisms, also called ${\mathrm{Fin}}$, by ${\mathrm{Fin}}(T)=T$ for any morphism $T$ of ${\mathrm{VecM}}$. We must show that a ${\mathrm{VecM}}$ morphism
is automatically a market morphism. Equation \eqref{eq:distributionConditionForVecM}
shows that ${\mathrm{Fin}}(T)$ is a ${\mathrm{Prob}}$ morphism.
Next observe that a
${\mathrm{VecM}}$ morphism is automatically surjective. Suppose for contradiction
that $T$ is not surjective, then we can find a non-zero linear functional $X$ which annihilates $\Image(T)$.
Since $\Image(T)$ is of full measure, $X$ is almost-surely zero, and hence $d_2$ is degenerate, yielding the desired
contradiction.
Now let $T:V_1 \to V_2$ be a morphism
in ${\mathrm{VecM}}$ and $X \in L^0(V_2; \mathbb{R})$.
First suppose $X$ is linear, then equation \eqref{eq:costConditionForVecM} shows that $\underline{c}_1(X\circ T)=\underline{c}_2(X)$. Next suppose $X$ is not linear, so we may find $v,w \in V_2$
and $\alpha \in \mathbb{R}$ with $X(\alpha v + w) \neq \alpha X(v) + X(w)$. Since
$T$ is surjective we may find $v^\prime, w^\prime \in V_1$ with $T v^\prime = v$
and $T w^\prime=w$. Then $XT(\alpha v^\prime + w^\prime) \neq \alpha XT(v^\prime) + XT(w^\prime)$. So $XT$ is also non-linear, and hence $\underline{c}_1(X \circ T) = \infty = \underline{c}_2(X)$. Thus $\underline{c}_1(X\circ T)=\underline{c}_2(X)$ for all $X \in L^0(V_2; \mathbb{R})$. So
${\mathrm{Fin}}(T)$ is a market morphism as claimed.
Since $\dom \underline{c}=V^*$, we have $(\dom \underline{c})^*=V^{**}$. Hence the composition ${\mathrm{Vec}} \circ{\mathrm{Fin}}$ is given by double duality of vector spaces. In particular ${\mathrm{Vec}} \circ {\mathrm{Fin}}(V,d,c)$ is naturally isomorphic to $(V,d,c)$.
We note that ${\mathrm{Fin}} \circ {\mathrm{Vec}} (M)$ is naturally isomorphic to $M$ with the isomorphism given by $\pi$ defined in \eqref{eq:definitionOfPi}.
We have now shown that ${\mathrm{Vec}}$ and ${\mathrm{Fin}}$ define an equivalence of the categories ${\mathrm{FinM}}$ and ${\mathrm{VecM}}$. It is trivial to check that vector-space duality defines
a duality of the categories ${\mathrm{VecM}}$ and ${\mathrm{DualM}}$. The statement that ${\mathrm{Vec}}$
and ${\mathrm{Dual}}$ define bijections follows by elementary category theory \cite{eilenbergmaclane}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:markowitzClassification}]
Let $\Cov:\dom c \times \dom c \to \mathbb{R}$ be given by the covariance. This is a non-degenerate symmetric bilinear form and hence defines an inner product on $\dom c$. All real inner-product spaces of dimension $n$ are isomorphic to the standard Euclidean space $\mathbb{R}^n$, hence we can find a second basis $\{Y_i\}$ for $\dom c$ with
covariance matrix $\mathrm{id}_n$. The distribution of these assets will still be a multivariate normal distribution, but now with covariance matrix $\mathrm{id}_n$. This shows that the market is Gaussian. \end{proof}
\begin{proof}[Proof of Corollary \ref{cor:generalMutualFund}]
It suffices to prove the result for markets of the form \eqref{eq:genericMarkowitz}. Let $\phi:\mathbb{R}^n \to \mathbb{R}^n$ be the linear transformation given by the matrix
\[
\phi_{ij}=\begin{cases}
1 & \text{$i=j$ and $i, j \leq 2$}, \\
-1 & \text{$i=j$ and $i, j > 2$}, \\
0 & \text{otherwise}.
\end{cases}
\]
$\phi$ defines an automorphism of any market of the form \eqref{eq:genericMarkowitz}.
Any invariant investment strategy must be invariant under $\phi^*$. $\phi^*$ has the same matrix representation as $\phi$ when written with respect to the standard dual basis $\{e_i^*\}$ for $(\mathbb{R}^n)^*$. If $X$ is an invariant investment strategy, its components $(X)_i$ written with respect to this basis satisfy $X_i=0$ for $i > 2$. \end{proof}
\subsection{Proofs for Section \ref{sec:completeOnePeriod}}
We review the features of the theory of disintegration of measures we will need.
\begin{definition}
Let $\{S_\alpha\}$ be a countable collection of subsets of a set $S$. We write $\zeta(\{S_\alpha\})$ for the collection of
sets of the form
\[
\bigcap_{i=1}^\infty S^\prime_\alpha, \quad (S^\prime_\alpha = S_\alpha\text{ or }
S^\prime_\alpha = S \setminus S_\alpha).
\]
These sets are disjoint and cover $S$ so they define a decomposition of $S$ called the {\em decomposition generated by $\{S_\alpha\}$}. A decomposition of a measurable set $S$ generated by a countable
collection of measurable sets is called a {\em measurable decomposition}. Here we are using the terminology of \cite{rokhlin} p5 and p26. These decompositions are called {\em separable decompositions} in \cite{itoIntroduction}.
We say that two measurable decompositions $\zeta$ and $\zeta^\prime$ of probability spaces $\Omega$ and $\Omega^\prime$ are {\em mod 0 isomorphic} if there is a mod 0 isomorphism of $\Omega$ mapping the elements of $\zeta$ to the elements of $\zeta^\prime$. \end{definition}
Given a decomposition $\zeta$ of a probability space $\Omega$ we may define a projection map, $\pi_\zeta:\Omega \to \zeta$ by sending a point $\omega$ to the element of $\zeta$ containing $\omega$. This projection map induces a measure $\mu_\zeta$ on $\zeta$. Rokhlin refers to the resulting measurable space as the quotient space $\Omega/\zeta$ (see p4 of \cite{rokhlin}).
\begin{definition}
\label{def:canonical}
Let $\zeta$ be a decomposition of a standard probability
space $\Omega$.
Let $\mu_C$ be a set of measures indexed by $C \in \zeta$. We say that $\mu_C$ is {\em canonical with respect to $\zeta$} if the following hold.
\begin{enumerate}[nosep,label=(\roman*)]
\item $\mu_C$ is a standard probability
space for $\mu_\zeta$-almost-all $C \in \zeta$.
\item If $A$ is a measurable subset of $\Omega$ then:
\begin{enumerate}[nosep,label=(\alph*)]
\item the set
$A \cap C$ is $\mu_C$ measurable for $\mu_\zeta$-almost-all $C$;
\item $\mu_C(A \cap C)$ defines a $\mu_\zeta$-measurable function acting on $C \in \zeta$;
\item the measure of $A$ can be recovered by integrating over $\zeta$, i.e.
\[
\mu(A) = \int_{\zeta} \mu_C(A \cap C) \, \mathrm d \mu_\zeta.
\]
\end{enumerate}
\end{enumerate} \end{definition}
This definition is simply a translation of the definition on p25 of \cite{rokhlin} into our notation. We note that what we call a standard probability space, Rohklin calls a Lebesgue space. The equivalence of these notions is given on p20 of \cite{rokhlin}.
We may now state two theorems, both due to Rohklin. \begin{theorem}
\label{thm:rokhlin1}
Let $\Omega$ be a standard probability space.
There exists a set of measures $\mu_C$ canonical with respect to $\zeta$ if and only if
$\zeta$ is a measurable decomposition (\cite{rokhlin} p26). Moreover, $\mu_C$ is defined
essentially uniquely: if $\mu_C$ and $\mu_{C^\prime}$ are both canonical for $\zeta$ then $\mu_C$ is mod 0 isomorphic to $\mu_{C^\prime}$ for $\mu_\zeta$-almost-all $C$ (\cite{rokhlin} p25). \end{theorem} \begin{theorem}
\label{thm:rokhlin2}
Let $\Omega$ be a standard probability space and $\zeta$ a measurable decomposition.
Let $m_\zeta:\zeta \to {\cal S}$ be given by mapping the measure $\mu_C$ to the element of ${\cal S}$
corresponding to its isomorphism class. Then $m_\zeta$ is $\mu_\zeta$ measurable. Two decompositions $\zeta$ and $\zeta^\prime$ are mod 0 isomorphic if and only if $\mu_\zeta$ and $\mu_{\zeta^\prime}$
are mod 0 isomorphic via a map sending $m_\zeta$ to $m_\zeta^\prime$ (\cite{rokhlin} p40). \end{theorem}
Finally, Theorem 3.3.1 of \cite{itoIntroduction} tells us that if $X$ is a real random variable, and if we define $\zeta$ to be the set of sets of the form $X^{-1}(x)$, then $\zeta$ is a measurable decomposition. When we apply Theorem \ref{thm:rokhlin1} to the decomposition $\zeta$ given by the level sets of a random variable $X$, the measure $\mu_{X^{-1}(x)}$ on the level set $X^{-1}(x)$ for $x \in \mathbb{R}$ is called the {\em conditional probability measure}, conditioned on $X=x$ (see \cite{itoIntroduction} Section 3.5). Note that in this case the projection map sending the level set $X^{-1}(x)$ to $x$ defines a mod 0 isomorphism between $\zeta$ with measure $\mu_\zeta$ and the probability measure on $\mathbb{R}$ induced by $X$.
\begin{proof}[Proof of Theorem \ref{thm:classificationComplete}]
First note that $(\mu_q,m_q) \in {\mathrm{Measures}}(n)$ is manifestly an invariant of $\Omega$.
Given a pair $M=(\mu,m) \in {\mathrm{Measures}}(n)$, let us see how to define $\Omega(M)$ with $(\mu_q,m_q)=M$.
Let $a_0$ be the probability space $[0,1]$. For $i> 0$, let $a_i$ be a probability space consisting of a single atom. We take as probability space
\[
\Omega(M)=(0,\infty)^n
\times \left(\sqcup_{i=0}^\infty a_i \right).
\]
This has a measure we denote by $(\mu \times \lambda)$ induced by taking the standard construction of product measures and measures on disjoint unions and then obtaining the Lebesgue extension.
Using our concrete realisation of ${\cal S}$, given in \eqref{eq:defCalS}, we define the components $m_{i}$ of the function $m$ for $i \in \mathbb{N} \cup \{ \infty \}$. Let $\pi_1:\Omega_M \to (0,\infty)^n$ denote the projection onto the $(0,\infty)^n$ component. We then obtain measurable functions $m_i \circ \pi_1$ defined on $\Omega$. Given a Lebesgue measurable subset $A$ of $\Omega$, we define a measure
$\mathbb P_{0}(A)$ by
\begin{align}
\mathbb P_{0}(A) &:= \int_\Omega \sum_{i=0}^\infty (m_i\circ \pi_1) \cdot 1_{A \cap ((0,\infty)^n\times a_i)} \, \mathrm d (\mu \times \lambda) \nonumber \\
&= \int_{(0,\infty)^n} \sum_{i=0}^\infty (m_i\circ \pi_1) \int_{a_i} 1_{A \cap ((0,\infty)^n\times a_i)} \, \mathrm d (\mu \times \lambda|_{a_i}) \nonumber \\
&= \int_{(0,\infty)^n} \sum_{i=0}^\infty m_i \, \mathbb P_{a_i}({A \cap {\pi_1}^{-1}(\omega)} \cap a_i) \, \mathrm d \mu
= \int_{(0,\infty)^n} \mathbb P_m({A \cap {\pi_1}^{-1}(\omega)}) \, \mathrm d \mu. \label{eq:definingFulfilled}
\end{align}
Let $\zeta$ be the decomposition of $\Omega(M)$ given by the pre-images $\pi_1^{-1}(\omega)$ for $\omega \in (0,\infty)^n$. For $\omega \in (0,\infty)^n$, let $\mu_{\pi_1^{-1}(\omega)}$ be the measure $m(\omega)$. We observe
that $m(\omega)$ is canonical with respect to $\zeta$.
We explicitly check the requirements given in Definition \ref{def:canonical}. Property (i) follows since $\pi_1^{-1}(\omega)$ is always standard. Similarly property (ii) (a) follows since $A \cap \pi_1^{-1}(\omega)$ is always measurable. Property (ii) (b) follows from Fubini's theorem, as used in the derivation of equation \eqref{eq:definingFulfilled} above. Property (ii) (c) is given by \eqref{eq:definingFulfilled} itself.
For $1\leq i \leq n$, we define measures $\mathbb P_{i,M}$ by
\begin{equation}
\mathbb P_{i,M}(A) = \mathbb{E}_{\mathbb P_0}\left( (\omega_i \circ \pi_1) \, 1_A \right)
\label{eq:defPi}
\end{equation}
where $\omega_i$ is the $i$th coordinate function on $(0,\infty)^n$ as before.
This is an equivalent probability measure to $\mathbb P_0$ since $\omega_i$ is positive
and has $\mathbb P_0$ expectation of 1.
We see that $\Omega(M)$ equipped with these measures satisfies $(\mu_q,m_q)=M$.
Suppose $\Omega$ is a probability space with $n$ additional equivalent measures $\mathbb P_i$.
Let $M=(\mu_q,m_q)$.
By Theorem \ref{thm:rokhlin2} we can find a mod 0 isomorphism, $\phi$, from $\Omega$ to $\Omega_{M}$ equipped with measure $\mathbb P_0$ which also sends $q$ to $\pi_1$ for each $i$. It follows from \eqref{eq:defPi} that $\phi$ must be a $\mathbb P_i$-isomorphism too. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:simpleCompleteMarketN}]
Let $S$ be a standard probability space and $\zeta$ a decomposition of $S$. Let $T$ be another standard probability space. We write $\zeta \star T$ for the decomposition of $S \times T$ given by taking the product of elements of $\zeta$ with $T$. Given a set of measures $\mu_C$ on $\zeta$ we write $\mu_C \times \mu_T$ for the product measures. It is clear that if $\mu_C$ is canonical with respect to $\zeta$ then $\mu_C \times \mu_T$ is canonical with respect to $\zeta \star T$. Thus the conditional measures of $\frac{\mathrm d \mathbb P_i}{\mathrm d \mathbb P_0}$ on $\Omega \times I$ are all given by products with the standard measure on $I$. Hence $(m_\Omega \times I)_0=1$ $\mu_{\Omega \times I}$-almost-everywhere.
On the other hand, taking the product
of a $\Omega$ with $I$ does not
affect the distribution $\mu_\Omega$. So if
we take $\Omega^\prime$ to be the space defined in the
statement of Theorem \ref{thm:simpleCompleteMarketN}, we will have that the invariants
of $\Omega^\prime \times I$ are equal to
the invariants of $\Omega \times I$.
The result now follows from Theorem \ref{thm:classificationComplete}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:simpleCompleteMarket1}]
Pick a Lebesgue measure $\mathbb P$ on $(0,\infty)$ with $\mathbb{E}_{\mathbb P}(\omega_1) = 1$.
The coordinate function $\omega_1$ on $(0,\infty)$ is just the identity.
Define a measure $\mathbb Q$ by requiring that the Radon--Nikodym derivative is $\frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}=\omega_1=\mathrm{id}$.
Let $F$
be the distribution function of this measure and $F^{-1}:[0,1]\to (0,\infty)$ its inverse
distribution function. We equip the interval $[0,1]$ with the Lebesgue measure $\mathbb P^\prime$ and a measure $\mathbb Q^\prime$ given by requiring that the Radon--Nikodym
derivative $\frac{\mathrm d \mathbb Q^\prime}{\mathrm d \mathbb P^\prime}=F^{-1}$.
If we can find a simultaneous mod 0 isomorphism between the measures $(\mathbb P,\mathbb Q)$ on $M \times I$
and $(\mathbb P^\prime, \mathbb Q^\prime)$ on $\tilde{M} \times I$ we see that Theorem \ref{thm:simpleCompleteMarket1}
follows from Theorem \ref{thm:simpleCompleteMarketN}. We take $\mathbb P_0=\mathbb P$ and $\mathbb P_1=\mathbb Q$ when applying Theorem \ref{thm:simpleCompleteMarketN}.
We will now find the required isomorphism.
In what follows, if $X$ is a set with measure $\mu$ we will write $X_\mu$ to emphasize the measure on $X$.
Let $0\leq p_1 \leq p_2 \leq 1$.
Suppose that $p_1$ and $p_2$ are the two ends of a connected component of $\image F$. Then
$F$ is continuous between $p_1$ and $p_2$ and so $F$ defines a mod $0$ isomorphism between
$[F^{-1}(p_1),F^{-1}(p_2))_{\mathbb P}$ and $[p_1,p_2)_{\mathbb P^\prime}$.
So $(F^{-1}[p_1,p_2))_{\mathbb P} \times I$ is mod 0 isomorphic to $[p_1,p_2)_{\mathbb P^\prime}\times I$ via $F \times \mathrm{id}$. This isomorphism maps the random variable $\omega_1 = \mathrm{id}$ to $F^{-1}$. Hence it is also
a mod 0 isomorphism for the measures $\mathbb Q$ and $\mathbb Q^\prime$.
Suppose that $p_1$ and $p_2$ are the two ends of a connected component of $[0,1]\setminus \image F$. $(F^{-1}[p_1,p_2))_\mathbb P$ is mod 0 isomorphic to the atom
$\{F^{-1}(p_1)\}_\mathbb P$ with mass $(p_2-p_1)$.
So $(F^{-1}[p_1,p_2))_{\mathbb P_0} \times I$ is mod 0 isomorphic to $[p_1,p_2)_{\mathbb P^\prime}$
which in turn is mod 0 isomorphic to $[p_1,p_2)_{\mathbb P^\prime}\times I$.
The $\mathbb Q$-measure on the atom $\{F^{-1}(p_1)\}$ is equal to
$F^{-1}(p_1)$, which is equal to $F^{-1}(p)$ for all $p_1 \leq p \leq p_2$.
Hence $(F^{-1}[p_1,p_2))_{\mathbb Q_0} \times I$ is simultaneously
mod 0 isomorphic to $[p_1,p_2)_{\mathbb Q^\prime}\times I$.
We may therefore cover $[0,1)\times I$ with
a countable set of
disjoint intervals of the form $[p_1,p_2)\times I$
which are simultaneously $\mathbb P$/$\mathbb Q$ mod 0 isomorphic to
$(F^{-1}[p_1,p_2)) \times I$.
We may therefore combine these mod 0 isomorphisms on intervals to obtain
the desired mod 0 isomorphism for the $\mathbb P$ and $\mathbb Q$ measures. \end{proof}
\begin{proof}[Proof of Corollary \ref{cor:convexMutualFund}]
We have the obvious inclusion $\iota: L^1_{\mathbb P_0}(\Omega) \to L^1_{\mathbb P_0}(\Omega \times I)$. Any element of $L^1_{\mathbb P_0}(\Omega \times I)$ which can be written as a function of the Radon--Nikodym derivatives $\frac{\mathrm d \mathbb P_i}{\mathrm d \mathbb P_0}$ must lie in the image of $\iota$. Hence
it suffices to prove that $\iota A$ contains
an element which can be written as a function
of these Radon--Nikodym derivatives.
By Theorem \ref{thm:simpleCompleteMarketN} we
may assume without loss of generality that the market $\Omega \times I$ is given by $\Omega^\prime \times I=(0,\infty)^n \times I$ and $\mathbb P^\prime_i$ as described in Theorem \ref{thm:simpleCompleteMarketN}. In this case the Radon--Nikodym derivatives are given by the coordinate functions $\omega_i$.
Let $G = S^1 \cong \mathbb{R} / \mathbb{Z}$ with measure given by the quotient measure. Since each element of $\mathbb{R} / \mathbb{Z}$ has a unique representative on $[0,1)$, $G$ is strictly isomorphic to $[0,1)$ as a probability space. Hence we may define an action of $G$ on any product space $X \times I$ by using the action on the right-hand side of the product. We can apply Theorem \ref{thm:genericMutualFundTheorem} with this choice of $G$ and taking $\iota A$ as the convex set. The result now follows.
We collect together the key properties of rearrangement in a single lemma.
\begin{lemma}
\label{lemma:rearrangement}
Let $m$ be a Lebesgue measure on $(0,\infty)$ satisfying condition \eqref{eq:conditionOnM}. Then
$U_m$ is a uniformly-distributed random variable. Let $X$ be a random variable in $L^0_{\mathbb P_m}((0,\infty) \times [0,1); \mathbb{R})$.
The $\mathbb P_m$ distribution is left fixed by rearrangement of $X$. The $\mathbb Q_m$ distributions are increased or decreased according to whether one applies the increasing
or decreasing rearrangement. Symbolically:
\begin{equation}
d^{\mathbb P_m}(X)=d^{\mathbb P_m}(R^\pm_m(X))
\label{eq:rearrangementFixingProperty},
\end{equation}
\begin{equation}
d^{\mathbb Q_m}(X) \preceq d^{\mathbb Q_m}(R^+_m(X))
\label{eq:increasingRearrangementProperty},
\end{equation}
\begin{equation}
d^{\mathbb Q_m}(X) \succeq d^{\mathbb Q_m}(R^-_m(X))
\label{eq:decreasingRearrangementProperty}.
\end{equation}
In addition:
\begin{equation}
\frac{\mathrm d \mathbb Q_m}{\mathrm d \mathbb P_m}(\omega)<\frac{\mathrm d \mathbb Q_m}{\mathrm d \mathbb P_m}(\omega^\prime) \implies
R^\pm_m(\pm X)(\omega) \leq R^\pm_m(\pm X)(\omega^\prime)
\label{eq:increasingInNikodym1},
\end{equation}
\begin{equation}
d^{\mathbb P_m}(X) \preceq d^{\mathbb P_m}(Y) \implies d^{\mathbb Q_m}(R^+_m(X)) \preceq d^{\mathbb Q_m}(R^+_m(Y))
\label{eq:transitivity},
\end{equation}
\begin{equation}
F_{\frac{\mathrm d \mathbb Q_m}{\mathrm d \mathbb P_m}}\text{ is continuous at }x \implies R^\pm_m(X)(x,y_1)=R^\pm_m(X)(x,y_2) \quad \forall y_1, y_2
\label{eq:constantOnFibres}.
\end{equation} \end{lemma} \begin{proof}
Pick $z \in (0,1)$. Since $F_m$ is an increasing function, we can find $x_0 \in (0,\infty)$ with
$\lim_{x^\prime \to x_0-} F_m(x^\prime) \leq z \leq \lim_{x^\prime \to x_0+} F_m(x^\prime)$. Hence we can find $y_0$ with $U_m(x_0,y_0)=z$. Since $F_m$ is increasing,
we deduce that
\begin{align}
\mathbb P_m( U_m(\omega) \leq z)
&= \mathbb P_m( x(\omega)<x_0 \text{ or } (x(\omega)=x_0 \text{ and }y(\omega)\leq y_0) ) \nonumber \\
&= \mathbb P_m( x(\omega)<x_0) + \mathbb P_m(x(\omega)=x_0)\mathbb P_m(y(\omega)\leq y_0) \nonumber \\
&= \lim_{x \to x_0-} F_m(x) + y_0 (\lim_{x \to x_0+} F_m(x)
- \lim_{x \to x_0-} F_m(x)) = z. \label{eq:uniformProperty}
\end{align}
We deduce first that $U_m$ is measurable since its sublevel sets are measurable. We then deduce that $U_m$ is a uniform random variable as \eqref{eq:uniformProperty} is the defining property of uniform random variables.
Property \eqref{eq:rearrangementFixingProperty} of rearrangement follows immediately from the fact that $U_m$ is uniform and from the definition of rearrangement.
We note that for $\alpha \in (0,1)$,
\begin{align*}
\inf \{ x \in \mathbb{R} \mid F_X(x) \geq \alpha \} \leq k
&\implies
F_X(k) \geq \alpha.
\end{align*}
So from the definition of rearrangement
\begin{align*}
\mathbb P(R^+_m(X)(\omega) \leq k)
&=\mathbb P(F^{-1}_X(U_m(\omega)) \leq k)
=\mathbb P(\inf \{ x \in \mathbb{R} \mid F_X(x) \geq U_m(\omega) \} \leq k ) \\
&\leq \mathbb P(F_X(k) \geq U_m(\omega) )
= F_X(k).
\end{align*}
The last step uses \eqref{eq:uniformProperty}. We have established \eqref{eq:increasingRearrangementProperty}. Property \eqref{eq:decreasingRearrangementProperty}
is now obvious.
From the definition of $U_m$, if $x(\omega)\leq x(\omega^\prime)$ then $U_m(\omega)\leq U_m(\omega^\prime)$. $F_X$ is increasing and $x$ is equal to the Radon--Nikodym derivative $\frac{\mathrm d \mathbb Q_m}{\mathrm d \mathbb P_m}$. Hence \eqref{eq:increasingInNikodym1} follows.
From the definition of $U_m$, $U_m(x,y)$ is independent of $y$ when $F_m$ is continuous at $x$. Hence $R^\pm_m(X)(x,y)$ is also independent of $y$. Note that $F_m=F_\frac{\mathrm d \mathbb Q_m}{\mathrm d \mathbb P_m}$. This establishes \eqref{eq:constantOnFibres}.
To establish \eqref{eq:transitivity} let us suppose $d^{\mathbb P_m}(X) \preceq d^{\mathbb P_m}(Y)$. This means that
\[
F_X(k) \geq F_Y(k) \quad \forall k \in \mathbb{R}
\]
where $F_X$ and $F_Y$ are the $\mathbb P_m$-measure distribution functions of $X$ and $Y$.
Hence
\begin{equation}
F_X^{-1}(p) \leq F_Y^{-1}(p) \quad \forall p \in [0,1].
\label{eq:finversecomp}
\end{equation}
We then find
\begin{align*}
\mathbb Q_m(R^+_m(X) \leq k) = \mathbb{E}_m( x 1_{(R^+_m(X)\leq k)} )
= \mathbb{E}_m( x 1_{(F^{-1}_X \circ U_m \leq k)} )
&\geq \mathbb{E}_m( x 1_{(F^{-1}_Y \circ U_m \leq k)} ) \quad \text{by }\eqref{eq:finversecomp} \\
&= \mathbb Q_m(R^+_m(Y) \leq k).
\end{align*}
So $d^{\mathbb Q_m}(R^+_m(X)) \preceq d^{\mathbb Q_m}(R^+_m(Y))$ as claimed. \end{proof}
\begin{lemma}
If $(\Omega, {\cal F}, \mathbb P)$ is a probability space, $X$ and $Y$
are real random variables and $Z$ is an $\mathbb{R}^k$ random variable
satisfying
\[
d^\mathbb P(X \mid Z) \preceq d^\mathbb P( Y \mid Z)
\]
then $d^\mathbb P(X) \preceq d^\mathbb P( Y )$.
\label{lemma:conditionalExpectation} \end{lemma} \begin{proof} $\mathbb P(X \leq k)=\int_{\mathbb{R}^k} \mathbb P(X \leq k \mid Z ) \, \mathrm d Z \leq\int_{\mathbb{R}^k} \mathbb P(Y \leq k \mid Z ) \, \mathrm d Z = \mathbb P(Y \leq k).$ \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:rearrangement}]
By Theorem \ref{thm:simpleCompleteMarketN}, we only need consider the case when
$\Omega=(0,\infty)^n$ equipped with a measure $\mu$ satisfying
$
\mathbb{E}_\mu(x_i)=1
$
for each coordinate function $x_i$.
Given an integer $j$, $1 \leq j \leq n$, we
define a random $(n-1)$-vector $\hat{q}_j(\omega)$ consisting of all the components of $q$ except the $j$th. We write $\hat{\mu}_j$ for the measure induced on $(0,\infty)^{n-1}$ by $\hat{q}_j$. We write $q_j$ for the $j$th component of $q$, and write $\mu_j$ for the measure on $(0,\infty)$ induced by $q_j$.
Given a random variable $X$ on $(0,\infty)^n \times [0,1)$ and a value $Q \in (0,\infty)^{n-1}$
we may define $X_{j,Q}:(0,\infty)\times[0,1) \to \mathbb{R}$ by
\[
X_{j,Q}(x,y)=X(Q \oplus_j x, y),
\]
where $Q \oplus_j x$ is the vector obtained by inserting a new component with value $x$ at the $j$th index of the vector $Q$. $X_{j,Q}$ is $\hat{\mu}_j$-almost-surely measurable.
Let $y$ denote the final coordinate function on $(0,\infty)^n \times [0,1)$. We define {\em conditional rearrangements} $R^+_j$ and $R^-_j$ as follows
\[
R^{\pm}_j(X)(\omega) := R^\pm_{\mu_j}(X_{j,\hat{q}_j(\omega)}) \left( q_j(\omega), y(\omega) \right).
\]
We define $R_j=R^+_j$ if $\sign j=1$, and $R_j=R^-_j$ otherwise.
Since $X_{j,Q}$ is $\hat{\mu}_j$-almost-surely measurable, $R^{\pm}_j$ is well-defined mod $0$.
We need to check that
$R^{\pm}_j$ is measurable. We note that
\begin{equation*}
F^{-1}_{X_{j,\hat{q}_j(\omega)}}(p)
= \inf \{ z \in \mathbb{R} \mid F_{X_{j,\hat{q}_j(\omega)}}(z) \geq p \}
= \inf \{ z \in \mathbb Q \mid F_{X_{j,\hat{q}_j(\omega)}}(z) \geq p \}
\end{equation*}
using the monotonicity of distribution functions. Define
\[
f(z,\omega,p)=\begin{cases}
z & F_{X_{j,\hat{q}_j(\omega)}}(z) \geq p, \\
\infty & \text{otherwise}.
\end{cases}
\]
It is obvious from chasing through the definitions that $f$ is measurable.
The infimum of a countable sequence of measurable functions is measurable. Hence
$F^{-1}_{X_{j,\hat{q}_j(\omega)}}(p)$ is measurable as a function of the pair $(\omega,p)$.
By definition
\[
R^+_{\mu_j}(X_{j,\hat{q}_j(\omega)})(x,y)=F^{-1}_{X_{j,\hat{q}_j(\omega)}}(U_{\mu_j}(x,y)),
\]
so this quantity is measurable as a function of $(\omega,x,y)$. The measurability of $R^{\pm}_j(X)$ is now immediate.
We inductively define $R^*_0(X)=X$ and $R^*_j(X)=R_j(R^*_{j-1}(X))$ for $1\leq j \leq n$. We define $R(X)=R^*_n(X)$.
Let us suppose as induction hypothesis that we have established for some $j<n$ that
\begin{equation}
\begin{split}
d^{\mathbb P_i}(X) &= d^{\mathbb P_i}(R_j^*(X)) \quad \text{if } i=0 \text{ or } i > j, \\
d^{\mathbb P_i}((\sign j)X) &\preceq d^{\mathbb P_i}(R_j^*((\sign j) X)) \quad \text{otherwise}.
\end{split}
\label{eq:inductionHypothesis}
\end{equation}
We may then apply equations \eqref{eq:rearrangementFixingProperty}, \eqref{eq:increasingRearrangementProperty}, \eqref{eq:decreasingRearrangementProperty} and \eqref{eq:transitivity}
to find
\begin{equation}
\begin{split}
d^{\mathbb P_i}(X \mid \hat{q}_{j+1}) &= d^{\mathbb P_i}(R^*_{j+1}(X) \mid \hat{q}_{j+1}) \quad \text{if } i=0 \text{ or } i > j+1, \\
d^{\mathbb P_i}(R^*_j((\sign j) X) \mid \hat{q}_{j+1}) &\preceq d^{\mathbb P_i}( R_{j+1}^*((\sign j) X) \mid \hat{q}_{j+1}) \text{ otherwise}.
\label{eq:inductionDeduction}
\end{split}
\end{equation}
Applying Lemma \ref{lemma:conditionalExpectation}, we may deduce from equations \eqref{eq:inductionDeduction} that our induction hypothesis \eqref{eq:inductionHypothesis} will also hold when $j\to j+1$.
We deduce that \eqref{eq:inductionHypothesis} holds for $0 \leq j \leq n$. This establishes properties (i) and (ii) of $R(X)$.
For each $i$ ($0\leq i \leq n$), define a partial order $\preceq_i$ on $\mathbb{R}^n$
by
\[
x \preceq_i y \iff \begin{cases}
(\sign j)x_j \leq (\sign j)y_j & 1 \leq j \leq i \\
x_j = y_j & i < j \leq n.
\end{cases}
\]
We suppose as induction hypothesis that for some $1\leq i\leq n-1$,
\begin{equation}
R_{i-1}^*(X)(\omega) \leq R_{i-1}^*(X)(\omega^\prime) \quad \text{if} \quad q(\omega) \prec_i q(\omega^\prime).
\label{eq:inductionHypothesis2}
\end{equation}
Write $q^a(\omega)$ for the vector containing the first $(i-1)$ components
of $q(\omega)$, $q^b(\omega)$ for the $i$th component of $q(\omega)$ and $q^c(\omega)$ for the remaining components. So $q(\omega)=q^a(\omega)\oplus q^b(\omega) \oplus q^c(\omega)$.
Suppose that $q(\omega) \prec_{i+1} q(\omega^\prime)$ then $q^a(\omega)\preceq q^a(\omega^\prime)$, $q^b(\omega) \leq q^b(\omega^\prime)$,
$q^c(\omega) = q^c(\omega^\prime)$. We also have either: (a) $q^a(\omega)\prec q^a(\omega^\prime)$ and
$q^b(\omega)= q^b(\omega^\prime)$;
(b) $q^a(\omega)= q^a(\omega^\prime)$ and $q^b(\omega)< q^b(\omega^\prime)$;
or (c) $q^a(\omega)\prec q^a(\omega^\prime)$ and $q^b(\omega)< q^b(\omega^\prime)$.
In case (a), our induction hypothesis \eqref{eq:inductionHypothesis2} tells us that
\[
R_{i-1}^*(X)(\omega) \leq R_{i-1}^*(X)(\omega^\prime).
\]
Hence by property \eqref{eq:transitivity} of rearrangement
\[
R_i^*(X)(\omega)=R_i(R_{i-1}^*(X))(\omega) \leq R_i(R_{i-1}^*(X))(\omega^\prime)
= R_i^*(X)(\omega^\prime).
\]
In case (b), we may apply \eqref{eq:increasingInNikodym1} to
the rearrangement $R_i$ of the random variable $R_{i-1}^*(X)$ to find
that $R_i^*(X)(\omega)\leq R_i^*(X)(\omega^\prime)$. In case (c) we apply our results for case (a) and case (b) in succession and use the transitivity of $\leq$ to again find that $R_i^*(X)(\omega)\leq R_i^*(X)(\omega^\prime)$. Thus \eqref{eq:inductionHypothesis2} remains true when we change $(i-1)\to i$.
The induction hypothesis \eqref{eq:inductionHypothesis2} is trivially true when $i=1$, so claim (iii) follows. \end{proof}
\begin{proof}[Proof of Corollary \ref{cor:rearrangement}]
Let $X \in L^0(\Omega)$. We define $\tilde{X} \in L^0(\Omega \times [0,1))$ by $\tilde{X}(\omega,y)=X(\omega)$.
This will satisfy $d^{\mathbb P_i}(X)=d^{\mathbb P_i}(\tilde{X})$ for all $i$.
Consider case (b) of our claim. By property \eqref{eq:constantOnFibres} of rearrangement, $R_j$, and hence $R$, only depends upon $q$. So we may
write $R(\tilde{X})=\hat{X}(q)$ for some $\hat{X}$. We define $\tilde{R}(X)=\hat{X}(q)$, and it will satisfy all the desired properties.
Now consider case (a) of our claim. Let us write $\{x_n\}$
for the countable set of discontinuities of $F_{q_1}$.
We define a set $
\Delta_n := \left(q_1\right)^{-1} (x_n)$.
Since the probability space is standard and atomless, there is a mod $0$
isomorphism $\phi_n$ from the set $\Delta_n$
to the set
$
\{ x_n \} \times I.
$
We write $\Delta=\bigcup \Delta_n$. Property \eqref{eq:constantOnFibres}
tells us that the rearrangement $R(\tilde{X})(\omega,y)$ does not depend upon
$y$ if $\omega \in \Omega \setminus \Delta$. So we may define a function $\hat{X}$
on $(0,\infty) \setminus \{ x_n \}$ by $\hat{X}(q_1)=R(\tilde{X})$ on $\Omega \setminus \Delta$. We now define
\[
\tilde{R}(X)(\omega)=\begin{cases}
\hat{X}(q_1(\omega)) & \omega \in \Omega \setminus \Delta, \\
R(\tilde{X})(\phi_n(\omega)) & \omega \in \Delta_n.
\end{cases}
\]
Since each $\phi_n$ is a mod 0 isomorphism on $\Delta_n$ and preserves the
Radon--Nikodym derivatives, we see that
\[
d^{\mathbb P_i}(\tilde{R}(X))=d^{\mathbb P_i}(R(\tilde{X}))
\]
for $i=0,1$. The result follows. \end{proof}
\subsection{Proofs for Section \ref{sec:ctstime}}
Let us briefly review how the measure $\mathbb Q$ is constructed. Suppose that further to the assumptions of Definition \ref{def:exchangeMarket}, we may define a process $Z_t$ by \begin{equation} Z_t = \int_0^t (\bm{\sigma}^{-1}(r \bm{X}_s - \bm{\mu})) \cdot \mathrm d \bm{W}_s \label{eq:defOfZT} \end{equation} where $\cdot$ denotes the usual inner product of vectors. We have suppressed the parameters $(\bm{X}_s,s)$ of the functions $\bm{\sigma}$ and $\bm{\mu}$ to keep our expressions readable, and will do this throughout this section. We then define $q_t$ to be the Dol\'eans-Dade exponential of $Z_t$, \begin{equation} q_t = \exp\left( Z_t - \frac{1}{2} [Z,Z]_t \right), \label{eq:qFirstDef} \end{equation} so that $q$ is a positive process and a local $\mathbb P$-martingale. If $q_t$ is a $\mathbb P$-martingale, then the measure $\mathbb Q$ can be defined by \begin{equation} \mathbb Q(A) = \mathbb{E}_\mathbb P(q_T 1_A) \label{eq:defofQMeasure} \end{equation} for a measurable set $A \subset \Omega$.
\begin{proof}[Proof of Theorem \ref{thm:marketPriceOfRiskInvariant}]
Applying It\^o's Lemma to the defining equation for the Dol\'eans-Dade exponential we compute that
\[
\int_0^t \frac{1}{q^2_s} \, \mathrm d [q, q]_s = [Z,Z]_t.
\]
Hence by \eqref{eq:defOfZT}
\begin{equation}
\int_0^t \frac{1}{q^2_s} \, \mathrm d [q, q]_s = \int_0^t |\bm{\sigma}^{-1}(r \bm{X}_s - \bm{\mu})|^2 \mathrm d s.
\label{eq:amprInvariant}
\end{equation}
Since
\[
q_t = \frac{\mathrm d \mathbb Q}{\mathrm d \mathbb P}\Big|_{{\cal F}_t},
\]
$q_t$ is manifestly an invariantly-defined stochastic process (for the obvious choice of functor). Hence the left-hand side of equation \eqref{eq:amprInvariant} is manifestly an invariantly-defined stochastic process. We can
characterise the process $|\bm{\sigma}^{-1}(r \bm{X}_t - \bm{\mu})|$ as the unique non-negative element in $A_t \in L^0(\Omega \times [0,T], \mathbb P \times \lambda)$
satisfying
\[
\int_0^t \frac{1}{q^2_s} \, \mathrm d [q, q]_s = \int_0^t A^2_s \, \mathrm d s.
\]
$A_t$ defined in this way is manifestly invariantly defined, so the absolute market
price of risk is also invariantly defined. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:invarianceDimension}]
Suppose for a contradiction that $n$-dimensional Wiener space, $\Omega_n$, is isomorphic to $m$-dimensional Wiener space with $m>n$. Using this isomorphism we may find $m$ independent standard Brownian motions on $\Omega_n$, $\tilde{W}^j_t$ ($1 \leq j \leq m$). By the martingale
representation theorem, there are unique, predictable processes $\alpha^{ij}_t$ ($1 \leq i \leq n$, $1 \leq j \leq m$)
such that
\[
\tilde{W}^j_t = \int_0^t \sum_{a=1}^n \alpha^{aj}_s \mathrm d W^a_s.
\]
Let $\alpha_t$ be the $n \times m$ matrix with components $\alpha^{ij}$ and let
$\mathrm{id}_m$ denote the identity matrix of dimension $m$. We compute the quadratic-covariation matrix of each side in the above expression to obtain
$\mathrm{id}_m = (\alpha_t)^\top (\alpha_t)$.
Since $\alpha_t$ has rank less than or equal to $n$, and $\mathrm{id}_m$
has rank $m$ we obtain the desired contradiction. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:testcase}]
Given such a complete market, let $Q_t$ be defined as in \eqref{eq:qtDefGeneral} and let
\begin{equation}
\tilde{Z}_t = \log Q_t + \frac{1}{2} \int_0^t A(s)^2 \mathrm d s.
\label{eq:defztilde}
\end{equation}
We compute
\begin{align*}
\mathrm d \tilde{Z}_t &= \mathrm d(\log Q_t) + \frac{1}{2} A(t)^2 \, \mathrm d t
= \frac{1}{Q_t} \, \mathrm d Q_t - \frac{1}{2 Q^2} \, \mathrm d [Q,Q]_t + \frac{1}{2} A(t)^2 \, \mathrm d t
= \frac{1}{Q_t} \, \mathrm d Q_t.
\end{align*}
Hence $\tilde{Z}_t$ is a continuous local martingale.
We now define
\begin{equation}
\tilde{W}^1_t = -\int_0^t \frac{1}{A(s)} \, \mathrm d \tilde{Z}_s.
\label{eq:defwtilde}
\end{equation}
$\tilde{W}^1_t$ is a continuous local martingale by our assumptions on $A(t)$. We compute its
quadratic variation.
\begin{equation*}
[\tilde{W}^1,\tilde{W}^1]_t =
\int_0^t \frac{1}{A(s)^2} \mathrm d [\tilde{Z},\tilde{Z}]_s
= \int_0^t \frac{1}{A(s)^2} \mathrm d [ \log Q, \log Q]_s
= \int_0^t \frac{1}{Q_s^2 A(s)^2}\mathrm d [Q,Q]_s = \int_0^t \mathrm d s = t
\end{equation*}
by \eqref{eq:defwtilde}, \eqref{eq:defztilde}, It\^o's Lemma and
\eqref{eq:amprDefGeneral}.
It follows by L\'evy's characterisation of Brownian motion that $\tilde{W}^1_t$ is Brownian motion.
We may now find additional Brownian motions, $\tilde{W}^i_t$ for $2 \leq i \leq n$, such that the vector process $\bm{\tilde{W}}_t$ with components $\tilde{W}^i_t$ is a standard $n$-dimensional Brownian motion.
To see this, we use the fact that $\Omega$ is assumed to be an $n$-dimensional Wiener space, so admits an $n$-dimensional standard Brownian motion $\bm{\hat{W}_t}$. Using the martingale representation theorem, we may write $\tilde{W}^1_t=\int_0^t \bm{\alpha}_s \cdot \mathrm d \bm{\hat{W}}_s$ for a predictable vector process $\bm{\alpha}_t$ of norm 1. Given a vector $v \in \mathbb{R}^n$ of norm 1, we define a number $i_k$ for each $2 \leq k \leq n$ by $i_k = \inf\{i \mid \dim \langle v, e_1, e_2, \ldots, e_{i} \rangle \geq k\}$. Then $\{v, e_{i_2}, e_{i_3}, \ldots, e_{i_n}\}$ is a basis of $\mathbb{R}^n$. Applying the Gram--Schmidt process to this basis yields an orthonormal basis $\{v_i \}$ for $\mathbb{R}^n$ with $v_1=v$ and which is determined entirely by $v$. Applying this construction with $v=\bm{\alpha}_s$ we obtain a predictable orthonormal basis $\{ \bm{\alpha}^i_t \}$.
We now define
\[
\tilde{W}^i_t = \int_0^t \bm{\alpha}^i_s \cdot \mathrm d \bm{\hat{W}}_s.
\]
The process $\bm{\tilde{W}}_t$ is a continuous semi-martingale
and its quadratic-covariation matrix has components
\[
[\tilde{W}^i, \tilde{W}^j]_t= \int_0^t \bm{\alpha}^i_s \cdot \bm{\alpha}^j_s \, \mathrm d s = t\, \delta^{ij}.
\]
Hence by L\'evy's characterisation this is indeed $n$-dimensional Brownian motion.
We now define a stochastic process ${\bm X}_t$ by
\begin{equation}
\mathrm d {\bm{X}}_t = (r \bm{X}_t + A(t) e_1) \mathrm d t + \mathrm d \bm{\tilde{W}}_t.
\label{eq:dyanmicsForX}
\end{equation}
Here we use the boundedness and measurability of $A$ to ensure existence
and uniqueness of the solution to this SDE.
The continuous-time market associated to \eqref{eq:dyanmicsForX} has $Z_t$ given by formula
\eqref{eq:defOfZT}, so
\begin{equation}
\mathrm d Z_t = - A(t)\, \mathrm d \tilde{W}^1_t.
\label{eq:Zsde}
\end{equation}
In particular $
\mathrm d [Z,Z]_t=A(t)^2 \mathrm d t$, so equation \eqref{eq:qFirstDef} becomes
\[
\log( q_t ) = Z_t - \frac{1}{2} \int_0^t A(s)^2 \, \mathrm d s.
\]
So we find
\begin{equation*}
\mathrm d (\log q_t) = \mathrm d Z_t - \frac{1}{2} A(t)^2 \, \mathrm d t
= -A(t) \mathrm d \tilde{W}^1_t - \frac{1}{2} A(t)^2 \, \mathrm d t, \quad \text{ by \eqref{eq:Zsde}}.
\end{equation*}
On the other hand we compute from \eqref{eq:defztilde} and \eqref{eq:defwtilde} that
\begin{equation*}
\mathrm d (\log Q_t) = \mathrm d \tilde{Z}_t - \frac{1}{2}A(t)^2\, \mathrm d t
= -A(t) \mathrm d \tilde{W}^1_t - \frac{1}{2} A(t)^2\, \mathrm d t.
\end{equation*}
Since we also have $q_0=Q_0=1$, we see that $Q_t=q_t$.
Prices in $M$ are, by definition, given by
\[
c_t(X) = \mathbb{E}( e^{-r(T-t)} Q X \mid {\cal F}_t ) = \mathbb{E}( e^{-r(T-t)} Q_t X_t ).
\]
Prices in the complete market associated with \eqref{eq:dyanmicsForX}
are given by the same formulae with $Q$ replaced by $q$. Hence the costs are the
same in both markets, showing that we have identified a market isomorphism. \end{proof} \begin{proof}[Proof of Theorem \ref{thm:ctsTimeMutualFundTheorem}]
Without loss of generality our market is a canonical Bachelier market.
Let $A$ be an invariant convex set of martingales. Let $Y$ be an element of $A$. By the martingale
representation theorem
\[
Y_t =Y_0 + \sum_{i=1}^n \int_0^t a^i_s \, \mathrm d W^i_s
\]
for some predictable processes $a^i_s$. By invariance of $A$, we see that
\[
Y_t =Y_0 + \int_0^t a^1_s \, \mathrm d W^1_s - \sum_{i=2}^n \int_0^t a^i_s \, \mathrm d W^i_s
\]
is also in $A$, as flipping the signs of the Brownian motions $W^k_t$ for
$2\leq k\leq n$ induces an isomorphism of the canonical Bachelier model.
By the convexity of $A$,
\[
Y_t = Y_0 + \int_0^t a^1_s \, \mathrm d W^1_s
\]
lies in $A$. Hence by the theory of \cite{harrisonPliska}, the martingale $Y_t$ can be replicated using a predictable
self-financing trading strategy using only the asset $W^1_t$ and the
risk-free asset. A second application of the martingale representation
theorem shows that the asset $W^1_t$ may itself be replicated by
a trading strategy using only the assets $X^i_t$. The hedging portfolio
obtained in this way gives rise to the portfolio referred to in the
statement of the theorem.
We wish to compute this portfolio explicitly in the case
of markets of the form \eqref{eq:nDDiffusion}.
We may read off from \eqref{eq:defOfZT} and \eqref{eq:defwtilde} that
\[
\mathrm d \tilde{W}^1_t = - \frac{1}{A(t)} \bm{\sigma}^{-1}(r\bm{X}_t-\bm{\mu}) \cdot \mathrm d \bm{W}_t.
\]
From \eqref{eq:nDDiffusion} we may write
\begin{equation*}
\mathrm d \tilde{W}^1_t = -\tfrac{1}{A(t)} \bm{\sigma}^{-1}(r\bm{X}_t-\bm{\mu}) \cdot ( \bm{\sigma}^{-1} (\mathrm d \bm{X}_t - \bm{\mu} \,\mathrm d t) )
= -\tfrac{1}{A(t)} (\bm{\sigma} \bm{\sigma}^\top)^{-1}(r\bm{X}_t-\bm{\mu}) \cdot (\mathrm d \bm{X}_t - \bm{\mu} \,\mathrm d t).
\end{equation*}
We can now read off that the portfolio of risky assets one should
hold in order to replicate $W^1_t$ is proportional to
$
(\bm{\sigma} \bm{\sigma}^\top)^{-1}(r\bm{X}_s-\bm{\mu}).
$ \end{proof}
\section{Basic concepts of category theory} \label{sec:category}
In this section, we review the concepts from category theory required for this paper.
\begin{definition}
A {\em category} $C$ consists of the following data:
\begin{enumerate}[label=(\roman*)]
\item a class $\ob(C)$ of {\em objects}.
\item a class $\hom(C)$ of {\em morphisms}. To each morphism $f$
are associated a source $a \in \ob(C)$ and target $b \in \ob(C)$. We write $f:a \to b$. $\hom(a,b)$ is the class of all morphisms from $a$ to $b$.
\item for all $a, b, c \in \ob{C}$ a binary operation $\hom(a,b) \times \hom(b,c) \to \hom(a,c)$ called composition. If $f:a \to b$, $g:b \to c$ we write $g \circ f$ or just $g f$ for the composition.
\end{enumerate}
The composition satisfies
\begin{enumerate}[label=(\roman*)]
\item Associativity: If $f:a\to b$, $g:b\to c$, $h:c\to d$
\[ h \circ (g \circ f) = (h \circ g) \circ f \]
\item Identity: For all $x \in \ob(C)$ there exists a morphism ${\mathbf 1}_x:x \to x$
with the property that if $f:a \to x$, ${\mathbf 1}_x \circ f=f$ and if $g:x \to a$, $g \circ {\mathbf 1}_x = g$.
\end{enumerate} \end{definition}
A basic example is the category $\text{Set}$ of all ``small sets''. To define this, one first chooses a large set which contains all the sets you will be interested in. A small set is then defined to be a subset of this large set. We define the morphisms between small sets to be given by functions. One has to consider small sets rather than the category of all possible sets in order to avoid Russell's paradox. In all our definitions of categories below, the objects will be restricted to those given by small sets.
With these technicalities out of the way, we can list various familiar categories: the category ${\mathrm{Group}}$ of groups with morphisms given by homomorphisms; the category ${\mathrm{Vec}}$ of vector spaces with morphisms given by linear transformations; the category ${\mathrm{Top}}$ of topological spaces with morphisms given by continuous functions.
An isomorphism is defined to be a morphism $f$ which admits a two-sided inverse. An automorphism is an isomorphism whose source and target coincide.
A basic technique in proving classification theorems is to identify invariants of the objects one is studying. Category theory allows us to formalize this concept.
A covariant functor is a mapping between categories and their morphisms that respects composition and identities. \begin{definition}
A {\em covariant functor} $F$ from a category $C$ to a category $D$ is a mapping which
\begin{enumerate}[nosep,label=(\roman*)]
\item associates to each object $x \in \ob(C)$ an object in $F(x) \in \ob(D)$.
\item associates to a morphism $f:x \to y$ in $\hom(C)$ a morphism $F(f):F(x)\to F(y)$ in $\hom(D)$.
\end{enumerate}
and which satisfies
\begin{enumerate}[nosep,label=(\roman*)]
\item For all $x \in \ob(C)$, $F({\mathbf 1}_x) = {\mathbf 1}_{F(x)}$
\item If $f:a \to b$ and $g:b \to c$ then $F(g \circ f)=F(g) \circ F(f)$.
\end{enumerate} \end{definition}
A contravariant functor is a mapping between categories and their morphisms that reverses composition while preserving identities. \begin{definition}
A {\em contravariant functor} $F$ from a category $C$ to a category $D$ is a mapping which
\begin{enumerate}[nosep,label=(\roman*)]
\item associates to each object $x \in \ob(C)$ an object in $F(x) \in \ob(D)$.
\item associates to a morphism $f:x \to y$ in $\hom(C)$ a morphism $F(f):F(y)\to F(x)$ in $\hom(D)$.
\end{enumerate}
and which satisfies
\begin{enumerate}[nosep,label=(\roman*)]
\item For all $x \in \ob(C)$, $F({\mathbf 1}_x) = {\mathbf 1}_{F(x)}$
\item If $f:a \to b$ and $g:b \to c$ then $F(g \circ f)=F(f) \circ F(g)$.
\end{enumerate} \end{definition}
We note that in all our examples the objects are sets and the morphisms are maps between these sets. But the definitions of category theory allow other types of object and morphism. In particular to any category one can define the {\em opposite category} by reversing the direction of morphisms. This allows one to alternatively define a contravariant functor as a covariant functor to the opposite category.
The mapping that sends a vector space $V$ to its dual and a linear transformation to its dual is an example of a contravariant functor. The mapping that sends a vector space to its double dual is an example of a covariant functor.
We may now give a formal definition of an invariant (taken from \cite{armstrongMarkowitz}).
\begin{definition}
Let $C$ be a category and let $F$ be a covariant functor from $C$ to $\Set$. Then an {\em invariantly-defined
element} for $F$ is a map
\[
\phi: \ob(C) \to \Set
\]
such that $\phi(c) \in F(c)$ and $\phi(f c)=F(f)\phi(c)$ for all isomorphisms $f$ (recall that in set theory the elements of sets are themselves sets which is why the codomain of $\phi$ is $\Set$ even
though we think of the values of $\phi$ primarily as elements rather than as sets).
If $F$ is a contravariant functor, an invariantly-defined element is defined in the same way except we instead require that $\phi(f c)=F(f^{-1}) \phi(c)$
for all isomorphisms $f$.
If $F$ is a functor from category $C$ to category $D$ and if $D$ is a category whose morphisms are in fact functions, we say that $\phi$ is an invariantly-defined element for $F$ if it is an invariantly-defined element for $U\circ F$ where $U$ is the forgetful functor. \end{definition}
For example consider the category of smooth surfaces with morphisms given by isometries. Gauss's Theorema Egregium says that the Gaussian curvature is an invariantly-defined element for the contravariant functor $C^\infty$ which maps a surface to the set of smooth functions on that surface.
In general, if one performs a mathematical construction which does not involve arbitrary choices on invariantly-defined input, one will obtain an invariantly-defined output. To justify this statement rigorously one needs to show how to mirror the basic constructions of mathematics using category theory. This is discussed in more detail in \cite{armstrongMarkowitz}. As a result we say that a mathematical object is {\em manifestly invariantly defined} if it is constructed from invariantly-defined inputs without arbitrary choices. For example the square of the Gaussian curvature on a manifold is manifestly invariantly defined once one knows that the Gaussian curvature itself is invariantly defined. What makes the Theorema Egregium remarkable, is that the Gaussian curvature is not manifestly invariantly defined.
The notion of invariantly-defined elements is closely connected to the notion of invariance under the action of a group. Given a category $C$, we may write $\Aut c$ for the group of automorphisms of an object $c$. Let $D$ be a category whose morphisms are in fact functions. Given a functor $F:C\to D$ we define an action on the set $F(c)$ by \[ f(s)=F(f)(s) \] for $f \in \Aut c$ and $s \in F(c)$. It is easy to show that if $\phi$ is invariantly defined for $F$ then $\phi(c)$ is invariant under $\Aut c$ (see \cite{armstrongMarkowitz}).
When we come to define categories for markets, we will choose the objects and morphisms to ensure that financially interesting questions are manifestly invariantly defined. For example, the solutions sets for portfolio optimization problems in our markets will be invariantly defined. It follows that the solution sets for portfolio optimization problems will be invariant under automorphisms of the markets. For markets with large automorphism groups, this implies significant restrictions on the possible solutions of {\em any} financially interesting question in such a market.
One further notion that we will use from category theory is the notion of an equivalence of categories. Let us give the necessary definitions.
\begin{definition}
A {\em natural transformation}, $\eta$, from a functor $F:C\to D$ to a functor $G:C \to D$ is a family of morphisms
satisfying
\begin{enumerate}[nosep,label=(\roman*)]
\item For each $X \in \ob(C)$ we have a morphism $\eta_X:F(X)\to G(X)$ in $D$.
\item For every morphism $f:X \to Y$ in $C$ we have
\[
\eta_Y \circ F(f)=G(f) \circ \eta_X.
\]
\end{enumerate}
If each $\eta_X$ is an isomorphism, $\eta$ is called a {\em natural isomorphism}. \end{definition}
\begin{definition}
An {\em equivalence of categories} $C$ and $D$ consists of two covariant functors $F:C \to D$ and $G:D \to C$,
a natural isomorphism $\epsilon$ from $F\circ G$ to $\mathrm{id}_D$ and a natural isomorphism $\eta$ from $G \circ F$ to $\mathrm{id}_C$.
Here $\mathrm{id}_X$ denotes the identity functor acting on a category $X$. \end{definition}
We say that two categories are equivalent if an equivalence between the categories exists. We say that two categories $C$ and $D$ are {\em in duality} if $C$ is equivalent to the opposite category of $D$.
\end{appendices}
\end{document} |
\begin{document}
\title{Schemes for Parallel Quantum Computation \\ Without Local Control of Qubits} \author{S. C. Benjamin, Univ. of Oxford.} \author{s.benjamin@physics.ox.ac.uk} \date{\today} \maketitle
\begin{abstract} Typical quantum computing schemes require transformations (`gates') to be targeted at specific elements (`qubits'). In many physical systems, direct targeting is difficult to achieve; an alternative is to encode local gates into globally applied transformations. Here we demonstrate the minimum physical requirements for such an approach: a one-dimensional array composed of two alternating `types' of two-state system. Each system need be sensitive only to the {\em net} state of its nearest neighbors, i.e. the number in state `$\uparrow $' minus the number in `$\downarrow $'. Additionally, we show that all such arrays can perform quite general {\em parallel} operations. A broad range of physical systems and interactions are suitable: we highlight two examples.
\end{abstract}
Presently there is tremendous interest in the new field of quantum computation. Information is recognized as a physical quantity, with its representation and processing being governed by the laws of quantum mechanics.\ Rather than `bits', the fundamental units of classical information theory, we instead employ `qubits' which represent a general quantum superposition of `0' and `1'. A computation on a device containing $ N $ qubits is a sequence of unitary transformations within its $2^{N}$ dimensional Hilbert space. Researchers have already discovered quantum algorithms which exploit state superposition, entanglement and interference in order to solve certain problems more quickly than any known classical procedure \cite{SteaneRev}. Two important cases are those of factoring large numbers, where the quantum device has an exponential speed advantage \cite {factoring}, and the task of searching among $N$ elements, where a classical device requires time of order $N$ but the quantum device requires only $ \sqrt{N}$ \cite{search1}, or $\sqrt[3]{N}$ with a corresponding size cost \cite{our2DCA}.
Efforts toward experimental realization of a quantum computer (QC) have focused principally on NMR and atomic trap implementations \cite {NMR,SteaneIon}. Numerous recent proposals have also drawn attention to possible solid state realizations \cite{Kane,DiVen,our2DCA}. Typically such proposals demand manipulation of the Hamiltonian locally, on the scale of the individual component qubits. However this is not a fundamental requirement: it can be sufficient to apply only global manipulations to which all elements are subjected simultaneously. This would be a highly desirable economy for many implementations, because it would lower the number of channels by which the computer interacts with its environment, and hence reduce the decoherence rate. Moreover, it may enable new implementations where it is difficult or impossible to perform individual addressing (e.g. quantum dot arrays may be driven by EM radiation of a wavelength far greater than the dot-dot separation). Lloyd has suggested one such model \cite{LloydScience,sethLong} based on a one-dimensional cellular automaton (CA). The model consists of a line of `cells',\ where each cell is a quantum system possessing two long-lived internal eigenstates. The algorithm is represented by a series of update `rules' which are applied globally\ to all cells, so that there is {\em no need} to address units individually. To realize Lloyd's CA model one would need to produce three `types' of cell in the pattern $ABCABC$..., and moreover one must find a means of applying asymmetric rules such as, ``all cells of type $A$ now invert their state if, and only if, the left neighbor is in state 1 and the right is in state 0''. Clearly it is important to know if these are the minimum physical requirements. The present work demonstrates that they can in fact be relaxed significantly, to {\em two} cell types {\em without} the ability to distinguish the left neighbor from the right. 
These are the minimum requirements for any globally-driven system, given that we must have more than one cell `type' \cite{need2Types}. The simplifications enhance the practicality of the model; neighbor indistinguishability is particularly significant in broadening the range of potential implementations, two of which we later discuss. This paper also provides a mechanism, compatible with any CA\ computer, for performing operations in parallel. We note the implications in terms of device size and speed. Parallelism may be essential for quantum error correction schemes to function efficiently \cite {parErrorCor}.
Our scheme consists of two `types' of cell, A and B, alternating along a one-dimensional array. Each cell has two internal eigenstates $|\downarrow
\rangle $ and $|\uparrow \rangle $, and can represent any quantum superposition of these states. Each qubit of quantum information is represented by {\em four} consecutive cells: the qubit basis state $
|0\rangle $ is represented by $|\uparrow \uparrow \downarrow \downarrow
\rangle $ whilst the state $|1\rangle $ is represented by $|\downarrow
\downarrow \uparrow \uparrow \rangle $. The basis states of a qubit $X$ can therefore be compactly written as $\overline{xx}xx$, where $x$ corresponds to $\downarrow $\ if $X=0$ and $\uparrow $\ if $X=1$, with the opposite applying to $\overline{x}$. Figure 1(a) shows an array containing three qubits, each pair being separated by spacer cells in the $|\downarrow \rangle $ state (the minimum acceptable spacing is four cells \cite{webFigs} , but eight are used here for clarity). The array is subject to update rules specified by the notation $A_{f}^{{\bf U}}$ which means, each cell of type $ A $ is subjected to unitary transform ${\bf U}$ if, and only if, its `field' has value $f$. When the ${\bf U}$ is omitted a simple inversion is implied, $
|\downarrow \rangle $ $\rightleftarrows $ $|\uparrow \rangle $. The `field'
is defined as the number of nearest neighbors in state $|\uparrow \rangle $\
minus the number in state $|\downarrow \rangle $. This is the proper control variable since in a physical realization the cells will be aware of their neighbors through the {\em net} effect of, for example, their electrostatic fields.
In classical computing we have the idea of a universal set of gates, i.e. a set of elementary operations (such as AND, OR, NOT) which are sufficient to represent any classical algorithm. In quantum computing the same concept applies. We first consider the general `one-qubit gate', i.e. any chosen unitary transform applied to any particular qubit regardless of the other qubits in the computer. How can we single out one qubit, given that the array is structurally regular and our rules must be sent to all elements globally? One solution \cite{LloydScience} is to introduce a `control unit' (CU). Our CU is represented by six consecutive cells in the pattern $ \uparrow \uparrow \downarrow \downarrow \uparrow \uparrow $, which exists only in one place along the array. In Figs. 1(b) and 1(c) we utilize the CU\ by applying updates which move it relative to the qubits, together with an update sequence which has a net effect only on the qubit nearest the CU. Clearly, by varying the update sequence we could have transformed another qubit, or indeed simply moved the CU without altering any of them. Thus we can implement a general one-qubit gate. For a universal set we require a two-qubit gate: the `control-${\bf U}$' is more than sufficient. This gate applies the transform ${\bf U}$ to a certain qubit (referred to as the `target') if, and only if, a second qubit (the `control') is in state 1. Figure 2 shows the implementation of this gate schematically; an explicit depiction analogous to Fig.1(b) is also available \cite{webFigs}. The `cost' of restricting ourselves to global manipulations is now apparent: each qubit requires a total of eight physical cells (four for the encoding plus four spacers), and a 1-qubit gate requires about ten elementary pulses. These numbers would be somewhat smaller if we permitted ourselves more cell types and/or more complex interactions \cite{LloydScience}.
To input information we may exploit the cells at the ends of the array \cite {LloydScience}, for whom the possible values of the `field' variable are $ \{1,-1\}$ (in contrast to the $\{-2,0,2\}$ values for all other cells). We can use the updates $\{$ $A_{-1}$,$A_{1}$,$B_{-1}$,$B_{1}\}$ to manipulate the end states, and the other updates to shift-load those states into the array. The means of output will depend on the available measurement techniques. If a cell on one end were associated with a measuring device, then one would first swap the qubit to be measured with the qubit nearest the end (by a series of three CNOTs, for example), then move it onto the end cell by the reverse of the input technique. A superior output procedure would be possible if, for example, the cells of type $B$ had some third state `$\rightsquigarrow $' exhibiting rapid spontaneous decay to the $ \downarrow $ state. Then we could measure the state of a qubit anywhere along the array using the 1-qubit gate of Fig. 1(b) and choosing
${\bf U}$=$\left( \begin{array}{ccc} 0 & 0 & 1 \\ 0 & 1 & 0 \\ 1 & 0 & 0 \end{array} \right) $ in the basis $\{\downarrow ,\uparrow ,\rightsquigarrow \}$.
\noindent If the subject qubit was previously in state 1, then the transformation would leave its representative cell in the unstable state $ \rightsquigarrow $. From there it would decay back to $\downarrow $\ with an emission. The presence (or absence) of this emission could be detected and used to infer the state of the qubit. Repeated application of the transform would produce a stream of emissions (i.e. a fluorescence), thus increasing the detection efficiency. Note that the existence of such a dissipative, irreversible transition may be essential in order to implement quantum error correction efficiently. Furthermore, our chosen representation of the qubit basis states and the CU means that dissipative transitions can be used to prevent these objects from delocalizing \cite{longPaper}.
So far we have assumed that there is only one control unit (CU)\ in the computer. Consequently we cannot apply a gate at several points simultaneously. We could load an initial state containing $P$ CUs distributed along the array (e.g. 1 every 20 qubits) \cite{LloydScience}, although we would then be constrained to apply exactly $P$ identical gates simultaneously at {\em every} step, and always with the same spatial distribution. Completely general parallelism would allow us to apply a different number of simultaneous gates at each step, and at varying points along the array. How can this ideal be approached? We cannot directly create/annihilate CUs at specific locations because we are constrained to use global updates. Figure 3 depicts an alternative solution which is appropriate for any CA-like device. We increase the spacing between qubits considerably, and in {\em each} space we put a CU and a set of classical bits (using the same encoding employed for the qubits). Some of these classical bits encode a label, for example we might label each space uniquely using a binary number, and the others form an auxiliary `work-pad'. Together the CU and the classical bits effectively constitute an entire computer in the space between each qubit; we will refer to these as `sub-computers'. Now suppose we are `running' a parallelized quantum algorithm, and the next step calls for a specific 1- or 2-qubit gate $G$ to be applied simultaneously at $P$ points along our array of $N$ qubits. This operation would require $P$ CUs, located at {\em just} those points. However we initially have one CU in {\em each} of the $N$ `sub-computers'. Therefore we send an update sequence which causes a computation \cite{haveCCN} simultaneously within each `sub-computer': the label bits are the input and the output is a binary variable represented by some transformation applied/not applied to the CU. 
This transformation disables the CU: when we subsequently apply updates to move the CUs away from their sub-computers to perform the gate operation $G$ on neighboring qubits, this will only occur where the CUs are untransformed. One such transformation is shown in Fig. 3(b). Having thus implemented the step required by our parallel quantum algorithm, we can now reverse the computation previously applied to the `sub-computers' in order to return them to their initial state.
There are costs and constraints associated with using this procedure. The size of the array must be increased by a factor $f$ due to the inclusion of the `sub-computers'; unique labeling would imply $f$ of order $\ln (N)$. The time $\tau $ associated with the `sub-computer' computation must be less than $O(N)$ otherwise it would have been quicker to perform the parallel gates in series. One could not enable/disable a completely arbitrary sub-set of the $N$ CUs under this time constraint \cite{acknowledgeWim}, so our procedure does not efficiently implement a completely general arrangement of gates. However, there are a great many useful distributions of CUs which do correspond to sufficiently fast $\tau $. The most obvious examples include: all CUs, a given CU, one in every $2^{p}$ CUs, all CUs in some interval. For these and many other patterns, $\tau $\ is merely of order $\ln (N)$. An obvious variation is to place a `sub-computer' only every ten qubits, say. Another is to `nest' the procedure to provide parallel computation within the `sub-computers' at a cost of $\ln (\ln (N))$. The process performed by the `sub-computer' could be generalized to apply a range of transformations to the CU, corresponding to different subsequent gates operations on the qubits. Most interestingly one could generalize the classical bits in each `sub-computer' to qubits, so that the computation determining which CUs are disabled becomes a quantum process producing CUs in a superposition of the enabled/disabled state. It is unclear whether this could have significant advantages for algorithmic efficiency.
In all the procedures described above the cells are only sensitive to their immediate neighbors. In isolation a cell would have a certain energy gap between its two states; in the array environment this is split into distinct levels corresponding to the values of the field variable. However in a real system the cells would also be influenced by the states of non-neighboring cells, with the result that each level would be split into a multiplet of many levels. In order to drive a transition in reasonable time it would be desirable to address a multiplet collectively \cite{NMRpulse}; this could only be achieved if the multiplets are non-overlapping. This condition translates to a constraint on how short-range the physical cell-cell interaction must be. It is easy to show that any one-dimensional system with a symmetric interaction (right and left neighbors indistinguishable) has multiplets that are well separated if the interaction is $r^{-3}$ or shorter. Dipole elements such as nuclear or electron spins constitute one class of examples. Note that when the multiplets are well separated, it is possible to tolerate a degree of physical variation between cells which are nominally of the same type, since the effect of such variation is merely to broaden the multiplet correspondingly. Similarly, modest variations in the inter-cell spacing and coupling strength could be tolerated. Note also that if the interaction is not diagonal in the basis of the cell's states, the scheme still functions provided that the difference between the fundamental frequencies of the $A$\ and the $B$\ cells is large compared to the magnitude of any off-diagonal terms \cite{sethLong}.
The present scheme is suited to systems where it is experimentally difficult or impossible to target specific units for manipulation. One example involves the nuclear magnetic resonance (NMR) approach \cite{NMR} which has been successfully used to realize 3-qubit computers. Here the computer is a molecule possessing a number of spin-non-zero nuclei, the states of which are used to represent the qubits. Probably the most fundamental obstacle preventing experimentalists from extending the number of qubits, $N$, is the difficulty of distinguishing $N$ unique sets of energy levels. This obstacle is removed by our model, which could be realized by a linear molecule with $A$ and $B$ sites alternating along its length: we need not distinguish between any two sites of a given type, hence we have only two fixed sets of energy levels {\em regardless} of $N$. For a second example, consider the solid state realization recently proposed by Kane \cite{Kane}. Here qubits are again realized by the states of nuclear spins, but these belong to donor impurity atoms embedded in Si. In order to gain control over specific qubits, Kane introduces a set of electrostatic gates located near the donors, with two gates being required for each donor. These electrodes represent both a principal source of decoherence in the system, and a major difficulty for experimental realization. By switching to the model presented here, where there is no need to address qubits individually, the essential role of the electrodes is removed. If, as seems entirely plausible \cite {longPaper}, their remaining functions can be obviated by design modifications, then it will be possible to dispense with them entirely.
To conclude, we have exhibited a model of quantum computation which requires only global manipulations and yet has very modest physical requirements. We have shown that it is possible to efficiently perform non-trivial parallel operations on such a model. The model operates with interaction ranges as great as $r^{-3}$, and is thus applicable to a wide range of QC implementations where it may significantly reduce the obstacles to experimental realization.
The author would like to thank Seth Lloyd, Mike Mosca and Wim Van Dam for useful discussions. This work was supported by an EPSRC\ fellowship.
\begin{references} \bibitem{SteaneRev} A. M. Steane, Rep. Prog. Phys. {\bf 61}, 117 (1998).
\bibitem{factoring} A result due to P. Shor; for an analysis see, e.g., A. Ekert and R. Jozsa, Rev. Mod. Phys. {\bf 68}, 733 (1996).
\bibitem{search1} L. Grover, Phys. Rev. Lett. {\bf 79}, \ 325 (1997).
\bibitem{our2DCA} S. C. Benjamin and N. F. Johnson, to appear in Phys. Rev. A.
\bibitem{NMR} J. A. Jones and M. Mosca, J. Chem. Phys. {\bf 109}, 1648 (1998).
\bibitem{SteaneIon} A. M. Steane, Appl. Phys. B {\bf 64}, 623 (1997).
\bibitem{Kane} B. E. Kane, Nature {\bf 393}, 133 (1998).
\bibitem{DiVen} R. Vrijen {\em et al}, quant-ph/9905096.
\bibitem{LloydScience} S. Lloyd, Science {\bf 261}, 1569 (1993).
\bibitem{sethLong} Seth Lloyd, ``Programming Pulse Driven Quantum Computers'', Los Alamos Preprint (1992).
\bibitem{need2Types} Using two alternating types guarantees that while a cell is physically subjected to an update rule, its neighbors, on whom the rule is defined, are static.
\bibitem{parErrorCor} D. Aharonov and M. Ben-Or, {\em Proc. 29}$^{th}${\em \ ACM Symposium on Theory of Comp.} (1997 ACM Press).
\bibitem{webFigs} For additional Figures, including a compact form of the useful Control-Control-${\bf U}$ gate, please see web page www.qubit.org/research/Nano/1DCA/Figures.html.
\bibitem{longPaper} S. C. Benjamin, in preparation.
\bibitem{haveCCN} This computation may be entirely classical. A suitable universal classical gate can be obtained from the CCU of Ref. \cite{webFigs} by choosing ${\bf U}$ to be a simple inversion (i.e. a NOT).
\bibitem{acknowledgeWim} There must be patterns of CUs which {\em cannot} be produced using a sequence of less than O$(N)$ updates, given that the number of distinct updates is limited. Otherwise we would have a means of compressing $N$ arbitrary bits, corresponding to the $N$ enabled/disabled CUs, into less than O$(N)$ symbols. The author thanks W. van Dam for this observation.
\bibitem{NMRpulse} For an introduction to the art of addressing multiplets through shaped pulses, composite pulses, etc., see e.g. R. Freeman, {\em Spin Choreography} (Spektrum, Oxford, 1997). \end{references}
\noindent {\bf Fig.1 }A section of the array containing the control unit (CU) and three qubits, $X$,$Y$\ \& $Z$, each encoded over four cells. All other cells are in state `$\downarrow $'.{\bf \ }White cells are of type $A$, shaded cells of type $B$. (a) The effect of the update $B_{0}$: the CU moves one cell to the left, all the qubits move one cell to the right, yet the form of the qubits and the CU are preserved. (b) \& (c) A general `one-qubit' gate. For clarity the states `$\downarrow $' are written as `$-$ '. In response to the updates $A_{0}$,$B_{0}$,$A_{0}$,$B_{0}...$ the CU passes through qubit $X$, leaving it {\em unchanged}, and continues until mid-way through passing qubit $Y$. Then additional updates are applied: the effect of the last is to apply a unitary transform ${\bf U}$ only to the cell representing the $Y$ qubit, yielding qubit $T={\bf U}.Y$. As indicated in (c), re-applying the updates in reverse order then moves the CU away from $T$.
\noindent {\bf Fig.2 }Schematic of the `two-qubit' control-${\bf U}$ process. The target qubit is $S$, and the control is $Y$. The CU moves transparently past the $Z$ qubit, and continues until mid-way through passing $Y$. To this point the process is identical to Fig.~1(c), however now the CU itself is subject to a transformation: it is altered from $ \uparrow \uparrow \downarrow \downarrow \uparrow \uparrow $ to $\uparrow \uparrow \uparrow \uparrow \uparrow \uparrow $ if, and only if, $Y=0$. Both forms of the CU will pass transparently through the intervening qubits $W$, $X$ in answer to the same update sequence. When qubit $S$ is reached a new sequence is applied, the last of which subjects $S$ to a transform ${\bf U}$ \ if, and only if, the CU arrived in its unaltered form. Finally we re-apply all the updates preceding the last\ {\em in reverse order} so as to return the CU\ to its initial state. An explicit depiction of the process is available from Ref. \cite{webFigs}.
\noindent {\bf Fig.3 }(a) Generalization from the simple serial model (i) to the parallel model (ii) employing `sub-computers'. (b) One means of disabling the CU simply by delaying it. The CU is delayed (or not delayed) depending only on the states of four auxiliary bits, which have been set by the preceding `sub-computer' computation. The sequence of updates applied is the {\em same} for both cases. The delayed CU is in an `empty' region of the array when the non-delayed CU has reached its target qubit, here denoted $Q$. An explicit depiction of the process is available from Ref. \cite{webFigs}.
\noindent Caption for the Web Figures - Included on the web \cite{webFigs}.
\noindent {\bf NOT\ INTENDED\ FOR\ JOURNAL\ PUBLICATION
}
\noindent {\bf Web: \ FigureA.pdf.} The explicit description of the control-$ {\bf U}$ process which is shown schematically in Figure 2. Note that the update sequence is the same in both parts of the Figure, but only in the $ Y=1 $ case does the last update, $B_{2}^{{\bf U}}$, have an effect. After this update has been applied, the preceding updates would be re-applied in reverse order to complete the process.
The two blue rows show the control unit having moved from the $X$ qubit to the $W$ without changing its form. It follows that the CU\ could cross any number of intervening qubits to reach its target.
Note: in this Figure we use only 4 spacer cells between each qubit; this is the minimum that permits qubit gate operations. A consequence of this tight packing is that one of the neighbors of the target qubit must be disturbed during the operation (here $W$ is disturbed). However the final update, $ B_{2}^{{\bf U}}$, affects only the target qubit and hence the disturbance of the neighbor is undone when the preceding updates are re-applied in reverse order.
\noindent {\bf Web: FigureB.pdf.} An explicit depiction of the delaying transformation which is shown schematically in Figure 3(b). Note that the transform is applied/not applied to the control unit depending only on the values stored in the four auxiliary bits, i.e. the update sequence is the same for both cases. The auxiliary bits will have been set by the preceding `sub-computer' calculation.
\noindent {\bf Web: FigureC.pdf.} The most compact implementation of the important Control-Control-${\bf U}$ gate. The transformation ${\bf U}$ is applied to the target qubit $W$ if, and only if, both the control qubits $X$ and $Y$ are in state 1.
\end{document} |
\begin{document}
\title{Persistent hyperdigraph homology and persistent hyperdigraph Laplacians}
\paragraph{Abstract}
Hypergraphs are useful mathematical models for describing complex relationships among members of a structured graph, while hyperdigraphs serve as a generalization that can encode asymmetric relationships in the data. However, obtaining topological information directly from hyperdigraphs remains a challenge. To address this issue, we introduce hyperdigraph homology in this work. We also propose topological hyperdigraph Laplacians, which can extract both harmonic spectra and non-harmonic spectra from directed and internally organized data. Moreover, we introduce persistent hyperdigraph homology and persistent hyperdigraph Laplacians through filtration, enabling the capture of topological persistence and homotopic shape evolution of directed and structured data across multiple scales. The proposed methods offer new multiscale algebraic topology tools for topological data analysis.
\paragraph{Keywords}
Topological hyperdigraph, Topological hyperdigraph Laplacians, Homology, Filtration, Persistence.
\tableofcontents
\section{Introduction}\label{section:introduction}
Topology and homology study the invariant properties of geometric objects under continuous deformations, providing a high level of abstraction for these objects \cite{kaczynski2004computational}. The well-known joke that topologists cannot distinguish between a coffee mug and a doughnut highlights the difficulty of topology in describing real-world objects. However, topological data analysis (TDA) has recently emerged as a way to overcome this difficulty. TDA facilitates topological deep learning, an emerging paradigm in data science that has been successful in various applications \cite{townsend2020representation,cang2017topologynet}. The main tool of TDA is persistent homology, which creates a family of multiscale topological spaces from a given dataset by filtration, allowing the extraction and analysis of the topological invariants of the data at various scales \cite{zomorodian2004computing,edelsbrunner2008persistent, bubenik2017persistence}. Through comparative analysis, persistent homology can be used to infer the shape of data \cite{carlsson2009topology}. However, a limitation of persistent homology is that at each dimension, the Betti number only counts the number of independent components and does not describe the properties of each component. For instance, a heterogeneous ring is counted the same as a homogeneous ring at dimension 1. To address this limitation, persistent cohomology was introduced, which embeds both geometric and non-geometric properties of the data into topological invariants \cite{cang2020persistent}. Moreover, topological invariants are qualitative rather than quantitative. For example, at dimension 1, a ring with five members is counted the same as a ring with six members. These issues can limit the power of persistent homology in network analysis and other applications.
The graph Laplacian was originally introduced by Kirchhoff in 1847 to analyze electrical networks \cite{kirchhoff1847ueber}. For instance, the second-smallest eigenvalue, also known as the Fiedler eigenvalue \cite{fiedler1973algebraic}, describes the algebraic connectivity of a graph. In 1944, Eckmann generalized the graph Laplacian to the simplicial complex setting, resulting in what is now known as combinatorial Laplacians. Combinatorial Laplacians are the discrete counterpart of Hodge Laplacians, which are also called Laplace-de Rham operators in differential geometry. The associated de Rham cohomology is often used to study the topology of manifolds and the behavior of vector fields on them. The connection between Hodge Laplacians on a differentiable manifold and combinatorial Laplacians on a point cloud is non-trivial and was studied in the context of discrete exterior calculus to understand discrete equivalents of differential forms \cite{hirani2003discrete,desbrun2005discrete,arnold2006finite}. A notable property of both Hodge Laplacians and combinatorial Laplacians is that their harmonic spectra give rise to corresponding topological invariants or Betti numbers, which is why we refer to them as topological Laplacians \cite{wei2023topological}. There are several other topological Laplacians, such as sheaf Laplacians defined on cellular sheaves \cite{hansen2019toward} and path Laplacians \cite{gomes2019path} derived from path complexes and path homology introduced by Yau and coworkers \cite{grigor2012homologies,grigor2020path}. Path homology and persistent path homology provide a topological analysis of digraphs and offer promising applications in molecular and material sciences \cite{chen2023path}. Compared to corresponding homology theories, topological Laplacians are capable of describing the quantitative properties of the underlying system in their non-harmonic spectra \cite{horak2013spectra}.
In 2019, Wei and coworkers extended the power of topological Laplacians by introducing persistent topological Laplacians, which offer superior performance over persistent homology \cite{chen2019evolutionary,wang2019persistent}. Persistent Laplacian, also known as persistent spectral graphs or persistent combinatorial Laplacian, was introduced using filtration by Wang et al. \cite{wang2019persistent}. It has been studied mathematically by Memoli et al. \cite{memoli2022persistent} and Liu et al. \cite{liu2023algebraic} and explored in biological contexts by Meng et al. \cite{meng2021persistent}, Chen et al. \cite{chen2022persistent}, Wee et al. \cite{wee2022persistent}, and Qiu and Wei \cite{qiu2023persistent}. An open-source online package has also been developed \cite{wang2021hermes}. The evolutionary de Rham-Hodge Laplacians, or persistent Hodge Laplacians, were defined on a family of evolving manifolds \cite{chen2021evolutionary}. Discrete exterior calculus was utilized to implement various boundary conditions on manifolds with boundaries, which helped to accurately compute topological invariants from the harmonic spectra of persistent Hodge Laplacian operators. Persistent Hodge Laplacians provided a mathematical model for musical instruments that cannot be described by persistent homology \cite{wei2023topological}. Additionally, Wei and coworkers introduced persistent sheaf Laplacians on cellular sheaves \cite{wei2021persistent} and persistent path Laplacians on path complexes \cite{wang2023persistent}. The former allows for the embedding of heterogeneous information, such as atomic partial charges, into topological variants, while the latter facilitates the topological analysis of digraphs and directed networks. Ameneyro et al. proposed persistent Dirac operators for the efficient quantum computation of persistent Betti numbers \cite{ameneyro2022quantum}. This approach utilizes the relationship between Dirac operators and Laplacian operators.
It has been demonstrated that persistent (topological) Laplacians can capture the homotopic shape evolution of data that is not present in the corresponding persistent homology analysis \cite{wei2023topological}. An interesting property of non-harmonic eigenvalues is that they are discontinuous when there is a topological change.
These new persistent topological Laplacians have significantly expanded the application domain and power of TDA.
None of the methods mentioned earlier describe the internal organization of a network. Hypergraph is a popular mathematical model for data with complex relationships, and it has been widely applied in physics \cite{qu2013encoding}, computer science \cite{eiter2002hypergraph}, and engineering \cite{akhremtsev2017engineering}. However, traditional hypergraphs do not capture the topological information in the data. To address this issue, Wu and coworkers generalized traditional hypergraphs into topological hypergraphs using simplicial complexes \cite{ren2018hodge,bressan2019embedded}. The embedded homology of hypergraphs was introduced to study the topological invariants of hypergraphs \cite{bressan2019embedded}. Hodge-decomposition type of weighted hypergraphs was also studied \cite{ren2018hodge}, and discrete Morse functions for hypergraphs were considered \cite{ren2018discrete}. Additionally, Grbic et al. introduced the concept of super-hypergraphs and studied the embedded homology of super-hypergraphs \cite{grbic2022aspects}. More recently, Liu et al. introduced filtration to topological hypergraph Laplacians \cite{liu2021persistent}.
Hypergraphs do not apply to directed graphs (digraphs) and directed networks. To address this limitation, hyperdigraphs have been introduced as a generalization of digraphs \cite{gallo1993directed,berge1973graphs,dorfler1980category}. Essentially, a hyperdigraph is a hypergraph with an additional direction assigned to each hyperedge. As stated in \cite{ausiello2017directed}, ``hyperdigraphs have been applied in several domains where we are interested in representing implication systems (database theory, logics, artificial intelligence, etc.)". As a powerful mathematical model for data analysis, hyperdigraphs have been widely applied in various fields of computer science \cite{gallo1998directed,ramaswamy1997using}. Since hyperdigraphs can encode the orientation information between different objects, they have a natural advantage for material structure modeling, molecular group interaction analysis, biological system analysis, and more. However, because of the complexity of hyperdigraphs, there is currently no established framework for topological hyperdigraphs or hyperdigraph homology. Efforts have been made to analyze hyperdigraphs by identifying path complexes on them with the help of path homology theory \cite{muranov2021path}. Nevertheless, constructing topological hyperdigraphs requires the use of embedded homology techniques \cite{ren2018hodge,bressan2019embedded} or equivalent methods specifically tailored for hyperdigraph homology.
This work introduces several new TDA models: hyperdigraph homology (HDGH), topological hyperdigraph Laplacians (THDGLs), persistent hyperdigraph homology (PHDGH), and persistent hyperdigraph Laplacians (PHDGLs). To introduce a topological structure, we use sequences as the building blocks for hyperdigraph homology. The chain complex of the hyperdigraph homology is the maximal chain complex embedded into the space generated by these sequences. We develop an embedded homology technique to create HDGH. To construct topological hyperdigraph Laplacians, we use boundary operators and adjoints. Additionally, we define PHDGH and PHDGLs by equipping a filtration process to analyze geometric objects at various filtration scales. We propose both a volume-based filtration and a distance-based filtration for PHDGH and PHDGLs.
The remainder of the paper is organized as follows. In the next section, we review hypergraph homology and topological hypergraph Laplacians to establish notations. In Section \ref{section:hyperdigraph}, we propose hyperdigraph homology and topological hyperdigraph Laplacians. In Section \ref{section:persistence_on_hyper}, we define persistence on hyperdigraph homology and topological hyperdigraph Laplacians. Finally, we demonstrate the application of our proposed persistent hyperdigraph Laplacians to a protein-ligand complex in Section \ref{section:application}.
\section{A brief review of topological hypergraphs}\label{section:hypergraph}
Hypergraph, as a kind of generalization of the simplicial complex, has attracted considerable attention in theory and application. The embedded homology provides a realization of topological hypergraphs \cite{bressan2019embedded}. In this section, we review hypergraph homology and topological hypergraph Laplacians. From now on, $\mathbb{K}$ is always assumed to be a ground field and $|X|$ denotes the number of elements in a finite set $X$.
Let $V$ be a nonempty finite ordered set. Let $\mathbf{P}(V)$ denote the power set of $V$ excluding the empty set, that is, the set of nonempty subsets of $V$. So we have that $\mathbf{P}(V)=\coprod\limits_{n=1}^{|V|}\mathbf{P}_{n}(V)$, where $\mathbf{P}_{n}(V)$ is the set of subsets with $n$ elements in $V$.
A \emph{hypergraph} is a pair $\mathcal{H}=(V,E)$, where $E$ is a subset of $\mathbf{P}(V)$. The set $E$ is called the hyperedge set and an element $e\in E\cap \mathbf{P}_{n+1}(V)$ is called an $n$-hyperedge.
In particular, the hypergraph $\mathcal{H}(V)=(V,\mathbf{P}(V))$ is called a \emph{complete hypergraph}. A hypergraph $\mathcal{H}=(V,E)$ can be regarded as a sub hypergraph of its associated complete hypergraph $\mathcal{H}(V)$. It is worth noting that a complete hypergraph is exactly an abstract simplicial complex.
Now, let $\mathcal{H}=(V,E)$ be a hypergraph. Let $C_{p}(\mathcal{H};\mathbb{K})$ be the $\mathbb{K}$-linear space generated by the $p$-hyperedges of $\mathcal{H}$. Then $C_{\ast}(\mathcal{H};\mathbb{K})=(C_{p}(\mathcal{H};\mathbb{K}))_{p\geq 0}$ is a graded $\mathbb{K}$-linear space. In particular, let $C_{p}(V;\mathbb{K})$ be the $\mathbb{K}$-linear space generated by the $p$-hyperedges of $\mathcal{H}(V)$. Then $C_{\ast}(V;\mathbb{K})=(C_{p}(V;\mathbb{K}))_{p\geq 0}$ is a chain complex with the boundary operator $d_{p}:C_{p}(V;\mathbb{K})\to C_{p-1}(V;\mathbb{K})$ given by
\begin{equation}
d_{p}\{v_{0},v_{1},\dots,v_{p}\}=\sum\limits_{i=0}^{p}(-1)^{i}\{v_{0},\dots,\widehat{v_{i}},\dots,v_{p}\},\quad p\geq 1.
\end{equation}
Here, $\widehat{v_{i}}$ means the element $v_{i}$ is omitted. We make the convention $d_{0}\{v_{0}\}=0$ for all $v_{0}\in V$.
The boundary operator $d_{p}$ on $C_{p}(V;\mathbb{K})$ restricts to a map $d_{p}:C_{p}(\mathcal{H};\mathbb{K})\to C_{p-1}(V;\mathbb{K})$. Then we have the infimum chain complex defined by
\begin{equation}\label{equation:infimum_chaincomplex}
\mathrm{Inf}_{p}(\mathcal{H};\mathbb{K})=\{x\in C_{p}(\mathcal{H};\mathbb{K})|d_{p}x\in C_{p-1}(\mathcal{H};\mathbb{K})\}
\end{equation}
for $p\geq 1$ and $\mathrm{Inf}_{0}(\mathcal{H};\mathbb{K})=C_{0}(\mathcal{H};\mathbb{K})$. It can be verified that $\mathrm{Inf}_{\ast}(\mathcal{H};\mathbb{K})=(\mathrm{Inf}_{p}(\mathcal{H};\mathbb{K}))_{p\geq 0}$ is indeed a chain complex.
\begin{definition}
The \emph{embedded homology} of a hypergraph $\mathcal{H}$ is defined by
\begin{equation}
H_{p}(\mathcal{H};\mathbb{K})=H_{p}(\mathrm{Inf}_{\ast}(\mathcal{H};\mathbb{K})),\quad p\geq 0.
\end{equation}
\end{definition}
In this work, the topological hypergraph or hypergraph homology considered is the embedded homology of hypergraphs. The corresponding \emph{$p$-th Betti number} for the hypergraph $\mathcal{H}$ is defined by $\beta_{p}=\dim H_{p}(\mathcal{H};\mathbb{K})$ for $p\geq 0$.
\begin{proposition}[\cite{bressan2019embedded}]
The homology $H_{\ast}(-;\mathbb{K}):\mathbf{Hyper}\to \mathbf{Vec}_{\mathbb{K}}$ is a functor from the category of hypergraphs to the category of $\mathbb{K}$-linear spaces.
\end{proposition}
Now, consider the case $\mathbb{K}=\mathbb{R}$. Endow $C_{\ast}(\mathcal{H};\mathbb{K})$ with the standard inner product given by
\begin{equation*}
\langle x,y\rangle=\left\{
\begin{array}{ll}
1, & \hbox{$x=y$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.
\end{equation*}
Here, $x,y$ are hyperedges of $\mathcal{H}$. Thereby, $C_{\ast}(\mathcal{H};\mathbb{R})$ is a finite-dimensional inner product space. The infimum chain complex $\mathrm{Inf}_{\ast}(\mathcal{H};\mathbb{R})$ inherits the inner product structure of $C_{\ast}(\mathcal{H};\mathbb{R})$. We also denote the boundary operator on $\mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})$ by $d_{p}:\mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})\to \mathrm{Inf}_{p-1}(\mathcal{H};\mathbb{R})$. Then, we have the adjoint operator $(d_{p})^{\ast}:\mathrm{Inf}_{p-1}(\mathcal{H};\mathbb{R})\to \mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})$ of $d_{p}$ with respect to the above inner product.
\begin{definition}
The \emph{topological hypergraph Laplacian} $\Delta^{\mathcal{H}}_{p}:\mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})\to \mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})$ of $\mathcal{H}$ is defined by
\begin{equation*}
\Delta^{\mathcal{H}}_{p}=(d_{p})^{\ast}\circ d_{p}+d_{p+1}\circ (d_{p+1})^{\ast},\quad p\geq 0.
\end{equation*}
\end{definition}
In particular, if $\mathcal{H}$ is an abstract simplicial complex, then the embedded homology of $\mathcal{H}$ coincides with the simplicial homology of $\mathcal{H}$. Moreover, the topological hypergraph Laplacian of $\mathcal{H}$ reduces to the Laplacian of simplicial complexes.
We choose a family of standard orthogonal bases of $\mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})$ for $p\geq 0$. Let $B_{p}$ be the representation matrix of $d_{p}$ with respect to the chosen standard basis. Then the representation matrix \footnote{Here, the representation matrix is given by the left multiplication action on the basis. For the right multiplication action on the basis, the Laplacian matrix $L_{p}^{\mathcal{H}}=B_{p+1}B_{p+1}^{T}+B_{p}^{T}B_{p}$.} of $\Delta^{\mathcal{H}}_{p}$ is given by
\begin{equation}\label{equation:laplacian}
L_{p}^{\mathcal{H}}=B_{p+1}^{T}B_{p+1}+B_{p}B_{p}^{T}.
\end{equation}
Here, $B_{p}^{T}$ denotes the transpose matrix of $B_{p}$.
The following proposition shows that all the eigenvalues of $\Delta^{\mathcal{H}}_{p}$ are non-negative.
\begin{proposition}
The operator $\Delta^{\mathcal{H}}_{p}$ is self-adjoint and non-negative definite.
\end{proposition}
\begin{proof}
Obviously, for any $x,y\in\mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})$, we have
\begin{equation*}
\langle \Delta^{\mathcal{H}}_{p}x,y\rangle=\langle d_{p}x,d_{p}y\rangle+\langle (d_{p+1})^{\ast}x,(d_{p+1})^{\ast}y\rangle=\langle x,\Delta^{\mathcal{H}}_{p}y\rangle.
\end{equation*}
On the other hand, one has
\begin{equation*}
\langle \Delta^{\mathcal{H}}_{p}x,x\rangle=\langle d_{p}x,d_{p}x\rangle+\langle (d_{p+1})^{\ast}x,(d_{p+1})^{\ast}x\rangle\geq 0.
\end{equation*}
Thus $\Delta^{\mathcal{H}}_{p}$ is self-adjoint and non-negative definite.
\end{proof}
By the algebraic Hodge decomposition theorem, one has the following.
\begin{proposition}
$\mathrm{Inf}_{p}(\mathcal{H};\mathbb{R})=\ker \Delta^{\mathcal{H}}_{p}\oplus \mathrm{Im}d_{p}\oplus\mathrm{Im}(d_{p+1})^{\ast}$. Moreover, we have $\ker \Delta^{\mathcal{H}}_{p}=\ker d_{p}\cap \ker (d_{p+1})^{\ast}\cong H_{p}(\mathcal{H};\mathbb{R})$.
\end{proposition}
The above proposition shows that the number of zero eigenvalues of $\Delta^{\mathcal{H}}_{p}$ equals the $p$-th Betti number of $\mathcal{H}$.
\begin{example}\label{example:hypergraph1}
Let $\mathcal{H}=(V,E)$ be a hypergraph with $V=\{1,2,3\}$ and
\begin{equation*}
E=\{\{1\},\{2\},\{1,2\},\{1,3\},\{2,3\},\{1,2,3\}\}.
\end{equation*}
Thus, one has
\begin{eqnarray*}
C_{0}(\mathcal{H};\mathbb{R}) &=& \mathrm{span}\{\{1\},\{2\}\}, \\
C_{1}(\mathcal{H};\mathbb{R}) &=& \mathrm{span}\{\{1,2\},\{1,3\},\{2,3\}\}, \\
C_{2}(\mathcal{H};\mathbb{R}) &=& \mathrm{span}\{\{1,2,3\}\}.
\end{eqnarray*}
By Eq. (\ref{equation:infimum_chaincomplex}), we obtain
\begin{eqnarray*}
\mathrm{Inf}_{0}(\mathcal{H};\mathbb{R}) &=& \mathrm{span}\{\{1\},\{2\}\}, \\
\mathrm{Inf}_{1}(\mathcal{H};\mathbb{R}) &=& \mathrm{span}\{\{1,2\},\{1,3\}-\{2,3\}\}, \\
\mathrm{Inf}_{2}(\mathcal{H};\mathbb{R}) &=& \mathrm{span}\{\{1,2,3\}\}.
\end{eqnarray*}
The infimum complex $\mathrm{Inf}_{\ast}(\mathcal{H};\mathbb{R})$ refers to the maximal sub chain complex contained in the $\mathbb{K}$-linear space $C_{\ast}(\mathcal{H};\mathbb{R})$.
Here, the standard orthogonal basis \footnote{It is worth pointing out that in the work of persistent path Laplacian \cite{wang2023persistent}, the standard orthogonal basis was not chosen, and as a result, the calculated non-zero eigenvalues were not unique.} of $\mathrm{Inf}_{\ast}(\mathcal{H};\mathbb{R})$ is chosen as follows,
\begin{equation*}
\{1\},\{2\},\{1,2\},\frac{1}{\sqrt{2}}\{1,3\}-\frac{1}{\sqrt{2}}\{2,3\},\{1,2,3\}.
\end{equation*}
Then the boundary matrices are given by
\begin{eqnarray*}
d_0\left(
\begin{array}{cc}
\{1\}\\ \{2\} \\
\end{array}
\right)&=&\left(
\begin{array}{cc}
0\\ 0 \\
\end{array}
\right),\\
d_{1}\left(
\begin{array}{c}
\{1,2\} \\
\frac{1}{\sqrt{2}}\{1,3\}-\frac{1}{\sqrt{2}}\{2,3\} \\
\end{array}
\right) &=& \left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)\left(
\begin{array}{c}
\{1\} \\
\{2\} \\
\end{array}
\right), \\
d_{2}\{1,2,3\} &=& \left(
\begin{array}{cc}
1 & -\sqrt{2} \\
\end{array}
\right)\left(
\begin{array}{c}
\{1,2\} \\
\frac{1}{\sqrt{2}}\{1,3\}-\frac{1}{\sqrt{2}}\{2,3\} \\
\end{array}
\right).
\end{eqnarray*}
Thus the representation matrices of the Laplacians $\Delta^{\mathcal{H}}_{0},\Delta^{\mathcal{H}}_{1}, $ and $ \Delta^{\mathcal{H}}_{2}$ are listed as follows.
\begin{equation*}
L_{0}^{\mathcal{H}}=\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)^{T}\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)=\left(
\begin{array}{cc}
\frac{3}{2} & -\frac{3}{2} \\
-\frac{3}{2} & \frac{3}{2} \\
\end{array}
\right).
\end{equation*}
\begin{equation*}
L_{1}^{\mathcal{H}}=\left(
\begin{array}{cc}
1 & -\sqrt{2} \\
\end{array}
\right)^{T}\left(
\begin{array}{cc}
1 & -\sqrt{2} \\
\end{array}
\right)+\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)^{T}=\left(
\begin{array}{cc}
3 & 0 \\
0 & 3 \\
\end{array}
\right).
\end{equation*}
\begin{equation*}
L_{2}^{\mathcal{H}}=\left(
\begin{array}{cc}
1 & -\sqrt{2} \\
\end{array}
\right)\left(
\begin{array}{cc}
1 & -\sqrt{2} \\
\end{array}
\right)^{T}=3.
\end{equation*}
Therefore, the spectra for the Laplacian matrices $L_{0}^{\mathcal{H}}$, $L_{1}^{\mathcal{H}}$, and $L_{2}^{\mathcal{H}}$ are $\{0,3\}$, $\{3,3\}$, and $\{3\}$, respectively.
\end{example}
\begin{figure}
\caption{
{\bf a} Illustration of hypergraph $\mathcal{H}$ in Example \ref{example:hypergraph2}.
{\bf b}, {\bf c}, and {\bf d} Illustration of 0-hyperedges, 1-hyperedges, and 2-hyperedges, respectively. The solid black vertices indicate 0-hyperedges, purple edges indicate 1-hyperedges, and pink areas indicate 2-hyperedges. The dashed hollow circle indicates a vertex that is not 0-hyperedge.}
\label{figure:hypergraph2}
\end{figure}
\begin{example}\label{example:hypergraph2}
As shown in Figure \ref{figure:hypergraph2}{\bf a}, let $\mathcal{H}=(V,E)$ be a hypergraph with vertex set $V=\{0, 1, 2, 3, 4, 5\}$ and the hyperedge set $E$, where $E$ contains the 0-hyperedges $E^0=\{\{1\}, \{2\}, \{3\}, \{4\}, \{5\}\}$ (Figure \ref{figure:hypergraph2}{\bf b}), 1-hyperedges $E^1 = \{\{0, 1\}, \{1, 2\}, \{1, 4\}, \{2, 3\}, \{2, 4\}$, $\{2, 5\}, \{3, 4\}\}$ (Figure \ref{figure:hypergraph2}{\bf c}), and the 2-hyperedges $E^2=\{\{0, 1, 2\}, \{0, 1, 5\}, \{1, 2, 4\}, \{2, 3, 4\}\}$ (Figure \ref{figure:hypergraph2}{\bf d}). To follow Eq. (\ref{equation:infimum_chaincomplex}), the standard orthogonal basis of $\mathrm{Inf}_{*}(\mathcal{H};\mathbb{R})$ is chosen as follows,
\begin{equation*}
\{1\},\{2\},\{3\},\{4\},\{5\};\{1,2\}, \{1,4\}, \{2,3\}, \{2,4\},\{2, 5\}, \{3, 4\};\{1, 2, 4\}, \{2, 3, 4\}.
\end{equation*}
Then, the representation matrices of $d_0$,$d_1$, and $d_2$ are given as follows,
\begin{eqnarray*}
d_0\left(
\begin{array}{cc}
\{1\}\\ \{2\}\\ \{3\}\\ \{4\}\\ \{5\} \\
\end{array}
\right)&=&\left(
\begin{array}{cc}
0\\ 0\\ 0\\ 0\\ 0 \\
\end{array}
\right), \\
d_{1}\left(
\begin{array}{c}
\{1,2\}\\ \{1,4\}\\ \{2,3\}\\ \{2,4\}\\ \{2, 5\}\\ \{3, 4\}\\
\end{array}
\right)&=&\left(
\begin{array}{ccccc}
-1& 1& 0&0&0\\
-1& 0& 0&1&0\\
0&-1& 1&0&0\\
0&-1& 0&1&0\\
0&-1& 0&0&1\\
0& 0&-1&1&0\\
\end{array}
\right)\left(
\begin{array}{cc}
\{1\}\\ \{2\}\\ \{3\}\\ \{4\}\\ \{5\} \\
\end{array}
\right), \\
d_{2}\left(
\begin{array}{c}
\{1, 2, 4\}\\ \{2, 3, 4\}\\
\end{array}
\right) &=& \left(
\begin{array}{cccccc}
1&-1&0& 1&0&0\\
0& 0&1&-1&0&1\\
\end{array}
\right)\left(
\begin{array}{c}
\{1,2\}\\ \{1,4\}\\ \{2,3\}\\ \{2,4\}\\ \{2, 5\}\\ \{3, 4\}\\
\end{array}
\right).
\end{eqnarray*}
Based on Eq. (\ref{equation:laplacian}), the representation matrices of the Laplacians are listed as follows,
\begin{eqnarray*}
L_{0}^{\mathcal{H}}&=&\left(
\begin{array}{ccccc}
2& -1& 0& -1& 0\\
-1& 4& -1& -1&-1\\
0& -1& 2& -1& 0\\
-1& -1& -1& 3& 0\\
0& -1& 0& 0& 1\\
\end{array}
\right), \\
L_{1}^{\mathcal{H}}&=&\left(
\begin{array}{cccccc}
3& 0& -1& 0& -1& 0\\
0& 3& 0& 0& 0& 1\\
-1& 0& 3& 0& 1& 0\\
0& 0& 0& 4& 1& 0\\
-1& 0& 1& 1& 2& 0\\
0& 1& 0& 0& 0& 3\\
\end{array}
\right), \\
L_{2}^{\mathcal{H}}&=&\left(
\begin{array}{cc}
3& -1\\
-1& 3\\
\end{array}
\right).
\end{eqnarray*}
Then, the spectra of Laplacian matrices can be generated, which are $\mathbf{Spec}(L_{0}^{\mathcal{H}})=\{0, 1, 2, 4, 5\}$, $\mathbf{Spec}(L_{1}^{\mathcal{H}})=\{1, 2, 2, 4, 4, 5\}$, and $\mathbf{Spec}(L_{2}^{\mathcal{H}})=\{2,4\}$. The Betti numbers are $\beta_0=1, \beta_1=0, $ and $ \beta_2=0$, and the smallest eigenvalues of the non-harmonic spectra for $L_{0}^{\mathcal{H}},L_{1}^{\mathcal{H}}, $ and $ L_{2}^{\mathcal{H}}$ are $\lambda_0=1,\lambda_1=1$, and $\lambda_2=2$, respectively.
\end{example}
\section{Hyperdigraph homology and topological hyperdigraph Laplacians}\label{section:hyperdigraph}
There are various definitions of hyperdigraphs \cite{ausiello2017directed,thakur2009linear}. Additionally, both directed hypergraphs and oriented hypergraphs are used to refer to hyperdigraphs. For the sake of simplicity, we adopt the definition of hyperdigraphs as given in \cite{berge1984hypergraphs}, in which the hyperdigraph consists of sequences of distinct elements in a finite set. These sequences, called directed hyperedges, are the fundamental building blocks for hyperdigraph homology. In contrast, the building blocks for hypergraph homology are the sets of finite elements, while the building blocks for path homology are the directed paths. The topological structures for hyperdigraph homology, hypergraph homology, and path homology are from the corresponding maximal chain complexes embedded in the spaces generated by their building blocks. This section explores the hyperdigraph homology and topological hyperdigraph Laplacians for hyperdigraphs. We follow the technique from topological hypergraphs and path complexes using sequences to construct chain complexes for hyperdigraph homology. Our approach is inspired by path homology \cite{grigor2012homologies,grigor2017homologies,grigor2020path} and embedded homology of hypergraphs \cite{bressan2019embedded}.
\subsection{Hyperdigraph homology}
A \emph{hyperdigraph} $\vec{\mathcal{H}}=(V,\vec{E})$ on $V$ is a pair such that $\vec{E}$ is a subset of $\mathbf{S}(V)=\coprod\limits_{n=1}^{|V|}\Sigma_{n}\times \mathbf{P}_{n}(V)$. Here, $\Sigma_{n}$ is the permutation group of order $n$. An element $(\sigma,e)\in \Sigma_{n}\times \mathbf{P}_{n}(V)$ is called a \emph{$\sigma$-directed $(n-1)$-hyperedge}. In particular, if all the directed hyperedges are trivially directed, i.e., the corresponding permutations are trivial permutations, then we say $\vec{\mathcal{H}}$ is \emph{undirected} or simply a hypergraph as usual.
Let $\vec{\mathcal{H}}=(V,\vec{E})$ and $\vec{\mathcal{H}}'=(V',\vec{E}')$ be two hyperdigraphs.
A \emph{morphism of hyperdigraphs} $f:\vec{\mathcal{H}}\to \vec{\mathcal{H}}'$ is a map $f:V\to V'$ such that $(\sigma,f(e))\in \vec{E}'$ for any $(\sigma,e)\in \vec{E}$.
Let $X$ be an $n$-element ordered set. Let $\Sigma_{n} X$ be the set of permutations of elements in $X$.
\begin{lemma}\label{formula:isomorphism}
There is a bijection $\Sigma_{n}\times \{X\}\to \Sigma_{n}X$ between the set $\Sigma_{n}\times \{X\}$ and the set $\Sigma_{n}X$ given by
\begin{equation*}
\Sigma_{n}\times \{X\}\to \Sigma_{n}X,\quad (\sigma,\{X\})\mapsto (x_{\sigma(1)},x_{\sigma(2)},\dots,x_{\sigma(n)})
\end{equation*}
for $X=\{x_{1},x_{2},\dots,x_{n}\}$ and $\sigma\in \Sigma_{n}$.
\end{lemma}
\begin{proof}
The surjectivity is from the fact that each sequence $(x_{k_{1}},x_{k_{2}},\dots,x_{k_{n}})$ in $\Sigma_{n}X$ can be uniquely written as the permutation $\sigma$ action on the sequence $(x_{1},x_{2},\dots,x_{n})$. On the other hand, if $(x_{\sigma(1)},x_{\sigma(2)},\dots,x_{\sigma(n)})=(x_{\tau(1)},x_{\tau(2)},\dots,x_{\tau(n)})$, the permutation group action shows that $\tau^{-1}\sigma=1$. Here, $1$ is the identity element. Thus we have $\sigma=\tau$, and the injectivity follows.
\end{proof}
\begin{remark}
We can also use the ordered sequence to define hyperdigraph homology. Precisely,
a hyperdigraph $\vec{\mathcal{H}}=(V,\vec{E})$ on $V$ is a pair such that $\vec{E}$ is a subset of the set of sequences of distinct elements in $V$. A directed $p$-hyperedge of $\vec{\mathcal{H}}$ is a sequence $v_{0}v_{1}\cdots v_{p}$ of distinct elements in $V$. This definition is equivalent to the previous definition of the hyperdigraph.
\end{remark}
Recall that a $(p,q)$-shuffle permutation is a permutation $\sigma$ in $\Sigma_{p+q}$ such that
\begin{equation*}
\sigma(1)<\cdots<\sigma(p),\quad \sigma(p+1)<\cdots<\sigma(p+q).
\end{equation*}
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph such that each directed hyperedge is equipped with a $(p,q)$-shuffle permutation. Then we call $\vec{\mathcal{H}}$ a \emph{shuffle-hyperdigraph}. In other words, for each directed hyperedge $(\sigma,e)$ of $\vec{\mathcal{H}}$, the permutation $\sigma$ is a $(p,q)$-shuffle permutation for some $p+q=|e|$. Note that the shuffle-hyperdigraph can lead to another definition of hyperdigraph \cite{gallo1993directed}.
\begin{example}\label{example:acyclic}
Recall that a directed acyclic graph (DAG) is a digraph containing no directed cycle \cite{harary2014graphical}.
A path complex of an acyclic digraph is a hypergraph.
A path complex such that each path has distinct vertices is a hyperdigraph. Let $(V,\mathcal{P})$ be a path complex given by $V=\{0,1,2\}$ and
\begin{equation*}
\mathcal{P}=\{e_{0},e_{1},e_{2},e_{01},e_{12},e_{20},e_{012}\}.
\end{equation*}
The path complex $(V,\mathcal{P})$ can be identified with the hyperdigraph $(V,\vec{E})$, where
\begin{equation*}
\vec{E}=\{(0),(1),(2),(0,1),(1,2),(2,0),(0,1,2)\}.
\end{equation*}
\end{example}
Now, let $S_{p}(V;\mathbb{K})$ be the $\mathbb{K}$-linear space generated by the elements in $\Sigma_{p+1}\times \mathbf{P}_{p+1}(V)$. Then $S_{\ast}(V;\mathbb{K})=(S_{p}(V;\mathbb{K}))_{p\geq 0}$ is a chain complex with the boundary operator $d_{p}:S_{p}(V;\mathbb{K})\to S_{p-1}(V;\mathbb{K})$ given by $d_{p}=\sum\limits_{i=0}^{p}(-1)^{i}\partial_{i}$. Here,
\begin{equation*}
\partial_{i}(x_{0},x_{1},\dots,x_{p})=(x_{0},\dots,\widehat{x_{i}},\dots,x_{p}),\quad 0\leq i\leq p,
\end{equation*}
where $(x_{0},x_{1},\dots,x_{p})$ is a sequence in $\Sigma_{p+1}V$ as given in Lemma \ref{formula:isomorphism}. In particular, we make the convention $d_{0}e=0$ for each $e\in V$. It can be verified that $d_{p-1}\circ d_{p}=0$ on $S_{\ast}(V;\mathbb{K})$ for $p\geq 1$.
Let $F_{p}(\vec{\mathcal{H}};\mathbb{K})$ be the $\mathbb{K}$-linear space generated by $\vec{E}^{p}$. Here, $\vec{E}^{p}\subseteq \Sigma_{p+1}\times \mathbf{P}_{p+1}(V)$ is the set of directed $p$-hyperedges of $\vec{\mathcal{H}}$. Obviously, $F_{p}(\vec{\mathcal{H}};\mathbb{K})$ is a subspace of $S_{p}(V;\mathbb{K})$. Recall that $S_{\ast}(V;\mathbb{K})=(S_{p}(V;\mathbb{K}))_{p\geq 0}$ is a chain complex. The boundary operator $d_{p}$ on $S_{p}(V;\mathbb{K})$ can restrict to a map $d_{p}:F_{p}(\vec{\mathcal{H}};\mathbb{K})\to S_{p-1}(V;\mathbb{K})$.
We denote
\begin{equation}\label{equation:omega_chaincomplex}
\Omega_{p}(\vec{\mathcal{H}};\mathbb{K})=F_{p}(\vec{\mathcal{H}};\mathbb{K})\cap d^{-1}F_{p-1}(\vec{\mathcal{H}};\mathbb{K})=\{x\in F_{p}(\vec{\mathcal{H}};\mathbb{K})|dx\in F_{p-1}(\vec{\mathcal{H}};\mathbb{K})\}.
\end{equation}
Then $\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{K})=(\Omega_{p}(\vec{\mathcal{H}};\mathbb{K}))_{p\geq 0}$ is a chain complex.
\begin{definition}
The \emph{embedded homology} of a hyperdigraph $\vec{\mathcal{H}}$ is defined by
\begin{equation*}
H_{p}(\vec{\mathcal{H}};\mathbb{K})=H_{p}(\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{K})),\quad p\geq 0.
\end{equation*}
\end{definition}
If $\vec{\mathcal{H}}=(V,E)$ is an (undirected) hyperdigraph, the above definition reduces to the embedded homology of hypergraphs.
\begin{example}
The embedded homology of topological hyperdigraphs can reflect more information than the embedded homology of topological hypergraphs. For example, consider the hyperdigraph $\vec{\mathcal{H}}=(V,\vec{E})$, where $V=\{0,1\}$ and $\vec{E}=\{(0,1),(1,0)\}$. A straightforward calculation shows that
\begin{equation*}
H_{p}(\vec{\mathcal{H}};\mathbb{K})=\left\{
\begin{array}{ll}
\mathbb{K}, & \hbox{$p=1$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.
\end{equation*}
The cycle in $F_{\ast}(\vec{\mathcal{H}};\mathbb{K})$ is generated by $(0,1)+(1,0)$. However, the $1$-dimensional homology of any topological hypergraph with vertex set $\{0,1\}$ is trivial.
\end{example}
\begin{example}
Example \ref{example:acyclic} continued. Let $\mathcal{P}$ be a path complex such that each path has distinct vertices. We can regard $\mathcal{P}$ as a hyperdigraph. Then the embedded homology of $\mathcal{P}$ coincides with the path homology of $\mathcal{P}$, that is,
\begin{equation*}
H_{p}^{emb}(\mathcal{P};\mathbb{K})=H_{p}^{path}(\mathcal{P};\mathbb{K}),\quad p\geq 0.
\end{equation*}
\end{example}
Following a similar discussion as in \cite{grbic2022aspects}, we can establish the following proposition, which ensures that the persistent hyperdigraph homology introduced in Section \ref{section:persistence} is a persistence module.
\begin{proposition}
The homology $H_{\ast}(-;\mathbb{K}):\vec{\mathbf{H}}\mathbf{yper}\to \mathbf{Vec}_{\mathbb{K}}$ is a functor from the category of hyperdigraphs to the category of $\mathbb{K}$-linear spaces.
\end{proposition}
\subsection{From hyperdigraph homology to hypergraph homology}
Now, let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph. For each directed hyperedge $(\sigma,e)$, we have an element $e$ of $\mathbf{P}(V)$. Thus we have a hypergraph $\overline{\mathcal{H}}=(V,E)$, where each hyperedge $e$ in $E$ is a projection of some directed hyperedge $(\sigma,e)$ in $\vec{E}$. We call $\overline{\mathcal{H}}$ the \emph{reduced hypergraph} of the hyperdigraph $\vec{\mathcal{H}}$.
Moreover, we have a map of $\mathbb{K}$-linear spaces
\begin{equation*}
\pi: F_{\ast}(\vec{\mathcal{H}};\mathbb{K})\to C_{\ast}(\overline{\mathcal{H}};\mathbb{K})
\end{equation*}
given by $\pi(\sigma,e)= (-1)^{\epsilon(\sigma)} e$. Here, $\epsilon(\sigma)$ is the parity (the number of inversions modulo $2$) of the permutation $\sigma$, so that $(-1)^{\epsilon(\sigma)}$ is the sign of $\sigma$. Then $\pi(\vec{\mathcal{H}})$ is a hypergraph. Thus we have the commutative diagram
\begin{equation*}
\xymatrix{
F_{\ast}(\vec{\mathcal{H}};\mathbb{K})\ar@{->}[r]^{\pi}\ar@{^{(}->}[d]&C_{\ast}(\overline{\mathcal{H}};\mathbb{K})\ar@{^{(}->}[d]\\
S_{\ast}(V;\mathbb{K})\ar@{->}[r]^{\pi}&C_{\ast}(V;\mathbb{K}).
}
\end{equation*}
It induces a $\mathbb{K}$-linear map
\begin{equation*}
\pi:\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{K})\to \mathrm{Inf}_{\ast}(\overline{\mathcal{H}};\mathbb{K}).
\end{equation*}
\begin{lemma}
The map $\pi:\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{K})\to \mathrm{Inf}_{\ast}(\overline{\mathcal{H}};\mathbb{K})$ is a chain map.
\end{lemma}
\begin{proof}
It suffices to show the following diagram commutes.
\begin{equation*}
\xymatrix{
F_{p}(\vec{\mathcal{H}};\mathbb{K})\ar@{->}[r]^{\pi}\ar@{->}[d]_{d_{p}}&C_{p}(\overline{\mathcal{H}};\mathbb{K})\ar@{->}[d]^{d_{p}}\\
S_{p-1}(V;\mathbb{K})\ar@{->}[r]^{\pi}&C_{p-1}(V;\mathbb{K}).
}
\end{equation*}
We will show $(-1)^{\sigma(i)}\partial_{\sigma(i)}\circ\pi(v_{\sigma(0)},v_{\sigma(1)},\dots,v_{\sigma(p)})=(-1)^{i}\pi\circ \partial_{i}(v_{\sigma(0)},v_{\sigma(1)},\dots,v_{\sigma(p)})$, which implies $d_{p}\circ\pi=\pi\circ d_{p}$. Here, $e=\{v_{0},v_{1},\dots,v_{p}\}$ is an ordered set. By a direct calculation, one has
\begin{equation*}
(-1)^{\sigma(i)}\partial_{\sigma(i)}\circ\pi (v_{\sigma(0)},v_{\sigma(1)},\dots,v_{\sigma(p)})=(-1)^{\epsilon(\sigma)+\sigma(i)}\partial_{\sigma(i)}e.
\end{equation*}
On the other hand, let $n_{i}(\sigma)$ be the sum of $|\{j<i|v_{\sigma(j)}>v_{\sigma(i)}\}|$ and $|\{j>i|v_{\sigma(j)}<v_{\sigma(i)}\}|$. Then we have
\begin{equation*}
\pi\circ \partial_{i}(v_{\sigma(0)},v_{\sigma(1)},\dots,v_{\sigma(p)})=(-1)^{i+\epsilon(\sigma)+n_{i}(\sigma)}\partial_{\sigma(i)}e.
\end{equation*}
If we fix $v_{\sigma(i)}$ in the sequence $v_{\sigma(0)},v_{\sigma(1)},\dots,v_{\sigma(p)}$ and permute any two other elements $v_{\sigma(s)},v_{\sigma(t)}$, we obtain a sequence $v_{\tau(0)},v_{\tau(1)},\dots,v_{\tau(p)}$ such that $\tau(j)=\sigma(j)$ for $j\neq s,t$ and $\tau(s)=\sigma(t),\tau(t)=\sigma(s)$. It is obvious that $n_{i}(\sigma)$ and $n_{i}(\tau)$ differ by an even number. This permutation does not change the signature. So the signature of $(v_{\sigma(0)},v_{\sigma(1)},\dots,v_{\sigma(p)})$ can be reduced to the signature of the sequence
\begin{equation*}
(v_{0},\dots,v_{i-1},v_{\sigma(i)},v_{i+1},\dots,v_{\sigma(i)-1},v_{i},v_{\sigma(i)+1},\dots,v_{p})
\end{equation*}
for $\sigma(i)>i$ or the sequence
\begin{equation*}
(v_{0},\dots,v_{\sigma(i)-1},v_{i},v_{\sigma(i)+1},\dots,v_{i-1},v_{\sigma(i)},v_{i+1},\dots,v_{p})
\end{equation*}
for $\sigma(i)<i$.
It follows that $(-1)^{n_{i}(\sigma)}=(-1)^{\sigma(i)-i}$. Thus we have $d_{p}\circ\pi=\pi\circ d_{p}$.
\end{proof}
The above chain morphism induces a morphism of embedded homology.
\begin{theorem}
Let $\vec{\mathcal{H}}$ be a hyperdigraph. There is a natural morphism
\begin{equation*}
H_{\ast}(\pi):H_{\ast}(\vec{\mathcal{H}};\mathbb{K})\to H_{\ast}(\overline{\mathcal{H}};\mathbb{K}).
\end{equation*}
\end{theorem}
\begin{proof}
Let $f:\vec{\mathcal{H}}\to \vec{\mathcal{H}}'$ be a morphism of hyperdigraphs. Then there are $\mathbb{K}$-linear maps
\begin{equation*}
F_{p}(f):F_{p}(\vec{\mathcal{H}};\mathbb{K})\to F_{p}(\vec{\mathcal{H}}';\mathbb{K}),\quad (\sigma,e)\mapsto (\sigma,f(e))
\end{equation*}
and
\begin{equation*}
C_{p}(\overline{f}):C_{p}(\overline{\mathcal{H}};\mathbb{K})\to C_{p}(\overline{\mathcal{H}'};\mathbb{K}),\quad e\mapsto f(e).
\end{equation*}
Consider the following diagram
\begin{equation*}
\xymatrix{
F_{p}(\vec{\mathcal{H}};\mathbb{K})\ar@{->}[r]^{\pi}\ar@{->}[d]_{F_{p}(f)}&C_{p}(\overline{\mathcal{H}};\mathbb{K})\ar@{->}[d]^{C_{p}(\overline{f})}\\
F_{p}(\vec{\mathcal{H}}';\mathbb{K})\ar@{->}[r]^{\pi}&C_{p}(\overline{\mathcal{H}'};\mathbb{K}).
}
\end{equation*}
For any $(\sigma,e)\in \vec{E}$, one has
\begin{equation*}
C_{p}(\overline{f})\circ\pi(\sigma,e)=(-1)^{\epsilon(\sigma)}f(e)=\pi\circ F_{p}(f)(\sigma,e).
\end{equation*}
Thus we obtain $C_{p}(\overline{f})\circ\pi=\pi\circ F_{p}(f)$. Since the embedded hyperdigraph homology $H_{\ast}$ is a functor, we have the desired result.
\end{proof}
Generally, the map $H_{\ast}(\pi)$ is not necessarily a surjection or an injection.
\begin{example}
Let $\vec{\mathcal{H}}_{1}=(V_{1},\vec{E}_{1})$ be a hyperdigraph with $V_{1}=\{1,2,3\}$ and
\begin{equation*}
\vec{E}_{1}=\{(1,2),(1,3),(3,2),(1,2,3)\}.
\end{equation*}
It follows that $H_{p}(\vec{\mathcal{H}}_{1};\mathbb{K})=\left\{
\begin{array}{ll}
\mathbb{K}, & \hbox{$p=1$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.
$ and $H_{p}(\overline{\mathcal{H}_{1}};\mathbb{K})=0$. Thus $H_{\ast}(\pi):H_{\ast}(\vec{\mathcal{H}}_{1};\mathbb{K})\to H_{\ast}(\overline{\mathcal{H}_{1}};\mathbb{K})$ is not an injection.
Let $\vec{\mathcal{H}}_{2}=(V_{2},\vec{E}_{2})$ be a hyperdigraph with $V_{2}=\{1,2,3,4\}$ and
\begin{equation*}
\vec{E}_{2}=\{(1,2,3),(2,3,4),(3,4,1),(4,1,2)\}.
\end{equation*}
Then we have $H_{p}(\vec{\mathcal{H}}_{2};\mathbb{K})=0$ and $H_{p}(\overline{\mathcal{H}_{2}};\mathbb{K})=\left\{
\begin{array}{ll}
\mathbb{K}, & \hbox{$p=2$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.
$ Then the map $H_{\ast}(\pi):H_{\ast}(\vec{\mathcal{H}}_{2};\mathbb{K})\to H_{\ast}(\overline{\mathcal{H}_{2}};\mathbb{K})$ is not a surjection.
\end{example}
\subsection{Topological hyperdigraph Laplacians}
In this section, we will introduce topological hyperdigraph Laplacians as a mathematical tool that can be used to study high-dimensional topological structures. Furthermore, it can be used to develop a topological persistence that can be applied to data analysis. We will provide some examples and calculations to illustrate these concepts for readers. From now on, we always take $\mathbb{K}=\mathbb{R}$.
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph. We have an infimum complex
\begin{equation*}
\cdots\to \Omega_{p+1}(\vec{\mathcal{H}};\mathbb{R})\stackrel{d_{p+1}}{\to} \Omega_{p}(\vec{\mathcal{H}};\mathbb{R})\stackrel{d_{p}}{\to} \Omega_{p-1}(\vec{\mathcal{H}};\mathbb{R})\to \cdots.
\end{equation*}
We regard the collection of directed hyperedges $\vec{E}_{1},\vec{E}_{2},\cdots,\vec{E}_{n}$ as a standard basis of the $\mathbb{K}$-linear space $F_{\ast}(\vec{\mathcal{H}};\mathbb{R})$. Here, $n=|\vec{E}|$ is the number of directed hyperedges of $\vec{\mathcal{H}}$. Let $v_{1},v_{2},\dots,v_{N}$ be a standard basis of $\mathbb{K}[V]$. Here, $N=|V|$. Endow $F_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ and $\mathbb{K}[V]$ with the standard inner products, respectively, given by
\begin{equation*}
\langle v_{i}, v_{j}\rangle =\left\{
\begin{array}{ll}
1, & \hbox{$i=j$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.\quad \langle \vec{E}_{i}, \vec{E}_{j}\rangle =\left\{
\begin{array}{ll}
1, & \hbox{$i=j$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.
\end{equation*}
Then the chain complex $\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ inherits the inner product of $F_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ as subspace. The adjoint functor of $d_{p}:\Omega_{p}(\vec{\mathcal{H}};\mathbb{R})\to \Omega_{p-1}(\vec{\mathcal{H}};\mathbb{R})$ is a $\mathbb{K}$-linear map $(d_{p})^{\ast}:\Omega_{p-1}(\vec{\mathcal{H}};\mathbb{R})\to \Omega_{p}(\vec{\mathcal{H}};\mathbb{R})$ such that
\begin{equation*}
\langle d_{p}x,y\rangle=\langle x,(d_{p})^{\ast}y\rangle
\end{equation*}
for any $x\in \Omega_{p}(\vec{\mathcal{H}};\mathbb{R})$ and $y\in \Omega_{p-1}(\vec{\mathcal{H}};\mathbb{R})$.
\begin{definition}
The \emph{hyperdigraph Laplacian} $\Delta^{\vec{\mathcal{H}}}_{p}:\Omega_{p}(\vec{\mathcal{H}};\mathbb{R})\to \Omega_{p}(\vec{\mathcal{H}};\mathbb{R})$ of $\vec{\mathcal{H}}$ is defined by
\begin{equation*}
\Delta^{\vec{\mathcal{H}}}_{p}=(d_{p})^{\ast}\circ d_{p}+d_{p+1}\circ (d_{p+1})^{\ast},\quad p\geq 0.
\end{equation*}
\end{definition}
Similar to the discussion in Section \ref{section:hypergraph}, we have the following propositions.
\begin{proposition}
The operator $\Delta^{\vec{\mathcal{H}}}_{p}$ is self-adjoint and non-negative definite.
\end{proposition}
\begin{proposition}\label{proposition:decomposition1}
$\Omega_{p}(\vec{\mathcal{H}};\mathbb{R})=\ker \Delta^{\vec{\mathcal{H}}}_{p}\oplus \mathrm{Im}d_{p}\oplus\mathrm{Im}(d_{p+1})^{\ast}$. Moreover, we have $\ker \Delta^{\vec{\mathcal{H}}}_{p}=\ker d_{p}\cap \ker (d_{p+1})^{\ast}\cong H_{p}(\vec{\mathcal{H}};\mathbb{R})$.
\end{proposition}
The above proposition shows that the number of zero eigenvalues of $\Delta^{\vec{\mathcal{H}}}_{p}$ equals the $p$-th Betti number of $\vec{\mathcal{H}}$.
\begin{theorem}
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph. Let $\lambda_{0}(k)$ be the $k$-th smallest Laplacian eigenvalue of $\Delta^{\vec{\mathcal{H}}}_{0}$. If $\vec{E}_{0}\neq \emptyset$, then
\begin{enumerate}
\item[$(i)$] $\lambda_{0}(1)=0$;
\item[$(ii)$] $\lambda_{0}(2)>0$ if and only if for any directed $0$-hyperedges $(v),(w)$ of $\vec{\mathcal{H}}$, there is a path of $1$-hyperedges from $(v)$ to $(w)$.
\end{enumerate}
\end{theorem}
\begin{proof}
$(i)$ Since there exists at least one directed $0$-hyperedge in $\vec{E}$, we have $\Omega_{0}(\vec{\mathcal{H}};\mathbb{R})=F_{0}(\vec{\mathcal{H}};\mathbb{R})$. Moreover, the representation matrix of $\Delta^{\vec{\mathcal{H}}}_{0}:\Omega_{0}(\vec{\mathcal{H}};\mathbb{R})\to \Omega_{0}(\vec{\mathcal{H}};\mathbb{R})$ is an $r\times r$ matrix. Here, $r=\dim \Omega_{0}(\vec{\mathcal{H}};\mathbb{R})\geq 1$. Consider the augmented chain complex
\begin{equation*}
\cdots\to \Omega_{2}(\vec{\mathcal{H}};\mathbb{R})\stackrel{d_{2}}{\to} \Omega_{1}(\vec{\mathcal{H}};\mathbb{R})\stackrel{d_{1}}{\to} \Omega_{0}(\vec{\mathcal{H}};\mathbb{R})\stackrel{\tilde{d}_{0}}{\to} \mathbb{K}\to 0,
\end{equation*}
where $\tilde{d}_{0}(\sum\limits_{j=1}^{m}a_{j}\cdot(v_{j}))=\sum\limits_{j=1}^{m}a_{j}$ for $\sum\limits_{j=1}^{m}a_{j}\cdot(v_{j})\in \Omega_{0}(\vec{\mathcal{H}};\mathbb{R})$. It follows that
\begin{equation*}
{\mathrm{Im}\hspace{0.1em}} d_{1}\subseteq \ker \tilde{d}_{0}.
\end{equation*}
Choose a directed $0$-hyperedge $(v)\in \vec{E}$. Observe that $\tilde{d}_{0}((v))=1\neq 0$, so $(v)\notin \ker \tilde{d}_{0}$. One has ${\mathrm{Im}\hspace{0.1em}} d_{1}\subseteq \ker \tilde{d}_{0}\subsetneq \Omega_{0}(\vec{\mathcal{H}};\mathbb{R})$. It follows that
\begin{equation*}
H_{0}(\vec{\mathcal{H}};\mathbb{R})\neq 0.
\end{equation*}
By Proposition \ref{proposition:decomposition1}, $\dim\ker\Delta^{\vec{\mathcal{H}}}_{0}=\dim H_{0}(\vec{\mathcal{H}};\mathbb{R})>0$. It follows that $\lambda_{0}(1)=0$.
$(ii)$ If $\lambda_{0}(2)>0$, one has $\dim H_{0}(\vec{\mathcal{H}};\mathbb{R})=\dim\ker\Delta^{\vec{\mathcal{H}}}_{0}=1$. So for any $(v),(w)\in \vec{E}^{0}$, we have $(v)-(w)\in {\mathrm{Im}\hspace{0.1em}} d_{1}$. Suppose there is no path of $1$-hyperedges from $(v)$ to $(w)$. Let $(v)-(w)=d_{1}\sum\limits_{j=1}^{k}a_{j}(v_{j}w_{j})$. Consider the function $f:\Omega_{0}(\vec{\mathcal{H}};\mathbb{R})\to \mathbb{R}$ given by
\begin{equation*}
f((x))=\left\{
\begin{array}{ll}
0, & \hbox{if there is a path from $(v)$ to $(x)$;} \\
1, & \hbox{otherwise.}
\end{array}
\right.
\end{equation*}
Then we have
\begin{equation*}
-1=f((v)-(w))=\sum\limits_{j=1}^{k}a_{j}(f(w_{j})-f(v_{j})).
\end{equation*}
Note that $f(w_{j})=f(v_{j})$ since $(v_{j}w_{j})$ is a directed $1$-hyperedge. One has $\sum\limits_{j=1}^{k}a_{j}(f(w_{j})-f(v_{j}))=0$, a contradiction. So there is a path of $1$-hyperedges from $(v)$ to $(w)$.
\end{proof}
\begin{example}
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph with $V=\{1,2,3\}$ and
\begin{equation*}
\vec{E}=\{(1),(2),(1,2),(1,3),(3,2),(1,2,3)\}.
\end{equation*}
Then we have
\begin{eqnarray*}
F_{0}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1),(2)\}, \\
F_{1}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1,2),(1,3),(3,2)\}, \\
F_{2}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1,2,3)\}.
\end{eqnarray*}
Note that $d(1,2,3)=(1,2)-(1,3)+(2,3)\notin F_{1}(\vec{\mathcal{H}};\mathbb{R})$. By Eq. (\ref{equation:omega_chaincomplex}), we obtain
\begin{eqnarray*}
\Omega_{0}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1),(2)\}, \\
\Omega_{1}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1,2),(1,3)+(3,2)\}, \\
\Omega_{2}(\vec{\mathcal{H}};\mathbb{R}) &=& 0.
\end{eqnarray*}
Now, we choose the standard orthogonal basis of $\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ as $(1),(2),(1,2),\frac{(1,3)+(3,2)}{\sqrt{2}}$. Then the boundary matrices are given by
\begin{eqnarray*}
d_0\left( \begin{array}{c}
(1) \\
(2) \\
\end{array}
\right)&=&\left(
\begin{array}{cc}
0\\ 0\\
\end{array}
\right), \\
d_{1}\left(
\begin{array}{c}
(1,2) \\
\frac{(1,3)+(3,2)}{\sqrt{2}} \\
\end{array}
\right)&=&\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)\left(
\begin{array}{c}
(1) \\
(2) \\
\end{array}
\right) .
\end{eqnarray*}
Note that $\mathrm{rank} \left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)=1$.
We have the hyperdigraph homology $H_{p}(\vec{\mathcal{H}};\mathbb{R})=\left\{
\begin{array}{ll}
\mathbb{K}, & \hbox{$p=0,1$;} \\
0, & \hbox{otherwise.}
\end{array}
\right.
$
Moreover, the representation matrix of $\Delta^{\vec{\mathcal{H}}}_{p}$ with respect to the chosen basis is given by
\begin{equation*}
L^{\vec{\mathcal{H}}}_{0}= \left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)^{T}\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)= \left(
\begin{array}{cc}
\frac{3}{2} & -\frac{3}{2} \\
-\frac{3}{2} & \frac{3}{2} \\
\end{array}
\right)
\end{equation*}
and
\begin{equation*}
L^{\vec{\mathcal{H}}}_{1}= \left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)\left(
\begin{array}{cc}
-1 & 1 \\
-\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} \\
\end{array}
\right)^{T}= \left(
\begin{array}{cc}
2 & \sqrt{2} \\
\sqrt{2} & 1 \\
\end{array}
\right).
\end{equation*}
The spectrum of $\Delta_{0}^{\vec{\mathcal{H}}}$ is $\{0,3\}$, the number of zero eigenvalues is exactly $1$. Similarly, the spectrum of $\Delta_{1}^{\vec{\mathcal{H}}}$ is $\{0,3\}$, which is consistent with the Betti number. Note that if we choose the standard orthogonal basis of $\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ as $\frac{(1)+(2)}{\sqrt{2}},\frac{(1)-(2)}{\sqrt{2}},(1,2),\frac{(1,3)+(3,2)}{\sqrt{2}}$, one can obtain the corresponding representation matrices of Laplacians as
\begin{equation*}
\tilde{L}^{\vec{\mathcal{H}}}_{0}= \left(
\begin{array}{cc}
0 & 0 \\
0 & 3 \\
\end{array}
\right),\quad \tilde{L}^{\vec{\mathcal{H}}}_{1}= \left(
\begin{array}{cc}
2 & \sqrt{2} \\
\sqrt{2} & 1 \\
\end{array}
\right).
\end{equation*}
In fact, the eigenvalues of the above matrices coincide with the previous ones.
\end{example}
\begin{figure}\label{figure:hyperdigraph}
\end{figure}
\begin{example}\label{example:hyperdigraph2}
As shown in Figure \ref{figure:hyperdigraph}{\bf a}, let $\mathcal{\vec{H}}=(V,\vec{E})$ be a hyperdigraph with vertex set $V=\{0, 1, 2, 3, 4, 5\}$ and the directed hyperedge set $\vec{E}$, where $\vec{E}$ contains the directed 0-hyperedges (Figure \ref{figure:hyperdigraph}{\bf b}), directed 1-hyperedges (Figure \ref{figure:hyperdigraph}{\bf c}), and the directed 2-hyperedges (Figure \ref{figure:hyperdigraph}{\bf d}) as follows,
\begin{eqnarray*}
\vec{E}^0&=&\{(1), (2), (3), (4), (5)\}, \\
\vec{E}^1&=&\{(0, 1), (1, 2), (2, 1), (2, 3), (2, 4), (2, 5), (3, 4), (4, 1) \}, \\
\vec{E}^2&=&\{(0, 1, 2), (0, 5, 1), (2, 3, 4), (2, 4, 1)\}.
\end{eqnarray*}
So the standard orthogonal basis of $\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ can be chosen as follows,
\begin{equation*}
(1), (2), (3), (4), (5), (1, 2), (2, 1), (2, 3), (2, 4), (2, 5), (3, 4), (4, 1), (2, 3, 4), (2, 4, 1).
\end{equation*}
Then, the representation matrices of the boundary operators $d_0$,$d_1$, and $d_2$ are given as follows,
\begin{eqnarray*}
d_0\left( \begin{array}{c}
(1) \\
(2) \\
(3) \\
(4) \\
(5) \\
\end{array}
\right)&=&\left(
\begin{array}{cc}
0\\ 0\\ 0\\ 0\\ 0 \\
\end{array}
\right),\\
d_{1}\left(
\begin{array}{c}
(1, 2)\\(2, 1)\\(2, 3)\\(2, 4)\\(2, 5)\\(3, 4)\\(4, 1)\\
\end{array}
\right) &=& \left(
\begin{array}{ccccc}
-1& 1& 0& 0&0\\
1& -1& 0& 0&0\\
0&-1& 1& 0&0\\
0&-1& 0& 1&0\\
0&-1& 0& 0&1\\
0& 0&-1& 1&0\\
1& 0& 0&-1&0\\
\end{array}
\right)\left(
\begin{array}{c}
(1)\\(2)\\(3)\\(4)\\(5)\\
\end{array}
\right), \\
d_{2}\left(
\begin{array}{c}
(2, 3, 4)\\(2, 4, 1)\\
\end{array}
\right)&=&\left(
\begin{array}{ccccccc}
0&0&1&-1&0&1&0\\
0&-1&0&1&0&0&1\\
\end{array}
\right)\left(
\begin{array}{c}
(1, 2)\\(2, 1)\\(2, 3)\\(2, 4)\\(2, 5)\\(3, 4)\\(4, 1)\\
\end{array}
\right).
\end{eqnarray*}
The representation matrices of the Laplacian $\Delta^{\vec{\mathcal{H}}}_{p}$ with respect to the chosen basis are listed as follows,
\begin{eqnarray*}
L_{0}^{\mathcal{\vec{H}}}&=&\left(
\begin{array}{ccccc}
3&-2& 0&-1& 0\\
-2& 5&-1&-1&-1\\
0&-1& 2&-1& 0\\
-1&-1&-1& 3& 0\\
0&-1& 0& 0& 1\\
\end{array}
\right), \\
L_{1}^{\mathcal{\vec{H}}}&=&\left(
\begin{array}{ccccccc}
2& -2& -1& -1& -1& 0&-1\\
-2& 3& 1& 0& 1& 0& 0\\
-1& 1& 3& 0& 1& 0& 0\\
-1& 0& 0& 4& 1& 0& 0\\
-1& 1& 1& 1& 2& 0& 0\\
0& 0& 0& 0& 0& 3&-1\\
-1& 0& 0& 0& 0&-1& 3\\
\end{array}
\right), \\
L_{2}^{\mathcal{\vec{H}}}&=&\left(
\begin{array}{cc}
3& -1\\
-1& 3\\
\end{array}
\right).
\end{eqnarray*}
Then the spectra of the Laplacian matrices can be generated, which are $\mathbf{Spec}(L_{0}^{\mathcal{\vec{H}}})=\{0, 1.044$, $2.332, 4.080, 6.544\}$, $\mathbf{Spec}(L_{1}^{\mathcal{\vec{H}}})=\{0, 1.044, 2, 2.332, 4, 4.080, 6.544\}$, and $\mathbf{Spec}(L_{2}^{\mathcal{\vec{H}}})=\{2,4\}$. The corresponding Betti numbers are $\beta_0=1,\beta_1=1, $ and $ \beta_2=0$, and the smallest eigenvalues of the non-harmonic spectra are $\lambda_0=1.044,\lambda_1=1.044$, and $\lambda_2=2$, respectively.
\end{example}
There are various homologies and topological Laplacians on different objects. We list these objects, which are a family of discrete objects satisfying certain properties, and the corresponding topological Laplacians in Table \ref{table:graph_Lap}.
Fix a nonempty finite ordered set $V$. Recall that $\mathbf{S}(V)$ is the set of all sequences with distinct elements in $V$. Let $\mathbf{\tilde{S}}(V)$ be the set of all sequences with elements in $V$.
For a subset $K$ of $\mathbf{P}(V)$, let $\partial_{\ast}K=\{\tau\subset \sigma|\sigma\in K\}$. For a subset $P$ in $\mathbf{\tilde{S}}(V)$, let $\partial_{0}P=\{(x_{1},\dots,x_{p})|(x_{0},x_{1},\dots,x_{p})\in P\}$ and $\partial_{\infty}P=\{(x_{0},\dots,x_{p-1})|(x_{0},x_{1},\dots,x_{p})\in P\}$.
A graph is a vertex set $V$ equipped with a collection of $1$-dimensional edges, that is, a subset of $\mathbf{P}_{2}(V)$. A simplicial complex can be regarded as a vertex set equipped with a subset $K$ of $\mathbf{P}(V)$ satisfying $\partial_{\ast}K\subseteq K$. A digraph is a vertex set $V$ equipped with an edge set $E$ satisfying $E\subseteq \tilde{\mathbf{S}}_{2}(V)$. A path complex can be thought of as a vertex set $V$ equipped with a collection $P$ satisfying $\partial_{0}P,\partial_{\infty}P\subseteq P$. A hypergraph and a hyperdigraph can be regarded as a vertex set $V$ equipped with the corresponding edge sets $E\subseteq \mathbf{P}(V)$ and $\vec{E}\subseteq \mathbf{S}(V)$, respectively.
\begin{table}[h]
\centering
\caption{Topological Laplacians on different objects}\label{table:graph_Lap}
\begin{tabular}{c|c|c|c}
\hline
Homologies & notation & restriction & topological Laplacians \\
\hline
graph & $G=(V,E)$, $E\subseteq \mathbf{P}_{2}(V)$ & none& Laplacian \\
simplicial complex & $\mathcal{K}=(V,K)$, $K\subseteq \mathbf{P}(V)$ & $\partial_{\ast}K\subseteq K$& Laplacian \\
digraph & $G=(V,E)$, $E\subseteq \mathbf{S}_{2}(V)$ & none & path Laplacian \\
path complex & $\mathcal{P}=(V,P)$, $P\subseteq \mathbf{\tilde{S}}(V)$& $\partial_{0}P,\partial_{\infty}P\subseteq P$ & path Laplacian \\
hypergraph & $\mathcal{H}=(V,E)$, $E\subseteq \mathbf{P}(V)$ & none& hypergraph Laplacian \\
hyperdigraph & $\vec{\mathcal{H}}=(V,\vec{E})$, $\vec{E}\subseteq \mathbf{S}(V)$ & none & hyperdigraph Laplacian \\
\hline
\end{tabular}
\end{table}
\subsection{The calculations of topological hyperdigraph Laplacians}
In this section, we provide more examples to illustrate topological hyperdigraph Laplacians. Our algorithm is based on the following calculation process. If there is no ambiguity, we will denote the sequence $(v_{0},v_{1},\dots,v_{p})$ by $(v_{0}v_{1}\cdots v_{p})$ for convenience.
\begin{example}
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph with $V=\{1,2,3,4,5\}$ and
\begin{equation*}
\vec{E}=\{(12),(25),(13),(35),(14),(45),(125),(135),(145)\}.
\end{equation*}
By a direct calculation, we have
\begin{eqnarray*}
\Omega_{0}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1),(2),(3),(4),(5)\}, \\
\Omega_{1}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(12),(25),(13),(35),(14),(45)\}, \\
\Omega_{2}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(125)-(135),(125)-(145)\}.
\end{eqnarray*}
We choose the standard orthogonal basis of $\Omega_{\ast}(\vec{\mathcal{H}};\mathbb{R})$ as
\begin{equation*}
(1),(2),(3),(4),(5),(12),(25),(13),(35),(14),(45),(x),(y).
\end{equation*}
Here, $(x)=\frac{1}{\sqrt{2}}(125)-\frac{1}{\sqrt{2}}(135)$ and $(y)=-\frac{1}{\sqrt{6}}(125)-\frac{1}{\sqrt{6}}(135)+\frac{2}{\sqrt{6}}(145)$.
Then the boundary matrices are given by
\begin{equation*}
d_0\left( \begin{array}{c}
(1) \\
(2) \\
(3) \\
(4) \\
(5) \\
\end{array}
\right)=\left(
\begin{array}{cc}
0\\ 0\\ 0\\ 0\\ 0 \\
\end{array}
\right),\quad d_{1} \left(\begin{array}{c}
(12) \\
(25) \\
(13) \\
(35) \\
(14) \\
(45) \\
\end{array}
\right)=\left(
\begin{array}{ccccc}
-1 & 1 & 0 & 0 & 0 \\
0 & -1 & 0 & 0 & 1 \\
-1 & 0 & 1 & 0 & 0 \\
0 & 0 & -1 & 0 & 1 \\
-1 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & -1 & 1 \\
\end{array}
\right)\left(
\begin{array}{c}
(1) \\
(2) \\
(3) \\
(4) \\
(5) \\
\end{array}
\right),
\end{equation*}
\begin{equation*}
d_{2}\left(
\begin{array}{c}
(x) \\
(y) \\
\end{array}
\right)=\left(
\begin{array}{cccccc}
\frac{1}{\sqrt{2}} & \frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{2}} & -\frac{1}{\sqrt{2}}& 0 & 0 \\
-\frac{1}{\sqrt{6}} & -\frac{1}{\sqrt{6}} & -\frac{1}{\sqrt{6}} & -\frac{1}{\sqrt{6}} & \frac{2}{\sqrt{6}} & \frac{2}{\sqrt{6}}
\\
\end{array}
\right)\left(
\begin{array}{c}
(12) \\
(25) \\
(13) \\
(35) \\
(14) \\
(45) \\
\end{array}
\right).
\end{equation*}
Hence, the representation matrices of $\Delta_{0}^{\vec{\mathcal{H}}},\Delta_{1}^{\vec{\mathcal{H}}}, $and $ \Delta_{2}^{\vec{\mathcal{H}}}$ are
\begin{equation*}
L_{0}^{\vec{\mathcal{H}}}=\left(
\begin{array}{ccccc}
3 & -1 & -1 & -1 & 0 \\
-1 & 2 & 0 & 0 & -1 \\
-1 & 0 & 2 & 0 & -1 \\
-1 & 0 & 0 & 2 & -1 \\
0 & -1 & -1 & -1 & 3 \\
\end{array}
\right),L_{1}^{\vec{\mathcal{H}}}=\left(
\begin{array}{cccccc}
\frac{8}{3} & -\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} \\
-\frac{1}{3} & \frac{8}{3} & -\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} & \frac{2}{3} \\
\frac{2}{3} & -\frac{1}{3} & \frac{8}{3} & -\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} \\
-\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} & \frac{8}{3} & -\frac{1}{3} & \frac{2}{3} \\
\frac{2}{3} & -\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} & \frac{8}{3} & -\frac{1}{3} \\
-\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} & \frac{2}{3} & -\frac{1}{3} & \frac{8}{3} \\
\end{array}
\right)
\end{equation*}
and
\begin{equation*}
L_{2}^{\vec{\mathcal{H}}}=\left(
\begin{array}{cc}
2 & 0 \\
0 & 2 \\
\end{array}
\right).
\end{equation*}
The spectra of $\Delta_{0}^{\vec{\mathcal{H}}},\Delta_{1}^{\vec{\mathcal{H}}}, $and $ \Delta_{2}^{\vec{\mathcal{H}}}$ are $\{0,2,2,3,5\}$, $\{2,2,2,2,3,5\}$, and $\{2,2\}$, respectively.
Note that the transpose matrix of the representation matrix of the boundary operator $d_{p}$ is the representation matrix of its adjoint operator $d_{p}^{\ast}$ with respect to the standard orthogonal basis. However, it is not always true if the chosen basis is not the standard orthogonal basis. For example, let us choose the basis as
\begin{equation*}
(1),(2),(3),(4),(5),(12),(25),(13),(35),(14),(45),(125)-(135),(125)-(145).
\end{equation*}
Then the corresponding boundary matrix of $d_{2}$ is
\begin{equation*}
\tilde{B}_{2}=\left(
\begin{array}{cccccc}
1 & 1 & -1 & -1 & 0 & 0 \\
1 & 1 & 0 & 0 & -1 & -1 \\
\end{array}
\right).
\end{equation*}
If $\tilde{B}_{2}^{T}$ is the representation of $d_{2}^{\ast}$, then we would have
\begin{equation*}
\langle d_{2}((125)-(135)),(12)\rangle=1\neq 3=\langle (125)-(135),d_{2}^{\ast}(12)\rangle.
\end{equation*}
This is a contradiction. Thus the transpose matrix of the representation matrix of the boundary operator $d_{p}$ is not always the representation matrix of its adjoint operator $d_{p}^{\ast}$ if the chosen basis is not the standard orthogonal basis.
\end{example}
\begin{example}\label{example:three_combination}
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph with $V=\{1,2,3\}$. Here, $\vec{E}$ is given by all the sequences of distinct elements in $V$.
\begin{equation*}
\vec{E}=\{(1),(2),(3),(12),(13),(21),(23),(31),(32),(123),(132),(213),(231),(312),(321)\}.
\end{equation*}
It follows that
\begin{eqnarray*}
\Omega_{0}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1),(2),(3)\}, \\
\Omega_{1}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(12),(13),(21),(23),(31),(32)\}, \\
\Omega_{2}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(123),(132),(213),(231),(312),(321)\}.
\end{eqnarray*}
We choose the standard orthogonal basis as
\begin{equation*}
(1),(2),(3),(12),(13),(21),(23),(31),(32),(123),(132),(213),(231),(312),(321).
\end{equation*}
Then the calculation is shown in Table \ref{table:three_combination}.
\begin{table}[h]
\caption{Illustration of hyperdigraph in Example \ref{example:three_combination}}\label{table:three_combination}
\centering
\begin{small}
\begin{tabular}{c|c|c|c}
\hline
$n$ & $n=0$ & $n=1$ & $n=2$ \\
\hline
$B_{n+1}$ & $\left(\begin{array}{ccc}
-1 & 1 & 0 \\
-1 & 0 & 1 \\
1 & -1 & 0 \\
0 & -1 & 1 \\
1 & 0 & -1 \\
0 & 1 & -1 \\
\end{array}
\right)$ & $\left(
\begin{array}{cccccc}
1 & -1 & 0 & 1 & 0 & 0 \\
-1 & 1 & 0 & 0 & 0 & 1 \\
0 & 1 & 1 & -1 & 0 & 0 \\
0 & 0 & -1 & 1 & 1 & 0 \\
1 & 0 & 0 & 0 & 1 & -1 \\
0 & 0 & 1 & 0 & -1 & 1 \\
\end{array}
\right)$
& $0\times 6$ empty matrix \\
\hline
$L_{n}$ & $\left(
\begin{array}{ccc}
4 & -2 & -2 \\
-2 & 4 & -2 \\
-2 & -2 & 4 \\
\end{array}
\right)$
& $\left(
\begin{array}{cccccc}
5& -1& -2& 0& 0& -1\\
-1& 5& 0& -1& -2& 0\\
-2& 0& 5& -1& -1& 0\\
0& -1& -1& 5& 0& -2\\
0& -2& -1& 0& 5& -1\\
-1& 0& 0& -2& -1& 5\\
\end{array}
\right)$
& $\left(
\begin{array}{cccccc}
3& -2& -2& 1& 1& 0\\
-2& 3& 1& 0& -2& 1\\
-2& 1& 3& -2& 0& 1\\
1& 0& -2& 3& 1& -2\\
1& -2& 0& 1& 3& -2\\
0& 1& 1& -2& -2& 3\\
\end{array}
\right)$
\\
\hline
$\beta_{n}$ & 1 & 0 & 2 \\
\hline
$\mathbf{Spec}(L_{n})$ & \{0,6,6\} & \{1,4,4,6,6,9\} & \{0,0,1,4,4,9\} \\
\hline
\end{tabular}
\end{small}
\end{table}
\end{example}
\begin{example}\label{example:example4}
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph with $V=\{1,2,3,4,5\}$. Here, $\vec{E}$ is the set of $(p,1)$-shuffles as follows
\begin{equation*}
\vec{E}=\{(1)(2),(2)(1),(1)(3),(3)(2),(4)(2),(4)(3),(5)(2),(14)(2),(14)(3),(15)(2),(34)(2)\}.
\end{equation*}
By Eq. (\ref{equation:omega_chaincomplex}), we obtain that
\begin{eqnarray*}
\Omega_{0}(\vec{\mathcal{H}};\mathbb{R}) &=&0,\\
\Omega_{1}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(1)(2)+(2)(1),(1)(3)+(3)(2)+(2)(1),(3)(2)-(4)(2)+(4)(3)\}, \\
\Omega_{2}(\vec{\mathcal{H}};\mathbb{R}) &=& \mathrm{span}\{(14)(2)-(14)(3)\}.
\end{eqnarray*}
We choose the standard orthogonal basis given by
\begin{equation*}
\begin{split}
& \frac{1}{\sqrt{2}}(1)(2)+\frac{1}{\sqrt{2}}(2)(1),-\frac{1}{\sqrt{10}}(1)(2)+\frac{1}{\sqrt{10}}(2)(1)+\frac{2}{\sqrt{10}}(1)(3)+\frac{2}{\sqrt{10}}(3)(2), \\
& \frac{1}{\sqrt{65}}(1)(2)-\frac{1}{\sqrt{65}}(2)(1)-\frac{2}{\sqrt{65}}(1)(3)+\frac{3}{\sqrt{65}}(3)(2)-\frac{5}{\sqrt{65}}(4)(2)+\frac{5}{\sqrt{65}}(4)(3),\\
& \frac{1}{\sqrt{2}}(14)(2)-\frac{1}{\sqrt{2}}(14)(3).
\end{split}
\end{equation*}
Then we have the calculation results shown in Table \ref{table:shuffle}. \begin{table}
\centering
\caption{Illustration of hyperdigraph in Example \ref{example:example4}}\label{table:shuffle}
\begin{small}
\begin{tabular}{c|c|c|c}
\hline
$n$ & $n=0$ & $n=1$ & $n=2$ \\
\hline
$B_{n+1}$ & $3\times 0$ empty matrix & $\left(
\begin{array}{ccc}
-\frac{1}{2} & \frac{3}{2\sqrt{5}} &- \frac{\sqrt{13}}{\sqrt{10}} \\
\end{array}
\right)$
& $0\times 1$ empty matrix \\
\hline
$L_{n}$ & none
& $\left(
\begin{array}{ccc}
\frac{1}{4}& -\frac{3}{4\sqrt{5}} & \frac{\sqrt{13}}{2\sqrt{10}} \\
-\frac{3}{4\sqrt{5}} & \frac{9}{20} & -\frac{3\sqrt{13}}{10\sqrt{2}} \\
\frac{\sqrt{13}}{2\sqrt{10}} & -\frac{3\sqrt{13}}{10\sqrt{2}} & \frac{13}{10} \\
\end{array}
\right)$
& 2 \\
\hline
$\beta_{n}$ & 0 & 2 & 0 \\
\hline
$\mathbf{Spec}(L_{n})$ & none & \{0,0,2\} & \{2\} \\
\hline
\end{tabular}
\end{small}
\end{table}
\end{example}
\section{Persistent hyperdigraph homology and persistent hyperdigraph Laplacians }\label{section:persistence_on_hyper}
Topological persistence is a powerful computational tool in the field of topology, which allows us to analyze the topological characteristics of a given dataset. It comprises a set of topological invariants that depend on the filtration parameter of the dataset. By tracking changes in these topological features, one can identify the key parameter values where significant changes occur in the dataset. These values correspond to the topological features that are of interest to us, and they can provide insights into the underlying structure and geometry of the data.
Persistent topological Laplacians, proposed by Wei and coworkers \cite{chen2019evolutionary, wang2019persistent, wei2023topological}, can overcome various limitations of persistent homology or persistent topology. These approaches are more quantitative and specific than persistent homology, and they enable the characterization of non-topological shape evolution and the embedding of physical laws in topological invariants \cite{wei2021persistent}. In this section, we introduce the concepts of persistent hyperdigraph homology and persistent hyperdigraph Laplacians. It is worth noting that persistence on topological hyperdigraphs can be reduced to that of topological hypergraphs, since a topological hypergraph can be seen as a topological hyperdigraph with trivial directions.
\subsection{Topological persistence on hyperdigraphs and hyperdigraph Laplacians}\label{section:persistence}
Let $(\mathbb{R},\leq)$ be a category with objects given by the real numbers and the morphisms given by $a\to b$ for $a\leq b$. A \emph{persistence hyperdigraph} is a functor $\mathcal{P}:(\mathbb{R},\leq)\to \vec{\mathbf{H}}\mathbf{yper}$ from $(\mathbb{R},\leq)$ to the category of hyperdigraphs. For real numbers $a\leq b$, the \emph{$(a,b)$-persistent homology} of a persistence hyperdigraph $\mathcal{P}:(\mathbb{R},\leq)\to \vec{\mathbf{H}}\mathbf{yper}$ is given by
\begin{equation*}
H_{p}^{a,b}(\mathcal{P};\mathbb{R})={\mathrm{Im}\hspace{0.1em}}(H_{p}(\mathcal{P}(a);\mathbb{R})\to H_{p}(\mathcal{P}(b);\mathbb{R})).
\end{equation*}
The $(a,b)$-\emph{persistent Betti number} of $\mathcal{P}$ is defined to be $\beta_{p}^{a,b}=\dim H_{p}^{a,b}(\mathcal{P};\mathbb{R})$ for $p\geq 0$.
Next, we formulate persistent hyperdigraph Laplacians. Let $\vec{\mathbf{H}}\mathbf{yper}^{\hookrightarrow}$ be the subcategory of $\vec{\mathbf{H}}\mathbf{yper}$ with topological hyperdigraphs as objects and inclusions of hyperdigraphs as morphisms. Let $\mathcal{P}:(\mathbb{R},\leq)\to \vec{\mathbf{H}}\mathbf{yper}^{\hookrightarrow}$ be a persistence hyperdigraph.
For real numbers $a\leq b$, we have an inclusion $j^{a,b}:\mathcal{P}(a)\hookrightarrow \mathcal{P}(b)$ of topological hyperdigraphs. Let $\Omega^{t}_{p}=\Omega_{p}(\mathcal{P}(t);\mathbb{R})$ for any $t\in \mathbb{R}$. The inclusion $j^{a,b}$ induces an inclusion of chain complexes $\mathfrak{j}^{a,b}_{p}:\Omega^{a}_{p}\hookrightarrow \Omega^{b}_{p}$ for $p\geq 0$.
Let $\Omega_{p+1}^{a,b}=\{x\in\Omega_{p+1}^{b}|\partial^{b}_{p+1} x\in \Omega_{p}^{a}\}$. Let $d_{p+1}^{a,b}=(\mathfrak{j}_{p}^{a,b})^{\ast}\circ d_{p+1}^{b}\circ\iota^{a,b}_{p+1}$.
\begin{equation*}
\xymatrix{
\Omega_{p+1}^{a}\ar@{->}[rr]^{ d_{p+1}^{a}}\ar@{^{(}->}[dd]_{\mathfrak{j}^{a,b}_{p}}&&\quad \Omega_{p}^{a}\quad\ar@<0.75ex>[rr]^-{\textcolor[rgb]{0.00,0.07,1.00}{ d_{p}^{a}} } \ar@{^{(}->}[dd]^{\mathfrak{j}^{a,b}_{p}}\ar@<0.75ex>[ld]^-{\textcolor[rgb]{0.00,0.07,1.00}{( d_{p+1}^{a,b})^{\ast}}}&&\quad \Omega_{p-1}^{a}\ar@<0.75ex>[ll]^-{\textcolor[rgb]{0.00,0.07,1.00}{( d_{p}^{a})^{\ast}}}\ar@{^{(}->}[dd]^{\mathfrak{j}^{a,b}_{p}}\\
&\Omega_{p+1}^{a,b}\ar@<0.75ex>[ru]^-{\textcolor[rgb]{0.00,0.07,1.00}{ d_{p+1}^{a,b}} }\ar@{^{(}->}[ld]_{\iota_{p+1}^{a,b}}&&& \\
\Omega_{p+1}^{b}\ar@{->}[rr]^{ d_{p+1}^{b}}&&\quad \Omega_{p}^{b}\quad\ar@{->}[rr]^{ d_{p}^{b}}&&\quad \Omega_{p-1}^{b}
}
\end{equation*}
The \emph{$p$-th $(a,b)$-persistent hyperdigraph Laplacian} $\Delta_{p}^{a,b}:\Omega_{p}^{a}\to \Omega_{p}^{a}$ is defined by
\begin{equation*}
\Delta_{p}^{a,b}= d_{p+1}^{a,b}\circ ( d_{p+1}^{a,b})^{\ast}+( d_{p}^{a})^{\ast}\circ d_{p}^{a}.
\end{equation*}
We choose two families of the standard orthogonal bases of $\Omega_{p}^{a} $ and $ \Omega_{p+1}^{a,b}$ for $p\geq 0$, respectively. Let $B_{p}^{a}$ and $B_{p}^{a,b}$ be the representation matrices of $d_{p}^{a}$ and $d_{p}^{a,b}$, respectively. Then, the representation matrix of the Laplacian $\Delta_{p}^{a,b}$ is given by
\begin{equation*}
L_{p}^{a,b}= (B_{p+1}^{a,b})^{T}\circ B_{p+1}^{a,b}+B_{p}^{a}\circ (B_{p}^{a})^{T}.
\end{equation*}
The spectrum of $L_{p}^{a,b}$ is displayed as
\begin{equation*}
\mathbf{Spec}(L_{p}^{a,b})=\{\lambda_{p}^{a,b}(1),\lambda_{p}^{a,b}(2),\dots,\lambda_{p}^{a,b}(n)\},\quad p\geq 0,
\end{equation*}
where $n=\dim \Omega_{p}^{a}$. The smallest non-zero eigenvalue in $\mathbf{Spec}(L_{p}^{a,b})$ is denoted by $\tilde{\lambda}_{p}^{a,b}$, which is related to the Cheeger isoperimetric constant.
By the persistent Hodge decomposition theorem \cite{liu2023algebraic}, one has
\begin{theorem}
$\Omega_{p}^{a}=\ker \Delta_{p}^{a,b}\oplus {\mathrm{Im}\hspace{0.1em}} d_{p+1}^{a,b}\oplus {\mathrm{Im}\hspace{0.1em}} ( d_{p}^{a})^{\ast}$, where $\ker \Delta_{p}^{a,b}\cong H_{p}^{a,b}(\mathcal{P};\mathbb{R})$.
\end{theorem}
Given a persistent hyperdigraph $\mathcal{P}:(\mathbb{R},\leq)\to \vec{\mathbf{H}}\mathbf{yper}$, the theorem says that the number of zero eigenvalues of $\Delta_{p}^{a,b}$ equals to the $(a,b)$-persistent Betti number $\beta_{p}^{a,b}$.
\begin{corollary}
$N(\Delta_{p}^{a,b})=\beta_{p}^{a,b}$, where $N(\Delta_{p}^{a,b})$ denotes the number of zero eigenvalues of $\Delta_{p}^{a,b}$.
\end{corollary}
If $a=b$, we have a decomposition
\begin{equation}\label{equation:decom}
\Omega_{p}^{a}=\ker \Delta_{p}^{a}\oplus {\mathrm{Im}\hspace{0.1em}} d_{p+1}^{a}\oplus {\mathrm{Im}\hspace{0.1em}} ( d_{p}^{a})^{\ast},
\end{equation}
where $\ker \Delta_{p}^{a}\cong H_{p}^{a}(\mathcal{P};\mathbb{R})$. Let $\pi_{p}^{a}:\Omega_{p}^{a}\to \ker \Delta_{p}^{a}$ be the orthogonal projection. Then for any element $x\in \Omega_{p}^{a}$, the element $\pi_{p}^{a}(x)$ is the harmonic component of $x$. Let $\mathcal{H}_{p}^{a}=\ker\Delta_{p}^{a}$. For real numbers $a\leq b$, we denote the \emph{$(a,b)$-persistent harmonic space} by
\begin{equation*}
\mathcal{H}^{a,b}_{p}={\mathrm{Im}\hspace{0.1em}} (\pi_{p}^{b}\circ\mathfrak{j}^{a,b}_{p}:\mathcal{H}^{a}_{p}\to \mathcal{H}^{b}_{p}).
\end{equation*}
\begin{theorem}\label{theorem:isomorphism}
For any real numbers $a\leq b$, we have
\begin{equation*}
\mathcal{H}_{p}^{a,b}\cong H_{p}^{a,b}(\mathcal{P};\mathbb{R}),\quad p\geq 0.
\end{equation*}
\end{theorem}
\begin{proof}
We will first prove that $i^{a}:\mathcal{H}_{\ast}^{a}\hookrightarrow \Omega_{\ast}^{a}$ is a quasi-isomorphism. Indeed, for any $x\in \mathcal{H}_{\ast}^{a}$, if $i^{a}(x)$ is a boundary in $\Omega_{\ast}^{a}$, then $x$ is a boundary in $\Omega_{\ast}^{a}$. By Eq. (\ref{equation:decom}), one has $x=0$. So $H(i^{a}):\mathcal{H}_{\ast}^{a}\to H^{a}_{\ast}(\mathcal{P};\mathbb{R})$ is an injection. On the other hand, for any cycle $z$ in $\Omega_{\ast}^{a}$, by Eq.
(\ref{equation:decom}), we can write \begin{equation*}
z=z_{0}+d^{a}_{\ast+1}z_{1} \end{equation*} for some $z_{0}\in \mathcal{H}_{\ast}^{a}$ and $z_{1}\in \Omega_{\ast+1}^{a}$. Thus we have $H(i^{a})[z_{0}]=[z_{0}]=[z]$. So the map $H(i^{a}):\mathcal{H}_{\ast}^{a}\to H^{a}_{\ast}(\mathcal{P};\mathbb{R})$ is a surjection.
Now, to prove our theorem, it suffices to show that the following diagram commutes.
\begin{equation*}
\xymatrix{
\mathcal{H}^{a}_{p}\ar@{->}[rr]^{\pi_{p}^{b}\circ\mathfrak{j}^{a,b}_{p}}\ar@{->}[d]_{\cong}&& \mathcal{H}^{b}_{p}\ar@{->}[d]^{\cong}\\
H^{a}_{p}(\mathcal{P};\mathbb{R})\ar@{->}[rr]^{H_{p}(\mathfrak{j}^{a,b}_{\ast})}&& H^{b}_{p}(\mathcal{P};\mathbb{R})
}
\end{equation*} For any $x\in \mathcal{H}^{a}_{p}$, we have \begin{equation*}
H_{p}(i^{b})\circ\pi_{p}^{b}\circ\mathfrak{j}^{a,b}_{p}(x)=[\pi_{p}^{b}\circ\mathfrak{j}^{a,b}_{p}(x)]. \end{equation*} On the other hand, we obtain \begin{equation*}
H_{p}(\mathfrak{j}^{a,b}_{\ast})\circ H_{p}(i^{a})(x)=H_{p}(\mathfrak{j}^{a,b}_{\ast}\circ i^{a})(x)=[x]. \end{equation*} By Eq. (\ref{equation:decom}), we can write \begin{equation*}
\mathfrak{j}^{a,b}_{p}(x)=\pi_{p}^{b}\circ\mathfrak{j}^{a,b}_{p}(x)+d^{b}_{\ast+1}z_{1} \end{equation*} for some $z_{1}\in \Omega_{\ast+1}^{b}$. It follows that $H_{p}(i^{b})\circ\pi_{p}^{b}\circ\mathfrak{j}^{a,b}_{p}(x)=H_{p}(\mathfrak{j}^{a,b}_{\ast})\circ H_{p}(i^{a})(x)$. Thus the above diagram is commutative. The desired result follows.
\end{proof} Theorem \ref{theorem:isomorphism} says that the $(a,b)$-persistent homology coincides with the $(a,b)$-persistent harmonic space. Or more precisely, the homology and the harmonic space possess the same persistence.
At the end of this section, it is worth noting that a hypergraph homology can be regarded as a hyperdigraph homology with trivial directions, and therefore, the definitions and results discussed above are also applicable to topological hypergraphs. Consequently, we do not provide a redundant description of persistent hypergraph Laplacians in this context.
\subsection{Volume-based filtration} In this section, our focus is on constructing the persistence of point sets using the topological hyperdigraph model, which involves considering the volume of hyperedges. To illustrate this approach, we provide a specific example construction.
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph. Let $f$ be a real-valued function on $\vec{E}$. For each real number $a$, we have a hyperdigraph $\vec{\mathcal{H}}^{f}(a)=(V,\vec{E}(a))$, where $\vec{E}(a)=\{x\in \vec{E}|f(x)\leq a\}$. Thus $\vec{\mathcal{H}}^{f}:(\mathbb{R},\leq)\to \vec{\mathbf{H}}\mathbf{yper},a\mapsto \vec{\mathcal{H}}^{f}(a)$ is a persistence hyperdigraph. Then we have the persistent hyperdigraph homology of the function $f$ on $\vec{\mathcal{H}}$ given by
\begin{equation*}
H_{p}^{a,b}(\vec{\mathcal{H}}^{f};\mathbb{R})={\mathrm{Im}\hspace{0.1em}}(H_{p}(\vec{\mathcal{H}}^{f}(a);\mathbb{R})\to H_{p}(\vec{\mathcal{H}}^{f}(b);\mathbb{R})).
\end{equation*}
The corresponding persistent hyperdigraph Laplacian for $f$ on $\vec{\mathcal{H}}$ can be built in a similar way.
Now, let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph such that the vertex set $V$ is a set of points embedded in the Euclidean space $\mathbb{R}^{n}$.
For each directed hyperedge $e=(v_{0},v_{1},\dots,v_{p})$, let $A=(v_{1}-v_{0},v_{2}-v_{0},\dots,v_{p}-v_{0})$. We have a volume given by
\begin{equation*}
{\mathbf{vol}\hspace{0.1em}}(e)=\frac{1}{p!}\sqrt{|\det(A^{T}A)|},\quad p\geq 1.
\end{equation*}
We make the convention that ${\mathbf{vol}\hspace{0.1em}} (\{v_{0}\})=0$ for any $v_{0}\in V$.
Then $f(e)=\left(p!{\mathbf{vol}\hspace{0.1em}}(e)\right)^{\frac{1}{p}}=|\det(A^{T}A)|^{\frac{1}{2p}}$ is a real-valued function defined on the set $\mathbf{P}(V)$. Here, we set $f(\{v_{0}\})=0$ for any $v_{0}\in V$. The volume-based $(a,b)$-persistent hyperdigraph homology is given by
\begin{equation*}
H_{p}^{a,b}(\vec{\mathcal{H}}^{f};\mathbb{R})={\mathrm{Im}\hspace{0.1em}}(H_{p}(\vec{\mathcal{H}}^{f}(a);\mathbb{R})\to H_{p}(\vec{\mathcal{H}}^{f}(b);\mathbb{R})).
\end{equation*}
The $p$-th $(a,b)$-persistent hyperdigraph Laplacian $\Delta_{p}^{a,b}:\Omega_{p}(\vec{\mathcal{H}}^{f}(a);\mathbb{R})\to \Omega_{p}(\vec{\mathcal{H}}^{f}(a);\mathbb{R})$ for $\vec{\mathcal{H}}^{f}$ is given by
\begin{equation*}
\Delta_{p}^{a,b}= d_{p+1}^{a,b}\circ ( d_{p+1}^{a,b})^{\ast}+( d_{p}^{a})^{\ast}\circ d_{p}^{a}.
\end{equation*}
Here, $d_{p}^{a}$ and $d_{p}^{a,b}$ are defined as in Section \ref{section:persistence}.
\begin{example}\label{example:volume}
Consider the hyperdigraph $\vec{\mathcal{H}}=(V,\vec{E})$ such that $V=\{v_{0},v_{1},v_{2}\}$ with $v_{0}=(0,0),v_{1}=(1,2),v_{2}=(2,1)$ in the Euclidean space $\mathbb{R}^{2}$ and
\begin{equation*}
\vec{E}=\{(v_{0}),(v_{1}),(v_{2}),(v_{0}v_{1}),(v_{1}v_{2}),(v_{0}v_{2}),(v_{0}v_{1}v_{2})\}.
\end{equation*}
It follows that
\begin{equation*}
f((v_{0},v_{1}))=\sqrt{5},\quad f((v_{0},v_{2}))=\sqrt{5},\quad f((v_{1},v_{2}))=\sqrt{2},\quad f((v_{0},v_{1},v_{2}))=\sqrt{3}.
\end{equation*}
Then we have a filtration of hyperdigraphs given by
\begin{eqnarray*}
\vec{\mathcal{H}}^{f}(0) &=& (V,\{(v_{0}),(v_{1}),(v_{2})\}), \\
\vec{\mathcal{H}}^{f}(\sqrt{2}) &=& (V,\{(v_{0}),(v_{1}),(v_{2}),(v_{1}v_{2})\}), \\
\vec{\mathcal{H}}^{f}(\sqrt{3}) &=& (V,\{(v_{0}),(v_{1}),(v_{2}),(v_{1}v_{2}),(v_{0}v_{1}v_{2})\}), \\
\vec{\mathcal{H}}^{f}(\sqrt{5}) &=& (V,\{(v_{0}),(v_{1}),(v_{2}),(v_{0}v_{1}),(v_{1}v_{2}),(v_{0}v_{2}),(v_{0}v_{1}v_{2})\}).
\end{eqnarray*}
\begin{table}
\centering
\caption{The Betti numbers and spectra for the volume-based filtration of hyperdigraphs in Example \ref{example:volume}}\label{table:volume}
\begin{tabular}{c|c|c|c|c|c|c}
\hline
\multirow{2}{0.4in}{$\mathcal{H}^{f}(t)$} &\multicolumn{3}{c|}{Betti numbers} & \multicolumn{3}{c}{Spectra} \\
& $\beta_{0}$ & $\beta_{1}$ & $\beta_{2}$ & $p=0$ & $p=1$ & $p=2$ \\
\hline
$t=0$ & 3 & 0 & 0 & \{0,0,0\} & none & none \\
$t=\sqrt{2}$ & 2 & 0 & 0 & \{0,0,2\} & \{2\} & none \\
$t=\sqrt{3}$ & 2 & 0 & 0 & \{0,0,2\} & \{2\} & none \\
$t=\sqrt{5}$ & 1 & 0 & 0 & \{0,3,3\} & \{3,3,3\} & \{3\} \\
\hline
\end{tabular}
\end{table}
By a straightforward calculation, we obtain the Betti numbers and the spectra for the volume-based filtration of hyperdigraphs in Table \ref{table:volume}.
\end{example}
\subsection{Distance-based filtration}
In this section, we will consider the distance-based filtration of a given hyperdigraph embedded in the Euclidean space. This construction is similar to the persistence on the Vietoris-Rips complex. The application of this work is mainly based on the distance-based filtration.
Let $\vec{\mathcal{H}}=(V,\vec{E})$ be a hyperdigraph such that $V$ is a set of points in the Euclidean space. For each real number $a\in \mathbb{R}$, we set $\mathcal{P}(a)=(V,\vec{E}(a))$, where
\begin{equation*}
\vec{E}(a)=\{x\in \vec{E}|\text{ the distance between every pair of points in $x$ is at most $a$}\}.
\end{equation*}
Then $\mathcal{P}:(\mathbb{R},\leq)\to \vec{\mathbf{H}}\mathbf{yper}^{\hookrightarrow},a\mapsto \mathcal{P}(a)$ is a persistence hyperdigraph. Similarly, the $(a,b)$-persistent hyperdigraph homology of $\vec{\mathcal{H}}$ is given by
\begin{equation*}
H_{p}^{a,b}(\vec{\mathcal{H}};\mathbb{R})={\mathrm{Im}\hspace{0.1em}}(H_{p}(\mathcal{P}(a);\mathbb{R})\to H_{p}(\mathcal{P}(b);\mathbb{R})).
\end{equation*}
Moreover, we can define the $p$-th $(a,b)$-persistent hyperdigraph Laplacian $\Delta_{p}^{a,b}:\Omega_{p}(\mathcal{P}(a);\mathbb{R})\to \Omega_{p}(\mathcal{P}(a);\mathbb{R})$ by
\begin{equation*}
\Delta_{p}^{a,b}= d_{p+1}^{a,b}\circ ( d_{p+1}^{a,b})^{\ast}+( d_{p}^{a})^{\ast}\circ d_{p}^{a}.
\end{equation*}
Here, $d_{p}^{a}$ and $d_{p}^{a,b}$ are defined as in Section \ref{section:persistence}. Choose two families of standard orthogonal bases of $\Omega_{p}(\mathcal{P}(a);\mathbb{R})$ and $\Omega_{p+1}^{a,b}$ for $p\geq 0$, respectively. Here, $\Omega_{p+1}^{a,b}=\{x\in\Omega_{p+1}(\mathcal{P}(b);\mathbb{R})|\partial^{b}_{p+1} x\in \Omega_{p}(\mathcal{P}(a);\mathbb{R})\}$. Let $B_{p}^{a}$ and $B_{p}^{a,b}$ be the representation matrices of $d_{p}^{a}$ and $d_{p}^{a,b}$, respectively, with respect to the chosen standard bases. We have the representation matrix of $\Delta^{a,b}_{p}$ given by
\begin{equation*}
L_{p}^{a,b}= (B_{p+1}^{a,b})^{T}\circ B_{p+1}^{a,b}+B_{p}^{a}\circ (B_{p}^{a})^{T}.
\end{equation*}
The corresponding Betti number $\beta_{p}^{a,b}$ is equal to the number of zero eigenvalues of $L_{p}^{a,b}$. Moreover, the spectra can be arranged in an ascending order as
\begin{equation*}
\mathbf{Spec}(L_{p}^{a,b})=\{\lambda_{p}^{a,b}(1),\lambda_{p}^{a,b}(2),\dots,\lambda_{p}^{a,b}(n)\}.
\end{equation*}
In particular, the Fiedler value $\lambda_{p}^{a,b}(2)$, the second smallest Laplacian eigenvalue of $L_{p}^{a,b}$, is an important feature in various fields.
\begin{figure}\label{figure:persistence_example}
\end{figure}
As shown in Figure \ref{figure:persistence_example}, the hypergraph ($\mathcal{H}$) and the hyperdigraph ($\mathcal{\vec{H}}$) are defined on the same vertex set $V=\{0, 1, 2, 3, 4, 5\}$ in the Euclidean space. The distance between the $i$th and $j$th vertices is denoted by $D_{ij}$. Here, we have $D_{45}=D_{12}=6$, $D_{05}=D_{01}=D_{23}=D_{34}=\sqrt{5}$, and $D_{24}=D_{15}=4$. The largest hypergraph ($\mathcal{H}_5$) and the largest hyperdigraph ($\mathcal{\vec{H}}_5$) are predefined in this example. The hyperedge set of $\mathcal{H}_5$ is the same as that in Example \ref{example:hypergraph2}. The $\mathcal{H}_1, \mathcal{H}_2,\dots,\mathcal{H}_5$ in Figure \ref{figure:persistence_example}{\bf a} represent the hypergraphs along the distance-based filtration, where the corresponding filtration parameters are 0, $\sqrt{5}$, 4, 6, and $\sqrt{53}$, respectively. The $\mathcal{\vec{H}}_1,\mathcal{\vec{H}}_2, \dots , \mathcal{\vec{H}}_5$ in Figure \ref{figure:persistence_example}{\bf b} represent the hyperdigraphs along the distance-based filtration. The filtration parameters for $\mathcal{\vec{H}}_n$ ($n=1,2,3,4,5$) are the same as the corresponding filtration parameters for $\mathcal{H}_n$.
\begin{figure}\label{figure:persistence_betti_lambda}
\end{figure}
Figure \ref{figure:persistence_betti_lambda} illustrates persistent hypergraph Laplacians and persistent hyperdigraph Laplacians for the objects given in Figure \ref{figure:persistence_example}{\bf a} and Figure \ref{figure:persistence_example}{\bf b}. For the 6-vertex system in this example, $\beta_0$ and $\beta_2$ are always the same throughout the filtration of the persistent hypergraph homology and the persistent hyperdigraph homology, with $\beta_0$ decreasing as the filtration parameter increases and $\beta_2$ always being 0. The $\beta_1$ of the persistent hypergraph homology is always 0 throughout, while when the filtration parameter starts at 6, the $\beta_1$ of the persistent hyperdigraph homology changes from 0 to 6 and persists to the end, which means that 1-dimensional cycles form on the corresponding hyperdigraph. The non-harmonic spectra of the two types of persistent topological Laplacians are shown in the bottom panel of Figure \ref{figure:persistence_betti_lambda}. Except for $\lambda_2$, both $\lambda_0$ and $\lambda_1$ produce different eigenvalues when the parameter starts from 6, and the corresponding hypergraph and hyperdigraph are $\mathcal{H}_4$ and $\mathcal{\vec{H}}_4$, respectively. For the persistent hypergraph Laplacian, when the filtration parameter is 6, the 1-hyperedge $\{1, 2\}$ is newly generated, and for the persistent hyperdigraph Laplacian, the directed 1-hyperedges $(12)$ and $(21)$ and the directed 2-hyperedge $(012)$ are generated. It is worth noting that for the persistent hyperdigraph Laplacian, $(12)$ and $(21)$ are two different directed 1-hyperedges, while the persistent hypergraph Laplacian has only the 1-hyperedge $\{1, 2\}$, which indicates that the persistent hyperdigraph Laplacian can distinguish vertex directions and contains richer information.
\begin{figure}\label{figure:persistence_both_compare}
\end{figure}
To demonstrate that persistent topological Laplacians provide more information than persistent topological homologies, we present another example in Figure \ref{figure:persistence_both_compare}{\bf a} and Figure \ref{figure:persistence_both_compare}{\bf c}. We use a system of 6 vertices with positive hexagons, where the side lengths are $D_{01}=D_{12}=D_{23}=D_{34}=D_{45}=D_{50}=2$. The largest hypergraph homology ($\mathcal{H}_4$) and hyperdigraph homology ($\vec{\mathcal{H}}_4$) of the system are consistent with the ones in Figure \ref{figure:persistence_example}. The persistent hypergraph Laplacian features of dimensions 0, 1, and 2 are shown in Figure \ref{figure:persistence_both_compare}{\bf b}, where the green line shows the Betti numbers of the corresponding filtration parameter, and the orange color shows the smallest eigenvalue of the non-harmonic spectra for the corresponding filtration parameter. We observe that when the filtration parameter is $2\sqrt{2}$, the corresponding Betti numbers are unable to distinguish the changes from $\mathcal{H}_2$ to $\mathcal{H}_3$, while the $\lambda_{0}$, $\lambda_{1}$, and $\lambda_{2}$ can identify the differences. This suggests that using only the harmonic spectra, i.e., the Betti numbers, can sometimes overlook changes within the structure during the filtration process, while the non-harmonic spectra can capture the variations more sensitively. We also find a similar phenomenon for persistent hyperdigraph Laplacian features in Figure \ref{figure:persistence_both_compare}{\bf d}. When the filtration parameter increases to $2\sqrt{2}$, the directed edges $(5,1)$ and $(2,4)$ are formed. However, the corresponding Betti numbers cannot reflect the changes in connectivity, while $\lambda_{0}$, $\lambda_{1}$, and $\lambda_{2}$ can detect the variations.
\section{Application}\label{section:application}
In this section, we demonstrate the application of persistent hyperdigraph Laplacians to analyze a protein-ligand system. Specifically, we use the protein-ligand complex structure of 1a99 from the Protein Data Bank (PDB) as a case study. As depicted in Figure \ref{figure:persistent_dihypergraph}{\bf b}, the complex includes a small ligand, C$_4$H$_{14}$N$_2^{2+}$. To facilitate visualization of the protein-ligand binding, we limit our analysis to the C atoms from the protein located within 4 {\AA} of the ligand, as illustrated in Figure \ref{figure:persistent_dihypergraph}{\bf a}.
\begin{figure}
\caption{Illustration of the persistent hyperdigraph Laplacian analysis of a protein-ligand complex (PDB ID: 1a99).
{\bf a} Illustration of a filtration-induced topological hyperdigraph. Only C-atoms within 4 {\AA} from the ligand are considered for the protein structure (brown dots). The green line segment indicates the directed 1-hyperedges and the orange line indicates the directed 2-hyperedges.
{\bf b} The three dimensional structure of the protein-ligand complex.
{\bf c} Betti numbers $\beta_n$ ($n$=0,1,2) and the smallest eigenvalues of non-harmonic spectra $\lambda_n$ ($n$=0,1,2) of persistent hyperdigraph Laplacian for the protein-ligand complex.
}
\label{figure:persistent_dihypergraph}
\end{figure}
Figure \ref{figure:persistent_dihypergraph} depicts the analysis of a protein-ligand complex (PDB ID: 1a99) using persistent hyperdigraph Laplacians. For this analysis, we focus only on the interactions between the ligand and the surrounding carbon atoms from the protein, represented by directed edges. The direction of each edge is determined by the electronegativity of the atoms involved. Specifically, an atom with lower electronegativity points to an atom with higher electronegativity, while two atoms with the same electronegativity are connected by two edges with opposite directions. For the protein-ligand complex, we can determine the following atomic directions: H(2.2) $\rightarrow$ S(2.44) $\rightarrow$ C(2.5) $\rightarrow$ N(3.07) $\rightarrow$ O(3.5), where the values in parentheses denote the electronegativities of the atoms.
To better capture the interactions between the protein and ligand, we exclude the interactions within the protein. However, we include the internal covalent bonding relationships of the ligand, as shown in the first subfigure of Figure \ref{figure:persistent_dihypergraph}{\bf a}. The persistent hyperdigraph Laplacian analysis then reveals important structural changes in the complex, as demonstrated by the persistent Betti numbers and non-harmonic persistent eigenvalues shown in Figure \ref{figure:persistent_dihypergraph}{\bf c} and \ref{figure:persistent_dihypergraph}{\bf d}. These results suggest that non-harmonic persistent spectra are more informative than persistent Betti numbers or harmonic persistent spectra in capturing the complex's structural changes. Therefore, persistent hyperdigraph Laplacians provide a more powerful tool for analyzing data, particularly when combined with machine learning and deep learning algorithms, as previously discussed in the literature \cite{chen2022persistent}.
Directed 0-hyperedges in the figure are discrete points, while directed 1-hyperedges are given by the edges with green backgrounds and directed 2-hyperedges are labeled by edges with orange backgrounds. It is worth noting that directed 1-hyperedges represent two different vertices with fixed orders and directed 2-hyperedges represent three different vertices with fixed orders. Thus, in our persistent hyperdigraph Laplacian theory, a closed loop consisting of two vertices, e.g., 1$\rightarrow$2$\rightarrow$1, will be treated as a directed 1-hyperedge. This is where it differs from persistent path Laplacian theory \cite{wang2023persistent}, in which a closed loop can generate an arbitrarily high-dimensional path. Figure \ref{figure:persistent_dihypergraph}{\bf a} illustrates the filtration-induced persistent hyperdigraph $\mathcal{\vec{H}}$, i.e., $\mathcal{\vec{H}}_1^0$, $\mathcal{\vec{H}}_2^{2.5}$, $\mathcal{\vec{H}}_3^{2.8}$, $\mathcal{\vec{H}}_4^{2.9}$, $\mathcal{\vec{H}}_5^{3.0}$, $\mathcal{\vec{H}}_6^{3.2}$, $\mathcal{\vec{H}}_7^{3.5}$, and $\mathcal{\vec{H}}_8^{4.0}$, where the superscripts are the corresponding filtration parameters.
Figures \ref{figure:persistent_dihypergraph}{\bf c} and \ref{figure:persistent_dihypergraph}{\bf d} respectively show the persistent Betti numbers and non-harmonic persistent eigenvalues obtained from persistent hyperdigraph Laplacians. In this analysis, the smallest non-zero eigenvalues of Laplacian matrices were selected to represent the non-harmonic spectral information. The results demonstrate that persistent Betti numbers and non-harmonic persistent eigenvalues exhibit completely different features. For example, at a filtration parameter of about 2.5 (corresponding to the first and second diagrams in Figure \ref{figure:persistent_dihypergraph}{\bf a}), the ligand connects to the C atoms in the protein, forming new directed 1-hyperedges. At this point, the number of directed 0-hyperedges of the complex decreases, and persistent Betti numbers $\beta_0$ decrease. However, the higher dimensional Betti numbers $\beta_1$ and $\beta_2$ remain unchanged. In contrast, $\lambda_0$ and $\lambda_1$ of the non-harmonic persistent spectra decrease significantly, indicating the formation of large connected complexes. Moreover, the range of filtration values over which $\beta_2$ changes is smaller than that of $\lambda_2$.
These results suggest that non-harmonic persistent spectra are more informative than persistent Betti numbers or harmonic persistent spectra. Therefore, persistent hyperdigraph Laplacians can better capture structural changes than persistent hyperdigraph homology.
\section{Conclusion} Since hypergraphs are a generalization of graphs, it is natural to consider hyperdigraphs as a generalization of directed graphs (digraphs). However, a problem arises when trying to embed topological structures into hyperdigraphs.
In this work, we introduce the concept of hyperdigraph homology, or topological hyperdigraphs, to embed topological information into hyperdigraphs. The intrinsic vertex order of directed hyperedges in topological hyperdigraphs allows for the distinction of different sets of vertices, making it possible to distinguish all possible permutations of a given vertex set. As a result, topological hyperdigraphs can be viewed as a generalization of topological hypergraphs. We also develop a method to reduce a hyperdigraph homology to a hypergraph homology.
We introduce a new set of Laplacian methods called topological hyperdigraph Laplacians, which serve as a generalization of hyperdigraph homology. These Laplacians provide both harmonic spectra (related to topological invariants or Betti numbers) and non-harmonic spectra.
Furthermore, we propose persistent hyperdigraph homology and persistent hyperdigraph Laplacians through filtration. The persistent hyperdigraph Laplacians not only return the persistent topological invariants of persistent hyperdigraph homology but also provide non-harmonic persistent spectra to capture the homotopic shape evolution of the data at various scales. We demonstrate the usefulness of these proposed topological methods through numerous examples.
Finally, we explore the application of persistent hyperdigraph Laplacians by characterizing the interactions between a protein and a ligand. We believe that these proposed methods, including hyperdigraph homology, topological hyperdigraph Laplacians, persistent hyperdigraph homology, and persistent hyperdigraph Laplacians, provide a powerful set of new tools for topological data analysis (TDA). We anticipate that combining these methods with machine learning and deep learning algorithms will have a significant impact on data science.
\section{Acknowledgments} This work was supported in part by NIH grants R01GM126189 and R01AI164266, NSF grants DMS-2052983, DMS-1761320, and IIS-1900473, NASA grant 80NSSC21M0023, MSU Foundation, Bristol-Myers Squibb 65109, and Pfizer.
The work of Liu and Wu was supported by Natural Science Foundation of China (NSFC grant no. 11971144), High-level Scientific Research Foundation of Hebei Province, the start-up research fund from BIMSA.
\end{document}
\begin{document}
\maketitle
\begin{abstract} We study the Rees algebra of a perfect Gorenstein ideal of codimension 3 in a hypersurface ring. We provide a minimal generating set of the defining ideal of these rings by introducing a modified Jacobian dual and applying a recursive algorithm. Once the defining equations are known, we explore properties of these Rees algebras such as Cohen-Macaulayness and Castelnuovo-Mumford regularity. \end{abstract}
\section{Introduction}\label{intro}
In this paper we consider the Rees algebra of a particular class of ideals and explore the properties of these rings. For $I =(f_1,\ldots,f_n)$ an ideal of a Noetherian ring $R$, the Rees algebra of $I$ is the graded subalgebra $\mathcal{R}(I) = R[f_1 t,\ldots, f_n t] = R\oplus It \oplus I^2t^2\oplus \cdots \subset R[t]$. Geometrically, $\mathcal{R}(I)$ is the homogeneous coordinate ring of the blowup of $\mathop{\rm Spec}(R)$ along the closed subscheme $V(I)$. There is a natural $R$-algebra epimorphism $\Psi:R[T_1,\ldots,T_n] \rightarrow \mathcal{R}(I)$ given by $\Psi(T_i)= f_it$. The kernel $\mathcal{J} = \ker \Psi$ is the \textit{defining ideal} of $\mathcal{R}(I)$ and is of great interest as $\Psi$ induces an isomorphism $\mathcal{R}(I) \cong R[T_1,\ldots,T_n] / \mathcal{J}$. The search for a set of minimal generators of $\mathcal{J}$, the \textit{defining equations} of $\mathcal{R}(I)$, has become a fundamental problem and has been studied to great extent in recent years (see e.g. \cite{MU,KPU2,Morey,UV,KM,Vasconcelos,HSV1,Johnson,CHW,KPU1,BCS,Weaver}).
Although this problem has been well-studied, the defining equations of Rees algebras are known in few cases. As $\mathcal{J}$ encodes all of the polynomial relations amongst a generating set of $I$, a complete solution requires some knowledge regarding the structure of $I$ and its syzygies. Much work has been accomplished for perfect ideals of grade two, which are generated by the maximal minors of an almost square matrix by the Hilbert-Burch theorem \cite{Eisenbud}. These ideals and their Rees algebras have been studied under a multitude of various assumptions (see e.g. \cite{Morey, MU,BM,CHW,KM,Lan1,Lan2,Weaver}). Furthermore, perfect Gorenstein ideals of grade three and their Rees algebras have been a topic of great interest in recent years. Similar to perfect ideals of grade two, these ideals have prescribed structures and resolutions. These ideals are generated by the submaximal Pfaffians of a square alternating matrix by the Buchsbaum-Eisenbud structure theorem \cite{BE}. The Rees algebras of these ideals have been studied in different settings using a variety of techniques (see e.g. \cite{Johnson,Morey,KPU2}).
In this paper, we consider the Rees algebra of a perfect Gorenstein ideal of grade three in a \textit{hypersurface ring}. Whereas the defining equations of Rees rings have been studied to great length, most results within the literature require that the ideal in question belongs to $k[x_1,\ldots,x_d]$, a polynomial ring over a field $k$. There is strong geometric motivation to consider Rees algebras of ideals in this new setting. As the Rees ring is the algebraic realization of the blowup of $\mathop{\rm Spec}(R)$ along $V(I)$, altering the ring is reflected by the blowup of a different scheme. There has been recent success in the way of determining the equations defining Rees algebras of perfect ideals with grade two in hypersurface rings in \cite{Weaver}. Expanding upon this, we consider perfect Gorenstein ideals of grade three in these rings and study the defining equations of their Rees algebras.
The objective of this paper is to extend one of the classical results within the study of Rees algebras to the setting of a hypersurface ring. In \cite{Morey}, Morey considered a linearly presented perfect Gorenstein ideal of grade three in $k[x_1,\ldots,x_d]$. The defining equations of its Rees ring were produced and it was shown that there is a single nontrivial equation, which can be identified as the greatest common divisor of the maximal minors of a Jacobian dual matrix. In our setting, we show that this fails to be the case, but that a similar phenomenon occurs upon modification and repetition. The main results \Cref{mainresult} and \Cref{depth}(a) are rephrased below.
\begin{theorem} Let $S=k[x_1,\ldots,x_{d+1}]$ for $k$ an infinite field, $f\in S$ a homogeneous polynomial of degree $m$, and $R=S/(f)$. Let $I$ be a perfect Gorenstein $R$-ideal of grade 3 with alternating presentation matrix $\varphi$ consisting of linear entries. Let $\overline{\,\cdot\,}$ denote images modulo $(f)$. If $I$ satisfies $G_d$, $I_1(\varphi) = \overline{(x_1,\ldots,x_{d+1})}$, and $\mu(I) = d+1$, then the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$ is $$\mathcal{J} =\overline{\mathcal{L}_m + \big(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)\big)}$$ where the pair $(\mathcal{B}_m,\mathcal{L}_m)$ is the $m^{\text{th}}$ gcd-iteration of $(B,\mathscr{L})$, for $B$ a modified Jacobian dual with respect to $\underline{x}=x_1,\ldots,x_{d+1}$ and $\mathscr{L}=(\underline{x}\cdot B)$. Additionally, $\mathcal{R}(I)$ is almost Cohen-Macaulay and is Cohen-Macaulay if and only if $m=1$. \end{theorem}
Traditionally, one searches for the nontrivial equations of Rees algebras by using a \textit{Jacobian dual} matrix corresponding to a presentation matrix of the ideal. However, in the setting above, the Jacobian dual is insufficient and such a matrix must be altered. Repeating the construction in \cite{Weaver}, we introduce a \textit{modified Jacobian dual} matrix. A recursive algorithm of \textit{gcd-iterations} is then developed in order to produce the equations of $\mathcal{J}$. This iterative procedure is similar to the methods used in \cite{BM,CHW,Weaver}.
We now describe how this paper is organized. In \Cref{prelims} we briefly review the preliminary material on Rees algebras of ideals necessary for the scope of this paper. Additionally, we restate the result of Morey \cite[4.3]{Morey} and describe some properties of the Jacobian dual of an alternating matrix. In \Cref{hypring} we begin the study of the Rees algebra $\mathcal{R}(I)$, for $I$ a linearly presented perfect Gorenstein ideal of grade three in a hypersurface ring $R=S/(f)$. We introduce a perfect Gorenstein ideal $J$ of grade three in the polynomial ring $S$ and compare the Rees algebras $\mathcal{R}(I)$ and $\mathcal{R}(J)$. We also introduce the modified Jacobian dual matrix. In \Cref{iterationssec} we introduce the recursive algorithm of \textit{gcd-iterations}, which produces equations belonging to the defining ideal. We then give a sufficient condition for when the defining ideal agrees with the ideal obtained from this algorithm. In \Cref{defidealsec} we show that this condition is satisfied and that the method of gcd-iterations produces a \textit{minimal} generating set of $\mathcal{J}$. Properties such as Cohen-Macaulayness and Castelnuovo-Mumford regularity of $\mathcal{R}(I)$ are then studied.
\section{ Preliminaries}\label{prelims}
We now introduce the necessary conventions and preliminary information required for this paper.
\subsection{Rees Algebras of Ideals}
Let $R$ be a Noetherian ring and $I=(f_1, \ldots,f_n)$ an $R$-ideal of positive grade. There is a natural homogeneous epimorphism of $R$-algebras $$\Psi:\, R[T_1,\ldots,T_n] \longrightarrow \mathcal{R}(I)$$ given by $T_i\mapsto f_it$. This map induces an isomorphism $$\mathcal{R}(I) \cong R[T_1,\ldots,T_n]/\mathcal{J}$$ for $\mathcal{J} = \ker \Psi$, which is the \textit{defining ideal} of $\mathcal{R}(I)$. Additionally, any minimal generator of $\mathcal{J}$ is called a \textit{defining equation} of $\mathcal{R}(I)$. The map $\Psi$ factors through the symmetric algebra $\mathop{\rm Sym}(I)$ via the natural map $$\sigma:\, R[T_1,\ldots,T_n] \longrightarrow \mathop{\rm Sym}(I)$$ where the kernel $\mathscr{L}=\ker \sigma$ can be described easily from a presentation of $I$. Indeed, if $R^m\overset{\varphi}{\rightarrow}R^n \rightarrow I\rightarrow 0$ is any presentation of $I$, then $\mathscr{L}$ is generated by the linear forms $\ell_1,\ldots,\ell_m$ where $$[T_1\ldots T_n]\cdot \varphi = [\ell_1 \ldots \ell_m].$$
We note that if $R$ is a standard graded ring, each of the maps and ideals above are bihomogeneous. As $\Psi$ factors through $\sigma$, we have the containment of ideals $\mathscr{L}\subseteq \mathcal{J}$. This containment is often strict, but if $\mathscr{L} = \mathcal{J}$ we say that $I$ is of \textit{linear type}. As mentioned, $\Psi$ factors through $\sigma$, hence there is a natural epimorphism $\mathop{\rm Sym}(I) \rightarrow \mathcal{R}(I)$ with kernel $\mathcal{Q} =\mathcal{J}/\mathscr{L}$. This ideal $\mathcal{Q}$ is typically used to measure how greatly $\mathop{\rm Sym}(I)$ and $\mathcal{R}(I)$ differ when $I$ is not an ideal of linear type.
We now introduce a common source of higher-degree generators of $\mathcal{J}$. With $R^m\overset{\varphi}{\rightarrow}R^n \rightarrow I\rightarrow 0$ a presentation of $I$ as before, there exists an $r\times m$ matrix $B(\varphi)$ consisting of linear entries in $R[T_1,\ldots,T_n]$ with $$[T_1 \ldots T_n] \cdot \varphi= [x_1\ldots x_r]\cdot B(\varphi) $$ where $(x_1,\ldots,x_r)$ is an ideal containing the entries of $\varphi$. The matrix $B(\varphi)$ is called a \textit{Jacobian dual} of $\varphi$, with respect to the sequence $x_1,\ldots,x_r$. Notice that $[x_1\ldots x_r]\cdot B(\varphi) = [\ell_1\ldots \ell_m]$, where $\ell_1,\ldots,\ell_m$ are the equations defining $\mathop{\rm Sym}(I)$ as before. We note that this matrix is not unique in general. However, if $R=k[x_1,\ldots,x_d]$ and the entries of $\varphi$ are linear, there is a Jacobian dual $B(\varphi)$, with respect to $x_1,\ldots,x_d$, consisting of linear entries in $k[T_1,\ldots,T_n]$ which is unique.
The ideal $I$ is said to satisfy the condition $G_s$ if $\mu(I_\mathfrak{p}) \leq \mathop{\rm dim} R_\mathfrak{p}$ for all $\mathfrak{p} \in V(I)$ with $\mathop{\rm dim} R_\mathfrak{p}\leq s-1$. Equivalently, $I$ satisfies $G_s$ if and only if $\mathop{\rm ht} {\rm Fitt}_j(I)\geq j+1$ for all $1\leq j\leq s-1$, where ${\rm Fitt}_j(I) = I_{n-j}(\varphi)$ is the $j^{\text{th}}$ \textit{Fitting ideal} of $I$ for any presentation $\varphi$ as above. If $I$ satisfies $G_s$ for all $s$, $I$ is said to satisfy $G_\infty$.
Lastly, we introduce two algebras related to $\mathcal{R}(I)$. The \textit{associated graded ring} of $I$ is $\mathcal{G}(I) =\mathcal{R}(I) \otimes_R R/I\cong \mathcal{R}(I)/I\mathcal{R}(I)$. If $R$ is a local ring with maximal ideal $\mathfrak{m}$ and residue field $k$, the \textit{special fiber ring} of $I$ is $\mathcal{F}(I) = \mathcal{R}(I)\otimes_R k \cong \mathcal{R}(I)/\mathfrak{m} \mathcal{R}(I)$. Its Krull dimension $\ell(I) = \mathop{\rm dim} \mathcal{F}(I)$ is the \textit{analytic spread} of $I$.
\subsection{Perfect Gorenstein Ideals of Grade Three} The Rees algebras of perfect Gorenstein ideals of grade three are a rich source of interesting phenomena and anomalies. These ideals are a natural candidate to study due to their prescribed structures and resolutions. By the Buchsbaum-Eisenbud theorem \cite[2.1]{BE}, these ideals are presented by an alternating matrix. Moreover, these ideals can be generated by the submaximal Pfaffians of such a matrix. We begin by recalling a classic result within the study of equations of Rees algebras due to Morey and we restate it for reference.
\begin{theorem}[{\cite[4.3]{Morey}}]\label{Moreyresult} Let $R=k[x_1,\ldots,x_d]$ for $k$ a field, and let $I$ be a perfect Gorenstein $R$-ideal of grade 3 with alternating presentation matrix $\varphi$ consisting of linear entries. If $I$ satisfies the condition $G_d$ and $\mu(I)=d+1$, then the defining ideal of $\mathcal{R}(I)$ is $$\mathcal{J} = \mathscr{L} + \big(\mathop{\rm gcd} I_d(B(\varphi))\big)$$ where $B(\varphi)$ is the Jacobian dual of $\varphi$ with respect to $\underline{x}=x_1,\ldots,x_d$ and $\mathscr{L} = (\underline{x}\cdot B(\varphi))$. Additionally, $\mathcal{R}(I)$ is Cohen-Macaulay. \end{theorem}
Here $\mathop{\rm gcd} I_d(B(\varphi))$ denotes the greatest common divisor of the $d\times d$ minors of the Jacobian dual $B(\varphi)$, which consists of entries in $k[T_1,\ldots,T_{d+1}]$. Whereas this result does describe a generating set of the defining ideal, for our purposes we will require a more precise description in how this greatest common divisor arises. In order to describe how the minors of the Jacobian dual above factor, we introduce a lemma of Cramer's rule.
\begin{lemma}[{\cite[4.3]{BM}}]\label{crlemma} Let $R$ be a commutative ring, $[a_1\ldots a_r]$ a $1\times r$ matrix, and $M$ an $r\times (r-1)$ matrix with entries in $R$. For $1\leq t\leq r$, let $M_t$ denote the $(r-1)\times (r-1)$ submatrix of $M$ obtained by deleting the $t^{\text{th}}$ row of $M$ and set $m_t=\operatorname{det} M_t$. Then in the ring $R/(\underline{a}\cdot M)$ $$\overline{a_t}\cdot\overline{m_k} = (-1)^{t-k} \overline{a_k}\cdot \overline{m_t}$$ for all $1\leq k,t\leq r$. \end{lemma}
With this, we may describe how the maximal minors of $B(\varphi)$ factor in the setting of \Cref{Moreyresult}.
\begin{proposition}\label{JDminors} With the assumptions of \cref{Moreyresult}, let $B_i$ denote the submatrix obtained by deleting the $i^{\text{th}}$ column of $B(\varphi)$. There exists a polynomial $g \in k[T_1,\ldots, T_{d+1}]$ such that for all $1\leq i \leq d+1$, one has $ \operatorname{det} B_i = (-1)^{i+1} T_i \cdot g$. \end{proposition}
\begin{proof} Writing $\underline{x}=x_1,\ldots,x_d$ and $\underline{T} =T_1,\ldots,T_{d+1}$ for the two sequences of indeterminates, we claim that $B(\varphi) \cdot [\,\underline{T}\,]^t = 0$. As $[\,\underline{x}\,] \cdot B(\varphi) = [\,\underline{T}\,] \cdot \varphi$, we have $$[\,\underline{x}\,] \cdot B(\varphi) \cdot [\,\underline{T}\,]^t = [\,\underline{T}\,] \cdot \varphi \cdot [\,\underline{T}\,]^t =0 $$ since $\varphi$ is an alternating matrix. As $\varphi$ consists of linear entries in $k[x_1,\ldots,x_d]$, the entries of $B(\varphi)$ belong to $k[T_1,\ldots,T_{d+1}]$. Thus it follows that $B(\varphi) \cdot [\,\underline{T}\,]^t = 0$. Now applying \cref{crlemma} to $[\,\underline{T}\,]$ and the transpose of $B(\varphi)$, it follows that $$T_i \cdot (\operatorname{det} B_j) = (-1)^{i-j} \,T_j \cdot (\operatorname{det} B_i)$$ in $k[T_1,\ldots,T_{d+1}]$ for all $1\leq i,j\leq d+1$, and the claim follows. \end{proof}
Notice that the equation in \Cref{JDminors} is precisely the greatest common divisor of the minors of $B(\varphi)$, $g=\mathop{\rm gcd} I_d(B(\varphi))$, as in \Cref{Moreyresult}.
\section{Ideals of Hypersurface Rings}\label{hypring}
We now begin our study of the Rees algebra $\mathcal{R}(I)$, for $I$ a perfect Gorenstein ideal of grade three in a hypersurface ring. We introduce a second ideal $J$ which is also perfect Gorenstein of grade three and is closely related to $I$. We study the relation between the Rees rings $\mathcal{R}(I)$ and $\mathcal{R}(J)$, and their defining ideals.
\begin{setting}\label{setting1} Let $S=k[x_1,\ldots,x_{d+1}]$ for $k$ an infinite field, $f\in S$ a homogeneous polynomial of degree $m\geq 1$, and $R= S/(f)$. Let $I$ be a perfect Gorenstein $R$-ideal of grade 3 with alternating presentation matrix $\varphi$ consisting of linear entries in $R$. Further assume that $I$ satisfies the condition $G_d$, $\mu(I)=d+1$, and $I_1(\varphi) = \overline{(x_1,\ldots,x_{d+1})}$. \end{setting}
Notice that $d$ is necessarily even by \cite[2.2]{BE}. Following the path of \cite{Weaver}, we immediately return to the polynomial ring and produce an $S$-ideal related to $I$, which will also be perfect and Gorenstein of grade three.
\begin{notation}\label{notation1} Let $\overline{\,\cdot\,}$ denote images modulo the ideal $(f)$ and let $\psi$ be a $(d+1)\times (d+1)$ alternating matrix consisting of linear entries in $S$ with $I_1(\psi) =(x_1,\ldots,x_{d+1})$, such that $\varphi = \overline{\psi}$. Writing $[\ell_1 \ldots \ell_{d+1}]= [T_1 \ldots T_{d+1}] \cdot \psi$, we consider the $S[T_1,\ldots,T_{d+1}]$-ideal $\mathscr{L}=(\ell_1,\ldots,\ell_{d+1},f)$. \end{notation}
Certainly, such a matrix $\psi$ exists and we note that it is unique and automatically has $I_1(\psi) =(x_1,\ldots,x_{d+1})$ if $m\geq 2$. If $m=1$, then $\psi$ is not unique, but any such matrix may be chosen.
\begin{proposition}\label{Jlineartype} There exists a perfect Gorenstein $S$-ideal $J$ with grade 3, which is presented by $\psi$. Additionally, $J$ is of linear type. \end{proposition}
\begin{proof} To show that $\psi$ is the presentation matrix of a perfect Gorenstein ideal with grade 3, it suffices to show that $\mathop{\rm ht} \mathop{\rm Pf}_d(\psi) \geq 3$ by \cite[2.1]{BE}. Notice that the image of this ideal in $R$ is exactly the corresponding ideal of Pfaffians of $\varphi$. As the height can only decrease by passing to $R$, we have $\mathop{\rm ht} \mathop{\rm Pf}_d(\psi) \geq \mathop{\rm ht} \overline{\mathop{\rm Pf}_d(\psi)} = \mathop{\rm ht} \mathop{\rm Pf}_d(\varphi)=3$, as $I$ is perfect and Gorenstein of grade 3, using \cite[2.1]{BE}. Thus the first claim follows and such an ideal $J$ exists.
To show that $J$ is of linear type, it suffices to show that $J$ satisfies $G_{\infty}$ by \cite[2.6]{HSV1}. However, as $\mu(J) = d+1 =\mathop{\rm dim} S$, it is enough to show that $J$ satisfies $G_{d+1}$. Recall from \Cref{prelims} that this condition can be interpreted in terms of heights of Fitting ideals. Repeating the previous argument, notice that the images of the Fitting ideals of $J$ in $R$ are the corresponding Fitting ideals of $I$. Moreover, the heights of these ideals can only decrease when passing to $R$. Hence $\mathop{\rm ht} {\rm Fitt}_i(J) \geq \mathop{\rm ht} {\rm Fitt}_i(I) \geq i+1$ for all $1\leq i\leq d-1$, since $I$ satisfies $G_d$. With this, it follows that $J$ satisfies $G_d$ as well. Thus we need only show that the $d^{\text{th}}$ Fitting ideal of $J$ has height at least, and hence equal to, $d+1$ to conclude that $J$ satisfies $G_{d+1}$. However, this ideal is ${\rm Fitt}_d(J) = I_1(\psi) = (x_1,\ldots,x_{d+1})$, which of course has maximal height. \end{proof}
As $J$ is of linear type, notice that the $S[T_1,\ldots,T_{d+1}]$-ideal $(\ell_1,\ldots,\ell_{d+1})$ is precisely the ideal defining $\mathop{\rm Sym}(J) \cong \mathcal{R}(J)$. Moreover, it follows that $\overline{\mathscr{L}}$ is the defining ideal of $\mathop{\rm Sym}(I)$, as $\varphi = \overline{\psi}$. With this, we see that $S[T_1,\ldots,T_{d+1}]/ \mathscr{L} \cong R[T_1,\ldots,T_{d+1}]/\overline{\mathscr{L}} \cong \mathop{\rm Sym}(I)$, hence $\mathscr{L}$ is the ideal defining $\mathop{\rm Sym}(I)$, as a quotient of $S[T_1,\ldots,T_{d+1}]$. Hence there is a clear relation between the $S[T_1,\ldots,T_{d+1}]$-ideals defining $\mathop{\rm Sym}(J)$ and $\mathop{\rm Sym}(I)$, as these ideals differ only by the generator $f$. Naturally one could ask if there is a similar connection between the ideals defining $\mathcal{R}(J)$ and $\mathcal{R}(I)$. Before we answer this, we provide an alternative description of the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$ and then introduce an ideal defining $\mathcal{R}(I)$ as a quotient of $S[T_1,\ldots,T_{d+1}]$.
\begin{proposition}\label{Jasat} With the assumptions of \Cref{setting1} and $\mathscr{L}$ as in \Cref{notation1}, the defining ideal of $\mathcal{R}(I)$ satisfies $\mathcal{J} = \overline{\mathscr{L}:(x_1,\ldots,x_{d+1})^\infty}$. \end{proposition}
\begin{proof} As $I$ satisfies the condition $G_d$, for any non-maximal homogeneous prime $R$-ideal $\mathfrak{p}$, $I_\mathfrak{p}$ satisfies $G_\infty$ as an $R_\mathfrak{p}$-ideal and is hence of linear type by \cite[2.6]{HSV1}. Thus $\mathcal{J}_\mathfrak{p} = \overline{\mathscr{L}}_\mathfrak{p}$ for any such prime ideal $\mathfrak{p}$ and so the quotient $\mathcal{Q} = \mathcal{J}/\overline{\mathscr{L}}$ is supported only at the homogeneous maximal $R$-ideal $\overline{(x_1,\ldots,x_{d+1})}$. Hence $\mathcal{Q}$ is annihilated by some power of $\overline{(x_1,\ldots,x_{d+1})}$, which shows that $\mathcal{J} \subseteq \overline{\mathscr{L}}:\overline{(x_1,\ldots,x_{d+1})}^\infty$. However, we have the containment $\overline{\mathscr{L}}:\overline{(x_1,\ldots,x_{d+1})}^\infty \subseteq \mathcal{J}$ as $\overline{\mathscr{L}} \subseteq \mathcal{J}$ and modulo $\mathcal{J}$, the image of $\overline{(x_1,\ldots,x_{d+1})}$ in $\mathcal{R}(I)$ is an ideal of positive grade. \end{proof}
The statement regarding the grade of $\overline{(x_1,\ldots,x_{d+1})}\mathcal{R}(I)$ in the proof above follows from the well-known correspondence between the associated primes of $R$ and $\mathcal{R}(I)$ \cite{HS}\cite[1.5]{EHU}.
Notice that $\mathcal{Q}=\mathcal{J}/\overline{\mathscr{L}}$, as in the proof of \Cref{Jasat}, is the kernel of the natural bihomogeneous map $\mathop{\rm Sym}(I(\delta))\rightarrow \mathcal{R}(I)$, where $\delta = \frac{d}{2}$ is the degree of the generators of $I$ (recall that $d$ is even). Writing $\mathfrak{m} = (x_1,\ldots,x_{d+1})$ for the homogeneous maximal $S$-ideal, the description of $\mathcal{J}$ in \Cref{Jasat} shows that $\mathcal{Q}\cong H_{\overline{\mathfrak{m}}}^0\big(\mathop{\rm Sym}(I(\delta))\big)$, the zeroth local cohomology module of $\mathop{\rm Sym}(I(\delta))$ with support in $\overline{\mathfrak{m}}$. Thus $\mathcal{Q}$ is concentrated in only finitely many degrees and so we may use the tools developed in \cite{KPU3} to bound these degrees.
\begin{proposition}\label{IndexOfSat} With the assumptions of \Cref{setting1}, $\mathcal{J}=\overline{\mathscr{L}:(x_1,\ldots,x_{d+1})^m}$. \end{proposition}
\begin{proof} It is clear that $\overline{\mathscr{L}:(x_1,\ldots,x_{d+1})^m} \subseteq \mathcal{J}$, following \Cref{Jasat}, hence we need only show the reverse containment. In order to show that $\overline{\mathfrak{m}}^m \mathcal{J} \subseteq \overline{\mathscr{L}}$, it suffices to show that $\overline{\mathfrak{m}}^m \mathcal{Q} =0$, where $\mathcal{Q}$ is as above. As mentioned, $\mathcal{Q}\cong H_{\overline{\mathfrak{m}}}^0\big(\mathop{\rm Sym}(I(\delta))\big)$ and so, with the bigrading $\deg \overline{x_i}=(1,0)$ and $\deg T_i = (0,1)$ on $R[T_1,\ldots,T_{d+1}]$, we may write $$\mathcal{Q}_{(*,q)} = \bigoplus_p \mathcal{Q}_{(p,q)} \cong H_{\overline{\mathfrak{m}}}^0\big({\rm Sym}_q(I(\delta)) \big).$$ As $\mathcal{Q}$ lives in finitely many degrees, it is enough to show that $\mathcal{Q}$ vanishes past degree $m-1$ in the first component of the bigrading.
By \cite[3.8]{KPU3}, it follows that $\mathcal{Q}_{(p,q)} =0$ for all $p > b_0 (\mathscr{D}_d^q) +a(R)$ and any $q$, where $\mathscr{D}_d^q$ is the $d^{\text{th}}$ module of a homogeneous complex $\mathscr{D}_{\bullet}^q$ of finitely generated graded $R$-modules with zeroth homology $H_0(\mathscr{D}_{\bullet}^q) \cong \mathop{\rm Sym}_q(I(\delta))$, $b_0 (\mathscr{D}_d^q)$ is the \textit{maximal generator degree} of $\mathscr{D}_d^q$ from \cite[2.2]{KPU3}, and $a(R)$ is the $a$-invariant of $R$. Since $R$ is a Cohen-Macaulay $k$-algebra, the $a$-invariant of $R$ is $a(R) = \mathop{\rm reg} R -d$, where $\mathop{\rm reg} R$ denotes the Castelnuovo-Mumford regularity of $R$. As $R$ is a hypersurface ring defined by a polynomial of degree $m$, it follows that $a(R) = \mathop{\rm reg} R -d = (m-1)-d$, hence we need only show that $b_0 (\mathscr{D}_d^q) \leq d$ for any $q$.
Since $\varphi$ is a $(d+1)\times (d+1)$ homogeneous alternating matrix which presents $I$ minimally, we may take $$\mathscr{D}_{\bullet}^q (\varphi): \, 0 \longrightarrow \mathscr{D}_d^q \longrightarrow \mathscr{D}_{d-1}^q \longrightarrow \cdots\cdots \longrightarrow \mathscr{D}_1^q \longrightarrow \mathscr{D}_0^q\longrightarrow 0$$ to be the complex from \cite[2.15, 4.7]{KU} associated to $\varphi$. The zeroth homology of $\mathscr{D}_{\bullet}^q (\varphi)$ is $H_0\big( \mathscr{D}_{\bullet}^q (\varphi) \big) \cong \mathop{\rm Sym}_q(I(\delta))$, hence we may consider this complex and the maximal generator degree of $\mathscr{D}_d^q$. Following the description and notation of this complex given in \cite{KU} and restated in \cite{KPU2}, and noting that the entries of $\varphi$ are linear, for all $1\leq r\leq d$ we have \[ \mathscr{D}_r^q = \left\{
\begin{array}{ll}
K_{q-r,r} = R(-r)^{\beta_r^q} & \text{if $r \leq \mathop{\rm min}\{q,d\}$}\\[1ex]
Q_q = R(-(r-1)-\frac{1}{2}(d-r+2)) & \text{if $r =q+1\leq d$, $q$ odd}\\[1ex]
0& \text{if $r =q+1$, $q$ even}\\[1ex]
0& \text{if $r \geq \mathop{\rm min}\{q+2,d+1\}$}\\[1ex]
\end{array}
\right. \] for some nonzero Betti numbers $\beta_r^q$. Allowing $r=d$, which is even, the expressions above simplify to \[ \mathscr{D}_d^q = \left\{
\begin{array}{ll}
K_{q-d,d} = R(-d)^{\beta_r^q} & \text{if $q\geq d$}\\[1ex]
Q_q = R(-d) & \text{if $q =d-1$}\\[1ex]
0& \text{if $q\leq d-2$}.\\[1ex]
\end{array}
\right. \] If $\mathscr{D}_d^q=0$, then $b_0(\mathscr{D}_d^q) = -\infty$ by convention. Moreover, if $\mathscr{D}_d^q\neq 0$, we see that $b_0(\mathscr{D}_d^q) =d$. Thus $b_0 (\mathscr{D}_d^q) \leq d$ for any $q$, as required. \end{proof}
\begin{notation}\label{Anotation} With the result of \Cref{IndexOfSat}, let us denote the ideal $\mathcal{A} = \mathscr{L}:(x_1,\ldots,x_{d+1})^m = \mathscr{L}:(x_1,\ldots,x_{d+1})^\infty$ in $S[T_1,\ldots,T_{d+1}]$. \end{notation}
Notice that $\overline{\mathcal{A}} = \mathcal{J}$, hence $\mathcal{A}$ defines $\mathcal{R}(I)$ as a quotient of $S[T_1,\ldots,T_{d+1}]$ since $S[T_1,\ldots,T_{d+1}]/\mathcal{A} \cong R[T_1,\ldots,T_{d+1}]/\mathcal{J} \cong \mathcal{R}(I)$. For much of the duration of this paper, the ideal $\mathcal{A}$ will be the object of our focus. This ideal is a defining ideal of $\mathcal{R}(I)$, in a sense, and belongs to the polynomial ring $S[T_1,\ldots,T_{d+1}]$, where a colon ideal $\mathcal{A} = \mathscr{L}:(x_1,\ldots,x_{d+1})^m$ is more easily studied.
We follow a path parallel to the traditional one by approximating the defining ideal of $\mathcal{R}(I)$ using the defining ideal of $\mathop{\rm Sym}(I)$, now using the ideals $\mathcal{A}$ and $\mathscr{L}$ in place of $\mathcal{J}$ and $\overline{\mathscr{L}}$. Traditionally, one then employs a Jacobian dual matrix, however as we have updated our ideals, we must also update such a matrix. We recall the notion of a \textit{modified Jacobian dual} as presented in \cite{Weaver}. This matrix is associated to the generators of $\mathscr{L}$ and the sequence $x_1,\ldots,x_{d+1}$.
Before introducing this object, we provide the notation necessary for its definition and the constructions in the proceeding section. Notice that $S[T_1,\ldots,T_{d+1}]$ is naturally bigraded with $\deg x_i = (1,0)$ and $\deg T_i = (0,1)$.
\begin{notation}\label{delnotation}
For $F \in (x_1,\ldots,x_{d+1})S[T_1,\ldots,T_{d+1}]$ a nonzero bihomogeneous polynomial, let $\partial F$ denote a column consisting of bihomogeneous entries with bidegree $\deg F -(1,0)$, such that $[x_1,\ldots,x_{d+1}]\cdot \partial F = F$. As a convention, we take $\partial F$ to consist of zeros if $F=0$. \end{notation}
In general, there are many choices for $\partial F$. As the notation suggests, there is a natural choice for $\partial F$ using differentials, if $k$ is a field of characteristic zero. Writing $\deg F = (r,*)$ for $r>0$, we have the Euler formula $r\cdot F \,= \,\sum_{i=1}^{d+1}\,\frac{\partial F}{\partial x_i} \cdot x_i$, noting that $r$ is a unit. Hence $\partial F$ can be chosen to have entries $\frac{1}{r}\cdot\frac{\partial F}{\partial x_i}$ in this setting.
\begin{definition}\label{mjddefn}
With $\mathscr{L}$ and $\psi$ as in \Cref{notation1}, we take a \textit{modified Jacobian dual} of $\psi$ to be the $(d+1) \times (d+2)$ matrix $B=[B(\psi)\,|\,\partial f]$ where $B(\psi)$ is the Jacobian dual of $\psi$, consisting of linear entries in $k[T_1,\ldots,T_{d+1}]$, and $\partial f$ is a column corresponding to $f$, as in \Cref{delnotation}. Here $|$ denotes the usual matrix concatenation. \end{definition}
Notice that the entries of the matrix product $[x_1\ldots x_{d+1}]\cdot B$ are precisely the generators of $\mathscr{L}$. In the next section we will employ the modified Jacobian dual and similar constructions to produce equations in $\mathcal{A}$. For now, however, we must produce another description of $\mathcal{A}$.
Following the approach in \cite{KPU1}, we find a ring which maps onto $\mathcal{R}(I)$ such that the kernel is an ideal of height one. We take this ring to be the Rees algebra $\mathcal{R}(J)$, noting that $J$ is of linear type by \Cref{Jlineartype}. We now study how these Rees algebras, and their defining ideals, relate to each other.
\subsection{Ideals in $\mathcal{R}(J)$}
Before we study the relation between $\mathcal{R}(J)$ and $\mathcal{R}(I)$, we introduce a third, and final, perfect Gorenstein ideal of grade three. It will be seen that this ideal satisfies the assumptions of \Cref{Moreyresult}. The defining ideal of its Rees algebra will then be used to produce a description of the image of $\mathcal{A}$ in $\mathcal{R}(J)$. We begin by providing a short lemma, commonly used to avoid certain ideals in graded rings.
\begin{lemma}\label{idealavoidance} Let $A=k[y_1,\ldots,y_n]$ for $k$ an infinite field and let $J$ be an ideal generated by homogeneous elements of degree $r$. Suppose that $I_1,\ldots,I_s$ are ideals of $A$, none of which contains $J$. There exists a homogeneous element $z\in J$ of degree $r$ such that $z\notin I_j$ for all $1\leq j\leq s$. \end{lemma}
\begin{proof} This follows from the well-known fact that a vector space over an infinite field is not a finite union of proper subspaces, and then applying Nakayama's lemma in the graded setting. \end{proof}
\begin{proposition}\label{J'ideal} With the assumptions of \Cref{setting1}, let $S' =k[x_1,\ldots,x_d] \cong S/(x_{d+1})$ and consider the matrix $\psi' = \psi S'$. After a possible linear change of coordinates, there exists an $S'$-ideal $J'$ that is perfect and Gorenstein of grade $3$, which is presented by $\psi'$. Moreover, $J'$ satisfies $G_d$. \end{proposition}
\begin{proof} Notice that $\psi'$ is an alternating $(d+1)\times (d+1)$ matrix with entries in $S'$. By \cite[2.1]{BE}, the existence of such an ideal depends only on the height of an ideal of Pfaffians of $\psi' = \psi S'$. Moreover, the condition $G_d$ depends on the heights of ideals of minors of $\psi'$. Thus it suffices to show that, after making a suitable change of coordinates, $\mathop{\rm ht} \mathop{\rm Pf}_d(\psi')\geq 3$ and $\mathop{\rm ht} I_j(\psi') \geq d-j+2 $ for all $2\leq j \leq d$. Notice that $\mathop{\rm Pf}_d(\psi') = \mathop{\rm Pf}_d(\psi)S'$ and $I_j(\psi') = I_j(\psi)S'$. Recall that $J$ is presented by $\psi$ and is of linear type by \Cref{Jlineartype}, and hence satisfies $G_\infty$.
Recall that $I$ is an ideal of height 3 in the $d$-dimensional ring $R$. As $d$ is even, it follows that $d\geq 4$, hence $\mathop{\rm dim} S = d+1 \geq 5$ and so $\mathop{\rm ht} \mathop{\rm Pf}_d(\psi)=3 <\mathop{\rm dim} S$. Now consider the determinantal ideals $I_j(\psi)$ with height at most $d$, for $2\leq j\leq d$. There are finitely many non-maximal minimal primes of $\mathop{\rm Pf}_d(\psi)$ and of the ideals $I_j(\psi)$ with non-maximal height, and none of them contains $(x_1,\ldots,x_{d+1})$. Hence there exists a linear form not contained in any of these minimal primes by \Cref{idealavoidance}. After a potential linear change of coordinates, it can be assumed that $x_{d+1}$ is precisely this linear form.
With this, we see that $\mathop{\rm ht} \mathop{\rm Pf}_d(\psi') = \mathop{\rm ht} \mathop{\rm Pf}_d(\psi) =3$ and $\mathop{\rm ht} I_j(\psi') = \mathop{\rm ht} I_j(\psi)\geq d-j+2$ for all $j$ such that $\mathop{\rm ht} I_j(\psi)\leq d$ and $2\leq j\leq d$. For any of the ideals $I_j(\psi)$ with maximal height and $j$ in this range, the height of $I_j(\psi)$ must drop when passing to $S'$. However, if $\mathop{\rm ht} I_j(\psi) = d+1$, then $\mathop{\rm ht} I_j(\psi') = \mathop{\rm ht} I_j(\psi) -1 =d \geq d-j+2$ as $2\leq j\leq d$. \end{proof}
\begin{remark} In \Cref{J'ideal}, a linear change of coordinates was made and we note that the conditions and constructions introduced so far are amenable to such a change. We proceed assuming that such a linear adjustment has been made and the sequence $x_1,\ldots,x_{d+1}$ has been relabelled accordingly. \end{remark}
Notice that the $S'$-ideal $J'$ satisfies the assumptions of \Cref{Moreyresult}, hence the defining equations of $\mathcal{R}(J')$ are known. In particular, they can be described from the Jacobian dual $B(\psi')$, which is precisely a submatrix of $B(\psi)$. Indeed, the entries of $B(\psi)$ belong to $k[T_1,\ldots,T_{d+1}]$ and the last row of $B(\psi)$ corresponds to $x_{d+1}$. Thus by deleting the last row of $B(\psi)$ we obtain the Jacobian dual of $\psi'$, with respect to $x_1,\ldots,x_d$ in $S'$. Letting $B'$ denote this submatrix of $B(\psi)$ obtained by deleting the last row, we note that there is a nontrivial greatest common divisor among the maximal minors of $B'=B(\psi')$, by \Cref{JDminors}.
\begin{notation}\label{notation2} Recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{R}(J) \cong S[T_1,\ldots,T_{d+1}]/ \mathcal{H}$ where $\mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$, following \Cref{notation1}. Let $\widetilde{\,\cdot\,}$ denote images modulo $\mathcal{H}$, in $\mathcal{R}(J)$. As before, let $B'$ be the $d\times (d+1)$ matrix obtained by deleting the last row of $B(\psi)$ and consider the $S[T_1,\ldots,T_{d+1}]$-ideal $\mathcal{K}= (\ell_1,\ldots,\ell_{d+1}) + (\mathop{\rm gcd} I_d(B'))+(x_{d+1})$. \end{notation}
As mentioned, we may identify $B'$ with the Jacobian dual $B(\psi')$, for $\psi'$ as in \Cref{J'ideal}. Hence there is a greatest common divisor amongst the maximal minors of $B'$, in $k[T_1,\ldots,T_{d+1}]$.
\begin{proposition}\label{PropertiesOfA} The ring $\mathcal{R}(J)$ is a Cohen-Macaulay domain of dimension $d+2$ and the ideals $\widetilde{\mathcal{K}}$ and $(\widetilde{x_1,\ldots,x_{d+1}})$ are Cohen-Macaulay $\mathcal{R}(J)$-ideals of height 1. Moreover, $(\widetilde{x_1,\ldots,x_{d+1}})$ is a prime ideal. \end{proposition}
\begin{proof} The claim that $\mathcal{R}(J)$ is a domain of dimension $d+2$ follows easily as $S$ is a domain of dimension $d+1$ and $J$ is an ideal of positive height \cite{VasconcelosBook}. Additionally, $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{R}(J)$ is Cohen-Macaulay by \cite[2.6]{HSV1}. Moreover, as $J$ is of linear type, its special fiber ring is $\mathcal{F}(J) \cong k[T_1,\ldots, T_{d+1}]$, hence $(\widetilde{x_1,\ldots,x_{d+1}})$ is indeed a Cohen-Macaulay prime ideal of height 1.
To see that $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay $\mathcal{R}(J)$-ideal of height 1, notice that $\mathcal{K}$ can be written as $(\ell_1',\ldots,\ell_{d+1}') +(\mathop{\rm gcd} I_d(B'))+(x_{d+1})$, where $[\ell_1'\ldots \ell_{d+1}'] = [x_1 \ldots x_d] \cdot B'$. Notice that $(\ell_1',\ldots,\ell_{d+1}') +( \mathop{\rm gcd} I_d(B'))$ is exactly the defining ideal of $\mathcal{R}(J')$ following \Cref{J'ideal} and \Cref{Moreyresult}. In particular, this ideal is Cohen-Macaulay with height $d$. As $x_{d+1}$ is regular modulo this ideal, it then follows that $\mathcal{K}$ has height $d+1$ and is Cohen-Macaulay. Thus $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay ideal of height 1 in $\mathcal{R}(J)$. \end{proof}
\begin{proposition}\label{colons} With $\mathcal{K}$ as in \Cref{notation2}, for any positive integer $i$ we have the following. \begin{enumerate}[(a)] \setlength\itemsep{1em}
\item $(\widetilde{x_1,\ldots,x_{d+1}})^i = (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$
\item $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}= (\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} \widetilde{\mathcal{K}}^{(i)}$
\item $\widetilde{\mathcal{K}}^{(i)} = (\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ \end{enumerate} \end{proposition}
\begin{proof} We proceed as in the proof of \cite[3.9]{BM}.
\begin{enumerate}[(a)] \setlength\itemsep{1em}
\item Setting the degrees of the $x_i$ to 1 and the degrees of the $T_i$ to 0 temporarily, we see that $\mathcal{G}\big((\widetilde{x_1,\ldots,x_{d+1}})\big) \cong \mathcal{R}(J)$, where $\mathcal{G}\big((\widetilde{x_1,\ldots,x_{d+1}})\big)$ is the associated graded ring of $(\widetilde{x_1,\ldots,x_{d+1}})$. As $\mathcal{R}(J)$ is a domain, it follows that $(\widetilde{x_1,\ldots,x_{d+1}})^i = (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ for all $i$.
\item We first claim that $(\widetilde{x_1,\ldots,x_{d+1}})\widetilde{\mathcal{K}}\subseteq (\widetilde{x_{d+1}})$. Recall that $\mathcal{K} = (\ell_1',\ldots,\ell_{d+1}') +(\mathop{\rm gcd} I_d(B'))+(x_{d+1})$, where $[\ell_1'\ldots\ell_{d+1}'] = [x_1 \ldots x_d] \cdot B'$. Thus modulo $\mathcal{H}$, we see that $(\widetilde{\ell_1',\ldots,\ell_{d+1}'}) \subseteq (\widetilde{x_{d+1}})$. Noting that $B'=B(\psi')$, with $\psi'$ as in \Cref{J'ideal}, we may write $I_d(B') = (g')(T_1,\ldots,T_{d+1})$ where $g' = \mathop{\rm gcd} (I_d(B'))$, by \Cref{JDminors}. By Cramer's rule we have $(x_1,\ldots,x_d) I_d(B') \subseteq (\ell_1',\ldots,\ell_{d+1}')$, hence $$(x_1,\ldots,x_d)(g') \subseteq (\ell_1',\ldots,\ell_{d+1}'):(T_1,\ldots,T_{d+1}).$$ However, notice that $(\ell_1',\ldots,\ell_{d+1}')$ is the defining ideal of $\mathop{\rm Sym}(J')$ following \Cref{J'ideal}, hence $\mathop{\rm ht} (\ell_1',\ldots,\ell_{d+1}') = d$ by \cite[2.1]{Morey}. Modulo $(\ell_1',\ldots,\ell_{d+1}')$, we then see that $(T_1,\ldots,T_{d+1})$ is an ideal of positive grade, which is annihilated by $(x_1,\ldots,x_d)(g')$, hence $(x_1,\ldots,x_d)(g') \subseteq (\ell_1',\ldots,\ell_{d+1}')$. Noting that $(\widetilde{\ell_1',\ldots,\ell_{d+1}'}) \subseteq (\widetilde{x_{d+1}})$, it then follows that $(\widetilde{x_1,\ldots,x_{d+1}})\widetilde{\mathcal{K}}\subseteq (\widetilde{x_{d+1}})$.
With this, we have $(\widetilde{x_1,\ldots,x_{d+1}})^i\widetilde{\mathcal{K}}^i\subseteq (\widetilde{x_{d+1}}^i)$ for any positive integer $i$. Localizing at height one prime ideals of $\mathcal{R}(J)$, we see that $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}\widetilde{\mathcal{K}}^{(i)}\subseteq (\widetilde{x_{d+1}}^i)$ and so $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}\subseteq (\widetilde{x_{d+1}}^i):\widetilde{\mathcal{K}}^{(i)}$. Writing $\mathcal{K}=(\ell_1',\ldots,\ell_{d+1}') + (\mathop{\rm gcd} I_d(B'))+(x_{d+1})$ as before, recall that $(\ell_1',\ldots,\ell_{d+1}') + (\mathop{\rm gcd} I_d(B'))$ is the defining ideal of $\mathcal{R}(J')$. Note that $J'$ is not of linear type as $\mu(J') > \mathop{\rm dim} S'$, hence $\mathop{\rm gcd} I_d(B')$ is nonzero in $k[T_1,\ldots,T_{d+1}]$ and so $\widetilde{\mathcal{K}}\nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$. As $(\widetilde{x_1,\ldots,x_{d+1}})$ is the unique associated prime of $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$, it follows that $\widetilde{\mathcal{K}}^{(i)}$ and $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ have no associated prime in common. From this it follows that $(\widetilde{x_{d+1}}^i):\widetilde{\mathcal{K}}^{(i)} \subseteq (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$.
\item As before, we have $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}\widetilde{\mathcal{K}}^{(i)}\subseteq (\widetilde{x_{d+1}}^i)$, hence $\widetilde{\mathcal{K}}^{(i)} \subseteq (\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$. To show the reverse containment, recall that $(\widetilde{x_1,\ldots,x_{d+1}})$ is not an associated prime of $\widetilde{\mathcal{K}}^{(i)}$. With this and noting that $\widetilde{x_{d+1}}^i\in \widetilde{\mathcal{K}}^{(i)}$, we see that $(\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} (\widetilde{x_1,\ldots,x_{d+1}})^{(i)} \subseteq \widetilde{\mathcal{K}}^{(i)}$. \qedhere \end{enumerate} \end{proof}
With parts (b) and (c) of \Cref{colons}, one says that $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ and $\widetilde{\mathcal{K}}^{(i)}$ are \textit{linked} \cite{Huneke1}.
\begin{corollary}\label{Kscm} The $\mathcal{R}(J)$-ideal $\widetilde{\mathcal{K}}$ is generically a complete intersection and is strongly Cohen-Macaulay. \end{corollary}
\begin{proof} Recall from \Cref{PropertiesOfA} that $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay ideal of height one. From the proof of \Cref{colons} we had seen that $(\widetilde{x_1,\ldots,x_{d+1}})$ is not an associated prime of $\widetilde{\mathcal{K}}$. Thus if $\mathfrak{p}$ is an associated prime of $\widetilde{\mathcal{K}}$, by \Cref{colons} we have $(\widetilde{x_{d+1}})_\mathfrak{p} : \widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p} = \mathcal{R}(J)_\mathfrak{p}$. Thus we have $\widetilde{\mathcal{K}}_\mathfrak{p} \subseteq (\widetilde{x_{d+1}})_\mathfrak{p}$ and so $\widetilde{\mathcal{K}}_\mathfrak{p}= (\widetilde{x_{d+1}})_\mathfrak{p}$ as $x_{d+1} \in \mathcal{K}$, which shows that $\widetilde{\mathcal{K}}$ is generically a complete intersection.
Notice that $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$ where $g'= \mathop{\rm gcd} I_d(B')$, hence $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay almost complete intersection ideal, following \Cref{PropertiesOfA}. Moreover, we had just seen that $\widetilde{\mathcal{K}}$ is generically a complete intersection, hence it is a strongly Cohen-Macaulay $\mathcal{R}(J)$-ideal by \cite[2.2]{Huneke2}. \end{proof}
We now give an alternative description of the $\mathcal{R}(J)$-ideal $\widetilde{\mathcal{A}}$. Notice that this is the kernel of the induced map of Rees algebras $\mathcal{R}(J)\rightarrow \mathcal{R}(I)$. Consider the fractional ideal $\frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$ and note that this is actually an $\mathcal{R}(J)$-ideal by \Cref{colons}, as $f\in (x_1,\ldots,x_{d+1})^m$.
\begin{theorem}\label{DandA} In $\mathcal{R}(J)$, we have $\widetilde{\mathcal{A}}= \frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$. \end{theorem}
\begin{proof} Writing $\mathcal{D}=\frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$, we begin by showing that $\mathcal{D}\subseteq \widetilde{\mathcal{A}}$. Recall that $\mathcal{R}(J)$ is a domain by \Cref{PropertiesOfA}. Hence for any $a\in (x_1,\ldots,x_{d+1})^{m}$ we have the equality $\frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m} \cdot \widetilde{a} = \frac{\widetilde{a}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m} \cdot \widetilde{f}$. Notice that $\frac{\widetilde{a}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$ is an $\mathcal{R}(J)$-ideal by \Cref{colons}, hence it follows that $\mathcal{D} (\widetilde{x_1,\ldots,x_{d+1}})^{m} \subset (\widetilde{f}) = \widetilde{\mathscr{L}}$. Thus $\mathcal{D} \subseteq \widetilde{\mathscr{L}}: (\widetilde{x_1,\ldots,x_{d+1}})^{m} = \widetilde{\mathcal{A}}$, following \Cref{Anotation}.
To show this containment is actually an equality, we proceed as in the proof of \cite[3.10]{BM}. Recall that $\mathcal{R}(J)$ is a Cohen-Macaulay domain and note that $\widetilde{\mathcal{K}}^{(m)}$ is an unmixed ideal of height one. Equivalently, $\widetilde{\mathcal{K}}^{(m)}$ satisfies Serre's condition $S_2$, as an $\mathcal{R}(J)$-module. Thus $\mathcal{D}$ is also an unmixed $\mathcal{R}(J)$-ideal of height one since $\mathcal{D} \cong \widetilde{\mathcal{K}}^{(m)}$ and the condition $S_2$ is preserved under isomorphism. As $\mathcal{D} \subseteq \widetilde{\mathcal{A}}$, it suffices to show that these ideals agree locally at the associated primes of $\mathcal{D}$, in order to conclude that $\mathcal{D} = \widetilde{\mathcal{A}}$. As these associated primes have height one, we show that $\mathcal{D}_\mathfrak{p} = \widetilde{\mathcal{A}}_\mathfrak{p}$ for any prime $\mathcal{R}(J)$-ideal $\mathfrak{p}$ with height one.
Recall from \Cref{PropertiesOfA} that $(\widetilde{x_1,\ldots,x_{d+1}})$ is a prime ideal of height one in $\mathcal{R}(J)$. If $\mathfrak{p} \neq (\widetilde{x_1,\ldots,x_{d+1}})$, we see that $\widetilde{\mathcal{A}}_\mathfrak{p} = \widetilde{\mathscr{L}}_\mathfrak{p} : (\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p}^m = (\widetilde{f})_\mathfrak{p} : \mathcal{R}(J)_\mathfrak{p}$, hence $\widetilde{\mathcal{A}}_\mathfrak{p} \subseteq (\widetilde{f})_\mathfrak{p}$ and so $\widetilde{\mathcal{A}}_\mathfrak{p} = (\widetilde{f})_\mathfrak{p}$, as $f\in \mathcal{A}$. Additionally, by \Cref{colons} and repeating the argument in the proof of \Cref{Kscm}, it follows that $\widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}$, hence $\mathcal{D}_\mathfrak{p} = (\widetilde{f})_\mathfrak{p}$ as well.
Now suppose that $\mathfrak{p} =(\widetilde{x_1,\ldots,x_{d+1}})$ and we first note that $\widetilde{\mathcal{A}}\nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$. Indeed, the analytic spread of $J$ is $\ell(J) = d+1$ since $\mathcal{F}(J) \cong k[T_1,\ldots,T_{d+1}]$, which we had seen in the proof of \Cref{PropertiesOfA}. Moreover, we have $\ell(I) =d$ by \cite[4.3]{UV}. With the isomorphism $\mathcal{R}(I) \cong \mathcal{R}(J) / \widetilde{\mathcal{A}}$ and passing to $\mathcal{F}(I)$, it then follows that $\widetilde{\mathcal{A}}\nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$. With this, we see that $\widetilde{\mathcal{A}}_\mathfrak{p} = \mathcal{R}(J)_\mathfrak{p}$.
Recall from the proof of \Cref{colons} that $\widetilde{\mathcal{K}} \nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$, hence $\widetilde{\mathcal{K}}^{(m)}_\mathfrak{p}=\mathcal{R}(J)_\mathfrak{p}$ as well. With this and \Cref{colons}, we see that $(\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}:\mathcal{R}(J)_\mathfrak{p}$, hence $(\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}$. Thus
$$\mathcal{R}(J)_\mathfrak{p} = \widetilde{\mathcal{A}}_\mathfrak{p}=\widetilde{\mathscr{L}}_\mathfrak{p}: (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}=(\widetilde{f})_\mathfrak{p}: (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}$$ and so $(\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p} \subseteq (\widetilde{f})_\mathfrak{p}$. However, as $f\in(x_1,\ldots,x_{d+1})^m$, we have $(\widetilde{f})_\mathfrak{p} = (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p} = (\widetilde{x_{d+1}})^m_\mathfrak{p}$, hence $\mathcal{D}_\mathfrak{p} =\widetilde{\mathcal{K}}^{(m)}_\mathfrak{p}=\mathcal{R}(J)_\mathfrak{p} = \widetilde{\mathcal{A}}_\mathfrak{p}$. \end{proof}
Recall that, as a consequence of \Cref{IndexOfSat}, the ideal $\mathcal{A}$ is a saturation that can be written as $\mathcal{A} =\mathscr{L}:(x_1,\ldots,x_{d+1})^\infty = \mathscr{L}:(x_1,\ldots,x_{d+1})^m$. We end this section by showing that $m$ is the smallest integer for which this second equality holds, i.e. the index of saturation of $\mathcal{A}$.
\begin{proposition}\label{nissmallest} With the assumptions of \Cref{setting1}, $m$ is the smallest integer such that $\mathcal{A}=\mathscr{L}:(x_1,\ldots,x_{d+1})^m$. \end{proposition}
\begin{proof} Suppose, for a contradiction, that there is some positive integer $i<m$ such that $\mathcal{A}=\mathscr{L}:(x_1,\ldots,x_{d+1})^i$. In $\mathcal{R}(J)$, we then have $\widetilde{\mathcal{A}}=\widetilde{\mathscr{L}}:(\widetilde{x_1,\ldots,x_{d+1}})^i$. Now localizing at $\mathfrak{p}=(\widetilde{x_1,\ldots,x_{d+1}})$ and noting that $\widetilde{\mathcal{A}}_\mathfrak{p} = \mathcal{R}(J)_\mathfrak{p}$, as we had seen in the proof of \Cref{DandA}, we have $\mathcal{R}(J)_\mathfrak{p}=(\widetilde{f})_\mathfrak{p}:(\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p}^i$, hence $(\widetilde{x_1,\ldots,x_{d+1}})^i_\mathfrak{p} \subseteq (\widetilde{f})_\mathfrak{p}$. As $f$ has degree $m>i$ in $S$, we have $f\in (x_1,\ldots,x_{d+1})^i$, hence $(\widetilde{f})_\mathfrak{p}= (\widetilde{x_1,\ldots,x_{d+1}})^i_\mathfrak{p}$. However, $(\widetilde{f})_\mathfrak{p}= (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}$ as well, which we had seen in the proof of \Cref{DandA}. Thus we have $(\widetilde{x_1,\ldots,x_{d+1}})^i_\mathfrak{p} = (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}$ in $\mathcal{R}(J)_\mathfrak{p}$. Now contracting back to $\mathcal{R}(J)$ and noting that the powers and symbolic powers of this ideal agree by \Cref{colons}, we have $$ (\widetilde{x_1,\ldots,x_{d+1}})^i= (\widetilde{x_1,\ldots,x_{d+1}})^{(i)} = (\widetilde{x_1,\ldots,x_{d+1}})^{(m)} =(\widetilde{x_1,\ldots,x_{d+1}})^m$$ which is impossible. \end{proof}
\section{gcd-iterations}\label{iterationssec}
In this section we present a recursive algorithm which produces equations in $\mathcal{A}$. This algorithm is an adaptation of the method of \textit{modified Jacobian dual iterations} used in \cite{Weaver} and is similar to the methods used in \cite{CHW} and \cite{BM}. This process consists of matrix constructions analogous to a modified Jacobian dual. We begin this section by studying how the maximal minors of such matrices factor, in a manner similar to the proof of \Cref{JDminors}.
\begin{proposition}\label{gcds}
With the assumptions of \Cref{setting1} and $C$ any column with $d+1$ entries in $S[T_1,\ldots,T_{d+1}]$, consider the $(d+1) \times (d+2)$ matrix $\mathfrak{B}=[B(\psi)\,|\,C]$ and let $\mathfrak{B}_j$ denote the submatrix obtained by deleting the $j^{\text{th}}$ column of $\mathfrak{B}$. There exists a polynomial $\mathfrak{g} \in S[T_1,\ldots, T_{d+1}]$ such that for all $1\leq j \leq d+1$, one has $\operatorname{det} \mathfrak{B}_j = (-1)^{j+1} T_j \cdot \mathfrak{g}$. In particular, $\mathfrak{g}$ is the greatest common divisor of the maximal minors of $\mathfrak{B}$. \end{proposition}
\begin{proof} We modify the proof of \Cref{JDminors}. Letting $\underline{x} = x_1,\ldots,x_{d+1}$ and $\underline{T} = T_1,\ldots,T_{d+1}$, notice that $$[\,\underline{x}\,] \cdot B(\psi) \cdot [\,\underline{T}\,]^t = [\,\underline{T}\,] \cdot \psi \cdot [\,\underline{T}\,]^t =0 $$
as $\psi$ is an alternating matrix. As $B(\psi)$ consists of entries in $k[T_1,\ldots,T_{d+1}]$, it follows that $B(\psi) \cdot [\,\underline{T}\,]^t =0$. Let $[\,\underline{T}\,|\,0\,]$ denote the row vector $[T_1 \ldots T_{d+1}\, 0]$ and notice that $\mathfrak{B}\cdot [\,\underline{T}\,|\,0\,]^t =0$. Now applying \Cref{crlemma} to $[\,\underline{T}\,|\,0\,]$ and the transpose of $\mathfrak{B}$, we see that $$T_i \cdot (\operatorname{det} \mathfrak{B}_j) = (-1)^{i-j} \,T_j \cdot (\operatorname{det} \mathfrak{B}_i)$$ in $S[T_1,\ldots,T_{d+1}]$ for all $1\leq i,j\leq d+1$, and the claim follows. \end{proof}
\begin{remark}\label{detBpsizero} Notice that $\mathfrak{B}$ has $d+2$ columns, yet we purposely omit the index $j=d+2$ in \Cref{gcds}. Applying \Cref{crlemma} in the proof above at this index shows only that $\operatorname{det} B(\psi)=0$. However, this can already be seen using Cramer's rule as $B(\psi) \cdot [\,\underline{T}\,]^t =0$, or by noting that $J$ is of linear type by \Cref{Jlineartype}. \end{remark}
With \Cref{gcds}, we may now introduce the method of gcd-iterations. Once again, we adopt the bigrading on $S[T_1,\ldots,T_{d+1}]$ given by $\deg x_i = (1,0)$ and $\deg T_i = (0,1)$ throughout this section.
\begin{algorithm}\label{gcdit} We recursively define pairs consisting of a matrix and an ideal. Set $\mathcal{B}_1= B$ and $\mathcal{L}_1 =\mathscr{L}$ for $B$ a modified Jacobian dual and $\mathscr{L}$ as in \Cref{notation1}. Assume that $2\leq i\leq m$ and the following pairs $(\mathcal{B}_1,\mathcal{L}_1), \ldots, (\mathcal{B}_{i-1},\mathcal{L}_{i-1})$ have been constructed inductively. To construct the $i^{\text{th}}$ pair $(\mathcal{B}_i,\mathcal{L}_i)$, let $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})$ and set
$$ \mathcal{L}_i = \mathcal{L}_{i-1} + (g_{i-1}),\quad \quad \mathcal{B}_i =[B(\psi)\,\vert\,\partial g_{i-1}]$$
where $\partial g_{i-1}$ is a column consisting of bihomogeneous entries with constant bidegree such that $$g_{i-1} = [x_1\ldots x_{d+1}]\cdot \partial g_{i-1}$$ as in \Cref{delnotation}. We refer to the pair $(\mathcal{B}_{i},\mathcal{L}_{i})$ as the $i^{\text{th}}$ \textit{gcd-iteration} of $(B,\mathscr{L})$. \end{algorithm}
Notice that these matrices resemble a modified Jacobian dual, hence it is understood how these greatest common divisors arise by \Cref{gcds}. Recall from \Cref{delnotation} that, as a convention, if $g_{i-1}=0$ then $\partial g_{i-1}$ consists of zeros. Thus the next equation $g_i$, and every other proceeding equation, vanishes as well. Eventually it will be shown that these equations are nonzero, but for now we retain this possibility.
\begin{proposition}\label{deggcdprop} In the setting of \Cref{gcdit}, if $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i) \neq 0$ for some $1\leq i\leq m$, then it has bidegree $\deg (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)) = (m-i,i(d-1))$. \end{proposition}
\begin{proof} As these equations are defined recursively, we proceed by induction. In the case $i=1$, notice that $\mathcal{B}_1 = [B(\psi)\,\vert\,\partial f]$, a modified Jacobian dual matrix. Noting that $B(\psi)$ consists of linear entries in $k[T_1,\ldots,T_{d+1}]$ and $\partial f$ consists of entries in $S[T_1,\ldots, T_{d+1}]$ of bidegree $(m-1,0)$, the initial claim follows from \Cref{gcds}. Now suppose that $i\geq 2$ and $\deg (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_j)) = (m-j,j(d-1))$ for all $1\leq j \leq i-1$, if these equations are nonzero.
Notice that if $g_i=\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)$ is nonzero, then $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})$ is nonzero as well, hence $\deg g_{i-1} = (m-i+1,(i-1)(d-1))$ by the induction hypothesis. Thus the entries of $\partial g_{i-1}$ are bihomogeneous with bidegree $(m-i,(i-1)(d-1))$. Again noting that the entries of $B(\psi)$ are of bidegree $(0,1)$, it follows from \Cref{gcds} that $\deg (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)) = (m-i,i(d-1))$. \end{proof}
Notice that the method of gcd-iterations terminates after $m$ steps in \Cref{gcdit}. If $g_m=\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)$ is nonzero, then it must have bidegree $(0,m(d-1))$ following \Cref{deggcdprop}. Thus there is no column corresponding to $g_m$, as in \Cref{delnotation}, and so the process must terminate. If $g_m=0$, the process could continue, however every subsequent equation is zero as well. Thus the same ideal is achieved by the $m^{\text{th}}$ step regardless.
Following \Cref{delnotation}, the matrices in \Cref{gcdit} are not unique as there are often multiple choices for the last column. Regardless, we claim that the ideals produced by this algorithm are well-defined. First however, we provide a short lemma which will be used frequently.
\begin{lemma}[{\cite[4.4]{BM}}]\label{WD} Let $R$ be a ring and $\underline{a}=a_1,\ldots,a_r$ an $R$-regular sequence. If $B$ and $B'$ are two matrices with $r$ rows satisfying $(\underline{a}\cdot B) = (\underline{a}\cdot B')$, then $(\underline{a}\cdot B) +I_r(B) = (\underline{a}\cdot B') +I_r(B')$. \end{lemma}
With this, we now show that the ideals obtained from \Cref{gcdit} are well-defined.
\begin{proposition}\label{gcdwd} The ideals $\mathcal{L}_i$ and $\mathcal{L}_i+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i))$ are well-defined for $1\leq i \leq m$. \end{proposition}
\begin{proof}
We proceed by induction. For $i=1$, note that $\mathcal{L}_1 = \mathscr{L}$ is certainly well-defined as it is the ideal defining $\mathop{\rm Sym}(I)$, as a quotient of $S[T_1,\ldots,T_{d+1}]$. Now suppose that $B$ and $B'$ are two candidates for $\mathcal{B}_1$. In other words, $B$ and $B'$ are two modified Jacobian dual matrices. Write $B = [B(\psi)\,|\,C]$ and $B' = [B(\psi)\,|\,C']$ where $C$ and $C'$ are columns with $[\,\underline{x}\,]\cdot C = f= [\,\underline{x}\,]\cdot C'$, as in \Cref{delnotation} and \Cref{mjddefn}, where $\underline{x} = x_1,\ldots,x_{d+1}$. By \Cref{gcds}, there exist polynomials $g$ and $g'$ in $S[T_1,\ldots,T_{d+1}]$ such that $\operatorname{det} B_j= (-1)^{j+1}T_j g$ and $\operatorname{det} B_j'= (-1)^{j+1}T_j g'$. Here $B_j$ and $B_j'$ denote the submatrices of $B$ and $B'$, respectively, obtained by deleting the $j^{\text{th}}$ column, for $1\leq j\leq d+1$. We must show that $\mathscr{L}+(g) = \mathscr{L}+(g')$ to complete the initial step. There is nothing to be shown if both $g$ and $g'$ are zero, hence we may assume that $g\neq 0$, without loss of generality.
Deleting the first columns of $B$ and $B'$, by \Cref{WD} we have $(\ell_2,\ldots,\ell_{d+1},f) + (\operatorname{det} B_1) = (\ell_2,\ldots,\ell_{d+1},f) + (\operatorname{det} B_1')$. Thus from \Cref{gcds}, we have \begin{equation}\label{g1eqn1} (\ell_2,\ldots,\ell_{d+1},f) + (gT_1)= (\ell_2,\ldots,\ell_{d+1},f) + (g'T_1). \end{equation} With this, we see that $gT_1 \in (\ell_2,\ldots,\ell_{d+1},f) + (g'T_1)$. However, recall that $\deg f = (m,0)$ and $\deg gT_1 = (m-1,d)$ by \Cref{deggcdprop}. Hence it follows that $gT_1 \in (\ell_2,\ldots,\ell_{d+1}) + (g'T_1)$. If $g'\neq 0$, repeating this argument shows that $g'T_1 \in (\ell_2,\ldots,\ell_{d+1}) + (gT_1)$ as well. If $g'=0$, this inclusion clearly still holds. With this, (\ref{g1eqn1}) can be refined as \begin{equation}\label{g1eqn2} (\ell_2,\ldots,\ell_{d+1}) + (gT_1)= (\ell_2,\ldots,\ell_{d+1}) + (g'T_1). \end{equation} Hence we have \begin{equation}\label{g1eqn3} (\ell_1,\ldots,\ell_{d+1}) + (gT_1)= (\ell_1,\ldots,\ell_{d+1}) + (g'T_1). \end{equation}
Recall that $\mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$, as in \Cref{notation2}, is a prime ideal by \Cref{PropertiesOfA}. Since $T_1 \notin \mathcal{H}$, it follows that $(\ell_1,\ldots,\ell_{d+1})+(g) = (\ell_1,\ldots,\ell_{d+1})+(g')$, hence $\mathscr{L}+(g) = \mathscr{L}+(g')$ as required.
We are finished if $m=1$, so assume that $m\geq 2$. For the inductive step, assume that both $\mathcal{L}_j$ and $\mathcal{L}_j+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_j))$ are well-defined for all $1\leq j\leq i-1 $ for some $i\leq m$. Prior to the $i^{\text{th}}$ step in \Cref{gcdit}, suppose that $B_{i-1}$ and $B_{i-1}'$ are two gcd-iteration matrices. From the induction hypothesis, we have $\mathcal{L}_{i-1}+(\mathop{\rm gcd} I_{d+1}(B_{i-1})) = \mathcal{L}_{i-1}+(\mathop{\rm gcd} I_{d+1}(B_{i-1}'))$, which shows that $\mathcal{L}_{i}$ is well-defined. In the $i^{\text{th}}$ iteration, suppose that $B_i$ and $B_i'$ are two candidates for $\mathcal{B}_i$. Setting $g_{i-1}=\mathop{\rm gcd} I_{d+1}(B_{i-1})$ and $g_{i-1}'=\mathop{\rm gcd} I_{d+1}(B_{i-1}')$, we may write $B_i = [B(\psi)\,\vert\,\partial g_{i-1}]$ and $B_i' = [B(\psi)\,\vert\,\partial g_{i-1}']$, where $\partial g_{i-1}$ and $\partial g_{i-1}'$ are two columns as in \Cref{delnotation}. Writing $g_i = \mathop{\rm gcd} I_{d+1}(B_i)$ and $g_i' = \mathop{\rm gcd} I_{d+1}(B_i')$, we must show that $\mathcal{L}_i +(g_i) = \mathcal{L}_i+(g_i')$. As before, there is nothing to be shown if both $g_i$ and $g_i'$ are zero, hence we may assume that $g_i \neq 0$, without loss of generality.
Notice that as $g_i \neq 0$, we have $g_{i-1}\neq 0$ as well. With this we claim that $(\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}) = (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}')$. We had already seen this for $i=2$ in the proof of the initial case. If $i\geq 3$, notice that the equality $\mathcal{L}_{i-1}+(g_{i-1}) = \mathcal{L}_{i-1}+(g_{i-1}')$ from the induction hypothesis shows that \begin{equation}\label{giminus1eqn1}
g_{i-1} \in \mathcal{L}_{i-1}+(g_{i-1}') = (\ell_1,\ldots,\ell_{d+1}) +(f,g_1,\ldots,g_{i-2}) + (g_{i-1}'), \end{equation} where $g_1,\ldots,g_{i-2}$ are previous equations, following \Cref{gcdit}.
Since $\mathcal{L}_{i-1}$ is well-defined and $g_{i-1}$ is nonzero, it follows that $g_1,\ldots,g_{i-2}$ are nonzero as well. Thus $f,g_1,\ldots,g_{i-2}$ each have bidegree with first component at least $m-i+2$, following \Cref{deggcdprop}. Moreover, we also have $\deg g_{i-1} = (m-i+1, (i-1)(d-1))$ by \Cref{deggcdprop}. By degree considerations, it then follows that (\ref{giminus1eqn1}) can be refined as \begin{equation}\label{giminus1eqn2}
g_{i-1} \in (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}'). \end{equation} A similar argument shows that $g_{i-1}' \in (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1})$ if $g_{i-1}' \neq 0$ and if $g_{i-1}' =0$, this clearly holds. Thus $(\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}) = (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}')$ as claimed.
With the equality above, we may write $g_{i-1} = u\cdot g_{i-1}'+y$ for bihomogeneous elements $u\in S[T_1,\ldots,T_{d+1}]$ and $y\in (\ell_1,\ldots,\ell_{d+1})$. If $g_{i-1}' \neq 0$, by \Cref{deggcdprop} and degree considerations it follows that $u$ must be a unit. If $g_{i-1}' =0$, we may clearly assume that $u$ is a unit. With this, the column $\partial g_{i-1}$ can be rewritten as $\partial g_{i-1} = u\cdot \partial g_{i-1}' + \partial y$, where $\partial y = \partial g_{i-1} - u\cdot \partial g_{i-1}'$. Thus the equality $$(\ell_2,\ldots,\ell_{d+1}, g_{i-1}) = (\ell_2,\ldots,\ell_{d+1},u\cdot g_{i-1}'+y)$$ and \Cref{WD} show that
$$(\ell_2,\ldots,\ell_{d+1}, g_{i-1}) + \operatorname{det} (B_i)_1 = (\ell_2,\ldots,\ell_{d+1},u\cdot g_{i-1}'+y) + \operatorname{det} [B(\psi)\,|\,u\cdot \partial g_{i-1}' + \partial y]_1.$$
By \Cref{gcds} and multilinearity of determinants, we then have \begin{equation}\label{gieqn1}(\ell_2,\ldots,\ell_{d+1}, g_{i-1}) + (g_iT_1) = (\ell_2,\ldots,\ell_{d+1},u g_{i-1}'+y) + (u g_i'T_1 + y'T_1), \end{equation}
where $y' = \mathop{\rm gcd} I_{d+1}([B(\psi)\,|\,\partial y])$, following \Cref{gcds}. However, recall that $y\in (\ell_1,\ldots,\ell_{d+1})$, hence $y'T_1= \operatorname{det} [B(\psi)\,|\, \partial y]_1 \in (\ell_1,\ldots,\ell_{d+1})$, by \Cref{WD} and \Cref{detBpsizero}. From (\ref{gieqn1}) we then obtain \begin{equation}\label{gieqn2}(\ell_1,\ldots,\ell_{d+1}, g_{i-1}) + (g_iT_1) = (\ell_1,\ldots,\ell_{d+1},g_{i-1}') + (g_i'T_1), \end{equation} noting that $u$ is a unit.
With (\ref{gieqn2}) above, we have $g_i T_1 \in (\ell_1,\ldots,\ell_{d+1},g_{i-1}') + (g_i'T_1)$. If $g_{i-1}'\neq 0$, then it has bidegree $(m-i+1,(i-1)(d-1))$, hence $g_i T_1 \in (\ell_1,\ldots,\ell_{d+1}) + (g_i'T_1)$ as $\deg g_i = (m-i,i(d-1))$, using \Cref{deggcdprop}. If $g_{i-1}' = 0$, then $g_i'=0$, hence (\ref{gieqn2}) shows that $g_i T_1 \in (\ell_1,\ldots,\ell_{d+1}) + (g_i'T_1)$ in this case as well. A similar argument shows that $g_i' T_1 \in (\ell_1,\ldots,\ell_{d+1}) + (g_iT_1)$. Thus we obtain \begin{equation}\label{gieqn3}(\ell_1,\ldots,\ell_{d+1}) + (g_iT_1) = (\ell_1,\ldots,\ell_{d+1}) + (g_i'T_1). \end{equation} Again noting that $\mathcal{H} = (\ell_1,\ldots,\ell_{d+1})$ is a prime ideal and $T_1\notin \mathcal{H}$, it follows that $(\ell_1,\ldots,\ell_{d+1}) + (g_i) = (\ell_1,\ldots,\ell_{d+1}) + (g_i')$. Hence $\mathcal{L}_i+(g_i) = \mathcal{L}_i +(g_i')$, as $(\ell_1,\ldots,\ell_{d+1}) \subset \mathcal{L}_i$. \end{proof}
\begin{proposition}\label{gcdsinA} There is a containment of ideals, $\mathcal{L}_m+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)) \subseteq \mathcal{A}$. \end{proposition}
\begin{proof} We show that $\mathcal{L}_i+\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i) \subseteq \mathcal{A}$ for $1\leq i\leq m$, inductively. For $i=1$, we clearly have $\mathcal{L}_1 = \mathscr{L} \subset \mathcal{A}$. By Cramer's rule, we have $I_{d+1} (\mathcal{B}_1) = I_{d+1}(B) \subseteq \mathscr{L}:(x_1,\ldots,x_{d+1})\subseteq \mathcal{A}$. Writing $g_1= \mathop{\rm gcd} I_{d+1}(B)$, by \Cref{gcds} we have $(g_1)(T_1,\ldots,T_{d+1}) =I_{d+1}(\mathcal{B}_1) \subseteq \mathcal{A}$. Modulo $\mathcal{A}$, the image of $(T_1,\ldots,T_{d+1})$ in $\mathcal{R}(I)$ is an ideal of positive grade, which is annihilated by the image of $g_1$. Thus $g_1 \in \mathcal{A}$ and so $\mathcal{L}_1+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_1)) \subseteq \mathcal{A}$, which completes the initial step.
If $m=1$, we are finished, so assume that $m\geq 2$ and $\mathcal{L}_j+ (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_j)) \subseteq \mathcal{A}$ for all $1\leq j\leq i-1$ for some $i \leq m$. By \Cref{gcdit} and the induction hypothesis, we have $\mathcal{L}_i = \mathcal{L}_{i-1} + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})) \subseteq \mathcal{A}$. Hence we must show that $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)\in \mathcal{A}$. Writing $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})$, by Cramer's rule we see that $$I_{d+1} (\mathcal{B}_i) \subseteq (\ell_1,\ldots,\ell_{d+1},g_{i-1}):(x_1,\ldots,x_{d+1}) \subseteq \mathcal{A} : (x_1,\ldots,x_{d+1}) = \mathcal{A}$$ as $\mathcal{A} = \mathscr{L}:(x_1,\ldots,x_{d+1})^\infty$. Using \Cref{gcds} once more, we see that $(g_i)(T_1,\ldots,T_{d+1}) =I_{d+1}(\mathcal{B}_i) \subseteq \mathcal{A}$, where $g_i = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)$. Thus $g_i\in \mathcal{A}$ as the image of $g_i$ in $\mathcal{R}(I)$ annihilates an ideal of positive grade, just as before. \end{proof}
With the containment $\mathcal{L}_m+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)) \subseteq \mathcal{A}$, we aim to find sufficient criteria to ensure this is an equality. Recall from \Cref{DandA} that $\widetilde{\mathcal{A}}= \frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$. We now provide a similar description of the ideal obtained from \Cref{gcdit}. By \Cref{colons}, we have $(\widetilde{x_1,\ldots,x_{d+1}}) \widetilde{\mathcal{K}} \subseteq (\widetilde{x_{d+1}})$, hence $(\widetilde{x_1,\ldots,x_{d+1}})^i \widetilde{\mathcal{K}}^i \subseteq (\widetilde{x_{d+1}}^i)$. Thus we may consider the $\mathcal{R}(J)$-ideal $\frac{\widetilde{f} \,\widetilde{\mathcal{K}}^i}{\widetilde{x_{d+1}}^i}$ for any $1\leq i\leq m$.
\begin{theorem}\label{equal} With the assumptions of \Cref{setting1} and $\mathcal{K}$ as in \Cref{notation2}, one has $\frac{\widetilde{f} \,\widetilde{\mathcal{K}}^m}{\widetilde{x_{d+1}}^m} = ({ \mathcal{L}_{m}+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))})^{\sim}$ in $\mathcal{R}(J)$. \end{theorem}
\begin{proof} We proceed in a manner similar to the proof of \cite[4.7]{BM}. Letting $D_i = \frac{\widetilde{f} \,\widetilde{\mathcal{K}}^i}{\widetilde{x_{d+1}}^i}$ and $D_i' =( \mathcal{L}_{i}+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)))^\sim$, it is clear that $D_i \subseteq D_{i+1}$ and $D_i' \subseteq D_{i+1}'$ for any $1\leq i\leq m-1$. We show that $D_i = D_i'$ for all $1\leq i\leq m$ by induction.
Suppose that $i=1$. We first show that $D_1\subseteq D_1' =(\mathscr{L} + (\mathop{\rm gcd} I_{d+1}(B)))^\sim$. As noted in the proof of \Cref{PropertiesOfA}, $\mathcal{K}$ may be written as $\mathcal{K}=(\ell_1',\ldots,\ell_{d+1}') + (\mathop{\rm gcd} I_d(B'))+(x_{d+1})$, where $B'$ is the submatrix obtained by deleting the last row of $B(\psi)$ and $[\ell_1' \ldots \ell_{d+1}'] = [x_1 \ldots x_d] \cdot B'$. Thus modulo $\mathcal{H}$, we see $(\ell_1'\widetilde{,\ldots,}\ell_{d+1}') \subset (\widetilde{x_{d+1}})$ and so $\frac{\widetilde{f} \,(\ell_1'\widetilde{,\ldots,}\ell_{d+1}')}{\widetilde{x_{d+1}}}\subseteq (\widetilde{f}) = \widetilde{\mathscr{L}}$. Let $g' = \mathop{\rm gcd} I_d(B')$ and recall that $B'$ is the Jacobian dual of $\psi'$, an alternating matrix as in \Cref{J'ideal}, hence the minors of $B'$ factor in a similar manner as the minors of $B$, following \Cref{JDminors} and \Cref{gcds}. Let $B_1'$ and $B_1$ denote the submatrices of $B'$ and $B$, respectively, which are obtained by deleting their first columns. By Cramer's rule, in $\mathcal{R}(J)$ we have $\widetilde{\operatorname{det} B_1} \cdot \widetilde{x_{d+1}} =\widetilde{f}\cdot \widetilde{\operatorname{det} B_1'}$. By \Cref{JDminors} we have $\operatorname{det} B_1' = g'\cdot T_1$ and by \Cref{gcds} we have $\operatorname{det} B_1 = g_1\cdot T_1$, where $g_1 = \mathop{\rm gcd} I_{d+1}(B)$. Since $\mathcal{R}(J)$ is a domain, it follows that $\widetilde{g_1} \cdot \widetilde{x_{d+1}} =\widetilde{f}\cdot \widetilde{g'}$. Thus $\frac{\widetilde{f} \widetilde{g'}}{\widetilde{x_{d+1}}}= \widetilde{g_1}$ and so $D_1\subseteq D_1'$.
To show the reverse containment $D_1'\subseteq D_1$, recall $x_{d+1} \in \mathcal{K}$ and so $\widetilde{f} = \frac{\widetilde{f}\widetilde{x_{d+1}}}{\widetilde{x_{d+1}}} \in D_1$. Thus $\widetilde{\mathcal{L}_1} =\widetilde{\mathscr{L}} \subset D_1$ and so it suffices to show that $\widetilde{g_1}\in D_1$, where $g_1= \mathop{\rm gcd} I_{d+1}(B)$ as before. However, from the previous argument above we have $\widetilde{g_1} = \frac{\widetilde{f} \widetilde{g'}}{\widetilde{x_{d+1}}} \in D_1$, and so $D_1'\subseteq D_1$. Thus $D_1' = D_1$, which completes the initial step.
We are finished if $m=1$, so we may assume that $m\geq 2$ and $D_j=D_j'$ for all $1\leq j \leq i-1$ for some $i\leq m$. We begin by showing that $D_i \subseteq D_i'$. Consider $\frac{\widetilde{f} \widetilde{w_1}\cdots \widetilde{w_i}}{\widetilde{x_{d+1}}^i} \in D_i$, for $w_1,\ldots, w_i \in \mathcal{K}$. Writing $\widetilde{w'} = \frac{\widetilde{f} \widetilde{w_1}\cdots \widetilde{w_{i-1}}}{\widetilde{x_{d+1}}^{i-1}}$, notice that $\widetilde{w'} \in D_{i-1} = D_{i-1}' =( \mathcal{L}_{i-1}+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})))^\sim$ by the induction hypothesis. With this we show that $\frac{\widetilde{f} \widetilde{w_1}\cdots \widetilde{w_i}}{\widetilde{x_{d+1}}^i } = \frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}}$ is contained in $D_i'$. If $w' \in \mathcal{L}_{i-1}$, then $\widetilde{w'} \in D'_{i-2}= D_{i-2}$ if $i>2$, and $\widetilde{w'} \in (\widetilde{f})$ if $i=2$. In either case, we have $\frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}} \in D_{i-1} = D_{i-1}' \subseteq D_i'$, by the induction hypothesis, and we are finished. Hence we may assume that $w' \in (g_{i-1})$, where $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})$, and it is enough to take $w' = g_{i-1}$.
As noted in the proof of \Cref{Kscm}, recall that $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$, where $g'=\mathop{\rm gcd} I_d(B')$. As $\widetilde{w_i}\in \widetilde{\mathcal{K}}$, there are two cases to consider. If $\widetilde{w_i}\in (\widetilde{x_{d+1}})$, then $\frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}} \in D_{i-1}' \subseteq D_i'$ and we are finished. Thus we may assume that $w_i \in (g')$ and it is enough to take $w_i =g'$. Let $(\mathcal{B}_i)_1$ denote the submatrix obtained by deleting the first column of $\mathcal{B}_i$. By Cramer's rule, in $\mathcal{R}(J)$ we have $\widetilde{x_{d+1}}\cdot \widetilde{\operatorname{det} (\mathcal{B}_{i})_1} =\widetilde{g_{i-1}}\cdot \widetilde{\operatorname{det} (B')_1}$, where $(B')_1$ is the submatrix of $B'$ obtained by deleting its first column as before. Once again, we have $\operatorname{det} B_1' = g'\cdot T_1$ by \Cref{JDminors} and $\operatorname{det} (\mathcal{B}_i)_1 = g_i\cdot T_1$ by \Cref{gcds}, where $g_i = \mathop{\rm gcd} I_{d+1} (\mathcal{B}_i)$. Again noting that $\mathcal{R}(J)$ is a domain, it then follows that $\widetilde{x_{d+1}} \cdot \widetilde{g_i} =\widetilde{g_{i-1}}\cdot \widetilde{g'}$. Thus $\frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}} = \frac{\widetilde{g_{i-1}}\cdot \widetilde{g'}}{\widetilde{x_{d+1}}} = \widetilde{g_i} \in D_i'$, which shows that $D_i \subseteq D_i'$.
To show that $D_i' \subseteq D_i$, note that $\widetilde{\mathcal{L}_i} = (\mathcal{L}_{i-1} + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})))^\sim$ following \Cref{gcdit}. Thus $\widetilde{\mathcal{L}_i} = D_{i-1}' = D_{i-1}\subset D_i$, by the induction hypothesis. Moreover, the previous argument shows that $\widetilde{g_i} = \frac{\widetilde{g_{i-1}}\cdot \widetilde{g'}}{\widetilde{x_{d+1}}}$, where $g_i = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)$, $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_{i-1})$, and $g' = \mathop{\rm gcd} I_d(B')$. Notice that $g_{i-1} \in D_{i-1}' = D_{i-1}$ by the induction hypothesis, and $g' \in \mathcal{K}$. Thus $\widetilde{g_i} = \frac{\widetilde{g_{i-1}}\cdot \widetilde{g'}}{\widetilde{x_{d+1}}} \in D_i$ which shows that $D_i' \subseteq D_i$. Hence $D_i'= D_i$, which completes the induction. \end{proof}
We end this section by giving a necessary and sufficient condition for when the ideal of gcd-iterations coincides with $\mathcal{A}$.
\begin{corollary}\label{powersym} With the assumptions of \Cref{setting1}, $\mathcal{A} = \mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$ in $S[T_1,\ldots,T_{d+1}]$ if and only if $\widetilde{\mathcal{K}}^m = \widetilde{\mathcal{K}}^{(m)}$ in $\mathcal{R}(J)$. \end{corollary}
\begin{proof} Combining \Cref{equal}, \Cref{gcdsinA}, and \Cref{DandA}, we have $$\frac{\widetilde{f} \,\widetilde{\mathcal{K}}^m}{\widetilde{x_{d+1}}^m} = (\mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)))^\sim \subseteq \widetilde{\mathcal{A}} = \frac{\widetilde{f} \,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m} $$ and the claim follows, noting that $\mathcal{R}(J)$ is a domain. \end{proof}
\section{The Main Result}\label{defidealsec}
In \Cref{powersym}, a condition was given for when the algorithm of gcd-iterations yields a generating set of $\mathcal{A}$, the ideal defining $\mathcal{R}(I)$ as a quotient of $S[T_1,\ldots,T_{d+1}]$. In this section, we show that this condition is satisfied and moreover, this iterative method produces a \textit{minimal} generating set of $\mathcal{A}$. Additional properties of $\mathcal{R}(I)$ such as Cohen-Macaulayness and Castelnuovo-Mumford regularity are studied as well. We proceed in the same manner as section 5 of \cite{BM} throughout.
\begin{proposition}\label{hgtIdB} In $S[T_1,\ldots, T_{d+1}]$, one has $\mathop{\rm ht} I_{d}(B(\psi)) \geq 2$. \end{proposition}
\begin{proof} Recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathop{\rm Sym}(J)\cong \mathcal{R}(J)$. In particular, $\mathop{\rm Sym}(J)$ is a domain of dimension $d+2$, following \Cref{PropertiesOfA}. As $\psi$ consists of linear entries in $S$ and $B(\psi)$ consists of linear entries in $k[T_1,\ldots,T_{d+1}]$, there is an isomorphism of symmetric algebras $\mathop{\rm Sym}(J) \cong \mathop{\rm Sym}_{k[\underline{T}]}(E)$, where $E=\mathop{\rm coker} B(\psi)$. Since $\mathop{\rm Sym}(J)$ is a domain, by \cite[6.8]{HSV2} we have $$d+2 = \mathop{\rm dim} \mathop{\rm Sym}(J) = \mathop{\rm dim} {\rm Sym}_{k[\underline{T}]}(E) = \mathop{\rm rank} E + \mathop{\rm dim} k[T_1,\ldots,T_{d+1}].$$ Thus $\mathop{\rm rank} E =1$, hence by \cite[6.8]{HSV2} we have $\mathop{\rm ht} I_d(B(\psi)) \geq 2$. \end{proof}
We now present the main result of this paper.
\begin{theorem}\label{mainresult} With the assumptions of \Cref{setting1}, we have $$\mathcal{A} = \mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)).$$ Moreover, the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$ satisfies $$\mathcal{J} =\overline{\mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))}$$ where $\overline{\,\cdot\,}$ denotes images modulo $(f)$. \end{theorem}
\begin{proof} We proceed as in the proof of \cite[5.3]{BM}. By \Cref{powersym}, it suffices to show that $\widetilde{\mathcal{K}}^m = \widetilde{\mathcal{K}}^{(m)}$ in order to conclude that $\mathcal{A} = \mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$, from which it follows that $\mathcal{J}=\overline{\mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))}$. Recall that $\widetilde{\mathcal{K}}$ is a strongly Cohen-Macaulay $\mathcal{R}(J)$-ideal of height one and is generically a complete intersection, by \Cref{PropertiesOfA} and \Cref{Kscm}. Thus by \cite[3.4]{SV}, it is enough to show that $$\mu(\widetilde{\mathcal{K}}_\mathfrak{p}) \leq \mathop{\rm ht} \mathfrak{p} -1 =1$$ for any prime $\mathcal{R}(J)$-ideal $\mathfrak{p}$ containing $\widetilde{\mathcal{K}}$ with $\mathop{\rm ht} \mathfrak{p} =2$, in order to conclude that $\widetilde{\mathcal{K}}^m = \widetilde{\mathcal{K}}^{(m)}$. Let $\mathfrak{p}$ be such a prime ideal in $\mathcal{R}(J)$ and we consider two cases.
Recall that $(\widetilde{x_1,\ldots,x_{d+1}})$ is a prime $\mathcal{R}(J)$-ideal with height one by \Cref{PropertiesOfA}. If $\mathfrak{p} \nsupseteq (\widetilde{x_1,\ldots,x_{d+1}})$, repeating the argument within the proof of \Cref{Kscm} shows that $\widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}$. Thus the claim is satisfied in this case.
Now assume that $\mathfrak{p}\supseteq (\widetilde{x_1,\ldots,x_{d+1}})$. Recall that $\mathop{\rm ht} I_d(B(\psi)) \geq 2$ by \Cref{hgtIdB}, and $B(\psi)$ has entries in $k[T_1,\ldots,T_{d+1}]$. Thus the ideal $(x_1,\ldots,x_{d+1})+I_d(B(\psi))$ has height at least $d+3$ in $S[T_1,\ldots,T_{d+1}]$, hence the image of this ideal in $\mathcal{R}(J)$ has height at least $3$. With this, it follows that $\mathfrak{p} \nsupseteq \widetilde{I_d(B(\psi))}$ as $\mathop{\rm ht} \mathfrak{p} =2$. Thus there is some $d\times d$ minor $w$ of $B(\psi)$ with $\widetilde{w}\notin \mathfrak{p}$.
As $B(\psi)$ is a $(d+1) \times (d+1)$ matrix, this minor $w$ is obtained by deleting row $i$ and column $j$ of $B(\psi)$, for some $1\leq i,j\leq d+1$. Deleting column $j$ of $B(\psi)$ and applying \Cref{crlemma}, we have
\begin{equation}\label{wequation}
\widetilde{x_{d+1}} \cdot \widetilde{w} = (-1)^{i-d-1}\widetilde{x_i}\cdot \widetilde{\operatorname{det} (B')_j},
\end{equation}
where $(B')_j$ is the submatrix of $B'$ obtained by deleting column $j$. Recall that $B' = B(\psi')$, where $\psi'$ is as in \Cref{J'ideal}. Hence by \Cref{JDminors} we have $\operatorname{det} (B')_j = (-1)^{j+1}T_j \cdot g'$, where $g' =\mathop{\rm gcd} I_d(B')$. Thus (\ref{wequation}) becomes \begin{equation}\label{wequation2} \widetilde{x_{d+1}} \cdot \widetilde{w} = (-1)^{i-d+j}\widetilde{x_i}\cdot\widetilde{T_j} \cdot \widetilde{g'}. \end{equation} Localizing at $\mathfrak{p}$, $\widetilde{w}$ becomes a unit and (\ref{wequation2}) shows that $(\widetilde{x_{d+1}})_\mathfrak{p} \subseteq (\widetilde{g'})_\mathfrak{p}$. Thus $\widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{g'},\widetilde{x_{d+1}})_\mathfrak{p} = (\widetilde{g'})_\mathfrak{p}$, and again the claim is satisfied. \end{proof}
\begin{corollary}\label{gcdsnotzero} For all $1\leq i\leq m$, we have $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i) \neq 0$. Additionally, $\mathcal{F}(I) \cong k[T_1,\ldots,T_{d+1}]/(\mathfrak{f})$ where $\deg \mathfrak{f} = m(d-1)$. \end{corollary}
\begin{proof} Recall from \Cref{gcdit}, if $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_i) = 0$ for any $1\leq i\leq m$, then $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_j) =0$ for all $i\leq j\leq m$. Thus it suffices to show that $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m) \neq 0$ to verify the first statement. By \Cref{mainresult}, we have $\mathcal{A}=\mathcal{L}_m +(\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$ and we note that $\mathcal{L}_m \subset (x_1,\ldots,x_{d+1})$, which follows from \Cref{gcdit} and \Cref{deggcdprop}. Hence $(x_1,\ldots,x_{d+1}) +\mathcal{A} = (x_1,\ldots,x_{d+1}) + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$ and so $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m) \notin (x_1,\ldots,x_{d+1})$ since $\mathcal{A} \nsubseteq (x_1,\ldots,x_{d+1})$, as noted in the proof of \Cref{DandA}. In particular, $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)$ is nonzero, as claimed.
Again, noting that $(x_1,\ldots,x_{d+1}) +\mathcal{A} = (x_1,\ldots,x_{d+1}) + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$ and $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)\neq 0$, it follows from \Cref{deggcdprop} that $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)$ is of bidegree $(0,m(d-1))$. Hence $\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m)$ is the only equation of $\mathcal{A}$ contained in $k[T_1,\ldots,T_{d+1}]$, i.e. it is the only fiber equation. Thus modulo $(x_1,\ldots,x_{d+1}) +\mathcal{A}$, we see that $\mathcal{F}(I)$ is indeed a hypersurface ring defined by an equation of degree $m(d-1)$. \end{proof}
By \Cref{mainresult}, the method of gcd-iterations gives a generating set of $\mathcal{A}$. We now claim this is a minimal generating set. Recall that the \textit{relation type} of $I$ is the maximum degree, with respect to $T_1,\ldots,T_{d+1}$, of a minimal generator of the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$. It is denoted by ${\rm rt}(I)$.
\begin{proposition}\label{mingens} In the setting of \Cref{mainresult}, the generating set of $\mathcal{A}= \mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$ obtained from \Cref{gcdit} is minimal. In particular, $\mu(\mathcal{A}) = d+m+2$ and $\mu(\mathcal{J}) = d+m+1$. Moreover, the relation type of $I$ is ${\rm rt}(I) =m(d-1)$. \end{proposition}
\begin{proof} The generating set of $\mathcal{A}=\mathcal{L}_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}_m))$ obtained from the method of gcd-iterations is $\{\ell_1,\ldots,\ell_{d+1},f,g_1,\ldots,g_m\}$ where $g_i = \mathop{\rm gcd} I_{d+1}(\mathcal{B}_i)$, for $\mathcal{B}_i$ a matrix as in \Cref{gcdit}. Recall that $\deg \ell_i = (1,1)$ for $1\leq i\leq d+1$, as $\psi$ consists of linear entries in $S$. Additionally, recall that $\deg f = (m,0)$. Moreover, by \Cref{gcdsnotzero} and \Cref{deggcdprop}, we have $\deg g_i = (m-i,i(d-1))$ for $1\leq i\leq m$. With this, we show that the generating set above is minimal by showing that none of these generators can be expressed in terms of the others.
First, recall that the ideal $\mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$, as in \Cref{notation2}, is the ideal defining $\mathop{\rm Sym}(J)$. As $[\ell_1 \ldots \ell_{d+1}]= [T_1 \ldots T_{d+1}] \cdot \psi$ and $\psi$ minimally presents $J$, it follows that $\ell_1,\ldots,\ell_{d+1}$ minimally generate $\mathcal{H}$. To see that these are minimal generators of $\mathcal{A}$ as well, suppose not for a contradiction and, without loss of generality, assume $\ell_1$ is a non-minimal generator of $\mathcal{A}$. Thus $\ell_1$ can be written as a combination of the remaining generators $\ell_2,\ldots,\ell_{d+1},f,g_1,\ldots,g_m$.
If $m\geq 2$, by degree considerations in both components of the bigrading, it then follows that $\ell_1$ can be written in terms of $\ell_2,\ldots,\ell_{d+1}$, which is a contradiction as $\ell_1,\ldots,\ell_{d+1}$ minimally generate $\mathcal{H}$. In the case $m=1$, by degree considerations once more, $\ell_1$ can then be expressed in terms of $\ell_2,\ldots,\ell_{d+1},f$. If $\ell_1 \in ( \ell_2,\ldots,\ell_{d+1})$, we achieve the same contradiction, hence it follows that there is some element $b$ of bidegree $(0,1)$ such that $b\cdot f \in \mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$. However, recall from \Cref{PropertiesOfA} that $\mathcal{H}$ is a prime ideal, hence either $b\in \mathcal{H}$ or $f\in \mathcal{H}$, both of which are impossible by degree considerations.
A similar argument shows that $f$ and $g_m$ are minimal generators, as they have bidegrees $(m,0)$ and $(0,m(d-1))$ respectively. Now suppose that $g_i$ is a non-minimal generator of $\mathcal{A}$ for some $i\leq m-1$, in which case there are higher-order iterations. Writing $g_i$ as a combination of the remaining generators, by degree considerations in both components of the bigrading, it then follows that $g_i\in (\ell_1,\ldots,\ell_{d+1})$. Thus the column $\partial g_i$ in \Cref{gcdit} can be taken as a combination of the columns of $B(\psi)$ by \Cref{gcdwd}. Hence $I_{d+1}(\mathcal{B}_{i+1}) = I_{d+1}(B(\psi)) = 0$ by \Cref{detBpsizero}, and so $g_{i+1} = 0$ which is a contradiction by \Cref{gcdsnotzero}.
The claim regarding $\mathcal{J}$ then follows as $f$ is a minimal generator of $\mathcal{A}$. Lastly, the relation type of $I$ is seen to be ${\rm rt}(I)=m(d-1)$, as $\deg g_m = (0,m(d-1))$. \end{proof}
We now provide an example that illustrates the process of gcd-iterations, in order to provide a minimal generating set of $\mathcal{A}$. We remark that it is quite simple to perform this algorithm in a computer algebra system, such as \textit{Macaulay2} \cite{M2}.
\begin{example} For $k$ an infinite field, let $S= k[x_1,x_2,x_3,x_4,x_5]$, $f=x_5^3$, and $R=S/(f)$. Consider the matrix $\varphi$, with entries in $R$, and its counterpart $\psi$ with entries in $S$: \[\varphi =\begin{bmatrix} 0&\overline{x_1} & \overline{x_2} & 0 & \overline{x_4} \\[0.5ex] -\overline{x_1}&0&\overline{x_4}& 0& \overline{x_3} \\[0.5ex] -\overline{x_2}&-\overline{x_4}&0&\overline{x_1}& \overline{x_5}\\[0.5ex] 0&0&-\overline{x_1}&0&\overline{x_2}\\[0.5ex] -\overline{x_4}&-\overline{x_3}&-\overline{x_5}&-\overline{x_2}&0 \end{bmatrix} \hspace{10mm} \psi =\begin{bmatrix} 0&x_1 & x_2 & 0 & x_4 \\[0.5ex] -x_1&0&x_4& 0& x_3 \\[0.5ex] -x_2&-x_4&0&x_1& x_5\\[0.5ex] 0&0&-x_1&0&x_2\\[0.5ex] -x_4&-x_3&-x_5&-x_2&0 \end{bmatrix} \] where $\overline{\,\cdot\,}$ denotes images modulo $(f)$. A simple computation shows that $\mathop{\rm ht} \mathop{\rm Pf}_4(\varphi) \geq 3$, hence $I=\mathop{\rm Pf}_4(\varphi)$ is a perfect Gorenstein $R$-ideal of grade $3$ by \cite[2.1]{BE}. Moreover, $I$ satisfies $G_4$ since $\mathop{\rm ht} I_2(\varphi) = 4$, $\mathop{\rm ht} I_3(\varphi) =3$, and $\mathop{\rm ht} I_4(\varphi) = 3\geq 2$, which can be checked easily. Thus the assumptions of \Cref{mainresult} are met.
The Jacobian dual of $\psi$ which consists of entries in $k[T_1,T_2,T_3,T_4,T_5]$ is \[ B(\psi) = \begin{bmatrix} -T_2 & T_1 & -T_4 & T_3 & 0 \\[0.5ex] -T_3 & 0 & T_1 & -T_5 & T_4\\[0.5ex] 0 & -T_5 & 0 & 0 & T_2\\[0.5ex] -T_5 & -T_3 & T_2 & 0 & T_1\\[0.5ex] 0 & 0 & - T_5 & 0 & T_3 \end{bmatrix} \] hence we may construct the modified Jacobian dual and perform the method of gcd-iterations. For the purposes of notation, let $W=T_1T_3T_5 -T_2T_3^2 -T_2^2T_5 - T_4T_5^2$, which happens to be $W = \mathop{\rm gcd} I_4(B')$, following \Cref{notation2}. Following \Cref{gcdit}, we obtain \[ \begin{array}{lll}
\mathcal{B}_1 = \begin{bmatrix} -T_2 & T_1 & -T_4 & T_3 & 0 &0 \\[0.5ex] -T_3 & 0 & T_1 & -T_5 & T_4 &0\\[0.5ex] 0 & -T_5 & 0 & 0 & T_2 & 0\\[0.5ex] -T_5 & -T_3 & T_2 & 0 & T_1 &0\\[0.5ex] 0 & 0 & - T_5 & 0 & T_3 &x_5^2 \end{bmatrix} & \quad & g_1 = \mathop{\rm gcd} I_5(\mathcal{B}_1)= x_5^2 W\\ \, \\ \mathcal{B}_2 = \begin{bmatrix} -T_2 & T_1 & -T_4 & T_3 & 0 &0 \\[0.5ex] -T_3 & 0 & T_1 & -T_5 & T_4 &0\\[0.5ex] 0 & -T_5 & 0 & 0 & T_2 & 0\\[0.5ex] -T_5 & -T_3 & T_2 & 0 & T_1 &0\\[0.5ex] 0 & 0 & - T_5 & 0 & T_3 &x_5 W \end{bmatrix} & \quad & g_2 = \mathop{\rm gcd} I_5(\mathcal{B}_2)= x_5 W^2\\ \, \\ \mathcal{B}_3 = \begin{bmatrix} -T_2 & T_1 & -T_4 & T_3 & 0 &0 \\[0.5ex] -T_3 & 0 & T_1 & -T_5 & T_4 &0\\[0.5ex] 0 & -T_5 & 0 & 0 & T_2 & 0\\[0.5ex] -T_5 & -T_3 & T_2 & 0 & T_1 &0\\[0.5ex] 0 & 0 & - T_5 & 0 & T_3 & W^2 \end{bmatrix} & \quad & g_3 = \mathop{\rm gcd} I_5(\mathcal{B}_3)= W^3, \end{array} \] where the greatest common divisors of the minors occur as in \Cref{gcds}.
By \Cref{mainresult}, we have $\mathcal{A}= \mathscr{L} + (g_1,g_2,g_3)$ and the defining ideal of $\mathcal{R}(I)$ is $\mathcal{J} = \overline{\mathscr{L} + (g_1,g_2,g_3)}$. Notice that $\mathcal{A}$ and $\mathcal{J}$ are not prime ideals, which is to be expected as $R$, and hence $\mathcal{R}(I)$, is not a domain. \end{example}
\begin{remark}\label{f linear case}
We note that \Cref{mainresult} recovers \Cref{Moreyresult} when $m=1$. After a change of coordinates, it can be assumed that the factored equation $f$ is one of the indeterminates, say $f=x_{d+1}$. Thus $R\cong k[x_1,\ldots,x_d] =S'$ and so $I$ and $J'$ are the same ideal with $\varphi = \psi'$, following the notation in \Cref{J'ideal}. Recall that the submatrix $B'$ of $B(\psi)$, as in \Cref{notation2}, is the Jacobian dual $B'=B(\psi')$, with respect to $x_1,\ldots,x_d$. Moreover, the modified Jacobian dual $B=[B(\psi)\,|\,\partial f]$ is unique in this case and the column $\partial f$ consists of all zeros except for a 1 in the last entry. Thus the greatest common divisor of the minors of $B$, the first and only gcd-iteration, is exactly the greatest common divisor of the minors of $B' = B(\psi')$. \end{remark}
\subsection{Depth and Cohen-Macaulayness}\label{depthsec}
In the setting of \Cref{mainresult}, we now study the Cohen-Macaulay property of the Rees ring $\mathcal{R}(I)$, using the isomorphism $\mathcal{R}(I) \cong S[T_1,\ldots,T_{d+1}]/\mathcal{A}$. We continue to follow section 5 of \cite{BM} and begin by creating a handful of short exact sequences which will be useful not only to study the depth of $\mathcal{R}(I)$, but also its regularity with respect to various gradings.
Let $\mathfrak{m} =(x_1,\ldots,x_{d+1})$ and recall that $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$, where $g'= \mathop{\rm gcd} I_d(B')$. Additionally, recall that $\widetilde{\mathcal{K}}$ is a strongly Cohen-Macaulay ideal by \Cref{Kscm} and $\mathfrak{m} \mathcal{R}(J) = (\widetilde{x_{d+1}}):\widetilde{\mathcal{K}}$ by \Cref{colons}. Thus there is a short exact sequence of bigraded $\mathcal{R}(J)$-modules \[ 0\longrightarrow \mathfrak{m} \mathcal{R}(J) (0,-(d-1)) \longrightarrow \mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\longrightarrow \widetilde{\mathcal{K}} \longrightarrow 0. \]
Consider the induced sequence obtained by applying the functor $\mathop{\rm Sym}(-)$ to the sequence above. Taking the $m^{\text{th}}$ graded strand, we obtain $$\hspace{10mm}\mathfrak{m} \mathcal{R}(J) (0,-(d-1)) \otimes \text{Sym}_{m-1}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big)\overset{\sigma}{\longrightarrow}\hspace{15mm}$$ $$\hspace{20mm}\text{Sym}_{m}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big) \longrightarrow \text{Sym}_m(\widetilde{\mathcal{K}}) \longrightarrow 0.$$ Notice that $\ker \sigma$ is torsion due to rank considerations. However, it is a submodule of a torsion-free $\mathcal{R}(J)$-module, hence it must vanish. Thus $\sigma$ is injective and we have the short exact sequence $$0\longrightarrow \mathfrak{m} \mathcal{R}(J) (0,-(d-1)) \otimes \text{Sym}_{m-1}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big)\overset{\sigma}{\longrightarrow}\hspace{15mm}$$ $$\hspace{20mm}\text{Sym}_{m}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big) \longrightarrow \text{Sym}_m(\widetilde{\mathcal{K}}) \longrightarrow 0.$$ As $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$, the proof of \Cref{Kscm} shows that $\widetilde{\mathcal{K}}$ satisfies $G_\infty$. As $\widetilde{\mathcal{K}}$ is strongly Cohen-Macaulay, by \cite[2.6]{HSV1} it is an ideal of linear type, hence $\mathop{\rm Sym}_m(\widetilde{\mathcal{K}}) \cong \widetilde{\mathcal{K}}^m$. Thus the short exact sequence above can be read as \begin{equation}\label{directsumseq} 0\rightarrow \displaystyle{\bigoplus_{i=0}^{m-1}}\,\mathfrak{m} \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big)\rightarrow \displaystyle{\bigoplus_{i=0}^{m}}\, \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big) \rightarrow \widetilde{\mathcal{K}}^m\rightarrow 0. \end{equation}
We are now ready to compute the depths of $\mathcal{R}(I)$, $\mathcal{F}(I)$, and $\mathcal{G}(I)$. Recall that a Noetherian local ring $A$ is said to be \textit{almost} Cohen-Macaulay if $\mathop{\rm depth} A \geq \mathop{\rm dim} A-1$.
\begin{theorem}\label{depth} In the setting of \Cref{mainresult}, \begin{enumerate}[(a)] \setlength\itemsep{1em}
\item The Rees algebra $\mathcal{R}(I)$ is almost Cohen-Macaulay. Moreover, $\mathcal{R}(I)$ is Cohen-Macaulay if and only if $m=1$.
\item The associated graded ring $\mathcal{G}(I)$ is almost Cohen-Macaulay. Moreover, $\mathcal{G}(I)$ is Cohen-Macaulay if and only if $m=1$.
\item The special fiber ring $\mathcal{F}(I)$ is Cohen-Macaulay. \end{enumerate} \end{theorem}
\begin{proof} Consider the short exact sequence \begin{equation}\label{regseq1} 0 \longrightarrow\mathfrak{m} \mathcal{R}(J) \longrightarrow \mathcal{R}(J) \longrightarrow \mathcal{F}(J)\longrightarrow 0 \end{equation} and recall that $\mathcal{R}(J)$ is a Cohen-Macaulay domain of dimension $d+2$ by \Cref{PropertiesOfA}. Moreover, recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{F}(J) \cong k[T_1,\ldots,T_{d+1}]$. Comparing the depths of the $\mathcal{R}(J)$-modules in (\ref{regseq1}), it follows that $\mathop{\rm depth} \mathfrak{m} \mathcal{R}(J) \geq d+2$ and so $\mathop{\rm depth} \mathfrak{m} \mathcal{R}(J) =d+2$, as this is the maximum possible depth. This together with (\ref{directsumseq}) shows that $\mathop{\rm depth} \widetilde{\mathcal{K}}^m \geq d+1$.
We also have the short exact sequence \begin{equation}\label{regseq2} 0 \longrightarrow \widetilde{\mathcal{A}} \longrightarrow \mathcal{R}(J) \longrightarrow \mathcal{R}(I) \longrightarrow 0 \end{equation} and we note that $\widetilde{\mathcal{A}} = \frac{\widetilde{f}\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m} =\frac{\widetilde{f}\widetilde{\mathcal{K}}^{m}}{\widetilde{x_{d+1}}^m} \cong \widetilde{\mathcal{K}}^m$ by \Cref{DandA}, \Cref{mainresult}, and \Cref{powersym}. Comparing the depths of the modules in (\ref{regseq2}), it follows that $\mathop{\rm depth} \mathcal{R}(I) \geq d$, hence $\mathcal{R}(I)$ is almost Cohen-Macaulay. The Cohen-Macaulayness in the case $m=1$ follows from \Cref{f linear case} and \Cref{Moreyresult}. If $m\geq 2$, then $\mathcal{R}(I)$ is not Cohen-Macaulay by \cite[3.1]{PU}. Thus $\mathop{\rm depth} \mathcal{R}(I) =d$ in this case, which shows part (a).
For part (b), we note that $\mathcal{G}(I)$ is certainly almost Cohen-Macaulay if it is Cohen-Macaulay. In the case that $\mathcal{G}(I)$ is not Cohen-Macaulay, we have $\mathop{\rm depth} \mathcal{G}(I) \geq \mathop{\rm depth} \mathcal{R}(I) -1 \geq d-1$ by \cite[3.12]{HM} and (a). Thus $\mathcal{G}(I)$ is almost Cohen-Macaulay. The last assertion of (b) now follows from (a) and \cite[3.1]{PU}.
The assertion on the Cohen-Macaulayness of $\mathcal{F}(I)$ in (c) is clear since it is a hypersurface ring by \Cref{gcdsnotzero}. \end{proof}
\subsection{Regularity}
We now consider the Castelnuovo-Mumford regularity of $\mathcal{R}(I)$ in the setting of \Cref{mainresult}. We follow all definitions and conventions as given in \cite{Trung}. Once more, we proceed as in Section 5 of \cite{BM} and we note that regularity is easily compared along short exact sequences \cite{Eisenbud}.
Again we use the isomorphism $\mathcal{R}(I) \cong S[T_1,\ldots,T_{d+1}]/\mathcal{A}$ and we note that there are multiple gradings on $\mathcal{R}(I)$. We consider its regularity with respect to the gradings represented by the $S[T_1,\ldots,T_{d+1}]$-ideals $\mathfrak{m} = (x_1,\ldots,x_{d+1})$, $\mathfrak{t}=(T_1,\ldots,T_{d+1})$, and $\mathfrak{n}=(x_1,\ldots,x_{d+1},T_1,\ldots,T_{d+1})$. When computing regularity with respect to $\mathfrak{m}$, we set $\deg x_i=1$ and $\deg T_i =0$. Similarly, when computing regularity with respect to $\mathfrak{t}$, we set $\deg x_i=0$ and $\deg T_i =1$. Lastly, when computing regularity with respect to $\mathfrak{n}$, we adopt the total grading and set $\deg x_i=1$ and $\deg T_i =1$.
\begin{theorem}\label{rtandreg} In the setting of \Cref{mainresult}, we have \[ \begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{R}(I) = m(d-1)-1, & {\rm reg}_\mathfrak{m} \mathcal{R}(I) = m-1, & {\rm reg}_\mathfrak{n} \mathcal{R}(I) \leq (m+1)(d-1)-1. \end{array} \] Additionally, $\mathop{\rm reg} \mathcal{F}(I) = m(d-1)-1$. \end{theorem}
\begin{proof} For the regularity of $\mathcal{R}(I)$ with respect to $\mathfrak{t}$, it is well-known that ${\rm rt}(I) -1 \leq \mathop{\rm reg}_\mathfrak{t} \mathcal{R}(I)$ \cite[pp. 2813-2814]{Trung}. Hence $\mathop{\rm reg}_\mathfrak{t} \mathcal{R}(I) \geq m(d-1)-1$, using \Cref{mingens}. Similarly, it follows that $\mathop{\rm reg}_\mathfrak{m} \mathcal{R}(I) \geq m-1$, by comparing the degrees, with respect to $\mathfrak{m}$, of the minimal generators of $\mathcal{A}$ in \Cref{mingens}. Thus it must be shown that $\mathop{\rm reg}_\mathfrak{t} \mathcal{R}(I) \leq m(d-1)-1$ and $\mathop{\rm reg}_\mathfrak{m} \mathcal{R}(I) \leq m-1$, which we show with the remaining inequality simultaneously. We use the sequences (\ref{regseq1}) and (\ref{regseq2}) once more.
Recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{R}(J)\cong \mathop{\rm Sym}(J)$ is a domain defined by $\mathcal{H} = (\ell_1,\ldots,\ell_{d+1})$. Furthermore, recall that $ [\ell_1\ldots\ell_{d+1}]=[T_1\ldots T_{d+1}]\cdot \psi$ and $\psi$ is an alternating matrix. Hence a graded minimal free resolution of $\mathcal{R}(J)$ is known from section 6 of \cite{Kustin}. Moreover, the resolution given in \cite[6.3]{Kustin} is amenable to each of the gradings, from which it follows that \[ \begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{R}(J) =0, & {\rm reg}_\mathfrak{m} \mathcal{R}(J) =0, & {\rm reg}_\mathfrak{n} \mathcal{R}(J) =d-1. \end{array} \] As $J$ is of linear type, its special fiber ring is $\mathcal{F}(J) \cong k[T_1,\ldots,T_{d+1}]$. Thus \[ \begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{F}(J) =0, & {\rm reg}_\mathfrak{m} \mathcal{F}(J) =0, & {\rm reg}_\mathfrak{n} \mathcal{F}(J) =0. \end{array} \] With this and (\ref{regseq1}), we then have \[ \begin{array}{ccc}
{\rm reg}_\mathfrak{t}\, \mathfrak{m} \mathcal{R}(J) \leq 1, & {\rm reg}_\mathfrak{m} \,\mathfrak{m} \mathcal{R}(J) \leq 1, & {\rm reg}_\mathfrak{n}\, \mathfrak{m} \mathcal{R}(J) =d-1.\\ \end{array} \]
For convenience, write \[ \begin{array}{ccc}
M= \displaystyle{\bigoplus_{i=0}^{m-1}\mathfrak{m} \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big)}, & N= \displaystyle{\bigoplus_{i=0}^{m} \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big)} \\ \end{array} \] for the modules in (\ref{directsumseq}). With the inequalities above, it follows that \[ \begin{array}{lcl}
{\rm reg}_\mathfrak{t} M \leq m(d-1)+1, &\quad &{\rm reg}_\mathfrak{t} N = m(d-1) \\[1ex]
{\rm reg}_\mathfrak{m} M \leq m, & \quad &{\rm reg}_\mathfrak{m} N = m\\[1ex]
{\rm reg}_\mathfrak{n} M \leq (m+1)(d-1), &\quad &{\rm reg}_\mathfrak{n} N = (m+1)(d-1).\\
\end{array} \] Now using (\ref{directsumseq}), it then follows that \[ \begin{array}{ccc}
{\rm reg}_\mathfrak{t} \widetilde{\mathcal{K}}^m \leq m(d-1), & {\rm reg}_\mathfrak{m} \widetilde{\mathcal{K}}^m \leq m, & {\rm reg}_\mathfrak{n} \widetilde{\mathcal{K}}^m \leq (m+1)(d-1).\\ \end{array} \] The inequalities above, the bigraded isomorphism $\widetilde{\mathcal{A}} \cong \widetilde{\mathcal{K}}^m(0,0)$, and the sequence (\ref{regseq2}) give \[ \begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{R}(I) \leq m(d-1)-1, & {\rm reg}_\mathfrak{m} \mathcal{R}(I) \leq m-1, & {\rm reg}_\mathfrak{n} \mathcal{R}(I) \leq (m+1)(d-1)-1. \end{array} \] as claimed.
The assertion on the regularity of $\mathcal{F}(I)$ is clear as $\mathcal{F}(I)$ is a hypersurface ring defined by an equation of degree $m(d-1)$ in $k[T_1,\ldots,T_{d+1}]$, by \Cref{gcdsnotzero}. \end{proof}
\end{document} |
\begin{document}
\title{Combinatorics of the Interrupted Period.}
\begin{abstract} This article is about discrete periodicities and their combinatorial structures. It presents and describes the unique structure caused by the alteration of a pattern in a repetition. Those alterations of a pattern arise in the context of double squares and were discovered while working on bounding the number of distinct squares in a string. Nevertheless, they can arise in other phenomena and are worth being presented on their own. \end{abstract}
\noindent {\small \textbf{Keywords:} \textit{string, period, primitive string, factorization}}
If $\generateur{x}$ is a primitive word, and $\prefix{x}$ a prefix of $\generateur{x}$, the sequence $\generateur{x}^n\prefix{x}\generateur{x}^m$ has a singularity: it has a periodic part of period $\generateur{x}$, an interruption, and a resumption of the pattern $\generateur{x}$. That interruption creates a different pattern, one that does not appear in $\generateur{x}^n$. The goal of this article is to unveil that pattern.
\section{Preliminaries}
In this section, we introduce the notations and present a simple property and two of its corollaries. These observations are straightforward but their proofs introduce the technique used to prove Theorem \ref{ath} and provide insights.
We first fix some notations.
An \emph{alphabet} $A$ is a finite set.
We call \emph{letters} the elements of $A$. If $\lvert A \rvert = 2$, the words are referred to as binary and are used in computers. Another well known example for $\lvert A \rvert = 4$ is DNA. \\
A vector of $A^n$ is a \emph{word} $w$ of length $\lvert w \rvert = n$, which can also be presented under the form of an array $w[1,...,n]$. Two words are \emph{homographic} if they are equal to each other. If $x = \prefix{x}\suffix{x}x_3$ for non-empty words $\prefix{x}, \suffix{x}$ and $x_3$, then $\prefix{x}$ is a \emph{prefix} of $x$, $\suffix{x}$ is a \emph{factor} of $x$, and $x_3$ is a \emph{suffix} of $x$ (if both the prefix and the suffix are non-empty, we refer to them as proper). We define \emph{multiplication} as concatenation. In English, $breakfast = break . fast$. In a traditional fashion, we define the \emph{$n^{th}$ power} of a word $w$ as $n$ times the multiplication of $w$ with itself.
A word $x$ is \emph{primitive} if $x$ cannot be expressed as a non-trivial power of another word $x'$.\\
A word $\tilde{x}$ is a \emph{conjugate} of $x$ if $x=\prefix{x}\suffix{x}$ and $\tilde{x}=\suffix{x}\prefix{x}$ for non-empty words $\prefix{x}$ and $\suffix{x}$. The set of conjugates of $x$ together with $x$ form the conjugacy class of $x$ which is denoted $Cl(x)$. \\
A factor $x, \lvert x \rvert =n$ of $w$ has \emph{period} $p$ if $x[i]=x[i+\lvert p \rvert], \forall i \in [1,...n-\lvert p\rvert]$.\\ The \emph{number of occurrences} of a letter $c$ in a word $w$ is denoted $n_c(w)$, the \emph{longest common prefix} of $x$ and $y$ as $lcp(x,y)$ , while $lcs(x,y)$ denotes the \emph{longest common suffix} of $x$ and $y$ (note that $\lcs (x,y)$ and $\lcp (x,y)$ are words).\\\\
The properties presented next rely on a simple counting argument. While the proofs are not interesting in themselves, they still allow for meaningful results.
\begin{property}\label{fr} A word $w$ and all of its conjugates have the same number of occurrences for all of their letters, i.e. $\forall\tilde{w} \in Cl(w), \forall a \in A,\ n_a(w) = n_a(\tilde{w})$. \end{property}
\begin{proof} Note that $\forall \tilde{w} \in Cl(w), \exists w_1, w_2$, such that $w = w_1w_2, \tilde{w}=w_2w_1$. Then, $\forall a \in A, n_a(w) = n_a(w_1) + n_a(w_2) = n_a(\tilde{w})$. \qed \end{proof}
The negation of Property \ref{fr} gives the following corollary:
\begin{corollary}\label{number} If two words do not have the same number of occurrences for the same letter, they are not conjugates. \end{corollary}
Another important corollary of Property \ref{fr} is the following:
\begin{corollary} Let $x$ be a word, $\lvert x \rvert \geq n+1$. If $u=x[1...n]$ and $v=x[2...n+1]$ are conjugates of each other, then $x[1] = x[n+1]$, i.e. $v$ is a cyclic shift of $u$. \end{corollary}
\begin{proof} Note that $u$ and $v$ have the factor $x[2...n]$ in common. Since $u$ and $v$ are conjugates, they have the same number of occurrences for all of their letters (Property \ref{fr}). It follows that $n_{x[1]}(u) = n_{x[1]}(x[1...n]) = n_{x[1]}(x[2...n]) + 1 = n_{x[1]}(v) = n_{x[1]}(x[2...n]) +n_{x[1]}(x[n+1])$, hence $n_{x[1]}(x[n+1]) = 1$, i.e. $x[1] = x[n+1]$. \qed \end{proof}
\section{Theorem}
Discrete periods were described by N.J. Fine and H.S. Wilf in 1965 in the article ``Uniqueness theorem for periodic functions'' \cite{FW65}. A corollary of that theorem, the synchronization principle, was proved by W. Smyth in \cite{S05} and L. Ilie in \cite{I05}:
\begin{theorem}\label{fw}
If $w$ is primitive, then, for all conjugates $\tilde{w}$ of $w, w \neq \tilde{w}$.
\end{theorem}
This theorem is about the synchronization of patterns. The next theorem is about the impossible synchronization when a pattern is interrupted.
First, we need to formalize what we call an interruption of the pattern. Let $\generateur{x}$ be a primitive word and $\prefix{x}$ be a proper prefix of $\generateur{x}$, i.e. $\prefix{x}\ne \generateur{x}$. Write $\generateur{x}=\prefix{x}\suffix{x}$ for some suffix $\suffix{x}$ of $\generateur{x}$. \\\\ Let $W=\generateur{x}^{e_1}\prefix{x}\generateur{x}^{e_2}$ with $e_1\geq1, e_2 \geq 1, e_1+e_2\geq 3$.\\\\ We see that $W$ has a repetition of a pattern $\generateur{x}$ as a prefix: $\generateur{x}^{e_1}\prefix{x}$, and then the repetition is interrupted at position $\lvert \generateur{x}^{e_1}\prefix{x} \rvert$, before starting again in the suffix $\generateur{x}^{e_2}$. We need one more definition (albeit that definition is not necessary, it is presented here for better understanding) before introducing the two factors that we claim have restricted occurrences in $W$.
\begin{definition} Let $W=\generateur{x}^{e_1}\prefix{x}\generateur{x}^{e_2}$ with $e_1\geq1, e_2 \geq 1, e_1+e_2\geq 3$ for a primitive word $\generateur{x}=\prefix{x}\suffix{x}$. Let $\tilde{p}$ be the prefix of length $\lvert \lcp (\prefix{x}\suffix{x}, \suffix{x}\prefix{x})\rvert+1$ of $\prefix{x}\suffix{x}$ and $\tilde{s}$ the suffix of length $\lvert \lcs (\prefix{x}\suffix{x}, \suffix{x}\prefix{x})\rvert+1$ of $\suffix{x}\prefix{x}$. The factor $\tilde{s}\tilde{p}$ starting at position $\lvert \generateur{x}^{e_1} \rvert + \lvert \prefix{x} \rvert - \lvert \lcs (\prefix{x}\suffix{x}, \suffix{x}\prefix{x})\rvert -1$ is the \emph{core of the interrupt} of $W$.\\ \end{definition} If $W$ and its interrupt are clear from the context, we will just speak of the core (of the interrupt).
\begin{example} Consider $\generateur{x} = aaabaaaaaabaaaa$ and $\prefix{x} = aaabaaaaaabaaa$, then $\generateur{x}\prefix{x}\generateur{x}^2$ has $\generateur{x}\prefix{x}\generateur{x} = aaabaaaaaabaaaa\mathbf{aaabaaaaaabaaa}aaabaaaaaabaaaa$ as a prefix and $\suffix{x} = a$. It follows that $\lcp (\prefix{x}\suffix{x},\suffix{x}\prefix{x}) = aaa$, and $ \tilde{p} = aaab, \lcs (\prefix{x}\suffix{x},\suffix{x}\prefix{x}) = aaa$, and $\tilde{s}=baaa$. The core of the interrupt, $\tilde{s}\tilde{p}$, is underlined in: \[ x\prefix{x}x = aaabaaaaaabaaaaaaabaaaaaa\underbrace{baaaaaab}_{\tilde{s}\tilde{p}}aaaaaabaaaa. \] \end{example}
The factors that were previously known to have restricted occurrences in $W$, to the best of the author's knowledge, were the inversion factors defined by A. Deza, F. Franek and A. Thierry in \cite{DFT15}:
\begin{definition} Let $W=\generateur{x}^{e_1}\prefix{x}\generateur{x}^{e_2}$ with $\generateur{x}=\prefix{x}\suffix{x}$ a primitive word and $e_1\geq1, e_2 \geq 1, e_1+e_2\geq 3$. An \emph{inversion factor} of $W$ is a factor that starts at position $i$ and for which: \begin{itemize} \item $W[i+j]=W[i+j+\lvert \generateur{x} \rvert + \lvert \prefix{x}\rvert]$ for $0\leq j < \lvert \prefix{x}\rvert$, and \item $W[i+j]=W[i+j+\lvert \prefix{x}\rvert ]$ for $ \lvert \prefix{x}\rvert \leq j \leq \lvert x\rvert + \lvert \prefix{x}\rvert$. \end{itemize} \end{definition}
Those inversion factors, which have the structure of $\suffix{x}\prefix{x}\prefix{x}\suffix{x} = \tilde{\generateur{x}}\generateur{x}$, and whose lengths are twice the length of $\generateur{x}$, were used as two notches that force a certain synchronization of certain squares in the problem of the maximal number of squares in a word, and made it possible to offer a new bound to that problem. The main anticipated application of the next result is an improvement of that bound, though the technique has already proved useful in the improvement of M. Crochemore and W. Rytter's three squares lemma, \cite{CR95}, by H. Bay, A. Deza and F. Franek, \cite{BDF15}, and in the proof of the New Periodicity Lemma by H. Bay, F. Franek and W. Smyth \cite{BFS15}.
Now, let $w_1$ be the factor of length $\lvert \generateur{x} \rvert$ of $W$ that has the core of the interrupt of $W$ as a suffix, and let $w_2$ be the factor of length $\lvert \generateur{x} \rvert$ that has the core of the interrupt of $W$ as a prefix. We will show that both $w_1$ and $w_2$ have restricted occurrences in $W$.\\
\begin{theorem}\label{ath} Let $\generateur{x}$ be a primitive word, $\prefix{x}$ a proper prefix of $\generateur{x}$ and $W=\generateur{x}^{e_1}\prefix{x}\generateur{x}^{e_2}$ with $e_1\geq1, e_2 \geq 1, e_1+e_2\geq 3$. Let $w_1$ be the factor of length $\lvert \generateur{x} \rvert$ of $W$ ending with the core of the interrupt of $W$, and let $w_2$ be the factor of length $\lvert \generateur{x} \rvert$ starting with the core of the interrupt of $W$. The words $w_1$ and $w_2$ are not in the conjugacy class of $\generateur{x}$.\\ \end{theorem}
\begin{proof} Define $p=\lcp(\prefix{x}\suffix{x}, \suffix{x}\prefix{x})$ and $s=\lcs(\prefix{x}\suffix{x}, \suffix{x}\prefix{x})$ (note that $p$ and $s$ can be empty). \\ Deza, Franek, and Thierry showed that $\lvert \lcs(\prefix{x}\suffix{x},\suffix{x}\prefix{x})\rvert+\lvert \lcp(\prefix{x}\suffix{x},\suffix{x}\prefix{x})\rvert\leq\lvert \prefix{x}\suffix{x}\rvert-2$ when $\prefix{x}\suffix{x}$ is primitive (see \cite{DFT15}). Note that in the case $\lvert \lcs(\prefix{x}\suffix{x},\suffix{x}\prefix{x})\rvert+\lvert \lcp(\prefix{x}\suffix{x},\suffix{x}\prefix{x})\rvert = \lvert \generateur{x}\rvert-2$, $w_1$ and $w_2$ are the same factor. \\ Write $\generateur{x}=pr_prr_ss$ and $\tilde{\generateur{x}}=pr'_pr'r'_ss$ for the letters $r_p, r'_p, r_s, r'_s$, $r_p \neq r'_p, r_s \neq r'_s$ (by maximality of the longest common prefix and suffix) and the possibly empty and possibly homographic words $r$ and $r'$.\\ We have, by construction, $w_1=r'r'_sspr_p$ and $w_2=r'_sspr_pr$.\\ Note that $n_{r_p}(w_1) = n_{r_p}(\tilde{\generateur{x}}) + 1$ and that $n_{r'_p}(\tilde{\generateur{x}}) = n_{r'_p}(w_1) + 1$ and, by Corollary \ref{number}, $w_1$ is not a conjugate of $\tilde{\generateur{x}}$, nor of $\generateur{x}$. And because $\lvert w_1\rvert = \lvert \generateur{x}\rvert, w_1$ is neither a factor of $\generateur{x}^{e_1}\prefix{x}$ nor of $\generateur{x}^{e_2}$.\\ Similarly for $w_2$, $n_{r'_s}(w_2) = n_{r'_s}(\generateur{x}) + 1$ and $n_{r_s}(\generateur{x}) = n_{r_s}(w_2) + 1$ and, by Corollary \ref{number}, $w_2$ is not a conjugate of $\generateur{x}$, and because $\lvert w_2\rvert = \lvert \generateur{x} \rvert, w_2$ is neither a factor of $\generateur{x}^{e_1}\prefix{x}$ nor of $\generateur{x}^{e_2}$. \qed \end{proof}
\begin{example} Consider again $\generateur{x} = aaabaaaaaabaaaa$, $\prefix{x} = aaabaaaaaabaaa$ and $\suffix{x}=a$. We have $\lvert \generateur{x} \rvert = 15$, and: \[ \generateur{x}\prefix{x}\generateur{x} = aaabaaaaaabaaaaaaa\rlap{$\overbrace{\phantom{baaaaaa\mathbf{baaaaaab}}}^{w_1}$}baaaaaa\underbrace{\mathbf{baaaaaab}aaaaaab}_{w_2}aaaa \] The core of the interrupt is presented in bold.\\ The two factors $w_1$ and $w_2 = w_1 = baaaaaabaaaaaab$ (note that $w_2$ need not be equal to $w_1$), starting at different positions, are not factors of $\generateur{x}^2$. Yet, the factor $aaaaaabaaaaaabaaaaaa$ of length $\lvert \generateur{x} \rvert + \lvert \lcs (\generateur{x}, \tilde{\generateur{x}}) \rvert + \lvert \lcp (\generateur{x}, \tilde{\generateur{x}}) \rvert$ and which contains the core of the interrupt is a factor of $\generateur{x}^2$. The same goes for the factors of length $\lvert \generateur{x} \rvert -1$ that start or end with the core of the interrupt, $aaaaaabaaaaaab$ and $baaaaaabaaaaaa$: they both are factors of $\generateur{x}^2$. For those reasons, the theorem can be regarded as tight. \end{example}
\section{Conclusion}
The key features of the core of the interrupt was understood while studying double squares. Ilie \cite{I05} provided an alternate and shorter proof of Crochemore and Rytter's three squares lemma \cite{CR95}. We offer another concise proof within the framework of the core of the interrupt.
\begin{lemma} In a word, no more than two squares can have their last occurrence starting at the same position. \end{lemma}
\begin{proof} Suppose that three squares $u_1^2, u_2^2, u_3^2, \lvert u_1 \rvert < \lvert u_2 \rvert < \lvert u_3 \rvert$ start at the same position. Because $u_2^2$ and $u_3^2$ start at the same position, we can write $u_2=\generateur{x}^{e_1}\prefix{x}, u_3=\generateur{x}^{e_1}\prefix{x}\generateur{x}^{e_2}$ for $\generateur{x} = \prefix{x}\suffix{x}$ a primitive word, $\prefix{x}$ a proper prefix of $\generateur{x}$ and $e_1 \geq e_2 \geq 1$, hence $u_3$ contains a core of the interrupt. Now, by synchronization principle, Theorem \ref{fw}, $u_1, \lvert u_1 \rvert < \lvert u_2 \rvert$, cannot end in the suffix $\lcs (\prefix{x}\suffix{x}, \suffix{x}\prefix{x})$ of $u_2$ (since $u_1$ has $\generateur{x}$ as a prefix) and ends before the core of the interrupt of $u_3$, but if $\lvert u_1^2 \rvert \geq \lvert u_3 \rvert$, the second occurrence of $u_1$ contains the core of the interrupt and a word of length $\lvert \generateur{x} \rvert$ that starts with it, while the first occurrence doesn't: which, by Theorem \ref{ath}, is a contradiction. \end{proof}
\subsubsection{Thanks}
to my supervisors Antoine Deza and Franya Franek for helpful discussions and advice and to Alice Heliou for proofreading a preliminary version of this article.
\end{document} |
\begin{document}
\begin{abstract} For twisted $K$-theory whose twist is classified by a degree three integral cohomology of infinite order, universal even degree characteristic classes are in one to one correspondence with invariant polynomials of Atiyah and Segal. The present paper describes the ring of these invariant polynomials by a basis and structure constants. \end{abstract}
\title{A basis of the Atiyah-Segal invariant polynomials}
\section{Introduction} \label{sec:introduction}
\subsection{Atiyah-Segal invariant polynomials}
The ring of Atiyah-Segal invariant polynomials \cite{A-Se2} is a subring in the polynomial ring $A = \mathbb{C}[x_1, x_2, \cdots]$ on generators $x_i$ of degree $i$. Its definition \footnote{In \cite{A-Se2}, the ring is first defined by using the variables $s_i = i!x_i$.} is $J = \mathrm{Ker}(d)$, the kernel of the derivation $d : A \to A$ given by \begin{align*} dx_1 &= 0, & dx_i &= x_{i-1}, \ (i > 1). \end{align*} Originally, $J$ was introduced as the ring of the universal Chern classes of \textit{twisted $K$-theory} \cite{A-Se1} whose twist is classified by a degree three integral cohomology class of infinite order. While more study has been dedicated to twisted $K$-theory in recent years, little is known about its Chern classes and $J$. For example, $J$ is not isomorphic to a polynomial ring \cite{A-Se2}, whereas there seems to remain the issue of presenting $J$ by generators and relations.
\subsection{Main result}
The aim of this paper is to describe the ring structure of $J$ by a basis and structure constants. To state the description precisely, we define, for an integer $\ell \ge 0$, a set $B^{(\ell)}$ by $$ B^{(\ell)} = \left\{
\beta = (\beta_1, \beta_2, \cdots, \beta_\ell) \in \mathbb{Z}^\ell \big| \ \begin{array}{c} \beta_1, \cdots, \beta_{\ell - 1} \ge 0, \ \beta_{\ell} \ge 1 \end{array} \right\}, \ (\ell \ge 1), $$ and $B^{(0)} = \{ \beta = (\emptyset) \}$. We also define $B^{(\ell)}(0)$ by the following subset in $B^{(\ell)}$: \begin{align*} B^{(0)}(0) &= B^{(0)}, & B^{(1)}(0) &= \{ \beta = (1) \}, &
B^{(\ell)}(0) &= \{ \beta \in B^{(\ell)} |\ \beta_1 = 0 \}, \ (\ell > 1). \end{align*} For $\beta \in B^{(\ell)}$, $\beta' \in B^{(\ell')}$ and $\beta'' \in B^{(\ell + \ell')}$, we define the number $N_{\beta \beta'}^{\beta''}$ by the following expression of a polynomial: \begin{multline*} e_1(\vec{k}, \vec{k}')^{\beta_1''} \cdots e_{\ell+\ell'}(\vec{k}, \vec{k}')^{\beta''_{\ell+\ell'}} \\ = \sum_{\beta \in B^{(\ell)}, \beta' \in B^{(\ell')}} N_{\beta \beta'}^{\beta''} e_1(\vec{k})^{\beta_1} \cdots e_\ell(\vec{k})^{\beta_\ell} e_1(\vec{k}')^{\beta'_1} \cdots e_{\ell'}(\vec{k}')^{\beta'_{\ell'}}, \end{multline*} where $e_i(\vec{k})$, $e_i(\vec{k}')$ and $e_i(\vec{k}, \vec{k}')$ mean the $i$-th elementary symmetric polynomials in $\{ k_1, \cdots, k_\ell \}$, $\{ k'_1, \cdots, k'_{\ell'}\}$ and $\{ k_1, \cdots, k_\ell, k'_1, \cdots, k'_{\ell'} \}$, respectively. Note that $N_{\beta \beta'}^{\beta''}$ are non-negative integers, because $e_i(\vec{k}, \vec{k}') = \sum_{j}e_j(\vec{k})e_{i-j}(\vec{k}')$.
\begin{thm} \label{thm:main} The ring $J$ of the Atiyah-Segal invariant polynomials is isomorphic to the ring $J'$ constructed as follows: \begin{itemize} \item[(a)] its underlying vector space over $\mathbb{C}$ is generated by $\beta \in B(0) = \bigcup_{\ell \ge 0}B^{(\ell)}(0)$;
\item[(b)] the product of $\beta \in B^{(\ell)}(0)$ and $\beta' \in B^{(\ell')}(0)$ is given by $$ \beta \cdot {\beta'} = \sum_{\beta'' \in B^{(\ell+\ell')}(0)} N_{\beta \beta'}^{\beta''} {\beta''}. $$ \end{itemize} \end{thm}
Denote by $g_\beta(\vec{x})$ the invariant polynomial corresponding to $\beta$. Then $g_{(\emptyset)}(\vec{x}) = 1$ and $g_{(1)} (\vec{x})= x_1$. Some explicit product formulae of the polynomials are: \begin{align*} g_{(1)} g_{(1)} &= g_{(0, 1)}, & g_{(1)} g_{(0, \beta_2, \cdots, \beta_{\ell-1}, \beta_\ell)} &= g_{(0, \beta_2, \cdots, \beta_{\ell-1}, -1 + \beta_\ell, 1)}, \end{align*} \begin{align*} g_{(0, a)} g_{(0, b)} &= \sum_{1 \le r \le \frac{a + b}{2}} \frac{(a + b - 2r)!}{(a - r)!(b - r)!} g_{(0, a + b - 2r, 0, r)}, \\
g_{(0, a)} g_{(0, b, c)} &= \sum_{ \substack{ 0 \le r \le \mathrm{min}\{a-s, b\} \\ 1 \le s \le c \\ }} \frac{(a + b - 2r - s)!}{(a - r - s)!(b - r)!} g_{(0, a + b - 2r - s, c - s, r, s)}. \end{align*}
\subsection{Application}
In \cite{A-Se2}, the Chern classes of twisted $K$-theory corresponding to $J$ live in ordinary cohomology, at the first stage. Then lifting these Chern classes to twisted cohomology is proposed, and the problem to find some natural basis is raised. We answer this problem by constructing a lift of the basis in Theorem \ref{thm:main}.
\subsection{Toward a description of $J$ by generators and relations}
If we consider the polynomial algebra generated on $B(0)$ and take the quotient by the ideal generated by relations corresponding to (b) in Theorem \ref{thm:main}, then we get a description of $J$ by generators and relations, which is obviously unsatisfactory. For a satisfactory description, a possible direction would be to eliminate redundant bases. With respect to an order of monomials in $x_i$, the leading term of the invariant polynomial $g_\beta(\vec{x}) $ corresponding to $\beta = (\beta_1, \cdots, \beta_\ell) \in B^{(\ell)}(0)$ is the monomial $x_{\beta_\ell}x_{\beta_\ell + \beta_{\ell-1}} \cdots x_{\beta_\ell + \cdots + \beta_1}$, and its coefficient is always $1$. This fact leads us to introduce the subset of $\{ g_\beta |\ \beta \in B(0) \}$ consisting of bases $g_\beta(\vec{x})$ whose leading terms cannot be the products of the leading terms of other non-trivial bases of strictly lower degree. We can see that the bases in the subset generate the ring $J$ algebraically, and their first non-trivial relations appear in degree $12$: \begin{align*} g_{(0,2)}g_{(0,1,2)} - g_{(0,3)} g_{(0,0,2)} &= g_{(1)} g_{(0,0,1,2)} + g_{(1)}^2 g_{(0,2,2)}, \\
g_{(0,2)}^3 - g_{(0,0,2)}^2 &= 3g_{(1)}^2 g_{(0,2)}g_{(0,3)} - 2g_{(1)}^3g_{(0,0,3)} - 3g_{(1)}^4g_{(0,4)}. \end{align*} However, by computing relations in higher degree, we can also see that the subset still contains many redundant bases to generate $J$ algebraically. The task to find out a minimal set of algebraic generators still seems not easy, and the presentation issue of $J$ needs further study.
\subsection{Plan of the paper}
The description of $J$ in Theorem \ref{thm:main} originates from a construction of characteristic classes for twisted $K$-theory based on the \textit{Chern character} and the \textit{Adams operations} \cite{A-Se2}. Section \ref{sec:motivating_construction} is devoted to this motivating construction. Though this section can be skipped for the proof of Theorem \ref{thm:main}, one with the understanding of the construction will find the definition of our basis natural. The proof of Theorem \ref{thm:main}, which is quite elementary, is given in Section \ref{sec:basis}. The point in the proof is the definition of the polynomials $g_\beta(\vec{x})$ associated to $\beta \in B = \bigcup_{\ell \ge 0} B^{(\ell)}$. With the additive basis formed by these polynomials, the structure constant of $A$ and the subring $J \subset A$ are easy to express. Our lifts of Chern classes are provided at the end of this section.
Finally, a list of invariant polynomials $g_\beta(\vec{x})$ is appended for reference.
\subsection{Acknowledgment}
This work is supported by the Grants-in-Aid for Scientific Research (start-up 21840034), JSPS.
\section{Motivating construction} \label{sec:motivating_construction}
\subsection{Chern character}
One tool for the construction motivating our basis is the \textit{Chern character} for twisted $K$-theory. For simplicity, we assume that $X$ is a smooth manifold. With some subtle technical points understood, the Chern character is a natural homomorphism $$ \mathrm{ch} : \ K_\tau(X) \longrightarrow H^{\mathrm{even}}_\eta(X). $$ Here $K_\tau(X)$ denotes the twisted $K$-group of $X$ with its twist $\tau$. Since the way to realize twists may vary according to the contexts, we just point out that a twist is a geometric object classified by the degree three integral cohomology group $H^3(X; \mathbb{Z})$. For the sake of simplicity, we also assume that the twist $\tau$ is classified by an element in $H^3(X; \mathbb{Z})$ of infinite order. The target of $\mathrm{ch}$ is the cohomology of the complex $(\Omega(X), d - \eta \wedge \cdot )$, where $\Omega(X)$ is the space of differential forms, and $\eta$ is a closed $3$-form whose de Rham cohomology class corresponds to the real image of the element in $H^3(X; \mathbb{Z})$ classifying $\tau$.
A point to note is the following commutative diagram: $$ \begin{CD} K_\tau(X) \times K_{\tau'}(X) @>{\otimes}>> K_{\tau + \tau'}(X) \\ @V{\mathrm{ch} \times \mathrm{ch}}VV @VV{\mathrm{ch}}V \\ H_{\eta}(X) \times H_{\eta'}(X) @>>{\wedge}> H_{\eta + \eta'}(X), \end{CD} $$ where $\otimes$ is the multiplication in twisted $K$-theory, which mixes twists.
From the Chern character, we can compute the Chern class corresponding to an invariant polynomial $f(x_1, x_2, \cdots) \in J$: Let $a \in K_\tau(X)$ be a twisted $K$-class. We express its Chern character as follows: $$ \mathrm{ch}(a) = [x_1(a) + x_2(a) + x_3(a) + \cdots], $$ where $x_n(a)$ is the $2n$-form part. (The $0$-form part $x_0(a)$ is absent, under the assumption on $\tau$.) The differential form $f(x_1(a), x_2(a), \cdots)$ is closed by the invariance of the polynomial $f$. If we denote the de Rham cohomology class of the differential form by $c_f(a)$, then we get the Chern class corresponding to $f$: $$ c_f : \ K_\tau(X) \longrightarrow H^{\mathrm{even}}(X). $$
\subsection{Adams operation}
The other tool for our motivating construction is the \textit{Adams operation} for twisted $K$-theory \cite{A-Se2}, which is given as a natural map $$ \psi^k : K_\tau(X) \longrightarrow K_{k\tau}(X). $$ Here $k$ is allowed to be any integer. The twists in the source and the target of $\psi^k$ are generally different, because the formulation of $\psi^k$ involves the multiplication in twisted $K$-theory. It should be noticed that, under the expression $\mathrm{ch}(a) = [x_1(a) + x_2(a) + \cdots]$, we can express the Chern character of $\psi^k(a)$ as $$ \mathrm{ch}(\psi^k(a)) = [k x_1(a) + k^2 x_2(a) + k^3 x_3(a) + \cdots]. $$
\subsection{Factory of Chern classes}
For integers $k_1, \cdots, k_\ell$, the Adams operations $\psi^{k_i}$ and the product in twisted $K$-theory construct $$ \begin{array}{rcl} \psi^{(k_1, \cdots, k_\ell)} : \ K_\tau(X) & \longrightarrow & K_{(k_1 + \cdots + k_\ell)\tau}(X). \\
& & \\ a \ \quad & \mapsto & \psi^{k_1}(a) \otimes \cdots \otimes \psi^{k_\ell}(a) \end{array} $$ While Atiyah and Segal considered the case of $k_1 + \cdots + k_\ell = 1$ to get an ``internal'' operation, we consider the case of $k_1 + \cdots + k_\ell = 0$. The resulting ``external'' operation followed by the Chern character then gives $$ \mathrm{ch} \circ \psi^{(k_1, \cdots, k_\ell)} : \ K_\tau(X) \longrightarrow H^{\mathrm{even}}(X). $$ Since this map is natural, its $2n$-form part gives rise to a Chern class of twisted $K$-theory. It is natural to ask what kind of Chern classes are produced by this method: These Chern classes correspond to some polynomials in $J$.
By means of the properties explained so far, we can compute examples readily. In the simplest case of $\ell = 2$, we have \begin{align*} \mathrm{ch}(\psi^{(k_1,k_2)}(a)) &= (k_1k_2) \cdot x_1^2 \\ &+ (k_1k_2)^2 \cdot (x_2^2 - 2 x_1x_3) \\ &+ (k_1k_2)^3 \cdot (x_3^2 - 2 x_2x_4 + 2 x_1x_5) \\ &+ (k_1k_2)^4 \cdot (x_4^2 - 2 x_3x_5 + 2 x_2x_6 - 2x_1x_7) \\ &+ (k_1k_2)^5 \cdot (x_5^2 - 2 x_4x_6 + 2 x_3x_7 - 2x_2x_8 + 2 x_1x_9) \\ &+ \cdots, \end{align*} where $x_i = x_i(a)$. In the case of $\ell = 3$, we also have \begin{align*}
\mathrm{ch}(\psi^{(k_1,k_2,k_3)}(a)) &= e_3 \cdot x_1^3 \\ &+ e_2e_3 \cdot x_1(x_2^2 - 2x_1x_3) \\ &+ e_3^2 \cdot (x_2^3 - 3 x_1x_2x_3 + 3 x_1^2x_4) \\ &+ e_2^2 e_3 \cdot x_1(x_3^2 - 2x_2x_4 + 2x_1x_5) \\ &+ e_2e_3^2 \cdot (x_2x_3^2 - 2 x_2^2x_4 - x_1x_3x_4 + 5 x_1x_2x_5 - 5 x_1^2x_6) \\ &+ e_3^3 \cdot (x_3^3 - 3 x_2x_3x_4 + 3 x_2^2x_5 \\ & \quad \quad \quad \quad + 3 x_1x_4^2 - 3 x_1x_3x_5 - 3 x_1x_2x_6 + 3 x_1^2x_7) \\ &\quad + e_2^3e_3 \cdot x_1(x_4^2 - 2 x_3x_5 + 2 x_2x_6 - 2 x_1x_7) \\ &+ \cdots, \end{align*} where $e_2 = k_1k_2 + k_2k_3 + k_1k_3$ and $e_3 = k_1k_2k_3$ for short.
With some experience of calculating polynomials in $J$, one will find that each coefficient of a product of elementary symmetric polynomials in $k_1, \cdots, k_\ell$ is an invariant polynomial. Further, one may guess that the invariant polynomials arising in this way constitute an additive basis of a subspace in $J$: This turns out to be the case, as a result of Theorem \ref{thm:main}. In the next section, we consider $\mathrm{ch} \circ \psi^{(k_1, \cdots, k_\ell)}$ in a purely algebraic setting to construct our basis of $J$.
\section{A basis of invariant polynomials} \label{sec:basis}
\subsection{Preliminary}
We define $\mathrm{ch}(k | \vec{x})$ to be the following formal power series in variables $x_1, x_2, \cdots$ and $k$: $$
\mathrm{ch}(k | \vec{x}) = \sum_{i \ge 1} k^i x_i = k x_1 + k^2 x_2 + k^3 x_3 + \cdots. $$
For an integer $\ell \ge 1$, we let $\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})$ be the following formal power series in the variables $x_1, x_2, \cdots$ and $k_1, \cdots, k_\ell$: $$
\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x}) =
\mathrm{ch}(k_1 | \vec{x}) \cdots \mathrm{ch}(k_\ell | \vec{x}) = \sum_{i_1, \cdots, i_\ell \ge 1} k_1^{i_1} \cdots k_\ell^{i_\ell} x_{i_1} \cdots x_{i_\ell}. $$
We write $\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_n$ for the degree $n$ part of $\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})$ with respect to $x_i$. By construction, $\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_n$ is a symmetric polynomial in $k_1, \cdots, k_\ell$ with its coefficients in $A$. In particular, the symmetric polynomial is of degree $n$, provided that each $k_i$ is given degree $1$.
As is well-known \cite{M}, the ring of symmetric polynomials in $k_1, \cdots, k_\ell$ is the polynomial ring in the elementary symmetric polynomials $e_1, \cdots, e_\ell$: $$ e_j(\vec{k}) = e_j(k_1, \cdots, k_\ell) = \sum_{1 \le i_1 < \cdots < i_j \le \ell} k_{i_1} \cdots k_{i_j}. $$ For integers $n$ and $\ell$ such that $n \ge \ell \ge 1$, we put $$ B_{n}^{(\ell)} = \left\{
\beta = (\beta_1, \beta_2, \cdots, \beta_\ell) \in \mathbb{Z}^\ell \bigg| \ \begin{array}{c} \beta_1, \cdots, \beta_{\ell-1} \ge 0, \ \beta_{\ell} \ge 1 \\ \beta_1 + 2 \beta_2 + \cdots + \ell \beta_{\ell} = n \end{array} \right\}. $$ We set $B^{(0)}_0 = B^{(0)}$ and $B^{(0)}_n = \emptyset$ for $n \ge 1$. Then $B^{(\ell)}$ in Section \ref{sec:introduction} is $B^{(\ell)} = \bigcup_{n \ge \ell} B_{n}^{(\ell)}$. To an element $\beta = (\beta_1, \cdots, \beta_\ell) \in B_n^{(\ell)}$, we associate the product of elementary symmetric polynomials $e^\beta(\vec{k}) = e_1(\vec{k})^{\beta_1} \cdots e_\ell(\vec{k})^{\beta_\ell}$. These products form a basis of the vector space of symmetric polynomials of degree $n$ in $k_1, \cdots, k_\ell$.
\begin{dfn} Let $n$ and $\ell$ be integers such that $n \ge \ell \ge 1$. For $\beta \in B_n^{(\ell)}$, we define a polynomial $g_\beta(\vec{x}) \in A$ of degree $n$ by the following formula: $$
\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_n = \sum_{\substack{i_1, \cdots, i_\ell \ge 1 \\ i_1 + \cdots + i_\ell = n}} k_1^{i_1} \cdots k_\ell^{i_\ell} x_{i_1} \cdots x_{i_\ell} = \sum_{\beta \in B_n^{(\ell)}} e^\beta(k_1, \cdots, k_\ell) g_\beta(\vec{x}). $$ For $\beta = (\emptyset) \in B^{(0)}$, we define $g_\beta(\vec{x}) = 1$. \end{dfn}
Another equivalent definition of $g_\beta(\vec{x})$ makes use of the transition matrix $M = M(m, e)$ from elementary symmetric polynomials to monomial symmetric polynomials \cite{M}: let $\Lambda_n^{(\ell)}$ be the set of partitions of $n$ of length $\ell$: $$ \Lambda_n^{(\ell)} = \left\{
\lambda = (\lambda_1, \cdots, \lambda_\ell) \in \mathbb{Z}^\ell | \ \lambda_1 \ge \cdots \ge \lambda_\ell \ge 1, \ \lambda_1 + \cdots + \lambda_\ell = n \right\}, $$ which can be identified with $B_n^{(\ell)}$ through the change of expression: $$ \beta = (\beta_1, \cdots, \beta_\ell) \leftrightarrow \lambda = ( \overbrace{\ell, \cdots, \ell}^{\beta_\ell}, \cdots, \overbrace{2, \cdots, 2}^{\beta_2}, \overbrace{1, \cdots, 1}^{\beta_1} ). $$ For $\lambda \in \Lambda_n^{(\ell)}$, the \textit{monomial symmetric polynomial} $m_\lambda(\vec{k}) = m_\lambda(k_1, \cdots, k_\ell)$ is the polynomial $\sum k_1^{\mu_1} \cdots k_\ell^{\mu_\ell}$ summed over all distinct permutations $(\mu_1, \cdots, \mu_\ell)$ of $\lambda$. They also form a basis of the space of symmetric polynomials of degree $n$ in $k_i$. Let $M_{\lambda \beta}$ be the transition matrix given by the base change $m_\lambda = \sum_\beta M_{\lambda \beta} e^\beta$. Because of the expression $$
\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_n = \sum_{\lambda \in \Lambda_n^{(\ell)}} m_\lambda(k_1, \cdots, k_\ell) x_\lambda = \sum_{\beta \in B_n^{(\ell)}} e^\beta(k_1, \cdots, k_\ell) g_\beta(\vec{x}), $$ the other definition of $g_\beta(\vec{x})$ is $$ g_\beta(\vec{x}) = \sum_{\lambda \in \Lambda_n^{(\ell)}} M_{\lambda \beta} x_\lambda, $$ where $x_\lambda = x_{\lambda_1} \cdots x_{\lambda_\ell}$ for $\lambda = (\lambda_1, \cdots, \lambda_\ell)$.
\begin{rem} Since $M_{\lambda \beta} \in \mathbb{Z}$, the latter definition shows $g_\beta(\vec{x}) \in \mathbb{Z}[x_1, x_2, \cdots ] \subset A$. \end{rem}
\begin{rem} The transition matrix $(M_{\lambda \beta})_{\lambda \beta}$ can be computed from the \textit{Kostka matrix}. Some facts about the matrix in \cite{M} imply the expression: $$ g_\beta(\vec{x}) = x_{\beta'} + \sum_{\substack{\lambda \in \Lambda_n^{(\ell)} \\ \beta' < \lambda}} M_{\lambda \beta} x_\lambda. $$ Here $\beta' \in \Lambda_n$ is the \textit{conjugate} of the partition $\beta$. The meaning of $\beta' < \lambda$ is that $\beta' \neq \lambda$ and $\beta' \le \lambda$, where $\le$ is the \textit{natural (partial) ordering} \cite{M}. \end{rem}
\begin{rem} For $n \ge \ell$, let $\omega \in \Lambda_n^{(\ell)}$ be $\omega = (n - \ell + 1, \overbrace{1, \cdots, 1}^{\ell-1})$, which satisfies $\lambda \le \omega$ for all $\lambda \in \Lambda_n^{(\ell)}$. For any $\beta = (\beta_1, \beta_2, \cdots, \beta_\ell) \in B_n^{(\ell)}$, the coefficient $M_{\omega\beta}$ of $x_\omega$ in $g_\beta(\vec{x})$ never vanishes: If $n = \ell$, then $M_{\omega\beta} = 1$. If $n > \ell$, then we obtain $$ M_{\omega \beta} = (-1)^{\ell + 1 + \sum_{i}\beta_{2i}} \frac{n - \ell}{(\sum_i \beta_i) - 1} \frac{((\sum_i \beta_i) - 1)!}{\prod_i (\beta_i!)} \beta_\ell, $$ by using the so-called \textit{Waring's formula}, an explicit formula expressing the power sum in terms of the elementary symmetric functions (page 33, Example 20, \cite{M}). \end{rem}
\subsection{Proof of Theorem \ref{thm:main}}
We denote by $A_n^{(\ell)} \subset A$ the subspace consisting of polynomials of degree $n$ in $x_1, x_2, \cdots$ which are linear combinations of monomials $x_{i_1} \cdots x_{i_\ell}$ of length $\ell$. We set $A_n = \bigoplus_\ell A^{(\ell)}_n$ and $A^{(\ell)} = \bigoplus_n A^{(\ell)}_n$. By construction, the polynomial $g_\beta(\vec{x})$ with $\beta \in B_n^{(\ell)}$ belongs to the subspace $A_n^{(\ell)}$.
\begin{lem} \label{lem:polynomial_ring} The following holds about the polynomial ring $A = \mathbb{C}[x_1, x_2, \cdots]$: \begin{itemize} \item[(a)]
For $n \ge \ell \ge 0$, the set $\{ g_\beta(\vec{x}) |\ \beta \in B_n^{(\ell)} \}$ is a basis of $A_n^{(\ell)}$.
\item[(b)] For $\beta \in B^{(\ell)}$ and $\beta' \in B^{(\ell')}$, the product of the polynomials $g_\beta(\vec{x})$ and $g_{\beta'}(\vec{x})$ is expressed as $$ g_\beta(\vec{x}) g_{\beta'}(\vec{x}) = \sum_{\beta'' \in B^{(\ell+\ell')}} N_{\beta \beta'}^{\beta''} g_{\beta''}(\vec{x}), $$ where $N_{\beta \beta'}^{\beta''}$ is the non-negative integer introduced in Section \ref{sec:introduction}. \end{itemize} \end{lem}
\begin{proof}
For (a), the set $\{ x_\lambda |\ \lambda \in \Lambda_n^{(\ell)} \}$ clearly provides a basis of $A_n^{(\ell)}$. Since the transition matrix $( M_{\lambda \beta} )$ is invertible, $\{ g_\beta(\vec{x}) |\ \beta \in B_n^{(\ell)} \}$ gives rise to a basis of $A_n^{(\ell)}$ as well. Then, (b) follows from the obvious formula $$
\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x}) \mathrm{ch}(k'_1, \cdots, k'_{\ell'} | \vec{x}) =
\mathrm{ch}(k_1, \cdots, k_\ell, k'_1, \cdots, k'_{\ell'} | \vec{x}), $$ together with the definition of $g_\beta(\vec{x})$ and that of $N_{\beta \beta'}^{\beta''}$. \end{proof}
For $i \ge 0$ and $n \ge \ell > 1$, we define $B_n^{(\ell)}(i)$ to be the following subset in $B_n^{(\ell)}$: $$ B_n^{(\ell)}(i) = \left\{
\beta = (\beta_1, \beta_2, \cdots, \beta_\ell) \in \mathbb{Z}^\ell \bigg| \ \begin{array}{c} \beta_1 = i, \ \beta_2, \cdots, \beta_{\ell-1} \ge 0, \ \beta_{\ell} \ge 1 \\ \beta_1 + 2 \beta_2 + \cdots + \ell \beta_{\ell} = n \end{array} \right\}. $$ In the case of $\ell = 0$ and $\ell = 1$, we also define $B_n^{(\ell)}(i)$ by \begin{align*} B_n^{(0)}(i) &= \left\{ \begin{array}{cc} B^{(0)}, & (n = i = 0), \\ \emptyset, & \mbox{otherwise}, \end{array} \right. &
B_n^{(1)}(i) &= \left\{ \begin{array}{cc} \{ \beta = (i+1) \}, & (n = i+1), \\ \emptyset, & (n \neq i+1). \end{array} \right. \end{align*}
\begin{lem} \label{lem:derivation} For any $n \ge \ell \ge 1$ and $\beta = (\beta_1, \cdots, \beta_\ell) \in B_n^{(\ell)}(i)$, we have: $$ d g_{(\beta_1, \cdots, \beta_\ell)}(\vec{x}) = \left\{ \begin{array}{cl} 0, & (i = 0), \\ g_{(\beta_1 - 1, \beta_2, \cdots, \beta_\ell)}(\vec{x}), & (i > 0). \end{array} \right. $$ \end{lem}
\begin{proof}
By the defining formula of $\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_n$, we have $$
d \mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_n
= e_1(k_1, \cdots, k_\ell) \mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})_{n-1}. $$ This formula and the definition of $g_\beta(\vec{x})$ establish the lemma. \end{proof}
\begin{proof}[The proof of Theorem \ref{thm:main}]
By Lemmas \ref{lem:polynomial_ring} and \ref{lem:derivation}, the subspace $J = \mathrm{Ker}(d) \subset A$ of invariant polynomials has the additive basis $\{ g_\beta(\vec{x}) |\ \beta \in B(0) \}$. Since $J \subset A$ is also a subring, we use Lemma \ref{lem:polynomial_ring} again to see that: for $\beta \in B^{(\ell)}(0)$ and $\beta' \in B^{(\ell')}(0)$, the product of the polynomials $g_\beta(\vec{x})$ and $g_{\beta'}(\vec{x})$ is expressed as $$ g_\beta(\vec{x}) g_{\beta'}(\vec{x}) = \sum_{\beta'' \in B^{(\ell+\ell')}} N_{\beta \beta'}^{\beta''} g_{\beta''}(\vec{x}) = \sum_{\beta'' \in B^{(\ell+\ell')}(0)} N_{\beta \beta'}^{\beta''} g_{\beta''}(\vec{x}). $$ Thus, $\beta \mapsto g_\beta(\vec{x})$ induces a ring isomorphism $J' \cong J$. \end{proof}
\subsection{Lifts to twisted cohomology}
In \cite{A-Se2}, the Chern class corresponding to $f \in J$ of positive degree is, at the first stage, constructed as a natural map $$ c_f : \ K_\tau(X) \longrightarrow H^*(X). $$ Then, at the second stage, $c_f$ is lifted to the twisted cohomology $$ C_f : \ K_\tau(X) \longrightarrow H_\eta^*(X) $$ so that its leading term agrees with $c_f$. In the universal setting, such a lift is in one to one correspondence with a formal power series in $x_1, x_2, \cdots$: $$ F(x_1, x_2, \cdots) = f(x_1, x_2, \cdots) + \mbox{higher degree term} $$ satisfying $dF = F$. Clearly, a polynomial $f \in J$ admits various lifts $F$. A way to construct a lift \cite{A-Se2} is as follows: let $\delta : A \to A$ be the derivation defined by $\delta x_i = i x_{i+1}$. Then any $f \in J_n$ has the following series as its lift: $$ \left(\exp \frac{\delta}{n} \right)f = f + \frac{1}{n} \delta f + \frac{1}{2 n^2} \delta^2f + \cdots + \frac{1}{(i!) n^i} \delta^i f + \cdots. $$ Using the basis $\{ g_\beta(\vec{x}) \}$, we provide another way to construct a lift:
\begin{prop} Let $n$ and $\ell$ be such that $n \ge \ell \ge 1$. For $\beta = (\beta_1, \beta_2, \cdots, \beta_\ell) \in B^{(\ell)}_n(0)$, the polynomial $g_\beta(\vec{x}) \in J_n^{(\ell)}$ has the following series as its lift: $$ \tilde{g}_\beta(\vec{x}) = \sum_{i \ge 0} g_{\beta(i)}(\vec{x}) = g_\beta(\vec{x}) + g_{\beta(1)}(\vec{x}) + g_{\beta(2)}(\vec{x}) + \cdots, $$ where $\beta(i) = (\beta_1 + i, \beta_2, \cdots, \beta_\ell) \in B^{(\ell)}_{n+i}(i)$ for $i \ge 0$. \end{prop}
\begin{proof} This is an immediate consequence of Lemma \ref{lem:derivation}. \end{proof}
By Theorem \ref{thm:main}, the lifts $\tilde{g}_\beta(\vec{x})$ form a basis of the universal Chern classes in twisted cohomology, which answers the problem of finding a basis \cite{A-Se2}.
For $g_{(1)}(\vec{x}) = x_1 \in J_1$, our lift agrees with that of Atiyah and Segal: $$ (\exp \delta)(g_{(1)}) = \tilde{g}_{(1)}(\vec{x}) =
\mathrm{ch}(1 | \vec{x}) = x_1 + x_2 + x_3 + \cdots, $$ but not in general, as is seen in the case of $g_{(0, 1)}(\vec{x}) = x_1^2 \in J_2^{(2)}$: \begin{align*} \left( \exp \frac{\delta}{2} \right)x_1^2 &= x_1^2 + x_1x_2 + \frac{1}{4}(x_2^2 + 2x_1x_3) + \frac{1}{8}(x_2x_3 + x_1x_4) + \cdots, \\
\tilde{g}_{(0, 1)}(\vec{x}) &= x_1^2 + x_1x_2 + x_1x_3 + x_1x_4 + \cdots. \end{align*}
A motivation of Atiyah and Segal to introduce Adams operations is to construct a characteristic class of twisted $K$-theory living in twisted cohomology: If $k_1, \cdots, k_\ell$ are integers such that $k_1 + \cdots + k_\ell = 1$, then the Chern character and the Adams operations combine to give a characteristic class $$ \mathrm{ch} \circ \psi^{(k_1, \cdots, k_\ell)} : \ K_\tau(X) \longrightarrow H^*_\eta(X). $$
In the universal setting, this corresponds to the series $\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x})$, where $k_1, \cdots, k_\ell$ are now regarded as numbers, rather than formal variables. With respect to our basis, we can express the series as: $$
\mathrm{ch}(k_1, \cdots, k_\ell | \vec{x}) = \sum_{\beta \in B^{(\ell)}(0)} e^\beta(k_1, \cdots, k_\ell) \tilde{g}_\beta(\vec{x}). $$
\appendix
\section{Lists}
\subsection{Poincar\'e polynomial}
The generating function $\mathcal{J}(u,t) = \sum_{n, \ell}\mathrm{dim}J_n^{(\ell)}u^\ell t^n$ for the dimension of $J_n^{(\ell)} = A_n^{(\ell)} \cap J$ has the following formula:
$$ \mathcal{J}(u,t) = \frac{1-t}{(1-ut)(1-ut^2)(1-ut^3)(1-ut^4) \cdots} + t. $$ If we substitute $u = 1$, we get the formula of the generating function $\mathcal{J}(t) = \sum_n(\mathrm{dim}J_n)t^n$ for the dimension of $J_n$ in \cite{A-Se2}: \begin{align*} \mathcal{J}(t) &= \frac{1}{(1-t^2)(1-t^3)(1-t^4) \cdots} + t \\ &= 1 + t + t^2 + t^3 + 2t^4 + 2t^5 + 4t^6 + 4t^7 + 7t^8 + 8t^9 + 12t^{10} \\ & \quad + 14t^{11} + 21t^{12} + 24t^{13} + 34t^{14} + 41t^{15} + 55t^{16} + 66t^{17} + 88 t^{18} \\ & \quad + 105 t^{19} + 137 t^{20} + 165 t^{21} + 210 t^{22} + 235 t^{23} + 320 t^{24} + \cdots. \end{align*} For $\ell \ge 0$, the generating function $\mathcal{J}^{(\ell)}(t) = \sum_n (\mathrm{dim} J_n^{(\ell)}) t^n$ is \begin{align*} \mathcal{J}^{(0)}(t) &= 1, & \mathcal{J}^{(1)}(t) &= t, & \mathcal{J}^{(\ell)}(t) &= \frac{t^\ell}{(1-t^2) (1 - t^3) \cdots (1- t^\ell)}. \ (\ell \ge 2). \end{align*} A calculation gives the table:
\begin{center}
\begin{tabular}{c|cccccccccccccccc|c} $\ell$ & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 & 13 & 14 & 15 & 16 & total \\
\hline $\mathrm{dim} J_1^{(\ell)}$ & 1 & & & & & & & & & & & & & & & & 1 \\
\hline $\mathrm{dim} J_2^{(\ell)}$ & 0 & 1 & & & & & & & & & & & & & & & 1 \\
\hline $\mathrm{dim} J_3^{(\ell)}$ & 0 & 0 & 1 & & & & & & & & & & & & & & 1 \\
\hline $\mathrm{dim} J_4^{(\ell)}$ & 0 & 1 & 0 & 1 & & & & & & & & & & & & & 2 \\
\hline $\mathrm{dim} J_5^{(\ell)}$ & 0 & 0 & 1 & 0 & 1 & & & & & & & & & & & & 2 \\
\hline $\mathrm{dim} J_6^{(\ell)}$ & 0 & 1 & 1 & 1 & 0 & 1 & & & & & & & & & & & 4 \\
\hline $\mathrm{dim} J_7^{(\ell)}$ & 0 & 0 & 1 & 1 & 1 & 0 & 1 & & & & & & & & & & 4 \\
\hline $\mathrm{dim} J_8^{(\ell)}$ & 0 & 1 & 1 & 2 & 1 & 1 & 0 & 1 & & & & & & & & & 7 \\
\hline $\mathrm{dim} J_9^{(\ell)}$ & 0 & 0 & 2 & 1 & 2 & 1 & 1 & 0 & 1 & & & & & & & & 8 \\
\hline $\mathrm{dim} J_{10}^{(\ell)}$ & 0 & 1 & 1 & 3 & 2 & 2 & 1 & 1 & 0 & 1 & & & & & & & 12 \\
\hline $\mathrm{dim} J_{11}^{(\ell)}$ & 0 & 0 & 2 & 2 & 3 & 2 & 2 & 1 & 1 & 0 & 1 & & & & & & 14 \\
\hline $\mathrm{dim} J_{12}^{(\ell)}$ & 0 & 1 & 2 & 4 & 3 & 4 & 2 & 2 & 1 & 1 & 0 & 1 & & & & & 21 \\
\hline $\mathrm{dim} J_{13}^{(\ell)}$ & 0 & 0 & 2 & 3 & 5 & 3 & 4 & 2 & 2 & 1 & 1 & 0 & 1 & & & & 24 \\
\hline $\mathrm{dim} J_{14}^{(\ell)}$ & 0 & 1 & 2 & 5 & 5 & 6 & 4 & 4 & 2 & 2 & 1 & 1 & 0 & 1 & & & 34 \\
\hline $\mathrm{dim} J_{15}^{(\ell)}$ & 0 & 0 & 3 & 4 & 7 & 6 & 6 & 4 & 4 & 2 & 2 & 1 & 1 & 0 & 1 & & 41 \\
\hline $\mathrm{dim} J_{16}^{(\ell)}$ & 0 & 1 & 2 & 7 & 7 & 9 & 7 & 7 & 4 & 4 & 2 & 2 & 1 & 1 & 0 & 1 & 55 \\ \end{tabular} \end{center}
\subsection{Lists of bases}
In the following, the monomials $x_\lambda$ in the invariant polynomial $g_\beta(\vec{x})$ are arranged by using the ordering $L'_n$ on the set of partitions \cite{M}.
\subsubsection{$\ell = 1$ and $\ell = 2$}
We have \begin{align*} \mathcal{J}^{(1)}(t) &= t, & \mathcal{J}^{(2)}(t) &= t^2 + t^4 + t^6 + t^8 + \cdots \end{align*}
The basis element in $J^{(1)}$ is $g_{(1)} = x_1$ and the basis elements in $J^{(2)}$ are \begin{align*}
g_{(0,1)} &= x_1^2 \\
g_{(0,2)} &= x_2^2 - 2 x_1x_3 \\
g_{(0,3)} &= x_3^2 - 2 x_2x_4 + 2 x_1x_5 \\
g_{(0,4)} &= x_4^2 - 2 x_3x_5 + 2 x_2x_6 - 2x_1x_7 \\
g_{(0,5)} &= x_5^2 - 2 x_4x_6 + 2 x_3x_7 - 2x_2x_8 + 2 x_1x_9 \\
& \vdots \\
g_{(0, m)} &= x_m^2 - 2 x_{m-1}x_{m+1} + 2 x_{m-2}x_{m+2} - \cdots + (-1)^{m-1} 2 x_1x_{2m-1} \end{align*}
\subsubsection{$\ell = 3$}
We have $$ \mathcal{J}^{(3)}(t) = t^3 + t^5 + t^6 + t^7 + t^8 + 2t^9 + t^{10} + 2t^{11} + 2t^{12} + 2t^{13} + 2t^{14} + 3t^{15} + \cdots $$
\begin{align*} g_{(0,0,1)} &= x_1^3 \\
g_{(0,1,1)} &= x_1(x_2^2 - 2x_1x_3) \\
g_{(0,0,2)} &= x_2^3 - 3 x_1x_2x_3 + 3 x_1^2x_4 \\
g_{(0,2,1)} &= x_1(x_3^2 - 2x_2x_4 + 2x_1x_5) \\
g_{(0,1,2)} &= x_2x_3^2 - 2 x_2^2x_4 - x_1x_3x_4 + 5 x_1x_2x_5 - 5 x_1^2x_6 \\
g_{(0,0,3)} &= x_3^3 - 3 x_2x_3x_4 + 3 x_2^2x_5 + 3 x_1x_4^2 - 3 x_1x_3x_5 - 3 x_1x_2x_6 + 3 x_1^2x_7 \\
g_{(0,3,1)} &= x_1(x_4^2 - 2 x_3x_5 + 2 x_2x_6 - 2 x_1x_7) \\
g_{(0,2,2)} &= x_2x_4^2 - 2 x_2x_3x_5 + 2 x_2^2x_6 - x_1x_4x_5 + 3 x_1x_3x_6 - 7 x_1x_2x_7 + 7 x_1^2x_8 \\
g_{(0,1,3)} &= x_3x_4^2 - 2 x_3^2x_5 - x_2x_4x_5 + 5 x_2x_3x_6 - 5 x_2^2x_7 \\ &\quad + 4 x_1x_5^2 - 7 x_1x_4x_6 + 2 x_1x_3x_7 + 8 x_1x_2x_8 - 8 x_1^2x_9 \\
g_{(0,4,1)} &= x_1(x_5^2 - 2 x_4x_6 + 2 x_3x_7 - 2 x_2x_8 + 2 x_1x_9) \\
& \\
g_{(0,3,2)} &= x_2x_5^2 - 2 x_2x_4x_6 + 2 x_2x_3x_7 - 2 x_2^2x_8 \\ &\quad - x_1x_5x_6 + 3 x_1x_4x_7 - 5 x_1x_3x_8 + 9 x_1x_2x_9 -9 x_1^2 x_{10} \\
g_{(0,0,4)} &= x_4^3 - 3 x_3x_4x_5 + 3 x_2x_5^2 + 3 x_3^2x_6 - 3 x_2x_4x_6 - 3 x_2x_3x_7 + 3 x_2^2x_8 \\ &\quad - 3 x_1x_5x_6 + 6 x_1x_4x_7 - 3 x_1x_3x_8 - 3 x_1x_2x_9 + 3 x_1^2x_{10} \\
& \\
g_{(0,2,3)} &= x_3 x_5^2 - 2 x_3 x_4 x_6 + 2 x_3^2 x_7 - x_2 x_5 x_6 + 3 x_2 x_4 x_7 - 7 x_2 x_3 x_8 + 7 x_2^2 x_9 \\ &\quad + 5 x_1x_6^2 - 9 x_1x_5x_7 + 6 x_1x_4x_8 + x_1x_3x_9 - 15 x_1x_2x_{10} + 15 x_1^2x_{11} \\
g_{(0,5,1)} &= x_1(x_6^2 - 2 x_5 x_7 + 2 x_4 x_8 - 2 x_3 x_9 + 2 x_2x_{10} - 2 x_1x_{11}) \\
& \\
g_{(0,4,2)} &= x_2x_6^2 - 2 x_2x_5x_7 + 2 x_2x_4x_8 - 2 x_2x_3x_9 + 2 x_2^2x_{10} \\ &\quad - x_1 x_6 x_7 + 3 x_1 x_5 x_8 - 5 x_1 x_4 x_9 + 7 x_1 x_3 x_{10} - 11 x_1 x_2 x_{11} + 11 x_1^2 x_{12} \\
g_{(0,1,4)} &= x_4x_5^2 - 2 x_4^2x_6 - x_3x_5x_6 + 5 x_3x_4x_7 - 5 x_3^2x_8 \\ &\quad + 4 x_2x_6^2 - 7 x_2x_5x_7 + 2 x_2x_4x_8 + 8 x_2x_3x_9 - 8 x_2^2x_{10} \\ & \quad - 4 x_1x_6x_7 + 11 x_1x_5x_8 - 13 x_1x_4x_9 + 5 x_1x_3x_{10} + 11 x_1x_2x_{11} -11 x_1^2x_{12} \\
& \\
g_{(0,0,5)} &= x_5^3 - 3 x_4 x_5 x_6 + 3 x_4^2 x_7 + 3 x_3 x_6^2 - 3 x_3 x_5 x_7 - 3 x_3 x_4 x_8 + 3 x_3^2 x_9 \\ &\quad - 3 x_2 x_6 x_7 + 6 x_2 x_5 x_8 - 3 x_2 x_4 x_9 - 3 x_2 x_3 x_{10} + 3 x_2^2 x_{11} + 3 x_1 x_7^2 \\ &\quad - 3 x_1 x_6 x_8 - 3 x_1 x_5 x_9 + 6 x_1 x_4 x_{10} - 3 x_1 x_3 x_{11} - 3 x_1 x_2 x_{12} + 3 x_1^2 x_{13} \\
g_{(0,3,3)} &= x_3 x_6^2 - 2 x_3 x_5 x_7 + 2 x_3 x_4 x_8 - 2 x_3^2 x_9 - x_2 x_6 x_7 \\ &\quad + 3 x_2 x_5 x_8 - 5 x_2 x_4 x_9 + 9 x_2 x_3 x_{10} - 9 x_2^2 x_{11} + 6 x_1 x_7^2 - 11 x_1 x_6 x_8 \\ &\quad + 8 x_1 x_5 x_9 - 3 x_1 x_4 x_{10} - 6 x_1 x_3 x_{11} + 24 x_1 x_2 x_{12} -24 x_1^2 x_{13} \\
g_{(0,6,1)} &= x_1 (x_7^2 - 2 x_6 x_8 + 2 x_5 x_9 - 2 x_4 x_{10} + 2 x_3 x_{11} - 2 x_2 x_{12} + 2 x_1 x_{13}) \end{align*}
\subsubsection{$\ell = 4$}
We have $$ \mathcal{J}^{(4)}(t) = t^4 + t^6 + t^7 + 2t^8 + t^9 + 3t^{10} + 2t^{11} + 4t^{12} + 3 t^{13} + 5 t^{14} + 4 t^{15} + \cdots $$
\begin{align*} g_{(0,0,0,1)} &= x_1^4 \\
g_{(0,1,0,1)} &= x_1^2(x_2^2 - 2 x_1x_3) \\
g_{(0,0,1,1)} &= x_1(x_2^3 - 3 x_1x_2x_3 + 3 x_1^2x_4) \\
g_{(0,2,0,1)} &= x_1^2(x_3^2 - 2 x_2x_4 + 2 x_1x_5) \\
g_{(0,0,0,2)} &= x_2^4 - 4 x_1x_2^2x_3 + 2x_1^2x_3^2 + 4x_1^2x_2x_4 - 4x_1^3x_5 \\
g_{(0,1,1,1)} &= x_1(x_2x_3^2 - 2 x_2^2x_4 - x_1x_3x_4 + 5 x_1x_2x_5 - 5 x_1^2x_6) \\
& \\
g_{(0,0,2,1)} &= x_1(x_3^3 - 3 x_2 x_3 x_4 + 3 x_2^2 x_5+ 3 x_1 x_4^2 - 3 x_1 x_3 x_5 - 3 x_1 x_2 x_6 + 3 x_1^2 x_7) \\ g_{(0,1,0,2)} &= x_2^2 x_3^2 - 2 x_2^3 x_4 - 2 x_1 x_3^3 + 4 x_1 x_2 x_3 x_4 + 2 x_1 x_2^2 x_5 \\ &\quad - 3 x_1^2 x_4^2 + 2 x_1^2 x_3 x_5 - 6 x_1^2 x_2 x_6 + 6 x_1^3 x_7 \\
g_{(0,3,0,1)} &= x_1^2(x_4^2 - 2 x_3 x_5 + 2 x_2 x_6 - 2 x_1 x_7) \\
& \\
g_{(0,0,1,2)} &= x_2 x_3^3 - 3 x_2^2 x_3 x_4 + 3 x_2^3 x_5 - x_1 x_3^2 x_4 + 5 x_1 x_2 x_4^2 - 2 x_1 x_2 x_3 x_5 - 7 x_1 x_2^2 x_6 \\ &\quad - 5 x_1^2 x_4 x_5 + 7 x_1^2 x_3 x_6 + 7 x_1^2 x_2 x_7 - 7 x_1^3 x_8 \\
g_{(0,2,1,1)} &= x_1(x_2 x_4^2 - 2 x_2 x_3 x_5 + 2 x_2^2 x_6 - x_1 x_4 x_5 + 3 x_1 x_3 x_6 - 7 x_1 x_2 x_7 + 7 x_1^2 x_8) \\
& \\
g_{(0,0,0,3)} &= x_3^4 - 4 x_2x_3^2x_4 + 2 x_2^2x_4^2 + 4 x_2^2x_3x_5 - 4 x_2^3x_6 \\ &\quad + 4 x_1 x_3 x_4^2 - 4 x_1 x_3^2 x_5 - 8 x_1 x_2 x_4 x_5 + 8 x_1 x_2 x_3 x_6 + 4 x_1 x_2^2 x_7 \\ &\quad + 6 x_1^2 x_5^2 - 4 x_1^2 x_4 x_6 - 4 x_1^2 x_3 x_7 - 4 x_1^2 x_2 x_8 + 4 x_1^3 x_9 \\
g_{(0,2,0,2)} &= x_2^2 x_4^2 - 2 x_2^2 x_3 x_5 + 2 x_2^3 x_6 \\ &\quad - 2 x_1 x_3 x_4^2 + 4 x_1 x_3^2 x_5 - 4 x_1 x_2 x_3 x_6 - 2 x_1 x_2^2 x_7 \\ &\quad - 4 x_1^2 x_5^2 + 8 x_1^2 x_4 x_6 - 4 x_1^2 x_3 x_7 + 8 x_1^2 x_2 x_8 - 8 x_1^3 x_9 \\
g_{(0,1,2,1)} &= x_1 \left( x_3x_4^2 - 2 x_3^2x_5 - x_2x_4x_5 + 5 x_2x_3x_6 - 5 x_2^2x_7 \right. \\ &\quad \left. + 4 x_1x_5^2 - 7 x_1x_4x_6 + 2 x_1x_3x_7 + 8 x_1x_2x_8 - 8 x_1^2x_9 \right) \\
g_{(0,4, 0,1)} &= x_1^2(x_5^2 - 2 x_4 x_6 + 2 x_3 x_7 - 2 x_2 x_8 + 2 x_1 x_9) \end{align*}
\subsubsection{$\ell \ge 5$}
$$ \mathcal{J}^{(5)}(t) = t^5 + t^7 + t^8 + 2 t^9 + 2 t^{10} + 3 t^{11} + 3 t^{12} + 5 t^{13} + 5 t^{14} + 7 t^{15} + \cdots. $$ \begin{align*} g_{(0,0,0,0,1)} &= x_1^5 \\
g_{(0,1,0,0,1)} &= x_1^3(x_2^2 - 2 x_1x_3) \\
g_{(0,0,1,0,1)} &= x_1^2(x_2^3 - 3 x_1x_2x_3 + 3 x_1^2x_4) \\
& \\
g_{(0,0,0,1,1)} &= x_1(x_2^4 - 4 x_1x_2^2x_3 + 2 x_1^2x_3^2 + 4 x_1^2x_2x_4 - 4 x_1^3x_5) \\
g_{(0,2,0,0,1)} &= x_1^3(x_3^2 - 2 x_2x_4 + 2 x_1x_5) \\
& \\
g_{(0,0,0,0,2)} &= x_2^5 - 5 x_1 x_2^3 x_3 + 5 x_1^2 x_2 x_3^2 + 5 x_1^2 x_2^2 x_4 - 5 x_1^3 x_3 x_4 - 5 x_1^3 x_2 x_5 + 5 x_1^4 x_6 \\
g_{(0,1,1,0,1)} &= x_1^2(x_2x_3^2 - 2 x_2^2x_4 - x_1x_3x_4 + 5 x_1x_2x_5 - 5 x_1^2x_6) \\
& \\
g_{(0,1,0,1,1)} &= x_1(x_2^2 x_3^2 - 2 x_2^3 x_4 - 2 x_1 x_3^3 + 4 x_1 x_2 x_3 x_4 + 2 x_1 x_2^2 x_5 \\ &\quad - 3 x_1^2 x_4^2 + 2 x_1^2 x_3 x_5 - 6 x_1^2 x_2 x_6 + 6 x_1^3 x_7) \\
g_{(0,0,2,0,1)} &= x_1^2(x_3^3 - 3 x_2 x_3 x_4 + 3 x_2^2 x_5 + 3 x_1 x_4^2 - 3 x_1 x_3 x_5 - 3 x_1 x_2 x_6 + 3 x_1^2 x_7) \\
g_{(0,3,0,0,1)} &= x_1^3 (x_4^2 - 2 x_3 x_5 + 2 x_2 x_6 - 2 x_1 x_7) \end{align*}
$$ \mathcal{J}^{(6)}(t) = t^6 + t^8 + t^9 + 2 t^{10} + 2 t^{11} + 4 t^{12} + 3 t^{13} + 6 t^{14} + 6 t^{15} + \cdots. $$ \begin{align*} g_{(0,0,0,0,0,1)} &= x_1^6 \\
g_{(0,1,0,0,0,1)} &= x_1^4(x_2^2 - 2 x_1x_3) \\
g_{(0,0,1,0,0,1)} &= x_1^3(x_2^3 - 3 x_1x_2x_3 + 3 x_1^2x_4) \\
& \\
g_{(0,0,0,1,0,1)} &= x_1^2(x_2^4 - 4 x_1x_2^2x_3 + 2 x_1^2x_3^2 + 4 x_1^2x_2x_4 - 4 x_1^3x_5) \\
g_{(0,2,0,0,0,1)} &= x_1^4 (x_3^2 - 2 x_2 x_4 + 2 x_1 x_5) \\
& \\
g_{(0,0,0,0,1,1)} &= x_1(x_2^5 - 5 x_1x_2^3x_3 + 5 x_1^2x_2x_3^2 + 5 x_1^2x_2^2x_4 \\ &\quad - 5 x_1^3 x_3 x_4 - 5 x_1^3 x_2 x_5 + 5 x_1^4 x_6) \\
g_{(0,1,1,0,0,1)} &= x_1^3(x_2 x_3^2 - 2 x_2^2x_4 - x_1x_3x_4 + 5 x_1x_2x_5 - 5 x_1^2x_6) \end{align*}
$$ \mathcal{J}^{(7)}(t) = t^7 + t^9 + t^{10} + 2 t^{11} + 2 t^{12} + 4 t^{13} + 4 t^{14} + 6 t^{15} + \cdots $$
\begin{align*} g_{(0,0,0,0,0,0,1)} &= x_1^7 \\
g_{(0,1,0,0,0,0,1)} &= x_1^5(x_2^2 - 2 x_1x_3) \\
g_{(0,0,1,0,0,0,1)} &= x_1^4(x_2^3 - 3 x_1x_2x_3 + 3 x_1^2x_4) \end{align*}
\end{document}
\begin{document}
\title{Sparse Linear Identifiable Multivariate Modeling}
\author{\name Ricardo Henao \email rhenao@binf.ku.dk \\
\name Ole Winther \email owi@imm.dtu.dk \\
\addr DTU Informatics \\
Richard Petersens Plads, Building 321 \\
Technical University of Denmark \\
DK-2800 Lyngby, Denmark
\AND
\addr Bioinformatics Centre \\
University of Copenhagen \\
Ole Maaloes Vej 5 \\
DK-2200 Copenhagen N, Denmark}
\editor{Aapo Hyv\"{a}rinen}
\maketitle
\begin{abstract}
In this paper we consider sparse and identifiable linear latent variable (factor) and linear Bayesian network models for parsimonious analysis of multivariate data. We propose a computationally efficient method for joint parameter and model inference, and model comparison. It consists of a fully Bayesian hierarchy for sparse models using slab and spike priors (two-component $\delta$-function and continuous mixtures), non-Gaussian latent factors and a stochastic search over the ordering of the variables. The framework, which we call SLIM (Sparse Linear Identifiable Multivariate modeling), is validated and benchmarked on artificial and real biological data sets. SLIM is closest in spirit to LiNGAM \citep{shimizu06}, but differs substantially in inference, Bayesian network structure learning and model comparison. Experimentally, SLIM performs equally well or better than LiNGAM with comparable computational complexity. We attribute this mainly to the stochastic search strategy used, and to parsimony (sparsity and identifiability), which is an explicit part of the model. We propose two extensions to the basic i.i.d.\ linear framework: non-linear dependence on observed variables, called SNIM (Sparse Non-linear Identifiable Multivariate modeling) and allowing for correlations between latent variables, called CSLIM (Correlated SLIM), for temporal and/or spatial data. The source code and scripts are available from \url{http://cogsys.imm.dtu.dk/slim/}.
\end{abstract}
\begin{keywords}
Parsimony, sparsity, identifiability, factor models, linear Bayesian networks
\end{keywords}
\section{Introduction}
Modeling and interpretation of multivariate data are central themes in machine learning. Linear latent variable models (or factor analysis) and linear directed acyclic graphs (DAGs) are prominent examples of models for continuous multivariate data. In factor analysis, data is modeled as a linear combination of independently distributed factors thus allowing for capture of a rich underlying co-variation structure. In the DAG model, each variable is expressed as regression on a subset of the remaining variables with the constraint that total connectivity is acyclic in order to have a properly defined joint distribution. Parsimonious (interpretable) modeling, using a sparse factor loading matrix or restricting the number of parents of a node in a DAG, is a good prior assumption in many applications. Recently, there has been a great deal of interest in detailed modeling of sparsity in factor models, for example in the context of gene expression data analysis \citep{west03,lucas06,knowles07,thibaux07,carvalho08,rai08}. Sparsity arises for example in gene regulation because the latent factors represent driving signals for gene regulatory sub-networks and/or transcription factors, each of which only includes/affects a limited number of genes. A parsimonious DAG is particularly attractive from an interpretation point of view but the restriction to only having observed variables in the model may be a limitation because one rarely measures all relevant variables. Furthermore, linear relationships might be unrealistic for example in gene regulation, where it is generally accepted that one cannot replace the driving signal (related to concentration of a transcription factor protein in the cell nucleus) with the measured concentration of corresponding mRNA. Bayesian networks represent a very general class of models, encompassing both observed and latent variables. 
In many situations it will thus be relevant to learn parsimonious Bayesian networks with both latent variables and non-linear DAG parts. Although attractive, by being closer to what one may expect in practice, such modeling is complicated by difficult inference (\citet{chickering96} showed that DAG structure learning is NP-hard) and by potential non-identifiability. Identifiability means that each setting of the parameters defines a unique distribution of the data. Clearly, if the model is not identifiable in the DAG and latent parameters, this severely limits the interpretability of the learned model.
\citet{shimizu06} provided the important insight that every DAG has a factor model representation, i.e.\ the connectivity matrix of a DAG gives rise to a triangular mixing matrix in the factor model. This provided the motivation for the Linear Non-Gaussian Acyclic Model (LiNGAM) algorithm which solves the identifiable factor model using Independent Component Analysis \citep[ICA,][]{hyvarinen01} followed by iterative permutation of the solutions towards triangular, aiming to find a suitable ordering for the variables. As a final step, the resulting DAG is pruned based on different statistics, e.g.\ Wald, Bonferroni, $\chi^2$ second order model fit tests. Model selection is then performed using some pre-chosen significance level, thus LiNGAM selects from models with different sparsity levels and a fixed deterministically found ordering. There are a number of possible extensions to their basic model, for instance \citet{hoyer08a} extend it to allow for latent variables, for which they use a probabilistic version of ICA to obtain the variable ordering, pruning to make the model sparse and bootstrapping for model selection. Although the model seems to work well in practice, as commented by the authors, it is restricted to very small problems (3 or 4 observed variables and 1 latent variable). Non-linear DAGs are also a possibility, however finding variable orderings in this case is known to be far more difficult than in the linear case. These methods, inspired by \citet{friedman00}, mainly consist of two steps: performing non-linear regression for a set of possible orderings, and then testing for independence to prune the model, see for instance \citet{hoyer08} and \citet{zhang09a}. For tasks where exhaustive order enumeration is not feasible, greedy approaches like DAG-search \citep[see \quotes{ideal parent} algorithm,][]{elidan07} or PC \citep[Prototypical Constraint, see kernel PC,][]{tillman09} can be used as computationally affordable alternatives.
Factor models have been successfully employed as exploratory tools in many multivariate analysis applications. However, interpretability using sparsity is usually not part of the model, but achieved through post-processing. Examples of this include, bootstrapping, rotating the solutions to maximize sparsity (varimax, procrustes), pruning or thresholding. Another possibility is to impose sparsity in the model through $L_1$ regularization to obtain a maximum a-posteriori estimate \citep{jolliffe03,zou06}. In fully Bayesian sparse factor modeling, two approaches have been proposed: parametric models with bimodal sparsity promoting priors \citep{west03,lucas06,carvalho08,henao09}, and non-parametric models where the number of factors is potentially infinite \citep{knowles07,thibaux07,rai08}. It turns out that most of the parametric sparse factor models can be seen as finite versions of their non-parametric counterparts, for instance \citet{west03} and \citet{knowles07}. The model proposed by \citet{west03} is, as far as the authors know, the first attempt to encode sparsity in a factor model explicitly in the form of a prior. The remaining models improve the initial setting by dealing with the optimal number of factors in \citet{knowles07}, improved hierarchical specification of the sparsity prior in \citet{lucas06,carvalho08,thibaux07}, hierarchical structure for the loading matrices in \citet{rai08} and identifiability without restricting the model in \citet{henao09}.
Many algorithms have been proposed to deal with the NP-hard DAG structure learning task. LiNGAM, discussed above, is the first fully identifiable approach for continuous data. All other approaches for continuous data use linearity and (at least implicitly) Gaussianity assumptions so that the model structure learned is only defined up to equivalence classes. Thus in most cases the directionality information about the edges in the graph must be discarded. Linear Gaussian-based models have the added advantage that they are computationally affordable for the many variables case. The structure learning approaches can be roughly divided into stochastic search and score \citep{cooper92,heckerman00,friedman03}, constraint-based (with conditional independence tests) \citep{spirtes01} and two-stage (like LiNGAM) \citep{tsamardinos06,friedman99,teyssier05,schmidt07,shimizu06}. In the following, we discuss in more detail previous work in the last category, as it is closest to the work in this paper and can be considered representative of the state-of-the-art. The Max-Min Hill-Climbing algorithm \citep[MMHC,][]{tsamardinos06} first learns the skeleton using conditional independence tests similar to PC algorithms \citep{spirtes01} and then the order of the variables is found using a Bayesian-scoring hill-climbing search. The Sparse Candidate (SC) algorithm \citep{friedman99} is in the same spirit but restricts the skeleton to within a predetermined link candidate set of bounded size for each variable. The Order Search algorithm \citep{teyssier05} uses hill-climbing first to find the ordering, and then looks for the skeleton with SC. $L_1$ regularized Markov Blanket \citep{schmidt07} replaces the skeleton learning from MMHC with a dependency network \citep{heckerman00} written as a set of local conditional distributions represented as regularized linear regressors.
Since the source of identifiability in Gaussian DAG models is the direction of the edges in the graph, a still meaningful approach consists of entirely focusing on inferring the skeleton of the graph by keeping the edges undirected as in \citet{dempster72,dawid93,giudici99,rajaratnam08}.
\begin{figure}\label{fg:slimfig}
\end{figure}
In this paper we propose a framework called SLIM (Sparse Linear Identifiable Multivariate modeling, see Figure \ref{fg:slimfig}) in which we learn models from a rather general class of Bayesian networks and perform quantitative model comparison between them\footnote{A preliminary version of our approach appears in NIPS 2009: Henao and Winther, Bayesian sparse factor models and DAGs inference and comparison.}. Model comparison may be used for model selection or serve as a hypothesis-generating tool. We use the likelihood on a test set as a computationally simple quantitative proxy for model comparison and as an alternative to the marginal likelihood. The other two key ingredients in the framework are the use of sparse and identifiable model components \citep[][respectively]{carvalho08,kagan73} and the stochastic search for the correct order of the variables needed by the DAG representation. Like LiNGAM, SLIM exploits the close relationship between factor models and DAGs. However, since we are interested in the factor model by itself, we will not constrain the factor loading matrix to have triangular form, but allow for sparse solutions so pruning is not needed. Rather we may ask whether there exists a permutation of the factor-loading matrix agreeing with the DAG assumption (in a probabilistic sense). The slab and spike prior biases towards sparsity so it makes sense to search for a permutation in parallel with factor model inference. We propose to use stochastic updates for the permutation using a Metropolis-Hastings acceptance ratio based on likelihoods with the factor-loading matrix being masked. In practice this approach gives good solutions up to at least fifty dimensions. Given a set of possible variable orderings inferred by this method, we can then learn DAGs using slab and spike priors for their connectivity matrices. The so-called slab and spike prior is a two-component mixture of a continuous distribution and a degenerate $\delta$-function point mass at zero.
This type of model implicitly defines a prior over structures and is thus a computationally attractive alternative to combinatorial structure search since parameter and structure inference are performed simultaneously. A key to effective learning in these intractable models is Markov Chain Monte Carlo (MCMC) sampling schemes that mix well. For non-Gaussian heavy-tailed distributions like the Laplace and $t$-distributions, Gibbs sampling can be efficiently defined using appropriate infinite scale mixture representations of these distributions \citep{andrews74}. We also show that our model is very flexible in the sense that it can be easily extended by only changing the prior distribution of a set of latent variables, for instance to allow for time series data (CSLIM, Correlated SLIM) and non-linearities in the DAG structure (SNIM, Sparse non-Linear Identifiable Multivariate modeling) through Gaussian process priors.
The rest of the paper is organized as follows: Section \ref{sc:lin} describes the model and its identifiability properties. Section \ref{sc:noi} provides all prior specification including sparsity, latent variables and driving signals, order search and extensions for correlated data (CSLIM) and non-linearities (SNIM). Section \ref{sc:ms} elaborates on model comparison. Section \ref{sc:inf} and Appendix \ref{ap:inf} provide an overview of the model and practical details on the MCMC-based inference, proposed workflow and computational cost requirements. Section \ref{sc:res} contains the experiments. We show simulations based on artificial data to illustrate all the features of the model proposed. Real biological data experiments illustrate the advantages of considering different variants of Bayesian networks. For all data sets we compare with some of the most relevant existing methods. Section \ref{sc:dis} concludes with a discussion, open questions and future directions.
\section{Linear Bayesian networks} \label{sc:lin}
A Bayesian network is essentially a joint probability distribution defined via a directed acyclic graph, where each node in the graph represents a random variable $x$. Due to the acyclic property of the graph, its node set $\{x_1,\ldots,x_d\}$ can be partitioned into $d$ subsets $\{V_1,V_2,\ldots,V_d\}\equiv{\cal V}$, such that if $x_j\rightarrow x_i$ then $x_j\in V_i$, i.e.\ $V_i$ contains all \emph{parents} of $x_i$. We can then write the joint distribution as a product of conditionals of the form
\begin{align*}
P(x_1,\ldots,x_d)=\prod_{i=1}^d P(x_i|V_i) \ , \end{align*}
thus $x_i$ is conditionally independent of $\{x_j|x_i\notin V_j\}$ given $V_i$ for $i\neq j$. This means that $P(x_1,\ldots,x_d)$ can be used to describe the joint probability of any set of variables once ${\cal V}$ is given. The problem is that ${\cal V}$ is usually unknown and thus needs to be (at least partially) inferred from observed data.
We consider a model for a fairly general class of linear Bayesian networks by putting together a linear DAG, ${\bf x}={\bf B}{\bf x}+{\bf z}$, and a factor model, ${\bf x}={\bf C}{\bf z}+\bm{\epsilon}$. Our goal is to explain each one of $d$ observed variables ${\bf x}$ as a linear combination of the remaining ones, a set of $d+m$ independent latent variables ${\bf z}$ and additive noise $\bm{\epsilon}$. We have then
\begin{align} \label{eq:PBxCz}
{\bf x}=({\bf R}\odot{\bf B}){\bf x} + ({\bf Q}\odot{\bf C}){\bf z} + \bm{\epsilon} \ , \end{align}
where $\odot$ is the element-wise product and we can further identify the following elements:
\begin{list}{\labelitemi}{\leftmargin=1em}
\item ${\bf z}$ is partitioned into two subsets, ${\bf z}_D$ is a set of $d$ driving signals for each observed variable in ${\bf x}$ and ${\bf z}_L$ is a set of $m$ shared general purpose latent variables. ${\bf z}_D$ is used here to describe the intrinsic behavior of the observed variables that cannot be regarded as \quotes{external} noise.
\item ${\bf R}$ is a $d\times d$ binary connectivity matrix that encodes whether there is an edge between observed variables, by means of $r_{ij}=1$ if $x_i\to x_j$. Since every non-zero element in ${\bf R}$ is an edge of a DAG, $r_{ii}=0$ and $r_{ij}=0$ if $r_{ji}\neq0$ to avoid self-interactions and bi-directional edges, respectively. This also implies that there is at least one permutation matrix ${\bf P}$ such that ${\bf P}^\top{\bf R}{\bf P}$ is strictly lower triangular, where we have used that ${\bf P}$ is orthogonal, hence ${\bf P}^{-1} = {\bf P}^\top$.
\item ${\bf Q}=[ {\bf Q}_D \ {\bf Q}_L ]$ is a $d\times (d+m)$ binary connectivity matrix, this time for the conditional independence relations between observed and latent variables. We assume that each observed variable has a dedicated latent variable, thus ${\bf Q}_D$, the first $d$ columns of ${\bf Q}$, is the identity. The remaining $m$ columns can be arbitrarily specified, by means of $q_{ij}\neq0$ if there is an edge between $x_i$ and $z_j$ for $d<j\leq d+m$.
\item ${\bf B}$ and ${\bf C}=[{\bf C}_D \ {\bf C}_L]$ are respectively, $d\times d$ and $d\times(d+m)$ weight matrices containing the edge strengths for the Bayesian network. Their elements are constrained to be non-zero only if their corresponding connectivities are also non-zero. \end{list}
The model \eqref{eq:PBxCz} has two important special cases, (i) if all elements in ${\bf R}$ and ${\bf Q}_D$ are zero it becomes a standard factor model (FM) and (ii) if $m=0$ or all elements in ${\bf Q}_L$ are zero it is a pure DAG. The model is not a completely general linear Bayesian network because connections to latent variables are absent \citep[see for example][]{silva10}. However, this restriction is mainly introduced to avoid compromising the identifiability of the model. In the following we will only write ${\bf Q}$ and ${\bf R}$ explicitly when we specify the sparsity modeling.
\subsection{Identifiability} \label{sc:idf}
We will split the identifiability of the model in equation \eqref{eq:PBxCz} in three parts addressing first the factor model, second the pure DAG and finally the full model. By identifiability we mean that each different setting of the parameters ${\bf B}$ and ${\bf C}$ gives a unique distribution of the data. In some cases the model is only unique up to some symmetry of the model. We discuss these symmetries and their effect on model interpretation in the following.
Identifiability in factor models ${\bf x}={\bf C}_L{\bf z}_L+\bm{\epsilon}$ can be obtained in a number of ways \citep[see Chapter 10,][]{kagan73}. Probably the easiest way is to assume sparsity in ${\bf C}_L$ and restrict its number of free parameters, for example by restricting the dimensionality of ${\bf z}$, namely $m$, according to the Ledermann bound $m\leq(2d+1-(8d+1)^{1/2})/2$ \citep{bekker97}. The Ledermann bound guarantees the identification of $\bm{\epsilon}$ and follows just from counting the number of free parameters in the covariance matrices of ${\bf x}$, $\bm{\epsilon}$ and in ${\bf C}_L$, assuming Gaussianity of ${\bf z}$ and $\bm{\epsilon}$. Alternatively, identifiability is achieved using non-Gaussian distributions for ${\bf z}$. \citet[Theorem 10.4.1,][]{kagan73} states that when at least $m-1$ latent variables are non-Gaussian, ${\bf C}_L$ is identifiable up to scale and permutation of its columns, i.e.\ we can identify $\widehat{{\bf C}}_L = {\bf C}_L {\bf S}_\mathrm{f} {\bf P}_\mathrm{f}$, where ${\bf S}_\mathrm{f}$ and ${\bf P}_\mathrm{f}$ are arbitrary scaling and permutation matrices, respectively. \citet{comon94} provided an alternative well-known proof for the particular case of $m-1=d$. The ${\bf S}_\mathrm{f}$ and ${\bf P}_\mathrm{f}$ symmetries are inherent in the factor model definition in all cases and will usually not affect interpretability. However, some researchers prefer to make the model completely identifiable, e.g.\ by making ${\bf C}_L$ triangular with non-negative diagonal elements \citep{lopes04}. In addition, if all components of $\bm{\epsilon}$ are Gaussian and the rank of ${\bf C}_L$ is $m$, then the distributions of ${\bf z}$ and $\bm{\epsilon}$ are uniquely defined to within common shift in mean \citep[Theorem 10.4.3,][]{kagan73}. 
In this paper, we use the non-Gaussian ${\bf z}$ option for two reasons, (i) restricting the number of latent variables severely limits the usability of the model and (ii) non-Gaussianity is a more realistic assumption in many application areas such as for example biology.
For pure DAG models ${\bf x} = {\bf B} {\bf x} + {\bf C}_D {\bf z}_D$, identifiability can be obtained using the factor model result from \citet{kagan73} by rewriting the DAG into an equivalent factor model ${\bf x} = {\bf D} {\bf z}$ with ${\bf D}=({\bf I} - {\bf B})^{-1} {\bf C}_D$, see Figure \ref{fg:DAGtoFA}. From the factor model result it only follows that ${\bf D}$ is identifiable up to a scaling and permutation. However, as mentioned above, due to the acyclicity there is at least one permutation matrix ${\bf P}$ such that ${\bf P}^\top{\bf B}{\bf P}$ is strictly lower triangular. Now, if ${\bf x}$ admits DAG representation, the same ${\bf P}$ makes the permuted $\widehat{{\bf D}}=({\bf I} - {\bf P}^\top {\bf B} {\bf P})^{-1} {\bf C}_D$, triangular with ${\bf C}_D$ on its diagonal. The constraint on the number of non-zero elements in ${\bf D}$ due to triangularity removes the permutation freedom ${\bf P}_\mathrm{f}$ such that we can subsequently identify ${\bf P}$, ${\bf B}$ and ${\bf C}_D$. It also implies that any valid permutation ${\bf P}$ will produce exactly the same distribution for ${\bf x}$.
\begin{figure}
\caption{FM-DAG equivalence illustration. In the left side, a DAG model with four variables with corresponding connectivity matrix ${\bf R}$, $b_{ij}=1$ when $r_{ij}=1$ and ${\bf C}_D={\bf I}$. In the right hand side, the equivalent factor model with mixing matrix ${\bf D}$. Note that the factor model is sparse even if its corresponding DAG is dense. The gray boxes in ${\bf D}$ and ${\bf R}\odot{\bf B}$ represent elements that must be zero by construction.}
\label{fg:DAGtoFA}
\end{figure}
In the general case in equation \eqref{eq:PBxCz}, ${\bf D}=({\bf I}-{\bf B})^{-1}{\bf C}$ is of size $d\times(d+m)$. What we will show is that even if ${\bf D}$ is still identifiable, we can no longer obtain ${\bf B}$ and ${\bf C}$ uniquely unless we \quotes{tag} the model by requiring the distributions of driving signals ${\bf z}_D$ and latent signals ${\bf z}_L$ to differ. In order to illustrate why we get non-identifiability, we can write ${\bf x}={\bf D}{\bf z}$ by computing ${\bf D}$ explicitly. For simplicity we consider $m=1$ and ${\bf P}={\bf I}$ but generalizing to $m>1$ is straightforward
{\small \begin{align*}
\left[\begin{array}{c}
x_1 \\ x_2 \\ x_3 \\ \vdots \\ x_d
\end{array}\right] =
\left[\begin{array}{ccccc}
c_{11} & 0 & 0 & \cdots & c_{1L} \\
b_{21}c_{11} & c_{22} & 0 & \cdots & b_{21}c_{1L} + c_{2L} \\
b_{31}c_{11} + b_{32}b_{21}c_{11} & b_{32}c_{22} & c_{33} & \cdots & b_{31}c_{1L} + b_{32}b_{21}c_{1L} + b_{32}c_{2L} + c_{3L} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
c_{11} + \sum_{k=1}^{i-1}b_{ik}d_{k1} & \cdots & \cdots & \cdots & c_{iL} + \sum_{k=1}^{i-1}b_{ik}d_{kL}
\end{array}\right]
\left[\begin{array}{c}
z_1 \\ z_2 \\ z_3 \\ \vdots \\ z_{d+1}
\end{array}\right] . \end{align*} }
We see from this equation that if all latent variables have the same distribution and $c_{1L}$ is non-zero then we may exchange the first and last column in ${\bf D}$ to get two equivalent distributions with different elements for ${\bf B}$ and ${\bf C}$. The model is thus non-identifiable. If the first $i$ elements in latent column of ${\bf C}$ are zero then the $(i+1)$-th and last column can be exchanged. \citet{hoyer08a} made the same basic observation through a number of examples. Interestingly, we also see from the triangularity requirement of the \quotes{driving signal} part of ${\bf D}$ that ${\bf P}$ is actually identifiable despite the fact that ${\bf B}$ and ${\bf C}$ are not. To illustrate that the non-identifiability may lead to quite severe confusion about inferences, consider a model with only two observed variables ${\bf x}=[x_1,x_2]^\top$ and $c_{11}=c_{22}=1$. Two different hypotheses $\{b_{21},c_{1L},c_{2L}\}=\{0,1,1\}$ and $\{b_{21},c_{1L},c_{2L}\}=\{1,1,-1\}$ with graphs shown in Figure \ref{fg:toyDAGL} have equivalent factor models written as
\begin{figure}
\caption{Two DAGs with latent variables. They are equivalent if ${\bf z}$ has the same distribution as ${\bf z}'$.}
\label{fg:toyDAGL1}
\label{fg:toyDAGL2}
\label{fg:toyDAGL}
\end{figure}
\begin{align*}
\left[\begin{array}{c} x_1 \\ x_2 \end{array}\right] = \left[\begin{array}{ccc} 1 & 0 & 1 \\ 0 & 1 & 1 \end{array}\right]\left[\begin{array}{c} z_1 \\ z_2 \\ z_L \end{array}\right] \ \mathrm{and} \ \left[\begin{array}{c} x_1 \\ x_2 \end{array}\right] = \left[\begin{array}{ccc} 1 & 0 & 1 \\ 1 & 1 & 0 \end{array}\right]\left[\begin{array}{c} z'_1 \\ z'_2 \\ z'_L \end{array}\right] \ . \end{align*}
The two models above have the same mixing matrix ${\bf D}$, up to permutation of columns ${\bf P}_\mathrm{f}$. In general we expect the number of solutions with equivalent distribution may be as large as $2^m$, corresponding to the number of times a column of ${\bf D}$ from its latent part (last $m$ columns) can be exchanged with a column from its observed part (first $d$ columns). This readily assumes that the sparsity pattern in ${\bf D}$ is identified, which follows from the results of \citet{kagan73}.
One way to get identifiability is to change the distributions ${\bf z}_D$ and ${\bf z}_L$ such that they differ and cannot be exchanged. Here it is not enough to change the scale of the variables, i.e.\ variance for continuous variables, because this effect can be countered by rescaling ${\bf C}$ with ${\bf S}_{\mathrm{f}}$. So we need distributions that differ beyond rescaling. In our examples we use Laplace and the more heavy-tailed Cauchy for ${\bf z}_D$ and ${\bf z}_L$, respectively. This specification is not unproblematic in practical situations: it can sometimes be restrictive and prone to model mismatch issues. We nevertheless show one practical example which leads to sensible inferences.
In time series applications for example, it is natural to go beyond an i.i.d.\ model for ${\bf z}$. One may for example use a Gaussian process prior for each factor to get smoothness over time, i.e.\ $z_{j1},\ldots,z_{jN}|\nu_j \sim \mathcal{N}(0,{\bf K}_{\nu_j})$, where ${\bf K}_{\nu_j}$ is the covariance matrix with elements $k_{j,nn'}=k_{\nu_j}(n,n')$ and $k_{\nu_j}(\cdot,\cdot)$ is the covariance function. For the i.i.d.\ Gaussian model the source distribution is only identifiable up to an arbitrary rotation matrix ${\bf U}$, i.e.\ the rotated factors ${\bf U}{\bf z}$ are still i.i.d. We can show that contrary to the i.i.d.\ Gaussian model, the Gaussian process factor model is identifiable if the covariance functions differ. We need to show that $\widehat{{\bf Z}}= {\bf U} {\bf Z}$ has a different covariance structure than ${\bf Z}=[{\bf z}_1 \ \ldots \ {\bf z}_N]$. We get $\overline{{\bf z}_n {\bf z}_{n'}^\top}= {\rm diag}(k_{1,nn'},\ldots,k_{d+m,nn'})$ and $\overline{\widehat{{\bf z}}_n\widehat{{\bf z}}_{n'}^\top } = {\bf U} \overline{{\bf z}_n {\bf z}_{n'}^\top} {\bf U}^\top = {\bf U} {\rm diag}(k_{1,nn'},\ldots,k_{d+m,nn'}) {\bf U}^\top$ for the original and rotated variables, respectively. The covariances are indeed different and the model is thus identifiable if no covariance functions $k_{\nu_j}(n,n')$, $j=1,\ldots,d+m$ are the same.
\section{Prior specification} \label{sc:noi}
In this section we provide a detailed description of the priors used for each one of the elements of our sparse linear identifiable model already defined in equation \eqref{eq:PBxCz}. We start with $\bm{\epsilon}$, the noise term that allows us to quantify the mismatch between a set of $N$ observations ${\bf X}=[{\bf x}_1 \ \ldots \ {\bf x}_N]$ and the model itself. For this purpose, we use uncorrelated Gaussian noise components $\bm{\epsilon} \sim \mathcal{N}(\bm{\epsilon}|{\bf 0},\bm{\Psi})$ with conjugate inverse gamma priors for their variances as follows
\begin{align*}
{\bf X}|{\bf m},\bm{\Psi} & \sim \prod_{n=1}^N\mathcal{N}({\bf x}_n|{\bf m},\bm{\Psi}) \ , \\
\bm{\Psi}^{-1}|s_s,s_r & \sim \prod_{i=1}^d\mathrm{Gamma}(\psi_i^{-1}|s_s,s_r) \ , \end{align*}
where we have already marginalized out $\bm{\epsilon}$, $\bm{\Psi}$ is a diagonal covariance matrix denoting uncorrelated noise across dimensions and ${\bf m}$ is the mean vector such that ${\bf m}_\mathrm{FM}={\bf C}{\bf z}_n$ and ${\bf m}_\mathrm{DAG}={\bf B}{\bf x}_n+{\bf C}{\bf z}_n$. In the noise covariance hyperprior, $s_s$ and $s_r$ are the shape and rate, respectively. The selection of hyperparameters for $\bm{\Psi}$ should not be very critical as long as both \quotes{signal and noise} hypotheses are supported, i.e.\ diffuse enough to allow for small values of $\psi_i$ as well as for $\psi_i=1$ (assuming that the data is standardized in advance). We set $s_s=20$ and $s_r=1$ in the experiments for instance. Another issue to consider when selecting $s_s$ and $s_r$ is the Bayesian analogue of the Heywood problem in which likelihood functions are bounded below away from zero as $\psi_i$ tends to zero, hence inducing multi-modality in the posterior of $\psi_i$ with one of the modes at zero. The latter can be avoided by specifying $s_s$ and $s_r$ such that the prior decays to zero at the origin, as we did above. It is well known, for example, that Heywood problems cannot be avoided using improper reference priors, $p(\psi_i)\propto1/\psi_i$ \citep{martin75}.
The remaining components of the model are described as follows in five parts named sparsity, latent variables and driving signals, order search, allowing for correlated data and allowing for non-linearities. The first part addresses the interpretability of the model by means of parsimonious priors for ${\bf C}$ and ${\bf D}$. The second part describes the type of non-Gaussian distributions used on ${\bf z}$ in order to keep the model identifiable. The third part considers how a search over permutations of the observed variables can be used in order to handle the constraints imposed on matrix ${\bf R}$. The last two parts describe how introducing Gaussian process priors in the model can be used to model non-independent observations and non-linear dependencies in the DAGs.
\subsection{Sparsity} \label{sc:sse}
The use of sparse models will in many cases give interpretable results and is often motivated by the principle of parsimony. In many application domains it is also natural from a prediction point of view to enforce sparsity because the number of explanatory variables may exceed the number of examples by orders of magnitude. In regularized maximum likelihood type formulations of learning (maximum a-posteriori) it has become popular to use one-norm ($L_1$) regularization for example to achieve sparsity \citep{tibshirani96}. In the fully Bayesian inference setting (with averaging over variables), the corresponding Laplace prior will not lead to sparsity because it is very unlikely for a posterior summary like the mean, median or mode to be estimated as exactly zero even asymptotically. The same effect can be expected from any continuous distribution used for sparsity like Student's $t$, $\alpha$-stable and bimodal priors \citep[continuous slab and spike priors,][]{ishwaran05}. Exact zeros can only be achieved by placing a point mass at zero, i.e.\ explicitly specifying that the variable at hand is zero or not with some probability. This has motivated the introduction of many variants over the years of so-called slab and spike priors consisting of two component mixtures of a continuous part and a $\delta$-function at zero \citep{lempers71,mitchell88,george93,geweke96,west03}. In this paradigm, the columns of matrices ${\bf C}$ or ${\bf B}$ encode respectively, the connectivity of a factor or the set of parents associated to an observed variable. It is natural then to share information across elements in column $j$ by assuming a common sparsity level $1-\nu_j$, suggesting the following hierarchy
\begin{align} \label{eq:hdss1}
\begin{aligned}
c_{ij}|q_{ij},\cdot \ \sim & \ (1-q_{ij})\delta(c_{ij}) + q_{ij}{\rm Cont}(c_{ij}|\cdot) \ , \\
q_{ij}|\nu_j \ \sim & \ \mathrm{Bernoulli}(q_{ij}|\nu_j) \ , \\
\nu_j|\beta_m,\beta_p \ \sim & \ \mathrm{Beta}(\nu_j|\beta_p\beta_m,\beta_p(1-\beta_m)) \ ,
\end{aligned} \end{align}
where ${\bf Q}$, the binary matrix in equation \eqref{eq:PBxCz} appears naturally, $\delta(\cdot)$ is a Dirac $\delta$-function, ${\rm Cont}(\cdot)$ is the continuous slab component, $\mathrm{Bernoulli}(\cdot)$ and $\mathrm{Beta}(\cdot)$ are Bernoulli and beta distributions, respectively. Reparameterizing the beta distribution as $\mathrm{Beta}(\nu_j|\alpha\beta/m,\beta)$ and taking the number of columns $m$ of ${\bf Q}\odot{\bf C}$ to infinity, leads to the non-parametric version of the slab and spike model with a so-called Indian buffet process prior over the (infinite) masking matrix ${\bf Q}=\{q_{ij}\}$ \citep{ghahramani06}. Note also that $q_{ij}|\nu_j$ is mainly used for clarity to make the binary indicators explicit, nevertheless in practice we can work directly with $c_{ij}|\nu_j,\cdot \sim (1-\nu_j)\delta(c_{ij}) + \nu_j{\rm Cont}(c_{ij}|\cdot)$ because $q_{ij}$ can be marginalized out.
As illustrated and pointed out by \citet{lucas06} and \citet{carvalho08} the model with a shared beta-distributed sparsity level per factor introduces the undesirable side-effect that there is strong co-variation between the elements in each column of the masking matrix. For example, in high dimensions we might expect that only a finite number of elements are non-zero, implying a prior favoring a very high sparsity rate $1-\nu_j$. Because of the co-variation, even the parameters that are clearly non-zero will have a posterior probability of being non-zero, $p(q_{ij}=1|{\bf x},\cdot)$, quite spread over the unit interval. Conversely, if our priors do not favor sparsity strongly, then the opposite situation will arise and the solution will become completely dense. In general, it is difficult to set the hyperparameters to achieve a sensible sparsity level. Ideally, we would like to have a model with a high sparsity level with high certainty about the non-zero parameters. We can achieve this by introducing a sparsity parameter $\eta_{ij}$ for each element of ${\bf C}$ which has a mixture distribution with exactly this property
\begin{align} \label{eq:hdss2}
\begin{aligned}
q_{ij}|\eta_{ij} \ \sim & \ \mathrm{Bernoulli}(q_{ij}|\eta_{ij}) \ , \\
\eta_{ij}|\nu_j,\alpha_p,\alpha_m \ \sim & \ (1-\nu_j)\delta(\eta_{ij})+\nu_j\mathrm{Beta}(\eta_{ij}|\alpha_p\alpha_m,\alpha_p(1-\alpha_m)) \ .
\end{aligned} \end{align}
The distribution over $\eta_{ij}$ expresses that we expect parsimony: either $\eta_{ij}$ is zero exactly (implying that $q_{ij}$ and $c_{ij}$ are zero) or non-zero drawn from a beta distribution favoring high values, i.e.\ $q_{ij}$ and $c_{ij}$ are non-zero with high probability. We use $\alpha_p=10$ and $\alpha_m=0.95$, which gives mean $\alpha_m=0.95$ and variance $\alpha_m(1-\alpha_m)/(1+\alpha_p)\approx0.004$. The expected sparsity rate of the modified model is $(1-\alpha_m)(1-\nu_j)$. This model has the additional advantage that the posterior distribution of $\eta_{ij}$ directly measures the distribution of $p(q_{ij}=1|{\bf x},\cdot)$. This is therefore the statistic for ranking/selection purposes. Besides, we may want to reject interactions with high uncertainty levels when the probability of $p(q_{ij}=1|{\bf x},\cdot)$ is less than or very close to the expected value, $\alpha_m(1-\nu_j)$.
To complete the specification of the prior, we let the continuous slab part in equation \eqref{eq:hdss1} be Gaussian distributed with inverse gamma prior on its variance. In addition, we scale the variances with $\psi_i$ as
\begin{align} \label{eq:hdss3}
\begin{aligned}
{\rm Cont}(c_{ij}|\psi_i,\tau_{ij}) \ = & \ \mathcal{N}(c_{ij}|0,\psi_i\tau_{ij}) \ , \\
\tau_{ij}^{-1}|t_s,t_r \ \sim & \ \mathrm{Gamma}(\tau_{ij}^{-1}|t_s,t_r) \ . \\
\end{aligned} \end{align}
This scaling makes the model easier to specify and tends to have better mixing properties \citep[see][]{casella08}. The slab and spike for ${\bf B}$ (DAG) is obtained from equations \eqref{eq:hdss1}, \eqref{eq:hdss2} and \eqref{eq:hdss3} by simply replacing $c_{ij}$ with $b_{ij}$ and $q_{ij}$ with $r_{ij}$. As already mentioned, we use $\alpha_p=10$ and $\alpha_m=0.95$ for the hierarchy in equation \eqref{eq:hdss2}. For the column-shared parameter $\nu_j$ defined in equation \eqref{eq:hdss1} we set the precision to $\beta_p=100$ and consider the mean values for factor models and DAGs separately. For the factor model we set a diffuse prior by making $\beta_m=0.9$ to reflect that some of the factors can be in general nearly dense or empty. For the DAG we consider two settings, if we expect to obtain dense graphs we set $\beta_m=0.99$, otherwise we set $\beta_m=0.1$. Both settings can produce sparse graphs, however smaller values of $\beta_m$ increase the overall sparsity rate and the gap between $p(r_{ij}=0)$ and $p(r_{ij}=1)$. A large separation between these two probabilities makes interpretation easier and also helps to spot non-zeros (edges) with high uncertainty. The hyperparameters for the variance of the non-zero elements of ${\bf B}$ and ${\bf C}$ are set to get a diffuse prior distribution bounded away from zero ($t_s=2$ and $t_r=1$), to allow for a better separation between slab and spike components. For the particular case of ${\bf C}_L$, in principle the prior should not have support on zero at all, i.e.\ the driving signal should not vanish, however for simplicity we allow this anyway as it has not given any problems in practice. Figure \ref{fg:single_aeta} shows a particular example of the posterior, $p(c_{ij},\eta_{ij}|{\bf x},\cdot)$ for two elements of ${\bf C}$ under the prior just described.
In the example, $c_{64}\neq0$ with high probability according to $\eta_{ij}$, whereas $c_{54}$ is almost certainly zero since most of its probability mass is located exactly at zero, with some residual mass on the vicinity of zero, in Figure \ref{fg:single_a}. In the one level hierarchy equation \eqref{eq:hdss1} sparsity parameters are shared, $\eta_{64}=\eta_{54}=\nu_4$. The result would then be less parsimonious with the posterior density of $\nu_4$ being spread in the unit interval with a single mode located close to $\beta_m$.
\begin{figure}
\caption{Slab and spike prior example. (a) Posterior unnormalized densities for the magnitude of two particular elements of ${\bf C}$. (b) Posterior density for $\eta_{ij}=p(c_{ij}\neq0|{\bf x},\cdot)$. Here, $c_{64}\neq0$ and $c_{54}=0$ correspond to elements of the mixing matrix from the experiment shown in Figure \ref{fg:singlenets}.}
\label{fg:single_a}
\label{fg:single_eta}
\label{fg:single_aeta}
\end{figure}
\subsection{Latent variables and driving signals} \label{sc:smog}
We consider two different non-Gaussian, heavy-tailed priors for ${\bf z}$, in order to obtain identifiable factor models and DAGs. A wide class of continuous, unimodal and symmetric distributions in one dimension can be represented as infinite scale mixtures of Gaussians, which are very convenient for Gibbs-sampling-based inference. We focus on Student's $t$ and Laplace distributions which have the following mixture representation \citep{andrews74}
\begin{align}
\mathrm{Laplace}(z|\mu,\lambda) \ = & \ \int_0^\infty\mathcal{N}(z|\mu,\upsilon)\mathrm{Exponential}(\upsilon|\lambda^2)d\upsilon \ , \label{eq:sLad} \\
t(z|\mu,\theta,\sigma^2) \ = & \ \int_0^\infty\mathcal{N}(z|\mu,\upsilon\sigma^2)\mathrm{Gamma}\left(\upsilon^{-1}\left|\frac{\theta}{2},\frac{\theta}{2}\right.\right)d\upsilon \ , \label{eq:sStd} \end{align}
where $\lambda>0$ is the rate, $\sigma^2>0$ the scale, $\theta>0$ is the degrees of freedom, and the distributions have exponential and gamma mixing densities accordingly. For varying degrees of freedom $\theta$, the $t$ distribution can interpolate between very heavy-tailed (power law and Cauchy when $\theta=1$) and very light tailed, i.e.\ it becomes Gaussian when the degrees of freedom approaches infinity. The Laplace (or bi-exponential) distribution has tails which are intermediate between a $t$ (with finite degrees of freedom) and a Gaussian. In this sense, the $t$ distribution is more flexible but requires more careful selection of its hyperparameters because the model may become non-identifiable in the large $\theta$ limit (Gaussian).
An advantage of the Laplace distribution is that we can fix its parameter $\lambda=1$ and let the model learn the appropriate scaling from ${\bf C}$ in equation \eqref{eq:PBxCz}. If we use the pure DAG model, we will need to have a hyperprior for $\lambda^2$ in order to learn the variances of the latent variables/driving signals, as in \citet{henao09}. A hierarchical prior for the degrees of freedom in the $t$ distribution is not easy to specify because there is no conjugate prior available with a standard closed form. Although a conjugate prior exists, it is not straightforward to sample from, since numerical integration must be used to compute its normalization constant. Another possibility is to treat $\theta$ as a discrete variable so that computing the normalizing constant becomes straightforward.
Laplace and Student's $t$ are not the only distributions admitting a scale mixture representation. This means that any other compatible type can be used as well, if the application requires it, and without considerable additional effort. Some examples include the logistic distribution \citep{andrews74}, the stable family \citep{west87} and skewed versions of heavy-tailed distributions \citep{branco01}. Another natural extension to the mixture scheme could be, for example, to set the mean of each component to arbitrary values and let the number of components be an infinite sum, thus ending up providing each factor with a Dirichlet process prior. This might be useful for cases when the latent factors are expected to be scattered in clusters due to the presence of subgroups in the data, as was shown by \citet{carvalho08}.
\subsection{Order search}
We need to infer the order of the variables in the DAG to meet the constraints imposed on ${\bf R}$ in Section \ref{sc:lin}. The most obvious way is to try to solve this task by inferring all parameters $\{{\bf P},{\bf B},{\bf C},{\bf z},\bm{\epsilon}\}$ by a Markov chain Monte Carlo (MCMC) method such as Gibbs sampling. However, algorithms for searching over variable order prefer to work with models for which parameters other than ${\bf P}$ can be marginalized analytically \citep[see][]{friedman03,teyssier05}. For our model, where we cannot marginalize analytically over ${\bf B}$ (due to ${\bf R}$ being binary), estimating ${\bf P}$ and ${\bf B}$ by Gibbs sampling would mean that we had to propose a new ${\bf P}$ for fixed ${\bf B}$. For example, exchanging the order of two variables would mean that they also exchange parameters in the DAG. Such a proposal would have very low acceptance, mainly as a consequence of the size of the search space and thus very poor mixing. In fact, for a given $d$ number of variables there are $d!$ possible orderings ${\bf P}$, while there are $d!2^{(d(d+2m-1))/2}$ possible structures for $\{{\bf P},{\bf B},{\bf C}\}$. We therefore opt for an alternative strategy by exploiting the equivalence between factor models and DAGs shown in Section \ref{sc:idf}. In particular for $m=0$, since ${\bf B}$ can be permuted to strictly lower triangular, then ${\bf D}=({\bf I}-{\bf B})^{-1}{\bf C}_D$ can be permuted to triangular. This means that we can perform inference for the factor model to obtain ${\bf D}$ while searching in parallel for a set of permutations ${\bf P}$ that are in good agreement (in a probabilistic sense) with the triangular requirement of ${\bf D}$. Such a set of orderings is found during the inference procedure of the factor model. 
To set up the stochastic search, we need to modify the factor model slightly by introducing separate data (row) and factor (column) permutations, ${\bf P}$ and ${\bf P}_\mathrm{f}$ to obtain ${\bf x} = {\bf P}^\top {\bf D}{\bf P}_\mathrm{f} {\bf z} + \bm{\epsilon}$. The reason for using two different permutation matrices, rather than only one like in the definition of the DAG model, is that we need to account for the permutation freedom of the factor model (see Section \ref{sc:idf}). Using the same permutation for row and column would thus require an additional step to identify the columns in the factor model. We make inference for the unrestricted factor model, but propose ${\bf P}^\star$ and ${\bf P}_\mathrm{f}^\star$ independently according to $q({\bf P}^\star|{\bf P})q({\bf P}_\mathrm{f}^\star|{\bf P}_\mathrm{f})$. Both distributions draw a new permutation matrix by exchanging two randomly chosen elements, e.g.\ the order may change as $[x_1,x_2,x_3,x_4]^\top \to [x_1,x_4,x_3,x_2]^\top$. In other words, the proposals $q({\bf P}^\star|{\bf P})$ and $q({\bf P}_\mathrm{f}^\star|{\bf P}_\mathrm{f})$ are uniform distributions over the space of transpositions for ${\bf P}$ and ${\bf P}_\mathrm{f}$. Assuming we have no a-priori preferred ordering, we may use a Metropolis-Hastings (M-H) acceptance probability $\mathrm{min}(1,\xi_{\rightarrow\star})$ with $\xi_{\rightarrow\star}$ as a simple ratio of likelihoods with the permuted ${\bf D}$ masked to match the triangularity assumption. Formally, we use the binary mask ${\bf M}$ (containing zeros above the diagonal of its $d$ first columns) and write
\begin{align} \label{eq:rlik}
\xi_{\rightarrow\star}=\frac{{\cal N}({\bf X}|({\bf P}^\star)^\top({\bf M}\odot {\bf P}^\star {\bf D} ({\bf P}_\mathrm{f}^\star)^\top){\bf P}_\mathrm{f}^\star{\bf Z},\bm{\Psi})}{{\cal N}({\bf X}|{\bf P}^\top({\bf M}\odot {\bf P}{\bf D}{\bf P}_\mathrm{f}^\top){\bf P}_\mathrm{f}{\bf Z},\bm{\Psi})} \ , \end{align}
where ${\bf M}\odot{\bf D}$ is the masked ${\bf D}$ and ${\bf Z}=[{\bf z}_1 \ \ldots {\bf z}_N]$. The procedure can be seen as a simple approach for generating hypotheses about good orderings, producing close-to-triangular versions of ${\bf D}$, in a model where the slab and spike prior provides the required bias towards sparsity. Once the inference is done, we end up having an estimate for the desired distribution over permutations ${\bf P}=\sum_{i=1}^{d!}\pi_i\delta_{{\bf P}_i}$, where $\bm{\pi}=[\pi_1 \ \pi_2 \ \ldots]$ is a sparse vector containing the probability for ${\bf P}={\bf P}_i$, which in our case is proportional to the number of times permutation ${\bf P}_i$ was accepted by the M-H update during inference. Note that ${\bf P}_\mathrm{f}$ is just a nuisance variable that does not need to be stored or summarized.
\subsection{Allowing for correlated data (CSLIM)} \label{sc:cslim}
For the case where independence of observed variables cannot be assumed, for instance due to (time) correlation or smoothness, the priors discussed before for the latent variables and driving signals do not really apply anymore; however, the only change we need to make is to allow elements in rows of ${\bf Z}$ to correlate. We can then assume independent Gaussian process (GP) priors for each latent variable instead of scale mixtures of Gaussians, to obtain what we have called correlated sparse linear identifiable modeling (CSLIM). For a set of $N$ realizations of variable $j$ we set
\begin{align}
z_{j1},\ldots,z_{jN}|\upsilon_j \ & \sim \ \mathrm{GP}(z_{j1},\ldots,z_{jN}|k_{\upsilon_j,n}(\cdot)) \ , \label{eq:GPj} \end{align}
where the covariance function has the form $k_{\upsilon_j,n}(n,n')=\exp( -\upsilon_j(n-n')^2 )$, $\{n,n'\}$ is a pair of observation indices or time points and $\upsilon_j$ is the length scale controlling the overall level of correlation allowed for each variable (row) in ${\bf Z}$. Conceptually, equation \eqref{eq:GPj} implies that each latent variable $j$ is sampled from a function and the GP acts as a prior over continuous functions. Since such a length scale is very difficult to set just by looking at the data, we further place priors on $\upsilon_j$ as
\begin{align}
\upsilon_j|u_s,\kappa \ \sim \ \mathrm{Gamma}(\upsilon_j|u_s,\kappa) \ , \quad \kappa|k_s,k_r \ \sim \ \mathrm{Gamma}(\kappa|k_s,k_r) \ . \label{eq:GPhyp} \end{align}
Given that the conditional distribution of $\bm{\upsilon}=[\upsilon_1,\ldots,\upsilon_m]$ is not of any standard form, Metropolis-Hastings updates are used. In the experiments we use that $u_s=k_s=2$ and $k_r=0.02$. The details concerning inference for this model are given in Appendix \ref{ap:inf}.
It is also possible to easily expand the possible applications of GP priors in this context by, for instance, using more structured covariance functions through scale mixture of Gaussian representations to obtain a prior distribution for continuous functions with heavy-tailed behavior --- a $t$-process \citep{yu07}, or learning the covariance function as well using inverse Wishart hyperpriors.
\subsection{Allowing for non-linearities (SNIM)} \label{sc:snim}
Provided that we know the true ordering of the variables, i.e.\ ${\bf P}$ is known, then ${\bf B}$ is surely strictly lower triangular. It is very easy to allow for non-linear interactions in the DAG model from equation \eqref{eq:PBxCz} by rewriting it as
\begin{align} \label{eq:PBfxCz}
{\bf P}{\bf x}=({\bf R}\odot{\bf B}){\bf P}{\bf y} + ({\bf Q}\odot{\bf C}){\bf z} + \bm{\epsilon} \ , \end{align}
where ${\bf y}=[y_1,\ldots,y_d]^\top$ and $y_{i1},\ldots,y_{iN}|\upsilon_i\sim \mathrm{GP}(y_{i1},\ldots,y_{iN}|k_{\upsilon_i,x}(\cdot))$ has a Gaussian process prior with for instance, but not limited to, a stationary covariance function like $k_{\upsilon_i,x}({\bf x},{\bf x}')=\exp(-\upsilon_i({\bf x}-{\bf x}')^2)$, similar to equation \eqref{eq:GPj} and with the same hyperprior structure as in equation \eqref{eq:GPhyp}. This is a straight forward extension that we call sparse non-linear multivariate modeling (SNIM) that is in spirit similar to \citet{friedman00,hoyer08,zhang09,zhang09a,tillman09}, however instead of treating the inherent multiple regression problem in equation \eqref{eq:PBfxCz} and the conditional independence of the observed variables independently, we proceed within our proposed framework by letting the multiple regressor be sparse, thus the conditional independences are encoded through ${\bf R}$. The main limitation of the model in equation \eqref{eq:PBfxCz} is that if the true ordering of the variables is unknown, the exhaustive enumeration of ${\bf P}$ is needed. This means that this could be done for very small networks, e.g.\ up to 5 or 6 variables. In principle, an ordering search procedure for the non-linear model only requires the latent variables ${\bf z}$ to have Gaussian process priors as well. The main difficulty is that in order to build covariance functions for ${\bf z}$ we need a set of observations that are not available because ${\bf z}$ is latent.
\section{Model comparison} \label{sc:ms}
Quantitative model comparison between factor models and DAGs is a key ingredient in SLIM. The joint probability of data ${\bf X}$ and parameters for the factor model part in equation \eqref{eq:PBxCz} is
\begin{equation*}
p({\bf X},{\bf C},{\bf Z},\bm{\epsilon},\cdot)=p({\bf X}|{\bf C},{\bf Z},\bm{\epsilon})p({\bf C}|\cdot)p({\bf Z}|\cdot)p(\bm{\epsilon})p(\cdot) \ , \end{equation*}
where $(\cdot)$ indicates additional parameters in the hierarchical model. Formally the Bayesian model selection yardstick, the marginal likelihood for model $\mathcal{M}$
$$p({\bf X}|\mathcal{M})=\int p({\bf X}|\bm{\Theta},{\bf Z}) p(\bm{\Theta}|\mathcal{M}) p({\bf Z}|\mathcal{M}) d\bm{\Theta} d{\bf Z} \ ,$$
can be obtained by marginalizing the joint over the parameters $\bm{\Theta}$ and latent variables ${\bf Z}$. Computationally this is a difficult task because the marginal likelihood cannot be written as an average over the posterior distribution in a simple way. It is still possible using MCMC methods, for example by partitioning of the parameter space and multiple chains or thermodynamic integration \citep[see][]{chib95,neal01,murray07,friel08}, but in general it must be considered as computationally expensive and non-trivial. On the other hand, evaluating the likelihood on a test set ${\bf X}^\star$, using predictive densities $p({\bf X}^\star|{\bf X},\mathcal{M})$ is simpler from a computational point of view because it can be written in terms of an average over the posterior of the {\it intensive variables}, $p({\bf C},\bm{\epsilon},\cdot|{\bf X})$ and the prior distribution of the {\it extensive variables} associated with the test points\footnote{Intensive means not scaling with the sample size. Extensive means scaling with the sample size, in this case the size of the test sample.}, $p({\bf Z}^\star|\cdot)$ as
\begin{equation} \label{eq:tFA}
\mathcal{L}_\mathrm{FM} \deff p({\bf X}^\star|{\bf X},\mathcal{M}_\mathrm{FM}) = \int p({\bf X}^\star|{\bf Z}^\star,\bm{\Theta}_\mathrm{FM},\cdot) p({\bf Z}^\star|\cdot) p(\bm{\Theta}_\mathrm{FM},\cdot|{\bf X}) d{\bf Z}^\star d\bm{\Theta}_\mathrm{FM} d(\cdot) \ , \end{equation}
where $\bm{\Theta}_\mathrm{FM}=\{{\bf C},\bm{\epsilon}\}$. This average can be approximated by a combination of standard sampling and exact marginalization using the scale mixture representation of the heavy-tailed distributions presented in Section \ref{sc:smog}. For the full DAG model in equation \eqref{eq:PBxCz}, we will not average over permutations ${\bf P}$ but rather calculate the test likelihood for a number of candidates ${\bf P}^{(1)},\ldots,{\bf P}^{(c)},\ldots$ as
\begin{align}\label{eq:tBN}
\mathcal{L}_\mathrm{DAG} & \deff p({\bf X}^\star|{\bf P}^{(c)},{\bf X},\mathcal{M}_\mathrm{DAG}) \ , \nonumber \\
& = \int p({\bf X}^\star|{\bf P}^{(c)},{\bf X},{\bf Z}^\star,\bm{\Theta}_\mathrm{DAG},\cdot) p({\bf Z}^\star|\cdot) p(\bm{\Theta}_\mathrm{DAG},\cdot|{\bf X}) d{\bf Z}^\star d\bm{\Theta}_\mathrm{DAG} d(\cdot) \ , \end{align}
where $\bm{\Theta}_\mathrm{DAG}=\{{\bf B},{\bf C},\bm{\epsilon}\}$. We use sampling to compute the test likelihoods in equations \eqref{eq:tFA} and \eqref{eq:tBN}. With Gibbs, we draw samples from the posterior distributions $p(\bm{\Theta}_\mathrm{FM},\cdot|{\bf X})$ and $p(\bm{\Theta}_\mathrm{DAG},\cdot|{\bf X})$, where $(\cdot)$ is shorthand, for example, for the degrees of freedom $\theta$, if Student's $t$ distributions are used. The average over the extensive variables associated with the test points $p({\bf Z}^\star|\cdot)$ is slightly more complicated because naively drawing samples from $p({\bf Z}^\star|\cdot)$ results in an estimator with high variance when $\psi_i\ll\upsilon_{jn}$. Instead we exploit the infinite mixture representation to marginalize exactly ${\bf Z}^\star$ and then draw samples in turn for the scale parameters. Omitting the permutation matrices for clarity, in general we get
\begin{align*}
p({\bf X}^\star|\bm{\Theta},\cdot) = & \int p({\bf X}^\star|{\bf Z}^\star,\bm{\Theta},\cdot)p({\bf Z}^\star|\cdot)d{\bf Z}^\star \ , \\
= & \prod_{n} \int \mathcal{N}({\bf x}_n^\star|{\bf m}_n,\bm{\Sigma}_n)\prod_{j} p(\upsilon_{jn}|\cdot)d\upsilon_{jn}
\approx \frac{1}{N_\mathrm{rep}}\prod_{n} \sum_{r}^{N_\mathrm{rep}} \mathcal{N}({\bf x}_n^\star|{\bf m}_n,\bm{\Sigma}_n) \ , \end{align*}
where $N_\mathrm{rep}$ is the number of samples generated to approximate the intractable integral ($N_\mathrm{rep}=500$ in the experiments). For the factor model, ${\bf m}_n={\bf 0}$ and $\bm{\Sigma}_n={\bf C}_D{\bf U}_n{\bf C}_D^\top+\bm{\Psi}$. For the DAG, ${\bf m}_n={\bf B}{\bf x}_n^\star$ and $\bm{\Sigma}_n={\bf C}{\bf U}_n{\bf C}^\top+\bm{\Psi}$. The covariance matrix ${\bf U}_n=\mathrm{diag}(\upsilon_{1n},\ldots,\upsilon_{(d+m)n})$, with elements $\upsilon_{jn}$, is sampled directly from the prior, accordingly. Once we have computed $p({\bf X}^\star|\bm{\Theta}_\mathrm{FM},\cdot)$ for the factor model and $p({\bf X}^\star|\bm{\Theta}_\mathrm{DAG},\cdot)$ for the DAG, we can use them to average over $p(\bm{\Theta}_\mathrm{FM},\cdot|{\bf X})$ and $p(\bm{\Theta}_\mathrm{DAG},\cdot|{\bf X})$ to obtain the predictive densities $p({\bf X}^\star|{\bf X},\mathcal{M}_\mathrm{FM})$ and $p({\bf X}^\star|{\bf X},\mathcal{M}_\mathrm{DAG})$, respectively.
For the particular case in which ${\bf X}$ and consequently ${\bf Z}$ are correlated variables --- CSLIM, we use a slightly different procedure for model comparison. Instead of using a test set, we randomly remove some proportion of the elements of ${\bf X}$ and perform inference with missing values, then we summarize the likelihood on the missing values. In particular, for the factor model we use ${\bf M}_{\rm miss}\odot{\bf X}={\bf M}_{\rm miss}\odot({\bf Q}_L\odot{\bf C}_L{\bf Z}+\bm{\epsilon})$ where ${\bf M}_{\rm miss}$ is a binary masking matrix with zeros corresponding to test points, i.e.\ the missing values. See details in Appendix \ref{ap:inf}. Note that this scheme is not exclusive to CSLIM thus can be also used with SLIM or when the observed data contain actual missing values.
\section{Model overview and practical details} \label{sc:inf}
The three models described in the previous section namely SLIM, CSLIM and SNIM can be summarized as a graphical model and as a probabilistic hierarchy as follows
\hspace{-5mm}\parbox{0.5\textwidth}{ \begin{align*}
{\bf x}_n|{\bf W},{\bf y}_n,{\bf z}_n,\bm{\Psi} \ \sim & \ \mathcal{N}({\bf x}_n|{\bf W}[{\bf y}_n \ {\bf z}_n]^\top,\bm{\Psi}) \ , \ \ {\bf W}=[{\bf B} \ {\bf C}] \ ,\\
\psi^{-1}_i|s_s,s_r \ \sim & \ \mathrm{Gamma}(\psi_i^{-1}|s_s,s_r) \ , \\
w_{ik}|h_{ik},\psi_i,\tau_{ik} \ \sim & \ (1-h_{ik})\delta_0(w_{ik}) + h_{ik}\mathcal{N}(w_{ik}|0,\psi_i\tau_{ik}) \ , \\
h_{ik}|\eta_{ik} \ \sim & \ \mathrm{Bernoulli}(h_{ik}|\eta_{ik}) \ , \ \ {\bf H}=[{\bf R} \ {\bf Q}] \ , \\
\eta_{ik}|\nu_k,\alpha_p,\alpha_m \ \sim & \ (1-\nu_k)\delta(\eta_{ik})+\nu_k\mathrm{Beta}(\eta_{ik}|\alpha_p\alpha_m,\alpha_p(1-\alpha_m)) \ , \\
\nu_k|\beta_m,\beta_p \ \sim & \ \mathrm{Beta}(\nu_k|\beta_p\beta_m,\beta_p(1-\beta_m)) \ , \\
\tau_{ik}^{-1}|t_s,t_r \ \sim & \ \mathrm{Gamma}(\tau_{ik}^{-1}|t_s,t_r) \ , \\
z_{j1},\ldots,z_{jN}|\upsilon \ \sim & \begin{cases} \prod_n\mathcal{N}(z_{jn}|0,\upsilon_{jn}) \ , & {\rm (SLIM)} \\ \mathrm{GP}(z_{j1},\ldots,z_{jN}|k_{\upsilon_j,n}(\cdot)) \ , & {\rm (CSLIM)} \end{cases} \\
y_{i1},\ldots,y_{iN}|\upsilon \ \sim & \begin{cases} x_{i1},\ldots,x_{iN} \ , & {\rm (SLIM)} \\ \mathrm{GP}(y_{i1},\ldots,y_{iN}|k_{\upsilon_i,x}(\cdot)) \ , & {\rm (SNIM)} \end{cases} \end{align*} }\hspace{-10mm}\parbox{0.5\textwidth}{ \begin{tikzpicture}[ >= latex, font = \small, node distance = 1cm and 1cm, rounded corners = 4pt ]
\tikzstyle{obs} = [ circle, thick, draw = black!80, fill = imp2, minimum size = 3mm ]
\tikzstyle{lat} = [ circle, thick, draw = black!100, fill = red!0, minimum size = 3mm ]
\tikzstyle{par} = [ circle, draw, fill = black!100, minimum width = 1pt, inner sep = 0pt ]
\tikzstyle{every label} = [ black!100 ]
\node [obs] (x) [ label = -135:$x_{in}$ ] {};
\node [lat] (B) [ above of = x, node distance = 1.5cm, label = 135:$w_{ik}$ ] {}
edge [post] (x);
\node [lat] (y) [ below of = x, node distance = 1.5cm, label = 135:$y_{in}$ ] {}
edge [post] (x);
\node [lat] (z) [ left of = x, node distance = 1.5cm and 1cm, label = -135:$z_{jn}$ ] {}
edge [post] (x);
\node [lat] (upsilon) [ below of = z, label = -135:$\upsilon_{jn}$ ] {}
edge [post] (z);
\node [lat] (r) [ right of = B, label = 45:$h_{ik}$ ] {}
edge [post] (B);
\node [lat] (eta) [ above of = r, label = 45:$\eta_{ik}$ ] {}
edge [post] (r);
\node [lat] (nu) [ above of = eta, node distance = 1.0cm, label = 45:$\nu_k$ ] {}
edge [post] (eta);
\node [lat] (tau) [ above of = B, label = 135:$\tau_{ik}$ ] {}
edge [post] (B);
\node [lat] (rc) [ right of = y, label = -45:$\upsilon_i$ ] {}
edge [post] (y);
\node [lat] (phi) [ below of = r, node distance = 0.75cm, label = -45:$\psi_i$ ] {}
edge [post] (x)
edge [post] (B);
\draw ( -0.3,-2.1 ) node {\tiny{$i=1:d$}};
\draw ( -0.1, 4.0 ) node {\tiny{$k=1:2d+m$}};
\draw ( -2.0, 0.6 ) node {\tiny{$n=1:N$}};
\draw ( -1.9,-2.1 ) node {\tiny{$j=1:d+m$}};
\begin{pgfonlayer}{background}
\filldraw[ line width = 1pt, draw = black!50, fill = black!5 ]
( 1.8cm, 3.1cm ) rectangle ( -0.9cm,-2.3cm )
( 1.9cm, 4.2cm ) rectangle ( -1.0cm, 1.1cm )
( 0.6cm, 0.8cm ) rectangle ( -2.7cm,-1.9cm )
( -1.0cm, 0.4cm ) rectangle ( -2.8cm,-2.3cm );
\end{pgfonlayer} \end{tikzpicture} }
\noindent where we have omitted ${\bf P}$ and the hyperparameters in the graphical model. Latent variable and driving signal parameters $\upsilon$ can have one of several priors: $\mathrm{Exponential}(\upsilon|\lambda^2)$ (Laplace), $\mathrm{Gamma}(\upsilon^{-1}|\theta/2,\theta/2)$ (Student's $t$) or $\mathrm{Gamma}(\upsilon|u_s,\kappa)$ (GP), see equations \eqref{eq:sLad}, \eqref{eq:sStd} and \eqref{eq:GPhyp}, respectively. The latent variables/driving signals $z_{jn}$ and the mixing/connectivity matrices with elements $c_{ij}$ or $b_{ij}$ are modeled independently. Each element in ${\bf B}$ and ${\bf C}$ has its own slab variance $\tau_{ij}$ and probability of being non-zero $\eta_{ij}$. Moreover, there is a shared sparsity rate per column $\nu_k$. Variables $\upsilon_{jn}$ are variances if $z_{jn}$ uses a scale mixture of Gaussians representation, or length scales in the GP prior case. Since we assume no sparsity for the driving signals, $\eta_{ik}=1$ for $d+i=k$ and $\eta_{ik}=0$ for $d+i\neq k$. In addition, we can recover the pure DAG by making $m=0$ and the standard factor model by making instead $\eta_{ik}=0$ for $k\leq 2d$. All the details for the Gibbs-sampling-based inference are summarized in Appendix \ref{ap:inf}.
\subsection{Proposed workflow}
We propose the workflow shown in Figure \ref{fg:slimfig} to integrate all elements of SLIM, namely factor model and DAG inference, stochastic order search and model selection using predictive densities.
\begin{enumerate}{\leftmargin=1em}
\item Partition the data into $\{{\bf X},{\bf X}^\star\}$.
\item Perform inference on the factor model and stochastic order search. One Gibbs sampling update consists of computing the conditional posteriors in equations \eqref{eq:gibbsPsi}, \eqref{eq:gibbsZ}, \eqref{eq:gibbsD}, \eqref{eq:gibbstau}, \eqref{eq:gibbsq}, \eqref{eq:gibbseta} and \eqref{eq:gibbsnu} in sequence, followed by several repetitions (we use 10) of the M-H update in equation \eqref{eq:rlik} for the permutation matrices ${\bf P}$ and ${\bf P}_\mathrm{f}$.
\item Summarize the factor model, mainly ${\bf C}$, $\{\eta_{ij}\}$ and $\mathcal{L}_\mathrm{FM}$ using quantiles (0.025, 0.5 and 0.975).
\item Summarize the orderings, ${\bf P}$. Select the top $m_\mathrm{top}$ candidates according to their frequency during inference in step 2.
\item Perform inference on the DAGs for each one of the ordering candidates, ${\bf P}^{(1)},\ldots,{\bf P}^{(m_\mathrm{top})}$ using Gibbs sampling by computing equations \eqref{eq:gibbsPsi}, \eqref{eq:gibbsZ}, \eqref{eq:gibbsD}, \eqref{eq:gibbstau}, \eqref{eq:gibbsq}, \eqref{eq:gibbseta} and \eqref{eq:gibbsnu} in sequence, up to minor changes described in Appendix \ref{ap:inf}.
\item Summarize the DAGs, ${\bf B}$, ${\bf C}_L$, $\{\eta_{ik}\}$ and $\mathcal{L}_\mathrm{DAG}^{(1)},\ldots,\mathcal{L}_\mathrm{DAG}^{(m_\mathrm{top})}$ using quantiles (0.025, 0.5 and 0.975). Note that $\{\eta_{ik}\}$ contains non-zero probabilities for ${\bf R}$ and ${\bf Q}$ corresponding to ${\bf B}$ and ${\bf C}_L$, respectively. \end{enumerate}
We use medians to summarize all quantities in our model because ${\bf D}$, ${\bf B}$ and $\{\eta_{ik}\}$ are bimodal while the remaining variables are in general skewed posterior distributions. Inference with GP priors for time series data (CSLIM) or non-linear DAGs (SNIM) is fairly similar to the i.i.d.\ case, see Appendix \ref{ap:inf} for details. Source code for SLIM and all its variants proposed so far has been made available at \url{http://cogsys.imm.dtu.dk/slim/} as Matlab scripts.
\subsection{Computational cost}
The cost of running the linear DAG with latent variables or the factor model is roughly the same, i.e.\ $\mathcal{O}(N_sd^2N)$ where $N_s$ is the total number of samples including the burn-in period. The memory requirements on the other hand are approximately $\mathcal{O}(N_pd^2)$ if all the samples after the burn-in period $N_p$ are stored. This means that the inference procedures scale reasonably well if $N_s$ is kept in the lower ten thousands. The non-linear version of the DAG is considerably more expensive due to the GP priors, hence the computational cost rises up to $\mathcal{O}(N_s(d-1)N^3)$.
The computational cost of LiNGAM, being the closest to our linear models, is mainly dependent on the statistic used to prune/select the model. Using bootstrapping results in $\mathcal{O}(N_b^3)$, where $N_b$ is the number of bootstrap samples. The Wald statistic leads to $\mathcal{O}(d^6)$, while Wald with $\chi^2$ second order model fit test amounts to $\mathcal{O}(d^7)$. As for the memory requirements, bootstrapping is very economic whereas Wald-based statistics require $\mathcal{O}(d^6)$.
\begin{wrapfigure}{r}{0.35\textwidth}
\centering
\begin{psfrags}
\psfrag{time}[c][c][0.65]{Time}\psfrag{d}[c][c][0.65]{$d$}\psfrag{bootstrap}[l][l][0.55]{Bootstrap}\psfrag{wald}[l][l][0.55]{Wald}\psfrag{slim}[l][l][0.55]{SLIM}
\includegraphics[width=0.33\textwidth]{./images/rt_all.eps}
\end{psfrags}
\caption{Runtime comparison.}
\label{fg:rt_all}
\end{wrapfigure} The method for non-linear DAGs described in \citet{hoyer08} is defined for a pair of variables, and it uses GP-based regression and kernelized independence tests. The computational cost is $\mathcal{O}(N_gN^3)$ where $N_g$ is the number of gradient iterations used to maximize the marginal likelihood of the GP. This is the same order of complexity as our non-linear DAG sampler.
Figure \ref{fg:rt_all} shows average running times in a standard desktop machine (two cores, 2.6GHz and 4Gb RAM) over 10 different models with $N=1000$ and $d=\{10,20,50,100\}$. As expected, LiNGAM with bootstrap is very fast compared to the others while our model approaches LiNGAM with Wald statistic as the number of observations increases. We did not include LiNGAM with second order model fit because for $d=50$ it is already prohibitive. For this small test we used a C implementation of our model with $N_s=19000$. We are aware that the performance of a C and a Matlab implementation can be different, however we still do the comparison because the most expensive operations in the Matlab code for LiNGAM are computed through BLAS routines not involving large loops, thus a C implementation of LiNGAM should not be noticeably faster than its Matlab counterpart.
\section{Simulation results} \label{sc:res}
We consider seven sets of experiments to illustrate the features of SLIM. In our comparison with other methods we focus on the DAG structure learning part because it is somewhat easier to benchmark a DAG than a factor model. However, we should stress that DAG learning is just one component of SLIM. Both types of model and their comparison are important, as will be illustrated through the experiments. For the reanalysis of flow cytometry data using our models, quantitative model comparison favors the DAG with latent variables rather than the standard factor model or the pure DAG, which was the paradigm used in the structure learning approach of \citet{sachs05}.
The first two experiments consist of extensive tests using artificial data in a setup originally from LiNGAM and network structures taken from the Bayesian net repository. We test the features of SLIM and compare with LiNGAM and some other methods in settings where they have proved to work well. The third set of experiments addresses model comparison, the fourth and fifth present results for our DAG with latent variables and the non-linear DAG (SNIM) on both artificial and real data. The sixth uses real data previously published by \citet{sachs05} and the last one provides simple results for a factor model using Gaussian process priors for temporal smoothness (CSLIM), tested on a time series gene expression data set \citep{kao04}. In all cases we ran 10000 samples after a burn-in period of 5000 for the factor model, and a single chain with 3000 samples and 1000 as burn-in iterations for the DAG, i.e.\ $N_s=19000$ used in the computational cost comparison. As a summary statistic we use median values everywhere, and Laplace distributions for the latent factors if not stated otherwise.
\subsection{Artificial data}
We evaluate the performance of our model against LiNGAM\footnote{Matlab package (v.1.42) available at \url{http://www.cs.helsinki.fi/group/neuroinf/lingam/}.}, using the artificial model generator presented and fully explained in \citet{shimizu06}. Concisely, the generator produces both dense and sparse networks with different degrees of sparsity, ${\bf Z}$ is generated from a heavy-tailed non-Gaussian distribution through a generalized Gaussian distribution with zero mean, unit variance and random shape, ${\bf X}$ is generated recursively using equation \eqref{eq:PBxCz} with $m=0$ and then randomly permuted to hide the correct order, ${\bf P}$. Approximately, half of the networks are fully connected while the remaining portion comprises sparsity levels between $10\%$ and $80\%$. Having dense networks ($0\%$ sparsity) in the benchmark is crucial because in such cases the correct order of the variables is unique, thus more difficult to find. This setup is particularly challenging because the model needs to identify both dense and sparse models. For the experiment we have generated $1000$ different dataset/models using $d=\{5,10\}$, $N=\{200,500,1000,2000\}$ and the DAG was selected using the median of the training likelihood, $p({\bf X}|{\bf P}_\mathrm{r}^{(k)},{\bf R}^{(k)},{\bf B}^{(k)},{\bf C}_D^{(k)},{\bf Z},\bm{\Psi},\cdot)$, for $k=1,\ldots,m_\mathrm{top}$.
\begin{figure}
\caption{Ordering accuracies for LiNGAM suite using $d=5$ in (a,b) and $d=10$ in (c,d). (a,c) Total correct ordering rates where DENSE is our factor model without sparsity prior and DS corresponds to DENSE but using the deterministic ordering search used in LiNGAM. (b,c) Correct ordering rate vs. candidates from SLIM. The crosses and horizontal lines correspond to LiNGAM while the triangles are accumulated correct orderings across candidates used by SLIM.}
\label{fg:oerr5}
\label{fg:oerr5c}
\label{fg:oerr10}
\label{fg:oerr10c}
\label{fg:lingam_oerrs}
\end{figure}
\paragraph{Order search.} With this experiment we want to quantify the impact of using sparsity, stochastic ordering search and more than one ordering candidate, i.e.\ $m_\mathrm{top}=10$ in total. Figure \ref{fg:lingam_oerrs} evaluates the proportion of correct orderings for different settings. We have the following abbreviations for this experiment: DENSE is our factor model without sparsity prior, i.e.\ assuming that $p(r_{ij}=1)=1$ a priori. DS (deterministic search) assumes no sparsity as in DENSE but replaces our stochastic search for permutations with the deterministic approach used by LiNGAM, i.e.\ we replace the M-H update from equation \eqref{eq:rlik} by the procedure described next: after inference we compute ${\bf D}^{-1}$ followed by a column permutation search using the Hungarian algorithm and a row permutation search by iterative pruning until getting a version of ${\bf D}$ as triangular as possible \citep{shimizu06}. Several comments can be made from the results: (i) For $d=5$ there is no significant gain for increasing $N$, mainly because the size of the permutation space is small, i.e.\ $5!$. (ii) The difference in performance between SLIM and DENSE is not significant because we look for triangular matrices in a probabilistic sense; hence there is no real need for exact zeros but just very small values. This does not mean that the sparsity in the factor model is unnecessary; on the contrary, we still need it if we want to have readily interpretable mixing matrices. (iii) Using more than one ordering candidate considerably improves the total correct ordering rate, e.g.\ by almost $30\%$ for $d=5, \ N=200$ and $35\%$ for $d=10, \ N=500$. (iv) The number of accumulated correct orderings found saturates as the number of candidates used increases, suggesting that further increasing $m_\mathrm{top}$ will not considerably change the overall results. 
(v) The number of correct orderings tends to accumulate on the first candidate when $N$ increases since the uncertainty of the estimation of the parameters in the factor model decreases accordingly. (vi) When the network is not dense, it could happen that more than one candidate has a correct ordering, hence the total rates (triangles) are not just the sum of the bar heights in Figures \ref{fg:oerr5c} and \ref{fg:oerr10c}. (vii) It seems that except for $d=10, \ N=5000$ it is enough to consider just the first candidate in SLIM to obtain as many correct orderings as LiNGAM does. (viii) From Figures \ref{fg:oerr5} and \ref{fg:oerr10}, the three variants of SLIM considered perform better than LiNGAM, even when using the same single candidate ordering search proposed by \citet{shimizu06}. (ix) In some cases the difference between SLIM and LiNGAM is very large, for example, for $d=10$ using two candidates and $N=1000$ is enough to obtain as many correct orderings as LiNGAM with $N=5000$.
\begin{figure}
\caption{Performance measures for LiNGAM suite. Results include the settings: $d=\{5,10\}$, $N=\{200,500,1000,2000\}$, four model selectors for LiNGAM (bootstrap, Wald, Bonferroni and Wald + $\chi^2$ statistics) and seven $p$-value cutoffs for the statistics used in LiNGAM (0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5). ORACLE corresponds to oracle results for SLIM, both computed for two settings: diffuse $\beta_m=0.99$ and sparse $\beta_m=0.1$ priors. Markers close to the top-left corner denote better results on average.}
\label{fg:lingam_suite}
\end{figure}
\paragraph{DAG learning.} Now we evaluate the ability of our model to capture the DAG structure in the data, provided the permutation matrices obtained in the previous stage as a result of our stochastic order search. Results are summarized in Figure \ref{fg:lingam_suite} using receiver operating characteristic (ROC) curves. The true and false positive rates are averaged over the number of trials (1000) for each setting to make the scaling in the plots more meaningful given the various levels of sparsity considered. The rates are computed in the usual way, however it must be noted that the true number of absent links in a network can be as large as $d(d-1)$, i.e.\ twice the number of links in a DAG, because in the case of an estimated DAG based on a wrong ordering the number of false positives can sum up to $d(d-1)/2$ even if the true network is not empty. For LiNGAM we use four different statistics to prune the DAG after the ordering has been found, namely bootstrapping, Wald, Bonferroni and Wald with second order $\chi^2$ model fit test. In every case we run LiNGAM for 7 different $p$-value cutoffs, namely, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1 and 0.5 to build the ROC curve. For SLIM we consider the two settings for $\beta_m$ discussed in Section \ref{sc:sse}, i.e.\ a diffuse prior supporting the existence of dense graphs, $\beta_m=0.99$ and $\beta_m=0.1$. In order to test how good SLIM is at selecting one DAG out of the $m_\mathrm{top}$ candidates, we also report the oracle results under the name of ORACLE, where in every case we select the candidate with less error instead of $\operatornamewithlimits{argmax}_k \ p({\bf X}|{\bf P}_\mathrm{r}^{(k)},{\bf R}^{(k)},{\bf B}^{(k)},{\bf C}_D^{(k)},{\bf Z},\bm{\Psi},\cdot)$. Using $\beta_m=0.99$ is not very useful in practice because in a real situation we expect that the underlying DAG is sparse, however the LiNGAM suite has as many dense graphs as sparse ones making $\beta_m=0.1$ a poor choice. 
From Figure \ref{fg:lingam_suite}, it is clear that for $\beta_m=0.99$, SLIM is clearly superior, providing the best true positive rate (TPR) - false positive rate (FPR) tradeoff. For $\beta_m=0.1$ there is no real difference between SLIM and some settings of LiNGAM (Wald and Bonferroni). Concerning SLIM's model selection procedure, it can be seen that the difference between SLIM and ORACLE nicely decreases as the number of observations increases. We also tested the DAG learning procedure in SLIM when the true ordering is known (results not shown) and we found only a very small difference compared to ORACLE. It is important to mention that further increasing or reducing $\beta_m$ does not significantly change the results shown; this is because $\beta_m$ does not fully control the sparsity of the model, thus even for $\beta_m=1$ the model will be still sparse due to element-wise link confidence, $\alpha_m$. As for LiNGAM, it seems that Wald performs better than Wald $+ \ \chi^2$, however just by looking at Figure \ref{fg:lingam_suite}, it is to be expected that for larger $N$ the latter performs better because the Wald statistic alone will tend to select more dense models.
\begin{figure}
\caption{Ground truth and estimated structures. (a) Ground truth mixing matrix. (b) Estimated mixing matrix using our sparse factor model. Note the sign ambiguity in some of the columns. (c) First 50 (out of 92) ordering candidates produced by our method during inference and their frequency, the first $m_\mathrm{top}$ candidates were used to learn DAGs. (d) Ground truth DAG. (e) Top candidate estimated using SLIM. (f) Estimated median weights for the DAG including $95\%$ credible intervals and ground truth (squares). (g) Summary of link probabilities measured as $\eta_{ij}=p(r_{ij}=1|{\bf X},\cdot)$.}
\label{fg:singletrue_A}
\label{fg:singlesFA_A}
\label{fg:singlecand}
\label{fg:singletrue_B}
\label{fg:singlesFA_B}
\label{fg:singleebars}
\label{fg:singleetabars}
\label{fg:singlenets}
\end{figure}
\paragraph{Illustrative example.} Finally we want to show some of the most important elements of SLIM taking one successfully estimated example from the LiNGAM suite. Figure \ref{fg:singlenets} shows results for a particular DAG with $10$ variables obtained using $500$ observations, see Figures \ref{fg:singletrue_B} and \ref{fg:singlesFA_B} for the ground truth and the estimated DAG, respectively. True and estimated mixing matrices ${\bf D}$ for the equivalent factor model are also shown in Figures \ref{fg:singletrue_A} and \ref{fg:singlesFA_A}, respectively. In total our algorithm produced 92 orderings out of $3.6\times10^6$ possible, from which all $m_\mathrm{top}=10$ candidates were correct. Figure \ref{fg:singlecand} shows the first 50 candidates and their frequency during sampling, the shaded area encloses the $m_\mathrm{top}=10$ candidates. From Figure \ref{fg:singleebars} we see that the elements of ${\bf B}$ are correctly estimated and their credible intervals are small, mainly due to the lack of model mismatch. Figure \ref{fg:singleetabars} shows a good separation between zero and non-zero elements of ${\bf B}$ as summarized by $p(r_{ij}=1|{\bf X},\cdot)$. It is worthwhile mentioning that using $\beta_m=0.99$ instead of $\beta_m=0.1$ in this example, still produces the right DAG, although the separation between zero and non-zero elements in Figure \ref{fg:singleetabars} will be smaller and with higher uncertainty, i.e.\ larger credible intervals.
\subsection{Bayesian networks repository}
Next we want to compare our method against LiNGAM on some realistic structures. We consider 7 well known benchmark structures from the Bayesian network repository\footnote{Network structures available at \url{http://compbio.cs.huji.ac.il/Repository/}.}, namely alarm, barley, carpo, hailfinder, insurance, mildew and water ($d=$ 37, 48, 61, 56, 27, 35, 32 respectively). Since we do not have continuous data for any of the structures, we generated 10 datasets of size $N=500$ for each of them using heavy-tailed distributions with different parameters and equation \eqref{eq:PBxCz} with $m=0$, in a similar way as we did for the previous set of experiments, with ${\bf R}$ set to the ground truth and ${\bf B}$ from $\mathrm{sign}(\mathcal{N}(0,1))+\mathcal{N}(0,0.2)$. For LiNGAM, we only use Wald statistics because as seen in the previous experiment, it performs significantly better than bootstrapping. Again, we estimate models for different $p$-value cutoffs (0.0005, 0.001, 0.005, 0.01, 0.05, 0.1 and 0.5). For SLIM, we set $\beta_m=0.1$ since all the networks in the repository are sparse. Figures \ref{fg:bnrepo_roc1}, \ref{fg:bnrepo_roc2} and \ref{fg:bnrepo_rev} show averaged performance measures respectively as ROC curves and the proportion of links reversed in the estimated model due to ordering errors.
\begin{figure}
\caption{Performance measures for the Bayesian networks repository experiments. Each connected marker corresponds to a different $p$-value in LiNGAM, starting left to right from 0.005. Disconnected markers denote SLIM results. Numbers in parentheses indicate number of variables.}
\label{fg:bnrepo_roc1}
\label{fg:bnrepo_roc2}
\label{fg:bnrepo_rev}
\end{figure}
In this case, the results are mixed when looking at the performances obtained. Figure \ref{fg:bnrepo_roc2} shows that SLIM is better than LiNGAM in the larger datasets with a significant difference. Figure \ref{fg:bnrepo_roc1} shows, for the remaining four datasets, that LiNGAM is better in two cases corresponding to the insurance and mildew networks. In general, both methods perform reasonably well given the size of the problems and the amount of data used to fit the models. However, SLIM tends to be more stable when looking at the range of the true positive rates. It is important to note that the best and worst case for SLIM correspond to the largest and smallest network, respectively. We do not have a sensible explanation about why SLIM is performing that poorly on the insurance network. Figure \ref{fg:bnrepo_rev} implicitly reveals that both methods are unable to find the right ordering of the variables.
We also tried the following methods with encoded Gaussian assumptions: standard DAG search, order search, sparse candidate pruning then DAG search \citep{friedman99}, L1MB then DAG search \citep{schmidt07}, and sparse candidate pruning then order search \citep{teyssier05}. We observed (results not shown) that these methods produce similar results to those obtained by either LiNGAM or SLIM when only looking at the resulting undirected graph, i.e.\ removing the directionality of the links. Evaluation of directionality in Gaussian models is out of the question because such methods can only find DAGs up to Markov equivalence classes, thus evaluation must be made using partially directed acyclic graphs (PDAGs). It is still possible to modify some of the methods mentioned above to handle non-Gaussian data by for instance using some other appropriate conditional independence tests, however this is out of the scope of this paper.
\subsection{Model comparison}
In this experiment we want to evaluate the model selection procedure described in Section \ref{sc:ms}. For this purpose we have generated 1000 different datasets/models with $d=5$ and $N=\{500,1000\}$ following the same procedure described in the first experiment, but this time we selected the true model to be either a factor model or a DAG with equal probability. In order to generate a factor model, we basically just need to ensure that ${\bf D}$ cannot be permuted to a triangular form, so the data generated from it does not admit a DAG representation. We kept $20\%$ of the data to compute the predictive densities to then select between all estimated DAG candidates and the factor model. We found that for $N=500$ our approach was able to select true DAGs $96.78\%$ of the times and true factor models $87.05\%$, corresponding to an overall accuracy of $91.9\%$. Increasing the number of observations, i.e.\ for $N=1000$, the true DAG, true factor model rates and overall accuracy increased to $98.99\%$, $95.0\%$ and $96.99\%$, respectively. Figure \ref{fg:mcomp} shows separately the empirical log-likelihood ratio distributions obtained from the 1000 datasets for DAGs and factor models. The shaded areas correspond to the true DAG/factor model regions, with zero as their boundary. Note that when the wrong model is selected the likelihood ratio is nicely close to the boundary and the overlap of the two distributions decreases with the number of observations used, since the quality of the predictive density increases accordingly. The true DAG rates tend to be larger than for factor models because it is more likely that the latter is confused with a DAG due to estimation errors or closeness to a DAG representation, than a DAG being confused with a factor model which is naturally more general. This is precisely why the likelihood ratios tend to be larger on the factor model side of the plots. 
All in all, these results demonstrate that our approach is very effective at selecting the true underlying structure when the data is generated by one of the two hypotheses.
\begin{figure}
\caption{Log-likelihood ratio empirical distributions for, (a) $N=500$ and (b) $N=1000$. Top bars correspond to true factor models, bottom bars to true DAGs and the ratio is computed as described in Section \ref{sc:ms}. Top bars lying below zero are true factor models predicted to be better explained by DAGs, thus model comparison errors.}
\label{fg:mcomp_500}
\label{fg:mcomp_1000}
\label{fg:mcomp}
\end{figure}
\subsection{DAGs with latent variables}
We will start by illustrating the identifiability issues of the model in equation \eqref{eq:PBxCz} discussed in Section \ref{sc:idf} with a very simple example. We generated $N=500$ observations from the graph in Figure \ref{fg:toyDAGL2} and kept $20\%$ of the data to compute test likelihoods. Now, we perform inference on two slightly different models, namely, (u) where ${\bf z}'=[z'_1 \ z'_2 \ z'_L]$ is provided with Laplace distributions with unit variance, i.e.\ $\lambda=2$, and (i) where $z_1,z_2$ have Laplace distributions with unit variance and $z_L$ is Cauchy distributed. We want to show that even if both models match the true generating process, (u) is non-identifiable whereas (i) can be successfully estimated. In order to keep the experiment controlled as much as possible, we set $\beta_m=0.99$ to reflect that the ground truth is dense and we did not infer ${\bf C}_D$ and set it to the true values, i.e.\ the identity. Then, we ran 10 independent chains for each one of the models and summarized ${\bf B}$, ${\bf C}_L$, ${\bf D}$ and the test likelihoods in Figure \ref{fg:mixDAGtoy}.
Figure \ref{fg:mixDAG_B_ica1} shows that model (u) finds the DAG in Figure \ref{fg:toyDAGL2} (the ground truth) in 3 cases, and in the remaining 7 cases it finds the DAG in Figure \ref{fg:toyDAGL1}. Note also that the test likelihoods in Figure \ref{fg:mixDAG_lik_ica1} are almost identical, as must be expected due to the lack of identifiability of the model, so they cannot be used to select among the two alternatives. Model (i) finds the right structure all the times as shown in Figure \ref{fg:mixDAG_B_ica2}. The mixing matrix of the equivalent factor model, ${\bf D}$ is shown in Figures \ref{fg:mixDAG_D_ica1} and \ref{fg:mixDAG_D_ica2} for (u) and (i), respectively. In Figure \ref{fg:mixDAG_D_ica1}, the first and third column of ${\bf D}$ exchange positions because all the components of ${\bf z}$ have the same distribution, which is not the case of Figure \ref{fg:mixDAG_D_ica2}. The small quantities in ${\bf D}$ are due to estimation errors when computing $b_{21}c_{1L}+c_{2L}$, and this cancels out in the true model. The sign changes in Figures \ref{fg:mixDAG_B_ica1} and \ref{fg:mixDAG_B_ica2} are caused by the sign ambiguity of ${\bf z}_L$ in the product ${\bf C}_L{\bf z}_L$. We also tested the alternative model in Figure \ref{fg:toyDAGL1} obtaining equivalent results, i.e.\ 4 successes for model (u) and 10 for model (i). This small example shows how non-identifiability may lead to two very different DAG solutions with distinct interpretations of the data.
\begin{figure}
\caption{Identifiability experiment for the DAG with latent variables. Connectivities ${\bf B}$ and ${\bf C}_L$ are shown for (u) in (a) and (i) in (d). Equivalent mixing matrix ${\bf D}$ for (u) in (b) and for (i) in (e). Test likelihoods for (u) and (i) are shown in (c) and (f) respectively. The first column in (a,b,d,e) denoted as T is the ground truth. Dark and light boxes are negative and positive numbers, accordingly.}
\label{fg:mixDAG_B_ica1}
\label{fg:mixDAG_D_ica1}
\label{fg:mixDAG_lik_ica1}
\label{fg:mixDAG_B_ica2}
\label{fg:mixDAG_D_ica2}
\label{fg:mixDAG_lik_ica2}
\label{fg:mixDAGtoy}
\end{figure}
\citet{hoyer08a} recently presented an approach to DAGs with latent variables based on LiNGAM \citep{shimizu06}. Their procedure uses probabilistic ICA and bootstrapping to infer the equivalent factor model distribution $p({\bf D}|{\bf X})$, then greedily selects $m$ columns of ${\bf D}$ to be latent variables until the remaining ones can be permuted to triangular and the resulting DAG is compatible with the faithfulness assumption \citep[see,][]{pearl00}. If we assume that their procedure is able to find the exact ${\bf D}$ for the graphs in Figures \ref{fg:toyDAGL1} and \ref{fg:toyDAGL2}, due to the faithfulness assumption, the DAG in Figure \ref{fg:toyDAGL1} will be always selected regardless of the ground truth\footnote{See \citet{robins03} for a very interesting explanation of faithfulness using the same example presented here.}. In practice, the solution obtained for ${\bf D}$ is dense and needs to be pruned, hence we rely on $p({\bf X},{\bf D})$ being larger for the ground truth in Figure \ref{fg:toyDAGL2} than for the graph in Figure \ref{fg:toyDAGL1}, however since both models differ only by a permutation of the columns of ${\bf D}$, they have exactly the same joint density $p({\bf X},{\bf D})$ --- they are non-identifiable, thus the algorithm will select one of the options by chance. Since the source of non-identifiability of their algorithm is permutations of columns of ${\bf D}$, it does not matter whether or not probabilistic ICA matches the distribution of the underlying process as in our model. Anyway, we decided to try models (u) and (i) described above using the algorithm just described\footnote{Matlab package (v.1.1) freely available at \url{http://www.cs.helsinki.fi/group/neuroinf/lingam/}.}. Regardless of the ground truth, Figures \ref{fg:toyDAGL1} or \ref{fg:toyDAGL2}, the algorithm always selected the DAG in Figure \ref{fg:toyDAGL2}, which in this particular case is due to $p({\bf X},{\bf D})$ being slightly larger for the denser model.
Now we test the model in a more general setting. We generate 100 models and datasets of size $N=500$ using a similar procedure to the one in the artificial data experiment. The models have $d=5$ and $m=1$, no dense structures are generated and the distributions for ${\bf z}$ are heavy-tailed, drawn from a generalized Gaussian distribution with random shape. For SLIM, we use the following settings, $\beta_m=0.1$, ${\bf z}_D$ is Laplace with unit variances and ${\bf z}_L$ is Cauchy. Furthermore, we have doubled the number of iterations of the DAG sampler, i.e.\ 6000 samples and a burn-in period of 2000, so as to compensate for the additional parameters that need to be inferred due to inclusion of latent variables. Our ordering search procedure was able to find the right ordering 78 out of 100 times. The true positive rates, true negative rates and median AUC are 88.28$\%$, 96.40$\%$ and 0.929, respectively, corresponding to approximately 1.5 structure errors per network. Using \citet{hoyer08a} we obtained 1 true ordering out of 100, 91.63$\%$ true positive rate, 65.18$\%$ true negative rate and 0.800 median AUC, showing again the preference of the algorithm for denser models. We regard these results as very satisfactory for both methods considering the difficulty of the task and the lack of identifiability of the model by \citet{hoyer08a}.
\subsection{Non-linear DAGs}
For Sparse Non-linear Identifiable Modeling (SNIM) described in Section \ref{sc:snim}, first we want to show that our method can find and select from DAGs with non-linear interactions. We used the artificial network from \citet{hoyer08} shown here in Figure \ref{fg:SNIMtoy_gt} and generated 10 different datasets corresponding to $N=100$ observations, each time using driving signals sampled from different heavy-tailed distributions. Since we do not yet have an ordering search procedure for non-linear DAGs, we perform DAG inference for all possible orderings and datasets. The results obtained are evaluated in two ways, first we check if we can find the true connectivity matrix when the ordering is correct. Second, we need to validate that the likelihood is able to select the model with less error and correct ordering among all possible candidates so we can use it in practice. Figures \ref{fg:SNIMtoy_err}, \ref{fg:SNIMtoy_lik} and \ref{fg:SNIMtoy_telik} show the median errors, training and test likelihoods (using 20\% of the data) for each one of the orderings, respectively. In this particular case we only have two correct orderings, namely, $(1,2,3,4)$ and $(1,3,2,4)$, corresponding to the first and second candidates in the plots. Figure \ref{fg:SNIMtoy_err} shows that the error is zero only for the two correct orderings, then our model is able to infer the structure once the right ordering is given as desired. As a result of the identifiability, data and test likelihoods shown in Figures \ref{fg:SNIMtoy_lik} and \ref{fg:SNIMtoy_telik} correlate nicely with the structural error in Figure \ref{fg:SNIMtoy_err}. This means that we can use the likelihoods as a proxy for the structural error just as in the linear case.
\begin{figure}
\caption{Non-linear DAG artificial example. (a) Network with non-linear interactions between observed nodes used as ground truth. (b,c,d) Median error, likelihood and test likelihood for all possible orderings and 10 independent repetitions. The plots are sorted according to number of errors and only the first two are valid according to the ground truth in (a), i.e.\ $(1,2,3,4)$ and $(1,3,2,4)$. Note that when the error is zero in (b) the likelihoods are larger with respect to the remaining orderings in (c) and (d).}
\label{fg:SNIMtoy_gt}
\label{fg:SNIMtoy_err}
\label{fg:SNIMtoy_lik}
\label{fg:SNIMtoy_telik}
\end{figure}
We also tested the network in Figure \ref{fg:SNIMtoy_gt} using three non-linear structure learning procedures namely greedy standard hill-climbing DAG search, the \quotes{ideal parent} algorithm \citep{elidan07} and kernel PC \citep{tillman09}. The first two methods use a scaled sigmoid function to capture the non-linearities in the data. In particular, they assume that a variable $x$ can be explained as scaled sigmoid transformation of a linear combination of its parents. The best median result we could obtain after tuning the parameters of the algorithms was 2 errors and 2 reversed links\footnote{Maximum number of iterations, random restarts to avoid local minima, regularization of the non-linear regression and the number of ranking candidates in ideal parent algorithm.}. Both methods perform similarly in this particular example, the only significant difference being their computational cost, which is considerably smaller for the \quotes{ideal parent} algorithm, as it was also pointed out by \citet{elidan07}. The reason why we consider that these algorithms do not perform well here is that the sigmoid function can be very limited at capturing certain non-linearities due to its parametric form whereas the nonparametric GP gives flexible non-linear functions. The third method uses non-linear independence tests together with non-linear regression (relevance vector machines) and the PC algorithm to produce mixed DAGs. The best median result we could get in this case was 2 errors, 0 reversed links and 1 bidirectional link. These three non-linear DAG search algorithms have the great advantage of not requiring exhaustive enumeration of the orderings as our method and others available in the literature. \citet{zhang09} provides theoretical evidence of the possibility for flexible non-linear modeling without exhaustive order search but not a way to do it in practice. 
Yet another possibility not tried here will be to take the best parts of both strategies by taking the outcome of the non-linear DAG search algorithm and refine it using a nonparametric method like SNIM. However, it is not entirely clear how the non-linearities can affect the ordering of the variables. In the remaining part of this section we only focus on tasks for pairs of variables where the ordering search is not an issue.
The dataset known as Old Faithful \citep{asuncion07} contains 272 observations of two variables measuring waiting time between eruptions and duration of eruptions for the Old Faithful geyser in Yellowstone National Park, USA. We want to test the two possible orderings, duration $\rightarrow$ interval and interval $\rightarrow$ duration. Figures \ref{fg:faithful_blik} and \ref{fg:faithful_btlik} show training and test likelihood boxplots for 10 independent randomizations of the dataset with $20\%$ of the observations used to compute test likelihoods. Our model was able to find the right ordering, i.e.\ duration $\rightarrow$ interval in all cases when the test likelihood was used but only 7 times with the training likelihood due to the proximity of the densities, see Figure \ref{fg:faithful_dlik}. On the other hand, the predictive density is very discriminative, as shown for instance in Figure \ref{fg:faithful_dtlik}. This is not a very surprising result since making the duration a function of the interval results in a very non-linear function, whereas the alternative function is almost linear (data not shown).
\begin{figure}
\caption{Testing $\{$duration, interval$\}$ in Old Faithful dataset. (a,b) Data and test likelihood boxplots for 10 independent repetitions. (c,d) Training and test likelihood densities for one of the repetitions. The test likelihood separates consistently the two tested hypotheses.}
\label{fg:faithful_blik}
\label{fg:faithful_btlik}
\label{fg:faithful_dlik}
\label{fg:faithful_dtlik}
\end{figure}
Abalone is one of the datasets from the UCI ML repository \citep{azzalini90}. It is targeted to predict the age of abalones from a set of physical measurements. The dataset contains 9 variables and 4177 observations. First we want to test the pair $\{$age, length$\}$. For this purpose, we use 10 subsets of $N=200$ observations to build the models and compute likelihoods just as before. Figures \ref{fg:abalone_blik} and \ref{fg:abalone_btlik} show training and test likelihoods respectively as boxplots. Both training and test likelihoods pointed to the right ordering in all 10 repetitions. In this experiment, the separation of the densities for the two hypotheses considered is very large, making age $\rightarrow$ length significantly better supported by the data. Figures \ref{fg:abalone_dlik} and \ref{fg:abalone_dtlik} show predictive densities for one of the trials indicating again that age $\rightarrow$ length is consistently preferred. We also decided to try another three sets of hypotheses: $\{$age, diameter$\}$, $\{$age, weight$\}$ and $\{$age, length, weight$\}$ for which we found the right orderings $\{10,10\}$, $\{10,10\}$ and $\{10,6\}$ out of 10 by looking at the training and the test likelihoods, respectively. In the model with three variables, increasing the number of observations used to fit the model from $N=200$ to $N=400$, increased the number of cases in which the test likelihood selected the true hypothesis from 6 to 8 times, which is more than enough to make a decision about the leading hypothesis.
\begin{figure}
\caption{Testing $\{$length, age$\}$ in Abalone dataset. (a,b) Data and test likelihood boxplots for 10 independent repetitions. (c,d) Training and test likelihood densities for one of the repetitions. The likelihoods largely separate the two tested hypotheses.}
\label{fg:abalone_blik}
\label{fg:abalone_btlik}
\label{fg:abalone_dlik}
\label{fg:abalone_dtlik}
\end{figure}
To conclude this set of experiments we test SNIM against another three recently proposed methods\footnote{Matlab packages available at \url{http://webdav.tuebingen.mpg.de/causality/}.}, namely Non-linear Additive Noise (NAN) model \citep{hoyer08}, Post-Non-Linear (PNL) model \citep{zhang09} and Informational Geometric Causal Inference (IGCI) \citep{daniusis10}, using an extended version of \quotes{cause-effect pairs} task for the NIPS 2008 causality competition\footnote{Data available at \url{http://webdav.tuebingen.mpg.de/cause-effect/}.} \citep{mooji10}. The task consists of distinguishing the cause from the effect of 51 different pairs of observed variables. NAN and PNL rely on an independence test \citep[HSIC, Hilbert-Schmidt Independence Criterion,][]{gretton08} to decide which of the two variables is the cause. NAN was able to take 10 decisions all being accurate. PNL was accurate 40 times out of 42 decisions made. IGCI and SNIM obtained an accuracy of 40 and 39 pairs, respectively\footnote{Results for NAN, PNL and IGCI were taken from \citet{daniusis10} because we were unable to entirely reproduce their results with the software provided by the authors.}. The results indicate (i) that NAN and PNL are very accurate when the independence test used is able to reach a decision and (ii) in terms of accuracy, the results obtained by PNL, IGCI and SNIM are comparable. For SNIM we decide based upon the test likelihood and for IGCI we used a uniform reference measure (rescaling the data between 0 and 1). From the four tested methods we can identify two main trends. One is to explicitly model the data and decide the cause-effect direction using independence tests or test likelihoods like in NAN, PNL and SNIM. The second is to directly define a measure for directionality as in IGCI. 
The first option has the advantage of being able to convey more information about the data at hand whereas the second option is orders of magnitude faster than the other three because it only tests for directionality.
\subsection{Protein-signaling network}
This experiment demonstrates a typical application of SLIM in a realistic biological large $N$, small $d$ setting. The dataset introduced by \citet{sachs05} consists of flow cytometry measurements of 11 phosphorylated proteins and phospholipids (raf, erk, p38, jnk, akt, mek, pka, pkc, pip$_2$, pip$_3$, plc). Each observation is a vector of quantitative amounts measured from single cells. Data was generated from a series of stimulatory cues and inhibitory interventions. Hence the data is composed of three kinds of perturbations: general activators, specific activators and specific inhibitors. Here we are only using the 1755 observations --- clearly non-Gaussian, e.g.\ see Figure \ref{fg:sachsboxplot} --- corresponding to general stimulatory conditions. It is clear that using the whole dataset, i.e.\ using specific perturbations, will produce a richer model, however handling interventional data is out of the scope of this paper mainly because handling that kind of data with a factor model is not an easy task. Thus our current order search procedure is not appropriate. Focused only on the observational data, we want to test all the possibilities of our model in this dataset, namely, standard factor models, pure DAGs, DAGs with latent variables, non-linear DAGs and quantitative model comparison using test likelihoods. The textbook DAG structure taken from \citet[see Figure 2 and Table 3,][]{sachs05} is shown in Figure \ref{fg:tsachs} and the models are estimated using the true ordering and SLIM in Figures \ref{fg:gsachs} and \ref{fg:osachs}, respectively.
\begin{figure}
\caption{Result for protein-signaling network data. (a) Textbook signaling network as reported in \citet{sachs05}. Estimated structure using SLIM: (b) using the true ordering, (c) obtaining the ordering from the stochastic search, (d) top DAG with 2 latent variables and (e) the runner-up (in test likelihood). False positives are shown in red dashed lines and reversed links in green dotted lines. Below each structure we also report the median test likelihood (larger is better).}
\label{fg:tsachs}
\label{fg:gsachs}
\label{fg:osachs}
\label{fg:lsachs1}
\label{fg:lsachs2}
\end{figure}
The DAG found using the right ordering of the variables shown in Figure \ref{fg:gsachs} turned out to be the same structure found by the discrete Bayesian network from \citet{sachs05} without using interventional data (see supplementary material, Figure 4(a)), with one important difference: the method presented by \citet{sachs05} is not able to infer the directionality of the links in the graph without interventional data, i.e.\ their resulting graph is undirected. SLIM in Figure \ref{fg:osachs} finds a network almost equal to the one in Figure \ref{fg:gsachs} apart from one reversed link, plc $\rightarrow$ pip$_3$. Surprisingly, this was also found reversed by \citet{sachs05} using interventional data. In addition, there is just one false positive, the pair $\{$jnk, p38$\}$, even with a dedicated latent variable in the factor model mixing matrix shown in Figure \ref{fg:sachsfacs}, thus we cannot attribute such a false positive to estimation errors. A total of 211 ordering candidates were produced during the inference out of approximately $10^7$ possible and only $m_\mathrm{top}=10$ of them were used in the structure search step. Note from Figure \ref{fg:lr_sachs} that the predictive densities for the DAGs correlate well with the structural accuracy, apart from candidate 8. Candidates 3 and 8 have the same number of structural errors, however candidate 8 has 3 reversed links instead of 1 as shown in Figure \ref{fg:osachs}. The predictive densities for the best candidate, the third in Figure \ref{fg:lr_sachs}, are shown in Figure \ref{fg:lik_sachs} and suggest that the factor model fits the data better. This makes sense considering that the estimated DAG in Figure \ref{fg:osachs} is a substructure of the ground truth. 
We also examined the estimated factor model in Figure \ref{fg:sachsfacs} and we found that several factors could correspond to three unmeasured proteins: pi3k in factors 9 and 11, m$_3$ (mapkkk, mek4/7) and m$_4$ (mapkkk, mek3/6) in factor 7, and ras in factors 4 and 6.
\begin{figure}
\caption{Results for protein-signaling network data. (a) Boxplot for each one of the 11 variables in the dataset. (b) Estimated factor model. (c) Test likelihoods for the best DAG (dashed) and the factor model (solid). (d) Test likelihoods (squares) and structure errors (circles) included reversed links for all candidates. (e) Non-linear variables $y$ obtained as a function of the observed variables $x$ for pip3 and pkc. Each dot in the plot is an observation and the solid lines are 95$\%$ credible intervals.}
\label{fg:sachsboxplot}
\label{fg:sachsfacs}
\label{fg:lik_sachs}
\label{fg:lr_sachs}
\label{fg:sachsnlsig}
\end{figure}
We also wanted to assess the performance of our method and several others using this dataset, including LiNGAM and those mentioned in the Bayesian network repository experiment, even knowing that this dataset contains non-Gaussian data. We found that all of them have similar results in terms of true and false positive rates when comparing them to SLIM. However the number of reversed links was not in any case less than 6, which corresponds to more than $50\%$ of the true positives found in every case. This means that they are essentially able to find the skeleton in Figure \ref{fg:gsachs}. Besides, we do not have knowledge of any other method for DAG learning using only the observational data that also provides results substantially better than the ones shown in Figure \ref{fg:osachs}. The poor performance of LiNGAM is difficult to explain but the large amount of reversed links may be due to the FastICA based deterministic ordering search procedure.
We also tried DAG models with latent variables in this dataset. The results obtained by the DAG with 2 a priori assumed latent variables are shown in Figures \ref{fg:lsachs1} and \ref{fg:lsachs2}, corresponding to the first and second DAG candidates in terms of test likelihoods. The first option is different from the pure DAG in Figure \ref{fg:osachs} only in the reversed link, p38 $\rightarrow$ pkc, but captures some of the behavior of pi3k and ras in l$_1$ and l$_2$ respectively. It is very interesting to see how, due to the link between pi3k and ras, which our model cannot represent, the second inferred latent variable is detecting signals pointing towards pip$_2$ and plc. We also considered a second option because l$_1$ in the top model is only connected to a single variable pip$_3$ and thus could be regarded as an estimation error since it can be easily confounded with a driving signal. Comparing Figures \ref{fg:osachs} and \ref{fg:lsachs2} reveals two differences in the observed part, a false negative pip$_3$ $\rightarrow$ plc and a new true (reversed) positive mek $\rightarrow$ pka. This candidate is particularly interesting because the first latent variable captures the connectivity of pi3k while connecting itself to plc due to the lack of connectivity between pip$_3$ and plc. Moreover, the second latent variable resembles ras and the link between pi3k and ras as a link from itself to pip$_3$. In both solutions there is a connection between l$_2$ and mek that might be explained as a link through a phosphorylation of raf different from the observed one, i.e.\ ras$_{s259}$. In terms of median test likelihoods, the model in Figure \ref{fg:lsachs1} is only marginally better than the factor model in Figure \ref{fg:sachsfacs} and in turn marginally worse than the DAG in Figure \ref{fg:lsachs2}.
For SNIM we started from the true ordering of the variables but we could not find any improvement compared to the structure in Figure \ref{fg:osachs}. In particular there are only two differences, plc $\rightarrow$ pip$_2$ and jnk $\rightarrow$ p38 are missing, meaning that at least in this case there are no false positives in the non-linear DAG. Looking at the parameters of the covariance function used, $\bm{\upsilon}$ (not shown) with acceptance rates of approximately $20\%$ and reasonable credible intervals, we can say that our model found almost linear functions since all the parameters of the covariance functions are rather small. Figure \ref{fg:sachsnlsig} shows two particular non-linear variables learned by the model, corresponding to pip$_3$ and plc. In each case the uncertainty of the estimation nicely increases with the magnitude of the observed variable and although the functions are fairly linear they resemble the saturation effect we can expect in this kind of biological data. Of the three non-linear methods not requiring exhaustive order search described in the previous section (DAG search, \quotes{ideal parent} and kPC), the best result we obtained was 11 structural errors, 10 true positives, 34 true negatives, 2 reversed and 6 bidirectional links for kPC vs 12, 9, 34, 1 and 0 by SLIM and 12, 8, 35, 0 and 0 by SNIM.
\subsection{Time series data}
We illustrate the use of Correlated Sparse Linear Identifiable Modeling (CSLIM) on the dataset introduced by \citet{kao04} consisting of temporal gene expression profiles of \emph{E.~coli} during transition from glucose to acetate measured using DNA microarrays. Samples from 100 genes were taken at 5, 10, 15, 30, 60 minutes and every hour until 6 hours after transition\footnote{Data available at \url{http://www.seas.ucla.edu/~liaoj/NCA_module_Data}.}. The general goal is to reconstruct the unknown transcription factor activities from the expression data and some prior knowledge. In \citet{kao04} the prior knowledge consisted of taking the set of transcription factors (ArcA, CRP, CysB, FadR, FruR, GatR, IcIR, LeuO, Lrp, NarL, PhoB, PurB, RpoE, RpoS, TrpR and TyrR) controlling the observed genes and the (up-to-date) connectivity between genes and transcription factors from RegulonDB\footnote{\url{http://regulondb.ccg.unam.mx/}.} \citep{Gama-Castro08}. From this setting, we can immediately relate the transcription factors with ${\bf Z}$, such a connectivity with ${\bf Q}_L$, and their relative strengths with ${\bf C}_L$, hence the problem can be seen as a standard factor model. In \citet{kao04} they applied a method called Network Component Analysis (NCA), that uses a least-squares based algorithm to solve a problem similar to the one in equation \eqref{eq:PBxCz}, but assuming that the sparsity pattern (masking matrix ${\bf Q}_L$) of ${\bf C}_L$ is fixed and known. It is well-known that the information in RegulonDB is still incomplete and hard to obtain for organisms other than \emph{E.~coli}. Our goal here is thus to obtain similar transcription factor activities to those found by \citet{kao04} without using the information from RegulonDB, but taking into account that the data at hand is a time series by letting each transcription factor activity have an independent Gaussian process prior as described for CSLIM in Section \ref{sc:cslim}. 
We will not attempt to use ${\bf Q}_L$ to recover the ground truth connectivity information since RegulonDB is collected from a wide range of experimental conditions and not only from the transcriptional activity produced by the \emph{E.~coli} during its transition from glucose to acetate. The results are shown in Figure \ref{fg:ecoli}.
\begin{figure}
\caption{Results for \emph{E.~coli} dataset. Mixing matrices estimated using: (a) NCA, (b) our formulation when restricting ${\bf Q}_L$ using RegulonDB information and (c) the factor model. (d) Model comparison results using test likelihoods. The restricted model (dash-dotted line) obtained a median negative log-likelihood of $1463.4$ whereas the unrestricted model (solid line) obtained $1317.1$, suggesting no significant model preferences. (e) Estimated transcription factor activities, ${\bf Z}$. Our methods (solid and dash-dotted lines for unrestricted and restricted model respectively) produce similar results to those produced by NCA (dashed line).}
\label{fg:tecoli}
\label{fg:fRecoli}
\label{fg:nfRecoli}
\label{fg:mcecoli}
\label{fg:Zecoli}
\label{fg:ecoli}
\end{figure}
Results in Figure \ref{fg:Zecoli} show the source matrix ${\bf Z}$ recovered by our model together with those from NCA\footnote{Matlab package (v.2.3) available at \url{http://www.seas.ucla.edu/~liaoj/download.htm}.}. In this experiment we ran a single chain and collected 6000 samples after a burn-in period of 2000 samples (approximately 10 minutes in a desktop machine). Most of the profiles obtained by our method are similar to those obtained by NCA \citep{kao04}. We ran two versions of our model, one with ${\bf Q}_L$ fixed to the RegulonDB values, i.e.\ similar in spirit to NCA, and another where we infer ${\bf Q}_L$ without any restriction. The results of NCA and our model with fixed ${\bf Q}_L$ are directly comparable (up to scaling) whereas we had to match the permutation ${\bf P}_\mathrm{f}$ of the unrestricted model to those found by NCA in order to compare, using the Hungarian algorithm. Figure \ref{fg:tecoli} shows the mixing matrices obtained by NCA and our two models. Figures \ref{fg:tecoli} and \ref{fg:fRecoli} are very similar due to the restriction imposed on ${\bf Q}_L$. The mixing matrix obtained by our unrestricted model in Figure \ref{fg:nfRecoli} is clearly denser than the other two, suggesting that there are different ways of connecting genes and transcription factors and still reconstruct the transcription factor activities given the observed gene expression data. Looking at the test log-likelihood densities obtained by our two models in Figure \ref{fg:mcecoli}, they are very similar, which suggests that there is no evidence that one of the models makes a better fit on test data. In terms of Mean Squared Error (MSE), NCA obtained $0.0146$ while our model reached $0.0264$ and $0.0218$ on the restricted and unrestricted models, respectively, when using $90\%$ of the data for inference. In addition, the $95\%$ credible intervals for the MSE were $(0.0231,0.0329)$ and $(0.0164,0.0309)$ respectively. 
The latter shows again that there is no evidence that one of the three models is better than the other two, considering that: (i) NCA is trained on the entire dataset and (ii) our unrestricted model could, in principle, produce mixing matrices arbitrarily denser than the connectivity matrix extracted from RegulonDB, and thus, again in principle, lower MSE values.
\section{Discussion} \label{sc:dis}
We have proposed a novel approach called SLIM (Sparse Linear Identifiable Multivariate modeling) to perform inference and model comparison of general linear Bayesian networks within the same framework. The key ingredients for our Bayesian models are slab and spike priors to promote sparsity, heavy-tailed priors to ensure identifiability and predictive densities (test likelihoods) to perform the comparison. A set of candidate orderings is produced by stochastic search during the factor model inference. Subsequently, a linear DAG with or without latent variables is learned for each of the candidates. To the authors' knowledge this is the first time that a method for comparing such closely related linear models has been proposed. This setting can be very beneficial in situations where the prior evidence suggests both DAG structure and/or unmeasured variables in the data. We also show that the DAG with latent variables can be fully identifiable and that SLIM can be extended to the non-linear case (SNIM - Sparse Non-linear Identifiable Multivariate modeling), if the ordering of the variables is provided or can be tested by exhaustive enumeration. For example in the protein-signaling network \citep{sachs05}, the textbook ground truth suggests both DAG structure and a number of unmeasured proteins. The previous approach \citep{sachs05} only performed structure learning in pure DAGs but our results using observational data alone suggest that the data is better explained by a (possibly non-linear) DAG with latent variables. Our extensive results on artificial data showed one by one the features of our model in each one of its variants, and demonstrated empirically their usefulness and potential applicability. When comparing against LiNGAM, our method always performed at least as well in every case with a comparable computational cost. 
The presented Bayesian framework also allows easy extension of our model to match different prior beliefs about the problems at hand without significantly changing the model and its conceptual foundations, as in CSLIM and SNIM.
We believe that the priors that give rise to sparse models in the fully Bayesian inference setting, like the two-level slab (continuous) and spike (point-mass at zero) priors used, are very powerful tools for simultaneous model and parameter inference. They may be useful in many settings in machine learning where sparsity of parameters is desirable. Although the posterior distributions for slab and spike priors will be non-convex, it is our experience that inference with blocked Gibbs sampling actually has very good convergence properties. In the two-level approach, one uses a hierarchy of two slab and spike priors. The first is on the parameter and the second is on the mixture parameter (i.e.\ the probability that the parameter is non-zero). Instead of letting this parameter be controlled by a single Beta-distribution (one level approach) we have a slab and spike distribution on it with a Beta-distributed slab component biased towards one. This makes the model more parsimonious, i.e.\ the probability that parameters are zero or non-zero is closer to zero and one and parameter settings are more robust.
In the following we will discuss open questions and future directions. From the Bayesian network repository experiment it is clear that we need to improve our ordering search procedure if we want to use SLIM for problems with more than say 50 variables. This basically amounts to finding proposal distributions that better exploit the particularities of the model at hand. Another option could be to provide the proposal distribution with some notion of memory to avoid permutations with low probability and/or expand the coverage of the searching procedure.
It is well studied in the literature on sparse models that for an increasing number of observations any model tends to lose its sparsity capabilities. This is because the likelihood starts dominating the inference, making the prior distribution less informative. The easiest way to handle such an effect is to make the hyperparameters of the sparsity prior dependent on $N$. We have not explored this phenomenon in SLIM but it should certainly be taken into account in the specification of sparsity priors.
Directly specifying the distributions of the latent variables in order to obtain identifiability in the general DAG with latent variables requires having different distributions for the driving signals of the observed variables and latent variables. This may introduce model mismatch or be restrictive in some cases as one will not have this kind of knowledge a priori. We thus need more principled ways to specify distributions for ${\bf z}$ ensuring identifiability, without restricting some of its components to having a particular behavior, like having heavier tails than the driving signals for instance. We conjecture that providing ${\bf z}$ with a parameterization of Dirichlet process priors with appropriate base measures would be enough but we are not certain whether this would be sufficient in practice.
We set a priori that the components of ${\bf z}$ are independent. Although this is a very reasonable assumption, it does not allow for connectivity between latent variables as we see for example in the protein signaling network, see Figure \ref{fg:tsachs}. It is straightforward to specify such a model, although identifiability becomes even harder to ensure in this case.
We do not have an ordering search procedure for the non-linear version of SLIM. This is a necessary step since exhaustive enumeration of all possible orderings is not an option beyond say 10 variables. The main problem is that the non-linear DAG has no equivalent factor model representation so we cannot directly exploit the permutation candidates we find in SLIM. However, as long as the non-linearities are weak, one might in principle use the permutation candidates found in a factor model, i.e.\ the linear effects will determine the correct ordering of the variables.
SLIM cannot handle experimental (interventional) data, and consequently around 80\% of the data from the \citet{sachs05} study is not used. It is well-established how to learn with interventions in DAGs \citep[see][]{sachs05}. The problem remains of how to formulate effective inference with interventional data in the factor model.
\section*{Acknowledgments}
We thank the editor and the three anonymous referees for their helpful comments and discussions that improved the presentation of this paper.
\appendix
\section{Gibbs sampling} \label{ap:inf}
Given a set of $N$ observations in $d$ dimensions, the data ${\bf X}=[{\bf x}_1,\dots,{\bf x}_N]$ and $m$ latent variables, MCMC analysis is standard and can be implemented through Gibbs sampling. Note that in the following, ${\bf X}_{i:}$ and ${\bf X}_{:i}$ are rows and columns of ${\bf X}$, respectively, and $i$, $j$, $n$ are indexes for dimensions, factors and observations, respectively. In the following we describe the conditional distributions needed to sample from the standard factor model hierarchy. Below we will briefly discuss the modifications needed for the DAG.
\paragraph{Noise variance} We can sample each element of $\bm{\Psi}$ independently using
\begin{align} \label{eq:gibbsPsi}
\psi_i^{-1}|{\bf X}_{i:},{\bf C}_{i:},{\bf Z},{\bf V}_i,s_s,s_r \ \sim & \ \mathrm{Gamma}\left(\psi_i^{-1}\left|s_s+\frac{N+d}{2},s_r+u\right.\right) \ , \end{align}
where ${\bf V}_i$ is a diagonal matrix with entries $\tau_{ij}$ and
\begin{align*}
u = \frac{1}{2}({\bf X}_{i:}-{\bf C}_{i:}{\bf Z})({\bf X}_{i:}-{\bf C}_{i:}{\bf Z})^\top+\frac{1}{2}{\bf C}_{i:}{\bf V}_i^{-1}{\bf C}_{i:}^\top \ . \end{align*}
\paragraph{Factors} The conditional distribution of the latent variables ${\bf Z}$ using the scale mixtures of Gaussians representation can be computed independently for each element of $z_{jn}$ using
\begin{align} \label{eq:gibbsZ}
z_{jn}|{\bf X}_{:n},{\bf C}_{:j},{\bf Z}_{:n},\bm{\Psi},\upsilon_{jn} \ \sim \ \mathcal{N}(z_{jn}|u_{jn}{\bf C}_{:j}^\top\bm{\Psi}^{-1}\bm{\epsilon}_{\backslash jn},u_{jn}) \ , \end{align}
where $u_{jn} = ({\bf C}_{:j}^\top\bm{\Psi}^{-1}{\bf C}_{:j}+\upsilon_{jn}^{-1})^{-1}$ and $\bm{\epsilon}_{\backslash jn}={\bf X}_{:n}-{\bf C}{\bf Z}_{:n}|_{z_{jn}=0}$. If the latent factors are Laplace distributed the mixing variances $\upsilon_{jn}$ have exponential distribution, thus the resulting conditional is
\begin{align*}
\upsilon_{jn}^{-1}|z_{jn},\lambda \ \sim & \ \mathrm{IG}\left(\upsilon_{jn}^{-1}\left|\frac{\lambda}{|z_{jn}|},\lambda^2\right.\right) \ , \end{align*}
and for the Student's \emph{t}, with corresponding gamma densities as
\begin{align*}
\upsilon_{jn}^{-1}|z_{jn},\sigma^2,\theta \ \sim & \ \mathrm{Gamma}\left(\upsilon_{jn}^{-1}\left|\frac{\theta+1}{2},\frac{\theta}{2}+\frac{z_{jn}^2}{2\sigma^2}\right.\right) \ , \end{align*}
where $\mathrm{IG}(\cdot|\mu,\lambda)$ is the inverse Gaussian distribution with mean $\mu$ and scale parameter $\lambda$ \citep{chhikara89}.
\paragraph{Gaussian processes} In practice, the prior distribution for each row of the matrix ${\bf Z}$ in CSLIM has the form $z_{j1},\ldots,z_{jN} \sim \mathcal{N}(0,{\bf K}_j)$, where ${\bf K}_j$ is a covariance matrix of size $N\times N$ built using $k_{\upsilon_j,n}(n,n')$. The conditional distribution for $z_{j1},\ldots,z_{jN}$ can be computed using
\begin{align*}
z_{j1},\ldots,z_{jN}|{\bf X},{\bf C}_{j:},{\bf Z}_{\backslash j},\bm{\Psi} \ \sim \ \mathcal{N}(z_{j1},\ldots,z_{jN}|{\bf C}_{:j}^\top\bm{\Psi}^{-1}\bm{\epsilon}_{\backslash j}{\bf V} ,{\bf V}) \ , \end{align*}
where ${\bf Z}_{\backslash j}$ is ${\bf Z}$ without row $j$, ${\bf V} = ({\bf U}+{\bf K}_j^{-1})^{-1}$, ${\bf U}$ is a diagonal matrix with elements ${\bf C}_{:j}^\top\bm{\Psi}^{-1}{\bf C}_{:j}$ and $\bm{\epsilon}_{\backslash j}={\bf X}-{\bf C}{\bf Z}|_{z_{j1},\ldots,z_{jN}=0}$. The computation of ${\bf V}$ can be done in a numerically stable way by rewriting ${\bf V} = {\bf K}_j - {\bf K}_j ({\bf U}^{-1} + {\bf K}_j )^{-1} {\bf K}_j$ and then using Cholesky decomposition and back substitution to obtain in turn ${\bf L}{\bf L}^\top = {\bf U}^{-1} + {\bf K}_j$ and ${\bf L}^{-1}{\bf K}_j$. The hyperparameters of the covariance function in equation \eqref{eq:GPhyp} can be sampled using
\begin{align*}
\kappa|\bm{\upsilon},k_s,k_r \ \sim \ \mathrm{Gamma}\left(\kappa\middle|k_s+mu_s,k_r+\sum_{j=1}^m \upsilon_j\right) \ . \end{align*}
For the inverse length-scales we use Metropolis-Hastings updates with proposal $q(\upsilon_j^\star|\upsilon_j)=p(\upsilon_j^\star)$ and acceptance ratio
\begin{align*}
\xi_{\rightarrow\star}=\frac{\mathcal{N}(z_{j1},\ldots,z_{jN}|{\bf 0},{\bf K}_j^\star)}
{\mathcal{N}(z_{j1},\ldots,z_{jN}|{\bf 0},{\bf K}_j)} \ , \end{align*}
where ${\bf K}_j^\star$ is obtained using $k_{\upsilon_j^\star,n}(n,n')$. For SNIM, we only need to replace ${\bf C}$ by ${\bf B}$, ${\bf Z}$ by ${\bf Y}=[{\bf y}_1 \ \ldots {\bf y}_N]$ and $k_{\upsilon_j,n}(n,n')$ by $k_{\upsilon_i,x}({\bf x},{\bf x}')$.
\paragraph{Mixing matrix} In order to sample each $c_{ij}$ from the conditional distribution of the matrix ${\bf C}$ we use
\begin{align} \label{eq:gibbsD}
c_{ij}|{\bf X}_{i:},{\bf C}_{\backslash ij},{\bf Z}_{j:},\psi_i,\tau_{ij} \ \sim & \ \mathcal{N}(c_{ij}|u_{ij}\bm{\epsilon}_{\backslash ij}{\bf Z}_{j:}^\top,u_{ij}\psi_i) \ , \end{align}
where $u_{ij} = ({\bf Z}_{j:}{\bf Z}_{j:}^\top+\tau_{ij}^{-1})^{-1}$ and $\bm{\epsilon}_{\backslash ij}={\bf X}_{i:}-{\bf C}_{i:}{\bf Z}|_{c_{ij}=0}$. Note that we only need to sample those $c_{ij}$ for which $r_{ij}=1$, i.e.\ just the slab distribution. Sampling from the conditional distributions for $\tau_{ij}$ can be done using
\begin{align} \label{eq:gibbstau}
\tau_{ij}^{-1}|c_{ij},t_s,t_r \ \sim & \ \mathrm{Gamma}\left(\tau_{ij}^{-1}\left|t_s+\frac{1}{2},t_r+\frac{c_{ij}^2}{2\psi_i}\right.\right) \ . \end{align}
The conditional distributions for the remaining parameters in the slab and spike prior can be written first for the masking matrix ${\bf Q}$ as
\begin{align} \label{eq:gibbsq}
q_{ij}|{\bf X}_{i:},{\bf C}_{i:},{\bf Z},\psi_i,\tau_{ij},\eta_{ij} \ \sim & \ \mathrm{Bernoulli}\left(q_{ij}\middle|\frac{\xi_{\eta_{ij}}}{1+\xi_{\eta_{ij}}}\right) \ , \end{align}
where
\begin{align*} \xi_{\eta_{ij}} \ = & \ \frac{\alpha_m\nu_j}{1-\alpha_m\nu_j}\frac{\psi_i^{1/2}}{({\bf Z}_{j:}{\bf Z}_{j:}^\top+\tau_{ij}^{-1})^{1/2}}\exp\left(\frac{(\bm{\epsilon}_{\backslash ij}{\bf Z}_{j:}^\top)^2}{2\psi_i({\bf Z}_{j:}{\bf Z}_{j:}^\top+\tau_{ij}^{-1})}\right) \ , \end{align*}
and the probability of each element of ${\bf C}$ of being non-zero as
\begin{align} \label{eq:gibbseta}
\eta_{ij}|u_{ij},q_{ij},\alpha_p,\alpha_m \ \sim & \ (1-u_{ij})\delta(\eta_{ij})+u_{ij}\mathrm{Beta}(\eta_{ij}|\alpha_p\alpha_m+q_{ij},\alpha_p(1-\alpha_m) +1-q_{ij}) \ , \end{align}
where $u_{ij}\sim\mathrm{Bernoulli}(u_{ij}|r_{ij}+(1-r_{ij})\nu_j(1-\alpha_m)/(1-\nu_j\alpha_m))$, i.e.\ we set $u_{ij}=1$ if $q_{ij}=1$. Finally, for the column-wise shared sparsity rate we have
\begin{align} \label{eq:gibbsnu}
\nu_j|{\bf u}_{j},\beta_p,\beta_m \ \sim & \ \mathrm{Beta}\left(\nu_j\middle|\beta_p\beta_m+\sum_{i=1}^d u_{ij},\beta_p(1-\beta_m)+\sum_{i=1}^d (1-u_{ij})\right) \ . \end{align}
Sampling from the DAG model only requires minor changes in notation but the conditional posteriors are essentially the same. The changes mostly amount to replacing accordingly ${\bf C}$ by ${\bf B}$ and ${\bf Q}$ by ${\bf R}$. Note that ${\bf Q}_L$ is the identity and ${\bf R}$ is strictly lower triangular a priori, thus we only need to sample their active elements.
\paragraph{Inference with missing values} We introduce a binary masking matrix indicating whether an element of ${\bf X}$ is missing or not. For the factor model we have the following modified likelihood
\begin{align*}
p({\bf X}_{\rm tr}|{\bf C},{\bf Z},\bm{\Psi},{\bf M}_{\rm miss}) = \mathcal{N}({\bf M}_{\rm miss}\odot{\bf X}|{\bf M}_{\rm miss}\odot({\bf C}{\bf Z}),\bm{\Psi}) \ . \end{align*}
Testing on the missing values, ${\bf M}_{\rm miss}^\star={\bf 1}{\bf 1}^\top-{\bf M}_{\rm miss}$ requires averaging the test likelihood
\begin{align*}
p({\bf X}^\star|{\bf C},{\bf Z},\bm{\Psi},{\bf M}_{\rm miss}^\star) = \mathcal{N}({\bf M}_{\rm miss}^\star\odot{\bf X}|{\bf M}_{\rm miss}^\star\odot({\bf C}{\bf Z}),\bm{\Psi}) \ , \end{align*}
over ${\bf C},{\bf Z},\bm{\Psi}$ given ${\bf X}_{\rm tr}$ (training). We can approximate the predictive density $p({\bf X}^\star|{\bf X}_{\rm tr},\cdot)$ by computing the likelihood above during sampling using the conditional posteriors of ${\bf C}$, ${\bf Z}$ and $\bm{\Psi}$ and then summarizing using for example the median. Drawing from ${\bf C}$, ${\bf Z}$, $\bm{\Psi}$ can be achieved by sampling from their respective conditional distributions as described before with some minor modifications.
\end{document}
\begin{document}
\title[Kohn decomposition]{Kohn decomposition for forms on coverings of complex manifolds constrained along fibres}
\subjclass[2010]{32A38, 32K99}
\keywords{Kohn decomposition, holomorphic Banach vector bundle, harmonic form}
\author{A.~Brudnyi}
\address{Department of Mathematics and Statistics, University of Calgary, Calgary, Canada}
\email{abrudnyi@ucalgary.ca}
\author{D.~Kinzebulatov}
\address{The Fields Institute, Toronto, Canada}
\email{dkinzebu@fields.utoronto.ca}
\thanks{Research of the authors is partially supported by NSERC}
\begin{abstract} The classical result of J.J.~Kohn asserts that over a relatively compact subdomain $D$ with $C^\infty$ boundary of a Hermitian manifold whose Levi form has at least $n-q$ positive eigenvalues or at least $q+1$ negative eigenvalues at each boundary point, there are natural isomorphisms between the $(p,q)$ Dolbeault cohomology groups defined by means of $C^\infty$ up to the boundary differential forms on $D$ and the (finite-dimensional) spaces of harmonic $(p,q)$-forms on $D$ determined by the corresponding complex Laplace operator. In the present paper, using Kohn's technique, we give a similar description of the $(p,q)$ Dolbeault cohomology groups of spaces of differential forms taking values in certain (possibly infinite-dimensional) holomorphic Banach vector bundles on $D$. We apply this result to compute the $(p,q)$ Dolbeault cohomology groups of some regular coverings of $D$ defined by means of $C^\infty$ forms constrained along fibres of the coverings. \end{abstract}
\maketitle
\section{Introduction} \label{intro}
Let $X$ be a connected Hermitian manifold of complex dimension $n$. A relatively compact subdomain $D=\{x\in X\, :\, \rho(x)<0\}\Subset X$, $\rho\in C^\infty(X)$, with $C^\infty$ boundary $\partial D$ is said to have {\em $Z(q)$-property}, if the Levi form of $\rho$ has at least $n-q$ positive eigenvalues or at least $q+1$ negative eigenvalues at each boundary point of $D$ (e.g., a strongly pseudoconvex subdomain of $X$ has $Z(q)$-property for all $q>0$).
Let $\Lambda^{p,q}(\bar{D})$ be the space of $C^\infty$ ($p,q$)-forms on $D$ that admit $C^\infty$ extension in some open neighbourhood of the closure $\bar{D}$ of $D$ in $X$. Using the Hermitian metric on $X$, in a standard way one defines the Laplace operator $\Box$ on $\Lambda^{p,q}(\bar{D})$, see, e.g., \cite{K} for details. The forms in ${\rm Ker}\,\Box=:\mathcal H^{p,q}(\bar{D})$ are called {\em harmonic}.
The following result is the major consequence of the theory developed by J.J.~Kohn, see \cite{KN}, \cite{K} or \cite{FK}.
\begin{theorem} \label{thm0} Suppose $D$ has $Z(q)$-property. Then ${\rm dim}_{\mathbb C}\mathcal H^{p,q}(\bar{D})<\infty$ and each $\bar{\partial}$-closed form $\omega\in \Lambda^{p,q}(\bar{D})$ is uniquely presented as \begin{equation} \label{kohnd} \omega=\bar{\partial}\xi+\chi,\quad\text{where}\quad \xi \in \Lambda^{p,q-1}(\bar{D}),\ \chi\in\mathcal H^{p,q}(\bar{D}). \end{equation} \end{theorem}
It follows that the map $$ \mathcal H^{p,q}(\bar{D}) \ni \omega \mapsto [\omega] \in H^{p,q}(\bar{D}):=\{\omega \in \Lambda^{p,q}(\bar{D}): \bar{\partial} \omega=0\}/\bar{\partial} \Lambda^{p,q-1}(\bar{D}), $$ where $[\omega]$ stands for the cohomology class of $\omega$, is an isomorphism.
As a corollary, one obtains the characterization of the \textit{Dirichlet cohomology groups} $$ H_0^{r,s}(\bar{D}):=\mathcal Z_0^{r,s}(\bar{D})/\mathcal B_0^{r,s}(\bar{D}), $$ where $$ \mathcal Z_0^{r,s}(\bar{D}):=\{\omega \in \Lambda_0^{r,s}(\bar{D}): \bar{\partial}\omega=0 \}, \quad \mathcal B_0^{r,s}(\bar{D}):=\bar{\partial}\{\omega \in \Lambda^{r,s-1}_0(\bar{D}): \bar{\partial}\omega \in \Lambda_0^{r,s}(\bar{D})\} $$ and $$
\Lambda_0^{r,s}(\bar{D}):=\{\omega \in \Lambda^{r,s}(\bar{D}): \omega|_{\partial D}=0\}. $$
Namely, one has the following result:
\begin{theorem}[\cite{FK}] \label{thm0_2} If $D$ has $Z(q)$-property, then there is a natural isomorphism $$H_0^{n-p,n-q}(\bar{D}) \cong (H^{p,q}(\bar{D}))^{\ast}$$
induced by the map associating to each $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D})$ the linear functional $$ \mathcal Z^{p,q}(\bar{D}) \ni \theta \mapsto \int_D \theta \wedge \xi. $$ \end{theorem}
Since Theorems \ref{thm0} and \ref{thm0_2} are independent of $p$, they can be viewed as assertions about spaces of $C^\infty$ $(0,q)$-forms on $\bar D$ with values in the (finite-dimensional) holomorphic vector bundle of $(p,0)$-forms on $X$. This manifests a more general fact: Kohn's arguments can be transferred without significant changes to spaces of $C^\infty$ $(p,q)$-forms on $\bar D$ taking values in a finite-dimensional Hermitian holomorphic vector bundle on $X$ (see, e.g.,~\cite[Ch.IV]{FK}).
The goal of the present paper is to extend Theorems \ref{thm0} and \ref{thm0_2} to spaces of $C^\infty$ $(p,q)$-forms on $\bar D$ with values in an \textit{infinite-dimensional} holomorphic Banach vector bundle $E$ on $X$. (Note that if $E$ is not Hilbertian, Kohn's arguments are not applicable.) We apply these results to differential forms on (possibly unbounded!) subdomains $\bar{D}'=r^{-1}(\bar{D}) \subset X'$, where $r:X'\rightarrow X$ is a regular covering of a complex manifold $X$,
satisfying additional constraints along fibres of the covering (see Section~3). Such forms appear within theories of algebras of bounded holomorphic functions on regular coverings of $X$. Another, sheaf-theoretic, approach to the study of such algebras was proposed in \cite{BrK}. It is based on analogues of Cartan theorems A and B for coherent-type sheaves on certain fibrewise compactifications of the covering (a topological space having some properties of a complex manifold).
\section{Main results} \label{main}
Let $\pi:E \rightarrow X$ be a holomorphic Banach vector bundle with fibre $B$. For an open $U\subset X$ by $\Lambda^{p,q}(U,E)$ we denote the space of $C^\infty$ $E$-valued $(p,q)$-forms on $U$, i.e., $C^\infty$ sections of the holomorphic Banach vector bundle $E \otimes\bigl( \wedge^p T^*X\bigr) \wedge\bigl(\wedge^q \overline{T^*X}\bigr)$ over $U$ (here $T^*X$ is the holomorphic cotangent bundle on $X$). Also, we denote by $\mathcal O(X, E)$ the space of holomorphic sections of $E$ equipped with (Hausdorff) topology of uniform convergence on compact subsets of $X$ (defined in local trivializations on $E$ by the norm of $B$). For a compact subset $S\subset X$ by $C(S,E)$ we denote the space of continuous sections of $E$ on $S$ equipped with topology of uniform convergence.
(The former space admits the natural structure of a Fr\'{e}chet space and the latter one of a complex Banach space).
Let $\Lambda^{p,q}(\bar{D},E):=\Lambda^{p,q}(X,E)|_{\bar{D}}$ be the space of restrictions to $\bar{D}$ of $C^\infty$ $E$-valued forms on $X$. In a standard way, using local trivializations on $E$, we equip $\Lambda^{p,q}(\bar{D},E)$ with the Fr\'{e}chet topology determined by a sequence of $C^k$-like norms $\{\|\cdot\|_{k,\bar{D},E}\}_{k=0}^\infty$ (see subsection \ref{results}). Then the standard operator $$\bar{\partial}:\Lambda^{p,q}(\bar D,E) \rightarrow \Lambda^{p,q+1}(\bar D,E)$$ is continuous. Consider the corresponding subspaces of $\bar{\partial}$-closed and $\bar{\partial}$-exact forms $$\mathcal Z^{p,q}(\bar{D},E):=\{\omega \in \Lambda^{p,q}(\bar{D},E): \bar{\partial} \omega=0\}\quad\text{and}\quad \mathcal B^{p,q}(\bar{D},E):=\bar{\partial}\Lambda^{p,q-1}(\bar{D},E)$$ equipped with topology induced from $\Lambda^{p,q}(\bar{D},E)$.
Our results concern the structure of the cohomology group $$ H^{p,q}(\bar{D},E):=\mathcal Z^{p,q}(\bar{D},E)/\mathcal B^{p,q}(\bar{D},E) $$ and its dual, for bundles from the class $\Sigma_0(X)$ consisting of {\em direct summands of holomorphically trivial bundles}, that is, $E\in \Sigma_0(X)$ if there exists a holomorphic Banach vector bundle $E'$ on $X$ such that the Whitney sum of bundles $E\oplus E'$ is holomorphically trivial.
\begin{example} \label{sigma_ex} Each holomorphic Banach vector bundle on a Stein manifold $Y$ is in $\Sigma_0(Y)$ (see, e.g.,~\cite[Th.~3.9]{Obz}). Thus if $f: X\rightarrow Y$ is a holomorphic map, then $E:=f^*E'\in \Sigma_0(X)$ for every holomorphic Banach vector bundle $E'$ on $Y$. The class of such bundles $E$ will be denoted by $\Sigma_0^s(X)$. \end{example}
In what follows, by $Z^m$ we denote the $m$-fold direct sum of a vector space $Z$, and we ignore all objects related to $m=0$. \begin{theorem} \label{thm2} Suppose $E \in \Sigma_0(X)$ and $D\Subset X$ has $Z(q)$-property. Fix a basis $\{\chi_i\}_{i=1}^m\subset \mathcal H^{p,q}(\bar{D})$.
(1) There exist a closed complemented subspace $\mathcal A \subset \mathcal O(X, E)^m$ and a finite subset $S\subset \bar D$ such that \begin{itemize} \item[(a)]
$\mathcal A|_{S}$ is a closed subspace of the Banach space $C(S,E)^m$ and the restriction to $S$ induces an isomorphism of the Fr\'{e}chet spaces
$\mathcal A\cong\mathcal A|_{S}$; \item[(b)] The linear map $L:\mathcal B^{p,q}(\bar D, E)\oplus \mathcal A\rightarrow \mathcal Z^{p,q}(\bar{D}, E)$, \[
L\bigl(\eta, (f_1,\dots,f_m)\bigr):=\eta+\sum_{i=1}^m f_i|_{\bar{D}}\cdot\chi_i,\quad \eta\in \mathcal B^{p,q}(\bar D, E),\ (f_1,\dots,f_m)\in \mathcal A, \] is an isomorphism of Fr\'{e}chet spaces. \end{itemize}
(2) If the group $GL(B)$ of invertible bounded linear operators on the fibre $B$ of $E$ is contractible, and $E\in\Sigma_0^s(X)$, then the restriction map $r_x:\mathcal A\rightarrow \pi^{-1}(x)^m\cong B^m$, $(f_1,\dots, f_m)\mapsto (f_1(x),\dots, f_m(x))$, is a Banach space isomorphism for each $x\in X$.
\end{theorem}
\begin{remark}\label{rem1}
(1)~It follows that $\mathcal B^{p,q}(\bar D, E)$ is a closed subspace of the Fr\'{e}chet space $\mathcal Z^{p,q}(\bar{D}, E)$ and so the quotient space $H^{p,q}(\bar{D},E)$ is Fr\'{e}chet. It is trivial if $m=0$; otherwise, it is isomorphic (in the category of Fr\'{e}chet spaces) to the complex Banach space $\mathcal A|_{S}\subset C(S,E)^m \cong B^{rm}$; here $r$ is the cardinality of $S$.
(2)~If $X$ is a Stein manifold, then it admits a K\"{a}hler metric. Working with this metric, one obtains that the corresponding harmonic forms $\chi_i$ in Theorem \ref{thm2} are also $d$-closed (see, e.g.~\cite[Ch.0, Sect.7]{GH}).
(3)~The class of complex Banach spaces $B$ with contractible group $GL(B)$ includes infinite-dimensional Hilbert spaces, spaces $\ell^p$ and $L^p[0,1]$, $1\le p\le \infty$, $c_0$ and $C[0,1]$, spaces $L_p(\Omega,\mu)$, $1<p<\infty$, of $p$-integrable measurable functions on an arbitrary measure space $\Omega$, some classes of reflexive symmetric function spaces and spaces $C(G)$ for $G$ being infinite-dimensional compact topological groups (see, e.g., \cite{M} for details).
\end{remark}
Next, we formulate an analogue of Theorem \ref{thm0_2}. We will need the following notation. Let $V\rightarrow X$ be a holomorphic Banach vector bundle. Set $$
\Lambda_0^{r,t}(\bar{D},V):=\{\omega \in \Lambda^{r,t}(\bar{D},V): \omega|_{\partial D}=0\} $$ and define the \textit{$V$-valued Dirichlet cohomology groups} of $\bar{D}$ by the formula $$ H_0^{r,s}(\bar{D},V):=\mathcal Z_0^{r,s}(\bar{D},V)/\mathcal B_0^{r,s}(\bar{D},V), $$ where \[ \begin{array}{l} \mathcal Z_0^{r,s}(\bar{D},V):=\{\omega \in \Lambda_0^{r,s}(\bar{D},V): \bar{\partial}\omega=0 \}\qquad\text{and}
\\ \mathcal B_0^{r,s}(\bar{D},V):=\bar{\partial}\{\omega \in \Lambda^{r,s-1}_0(\bar{D},V): \bar{\partial}\omega \in \Lambda_0^{r,s}(\bar{D},V)\}. \end{array} \] We endow spaces $\mathcal B_0^{r,s}(\bar{D},V) \subset \mathcal Z_0^{r,s}(\bar{D},V) \subset \Lambda_0^{r,s}(\bar{D},V)$ with the topology induced by that of $\Lambda^{r,s}(\bar{D},V)$. One can easily check that $\mathcal Z_0^{r,s}(\bar{D},V)$ and $\Lambda_0^{r,s}(\bar{D},V)$ are Fr\'{e}chet spaces with respect to this topology.
We retain notation of Theorem \ref{thm2}. In the following result $H^{p,q}(\bar{D},E)$, $E\in\Sigma_0(X)$, is equipped with the Fr\'{e}chet space structure given by Theorem \ref{thm2}. By $E^*$ we denote the bundle dual to $E$. Also, for $m>0$, $\{\chi_i\}_{i=1}^m$ is a fixed basis of $\mathcal H^{p,q}(\bar{D})$ and $\mathcal A \subset \mathcal O(X, E)^m$ is the corresponding subspace of Theorem \ref{thm2}.
\begin{theorem} \label{thm6} Suppose $E \in \Sigma_0(X)$ and $D\Subset X$ has $Z(q)$-property. Fix forms $\{\gamma_i\}_{i=1}^m \subset \mathcal Z_0^{n-p,n-q}(\bar{D})$ such that $\int_D \chi_i \wedge \gamma_j=\delta_{ij}$, where $\delta_{ij}$ is the Kronecker delta. (Their existence follows from Theorem \ref{thm0_2}.)
(1) $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of the Fr\'{e}chet space $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$; moreover, the quotient (Fr\'{e}chet) space $H_0^{n-p,n-q}(\bar{D},E^*)$ is naturally isomorphic to the dual space $\bigl(H^{p,q}(\bar{D},E)\bigr)^*$.
(2) There exist a closed subspace $\mathcal B \subset \mathcal O(X, E^*)^m$ isomorphic to the dual of $\mathcal A$ and a finite subset $S^*\subset\bar{D}$ such that \begin{itemize} \item[(a)] The restriction to $S^*$ induces an isomorphism of the Fr\'{e}chet spaces
$\mathcal B\cong\mathcal B|_{S^*}$; \item[(b)] The linear map $M:\mathcal B \rightarrow H_0^{n-p,n-q}(\bar{D},E^*)$ $$
M(h_1,\dots,h_m):=\left[\sum_{i=1}^m h_i|_{\bar D} \cdot \gamma_i\right], \quad (h_1,\dots,h_m) \in \mathcal B, $$ is an isomorphism of Fr\'{e}chet spaces; here $[\eta]$ stands for the cohomology class of $\eta$. \end{itemize} \end{theorem}
The isomorphism in (1) is induced by the map associating to each $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ a linear functional $$ \mathcal Z^{p,q}(\bar{D},E) \ni \theta \mapsto J_E(\theta,\xi), $$ where $$ J_E:\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C $$ is a certain continuous bilinear form, see Section 5 below. (In particular, if $E:=X\times\mathbb C$, $J_E(\theta,\xi):=\int_D \theta\wedge\xi$.)
\begin{remark} It is not clear yet to what extent assertions of Theorems \ref{thm2} and \ref{thm6} are valid for holomorphic Banach vector bundles on $X$ not in $\Sigma_0(X)$. In particular, is it true that in this general setting spaces $H^{p,q}(\bar D,E)$ are Hausdorff (in the corresponding quotient topologies), and what can be said about the Serre-type duality between
$H^{p,q}(\bar D,E)$ and $H_0^{n-p,n-q}(\bar D,E^*)\, $? \end{remark}
\section{Applications}
\label{motivation} As it was mentioned in the Introduction, forms taking values in holomorphic Banach vector bundles arise as an equivalent presentation of forms defined on subdomains of coverings of complex manifolds and satisfying additional constraints along the fibres of the coverings. In what follows, we outline the main features of this construction (see \cite{Br}, \cite{BrK} for details).
Let $r:X'\rightarrow X$ be a regular covering with a deck transformation group $G$ of a connected complex manifold $X$. Assume that $X'$ is equipped with a path metric $d'$ determined by the pullback to $X'$ of a smooth hermitian metric on $X$.
\begin{definition}\label{def1} By $C_B(X')=C_B(X',X,r)$ we denote the space of complex continuous functions $f:X' \rightarrow \mathbb C$ uniformly continuous with respect to metric $d'$ on subsets $r^{-1}(U)$, $U \Subset X$, and such that for each $x \in X'$ functions $G \ni g \mapsto f(g \cdot x)$ belong to a complex Banach space $B$ of functions $u:G \rightarrow \mathbb C$ such that $$ u \in B,~~g \in G \quad \Rightarrow \quad R_g u \in B,\quad\text{where}\quad R_g(u)(h):=u(h g)\ (h\in G), $$ and each $R_g$ is an invertible bounded linear operator on $B$. \end{definition}
Here are some examples of such spaces $B$. \begin{example} \label{ex2} \textit{Uniform algebras.~} As space $B$ one can take a closed unital subalgebra of the algebra $\ell_\infty(G)$ of bounded complex functions on $G$ (with pointwise multiplication and $\sup$-norm) invariant with respect to the action of $G$ on $\ell_\infty(G)$ by right translations $R_g$, $g \in G$, e.g.,~algebra $\ell_\infty(G)$ itself, algebra $c(G)$ of bounded complex functions on $G$ that admit continuous extensions to the one-point compactification of group $G$, algebra $AP(G)$ of the von Neumann almost periodic functions on group $G$ (i.e.~uniform limits on $G$ of linear combinations of matrix elements of irreducible unitary representations of $G$), etc. If group $G$ is finitely generated, then in addition to $c(G)$ one can take subalgebras $c_{E}(G) \subset \ell_\infty(G)$ of functions having limits at `$\infty$' along each `path' (see \cite{BrK} for details).
\noindent\textit{Orlicz spaces.~}Let $\mu$ be a $\sigma$-finite regular Borel measure on $G$ such that for each $g\in G$ there exists a constant $c_g>0$ so that $\mu(h\cdot g)\le c_g\cdot\mu (h)$ for all $h\in G$. Let $\Phi: [0,\infty)\to [0,\infty)$ be a convex function such that \[ \lim_{x\to\infty}\frac{\Phi(x)}{x}=\infty\qquad \text{and}\qquad \lim_{x\to 0^+}\frac{\Phi(x)}{x}=0. \]
As space $B$ one can take the space $\ell_\Phi$ of complex $\mu$-measurable functions on $G$ such that $\int_G\Phi(|f|)d\mu<\infty$ endowed with norm \[
\|f\|_{\Phi}:=\inf\left\{C\in (0,\infty)\, :\, \int_G\Phi\left(\frac{|f|}{C}\right)d\mu\le 1\right\}. \] If $\Phi(t):=t^p$, $1< p<\infty$, then one obtains classical spaces $\ell^p(G,\mu)$.
As measure $\mu$ one can take, e.g., the counting measure $\mu_c$ on $G$, in which case all $c_g=1$. If group $G$ is finitely generated, one can take $\mu:=e^u\mu_c$, where $u:G\to \mathbb R$ is a uniformly continuous function with respect to the $G$-invariant metric on $G$ induced by the natural metric on the Cayley graph of $G$ defined by a fixed family of generators of $G$.
\end{example}
It is easily seen that the definition of space $C_B(X')$ does not depend on the choice of the hermitian metric on $X$. If we fix a cover $\mathcal U$ of $X$ by simply connected relatively compact coordinate charts and for a given chart $U \in \mathcal U$ endow the `cylinder' $U':=r^{-1}(U)$ with local coordinates pulled back from $U$ (so that in these coordinates $U'$ is naturally identified with $U \times G$), then every function $f$ in $C_B(X')$, restricted to $U'$, can be viewed as a continuous function on $U$ taking values in space $B$.
We equip $C_B(X')$ with the Fr\'{e}chet topology defined by the family of seminorms $\|\cdot\|_{U}$, $U\Subset X$, \[
\|f\|_{U}:=\sup_{x\in U}\|f_x\|_B, \quad f\in C_B(X'), \]
where $f_x(g):=f(g\cdot x)$, $g\in G$, and $\|\cdot\|_B$ is the norm of $B$.
By $\mathcal O_B(X'):=C_B(X') \cap \mathcal O(X')$ we denote the subspace of holomorphic functions in $C_B(X')$.
\begin{example}[Bohr's almost periodic functions, see, e.g.,~\cite{BrK} for details] \label{ex} A tube domain $T'=\mathbb R^n+i\Omega \subset \mathbb C^n$, where $\Omega \subset \mathbb R^n$ is open and convex, can be viewed as a regular covering $r:T' \rightarrow T\, (:=r(T') \subset \mathbb C^n)$ with deck transformation group $\mathbb Z^n$, where \begin{equation*} r(z):=\bigl(e^{i z_1}, \dots, e^{i z_n}\bigr), \quad z=(z_1,\dots,z_n) \in T'. \end{equation*} Let $B=AP(\mathbb Z^n)$ be the complex Banach algebra of the von Neumann almost periodic functions on group $\mathbb Z^n$ endowed with $\sup$-norm. Then $\mathcal O_B(T')\, (=:\mathcal O_{AP}(T'))$ coincides with the algebra of holomorphic almost periodic functions on $T'$, i.e.~uniform limits on tube subdomains $T''=\mathbb R^n+i\Omega''$ of $T'$, $\Omega'' \Subset \Omega$, of exponential polynomials \begin{equation*} z\mapsto\sum_{k=1}^m c_ke^{i \langle z,\lambda_k\rangle}, \quad z\in T',\quad c_k \in \mathbb C, \quad \lambda_k \in \mathbb R^n, \end{equation*} where $\langle\cdot,\cdot\rangle$ is the Hermitian inner product on $\mathbb C^n$, and $C_{B}(T')\, (=:C_{AP}(T'))$ coincides with the algebra of continuous uniformly almost periodic functions on $T'$.
\end{example}
The theory of almost periodic functions was created in the 1920s by H.~Bohr and nowadays is widely used in various areas of mathematics including number theory, harmonic analysis, differential equations (e.g.,~KdV equation), etc. We are interested, in particular, in studying cohomology groups of spaces of differential forms with almost periodic coefficients. Such forms arise as the special case of the following
\begin{definition} By $\Lambda^{p,q}_B(X')=\Lambda^{p,q}_B(X',X,r)$ we denote the subspace of $C^\infty$ $(p,q)$-forms $\omega$ on $X'$ such that in each `cylinder' $U'=r^{-1}(U)\, (\cong U\times G)$, $U \in \mathcal U$, in local coordinates pulled back from $U$, $$
\omega|_{U'}(z,\bar{z},g)=\sum_{|\alpha|=p, \,|\beta|=q} f_{\alpha,\beta}(z,\bar{z},g)\, dz_\alpha \wedge d\bar{z}_\beta, $$ where $U \ni z \mapsto f_{\alpha,\beta}(z,\bar{z},\cdot)$ are Fr\'{e}chet $C^\infty$ $B$-valued functions (cf.~subsection \ref{results}).
For a subdomain $D\Subset X$ we set $D':=r^{-1}(D)$ and
$\Lambda_B^{p,q}(\bar{D}'):=\Lambda_B^{p,q}(X')|_{\bar{D}'}$. \end{definition}
Comparing definitions of spaces $\Lambda_B^{p,q}(\bar{D}')$ and $\Lambda^{p,q}(\bar{D},E_{X'})$, where $\pi:E_{X'} \rightarrow X$ is the holomorphic Banach vector bundle with fibre $B$ associated to regular covering $r:X' \rightarrow X$ (viewed as a principal bundle on $X$ with fibre $G$, see e.g.,~\cite{BrK}), and likewise endowing $\Lambda_B^{p,q}(\bar{D}')$ with a sequence of $C^k$-like seminorms, we obtain isomorphisms of Fr\'{e}chet spaces \begin{equation} \label{isom} \Lambda^{p,q}_B(\bar{D}') \cong \Lambda^{p,q}(\bar{D},E_{X'}) \end{equation} commuting with the corresponding $\bar\partial$ operators. These induce (algebraic) isomorphisms of the corresponding cohomology groups: \begin{equation}\label{iso} H^{p,q}_{B}(\bar{D}')\cong H^{p,q}(\bar D,E_{X'}), \end{equation} where \[ \begin{array}{c} \displaystyle H^{p,q}_{B}(\bar{D}'):=\mathcal Z^{p,q}_{B}(\bar{D}')/\mathcal B^{p,q}_{B}(\bar{D}');\\ \\ \mathcal Z^{p,q}_{B}(\bar{D}'):=\{\omega \in \Lambda^{p,q}_{B}(\bar{D}')\,:\, \bar{\partial} \omega=0 \},\qquad \mathcal B^{p,q}_{B}(\bar{D}'):=\bar{\partial} \Lambda^{p,q-1}_{B}(\bar{D}'). \end{array} \]
Now, suppose that $D\Subset X$ has $Z(q)$-property. Let $f:X\rightarrow Y$ be a holomorphic map into a connected Stein manifold $Y$. Then $f$ induces a homomorphism of fundamental groups $f_*:\pi_1(X)\rightarrow\pi_1(Y)$. Without loss of generality, we may and will assume that $f_*$ is an epimorphism. (Indeed, if $H:=f_*(\pi_1(X))$ is a proper subgroup of $\pi_1(Y)$, then by the covering homotopy theorem, there exist an unbranched covering $p: Y'\rightarrow Y$ such that $\pi_1(Y')=H$, and a holomorphic map $f':X\rightarrow Y'$ such that $f=p\circ f'$. Moreover, $Y'$ is Stein. Thus, we may replace $f$ by $f'$.)
Next, let $r:X'\rightarrow X$ be a regular covering with a deck transformation group $G$ isomorphic to a quotient group of $\pi_1(Y)$. If $\tilde r: Y'\rightarrow Y$ is the regular covering of $Y$ with the deck transformation group $G$, then by the covering homotopy theorem there exists a holomorphic map $f': X'\rightarrow Y'$ such that $f\circ r=\tilde r\circ f'$. This implies that $E_{X'}=f^*E_{Y'}$ (here $E_{Y'}\rightarrow Y'$ is the holomorphic Banach vector bundle with fibre $B$ defined similarly to $E_{X'}$ above). In particular, $E_{X'}\in \Sigma_0^s(X)$, see Example \ref{sigma_ex}, and hence Theorem \ref{thm2} can be applied to describe cohomology groups $H^{p,q}_{B}(\bar{D}')$. Under the above assumptions we obtain (as before, we ignore all objects related to $m=0$):
\begin{theorem}\label{te3.5} Let $\{\chi_i'\}_{i=1}^m$ be the pullback to $\bar D'$ of a basis in $\mathcal H^{p,q}(\bar{D})$.
(1) There exist a closed complemented subspace $\mathcal A \subset \mathcal O_B(X')^m$ and a finite subset $S\subset \bar D$ such that \begin{itemize} \item[(a)]
$\mathcal A|_{S'}$, $S':=r^{-1}(S)$, is a closed
subspace of the Banach space $(C_B(X')|_{S'})^m\cong B^{cm}$, $c:={\rm card}\, S$, and the restriction $\mathcal A\rightarrow \mathcal A|_{S'}$ is an isomorphism of Fr\'{e}chet spaces; \item[(b)] $\mathcal B_B^{p,q}(\bar D')$ is a closed subspace of the Fr\'{e}chet space $\mathcal Z_B^{p,q}(\bar{D}')$ and the linear map $L:\mathcal B_B^{p,q}(\bar D')\oplus \mathcal A\rightarrow \mathcal Z_B^{p,q}(\bar{D}')$, \[
L\bigl(\eta, (f_1,\dots,f_m)\bigr):=\eta+\sum_{i=1}^m f_i|_{\bar{D}'}\cdot\chi_i',\quad \eta\in \mathcal B_B^{p,q}(\bar D'),\ (f_1,\dots,f_m)\in \mathcal A, \] is an isomorphism of Fr\'{e}chet spaces. \end{itemize}
(2) If the group $GL(B)$ of invertible bounded linear operators on $B$
is contractible, then the restriction map $\mathcal A\rightarrow \mathcal A|_{\pi^{-1}(x)}\cong B^m$ is a Banach space isomorphism for each $x\in X$. \end{theorem} \begin{remark}\label{rem3.6} (1)~The result shows that $H_B^{p,q}(\bar{D}')$ is a Fr\'{e}chet space, trivial if $m=0$ and isomorphic to a closed subspace of the Banach space $B^{cm}$ otherwise.
(2)~As follows from the assumptions, Theorem \ref{te3.5} is applicable to nontrivial coverings $r:X'\rightarrow X$ provided that $X$ admits a holomorphic map into a Stein manifold that induces a nontrivial homomorphism of the corresponding fundamental groups. In particular, if $X$ is Stein, the theorem is valid for any regular covering $r:X'\rightarrow X$. If, in addition, $D$ is homotopically equivalent to $X$, then $H_B^{p,q}(\bar{D}')=0$ for $p+q>n:={\rm dim}\, X$. Indeed, in this case, due to Remark \ref{rem1}\,(2), $\mathcal H^{p,q}(\bar{D})$ has a basis consisting of $d$-closed forms. Since $X$, being Stein, is homotopically equivalent to an $n$-dimensional CW-complex, these forms must be $d$-exact for $p+q>n$ and, hence, equal to zero (because they are harmonic with respect to the Laplacian defined by $d$). This implies the required statement.
(3)~In view of Remark \ref{rem1}\,(3), group $GL(B)$ is contractible for spaces of Example \ref{ex2} $B=\ell^p(G,\mu)$, $1<p<\infty$, $c(G)$ or $AP(G)$\footnote{Recall that $AP(G)\cong C(bG)$, where $bG$ is a compact topological group called the {\em Bohr compactification} of $G$.} in case $G$ is infinite and maximally almost periodic (i.e.~finite-dimensional unitary representations separate points of $G$, see, e.g.,~\cite{BrK} for examples of such groups). In all these cases, under assumptions of Theorem \ref{te3.5}, we obtain that $H_B^{p,q}(\bar{D}')\cong B^m$. In particular, $H_{AP(\mathbb Z^n)}^{p,q}(\bar{D}')\cong AP(\mathbb Z^n)^{m}$, where $D\Subset T$ and $r:T'\rightarrow T$ is the covering of Example \ref{ex} ($T\subset\mathbb C^n$ is Stein because it is a relatively complete Reinhardt domain, see, e.g.,~\cite{Shab}).
(4)~Similarly, one can reformulate Theorem \ref{thm6} to deal with forms in $\Lambda_{B^*}^{p,q}(X')$ vanishing on $\partial D':=r^{-1}(\partial D)$ in case the dual space $B^*$ of $B$ is a function space on $G$ satisfying conditions of Definition \ref{def1}. This holds, for instance, if $B$ is a reflexive Orlicz space $\ell_{\Phi}$ satisfying assumptions of Example \ref{ex2} or $c(G)$ and $\ell^1(G,\mu)$ spaces of this example. On the other hand, for space $AP(G)$ with $G$ as above the dual $AP(G)^*$ is the space of regular complex Borel measures on $bG$ (the Riesz representation theorem) and therefore to obtain a version of Theorem \ref{thm6} in this case one works with forms in $\Lambda_0^{r,t}(\bar D,E_{X'}^*)$. We leave the corresponding details to the reader.
\end{remark}
\section{Proof of Theorem \ref{thm2}}
\subsection{Banach-valued differential forms} \label{results}
Let $U\Subset\mathbb C^n$ be a bounded open subset and $B$ a complex Banach space with norm $\|\cdot\|_B$. We fix holomorphic coordinates $z=(z_1,\dots, z_n)$ on $\mathbb C^n$. For tuples $\alpha=(\alpha_1, \dots ,\alpha_p)\in\mathbb N^p$ and $\beta=(\beta_1,\dots,\beta_q)\in\mathbb N^q$, each consisting of increasing sequences of numbers not exceeding $n$, we set \[
|\alpha|:=p,\quad |\beta|:=q\quad\text{and}\quad dz_\alpha\wedge d\bar{z}_\beta:=dz_{\alpha_1}\wedge\cdots\wedge dz_{\alpha_p}\wedge d\bar{z}_{\beta_1}\wedge\cdots\wedge d\bar{z}_{\beta_q}. \] As usual, in real coordinates $x_1,\dots, x_{2n}$, $z_j:=x_j+i x_{n+j}$, $1\le j\le n$, on $\mathbb R^{2n}$, partial (Fr\'{e}chet) derivatives $D^\gamma$, $\gamma=(\gamma_1,\dots,\gamma_{2n})\in\mathbb Z_+^{2n}$, of order ${\rm ord}(\gamma):=\gamma_1+\cdots+\gamma_{2n}$ are given by the formulas \[ D^\gamma:=\frac{\partial^{\gamma_1}}{\partial x_1^{\gamma_1}}\circ\cdots\circ \frac{\partial^{\gamma_{2n}}}{\partial x_{2n}^{\gamma_{2n}}}. \]
Further, for a $C^k$ $B$-valued $(p,q)$-form $\eta$ on $U$, \[
\eta(z,\bar{z})=\sum_{|\alpha|=p,|\beta|=q}f_{\alpha,\beta}(z,\bar{z})dz_\alpha\wedge d\bar{z}_\beta, \]
and a subset $W\subset U$ we define \begin{equation}\label{norms} \begin{array}{l} \displaystyle
\|\eta\|_{k,W,B}:=\sum_{{\rm ord}(\gamma)\le k, |\alpha|=p,|\beta|=q}\left(\sup_{z\in W}\|D^\gamma f_{\alpha,\beta}(z,\bar{z})\|_B\right)\quad\text{and}\\ \\ \displaystyle
\|\eta\|_{k,W,B}':=\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\left\{\sum_{{\rm ord}(\gamma)\le k, |\alpha|=p,|\beta|=q}\left(\sup_{z\in W}\left|g\bigl(D^\gamma f_{\alpha,\beta}(z,\bar{z})\bigr)\right|\right)\right\}. \end{array} \end{equation} One easily shows that \begin{equation}\label{equiv}
\frac{1}{c_{p,q,k,n}}\|\eta\|_{k,W,B}\le \|\eta\|_{k,W,B}'\le\|\eta\|_{k,W,B}, \end{equation} where $c_{p,q,k,n}$ is the cardinality of the set of indices of sums in \eqref{norms}.
By $\hat{\Lambda}^{p,q}(W,B)$ we denote the space of $C^\infty$ $B$-valued $(p,q)$-forms $\eta$ on $U$ such that $\|\eta\|_{k,W,B}<\infty$ for all $k \geqslant 0$. In a standard way one proves that space $\hat{\Lambda}^{p,q}(U,B)$ is complete in the Fr\'{e}chet topology determined by norms $\{\|\cdot\|_{k,U,B}\}_{k\in\mathbb Z_+}$ (cf. \cite[Th.\,7.17]{R}).
Now, let us fix a finite family of coordinate charts $(U_j,\varphi_j)$ on $X$ such that $\mathcal U=(U_j)$ forms a finite open cover of an open neighbourhood of $\bar{D}$ and each $\varphi_j$ maps a neighbourhood of $\bar{U}_j$ biholomorphically onto a bounded domain of $\mathbb C^n$. Let $\pi: E\rightarrow X$ be a holomorphic Banach vector bundle with fibre $B$. Using fixed trivializations $\psi_j:E\rightarrow \bar{U}_j\times B$ of $E$ over $\bar{U}_j$ and the holomorphic coordinates on $U_j$ pulled back by $\varphi_j$ from $\mathbb C^n$, we define spaces $\hat{\Lambda}^{p,q}(W,E)$, $W\subset U_j\cap D$, of $C^\infty$ $E$-valued $(p,q)$-forms on $U_j\cap D$ as pullbacks of spaces
$\hat{\Lambda}^{p,q}(\varphi_j(W),B)$. Seminorms on $\hat{\Lambda}^{p,q}(W,E)$ obtained by pullbacks of seminorms $\|\cdot\|_{k,\varphi_j(W),B}$ are denoted by $\|\cdot\|_{k,W,E}$. Finally, we equip the space $\Lambda^{p,q}(\bar{D},E):=\Lambda^{p,q}(X,E)|_{\bar{D}}$ of $C^\infty$ $E$-valued forms on $\bar{D}$ with topology $\tau_{p,q}=\tau_{p,q}(E)$ defined by the sequence of norms $\|\cdot\|_{k,\bar D,E}$, $k\ge 0$, \[
\|\eta\|_{k,\bar D,E}:=\sum_j \|\eta|_{U_j\cap D}\|_{k,U_j\cap D,E},\quad \eta\in \Lambda^{p,q}(\bar{D},E). \] Using, e.g., the Hestenes extension theorem \cite{He}, one checks easily that $\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}\bigr)$ is a Fr\'{e}chet space and that topology $\tau_{p,q}$ is independent of the choice of coordinate charts $(U_j,\varphi_j)$ and trivializations $\psi_j$ as above.
If in the above construction we take pullbacks of norms $\|\cdot\|_{k,\varphi_j(U_j\cap D),B}'$, denoted by $\|\cdot\|_{k,U_j\cap D,E}'$, then due to \eqref{equiv} the sequence of norms $\|\cdot\|_{k,\bar D,E}'$, $k\ge 0$, \[
\|\eta\|_{k,\bar D,E}':=\sum_j \|\eta|_{U_j\cap D}\|_{k,U_j\cap D,E}',\quad \eta\in \Lambda^{p,q}(\bar{D},E), \] will produce the same topology on $\Lambda^{p,q}(\bar{D},E)$.
By our definitions, the standard operator $$\bar{\partial}:\bigl(\Lambda^{p,q}(\bar D,E),\tau_{p,q}\bigr) \rightarrow \bigl(\Lambda^{p,q+1}(\bar D,E),\tau_{p,q+1}\bigr)$$ is continuous. Hence, $\mathcal Z^{p,q}(\bar{D},E) \subset \Lambda^{p,q}(\bar{D},E)$ is a closed subspace.
\subsection{Proof of Theorem \ref{thm2}} \
{\bf A.} First we prove part (1) of the theorem for the trivial bundle $E=X\times B$, where $B$ is a complex Banach space. As the required subspace $\mathcal A\subset\mathcal O(X,E)^m$ we will take the space of constant maps $X\rightarrow B^m$ (naturally identified with $B^m$) and as the set $S$ a point of $D$. Then statement (a) of the theorem is obvious.
Let us show that there exist continuous linear maps $$ G_B:\Lambda^{p,q}(\bar{D},E) \rightarrow \Lambda^{p,q-1}(\bar{D},E), $$ $$
H_B:\Lambda^{p,q}(\bar{D},E) \rightarrow \left\{\sum_{i=1}^m f_i\cdot\chi_i:(f_1,\dots,f_m)\in \mathcal A|_{\bar{D}}\right\}\subset \mathcal Z^{p,q}(\bar{D},E) $$ such that \begin{equation} \label{id_id0} \omega=\bar{\partial}G_B(\omega)+H_B(\omega)\quad\text{for all}\quad \omega \in \mathcal Z^{p,q}(\bar{D},E). \end{equation} Then \[
\bar{\partial}G_B\oplus H_B:\mathcal Z^{p,q}(\bar{D}, E) \rightarrow \mathcal B^{p,q}(\bar D, E)\oplus \underbrace{(B\otimes_{\mathbb C}\mathcal H^{p,q}(\bar{D}))}_{\cong \mathcal A|_{\bar{D}}} \] is an isomorphism of the corresponding Fr\'{e}chet spaces. By the definition its inverse coincides with the operator $L$ which completes the proof of the theorem in this case.
Indeed, for $B=\mathbb C$ existence of the operators $G_{\mathbb C}$ and $H_{\mathbb C}$ is proved in \cite[Ch.~III.1]{FK} (in the terminology of \cite{FK}, $G_{\mathbb C}:=\bar{\partial}^*N$, where $N$ is the ``$\bar{\partial}$-Neumann operator'' and $H_{\mathbb C}$ is the ``orthogonal projection'' onto $\mathcal H^{p,q}(\bar{D})$). Their continuity in the corresponding Fr\'{e}chet topologies follows from \cite[Th.~3.1.14]{FK} and the Sobolev embedding theorem.
In the case of the general bundle $E=X \times B$, first we define the required operators on the (algebraic) symmetric tensor product $B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D})\subset \Lambda^{p,q}(\bar{D},E)$ by the formulas \[ G_B:={\rm Id}_B\otimes G_{\mathbb C},\qquad H_B:={\rm Id}_B\otimes H_{\mathbb C}, \] where ${\rm Id}_B: B\rightarrow B$ is the identity operator. If $\omega \in B \otimes \Lambda^{p,q}(\bar{D})$, then due to the continuity of the scalar operators $G_{\mathbb C}$ and $H_{\mathbb C}$ we have, for all $k\ge 0$ and the corresponding norms, \[ \begin{array}{l} \displaystyle
\left\|G_B(\omega)\right\|_{k,\bar{D},E}':=\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\|\bigl(g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}\bigr)\bigl(G_B(\omega)\bigr)\|_{k,\bar{D},X\times\mathbb C}
\\ \displaystyle
=\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\left\|G_{\mathbb C}\left(\bigl(g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}\bigr)(\omega)\right)\right\|_{k,\bar{D},X\times\mathbb C}
\\ \displaystyle
\le M \cdot\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\|\bigl(g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}\bigr)(\omega)\|_{k+n+1,\bar{D},X\times\mathbb C}=M\cdot\|\omega\|_{k+n+1,\bar{D},E}' \end{array} \] and, similarly, \[
\|H_B(\omega)\|_{k,\bar{D},E}'\le N \cdot \|\omega\|_{k,\bar{D},E}', \] where $M$ and $N$ are some constants independent of $\omega$ (but depending on $k,n,D$ and the data in definitions of the above norms). \begin{remark}\label{rem4.1} {\rm The shift of index in norms of inequalities for $G_B(\omega)$ results from the fact that in \cite[Th.~3.1.14]{FK} one considers $G_{\mathbb C}$ as a continuous operator between the corresponding Sobolev spaces $W^k$ and therefore to switch to the case of our norms we must apply the Sobolev embedding theorem. On the other hand, the operator $H_{\mathbb C}$ is defined by the inner product with elements of a basis of $\mathcal H^{p,q}(\bar{D})$ and so its norm as an operator acting in $C^k$ spaces can be estimated directly without involving the Sobolev norms. } \end{remark}
The above norm estimates show that linear operators $G_B: \bigl(B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D}),\tau_{p,q}\bigr)\rightarrow \bigl(B \otimes_{\mathbb C} \Lambda^{p,q-1}(\bar{D}),\tau_{p,q-1}\bigr)$ and $H_B: \bigl(B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D}),\tau_{p,q}\bigr)\rightarrow (B \otimes_{\mathbb C} \mathcal H^{p,q}(\bar{D}) ,\tau_{p,q})$ are uniformly continuous. Since $B \otimes \Lambda^{p,q}(\bar{D})$ is dense in $\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}\bigr)$ (this can be easily seen using, e.g.,~approximation of local coefficients of forms in $\Lambda^{p,q}(\bar{D},E)$ by their Taylor polynomials and then patching these approximations together by suitable partitions of unity), the latter implies that $G_B$ and $H_B$ can be extended to continuous operators on $\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}\bigr)$ with ranges in $\Lambda^{p,q-1}(\bar{D},E)$ and $B \otimes_{\mathbb C} \mathcal H^{p,q}(\bar{D})$, respectively. We retain the same symbols for the extended operators.
Let us show that so defined operators satisfy identity \eqref{id_id0}.
In fact, for each $g\in B^*$ the linear map $g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}: B\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D})\rightarrow \Lambda^{p,q}(\bar{D})$ is uniformly continuous in the corresponding Fr\'{e}chet topologies and therefore is extended to a linear continuous map \begin{equation} \label{g_op} \hat g_{p,q}: \Lambda^{p,q}(\bar{D},E)\rightarrow \Lambda^{p,q}(\bar{D}). \end{equation} Clearly, \[ \hat g_{p,q}\circ\bar{\partial}=\bar{\partial}\circ\hat g_{p,q-1},\quad \hat g_{p,q-1}\circ G_B=G_{\mathbb C}\circ\hat g_{p,q}\quad\text{and}\quad \hat g_{p,q}\circ H_B=H_{\mathbb C}\circ\hat g_{p,q}. \] In particular, for $\omega\in \mathcal Z^{p,q}(\bar{D},E)$ we have $\hat g_{p,q}(\omega)\in\mathcal Z^{p,q}(\bar{D})$; hence, due to the previous identities and since \eqref{id_id0} is valid for $B=\mathbb C$, \[ \hat g_{p,q}\bigl(\bar{\partial}G_B(\omega)+H_B(\omega)\bigr)=\bar{\partial}G_{\mathbb C}(\hat g_{p,q}(\omega))+H_{\mathbb C}(\hat g_{p,q}(\omega))=\hat g_{p,q}(\omega)\quad\text{for all}\quad g\in B^*. \] It is easily seen that the family of linear maps $\{\hat g_{p,q}\, :\, g\in B^*\}$ separates the points of $\Lambda^{p,q}(\bar{D},E)$. Therefore the latter implies that $\bar{\partial}G_B(\omega)+H_B(\omega)=\omega$ for all $\omega\in \mathcal Z^{p,q}(\bar{D},E)$, as required.
\textbf{B.~}Now, we consider the case of an arbitrary holomorphic Banach vector bundle $E \in \Sigma_0(X)$. By the definition, there exists a holomorphic Banach vector bundle $E_1\rightarrow X$ such that $E_2:=E\oplus E_1$ is holomorphically trivial Banach vector bundle with a fibre $B_2$. By $i:E\rightarrow E_2$ and $r:E_2\rightarrow E$, $r\circ i:= {\rm Id}_E$, we denote the corresponding bundle homomorphisms. In a natural way, they induce continuous linear maps of the corresponding Fr\'{e}chet spaces: $$ \hat{i}^{p,q}:\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}(E)\bigr) \rightarrow \bigl(\Lambda^{p,q}(\bar{D},E_2),\tau_{p,q}(E_2)\bigr), $$ $$ \hat{r}^{p,q}: \bigl(\Lambda^{p,q}(\bar{D},E_2),\tau_{p,q}(E_2)\bigr) \rightarrow \bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}(E)\bigr) $$ such that $\hat r^{p,q}\circ\hat i^{p,q}={\rm Id}_{\Lambda^{p,q}(\bar{D},E)}$. Moreover, $\hat i^{p,q}$ and $\hat r^{p,q}$ commute with the corresponding $\bar{\partial}$ operators and therefore $\hat i^{p,q}$ embeds $\mathcal Z^{p,q}(\bar{D},E)$ as a closed subspace into $\mathcal Z^{p,q}(\bar{D},E_2)$ and $\hat r^{p,q}$ maps $\mathcal Z^{p,q}(\bar{D},E_2)$ surjectively onto $\mathcal Z^{p,q}(\bar{D},E)$.
Next, we define continuous linear operators $$ G_E := \hat{r}^{p,q-1} \circ G_{B_2} \circ \hat{i}^{p,q}:\Lambda^{p,q}(\bar{D},E)\rightarrow \Lambda^{p,q-1}(\bar{D},E), $$ \[ H_E := \hat{r}^{p,q} \circ H_{B_2} \circ \hat{i}^{p,q}:\Lambda^{p,q}(\bar{D},E)\rightarrow \mathcal Z^{p,q}(\bar{D}, E), \] where $G_{B_2}$ and $H_{B_2}$ are operators constructed in part {\bf A} for the trivial bundle $E_2:=X\times B_2$. Due to identity \eqref{id_id0} for these operators we have \begin{equation}\label{e4.6} \omega=\bar{\partial}G_E(\omega)+H_E(\omega)\quad\text{for all}\quad \omega \in \mathcal Z^{p,q}(\bar{D},E). \end{equation} This implies (since $H_{B_2}$ maps $\bar{\partial}$-exact forms to $0$) \[ H_E(\omega)=H_E^2(\omega)\quad\text{for all}\quad \omega \in \mathcal Z^{p,q}(\bar{D},E). \] Thus $H_E(\mathcal Z^{p,q}(\bar{D},E))$ is a closed complemented subspace of $\mathcal Z^{p,q}(\bar{D},E)$ and \eqref{e4.6} shows that $\mathcal Z^{p,q}(\bar{D},E)=\mathcal B^{p,q}(\bar{D},E)\oplus H_E(\mathcal Z^{p,q}(\bar{D},E))$.
Further, since each $\eta\in B_2\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D})$ is uniquely presented as $\eta=\sum_{i=1}^m c_i(\eta)\cdot\chi_i$ for some $c_i(\eta)\in B_2$, by the open mapping theorem the correspondence $\eta\mapsto (c_1(\eta),\dots,c_m(\eta))$ determines an isomorphism of the Fr\'{e}chet spaces $c: \bigl(B_2\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D}), \tau_{p,q}(E_2)\bigr)\rightarrow B_2^m$. In what follows we regard $B_2$ as the subset of $\mathcal O(X,E_2)$ consisting of constant sections. Also, we equip the space $\mathcal O(X,E)^m$ of holomorphic sections of $\oplus^m E$ with topology of uniform convergence on compact subsets of $X$.
We have the following sequence of continuous linear maps \begin{equation}\label{e4.7} \mathcal O(X,E)^m\stackrel{t}{\longrightarrow}\Lambda^{p,q}(\bar{D},E)\stackrel{H_{B_2}\circ\hat i^{p,q}}{\longrightarrow} B_2\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D})\stackrel{c}{\longrightarrow} B_2^m\stackrel{\hat r}{\longrightarrow} \mathcal O(X,E)^m, \end{equation}
where $t(f_1,\dots,f_m):=\sum_{i=1}^m f_i|_{\bar{D}}\cdot\chi_i$, $(f_1,\dots, f_m)\in \mathcal O(X,E)^m$, and $\hat r:=\oplus^m(\hat r^{0,0}|_{B_2})$.
Let us define the required space $\mathcal A\subset \mathcal O(X,E)^m$ of the theorem as the image of $\mathcal Z^{p,q}(\bar{D},E)$ under the map $P:=\hat r\circ c\circ H_{B_2}\circ\hat i^{p,q}$.
By our definition, $t\circ P=H_{E}$ on $\mathcal Z^{p,q}(\bar{D},E)$, and since $H_E$ is the identity map on $H_E(\mathcal Z^{p,q}(\bar{D},E))$ and zero on $\mathcal B^{p,q}(\bar{D},E)$, the subspace $\mathcal A\subset \mathcal O(X,E)^m$ is closed and $P: H_E(\mathcal Z^{p,q}(\bar{D},E))\to\mathcal A$ is an isomorphism with inverse $t|_{\mathcal A}$. Therefore, $P\circ t: \mathcal O(X,E)^m\rightarrow \mathcal O(X,E)^m$ is a projection onto $\mathcal A$, that is, $\mathcal A\subset \mathcal O(X,E)^m$ is a complemented subspace. Also, the map $L:={\rm Id}_{\mathcal B^{p,q}(\bar D,E)}\oplus t|_{\mathcal A}: \mathcal B^{p,q}(\bar{D},E)\oplus\mathcal A\rightarrow \mathcal Z^{p,q}(\bar{D},E)$ is an isomorphism of the Fr\'{e}chet spaces. Note that ${\rm Ker}\, (P\circ t)$ consists of all $(f_1,\dots, f_m)\in\mathcal O(X,E)^m$ such that $t(f_1,\dots, f_m)\in\mathcal B^{p,q}(\bar D,E)$.
Now, to define the required set $S\subset\bar D$ of the theorem and to prove statement (a) let us prove, first, the following result. \begin{lemma}\label{lem4.2} The restriction map $R_{\bar D}:\mathcal O(X,E)^m\rightarrow C(\bar{D},E)^m$ to $\bar{D}$ maps $\mathcal A$ isomorphically onto a closed
subspace of the space $A(\bar{D},E)^m$, where $A(\bar D,E)$ is the closure in $C(\bar D,E)$ of the trace space $\mathcal O(X,E)|_{\bar D}$. \end{lemma} \begin{proof} Indeed, map $t$ in \eqref{e4.7} can be factorized as $t=\tilde t\circ R_{\bar D}$ for a continuous linear map \[ \tilde t: A(\bar{D},E)^m\rightarrow \bar{\Lambda}^{p,q}(\bar{D},E),\quad \tilde t(g_1,\dots,g_m):=\sum_{i=1}^m g_i\cdot\chi_i,\quad (g_1,\dots, g_m)\in A(\bar D,E)^m, \]
where $\bar{\Lambda}^{p,q}(\bar{D},E)$ is the completion of the normed space $\bigl(\Lambda^{p,q}(\bar{D},E), \|\cdot\|_{0,\bar{D},E}\bigr)$.
\noindent Also, by our construction, see part {\bf A} above, map \[
H_{B_2}: \bigl(\Lambda^{p,q}(\bar D, E_2),\|\cdot\|_{0,\bar{D},E_2}\bigr)\rightarrow \bigl(B_2\otimes_{\mathbb C}\mathcal H^{p,q}(\bar D), \|\cdot\|_{0,\bar{D},E_2}\bigr) \] is continuous and, hence, admits a continuous extension \[
\bar{H}_{B_2}: \bar{\Lambda}^{p,q}(\bar D, E_2)\rightarrow \bigl(B_2\otimes_{\mathbb C}\mathcal H^{p,q}(\bar D), \|\cdot\|_{0,\bar{D},E_2}\bigr); \]
here $\bar{\Lambda}^{p,q}(\bar D, E_2)$ is the completion of the space $\bigl(\Lambda^{p,q}(\bar D, E_2),\|\cdot\|_{0,\bar{D},E_2}\bigr)$.
Then the composite map $c\circ \bar{H}_{B_2}\circ\hat i^{p,q}\circ\tilde t: A(\bar{D},E)^m\rightarrow B_2^m$ is continuous with respect to the corresponding norms $\|\cdot\|_{0,\bar{D},E^m}$ and $\|\cdot\|_{0,\bar{D},E_2^m}$ on $A(\bar{D},E)^m$ and $B_2^m$. (Here for a Banach vector bundle $V\rightarrow X$ we set $V^m:=\oplus^m V$.) Note that topologies defined by these norms coincide with topology of uniform convergence for $A(\bar{D},E)^m$ and topology defined by the Banach norm for $B_2^m$. Therefore, if $\{F_k\}_{k\in\mathbb N}\subset\mathcal A|_{\bar D}$ is a Cauchy sequence, then the sequence $\{b_k:=(c\circ \bar{H}_{B_2}\circ\hat i^{p,q}\circ\tilde t)(F_k)\}_{k\in\mathbb N}$ converges in $B_2^m$, and, hence, $\{\hat r(b_k)\}_{k\in\mathbb N}$ converges in $\mathcal O(X,E)^m$ (in topology of uniform convergence on compact subsets of $X$). Since $F_k=(R_{\bar D}\circ \hat r)(b_k)$ for all $k$ and $\mathcal A\subset\mathcal O(X,E)^m$ is closed, $\{F_k\}_{k\in\mathbb N}$ converges in $A(\bar{D},E)^m$ to an element of $\mathcal A|_{\bar D}$, as required. Thus, by the open mapping theorem, $R_{\bar D}:\mathcal A\rightarrow\mathcal A|_{\bar D}$ is an isomorphism of the corresponding Fr\'{e}chet spaces.
\end{proof}
Let $D'\supset\bar D$ be a relatively compact subdomain of $X$. We equip the space $C(\bar{D'},E)$ with a norm $\|\cdot\|_{0,\bar{D'},E}$ defined similarly to $\|\cdot\|_{0,\bar{D},E}$ (see subsection~4.1). Topology defined by this norm is topology of uniform convergence on $\bar{D'}$, and $\bigl(C(\bar{D'},E),\|\cdot\|_{0,\bar{D'},E}\bigr)$ is a Banach space. We define $A(\bar{D'},E)$ to be the closure in $C(\bar{D'},E)$ of the trace space
$\mathcal O(X,E)|_{\bar{D'}}$. We have the following sequence of continuous linear maps (induced by subsequent restrictions $X$ to $\bar{D'}$ and $\bar{D'}$ to $\bar{D}$) \[ \mathcal O(X,E)^m\stackrel{R_{\bar{D'}}}{\longrightarrow}A(\bar{D'},E)^m\stackrel{R_{\bar{D}}^{\bar{D'}}}{\longrightarrow}A(\bar D,E)^m \] such that $R_{\bar D}=R_{\bar{D}}^{\bar{D'}}\circ R_{\bar{D'}}$.
As a straightforward corollary of Lemma \ref{lem4.2} we obtain \begin{lemma}\label{le4.3}
$\mathcal A|_{\bar{D'}}$ is a closed subspace of $A(\bar{D'},E)^m$ and $R_{\bar{D}}^{\bar{D'}}$ maps $\mathcal A|_{\bar{D'}}$ isomorphically onto $\mathcal A|_{\bar{D}}$. \end{lemma}
In particular, this lemma implies that there exists a constant $C>0$ such that \begin{equation}\label{e4.8}
\| R_{\bar{D}}^{\bar{D'}}(v)\|_{0,\bar D,E^m}\ge C \|v\|_{0,\bar{D'},E^m}\quad\text{for all}\quad v\in \mathcal A|_{\bar{D'}}. \end{equation}
Let us fix a complete (smooth) Hermitian metric on $X$ and with its help define the path metric $d: X\times X\rightarrow\mathbb R_+$. For a fixed $\varepsilon>0$ by $S_\varepsilon\subset\bar D$ we denote an $\varepsilon$-net in $\bar D$ with respect to the metric $d$. \begin{proposition}\label{prop4.4}
For a sufficiently small $\varepsilon$ the restriction map $R_{S_\varepsilon}:A(\bar D,E)^m\rightarrow C(S_\varepsilon,E)^m$ to $S_\varepsilon$ maps $\mathcal A|_{\bar D}$ isomorphically onto a closed
subspace of $C(S_\varepsilon, E)^m$. \end{proposition} \begin{proof} If $v\in A(\bar{D'},E)^m\, (=A(\bar{D'}, E^m))$, then according to the Cauchy estimates for derivatives of bounded holomorphic functions we have for a constant $C'>0$ depending on $D$, $D'$ and definitions of the corresponding norms \[
\|\partial v\|_{1,\bar{D},E^m}\le C'\|v\|_{0,\bar{D'},E^m}. \] This, the definition of the metric $d$ and the intermediate-value inequality imply that there exists a constant $C''>0$ (independent of $v$) such that for all $\varepsilon>0$ and $x_1,x_2\in\bar{D}$ satisfying $d(x_1,x_2)\le\varepsilon$, \begin{equation}\label{e4.9}
\max_{i=1,2}\|v(x_1)-v(x_2)\|_{0,\{x_i\},E^m}\le C''\cdot\varepsilon\cdot \|v\|_{0,\bar{D'},E^m}. \end{equation} Let us choose $\varepsilon$ so that \[ 0<\varepsilon\le\frac{C}{2C''}, \]
where $C$ is defined in \eqref{e4.8}. If $v\in \mathcal A$ is such that $\|v|_{\bar D}\|_{0,\bar{D},E^m}=1$, then according to \eqref{e4.8}, $\|v|_{\bar{D'}}\|_{0,\bar{D'},E^m}\le\frac{1}{C}$, and \eqref{e4.9} implies that
$\|v|_{S_{\varepsilon}}\|_{0,S_{\varepsilon},E^m}\ge\frac 12$. Hence, we have \begin{equation}\label{e4.10}
\|R_{S_\varepsilon}(v)\|_{0,S_{\varepsilon},E^m}\ge\frac 12\|v\|_{0,\bar D,E^m}\quad\text{for all}\quad v\in\mathcal A|_{\bar{D}}. \end{equation}
This shows that $R_{S_\varepsilon}$ maps $\mathcal A|_{\bar D}$ isomorphically onto a closed subspace of $C(S_\varepsilon, E)^m$. \end{proof}
Taking $S:=S_\varepsilon$ in statement (a) of the theorem with $\varepsilon$ as in Proposition \ref{prop4.4} we obtain the required result; this completes the proof of part (1) of the theorem.
(2) Suppose $E=f^*E'$, where $f:X\rightarrow Y$ is a holomorphic map into a Stein manifold $Y$ and $E'$ is a holomorphic Banach vector bundle on $Y$ with fibre $B$ such that the group $GL(B)$ is contractible. The latter implies that $E'$ is isomorphic to the trivial bundle $Y \times B$ in the category of topological Banach vector bundles. In turn, since $Y$ is Stein, the Oka principle for holomorphic Banach vector bundles, see \cite{Bun}, implies that $E'$ is holomorphically isomorphic to $Y \times B$ as well, and so $E$ is holomorphically isomorphic to $X \times B$. Thus, the required result follows from part (1) of the theorem applied to the trivial bundle $X \times B$.
The proof of the theorem is complete.
\section{Proof of Theorem \ref{thm6}}
The isomorphism in (1) is induced by the map associating to each $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ a linear functional $$ \mathcal Z^{p,q}(\bar{D},E) \ni \theta \mapsto J_E(\theta,\xi), $$ where $$ J_E:\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C $$ is a continuous bilinear form with respect to the product topology $\tau_{p,q}(E)\times\tau_{n-p,n-q}(E^*)$ on $\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*)$, see subsection~4.1, defined as follows.
Let $V\rightarrow X$ be a holomorphic Banach vector bundle. Consider a continuous bundle homomorphism $${\rm Tr}_E(V): E\otimes E^*\otimes V \rightarrow V$$ sending a vector $e_x\otimes e_x^*\otimes v_x$ in the fibre of $E\otimes E^*\otimes V$ over $x\in X$ to the vector $e_x^*(e_x) \cdot v_x$ in the fibre of $V$ over $x$ (here $e_x$, $e_x^*$ and $v_x$ are vectors in fibres of $E$, $E^*$ and $V$ over $x$) and then extended by linearity.
To define $J_E$ we take \[ V_{p,q,r,s}:=\biggl(\bigl(\wedge^p T^*\bigr)\wedge (\wedge^q \overline{T}^*) \biggr) \otimes \biggl(\bigl(\wedge^r T^* \bigr)\wedge (\wedge^s \overline{T}^*)\biggr). \] By definition, forms in $\Lambda^{p,q}(\bar D, E)$ are $C^\infty$ sections over $\bar D$ of bundle $E\otimes\bigl(\wedge^p T^*\bigr)\wedge (\wedge^q \overline{T}^*)$, forms in $\Lambda^{r,s}(\bar D, E^*)$ are $C^\infty$ sections over $\bar D$ of bundle $E^* \otimes\bigl(\wedge^r T^* \bigr)\wedge (\wedge^s \overline{T}^*)$. Therefore, if $\theta \in \Lambda^{p,q}(\bar D, E)$ and $\xi \in \Lambda^{r,s}(\bar D, E^*)$, then $\theta \otimes \xi$ is a $C^\infty$ section over $\bar D$ of bundle \begin{equation*} \biggl( E\otimes\bigl(\wedge^p T^*\bigr) \wedge(\wedge^q \overline{T}^*) \biggr)\otimes \biggl( E^*\otimes\bigl(\wedge^r T^* \bigr)\wedge (\wedge^s \overline{T})^*\biggr) \cong E\otimes E^*\otimes V. \end{equation*} In turn, ${\rm Tr}_{E}(V_{p,q,r,s})(\theta \otimes \xi)$ is a $C^\infty$ section over $\bar D$ of bundle $V_{p,q,r,s}$. Let $\Lambda_{p,q,r,s}$ be the canonical quotient homomorphism of bundles $V \rightarrow \bigl(\wedge^{p+r} T^*\bigr) \wedge \bigl(\wedge^{q+s} \overline{T}^*\bigr)$ (obtaining by replacing $\otimes$ by $\wedge$ in the definition of $V$). Assuming that $r=n-p$, $s=n-q$, we set \begin{equation}\label{je} J_E(\theta,\xi):=\int_{D}\Lambda_{p,q,n-p,n-q}\biggl({\rm Tr}_{E}(V_{p,q,n-p,n-q})(\theta \otimes \xi)\biggr) \end{equation}
(by definition, the integrand is in $\Lambda^{n,n}(\bar{D})$, and so the integral is well defined). The construction of $J_E$ and the definition of norms $\|\cdot\|_{0,\bar{D},E}$ and $\|\cdot\|_{0,\bar{D},E^*}$ given in subsection~4.1 imply immediately \begin{lemma} \label{bddlem} There is a constant $C>0$ such that \[
|J_E(\theta,\xi)| \leqslant C\cdot\|\theta\|_{0,\bar{D},E}\cdot \|\xi\|_{0,\bar{D},E^*}\quad\text{for all}\quad \theta \in \Lambda^{p,q}(\bar D,E),\ \xi \in \Lambda^{n-p,n-q}(\bar{D},E^*). \] In particular, $J_E:\bigl(\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*), \tau_{p,q}(E)\times\tau_{n-p,n-q}(E^*)\bigr) \rightarrow \mathbb C$ is a continuous bilinear form. \end{lemma}
We are in position to prove the theorem.
\textbf{I.}~First, we prove the result for the case of the trivial bundle $E=X \times B$, where $B$ is a complex Banach space. Let us prove (1). \begin{lemma}\label{stokes} $J_E=0$ on $\bigl(\mathcal Z^{p,q}(\bar{D},E)\times \mathcal B_0^{n-p,n-q}(\bar{D},E^*)\bigr)\bigcup\bigl(\mathcal B^{p,q}(\bar{D},E)\times \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)\bigr)$. \end{lemma}
\begin{proof} For $E=X\times\mathbb C$ (the scalar case) the required result is proved in \cite[Ch.V.1]{FK}. The proof in the general case repeats word-for-word the previous one and is based on the following identities, the first one valid for all $\phi \in \Lambda^{p,q}(\bar{D},E), \psi \in \Lambda^{r,s}(\bar{D},E^*)$, and the second one for all $\phi \in \Lambda^{p,q}(\bar{D},E), \psi \in \Lambda^{n-p,s}(\bar{D},E^*)$ with $q+s=n-1$: $$ \begin{array}{l} \displaystyle \bar{\partial} \Lambda_{p,q,r,s}\biggl({\rm Tr}_{E}(V_{p,q,r,s})(\phi \otimes \psi)\biggr)= \Lambda_{p,q+1,r,s}\biggl({\rm Tr}_{E}(V_{p,q+1,r,s})(\bar{\partial}\phi \otimes \psi)\biggr)
\\ \displaystyle +(-1)^{p+q}\Lambda_{p,q,r,s+1}\biggl({\rm Tr}_{E}(V_{p,q,r,s+1})(\phi \otimes \bar{\partial}\psi)\biggr), \end{array} $$ $$ \int_{\bar{D}} \bar{\partial} \Lambda_{p,q,n-p,s}\biggl({\rm Tr}_{E}(V_{p,q,n-p,s})(\phi \otimes \psi)\biggr) =\int_{\partial D}
\Lambda_{p,q,n-p,s}\biggl({\rm Tr}_{E}(V_{p,q,n-p,s})(\phi \otimes \psi)\biggr). $$ (The first identity is easily verified in local coordinates. The second one is the Stokes theorem.) \end{proof}
Lemmas \ref{stokes} and \ref{bddlem} and the fact that $\mathcal B^{p,q}(\bar{D},E)\subset \mathcal Z^{p,q}(\bar{D},E)$ is a closed subspace imply that $J_E$ descends to a bilinear form \begin{equation} \label{L2} \mathcal J_E: H^{p,q}(\bar{D},E) \times H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C \end{equation} such that $S_E(h_0):=\mathcal J_E(\cdot,h_0)\in (H^{p,q}(\bar{D},E))^*$ for each $h_0\in H_0^{n-p,n-q}(\bar{D},E^*)$.
Let us prove that the linear map \begin{equation} \label{cohmap} S_E:H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow (H^{p,q}(\bar{D},E))^* \end{equation} is injective and surjective. Along the lines of the proof, we will show that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$, which will prove assertion (1) in this case.
Thus, we must prove:
a) ({\em surjectivity}) given an element $F \in (H^{p,q}(\bar{D},E))^*$, there exists $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ such that $J_E(\theta,\xi)=F([\theta])$ for all $\theta \in \mathcal Z^{p,q}(\bar{D},E)$; here $[\theta]\in H^{p,q}(\bar{D},E)$ denotes the cohomology class of $\theta$.
b) ({\em injectivity}) if $J_E(\theta,\xi)=0$ for all $\theta \in \mathcal Z^{p,q}(\bar{D},E)$, then $\xi \in \mathcal B_0^{n-p,n-q}(\bar{D},E^*)$.
First, let us prove a). Recall that we fix forms $\{\gamma_i\}_{i=1}^m \subset \mathcal Z_0^{n-p,n-q}(\bar{D})$ such that $\int_D \chi_i \wedge \gamma_j=\delta_{ij}$ - the Kronecker delta; here $\{\chi_i\}_{i=1}^m$ is the basis of $\mathcal H^{p,q}(\bar{D})$.
Due to \eqref{id_id0} of subsection~4.2, each form $\theta \in \mathcal Z^{p,q}(\bar{D},E)$ can be uniquely presented as $\theta=\bar{\partial} G_B(\theta)+H_B(\theta)$, where $H_B(\theta)=\sum_{i=1}^m b_i \cdot \chi_i$ and all $b_i \in B$. Therefore the correspondence $[\theta]\mapsto (b_i)_{i=1}^m$ determines an isomorphism of Fr\'{e}chet spaces $H^{p,q}(\bar{D},E)\cong B^m$. Under this isomorphism, $(H^{p,q}(\bar{D},E))^* \cong (B^*)^m$ and so each $F\in (H^{p,q}(\bar{D},E))^*$ has a form $F=(b_i^*)_{i=1}^m\in (B^*)^m$ for some $b_i^*\in B^*$, and $F([\theta]):=\sum_{i=1}^m b_i^*(b_i)$.
Now, we set $\xi:=\sum_{i=1}^m b_i^*\cdot\gamma_i \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$. Then, by the definition of $J_E$ we have \[ J_E(\theta,\xi)=J_E\left(\sum_{i=1}^m b_i\cdot\chi_i,\sum_{i=1}^m b_i^* \cdot \gamma_i\right)=\sum_{i,j=1}^mb_i^*(b_j)\int_D \chi_i \wedge \gamma_j= \sum_{i=1}^m b_i^*(b_i)=F([\theta]), \] as required.
Next, let us prove b). We construct a continuous linear operator \begin{equation}\label{qe} Q_{E}:\Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \Lambda_0^{n-p,n-q-1}(\bar{D},E^*) \end{equation}
such that if $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ and $J_E(\cdot,\xi)=0$, then $\xi=\bar{\partial}(Q_{E}\, \xi)$. Clearly, existence of such an operator would imply b) and, hence, show that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$.
In case $E=X \times \mathbb C$ the required operator was constructed in \cite[Ch.V.1]{FK}: $$Q_{X \times \mathbb C}\,\psi:=\rho(-\bar{\partial}\alpha+\theta); $$ here $\rho$ is the defining function of $D$ and $\alpha \in \Lambda^{n-p,n-q-2}(\bar{D},E^*),\, \theta \in \Lambda^{n-p,n-q-1}(\bar{D},E^*)$ are uniquely determined by the formula $$\ast\overline{\bar{\partial} N(\ast \bar{\psi})} =: \bar{\partial}\rho \wedge \alpha +\rho \theta~(=\bar{\partial}(\rho\alpha)+\rho(-\bar{\partial}\alpha+\theta)).$$ Here $\ast$ is the Hodge star operator and $N$ is the ``$\bar{\partial}$-Neumann operator'' in the terminology of \cite[Ch.V.1]{FK}; the continuity of $Q_{X \times \mathbb C}$ in the Fr\'{e}chet topology on $\Lambda^{n-p,n-q}(\bar{D})$ follows from \cite[Th.~3.1.14]{FK} and the Sobolev embedding theorem.
In the general case, we define $Q_{E}$ using $Q_{X \times \mathbb C}$ similarly to how it was done for operators $G_B$, $H_B$ in part {\bf A} of the proof of Theorem \ref{thm2}, cf. subsection~4.2: first, we define $Q_{E}:={\rm Id}_{B^*} \otimes Q_{X \times \mathbb C}$ on the tensor product $B^* \otimes \Lambda^{n-p,n-q}(\bar{D})$. Then, using the facts that $B^* \otimes \Lambda^{n-p,n-q}(\bar{D})$ is dense in $\Lambda^{n-p,n-q}(\bar{D},E^*)$ and that in virtue of continuity of $Q_{X \times \mathbb C}$ operator $Q_{E}$ is bounded with respect to Fr\'{e}chet seminorms $\|\cdot\|'_{k,\bar{D},E^*}$ on $\Lambda^{n-p,n-q}(\bar{D},E^*)$, we extend $Q_{E}$ by continuity to $\Lambda^{n-p,n-q}(\bar{D},E^*)$. Now, we prove that the constructed operator $Q_{E}$ possesses the required properties.
Indeed, by definition, inclusion $Q_{E}\xi \in \Lambda_0^{n-p,n-q-1}(\bar{D},E^*)$ is equivalent to identity $(Q_{E}\xi)|_{\partial D}=0$. It is verified by applying to $Q_{E}\xi$ ``scalarization operators'' $\hat{g}_{n-p,n-q}:\Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \Lambda^{n-p,n-q}(\bar{D})$ (cf.~\eqref{g_op} with $g$ viewed as an element of $B^{**}$), and using that $\hat{g}_{n-p,n-q}(Q_{E}\xi)= Q_{X\times\mathbb C}\, \hat{g}_{n-p,n-q}(\xi)$ and the latter vanishes on $\partial D$. Identity $\xi=\bar{\partial}(Q_{E} \xi)$ for $\xi$ satisfying $J_E(\cdot,\xi)=0$ is also verified by this method. This completes the proof of b).
To finish the proof of assertion (1) it remains to show that $S_E$ and its inverse are continuous (see \eqref{cohmap}). Indeed, continuity of $S_E$ follows from Lemma \ref{bddlem} and the fact that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$. Continuity of the map inverse to $S_E$ follows from the open mapping theorem for linear continuous maps between Fr\'{e}chet spaces.
Let us prove part (2) of the theorem in the case of trivial bundles. According to part {\bf A} of the proof of Theorem \ref{thm2}, $\mathcal A=B^m$, i.e., consists of constant sections in
$\mathcal O(X,E)^m$. Hence, $\mathcal B:=(B^*)^m$ consists of constant sections in $\mathcal O(X,E^*)^m$. As the required set $S^*$ we take a point in $\bar D$; then the statement $\mathcal B\cong\mathcal B|_{S^*}$ is obvious. The fact that the linear map $M:\mathcal B \rightarrow H_0^{n-p,n-q}(\bar{D},E^*)$, $$
M(h_1,\dots,h_m):=\left[\sum_{i=1}^m h_i|_{\bar D} \cdot \gamma_i\right], \quad (h_1,\dots,h_m) \in \mathcal B, $$ is an isomorphism of Fr\'{e}chet spaces (i.e., statement (2)\,(b)) follows from the arguments presented in the proof of a) and b) above.
This completes the proof of the theorem for trivial bundles.
\textbf{II.~}Now we consider the case of an arbitrary holomorphic Banach vector bundle $E \in \Sigma_0(X)$. Recall that by the definition of class $\Sigma_0(X)$ there exists a holomorphic Banach vector bundle $E_1$ on $X$ such that the Whitney sum $E_2:=E \oplus E_1$ is holomorphically trivial, i.e. $E_2= X \times B_2$ for a complex Banach space $B_2$. We have the corresponding embedding and quotient homomorphisms of bundles $$ i:E \rightarrow E_2 \quad\text{and}\quad r:E_2 \rightarrow E\quad \text{such that}\quad r\circ i={\rm Id}_{E}. $$ In turn, $E_2^*=E^* \oplus E_1^*$ and we have the adjoint homomorphisms $$ i^*:E_2^* \rightarrow E^*\quad\text{and}\quad r^*:E^* \rightarrow E_2^*\quad\text{such that}\quad i^*\circ r^*={\rm Id}_{E^*}. $$ The above homomorphisms induce continuous linear maps between the corresponding Fr\'{e}chet spaces of forms $$\hat{i}^{s,t}:\Lambda^{s,t}(\bar{D},E) \rightarrow \Lambda^{s,t}(\bar{D},E_2),\qquad (\hat{i^*})^{s,t}:\Lambda^{s,t}(\bar{D},E_2^*) \rightarrow \Lambda^{s,t}(\bar{D},E^*),$$ $$\hat{r}^{s,t}:\Lambda^{s,t}(\bar{D},E_2) \rightarrow \Lambda^{s,t}(\bar{D},E),\qquad (\hat{r^*})^{s,t}:\Lambda^{s,t}(\bar{D},E^*) \rightarrow \Lambda^{s,t}(\bar{D},E_2^*).$$ Also, these maps act between the corresponding spaces $\Lambda_0^{s,t}$ of forms vanishing on $\partial D$.
First, we prove assertion (1) of the theorem. To prove that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is closed in $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$, it suffices to construct a continuous linear map $$ Q_{E}:\mathcal B_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \Lambda_0^{n-p,n-q-1}(\bar{D},E^*), $$ such that \begin{equation} \label{idQ} \bar{\partial}Q_{E}={\rm Id}_{\mathcal B_0^{n-p,n-q}(\bar{D},E^*)}. \end{equation} We define $$Q_{E}:=(\hat{i}^*)^{n-p,n-q-1} \circ Q_{E_2} \circ (\hat{r}^*)^{n-p,n-q},$$ where continuous map $Q_{E_2}:\Lambda^{n-p,n-q}(\bar{D},E_2^*) \rightarrow \Lambda_0^{n-p,n-q-1}(\bar{D},E_2^*)$ for the trivial bundle $E_2$ was constructed in part I of the proof, see \eqref{qe}. Then, since operator $\bar{\partial}$ commutes with maps $(\hat{i}^*)^{n-p,n-q-1}$ and $(\hat{r}^*)^{n-p,n-q}$, property \eqref{idQ} follows from the analogous one for $Q_{E_2}$ (see above) and in view of the identity $(\hat{i}^*)^{n-p,n-q-1} \circ (\hat{r}^*)^{n-p,n-q}={\rm Id}_{\Lambda^{n-p,n-q}(\bar{D},E^*)}$. Hence, the quotient space $H_0^{n-p,n-q}(\bar{D},E^*)$ is Fr\'{e}chet.
Further, identity $r\circ i={\rm Id}_E$ and the definition of (continuous) bilinear form $J_E$ clearly imply for all $\theta \in \mathcal Z^{p,q}(\bar{D},E)$, $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$, \begin{equation} \label{J_gen} J_E(\theta,\xi)=J_{E_2}\bigl(\hat{i}^{p,q}(\theta),(\hat{r}^*)^{n-p,n-q}(\xi)\bigr). \end{equation} In particular, by Lemma \ref{stokes} for $J_{E_2}$ and the fact that $\bar{\partial}$ commutes with $(\hat{i}^*)^{n-p,n-q-1}$ and $(\hat{r}^*)^{n-p,n-q}$ we have $J_E(\theta,\xi)=0$ if $\theta \in \mathcal B^{p,q}(\bar{D},E)$ or $\xi \in \mathcal B_0^{n-p,n-q}(\bar{D},E^*)$. Therefore, $J_E$ descends to a continuous bilinear form $$ \mathcal J_E: H^{p,q}(\bar{D},E) \times H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C. $$ As before, $\mathcal J_E$ determines a continuous linear map $S_E:H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow (H^{p,q}(\bar{D},E))^*$, \begin{equation} \label{cohmap_gen} S_E(h_0):=\mathcal J_E(\cdot, h_0),\qquad h_0\in H_0^{n-p,n-q}(\bar{D},E^*). \end{equation} Note that since maps $\hat{i}^{s,t}$, $(\hat{i}^*)^{s,t}$ and $\hat{r}^{s,t}$, $(\hat{r}^*)^{s,t}$ commute with operator $\bar{\partial}$, they descend to maps between the corresponding cohomology groups (denoted by $\bar{i}^{s,t}$, $(\bar{i}^*)^{s,t}$ and $\bar{r}^{s,t}$, $(\bar{r}^*)^{s,t}$, respectively, and similarly but with the lower index $_0$ in case of maps between $H_0$ cohomology groups).
It follows from \eqref{J_gen}, \eqref{cohmap_gen} and \eqref{cohmap} that $$ S_E=(\bar{i}^{p,q})^* \circ S_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}. $$
Now, consider the second summand in the decomposition $E_2=E\oplus E_1$. Then we have the corresponding embedding and quotient homomorphisms of bundles $$ i_1:E_1 \rightarrow E_2 \quad\text{and}\quad r_1:E_2 \rightarrow E_1\quad \text{such that}\quad r_1\circ i_1={\rm Id}_{E_1}. $$ Repeating the above arguments with $(E,i,r)$ replaced by $(E_1,i_1,r_1)$ we arrive to a similar identity for continuous linear maps between the corresponding cohomology groups $$ S_{E_1}=(\bar{i}_1^{p,q})^* \circ S_{E_2} \circ (\bar{r}_1^*)_0^{n-p,n-q}. $$ Note that the map \begin{equation} \label{cohmap2} \begin{array}{l} \bigl((\bar{i}^{p,q})^*,(\bar{i}_1^{p,q})^* \bigr) \circ S_{E_2} \circ \Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}_1^*)_0^{n-p,n-q} \bigr): \\ \\ H_0^{n-p,n-q}(\bar{D},E^*) \oplus H_0^{n-p,n-q}(\bar{D},E_1^*) \rightarrow (H^{p,q}(\bar{D},E))^* \oplus (H^{p,q}(\bar{D},E_1))^*, \end{array} \end{equation} where $\Sigma(u,v)=u+v$ ($u,v \in H_0^{n-p,n-q}(\bar{D},E_2^*)$), is an isomorphism.\\ Indeed, by the result of part I map $S_{E_2}:H_0^{n-p,n-q}(\bar{D},E_2^*) \rightarrow (H^{p,q}(\bar{D},E_2))^*$ is an isomorphism. Also, decomposition $E \oplus E_1=E_2$ implies that maps $\bigl((\bar{i}^{p,q})^*,(\bar{i}_1^{p,q})^* \bigr)$ and $\Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}_1^*)_0^{n-p,n-q} \bigr)$ are isomorphisms between the corresponding spaces.
Next, by the definition of bilinear form $J_{E_2}$ we have \[ (\bar{i}_1^{p,q})^* \circ S_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}= (\bar{i}^{p,q})^* \circ S_{E_2} \circ (\bar{r}_1^*)_0^{n-p,n-q}=0. \] Therefore, isomorphism \eqref{cohmap2} coincides with $S_{E} \oplus S_{E_1}$. This implies, in particular, that $S_E$ is an isomorphism and completes the proof of part (1) of the theorem.
Let us prove (2)(b). Let $M:=M_{E_2}$ be the map of part (2)\,(b) of the theorem for the trivial bundle $E_2$. We set $$N_{E_2}:=M_{E_2}^{-1}: H_0^{n-p,n-q}(\bar{D},E_2^*) \rightarrow (B_2^*)^m$$ and define a continuous linear map $$ N_{E}: H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathcal O(X,E^*)^m $$ by the formula $$ N_{E}:=\hat{i}^* \circ N_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}, $$
where $\hat{i}^*:=\oplus^m \bigl((\hat{i}^*)^{0,0}|_{B^*_2} \bigr)$ (here $B^*_2$ is identified with the space of constant sections in $\mathcal O(X,E^*_2)$). Since $N_{E_2}$ is continuous, map $N_{E}$ is continuous as well.
Let us show that $N_{E}$ is injective. We argue as above. Namely, map \begin{equation} \label{mapinj} (\hat{i}^*, \hat{i}_1^*) \circ N_{E_2} \circ \Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}_1^*)_0^{n-p,n-q}\bigr): \end{equation} $$ H_0^{n-p,n-q}(\bar{D},E^*) \oplus H_0^{n-p,n-q}(\bar{D},E_1^*) \rightarrow \mathcal O(X,E^*)^m \oplus \mathcal O(X,E_1^*)^m $$ is injective. Indeed, $N_{E_2}$ is an isomorphism by the corresponding result of part I, and the injectivity of maps $(\hat{i}^*, \hat{i}_1^*)$, $\Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}_1^*)_0^{n-p,n-q}\bigr)$ follows from the decomposition $E \oplus E_1=E_2$. Since \[ \hat{i}_1^* \circ N_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}=\hat{i}^* \circ N_{E_2} \circ (\bar{r}_1^*)_0^{n-p,n-q}=0 \] (because $r_1\circ i=r\circ i_1=0$), injective map \eqref{mapinj} coincides with $N_E \oplus N_{E_1}$, and so map $N_{E}$ must be injective as well.
Now, we define $$\mathcal B:=N_{E} \bigl( H_0^{n-p,n-q}(\bar{D},E^*)\bigr) \subset \mathcal O(X,E^*)^m.$$ (Space $\mathcal B \subset \mathcal O(X,E^*)^m$ is endowed with the Fr\'{e}chet topology of uniform convergence on compact subsets of $X$.) Let us show that $\mathcal B$ is a closed subspace of $\mathcal O(X,E^*)^m$. To this end, we define a continuous linear map $$ M=M_{E}:=(\bar{i}^*)_0^{n-p,n-q} \circ \widetilde M_{E_2} \circ \hat{r}^*:\mathcal B \rightarrow H_0^{n-p,n-q}(\bar{D},E^*), \quad \hat{r}^*=\oplus^m (\hat{r}^*)^{0,0}, $$ where \[
\widetilde M_{E_2}(h_1,\dots,h_m):=\left[\sum_{i=1}^m h_i|_{\bar D} \cdot \gamma_i\right], \quad (h_1,\dots,h_m) \in \mathcal O(X,E_2^*)^m. \]
By definition, $M_{E_2}=\widetilde M_{E_2}|_{(B_2^*)^m}$. Also, one can easily check that \[
(\bar{i}^*)_0^{n-p,n-q} \circ \widetilde M_{E_2} \circ \hat{r}^*\circ\hat{i}^*|_{(B_2^*)^m}=(\bar{i}^*)_0^{n-p,n-q} \circ M_{E_2}. \] From here, using that $M_{E_2} \circ N_{E_2}={\rm Id}_{H_0^{n-p,n-q}(\bar{D},E_2^*)}$, we obtain $M_{E} \circ N_{E}={\rm Id}_{H_0^{n-p,n-q}(\bar{D},E^*)}$. Since $M_E$ is continuous, the latter identity implies that space $\mathcal B$ is complete and hence is closed in $\mathcal O(X,E^*)^m$.
The fact that $\mathcal B$ is isomorphic to the dual of $\mathcal A$ is now immediate, since by what we have proved above $\mathcal B \cong (H^{p,q}(\bar{D},E))^*$, while by Theorem \ref{thm2}(1), $\mathcal A \cong H^{p,q}(\bar{D},E)$. The proof of assertion (2)(b) is complete.
The proof of assertion (2)(a) is analogous to the proof of part (1)(a) of Theorem \ref{thm2}.
The proof of the theorem is complete.
\end{document} |
\begin{document}
\title{Connections between Romanovski and other polynomials} \author{H. J. Weber\\Department of Physics\\University of Virginia\\ Charlottesville, VA 22904, USA}
\maketitle \begin{abstract} A connection between Romanovski polynomials and those polynomials that solve the one-dimensional Schr\"odinger equation with the trigonometric Rosen-Morse and hyperbolic Scarf potential is established. The map is constructed by reworking the Rodrigues formula in an elementary and natural way. The generating function is summed in closed form from which recursion relations and addition theorems follow. Relations to some classical polynomials are also given. \end{abstract}
\leftline{MSC: 33C45, 42C15, 33C30, 34B24} \leftline{PACS codes: 02.30.Gp; 02.30.Hq; 02.30.Jr, 03.65.Ge} \leftline{Keywords: Romanovski polynomials; complexified Jacobi polynomials;} \leftline{~~~~~~~~~~~~~~~generating function; recursion relations; addition theorems}
\section{Introduction and review of basic results}
Romanovski polynomials were discovered in 1884 by Routh~\cite{routh} in the form of complexified Jacobi polynomials on the unit circle in the complex plane and were then rediscovered as real polynomials by Romanovski~\cite{rom} in a statistics framework. Recently real polynomial solutions of the Scarf~\cite{cot} and Rosen-Morse potentials~\cite{ck} in (supersymmetric) quantum mechanics were recognized~\cite{rwack} to be related to the Romanovski polynomials.
Here we apply to Romanovski polynomials a recently introduced natural method of reworking the Rodrigues formula~\cite{hjw} that leads to connections with other polynomials.
The paper is organized as follows. In the Introduction we define the complementary polynomials $Q_\nu^{(\alpha,-a)}(x)$, then establish both a recursive differential equation satisfied by them and the procedure for the systematic construction of the $Q_{\nu}^{(\alpha,-a)}(x),$ derive their Sturm-Liouville differential equation (ODE), their generating function and its consequences, all based on the results of ref.~\cite{hjw}. Section~2 deals with a parameter addition theorem, Sect.~3 with orthogonality integrals and Sect.~4 with connections of these Romanovski polynomials to the classical polynomials. Sect.~5 deals with further applications using auxiliary polynomials.
{\bf Definition.} The Rodrigues formula that generates the polynomials is given by \begin{eqnarray} P_l^{(a,\alpha)}(x)=\frac{1}{w_l(x)}\frac{d^{l}}{dx^{l}}[w_l(x)\sigma(x)^{l}] =\frac{\sigma(x)^l}{w_0(x)}\frac{d^l w_0(x)}{dx^l},
\quad l=0,1,...\ . \label{rds} \end{eqnarray} where $\sigma(x)\equiv 1+x^2$ is the coefficient of $y''$ of the hypergeometric
ODE (1) of ref.~\cite{hjw} that the polynomials satisfy. The variable $x$ is real and ranges from $-\infty$ to $+\infty.$ The corresponding weight functions \begin{eqnarray} w_l(x)=\sigma(x)^{-(l+a+1)} e^{-\alpha \cot^{-1} x}=\sigma(x)^{-l} w_0(x) \label{wf} \end{eqnarray} depend on the parameters $a, \alpha$ that are independent of the degree $l$ of the polynomial $P_l^{(a,\alpha)}(x).~\diamond$
{\it Lemma.} {\it The weight functions of the $P_l^{(a,\alpha)}(x)$ polynomials
satisfy Pearson's first-order ODE} \begin{eqnarray} \sigma(x)\frac{dw_l(x)}{dx}=[\alpha-2x(l+a+1)] w_l(x). \label{pearson} \end{eqnarray} {\bf Proof.} This is straightforward to check using $d\cot^{-1} x/dx=-1/ \sigma(x).~\diamond$
We now apply the method of ref.~\cite{hjw} and introduce the {\it complementary
polynomials} $Q_\nu^{(\alpha,-a)}(x)$ defining them inductively according to the Rodrigues representation (see Eq.~(5) of ref.~\cite{hjw}) \begin{equation} P_l^{(a,\alpha)}(x)=\frac{1}{w_l(x)}\frac{d^{l-\nu}}{dx^{l-\nu}} [\sigma(x)^{l-\nu}w_l(x)Q_\nu^{(\alpha,-a)}(x)],\,\,\,\,\, \nu=0, 1, \ldots, l. ~\diamond \label{Qdef} \end{equation} For Eq.~(\ref{Qdef}) to agree with the Rodrigues formula~(\ref{rds}) for $\nu=0$
requires $Q_0^{(\alpha,-a)}(x)\equiv 1.$ Then comparing Eq.~(\ref{pearson}) with Eq.~(4) of ref.~\cite{hjw} we find the coefficient $\tau(x)=\alpha- \sigma'(x)(a+l)$ of $y'$ in the ODE of the polynomials. Comparing instead with the ODE~(37) of ref.~\cite{rwack} gives their parameter $\beta=-a.$
We now {\it identify the polynomials of ref.~\cite{hjw} with those defined in Eq.~(\ref{Qdef})} \begin{eqnarray} {\cal P}_\nu(x;l)=Q_\nu^{(\alpha,-a)}(x),~l\geq \nu.\label{id} \end{eqnarray} We will show below that the polynomials $Q_\nu^{(\alpha,-a)}(x)$ are independent of the parameter $l.~\diamond$
{\it Theorem~1.1. $Q_\nu^{(\alpha,-a)}(x)$ is a polynomial of degree $\nu$ that satisfies the recursive differential relation} \begin{eqnarray}\nonumber Q_{\nu+1}^{(\alpha,-a)}(x)&=&\sigma(x)\frac{dQ_{\nu }^{(\alpha,-a)}(x)}{dx} +[\tau(x)+2x(l-\nu-1)]Q_{\nu}^{(\alpha,-a)}(x)\\\nonumber &=&\sigma(x)\frac{dQ_{\nu }^{(\alpha,-a)}(x)}{dx} +[\alpha-2x(a+\nu+1)]Q_{\nu}^{(\alpha,-a)}(x),\\ \nu&=&0, 1, \ldots.\ \label{rode} \end{eqnarray} {\bf Proof.} The inductive proof of Theorem~2.2 of ref.~\cite{hjw} applied to the polynomial $Q_\nu^{(\alpha,-a)}(x)$ proves this theorem, and Eq.~(\ref{rode}) agrees with Eq.~(76) of ref.~\cite{rwack} provided their parameter $\beta=-a$ in ref.~\cite{rwack}. Since Eq.~(\ref{rode}) is independent of the parameter $l,$ so are the polynomials $Q_\nu^{(\alpha,-a)}(x)$ that are generated from it. $\diamond$
Comparing the recursive ODE~(\ref{rode}) with one stated in~\cite{rom} leads us to the {\it identification of our polynomials} \begin{eqnarray} Q_{k}^{(\alpha,k-m)}(x)=\varphi_k(m,x),\ Q_\nu^{(\alpha,-a)}(x)= \varphi_{\nu}(a+\nu, x) \label{roq} \end{eqnarray} as a {\it Romanovski polynomial} (with its parameter depending on its degree), and comparing with Eq.~(69) of ref.~\cite{rwack}, \begin{eqnarray} Q_\nu^{(\alpha,-a)}(x)=R_\nu^{(\alpha,\beta-\nu)}(x),~\beta=-a.~\diamond \label{comp} \end{eqnarray} Notice that the parameter $\alpha$ ($-\nu$ in ref.~\cite{rom}) is suppressed in Romanovski's notation. The fact that the integer index $\nu$ of the complementary polynomials occurs in the parameter $m$ of the Romanovski polynomials is sometimes disadvantageous (for orthogonality), but also occasionally a definite advantage (for the generating function). Moreover, \begin{eqnarray} {\cal P}_l(x;l)=Q_l^{(\alpha,-a)}(x)=K_l C_l^{(\alpha,-a)}(x), \label{cs} \end{eqnarray} where $K_l$ is a normalization constant and the $C_l^{(\alpha,-a)}(x),$ after a change of variables, become part of the solutions of the Scarf and Rosen-Morse potentials in the Schr\"odinger equation~\cite{cot},\cite{ck}. However, for $C_l^{(\alpha,-a)}(x)$ to be part of the solution of the Schr\"odinger equation with the trigonometric Rosen-Morse potential requires $\alpha=\alpha_l=\frac{2b}{l+a}$~\cite{ck}; but the polynomials may be defined for the general parameter $\alpha.$
Recursion relations and recursive ODEs are practical tools to systematically generate the polynomials.
{\it Theorem~1.2. The polynomial $Q_\nu^{(\alpha,-a)}(x)$ satisfies the basic recursive ODE} \begin{eqnarray} \frac{dQ^{(\alpha,-a)}_{\nu}(x)}{dx}=-\nu(2a+\nu+1)Q^{(\alpha,-a)}_{\nu-1}(x) \equiv -\lambda_\nu Q^{(\alpha,-a)}_{\nu-1}(x). \label{bode} \end{eqnarray} {\bf Proof.} Eq.~(\ref{bode}) follows from a comparison of the recursive ODE~(\ref{rode}) with a three-term recursion relation as outlined in Corollary~4.2 of ref.~\cite{hjw}, is ODE~(32) of ref.~\cite{hjw}, and agrees with Eq.~(75) of ref.~\cite{rwack} provided their $\beta=-a,$ which is consistent with our previous statements. $\diamond$
Thus, taking a derivative of $Q^{(\alpha,-a)}_{\nu}(x)$ just lowers its degree (and index) by unity, up to a constant factor, a property the Romanovski polynomials share with all classical polynomials.
{\it Theorem~1.3. The polynomials $Q_{\nu}^{(\alpha,-a)}(x)$ satisfy the differential equation of Sturm-Liouville type} \begin{eqnarray} \sigma(x)\frac{d^2 Q^{(\alpha,-a)}_\nu(x)}{dx^2}+[\alpha-\sigma'(x)(a+\nu)] \frac{d Q^{(\alpha,-a)}_\nu(x)}{dx}=-\lambda_\nu Q^{(\alpha,-a)}_\nu(x). \label{qode} \end{eqnarray} {\bf Proof.} Substituting the basic ODE~(\ref{bode}) in the recursive ODE~(\ref{rode}) yields \begin{eqnarray} Q^{(\alpha,-a)}_{\nu+1}(x)=-\frac{\sigma(x)}{\lambda_{\nu+1}} \frac{d^2Q^{(\alpha,-a)}_{\nu+1}(x)}{dx^2}-\frac{1}{\lambda_{\nu+1}}[\alpha- (a+\nu+1)\sigma']\frac{dQ^{(\alpha,-a)}_{\nu+1}(x)}{dx} \end{eqnarray} which, for $\nu\to \nu-1$ is the ODE of the theorem. Again, the ODE is independent of the parameter $l$ in Eq.~(\ref{id}). $\diamond$
{\it Theorem~1.4. The polynomial $P_l(x)$ satisfies the ODE} \begin{eqnarray} \sigma(x)\frac{d^2 P_l(x)}{dx^2}+\tau(x)\frac{d P_l(x)}{dx}=-\lambda_l P_l(x). \label{pode} \end{eqnarray} {\bf Proof.} For $\nu=l,$ we use $P_l(x)={\cal P}_l(x;l)$ in the notation of ref.~\cite{hjw} to rewrite the recursive ODE~(\ref{rode}) of ref.~\cite{hjw} and Eq.~(\ref{bode}) as \begin{eqnarray}\nonumber {\cal P}_l(x;l)&=&\sigma(x){\cal P'}_{l-1}(x;l)+\tau(x){\cal P}_{l-1}(x;l) =P_l(x)\\&=&-\frac{\sigma(x)}{\lambda_l}P''_l(x)-\frac{\tau(x)}{\lambda_l} P'_l(x), \label{Qode} \end{eqnarray} which is the ODE~(1) in ref.~\cite{hjw} for the polynomial $P_l(x).$ (Note that $\tau(x)$ is given right after Eq.~(\ref{Qdef}).) $\diamond$
{\it Theorem~1.5. The polynomials $Q^{(\alpha,-a)}_{\nu}(x)$ satisfy the generalized Rodrigues formulas} \begin{eqnarray} Q^{(\alpha,-a)}_{\nu}(x)&=&w^{-1}_l(x) \sigma(x)^{\nu-l} \frac{d^{\nu}}{dx^{\nu}}[w_l(x)\sigma(x)^{l}]=\frac{\sigma(x)^\nu}{w_0(x)} \frac{d^{\nu}w_0(x)}{dx^{\nu}}; \label{qrod}\\\nonumber Q^{(\alpha,-a)}_{\nu}(x)&=&w_l^{-1}(x)\sigma(x)^{\nu-l}\frac{d^{\nu-\mu}} {dx^{\nu-\mu}}\left(\sigma(x)^{l-\mu}w_l(x) Q^{(\alpha,-a)}_{\mu}(x)\right), \\ \mu&=&0, 1, \ldots, \nu . \label{gqrod} \end{eqnarray} {\bf Proof.} These Rodrigues formulas are those of Theorem~2.3 of ref.~\cite{hjw}; they agree with Eqs.~(72) and (73) of ref.~\cite{rwack} provided their $\beta=-a,$ as we found earlier. Note that, from Eq.~(\ref{wf}) the product $w_l(x)\sigma(x)^l$ does not depend on $l,$ so there is no $l$ dependence in Eqs.~(\ref{qrod},\ref{gqrod}). $\diamond$
The $Q^{(\alpha,-a)}_n(x)$ polynomials generalize the $P_n(x)$ in the sense of allowing any power of $\sigma(x)$ in the Rodrigues formula, not just $\sigma(x)^n$ as for the $P_n(x).$ In other words, the $Q^{(\alpha,-a)}_n(x)$ are associated $P_n(x)$ (or $C_n^{(\alpha,-a)}(x))$ polynomials, as in the relationship between Laguerre (Legendre) and associated Laguerre (Legendre) polynomials.
A generalization of Eq.~(\ref{qrod}), \begin{eqnarray} Q^{(\alpha,-a-l)}_{\nu}(x)=\frac{\sigma(x)^{\nu+l}}{w_0(x)} \frac{d^{\nu}}{dx^{\nu}}\left(\sigma(x)^{-l}w_0(x)\right),~l=0,\pm 1,\ldots, \end{eqnarray} just reproduces the same polynomial with a shifted parameter $a\to a+l.$
{\it The generating function for the $Q_\nu^{(\alpha,-a)}(x)$ polynomials is defined as} \begin{eqnarray} Q(x,y;\alpha,-a)=\sum_{\nu=0}^{\infty}\frac{y^{\nu}}{\nu!} Q^{(\alpha,-a)}_{\nu}(x).~\diamond \label{genf} \end{eqnarray}
The generating function is our main tool for deriving recursion relations.
{\it Theorem~1.6 The generating function can be summed in the closed form} \begin{eqnarray}\nonumber w_l(x)Q(x,y;\alpha,-a)&=&\sigma(x)^{1-l}\sum_{\nu=0}^{\infty} \frac{(y\sigma(x))^{\nu}}{\nu!}\frac{d^{\nu}}{dx^{\nu}} \left(\sigma(x)^{-(a+1)}e^{\alpha\cot^{-1}x}\right)\,,\\\nonumber Q(x,y;\alpha,-a)&=&(1+x^2)^{a+1}e^{\alpha\cot^{-1}x} [1+(x+y(1+x^2))^2]^{-(a+1)}\\&\cdot&e^{-\alpha\cot^{-1}(x+y(1+x^2))}\ , \label{3_8} \end{eqnarray} \begin{eqnarray}\nonumber &&\frac{\partial^{\mu}}{\partial y^{\mu}}\left( w_l(x)\sigma(x)^{l-1} Q(x,y;\alpha,-a)\right)\\&=&\sigma(x)^{\mu}\sum_{\nu=\mu}^{\infty} \frac{(y\sigma(x))^{\nu-\mu}}{(\nu-\mu)!}\frac{d^{\nu-\mu}}{dx^{\nu-\mu}} (\sigma(x)^{-(a+\mu)}e^{-\alpha\cot^{-1}x}Q^{(\alpha,-a)}_{\mu}(x)),\\\nonumber &&\frac{\partial^{\mu}Q(x,y;\alpha,-a)}{\partial y^{\mu}}=w_l(x)^{-1} \sigma(x)^{\mu-l}[1+(x+y\sigma(x))^2]^{-(a+\mu)}\\ &\cdot& e^{-\alpha\cot^{-1}(x+y\sigma(x))}Q^{(\alpha,-a)}_{\mu}(x+y\sigma(x)) . \label{3_9} \end{eqnarray} {\it Both Taylor series converge if $x$ and $x+y\sigma(x)$ are regular points of the weight function.}
{\bf Proof.} The first relation is derived in Theorem~3.2 of ref.~\cite{hjw} by substituting the Rodrigues formula~(\ref{qrod}) in the defining series~(\ref{genf}) of the generating function and recognizing it as a Taylor series. The other follows similarly. $\diamond$
{\it Theorem~1.7. The generating function satisfies the partial differential equation (PDE)} \begin{eqnarray} \frac{\partial Q(x,y;\alpha,-a)}{\partial y}= \frac{\sigma(x)Q(x,y;\alpha,-a)}{1+[x+y\sigma(x)]^2} Q^{(\alpha,-a)}_1(x+y\sigma(x)). \label{pde} \end{eqnarray} {\bf Proof.} This PDE is derived by straightforward differentiation in Theorem~3.3 of ref.~\cite{hjw} in preparation for recursion relations by translating the case $\mu=1$ in Eq.~(\ref{3_9}) into a partial differential equation (PDE). $\diamond$
One of the main consequences of Theorem~1.7 is a general recursion relation.
{\it Theorem~1.8. The $Q^{(\alpha,-a)}_{\nu}(x)$ polynomials satisfy the three-term recursion relation} \begin{eqnarray}\nonumber Q^{(\alpha,-a)}_{\nu+1}(x)=[\alpha-2x(a+\nu+1)]Q^{(\alpha,-a)}_{\nu}(x) -\nu\sigma(x)(2a+\nu+1)Q^{(\alpha,-a)}_{\nu-1}(x).\\ \label{recu3} \end{eqnarray}
{\bf Proof.} Equation~(\ref{pde}) translates into this recursion relation by substituting Eq.~(\ref{genf}) defining the generating function and thus rewriting this as \begin{eqnarray}\nonumber &&(1+y^2\sigma^2(x)+2xy)\sum_{\nu=1}^{\infty}\frac{y^{\nu-1}}{(\nu-1)!} Q^{(\alpha,-a)}_{\nu}(x)\\&=&[\alpha-2x(a+1)-2y(a+1)\sigma(x)] \sum_{\nu=0}^{\infty}\frac{y^{\nu}}{\nu!}Q^{(\alpha,-a)}_{\nu}(x).~\diamond \end{eqnarray}
Just like the recursive ODE~(\ref{rode}), this recursion allows for a systematic construction of the Romanovski polynomials, in contrast to the Rodrigues formulas which become impractical for large values of the degree $\nu.$
{\it Theorem~1.9. The polynomials $Q_{\nu}^{(\alpha,-a)}(x)$ satisfy the differential equation of Sturm-Liouville type}
\begin{eqnarray}\nonumber \frac{d}{dx}\left(\sigma(x)^{l-\nu+1}w_l(x)\frac{dQ_{\nu}^{(\alpha,-a)}(x)} {dx}\right)&=&-\lambda_{\nu}\sigma(x)^{l-\nu}(x)w_l(x) Q_{\nu}^{(\alpha,-a)}(x);\\\lambda_{\nu}&=&\nu(2a+\nu+1)\ . \label{qsl} \end{eqnarray} {\bf Proof.} This ODE is equivalent to the ODE~(\ref{qode}) and agrees with Eq.~(78) of ref.~\cite{rwack} if $\beta=-a$ there. Note that the inductive proof in Theorem~5.1 in ref.~\cite{hjw} is much lengthier than our proof of Eqs.~(\ref{qode},\ref{pode}). $\diamond$
\section{Parameter Addition}
The multiplicative structure of the generating function of Eq.~(\ref{3_8}) involving the two parameters in the exponents of two separate functions, as displayed in \begin{eqnarray} Q\left(x,\frac{y-x}{\sigma(x)};\alpha,-a\right)=\left(\frac{\sigma(x)} {\sigma(y)}\right)^{a+1}e^{\alpha(\cot^{-1}x-\cot^{-1}y)}, \end{eqnarray} allows for the following theorems.
{\it Theorem~2.1. The $Q_{\nu}^{(\alpha,-a)}(x)$ polynomials satisfy the parameter addition relation} \begin{eqnarray} Q_{N}^{(\alpha_1+\alpha_2,-a_1-a_2-1)}(x)=\sum_{\nu_1=0}^N \left(N\atop \nu_1\right)Q_{\nu_1}^{(\alpha_1,-a_1)}(x) Q_{N-\nu_1}^{(\alpha_2,-a_2)}(x). \end{eqnarray} {\bf Proof.} This formula follows from the Taylor expansion \begin{eqnarray} \sum_{\nu_1, \nu_2=0}^{\infty}\frac{y^{\nu_1+\nu_2}}{\nu_1!\nu_2!} Q_{\nu_1}^{(\alpha_1,-a_1)}(x)Q_{\nu_2}^{(\alpha_2,-a_2)}(x)= \sum_{N=0}^{\infty}\frac{y^{N}}{N!} Q_{N}^{(\alpha_1+\alpha_2,-a_1-a_2-1)}(x) \end{eqnarray} of the {\it generating function identity} \begin{eqnarray} Q(x,y;\alpha_1,-a_1)Q(x,y;\alpha_2,-a_2)=Q(x,y;\alpha_1+\alpha_2,-a_1-a_2-1).~ \diamond \end{eqnarray}
Given the complexity of the polynomials, the elegance and simplicity of this relation are remarkable.
{\bf Example.} The case $N=0$ is trivial, and $N=1$ becomes the additive identity \begin{eqnarray}\nonumber Q_1^{(\alpha_1+\alpha_2,-(a_1+a_2+1))}(x)&=&\alpha_1+\alpha_2-2x(a_1+a_2+2)\\ \nonumber&=&[\alpha_1-2x(a_1+1)]+[\alpha_2-2x(a_2+1)]\\&=& Q_1^{(\alpha_1,-a_1)}(x)+Q_1^{(\alpha_2,-a_2)}(x). \end{eqnarray} The first case involving additive and multiplicative aspects of the polynomials
is $N=2$ which we decompose and multiply out as follows: \begin{eqnarray}\nonumber Q_{2}^{(\alpha_1+\alpha_2,-a_1-a_2-1)}&=& [\alpha_1+\alpha_2-2x(a_1+a_2+2)][\alpha_1+\alpha_2-2x(a_1+a_2+3)]\\\nonumber &-&2\sigma(x)(a_1+a_2+2)=\{[\alpha_1-2x(a_1+1)]\\\nonumber &+& [\alpha_2-2x(a_2+1)]\}\{[\alpha_1-2x(a_1+2)]\\\nonumber&+&[\alpha_2-2x(a_2+1)] \}-2\sigma(x)[(a_1+1)+(a_2+1)]\\\nonumber&=&[\alpha_1-2x(a_1+1)][\alpha_1 -2x(a_1+2)]-2\sigma(x)(a_1+1)\\\nonumber &+&[\alpha_2-2x(a_2+1)][\alpha_2-2x(a_2+2)]-2\sigma(x)(a_2+1)\\\nonumber &+&[\alpha_1-2x(a_1+1)][\alpha_2-2x(a_2+1)]\\\nonumber&+&[\alpha_2-2x(a_2+1)] [\alpha_1-2x(a_1+1)]\\\nonumber&=& Q_{2}^{(\alpha_1,-a_1)}+Q_{2}^{(\alpha_2,-a_2)}+Q_{1}^{(\alpha_1,-a_1)} Q_{1}^{(\alpha_2,-a_2)}\\&+&Q_{1}^{(\alpha_2,-a_2)}Q_{1}^{(\alpha_1,-a_1)}.~\diamond \end{eqnarray}
The addition theorem is consistent with {\it the homogeneous polynomial theorem in the variables} $x, \alpha, \sqrt{\sigma}$ (without using $\sigma=x^2+1$) {\it which the polynomials satisfy} and can be generalized to an arbitrary number of parameters.
{\it Theorem~2.2. The $Q_\nu^{(\alpha,-a)}(x)$ polynomials satisfy the more general polynomial identity} \begin{eqnarray}\nonumber &&Q_N^{(\alpha_1+\alpha_2+\cdots +\alpha_n,-(a_1+a_2+\cdots +a_n+n-1))}(x)\\&=& \sum_{0\leq \nu_j\leq N, \nu_1+\cdots +\nu_n=N}\frac{N!} {\prod_1^n \nu_j!}\prod_{j=1}^n Q_{\nu_j}^{(\alpha_j,-a_j)}(x). \end{eqnarray}
{\bf Proof.} This follows similarly from the Taylor expansion of the {\it product identity of $n$ generating functions} \begin{eqnarray}\nonumber \prod_{j=1}^n Q(x,y;\alpha_j,-a_j)=Q(x,y;\alpha_1+\alpha_2+\cdots +\alpha_n, -(a_1+a_2+\cdots +a_n+n-1)).\\ \end{eqnarray} As an application of the parameter addition theorem we now separate the two parameters $a$ and $\alpha$ into two sets of simpler polynomials $Q_{\nu}^{(0,-a)}$ and $Q_{\mu}^{(\alpha,1)}.$ To this end, we expand the generating functions in the {\it identity} \begin{eqnarray} Q(x,y;\alpha,-a)=Q(x,y;0,-a)Q(x,y;\alpha,1) \label{decomp} \end{eqnarray} in terms of their defining polynomials. This yields
{\it Theorem~2.3. The $Q_\nu^{(\alpha,-a)}(x)$ polynomials satisfy the decomposition identity} \begin{eqnarray} Q^{(\alpha,-a)}_N(x)=\sum_{\nu=0}^N\left(N\atop \nu\right) Q^{(0,-a)}_{\nu}(x)Q^{(\alpha,1)}_{N-\nu}(x). \label{simpl} \end{eqnarray} {\bf Proof.} This identity follows from a Taylor expansion of the generating function identity~(\ref{decomp}) in terms of sums of products of polynomials involving only one parameter each. $\diamond$
{\bf Definition.} The generating function \begin{eqnarray} e^{\alpha [\cot^{-1}x-\cot^{-1}(x+y\sigma(x))]}=\sum_{\nu=0}^{\infty} \frac{y^{\nu}}{\nu!}Q^{(\alpha,1)}_{\nu}(x) \end{eqnarray} defines the second set of the polynomials, while the first one will be treated
in detail below upon expanding the polynomials $Q^{(0,-a)}_\nu(x)$ as finite sums of Gegenbauer polynomials in Sect.~4 or finite power series in Eq.~(\ref{expl}). $\diamond$
We also note that $Q_\nu^{(0,-a)}(x)=K_\nu C_\nu^{(0,-a)}(x),$ so the latter also have a Gegenbauer polynomial expansion.
{\it Theorem~2.4. The $Q_\nu^{(\alpha,-a)}(x)$ polynomials satisfy the (parity) symmetry relation} \begin{eqnarray} Q_{\nu}^{(-\alpha,-a)}(x)=(-1)^{\nu}Q_{\nu}^{(\alpha,-a)}(-x). \end{eqnarray}
{\bf Proof.} This relation derives from the {\it generating function identity} \begin{eqnarray} Q(-x,-y;-\alpha,-a)=Q(x,y;\alpha,-a) \end{eqnarray} which holds because $\alpha\cot^{-1}(x+y\sigma(x))$ in the generating function
Eq.~(\ref{genf}) stays invariant under $\alpha\to -\alpha, x\to -x, y\to -y,$ and $\sigma(-x)=\sigma(x).~\diamond$
\section{Orthogonality Integrals}
This section deals with an application of the generating function to integrals that are relevant for studying the orthogonality of the polynomials.
{\bf Definition.} We define orthogonality integrals for the $Q_\nu^{(\alpha,-a)}(x)$ polynomials by~\cite{rwack} \begin{eqnarray}\nonumber O^{(a,\alpha)}_{\mu,\nu}&=&\int_{-\infty}^{\infty}dx \frac{Q^{(\alpha,-a)}_{\mu}(x)Q^{(\alpha,-a)}_{\nu}(x)e^{-\alpha\cot^{-1}x}} {\sigma(x)^{(\mu+\nu)/2+a+2}}=0,\\a&>&-3/2,~\mu+\nu~\rm{even}, \label{orthdef} \end{eqnarray} while for $\mu+\nu$ odd there needs to be an extra $\sqrt{\sigma(x)}$ in the numerator for the orthogonality integrals to vanish. $\diamond$
Thus, the $Q_\nu^{(\alpha,-a)}(x)$ polynomials form two infinite subsets, each with general orthogonality, but polynomials from different subsets are not mutually orthogonal. While displaying infinite orthogonality, this property falls short of the general orthogonality of all classical polynomials. The $Q_\nu^{(\alpha,-a)}(x)$ polynomials form a partition of the set of all Romanovski polynomials, as shown in Eq.~(\ref{comp}), with upper index dependent on the running index $\nu,$ though. The Romanovski polynomials $R_\nu^{(\alpha,\beta)}(x)$ with upper indices independent of the degree $\nu,$
the running index, form another partition that has the finite orthogonality, as discussed in more detail in ref.~\cite{rwack}. The orthogonality of the $C_n^{(\alpha_n,-a)}(x)$ polynomials from the Schr\"odinger equation with $\alpha=\alpha_n$ as discussed below Eq.~(\ref{cs}), is yet another form of orthogonality similar to that of hydrogenic wave functions, which also differs from the mathematical orthogonality of associated Laguerre polynomials, the subject of Exercise~13.2.11 in ref.~\cite{aw}. The orthogonality integrals of Eq.~(\ref{orthdef}) suggest analyzing the following integral of the generating functions \begin{eqnarray}\nonumber I(y)&=&\int_{-\infty}^{\infty}\frac{dx}{\sigma(x)^{a+2}}\left( Q(x,\frac{y}{\sqrt{\sigma}};\alpha,-a)\right)^2 e^{-\alpha \cot^{-1}x}\\ \nonumber &=&\sum_{\nu_1,\nu_2=0}^{\infty}\frac{y^{\nu_1+\nu_2}}{\nu_1! \nu_2!}\int_{-\infty}^{\infty}dx\frac{Q^{(\alpha,-a)}_{\nu_1}(x) Q^{(\alpha,-a)}_{\nu_2}(x)e^{-\alpha\cot^{-1}x}} {\sigma(x)^{(\nu_1+\nu_2)/2+a+2}}\\&=&\sum_{\nu_1,\nu_2=0}^{\infty} \frac{y^{\nu_1+\nu_2}}{\nu_1!\nu_2!}O^{(a,\alpha)}_{\nu_1, \nu_2} \label{orthint} \end{eqnarray} which is written directly in terms of orthogonality integrals $O^{(\alpha,-a)}_{\nu_1, \nu_2}$ defined in Eq.~(\ref{orthdef}). 
On the other hand, we can express the integral as \begin{eqnarray}\nonumber I(y)&=&\int_{-\infty}^{\infty}\frac{dx e^{-\alpha\cot^{-1}x+2\alpha\cot^{-1}x -2\alpha\cot^{-1}(x+y\sqrt{\sigma})}}{\sigma(x)^{a+2}[1+y^2+\frac{2xy} {\sqrt{\sigma}}]^{2(a+1)}}\\\nonumber&=&\int_0^{\infty}\frac{dx}{\sigma^{a+2}} \frac{e^{\alpha\cot^{-1}x-2\alpha\cot^{-1}(x+y\sqrt{\sigma})}} {[1+y^2+\frac{2xy}{\sqrt{\sigma}}]^{2(a+1)}}\\&+&\int_0^{\infty}\frac{dx} {\sigma^{a+2}}\frac{e^{-\alpha\cot^{-1}x+2\alpha\cot^{-1}(x-y\sqrt{\sigma})}} {[1+y^2-\frac{2xy}{\sqrt{\sigma}}]^{2(a+1)}}, \end{eqnarray} which is manifestly not even in the variable $y.$ If the $Q^{(\alpha,-a)}_{\nu}$ polynomials were orthogonal, then the double sum in $I$ of Eq.~(\ref{orthint}) would collapse to a single sum over normalization integrals multiplied by even powers of $y,$ i.e. $I$ would be an even function of $y.$ This result shows that the $Q^{(\alpha,-a)}_{\nu}$ polynomials are not orthogonal in the conventional sense. In fact, the extra $\sqrt{\sigma}$ in the orthogonality integrals $O_{2\nu,2\mu+1}^{(a,\alpha)}$ is not built into the generating function. In other words, the fact that $I(y)\neq I(-y)$ indirectly confirms that the $Q^{(\alpha,-a)}_{\nu}$ polynomials have more complicated orthogonality properties than the Romanovski polynomials with parameters that are independent of the degree of the polynomial, as discussed in more detail in ref.~\cite{rwack}.
Let us next consider the special parameter $\alpha=0$ and analyze similarly the
integral \begin{eqnarray}\nonumber I_0(y)&=&\int_{-\infty}^{\infty}\frac{dx}{\sigma(x)^{a+2}} Q(x,\frac{y}{\sqrt{\sigma}};0,-a)=\sum_{\nu=0}^{\infty}\frac{y^{\nu}}{\nu!} \int_{-\infty}^{\infty}\frac{dx Q^{(0,-a)}_{\nu}(x)}{\sigma^{\nu/2+2+a}}\\ \nonumber&=&\sum_{\nu=0}^{\infty}\frac{y^{\nu}}{\nu!}O^{(a, 0)}_{\nu, 0} =\int_0^{\infty}\frac{dx}{\sigma^{a+2}}\frac{1}{[1+y^2+\frac{2xy} {\sqrt{\sigma}}]^{a+1}}+\int_0^{\infty}\frac{dx}{\sigma^{a+2}}\frac{1} {[1+y^2-\frac{2xy}{\sqrt{\sigma}}]^{a+1}},\\ \label{int0} \end{eqnarray}
which is an even function of $y.$ If the $Q^{(0,-a)}_{\nu}(x)$ are orthogonal to $Q^{(0,-a)}_{0}(x)=1$ then the sum in Eq.~(\ref{int0}) will collapse to its first term and $I_0$ is a constant. It is quite a surprise that this actually happens in the interval $-1\leq y\leq 1$ for all parameter values $a$ for which the integral $I_0$ converges. For example, $I_0(y)=r(a)\pi=$const. with a rational number $r(a)$ that depends on the exponent $a,$ where $r(0)=1/2, r(1)=3/4, r(2)=5/8, r(3)=35/64, r(4)=3^2\cdot7/2^7,$ if $a$ is a non-negative integer; in general $I_0(y)=\sqrt{\pi}\Gamma(a+3/2)/\Gamma(a+2).$ For $|y|>1$, $I_0(y)$ varies and deviates from the constant value. From the structure of the integral, this anomalous behavior of $I_0$ is rather unexpected. Since for $\alpha=0$ parity is conserved in the ODE~(\ref{qode}), the orthogonality integrals $O_{2\nu,0}^{(a,0)}$ are zero and $O_{2\nu,2\mu}^{(a,0)}=0$ more generally. Since it is shown in ref.~\cite{rwack} that $O_{2\nu,2\mu}^{(a,0)}$ for $\mu\neq \nu$ vanish, the $Q^{(0,-a)}_{\nu}(x)$ polynomials are orthogonal in the conventional sense. Since each $C_l^{(0,-a)}(x)$ is proportional to $Q^{(0,-a)}_{l}(x),$ the $C_l^{(0,-a)}(x)$ polynomials are orthogonal. This is confirmed by $I_0$ and its constancy in the interval $-1\leq y\leq 1$ is thus proved.
The restriction to parameter value $\alpha=0$ can be removed: \begin{eqnarray}\nonumber I_1(y)&=&\int_{-\infty}^{\infty}\frac{dx}{\sigma(x)^{a+2}} Q(x,\frac{y}{\sqrt{\sigma}};\alpha,-a)=\sum_{\nu=0}^{\infty}\frac{y^{\nu}} {\nu!}O^{(a, \alpha)}_{\nu, 0}\\\nonumber &=&\int_{-\infty}^{\infty}\frac{dx}{\sigma^{a+2}}\frac{e^{-\alpha\cot^{-1} (x+y\sqrt{\sigma})}}{[1+y^2+\frac{2xy}{\sqrt{\sigma}}]^{a+1}}\\ &=&\int_0^{\infty}\frac{dx}{\sigma^{a+2}}\frac{e^{-\alpha\cot^{-1}(x+y \sqrt{\sigma})}}{[1+y^2+\frac{2xy}{\sqrt{\sigma}}]^{a+1}}+\int_0^{\infty} \frac{dx}{\sigma^{a+2}}\frac{e^{-\alpha\cot^{-1}(x+y\sqrt{\sigma})}} {[1+y^2-\frac{2xy}{\sqrt{\sigma}}]^{a+1}}, \label{int1} \end{eqnarray} which is neither even in $y$ nor independent of $y.$ Therefore, if we wish to find the normalizations of the $Q_{\nu}^{(\alpha,-a)}$ polynomials we have to split up the generating function into its even and odd parts in $y$ and integrate them separately, each with the proper power of $\sigma(x)$ in the orthogonality integral.
\section{Relations with Gegenbauer Polynomials}
The relation of the Romanovski polynomials as complexified Jacobi polynomials on the unit circle in the complex plane is described in detail in ref.~\cite{rwack}. Therefore, we focus here on relations with Gegenbauer polynomials.
We start with the simplest case of parameter values $a=0=\alpha,$ which also happens to be relevant for physics~\cite{ck}, to derive from the generating function an expression for $Q_{\nu}^{(\alpha,-a)}(x)$ in terms of a finite sum of Gegenbauer polynomials. For $a=0=\alpha,$ Eq.~(\ref{3_8}) takes the explicit
form \begin{eqnarray} Q(x,y;0,0)=(1+x^2)[1+(x+y(1+x^2))^2]^{-1}=\frac{1}{1+2xy+y^2(1+x^2)}. \label{g00} \end{eqnarray}
{\it Theorem~4.1. The $Q_{m}^{(0, 0)}(x)$ polynomials have the expansion into Gegenbauer polynomials} \begin{eqnarray} Q_{m}^{(0, 0)}(x)=m!\sum_{n=0}^{[m/2]}(-1)^nx^{2n}C_{m-2n}^{(n+1)}(-x),\ m=0, 1,\ldots . \end{eqnarray}
{\bf Proof.} For $|xy|/|1+2xy+y^2|<1,$ the generating function identity~(\ref{g00}) may be expanded as an absolutely converging geometric series \begin{eqnarray} Q(x,y;0,0)=\sum_{n=0}^{\infty}\frac{(-x^2y^2)^n}{(1+2xy+y^2)^{n+1}}. \end{eqnarray} Substituting the generating function of Gegenbauer polynomials~\cite{aw} $C_l^{(n+1)}(x)$, \begin{eqnarray} (1-2xy+y^2)^{-(n+1)}=\sum_{l=0}^{\infty}C_l^{(n+1)}(x)y^l, \end{eqnarray} we obtain the expansion \begin{eqnarray}\nonumber Q(x,y;0,0)&=&\sum_{n=0}^{\infty}(-x^2y^2)^n \sum_{l=0}^{\infty}C_l^{(n+1)}(-x)y^l\\&=&\sum_{m=0}^{\infty}y^m \sum_{n=0}^{[m/2]}(-1)^nx^{2n}C_{m-2n}^{(n+1)}(-x), \end{eqnarray} where $m=l+2n$ was used upon interchanging the summations, with $[m/2]$ the integer part of $m/2$. On comparing with Eq.~(\ref{3_8}) defining the generating function $Q(x,y;\alpha,-a)$ we obtain the expansion of the $Q_{m}^{(0, 0)}(x)$ polynomials as a finite sum of Gegenbauer polynomials of Theorem~4.1. $\diamond$
Since $Q_{m}^{(0, 0)}(x)=K_{m}C_{m}^{(0, 0)}(x),$ this result is also valid for the $C_{m}^{(0, 0)}(x)$ polynomials. It can be generalized to parameter values $a\neq 0:$
{\it Theorem~4.2. The $Q_{N}^{(0,-a)}(x)$ polynomials have the Gegenbauer polynomial expansion} \begin{eqnarray} Q_{N}^{(0,-a)}(x)=N!\sum_{n=0}^{[N/2]}\left(-a-1\atop n\right)x^{2n} C^{(n+a+1)}_{N-2n}(-x). \end{eqnarray}
{\bf Proof.} This relation follows from expanding the generating function \begin{eqnarray}\nonumber Q(x,y;0,-a)&=&\left( \frac{\sigma(x)}{1+x^2+y^2\sigma^2(x)+2xy\sigma(x)} \right)^{a+1}=\frac{1}{[1+2xy+y^2+x^2y^2]^{a+1}}\\\nonumber &=&\sum_{n=0}^{\infty}\left(-a-1\atop n\right)\frac{(x^2y^2)^n} {[1+2xy+y^2]^{n+a+1}}\\\nonumber&=&\sum_{n, l=0}^{\infty}\left(-a-1\atop n \right)(x^2y^2)^nC^{(n+a+1)}_l(-x) y^l\\&=&\sum_{N=0}^{\infty} y^{N} \sum_{n=0}^{[N/2]}\left(-a-1\atop n\right)x^{2n}C^{(n+a+1)}_{N-2n}(-x) \end{eqnarray} in terms of the binomial series and then again using the generating functions of the Gegenbauer polynomials. $\diamond$
{\it Theorem~4.3. The $Q_{N}^{(\alpha,-a)}(x)$ polynomials have the general Gegenbauer polynomial expansion} \begin{eqnarray} \frac{1}{N!}Q_{N}^{(\alpha,-a)}(x)=\sum_{\nu=0}^N\left(N\atop \nu\right) Q^{(\alpha,1)}_{N-\nu}(x)\sum_{n=0}^{[N/2]}\left(-a-1\atop n\right)x^{2n} C^{(n+a+1)}_{N-2n}(-x). \end{eqnarray}
{\bf Proof.} Substituting the expansion of Theorem~4.2 into Eq.(\ref{simpl}) in which the Gegenbauer polynomials depend only on the parameter $a$ while the $Q^{(\alpha,1)}_{\nu}(x)$ depend only on $\alpha$ yields the desired expansion.
$\diamond$
The Gegenbauer polynomials are well-known generalizations of Legendre polynomials. The {\it hyperbolic Gegenbauer} ODE \begin{eqnarray} \sigma(x)y''-(2\lambda+1)xy'+\Lambda_l^{(\lambda)}y=0 \label{ggode} \end{eqnarray} becomes the ODE~(\ref{qode}) for $\alpha=0, \nu=l$ and $2\lambda+1=2(a+\nu),$ so the solutions of Eq.~(\ref{ggode}) are the $Q_{l}^{(0,-(\lambda-l+1/2))}(x)$
polynomials. In fact, for $\alpha=0$ we can directly solve the ODE~(\ref{qode}) for the $Q_{\nu}^{(0,-a)}(x)$ polynomial solutions in terms of finite power series.
{\it Theorem~4.4. The $Q_{N}^{(0,-a)}(x)$ polynomials have the explicit finite power series} \begin{eqnarray}\nonumber Q_{N}^{(0,-a)}(x)&=&\sum_{\mu=0}^{[N/2]}x^{N-2\mu}a_{\mu},\ a_{\mu}=-\frac{(N-2\mu+2)(N-2\mu+1)}{2\mu(2a+2\mu+1)}a_{\mu-1},\\\nonumber a_1&=&-\frac{N(N-1)}{2(2a+3)},\ a_{\mu}=(-1)^{\mu} \frac{N(N-1)\cdots (N-2\mu+1)}{2^{\mu}\mu!(2a+3)(2a+1)\cdots (2a+2\mu+1)}\\ \label{expl} \end{eqnarray} \begin{eqnarray} Q_{N}^{(\alpha,-a)}(x)=\sum_{\nu=0}^N\left(N\atop \nu\right) Q^{(\alpha,1)}_{N-\nu}(x)\sum_{\mu=0}^{[N/2]}x^{N-2\mu}a_{\mu} \end{eqnarray} with $a_\mu$ from Eq.~(\ref{expl}) and $[N/2]$ denoting the integer part of $N/2.$
{\bf Proof.} Since the proof by mathematical induction is straightforward, we just give the results. As the ODE is invariant under the parity transformation,
$x\to -x,$ we have even and odd solutions. Substituting Eq.~(\ref{expl}) in Eq.~(\ref{simpl}) yields the second relation stated in Theorem~4.4. $\diamond$
This is also valid for the $C_l^{(0,-a)}(x)$ polynomials up to the normalization $K_l.$
\section{Auxiliary polynomials}
Carrying out the innermost derivative of the Rodrigues formula~(\ref{rds}), we find \begin{eqnarray}\nonumber P_l(x)&=&\frac{\sigma^l}{w_0}\frac{d^{l-1}}{dx^{l-1}}\left(\frac{w_0}{\sigma} [\alpha-\sigma'(a+1)]\right)\\&=&\alpha Q^{(\alpha,-a-1)}_{l-1}(x)- (a+1)\frac{\sigma^l}{w_0}\frac{d^{l-1}}{dx^{l-1}}\left(\frac{\sigma' w_0} {\sigma}\right), \label{aux} \end{eqnarray} and are led to define {\it auxiliary polynomials:} \begin{eqnarray} S_{l+1}(x)=\frac{\sigma(x)^{l+1}}{w_0(x)}\frac{d^l}{dx^l} \left(\frac{\sigma'(x)w_0(x)}{\sigma(x)}\right).~\diamond \label{defx} \end{eqnarray}
{\bf Example.} \begin{eqnarray} S_1(x)=\sigma'(x),~S_2(x)=\sigma''\sigma(x)+\sigma'(x)[\alpha-\sigma'(x)(a+2)], \ldots .~\diamond \end{eqnarray} So \begin{eqnarray} S_l(x)=\frac{\alpha}{a+1}Q^{(\alpha,-a-1)}_{l-1}(x)-\frac{P_l(x)}{a+1}. \label{alt1} \end{eqnarray} Applying a derivative to $w_0S_l/\sigma^l$ yields \begin{eqnarray} \frac{d^l}{dx^l}\left(\frac{\sigma'(x)w_0(x)}{\sigma(x)}\right)=\frac{\alpha} {a+1}\frac{d}{dx}\left(\frac{w_0}{\sigma^l}Q^{(\alpha,-a-1)}_{l-1}(x)\right) -\frac{1}{a+1}\frac{d}{dx}\left(\frac{w_0 P_l(x)}{\sigma^l}\right). \end{eqnarray} Using the recursive ODEs for $Q^{(\alpha,-a-1)}_{l-1}$ and $P_l$ yields \begin{eqnarray}\nonumber \frac{\sigma(x)^{l+1}}{w_0(x)}\frac{d^l}{dx^l}\left(\frac{\sigma'(x)w_0(x)} {\sigma(x)}\right)&=&\frac{\alpha}{a+1}(Q^{(\alpha,-a-1)}_{l}(x)-\sigma' Q^{(\alpha,-a-1)}_{l-1}(x))\\&-&\frac{Q^{(\alpha,-a)}_{l+1}(x)}{a+1}. \label{alt2} \end{eqnarray} A comparison of Eqs.~(\ref{alt1},\ref{alt2}) yields \begin{eqnarray} P_{l+1}(x)=\alpha\sigma'(x)Q^{(\alpha,-a-1)}_{l-1}(x)+Q^{(\alpha,-a)}_{l+1}(x), \end{eqnarray} complementing the relation ${\cal P}_l(x;l)=K_lC_l^{(\alpha,-a)}(x) =Q^{(\alpha,-a)}_l(x)=P_l(x).$
For Laguerre polynomials $\sigma(x)=x$ and relation~(\ref{aux}) corresponds to \begin{eqnarray} S_{l+1}(x)=x^{l+1}e^x\frac{d^l}{dx^l}\left(\frac{e^{-x}}{x}\right) =l!L_l^{-l-1}(x), \end{eqnarray} while Eq.~(\ref{aux}) becomes \begin{eqnarray} l L_l(x)=l L_{l-1}(x)-xL^1_{l-1}(x). \end{eqnarray} For Jacobi polynomials $\sigma'(x)=-2x.$ As $-2x=1-x-(1+x),$ where $1\pm x$ can be incorporated into the weight functions $w(x)=(1-x)^a(1+x)^b,$ there is no need to introduce auxiliary polynomials. For example, Eq.~(\ref{aux}) becomes \begin{eqnarray} 2l P_l^{(a,b)}(x)=(a+l)(1+x)P_{l-1}^{(a,b+1)}(x)-(b+l)(1-x)P_{l-1}^{(a+1,b)}(x). \end{eqnarray}
\section{Discussion}
We have used a simple and natural method for constructing polynomials $Q_\nu^{(\alpha,-a)}(x)$ that are complementary to the $C_n^{(\alpha,-a)}(x)$ polynomials and related to them by a Rodrigues formula. Similar to the classical orthogonal polynomials, the $Q_\nu^{(\alpha,-a)}(x)$ appear as solutions of a Sturm-Liouville ordinary second-order differential equation and obey Rodrigues formulas themselves. On the other hand, and different from the classical polynomials, their infinite sets of orthogonality integrals are not the standard ones. These real orthogonal polynomials and their nontrivial orthogonality properties are closely related to Romanovski polynomials and to physical phenomena. In summary, all basic properties of Romanovski polynomials derive from the Rodrigues formula~(\ref{rds}) except for the orthogonality integrals.
\section{Acknowledgments}
It is a pleasure to thank M. Kirchbach for introducing me to the $C_n^{(\alpha,-a)}(x)$ polynomials. Thanks are also due to V. Celli for help with some of the orthogonality integrals.
\end{document}
\begin{document}
\date{} \author{Thabet ABDELJAWAD\footnote{\c{C}ankaya University, Department of Mathematics, 06530, Ankara, Turkey} , Duran T\"{U}RKO\~{G}LU \footnote{ Department of Mathematics, Faculty of Science and Arts, Gazi University, 06500, Ankara-Turkey. dturkoglu@gazi.edu.tr.}}
\title{Locally Convex Valued Rectangular Metric Spaces and The Kannan's Fixed Point Theorem } \maketitle
\begin{abstract} Rectangular TVS-cone metric spaces are introduced and Kannan's fixed point theorem is proved in these spaces. Two approaches are followed for the proof. At first we prove the theorem by a direct method using the structure of the space itself. Secondly, we use the nonlinear scalarization used recently by Wei-Shih Du in [A note on cone metric fixed point theory and its equivalence, {Nonlinear Analysis},72(5),2259-2261 (2010).] to prove the equivalence of the Banach contraction principle in cone metric spaces and usual metric spaces. The proof is done without any normality assumption on the cone of the locally convex topological vector space, and hence generalizing several previously obtained results. \end{abstract}
\emph{Keywords}: TVS-cone metric space, rectangular TVS-cone metric space, Kannan's fixed point theorem.
\section{Introduction and Preliminaries } \label{s:1}
Many authors attempted to generalize the notion of the metric space.
In 2007, Huang and Zhang \cite{HZ} announced the notion of cone metric spaces (CMS) by using the same idea, namely, by replacing real numbers with an ordered real Banach space. In that paper, they also discussed some properties of convergence of sequences and proved fixed point theorems for contractive mappings on cone metric spaces: Any mapping $T$ of a complete cone metric space $X$ into itself that satisfies, for some $0\leq k<1$, the inequality $d(Tx,Ty)\leq k d(x,y)$, for all $x,y \in X$, has a unique fixed point. Lately, many results on fixed point theorems have been extended to cone metric spaces (see e.g.\cite{HZ},\cite{RH},\cite{Ishak},\cite{TAA},\cite{TAA2},\cite{K},\cite{T},\cite{AK}, \cite{TA}). For Kannan's fixed point theorem in rectangular metric spaces (R-MS) we refer to \cite{Das}, and for the contraction principle and Kannan's fixed point theorem in rectangular cone metric spaces (R-CMS) see \cite{Akbar} and \cite{Beg}, respectively.
Recently, Du \cite{D_2009} gave the definition of generalized cone metric space, namely topological vector space-cone metric space (TVS-CMS), and proved some fixed point theorems on that class. The author showed also that Banach contraction principles in usual metric spaces and in TVS-CMS are equivalent.
In this manuscript, we first introduce the notion of rectangular TVS-cone metric spaces (R-TVS-CMS) and then prove Kannan's fixed point theorem in this class of spaces. The obtained result generalizes those in \cite{Beg} and \cite{Das} and hence the classical Kannan's fixed point theorem. Two proofs are presented and the proofs are done without any normality assumption.
Throughout this paper, $(E,S)$ stands for a real Hausdorff locally convex topological vector space (t.v.s.) with $S$ its generating system of seminorms. A non-empty subset $P$ of $E$ is called a cone if $P+P \subset P$, $\lambda P \subset P$ for $\lambda \geq 0$ and $P \cap (-P) =\{0\}$. The cone $P$ will be assumed to be closed and to have nonempty interior as well. For a given cone $P$, one can define a partial ordering (denoted by $\leq$ or $\leq_P$) with respect to $P$ by $x\leq y$ if and only if $y-x \in P$. The notation $x<y$ indicates that $x\leq y$ and $x\neq y$, while $x<<y$ will show $y-x\in intP$, where $intP$ denotes the interior of $P$. Continuity of the algebraic operations in a topological vector space and the properties of the cone imply the relations: $$intP+intP\subseteq intP ~\emph{and}~\lambda intP \subseteq intP~(\lambda > 0).$$ We appeal to these relations in the following.
\begin{definition} \cite{Ali} A cone $P$ of a topological vector space $(X,\tau)$ is said to be normal whenever $\tau$ has a base at zero consisting of $P$-full sets, where a subset $A$ of an ordered vector space via a cone $P$ is said to be $P$-full if for each $x, y \in A$ we have $\{a \in X: x\leq a \leq y\}\subset A$. \end{definition} \begin{theorem} \cite{Ali} (a) A cone $P$ of a topological vector space $(X,\tau)$ is normal if and only if whenever $\{x_\alpha\}$ and $\{y_\alpha\}$, $\alpha \in \Delta$, are two nets in $X$ with $0\leq x_\alpha \leq y_\alpha$ for each $\alpha \in \Delta$ and $y_\alpha \rightarrow 0$, then $x_\alpha \rightarrow 0$.
(b) The cone of an ordered locally convex space $(X,\tau)$ is normal if and only if $\tau$ is generated by a family of monotone $\tau$-continuous seminorms, where a seminorm $q$ on $X$ is called monotone if $q(x)\leq q(y)$ for all $x, y \in X$ with $0\leq x \leq y$. \end{theorem}
In particular, if $P$ is a cone of a real Banach space $E$, then it is called \textit{normal} if there is a number $K \geq 1$ such that for all $x,y \in E$:\ $
0\leq x \leq y\Rightarrow \|x\|\leq K \|y\|.$ The least positive number $K$ satisfying this inequality is called the normal constant of $P$. Also, $P$ is said to be \textit{regular} if every increasing sequence which is bounded from above is convergent. That is, if $\{x_n\}_{n\geq 1}$ is a sequence such that $x_1 \leq x_2\leq \cdots\leq y$ for some $y \in E$, then there is $x \in E$
such that $\lim_{n\rightarrow\infty} \|x_n-x\|=0$. For more details about cones in locally convex topological vector spaces we may refer the reader to \cite{Ali}.
\
\begin{definition} (See \cite{CHY}, \cite{D_2008}, \cite{D_2009}) For $e \in intP$, the nonlinear scalarization function $\xi_e:E\rightarrow \mathbb R$ is defined by \[\xi_e(y)=\inf\{t \in \mathbb R: y \in te-P\}, \ \mbox{for all} \ y \in E.\] \end{definition} \begin{lemma} (See \cite{CHY}, \cite{D_2008}, \cite{D_2009}) For each $t\in \mathbb R$ and $y \in E$, the following are satisfied: \begin{itemize} \item[$(i)$] $\xi_e(y)\leq t\Leftrightarrow y \in te-P$, \item[$(ii)$] $\xi_e(y)> t\Leftrightarrow y \notin te-P$, \item[$(iii)$] $\xi_e(y)\geq t\Leftrightarrow y \notin te-intP$, \item[$(iv)$] $\xi_e(y)< t\Leftrightarrow y \in te-intP$, \item[$(v)$] $\xi_e(y)$ is positively homogeneous and continuous on $E$, \item[$(vi)$] if $y_1\in y_2+P$, then $\xi_e(y_2)\leq \xi_e(y_1)$, \item[$(vii)$] $\xi_e(y_1+y_2)\leq \xi_e(y_1)+\xi_e(y_2)$, for all $y_1,y_2 \in E$. \end{itemize} \label{lemma_scalarization} \end{lemma}
\begin{definition} Let $X$ be a non-empty set and $E$ as usual a Hausdorff locally convex topological space. Suppose a vector-valued function $p:X\times X\rightarrow E$ satisfies: \begin{enumerate} \item[$(M1)$] $0\leq p(x,y)$ for all $x,y \in X$, \item[$(M2)$] $p(x,y)=0$ if and only if $x=y$, \item[$(M3)$] $p(x,y)=p(y,x)$ for all $x,y \in X$ \item[$(M4)$] $p(x,y) \leq p(x,z)+p(z,y)$, for all $x,y,z \in X$. \end{enumerate} Then, $p$ is called TVS-cone metric on $X$, and the pair $(X,p)$ is called a TVS-cone metric space (in short, TVS-CMS). \end{definition}
Note that in \cite{HZ}, the authors considered $E$ as a real Banach space in the definition of TVS-CMS. Thus, a cone metric space (in short, CMS) in the sense of Huang and Zhang \cite{HZ} is a special case of TVS-CMS.
\begin{lemma} (See \cite{D_2009}) Let $(X,p)$ be a TVS-CMS. Then, $d_p:X \times X\rightarrow [0,\infty)$ defined by $d_p=\xi_e\circ p$ is a metric. \label{lemma_usual_metric} \end{lemma}
\begin{remark} Since a cone metric space $(X,p)$ in the sense of Huang and Zhang \cite{HZ}, is a special case of TVS-CMS, then $d_p:X \times X\rightarrow [0,\infty)$ defined by $d_p=\xi_e\circ p$ is also a metric. \label{remark_CMS_usual_ms} \end{remark}
\begin{definition}(See \cite{D_2009}) Let $(X,p)$ be a TVS-CMS, $x\in X$ and $\{x_n\}_{n=1}^{\infty}$ a sequence in $X$. \label{definition_convergence} \begin{itemize} \item[($i$)] $\{x_n\}_{n=1}^{\infty}$ TVS-cone converges to $x\in X$ whenever for every $0<<c\in E$, there is a natural number $M$ such that $p(x_n,x)<<c$ for all $n\geq M$; this is denoted by $cone-\lim_{n\rightarrow \infty}x_n=x$ (or $x_n\stackrel{cone}{\rightarrow} x$ as $n\rightarrow \infty$), \item[($ii$)] $\{x_n\}_{n=1}^{\infty}$ is a TVS-cone Cauchy sequence in $(X,p)$ whenever for every $0<<c\in E$, there is a natural number $M$ such that $p(x_n,x_m)<<c$ for all $n,m \geq M$, \item[($iii$)] $(X,p)$ is TVS-cone complete if every TVS-cone Cauchy sequence in $X$ is TVS-cone convergent. \end{itemize} \end{definition}
\begin{lemma} (See \cite{D_2009}) Let $(X,p)$ be a TVS-CMS, $x\in X$ and $\{x_n\}_{n=1}^{\infty}$ a sequence in $X$. Set $d_p=\xi_e \circ p$. Then the following statements hold: \begin{itemize} \item[($i$)] If $\{x_n\}_{n=1}^{\infty}$ converges to $x$ in TVS-CMS $(X,p)$, then $d_p(x_n,x)\rightarrow 0$ as $n\rightarrow \infty,$ \item[($ii$)] If $\{x_n\}_{n=1}^{\infty}$ is a Cauchy sequence in TVS-CMS $(X,p)$, then $\{x_n\}_{n=1}^{\infty}$ is a Cauchy sequence (in usual sense) in $(X,d_p)$, \item[($iii$)] If $(X,p)$ is a complete TVS-CMS, then $(X,d_p)$ is a complete metric space. \end{itemize} \label{lemma_eq_statements} \end{lemma}
\begin{proposition}(See \cite{D_2009}) Let $(X,p)$ be a complete TVS-CMS and $T:X\rightarrow X$ satisfy the contractive condition \begin{equation} p(Tx,Ty)\leq k p(x,y) \label{contraction} \end{equation} for all $x,y \in X$ and $0 \leq k <1$. Then, $T$ has a unique fixed point in $X$. Moreover, for each $x\in X$, the iterative sequence $\{T^nx\}_{n=1}^{\infty}$ converges to fixed point. \label{Du_thm22} \end{proposition}
\begin{definition} \label{defn of rec TVS-cone} Let $X$ be a nonempty set. A vector-valued function $p:X \times X \rightarrow E$ is said to be a rectangular TVS-cone metric, if the following conditions hold: \begin{itemize} \item[($RC1$)] $0 \leq p(x,y)$ for all $x,y \in X$ and $p(x,y)=0$ if and only if $x=y$, \item[($RC2$)] $p(x,y)=p(y,x)$ for all $x,y \in X$, \item[($RC3$)] $p(x,z)\leq p(x,y)+p(y,w)+p(w,z)$ for all $x,z \in X$ and for all distinct points $y,w \in X$ each of them different from $x$ and $z$. \end{itemize} The pair $(X,p)$ is then called a rectangular TVS-cone metric space (R-TVS-CMS). When $E$ is a Banach space, $(X,p)$ is called a rectangular cone metric space (R-CMS). When $E=\mathbb{R}$ and $P=[0,\infty)$, $(X,p)$ is called a rectangular metric space (R-MS). \end{definition}
Every TVS-CMS is R-TVS-CMS. However, the converse need not be true. \begin{example} \label{not} ( \cite{Akbar}, see also \cite{Branciari}) Let $X=\{1,2,3,4\}$, $E=\mathbb{R}^2$ and $P=\{(x,y):x, y \geq 0\}$. Define $d:X \times X\rightarrow E$ as follows: $$d(1,2)=d(2,1)=(3,6),~~d(2,3)=d(3,2)=d(1,3)=d(3,1)=(1,2),~~$$ $$d(1,4)=d(4,1)=d(2,4)=d(4,2)=d(3,4)=d(4,3)=(2,4).$$ Then $(X,d)$ is a R-CMS which is not a CMS, because $$(3,6)=d(1,2)>d(1,3)+d(3,2)=(1,2)+(1,2)=(2,4).$$ \end{example}
\begin{definition} \label{conver} Let $(X,p)$ be a rectangular TVS-cone metric space, $x \in X$ and $\{x_n\}$ a sequence in $X$.
(i) $\{x_n\}$ is said to be a Cauchy sequence if for any $0\ll c$ there exists $n_0\in \mathbb{N}$ such that for all $m,n\in \mathbb{N}$, $n\geqslant n_0$, one has $p(x_n,x_{n+m})\ll c$.
(ii)$\{x_n\}$ is said to converge to $x$ if for any $0\ll c$ there exists $n_0\in \mathbb{N}$ such that for all $n\geqslant n_0$, one has $p(x_n,x)\ll c$.
(iii) $(X,p)$ is called \textbf{complete} if every Cauchy sequence in $X$ is convergent in $X$.\\ \end{definition}
Let $T:X\rightarrow X$ be a mapping where $X$ is a R-TVS-CMS. For each $x\in X$, let \begin{displaymath} \textbf{O}(x)=\{x,Tx,T^2x,T^3x,\dotso\}. \end{displaymath}
\begin{definition} A cone metric space $X$ is said to be $T$-orbitally complete if every Cauchy sequence which is contained in $\textbf{O}(x)$ for some $x\in X$ converges in $X$. \end{definition}
\section{Kannan's Fixed Point Theorem in R-TVS-CMS} In order to realize the difference between TVS-CMS and R-TVS-CMS, we first prove Kannan's fixed point theorem in TVS-CMS.
\begin{theorem} \label{SA} Let $(X,d)$ be a TVS-CMS and the mapping $T:X\rightarrow X$ satisfy the contractive condition \begin{equation} \label{K} d(Tx,Ty)\leqslant \beta [d(x,Tx)+d(y,Ty)] \end{equation} holds for all $x,y\in X$ where $\displaystyle 0<\beta<\frac{1}{2}$. If $X$ is $T$-orbitally complete then $T$ has a unique fixed point in $X$. \end{theorem}
\textbf{Proof} Let $x\in X$. \begin{displaymath} \begin{array}{r c l} d(Tx,T^2x)&\leqslant& \beta [d(x,Tx)+d(Tx,T^2x)]\\[3mm] i.e.,~~d(Tx,T^2x)&\leqslant& \frac{\beta}{1-\beta}d(x,Tx) \end{array} \end{displaymath} Again, \begin{displaymath} \begin{array}{r c l} d(T^2x,T^3x)&\leqslant& \beta [d(Tx,T^2x)+d(T^2x,T^3x)]\\[3mm] i.e.,~~d(T^2x,T^3x)&\leqslant& \frac{\beta}{1-\beta}d(Tx,T^2x)\leqslant {\left(\frac{\beta}{1-\beta}\right)}^2d(x,Tx) \end{array} \end{displaymath} Similarly, \begin{displaymath} d(T^3x,T^4x)\leqslant {\left(\frac{\beta}{1-\beta}\right)}^3d(x,Tx) \end{displaymath} Thus in general, if $n$ is a positive integer, then \begin{equation} d(T^nx,T^{n+1}x)\leqslant r^nd(x,Tx) \end{equation} where $\displaystyle r=\frac{\beta}{1-\beta}$. Since $\displaystyle 0<\beta< \frac{1}{2}$, clearly $0<r<1$.\\
Now, our aim is to show that $\{T^nx\}$ is a Cauchy sequence. Assume $m\in \mathbb{N}$ and $n>m$, then we have \begin{displaymath} \begin{array}{r c l} d(T^nx,T^mx)&\leqslant& d(T^nx,T^{n-1}x)+d(T^{n-1}x,T^{n-2}x)+\dotso+d(T^{m+1}x,T^mx)\\[3mm] &\leqslant& (r^{n-1}+r^{n-2}+\dotso+r^m)d(x,Tx)\\[3mm] &\leqslant& \frac{r^m}{1-r}d(x,Tx) \end{array} \end{displaymath}
Let $0\ll c$ be given. Find $\delta >0$ and $q \in S$ such that $q(b) < \delta$ implies $b\ll c$.\\ Now, since \begin{displaymath} \frac{r^m}{1-r}d(x,Tx) \to 0 \hspace{2 mm} as \hspace{2 mm} m\to \infty \end{displaymath} then find $n_0$ such that : \begin{displaymath}
q( {\frac{r^m}{1-r}d(x,Tx)}) < \delta \hspace{3 mm} \forall m\geqslant n_0 \end{displaymath} Hence, $\displaystyle \frac{r^m}{1-r}d(x,Tx)\ll c$, $\forall m\geqslant n_0$.\\
Thus, $d(T^nx,T^mx)\ll c$ for $n>m \geq n_0$. Therefore, $\{T^nx\}$ is a Cauchy sequence in $(X,d)$. Since $(X,d)$ is $T$-orbitally complete, there exists $x^*\in X$ such that $T^nx \to x^*$.\\ Choose a natural number $n_1$ such that $d(T^{n-1}x,T^nx)\ll \frac{c}{2}$ and $\displaystyle d(T^nx,x^*)\ll \frac{c}{2}$, for all $n\geqslant n_1$. Hence, for $n>n_1$ we have \begin{displaymath} \begin{array}{r c l} d(Tx^*,x^*)&\leqslant& d(TT^{n-1}x,Tx^*)+d(T^nx,x^*)\\[3mm] &\leqslant& \beta[d(T^{n-1}x,T^nx)+d(x^*,Tx^*)]+d(T^nx,x^*)\\[3mm] &=&\beta d(T^{n-1}x,T^nx)+\beta d(x^*,Tx^*)+d(T^nx,x^*)\\[3mm] &\leqslant& \frac{c}{2}+ \beta d(x^*,Tx^*)+\frac{c}{2} \end{array} \end{displaymath}
So, \begin{displaymath} (1-\beta)d(Tx^*,x^*)\ll c \end{displaymath} Hence, \begin{displaymath} (1-\beta)d(Tx^*,x^*)\ll \frac{c}{m}\hspace{3 mm}\forall m\geqslant1 \end{displaymath}
Hence, $\displaystyle \frac{c}{m}-(1-\beta)d(Tx^*,x^*)\in P$, for all $m\geqslant1$. Since $\displaystyle \frac{c}{m}\to 0$ as $m\to \infty$ and $P$ is closed, we obtain \begin{displaymath} -(1-\beta)d(Tx^*,x^*)\in P\hspace{3 mm}\textrm{and} \hspace{3 mm}(1-\beta)d(Tx^*,x^*)\in P \end{displaymath} and, from the cone properties, $(1-\beta)d(Tx^*,x^*)=0$. Since $(1-\beta)$ is never equal to zero, we get $d(Tx^*,x^*)=0$. Thus $Tx^*=x^*$.\\
Now, if $y^*$ is another fixed point of $T$ then $Tx^*=x^*$ and $Ty^*=y^*$. Then, we have \begin{displaymath} 0\leqslant d(x^*,y^*)=d(Tx^*,Ty^*)\leqslant \beta d(x^*,Tx^*)+\beta d(y^*,Ty^*)=0 \end{displaymath} Hence, $d(x^*,y^*)=0$ and so $x^*=y^*$. Therefore, the fixed point of $T$ is unique.\\
Now, we prove Kannan's fixed point theorem in R-TVS-CMS.
\begin{theorem} \label{AS} Let $T:X\rightarrow X$ be a mapping where $(X,d)$ is a $T$-orbitally complete R-TVS-CMS such that \begin{equation} \label{Ka} d(Tx,Ty)\leqslant \beta [d(x,Tx)+d(y,Ty)] \end{equation} holds for all $x,y\in X$ and $\displaystyle 0<\beta< \frac{1}{2}$. Then, $T$ has a unique fixed point in $X$. \end{theorem} \begin{proof}
As in the proof of Theorem \ref{SA}, for a fixed $x\in X$, we have for all $n \in \mathbb{N}$
\begin{equation} d(T^nx,T^{n+1}x)\leqslant r^nd(x,Tx) \label{d} \end{equation} where $\displaystyle r=\frac{\beta}{1-\beta}$. Since $\displaystyle 0<\beta< \frac{1}{2}$, clearly $0<r<1$.\\ Since we are not able to use the triangle inequality, we divide the proof into two cases so that we can make use of the rectangle inequality.
\textbf{Case I}: First assume that $T^mx\neq T^nx$ for $m,n\in N,m\neq n$. Then, for $n \in N$. Clearly, \begin{displaymath} \begin{array}{r c l} d(T^nx,T^{n+1}x)&\leqslant& r^nd(x,Tx)<\left(\frac{r^n}{1-r}\right)d(x,Tx)\\[3mm] \texttt{and}~~ d(T^nx,T^{n+2}x)&\leqslant& \beta [d(T^{n-1}x,T^nx)+d(T^{n+1}x,T^{n+2}x)]\\[3mm] &\leqslant&\beta\left[{\left(\frac{\beta}{1-\beta}\right)}^{n-1}d(x,Tx)+{\left(\frac{\beta}{1-\beta}\right)}^{n+1}d(x,Tx)\right] \textbf{\hspace{1cm}(by\hspace{2mm}\ref{d})}\\[4mm] &\leqslant& {\left(\frac{\beta}{1-\beta}\right)}^nd(x,Tx)+{\left(\frac{\beta}{1-\beta}\right)}^{n+1}d(x,Tx)\\[4mm] &\leqslant& \left(\frac{r^n}{1-r}\right)d(x,Tx) \end{array} \end{displaymath} since $\displaystyle 0<\beta< \frac{1}{2}$, $\displaystyle \beta \leqslant \frac{\beta}{1-\beta}$.\\
Now, if $m>2$ is odd, then writing $m=2k+1$, $k\geqslant 1$ and using the fact that $T^px\neq T^rx$ for $p,r\in N$, $p\neq r$, we can easily show by the rectangular inequality that \begin{equation} \begin{array}{r c l} d(T^nx,T^{n+m}x)&\leqslant& d(T^nx,T^{n+1}x)+d(T^{n+1}x,T^{n+2}x)+\dotso+d(T^{n+2k}x,T^{n+2k+1}x)\\[3mm] &\leqslant& r^nd(x,Tx)+r^{n+1}d(x,Tx)+\dotso+r^{n+2k}d(x,Tx) \textbf{\hspace{1cm}(by\hspace{2mm}\ref{d})}\\[3mm] &\leqslant& \left(\frac{r^n}{1-r}\right)d(x,Tx) \label{e} \end{array} \end{equation} Again, if $m>2$ is even, then writing $m=2k$, $k\geqslant2$ and using the same arguments as before, we get by the rectangular inequality \begin{displaymath} \begin{array}{r c l} d(T^nx,T^{n+m}x)&\leqslant& d(T^nx,T^{n+2}x)+d(T^{n+2}x,T^{n+3}x)+\dotso+d(T^{n+2k-1}x,T^{n+2k}x)\\[3mm] &\leqslant&[r^n+r^{n+1}+r^{n+2}+\dotso+r^{n+2k-1}]d(x,Tx) \hspace{1cm}\textbf{(by\hspace{2mm}\ref{d},\hspace{2mm}\ref{e})} \end{array} \end{displaymath} Thus, combining all the cases, we have \begin{equation} d(T^nx,T^{n+m}x)\leqslant \left(\frac{r^n}{1-r}\right)d(x,Tx)\label{f} \end{equation} for all $m,n\in N$. Since $0<r<1$, $r^n \to 0$ as $n\to\infty $ and so, by following a similar argument as in the proof of Theorem \ref{SA}, $\{T^nx\}$ is a Cauchy sequence. Since $X$ is $T$-orbitally complete, $\{T^nx\}$ is convergent. Let $u$ be defined as: \begin{equation} u=\lim_{n \to \infty}{T^nx} \end{equation} We shall now show that $Tu=u$. Without any loss of generality we assume that $T^nx\neq u$ and $T^nx\neq Tu$ for any $n\in N$. Then by (\ref{Ka}) and the rectangular inequality, we obtain \begin{displaymath} \begin{array}{r c l} d(u,Tu)&\leqslant& d(u,T^nx)+d(T^nx,T^{n+1}x)+d(T^{n+1}x,Tu)\\[3mm] &\leqslant& d(u,T^nx)+d(T^nx,T^{n+1}x)+\beta [d(T^nx,T^{n+1}x)+d(u,Tu)]\\[3mm] i.e., ~~d(u,Tu)&\leqslant& \frac{1}{1-\beta}[d(u,T^nx)+(1+\beta)d(T^nx,T^{n+1}x)] \end{array} \end{displaymath}
Since $T^nx\rightarrow u$ and $\{T^nx\}$ is Cauchy, we obtain $0\leq d(u,Tu)\ll c$ for all $c\gg 0$. Then closedness of the cone $P$ implies that $u=Tu$.\\
\textbf{Case II}: Let $T^mx=T^nx$ for some $m,n\in N$, $m\neq n$. Let $m>n$. Then $T^{m-n}(T^nx)=T^nx$ i.e.,$T^ky=y$ where $k=m-n,y=T^nx$. Now if $k>1$ \begin{displaymath} d(y,Ty)=d(T^ky,T^{k+1}y)\leqslant {\left(\frac{\beta}{1-\beta}\right)}^kd(y,Ty) \hspace{1cm}\textbf{(by\hspace{2 mm}\ref{d})} \end{displaymath} Since $\displaystyle 0<\frac{\beta}{1-\beta}<1$, $d(y,Ty)=0$ i.e., $Ty=y$. That the fixed point of $T$ is unique easily follows from (\ref{Ka}). This completes the proof of the theorem. \end{proof} Theorem \ref{AS} above generalizes the results obtained in \cite{Beg}, where Kannan's fixed point theorem was proved in CMS and under the normality assumption. However, the proofs in this article are done without any normality type assumption.
\section{ The nonlinear scalarization and Kannan's fixed point theorem} In this section, we use the nonlinear scalarization function to obtain a simpler shorter proof for the Kannan's fixed point theorem in R-TVS-CMS. \begin{theorem} \label{rectangular} Let $(X,p)$ be a rectangular TVS-cone metric space. Then $(X,d_p)$, where $d_p:=\xi_e \circ p$, is a rectangular metric space (R-MS). \end{theorem}
\begin{proof} By $RC1$, the definition of $\xi_e$ and that $P\cap -P=\{0\}$ we have $d_p(x,y)\geq 0$ for all $x, y \in X$. By $RC2$, $d_p(x,y)=d_p(y,x)$ for all $x, y \in X$. If $x=y$, then by $RC1$ $d_p(x,y)=\xi_e(0)=0$. Conversely, if $d_p(x,y)=0$, then by Lemma \ref{lemma_scalarization}, $RC1$ and that $P\cap -P=\{0\}$, we conclude that $p(x,y)=0$ and hence by $RC2$, $x=y$. Finally the rectangular inequality follows by Lemma \ref{lemma_scalarization} $(vi)$, $(vii)$ and $RC3$. \end{proof}
\begin{lemma} \label{TH} Let $(X,p)$ be a R-TVS-CMS, $x\in X$ and $\{x_n\}_{n=1}^{\infty}$ a sequence in $X$. Set $d_p=\xi_e \circ p$. Then the following statements hold: \begin{itemize} \item[($i$)] $\{x_n\}_{n=1}^{\infty}$ converges to $x$ in the R-TVS-CMS $(X,p)$ if and only if $d_p(x_n,x)\rightarrow 0$ as $n\rightarrow \infty,$ \item[($ii$)] $\{x_n\}_{n=1}^{\infty}$ is Cauchy sequence in the R-TVS-CMS $(X,p)$ if and only if $\{x_n\}_{n=1}^{\infty}$ is a Cauchy sequence in the rectangular metric space $(X,d_p)$, \item[($iii$)] $(X,p)$ is a complete R-TVS-CMS if and only if $(X,d_p)$ is a complete rectangular metric space. \end{itemize} \end{lemma}
\begin{proof}
Applying Theorem \ref{rectangular}, $d_p$ is a rectangular metric on $X$. Regarding (i): first, assume $\{x_n\}$ converges to $x$ in the R-TVS-CMS $(X,p)$ and let $\epsilon > 0$ be given. Find $n_0$ such that $p(x_n,x)\ll \epsilon e$ for all $n>n_0$. Therefore, by Lemma \ref{lemma_scalarization} (iv), $d_p(x_n,x)=\xi_e \circ p(x_n,x)<\epsilon $, for all $n>n_0$. Conversely, we prove that if $x_n\rightarrow x$ in $(X,d_p)$ then $x_n\rightarrow x$ in the R-TVS-CMS $(X,p)$. To this end, let $c>>0$ be given, then find $q \in S$ and $\delta >0$ such that $q(b)<\delta$ implies that $b<< c$. Since $\frac{e}{n}\rightarrow 0$ in $(E,S)$, find $\epsilon = \frac{1}{n_0}$ such that $\epsilon q(e)=q(\epsilon e)<\delta $ and hence $\epsilon e << c$. Now, find $n_0$ such that $d_p (x_n,x)=\xi_e\circ p (x_n,x)< \epsilon$ for all $n\geq n_0$. Hence, by Lemma \ref{lemma_scalarization} (iv), $p (x_n,x)<< \epsilon e<< c$ for all $n\geq n_0$. The proof of (ii) is similar to the proof of (i). Finally, (iii) is immediate from (i) and (ii). \end{proof} Now the proof of Theorem \ref{AS} can be achieved by Lemma \ref{TH}, Theorem \ref{rectangular} and by Kannan's fixed point theorem (see \cite{Das}) applied to the R-MS $(X,d_p)$.
\end{document}
\begin{document}
\begin{abstract} We describe new irreducible components of the moduli space of rank $2$ semistable torsion free sheaves on the three-dimensional projective space whose generic point corresponds to non-locally free sheaves whose singular locus is either 0-dimensional or consists of a line plus disjoint points. In particular, we prove that the moduli spaces of semistable sheaves with Chern classes $(c_1,c_2,c_3)=(-1,2n,0)$ and $(c_1,c_2,c_3)=(0,n,0)$ always contain at least one rational irreducible component. As an application, we prove that the number of such components grows as the second Chern class grows, and compute the exact number of irreducible components of the moduli spaces of rank 2 semistable torsion free sheaves with Chern classes $(c_1,c_2,c_3)=(-1,2,m)$ for all possible values for $m$; all components turn out to be rational. Furthermore, we also prove that these moduli spaces are connected, showing that some of sheaves here considered are smoothable. \end{abstract}
\maketitle \tableofcontents
\section{Introduction}
Following the proof of existence of a projective moduli scheme parametrizing S-equivalence classes of semistable sheaves on a projective variety by Maruyama \cite{Maruyama}, the study of the geometry of such moduli spaces has been a central topic of research within algebraic geometry. Although a lot is known for curves and surfaces, general results for three dimensional varieties are still lacking. In fact, moduli spaces of sheaves on 3-folds turn out to be quite complicated spaces (as it is illustrated by Vakil's Murphy's law \cite{V}), particularly with several irreducible components of various dimensions.
The goal of this paper is to advance on the study of the moduli space of semistable rank 2 sheaves on $\mathbb{P}^3$ with fixed Chern classes $(c_1,c_2,c_3)$, which we will denote by ${\mathcal M}(c_1,c_2,c_3)$. Additionally, we will also consider the open subset consisting of stable reflexive sheaves, denoted by ${\mathcal R}(c_1,c_2,c_3)$; when $c_3=0$, this is actually the moduli space of stable locally free sheaves, and this will be denoted by ${\mathcal B}(c_1,c_2)$. Questions on the geometry of such spaces, such as connectedness, or the number of irreducible components, seem to be less explored if compared to the study of the geometry of the Hilbert schemes of curves in the projective $3$-space for instance; some results for Hilbert schemes of curves can be found in \cite{H1966,KO2015, K2017,N1997,NS2001}.
A rich literature on these moduli spaces was produced, especially in the 1980's and 1990's, studying ${\mathcal R}(c_1,c_2,c_3)$ and ${\mathcal B}(c_1,c_2)$ for specific values of the Chern classes. For instance, the geometry of ${\mathcal B}(0,c_2)$ and ${\mathcal B}(-1,c_2)$ is completely understood for $c_2$ up to $5$, see \cite{Barth1,H1978,C1983,ES1981, AJTT2017} for $c_1=0$, and \cite{Hart2,Banica} for $c_1=-1$. In addition, Ein characterized an infinite series of irreducible components of ${\mathcal B}(c_1,c_2)$ and proved that the number of irreducible components of $\mathcal{B}(c_1,c_2 )$ goes to infinity as $c_2$ goes to infinity \cite{Ein}.
Regarding reflexive sheaves, ${\mathcal R}(c_1,c_2,c_3)$ is known for $c_2\le3$ and all possible values for $c_3$, see \cite{Chang} and the references therein. Some extremal values are also known, namely, ${\mathcal R}(-1, c_2, c_2^2)$ was studied by Hartshorne in \cite{Harshorne-Reflexive}, Chang described ${\mathcal R}(0, c_2, c_2^2 -2c_2 +4)$ in \cite{Chang2}, while Mir\'o-Roig studied ${\mathcal R}(-1;c_2;c_2^2-2c_2 +4)$ in \cite{Maria-Miro2}, and the moduli spaces ${\mathcal R}(-1, c_2, c_2^2 - 2rc_2 +2r(r+1))$ for $1 \leq r \leq (-1 + \sqrt{4c_2 -7})\slash 2$, and $c_2$ greater than $5$, and ${\mathcal R}(-1, c_2, c_2^2 -2(r-1)c_2 )$ for $c_2$ greater than $8$ in \cite{Maria-Miro}.
Even less is known for torsion free sheaves. Okonek and Spindler proved in \cite{OS1985} that ${\mathcal M}(0,c_2,c_2^2-c_2+2)$ and ${\mathcal M}(-1,c_2,c_2^2)$ are irreducible for $c_2 \geq 6$. For small values of $c_2$, Mir\'o-Roig and Trautmann proved in \cite{Miro-Trautmann} that ${\mathcal M}(0,2,4)$ is irreducible, while Le Potier showed in \cite[Chapter 7]{LeP1993} that ${\mathcal M}(0,2,0)$ has exactly $3$ irreducible rational components; more recently, it was shown in \cite{JMT} that ${\mathcal M}(0,2,0)$ is connected. Trautmann has also argued that ${\mathcal M}(0,2,2)$ has exactly $2$ irreducible components \cite{Tr}. More recently, Schmidt proved in \cite{S2018}, that ${\mathcal M}(0,c_2,c_2^2-c_2+2)$ and ${\mathcal M}(-1,c_2,c_2^2)$ are irreducible for any $c_2 \geq 0$, using methods different from the ones employed by Okonek and Spindler and by Mir\'o-Roig and Trautmann.
${\mathcal M}(0,c_2,0)$ for $c_2\ge2$ was studied in \cite{JMT}, where new infinite series of irreducible components are described. The starting point is the identification of three different types of torsion free sheaves; more precisely, let $E$ be a torsion free sheaf on $\mathbb{P}^3$, and set $Q_E := E^{\vee \vee} \slash E$, which we assume to be nontrivial; we have the following fundamental sequence \begin{equation}\label{fundamental} 0 \to E \to E^{\vee \vee} \to Q_E \to 0 \end{equation} and say that $E$ has \begin{itemize} \item \textit{0-dimensional singularities} if $\dim Q_E =0$; \item \textit{1-dimensional singularities} if $Q_E$ has pure dimension 1; \item \textit{mixed singularities} if $\dim Q_E =1$, but $Q_E$ is not pure. \end{itemize}
With this definition in mind, a systematic way of producing examples of irreducible components of ${\mathcal M}(0,c_2,0)$ whose generic point corresponds to a torsion free sheaf with $0$-dimensional and $1$-dimensional singularities is given in \cite{JMT}. Furthermore, the third author and Ivanov \cite{IT} constructed irreducible components of ${\mathcal M}(0,3,0)$ whose generic point corresponds to a torsion free sheaf with mixed singularities. Additionally, in a recent paper \cite{I2019}, Ivanov proved that ${\mathcal M}(0,3,0)$ has at least 11 irreducible components.
Our first goal in this paper is to generalize the results presented in \cite{IT,JMT}, and show how to produce irreducible components of ${\mathcal M}(c_1,c_2,c_3)$, for values of $c_1$, $c_2$ and $c_3$ also including cases with $c_1=-1$ and $c_3 \neq 0$, for sheaves with $0$-dimensional, $1$-dimensional, and mixed singularities. More precisely, we prove the following two statements.
\begin{MainTeo}\label{main1} For each $e \in \{-1,0\}$, let $n$ and $m$ be positive integers such that $en \equiv m (\mathrm{mod~2})$. Let $\mathcal{R}^{*}$ be a nonsingular, irreducible component of $\mathcal{R}(e,n,m)$ of expected dimension $8n-3+2e$. \begin{itemize} \item[(i)] For each $l\ge1$, there exists an irreducible component $$ \mathrm{T}(e,n,m,l)\subset \mathcal{M}(e,n,m-2l) $$ of dimension $8n-3+2e+4l$ whose generic sheaf $[E]$ satisfies $[E^{\vee\vee}]\in\mathcal{R}^{*}$ and $\mathrm{length}(Q_E)=l$. \item[(ii)] For each $r\ge2$ and $s\ge1$ such that $2r+2s\le m+e+2$, or $r=1$ and $s=0$ when $-e=n=m=1$, there exists an irreducible component $$ \mathrm{X}(e,n,m,r,s)\subset{\mathcal M}(e,n+1,m+2+e-2r-2s)$$ of dimension $8n+4s+2r+2+e$, whose generic sheaf $[E]$ satisfies $[E^{\vee\vee}]\in\mathcal{R}^{*}$ and $Q_E$ is supported on a line plus $s$ points. \end{itemize} \end{MainTeo}
The case $e=0$ of the first part of the previous theorem is just \cite[Theorem 7]{JMT}; we prove here the case $e=-1$. The second part is a generalization of \cite[Theorem 3]{IT}, which covers the cases $e=0$, $n=2$, $m=2,4$.
Our second goal in this paper concerns the problem of rationality of irreducible components of the moduli spaces ${\mathcal M}(e,n,m)$. The study of this problem for the moduli components of locally free sheaves, which are contained in ${\mathcal M}(-1,2n,0)$ and ${\mathcal M}(0,n,0),\ n\ge1$, dates back to the late 1970s and early 1980s. The rationality of these moduli components was proved for $n\le 3$ in case $e=0$ \cite{Barth1,H1978,ES1981,Hart2}. The first infinite series of rational moduli components were constructed and studied in \cite{BH,ES1981,V1,V2}. Recently, A. Kytmanov, A. Tikhomirov and S. Tikhomirov \cite{KTT} showed that there is a large infinite series of rational moduli components of locally free sheaves from ${\mathcal M}(-1,2n,0)$ and ${\mathcal M}(0,n,0)$ which includes the above mentioned series. These are the so-called Ein components which were first found and studied by A. P. Rao \cite{Rao} and L. Ein \cite{Ein}. However, it is still an open question whether these components exist for every $n$ sufficiently large (there are gaps for some small values of $n$, see \cite{KTT} for details). One of the central results of our paper states that, for any $n\ge1$ there exist rational irreducible components of ${\mathcal M}(-1,2n,0)$ and of ${\mathcal M}(0,n,0)$. The precise statement is given by the following theorem.
\begin{MainTeo}\label{main2} ~~\newline \begin{itemize} \item[(i)] For any $n\ge1$, the scheme ${\mathcal M}(-1,2n,0)$ contains at least one rational, generically reduced, irreducible component with generic sheaf having 0-dimensional singularities. For any $n\ge3$, ${\mathcal M}(-1,2n,0)$ contains at least one rational, generically reduced, irreducible component with generic sheaf having purely 1-dimensional singularities, and at least $2(n^2-n-1)$ rational, generically reduced, irreducible components with generic sheaves having singularities of mixed dimension.
\item[(ii)] For any $n\ge2$, the scheme ${\mathcal M}(0,n,0)$ contains at least one rational, generically reduced, irreducible component with generic sheaf having 0-dimensional singularities. For any $n\ge3$, the scheme ${\mathcal M}(0,n,0)$ contains at least one rational, generically reduced, irreducible component with generic sheaf having purely 1-dimensional singularities. For any $n\ge4$, the scheme ${\mathcal M}(0,n,0)$ contains at least $\frac{n(n-3)}{2}$ rational, generically reduced, irreducible components with generic sheaves having singularities of mixed dimension. \end{itemize} \end{MainTeo}
In addition, we also show that ${\mathcal M}(e,n,m)$ has rational irreducible components for $e=-1,0$ and $n,m$ varying in a wide range (see Theorem \ref{Tmain2} for the precise statement).
The proof of this theorem is based on the above mentioned results of Chang \cite{Chang2}, Mir\'o-Roig \cite{Maria-Miro}, Okonek--Spindler \cite{OS1985} and Schmidt \cite{S2018} on reflexive sheaves, and uses elementary transformations of reflexive sheaves along finite sets of points.
We give two applications of our constructions. First, we prove that the number of irreducible components of ${\mathcal M}(-1,n,0)$ whose generic point corresponds to a sheaf with mixed singularities grows as $n$ grows, see Theorem \ref{einmixed} below. Second, we provide a full description of the irreducible components of ${\mathcal M}(-1,2,m)$.
\begin{MainTeo}\label{main3} The moduli spaces ${\mathcal M}(-1,2,m)$ are connected and \begin{itemize} \item[(i)] ${\mathcal M}(-1,2,4)$ is irreducible and rational of dimension 11; \item[(ii)] ${\mathcal M}(-1,2,2)$ is connected and has exactly $2$ irreducible rational components of dimensions 11 and 15; \item[(iii)] ${\mathcal M}(-1,2,0)$ is connected and has exactly $4$ irreducible rational components of dimensions 11, 11, 15, and 19. \end{itemize} \end{MainTeo}
Note that the rationality of all of these components follows directly from Main Theorem \ref{main2}.
\begin{figure}
\caption{This is a representation of the geography of the moduli space ${\mathcal M}(-1,2,0)$. Each segment represents one of the irreducible components of ${\mathcal M}(-1,2,0)$ and it is accompanied by its dimension. The blue component is the closure of ${\mathcal B}(-1,2)$. The red components are of the type described in the first part of Main Theorem 1, namely $\mathrm{T}(l)=\mathrm{T}(-1,2,2l,l)$. The black component is of the type described in the second part of Main Theorem 1, namely $\mathrm{X}=\mathrm{X}(-1,1,1,1,0)$. The intersection of lines indicates when the corresponding components intersect.}
\label{figure}
\end{figure}
We emphasize that proving that these moduli spaces are connected is quite relevant, since it is not known whether moduli spaces of rank 2 sheaves are in general connected, as it is the case for Hilbert schemes. In addition, we also provide very concrete descriptions of the generic points in each irreducible component; for a more detailed statement, see Theorems \ref{M(-1,2,2)} and \ref{M(-1,2,0)} for the cases $c_3=2$ and $c_3=0$, respectively. A representation of the geography of ${\mathcal M}(-1,2,0)$ is presented in Figure \ref{figure}, showing how the various irreducible components intersect one another.
Another important aspect of the proof of the connectedness part of Main Theorem \ref{main3} is that we are implicitly showing that some of the sheaves presented in Main Theorem \ref{main1} are smoothable. To be more precise, a semistable non locally free sheaf with $c_3=0$ is said to be \emph{smoothable} if it can be deformed into a stable locally free sheaf, that is, if it lies in the closure of an irreducible component of ${\mathcal B}(c_1,c_2)$ within ${\mathcal M}(c_1,c_2,0)$. In the observations following the proof of Theorem \ref{connected2} we provide certain sufficient conditions for smoothability of sheaves in ${\mathcal M}(-1,2,0)$.
The paper is organized as follows. In Section \ref{First Computations} we build up some basic techniques and preliminary results. We compute the dimensions of the Ext groups of torsion free sheaves in terms of their Chern classes, and use it in Section \ref{New Irreducible Components} in order to produce the examples of irreducible components of the moduli space of torsion free sheaves, and to prove Main Theorem \ref{main1}. These results are then explored at the end of Section \ref{New Irreducible Components} to prove that the number of irreducible components of ${\mathcal M}(c_1,c_2,0)$ whose generic point corresponds to a sheaf with mixed singularities goes to infinity as $c_2$ goes to infinity, thus providing our first application. Main Theorem \ref{main2} is proved in Section \ref{Rational Components}.
The remainder of the paper is occupied with the proof of Main Theorem \ref{main3}. The irreducibility of ${\mathcal M}(-1,2,4)$ is established in Section \ref{irreducible of M(-1,2,4)}. After further technical results in Sections \ref{irreducible of M(2)} and \ref{descr of X} regarding the families of sheaves introduced in Main Theorem \ref{main1}, we dedicate Sections \ref{irreducible of M(-1,2,2)} and \ref{irreducible of M(-1,2,0)} to describing all irreducible components of ${\mathcal M}(-1,2,m)$ for $c_3=2$ and $c_3=0$, respectively. The connectedness of ${\mathcal M}(-1,2,m)$ is finally established in Section \ref{Connectedness of M(2)}.
\noindent{\bf Acknowledgements.} This work started with discussions among the authors during a visit to SISSA in May 2016; we thank Ugo Bruzzo and SISSA for its support and hospitality. CA was supported by the FAPESP grants number 2014/08306-4 and 2016/14376-0; part of this work was made when he was visiting the University of Barcelona, and he is grateful for its warm hospitality, also thanking Rosa Maria Mir\'o Roig for the several useful discussions on this topic. MJ is partially supported by the CNPq grant number 302889/2018-3 and the FAPESP Thematic Project 2018/21391-1; part of this work was done during a visit to the University of Edinburgh, and later completed during a visit to the Simons Center for Geometry and Physics; MJ is grateful for the hospitality of both institutions. AT was supported by funding within the framework of the State Maintenance Program for the Leading Universities of the Russian Federation "5-100". AT also thanks the Max Planck Institute for Mathematics in Bonn for hospitality, where this work was partially done during the winter of 2017. This work was also partially funded by CAPES - Finance Code 001.
\section{First computations}\label{First Computations}
In order to study the moduli spaces of torsion free sheaves on $\mathbb{P}^3$ we will need an explicit method to compute $\dim \ext^1(E,E)$, which gives us the dimension of the tangent space of the isomorphism class of a stable torsion free sheaf $E$ as a point of the moduli space. Our main goal in this section is to prove the following theorem.
\begin{Teo}\label{dimext1} Let $E$ be a stable rank 2 torsion free sheaf on $\mathbb{P}^3$ with $e:=c_1(E)\in\{-1,0\}$. Then $$\dim\ext^1(E,E)-\dim\ext^2(E,E)=8c_2(E)-3-2c_1(E)^2=8c_2(E)- 3-2e.$$ \end{Teo}
Note that this result generalizes \cite[Lemma 5d)]{JMT} and \cite[Lemma 10]{JMT}, which establish the formula above for stable rank 2 torsion free sheaves with 0- and 1-dimensional singularities, respectively, in the case $c_1(E)=0$. The proofs for sheaves with 0- and 1-dimensional singularities with arbitrary $c_1$ are quite similar to the one in \cite{JMT}; therefore, we only include here the proof for sheaves with mixed singularities.
\noindent Theorem \ref{dimext1} together with the deformation theory yields \begin{Cor}\label{dim calm} Any irreducible component of the moduli space ${\mathcal M}(e,c_2,c_3)$ has dimension at least $8c_2-3+2e$. \end{Cor}
\begin{Lema}\label{exts general} If $E$ is a torsion free sheaf on $\mathbb{P}^3$, then: \begin{itemize} \item[(i)] $\ext^1(E,E) = \mathrm{H}^1(\lhom(E,E))\oplus\ker d^{01}_2$; \item[(ii)] $\ext^2(E,E) = \ker d^{02}_3 \oplus \ker d^{11}_2 \oplus \coker d^{01}_2$; \item[(iii)] $\ext^3(E,E) = \coker d^{02}_3$. \end{itemize} Here, $d^{pq}_j$ are the differentials in the $j$-th page of the spectral sequence for local to global ext's $E^{pq}_2:=\mathrm{H}^p(\lext^q(E,E))$. In particular, we have $$ \sum_{j=0}^{3}(-1)^j\dim \ext^j(E,E) = \chi(\lhom(E,E)) - \chi(\lext^1(E,E)) + h^0(\lext^2(E,E)). $$ \end{Lema} \begin{proof} The first part is a standard calculation with the spectral sequence $E^{pq}_2:=\mathrm{H}^p(\lext^q(E,E))$, which converges in its fourth page, because the spectral maps vanish. Note that $\mathrm{H}^p(\lext^q(E,E))=0$ for $p\ge2$ and $q\ge1$, since $\dim\lext^q(E,E)\le1$ for $q\ge1$. Furthermore, applying the functor $\lhom(\cdot,E)$ to the fundamental sequence \eqref{fundamental}, we get an epimorphism $\lext^3(E^{\vee\vee},E)\twoheadrightarrow\lext^3(E,E)$ and the isomorphism $\lext^2(E,E)\simeq\lext^3(Q_E,E)$; however, the sheaf on the left vanishes because $E^{\vee\vee}$ is reflexive, so $\lext^3(E,E)=0$ as well. Finally, we also check that $\dim\lext^2(E,E)=0$; indeed, $E$ admits a resolution of the form \begin{equation}\label{res-e} 0 \to L_2 \to L_1 \to L_0 \to E \to 0, \end{equation} where $L_k$ are locally free sheaves; we then get an epimorphism $$ \lext^3(Q_E,\op3)\otimes L_0 \twoheadrightarrow \lext^3(Q_E,E), $$ which implies that $\dim\lext^3(Q_E,E)=0$ since $\dim\lext^3(Q_E,\op3)=0$.\\ The second claim is an immediate consequence of the first, since $\dim\lext^2(E,E)=0$. \end{proof}
Assuming that $E$ is $\mu$-semistable provides a useful simplification of the previous general result.
\begin{Lema}\label{Extisomixed} If $E$ is a $\mu$-semistable torsion free sheaf on $\mathbb{P}^3$, then: \begin{itemize} \item[(i)] $\ext^1(E,E) = \mathrm{H}^1(\lhom(E,E))\oplus\ker d^{01}_2$; \item[(ii)] $\ext^2(E,E) = \mathrm{H}^0(\lext^2(E,E)) \oplus \mathrm{H}^1(\lext^1(E,E)) \oplus \coker d^{01}_2$; \item[(iii)] $\ext^3(E,E)=0$. \end{itemize} Here, $d^{01}_2$ is the spectral sequence differential $d^{01}_2: \mathrm{H}^0(\lext^1(E,E)) \to \mathrm{H}^2(\lhom(E,E))$. \end{Lema} \begin{proof} The last item follows from Serre duality: we have $$ \ext^3(E,E) \simeq \Hom(E,E(-4))^* = 0, $$ with the vanishing given by $\mu$-semistability. In addition, we argue that $\mu$-semistability also implies that $\mathrm{H}^3(\lhom(E,E))=0$. Indeed, applying the functors $\lhom(\cdot,E)$ and $\lhom(E^{\vee\vee},\cdot)$ to the fundamental sequences \eqref{fundamental} we obtain, respectively, $$ 0 \to \lhom(E^{\vee\vee},E) \to \lhom(E,E) \to \lext^1(Q_E,E) \to \cdots $$ and $$ 0 \to \lhom(E^{\vee\vee},E) \to \lhom(E^{\vee\vee}, E^{\vee\vee})\to\lhom(E^{\vee\vee},Q_E)\to\cdots $$ In both sequences, the rightmost sheaf has dimension at most 1, hence so does the cokernel of the leftmost monomorphism, and it follows that $$ \mathrm{H}^3(\lhom(E,E)) \simeq \mathrm{H}^3(\lhom(E^{\vee\vee},E)) \simeq \mathrm{H}^3(\lhom(E^{\vee\vee},E^{\vee\vee})). $$ However $$ \mathrm{H}^3(\lhom(E^{\vee\vee},E^{\vee\vee}))=\ext^3(E^{\vee\vee}, E^{\vee\vee}) \simeq \Hom(E^{\vee\vee},E^{\vee\vee}(-4))^*= 0; $$ the first equality follows from the spectral sequence for local to global ext's for $E^{\vee\vee}$, the isomorphism in the middle is given by Serre duality, and the vanishing is a consequence of the $\mu$-semistability of $E^{\vee\vee}$.
It follows that $d^{pq}_2=0$ except for $d^{01}_2$, while $d^{pq}_3=0$ for every $p$ and $q$. This means that $E^{pq}_2$ converges in its third page, providing the desired result. \end{proof}
The following technical lemma will be helpful in our next argument.
\begin{Lema}\label{Technic} Let $F$ be a torsion free sheaf. If $E$ is a subsheaf of $F$ for which the quotient sheaf $Z:=F/E$ is 0-dimensional, then \begin{equation}\label{sum} \sum_{j=0}^3 (-1)^j\chi(\lext^j(Z,E)) + \sum_{j=0}^3 (-1)^j\chi(\lext^j(F,Z)) = 0. \end{equation} \end{Lema} \begin{proof} Break a locally free resolution of $E$ as in \eqref{res-e} into two short exact sequences $$ 0 \to L_2 \to L_1 \to K \to 0 ~~{\rm and}~~ 0 \to K \to L_0 \to E \to 0 . $$ Applying the functor $\lhom(Z,-)$ and passing to the Euler characteristic on the first sequence, we have: \begin{align}\label{chi1} \chi(\lext^2(Z,K)) - \chi(\lext^3(Z,K)) = \chi(\lext^3(Z,L_2)) - \chi(\lext^3(Z,L_1)) = \end{align} \begin{align*} =(\rk(L_2)-\rk(L_1))\chi(Z), \end{align*} since $\chi(\lext^3(Z,L_k))=\chi(\lext^3(Z,\mathcal{O}_{\mathbb{P}^3})\otimes L_k) = \rk(L_k)\cdot\chi(Z)$. Now, applying the functor $\lhom(Z,-)$ to the second exact sequence we obtain the isomorphism $\lext^1(Z,E)\simeq \lext^2(Z,K)$ and passing to the Euler characteristic we have \begin{equation*} \chi(\lext^2(Z,E)) - \chi(\lext^3(Z,E)) = \chi(\lext^3(Z,K)) - \chi(\lext^3(Z,L_0)). \end{equation*} Subtracting $\chi(\lext^1(Z,E))$ from the left hand side and $\chi(\lext^2(Z,K))$ from the right hand side, and then substituting for (\ref{chi1}) we have: \begin{equation}\label{chiEE} \sum_{j=0}^3 (-1)^j\chi(\lext^j(Z,E)) = (\rk(L_1)-\rk(L_2)-\rk(L_0))\cdot\chi(Z) = -\rk(E)\chi(Z) . \end{equation} Since $\dim\lext^j(Z,E)=0$, we have $$ \chi(\lext^j(Z,E)) = h^0(\lext^j(Z,E)) = $$ $$ = \dim\ext^j(Z,E) \stackrel{\rm SD}{=} \dim\ext^{3-j}(E,Z) = \chi(\lext^{3-j}(E,Z)) , $$ where the superscript SD indicates the use of Serre duality. The formula \eqref{chiEE} applied to the sheaf $F$ then yields $$ \sum_{j=0}^3 (-1)^j\chi(\lext^j(F,Z)) = \rk(F)\chi(Z) .$$ The fact that $\rk(F)=\rk(E)$ provides the desired identity. \end{proof}
\begin{Lema}\label{Lemamixed} Let $E$ be a rank 2 torsion free sheaf with mixed singularities. Then: $$ \displaystyle \sum_{j=0}^{3}(-1)^j\dim \ext^j(E,E) = - 8c_2(E) + 4 + 2c_1(E)^2. $$ \end{Lema}
\begin{proof} Let $Z_E \hookrightarrow Q_E$ be the maximal 0-dimensional subsheaf of $Q_E$, and set $T_E := Q_E/Z_E$ to be the pure 1-dimensional quotient; we assume that both $Z_E$ and $T_E$ are nontrivial. Let $E'$ be the kernel of the composed epimorphism $E^{\vee\vee}\twoheadrightarrow Q_E \twoheadrightarrow T_E$; note that it also fits into the following short exact sequence \begin{equation}\label{puremixed} 0 \to E \to E^{\prime} \to Z_E\to 0. \end{equation} Note that $c_1(E')=c_1(E)$ and $c_2(E')=c_2(E)$. In addition, $(E')^{\vee\vee}\simeq E^{\vee\vee}$, and $Q_{E'}\simeq T_E$, thus $E'$ is a torsion free sheaf with 1-dimensional singularities. It follows that $E'$ has homological dimension 1 (that is, $\lext^p(E',G)=0$ for $p\ge2$ and every coherent sheaf $G$), so the proof of \cite[Proposition 3.4]{Harshorne-Reflexive} also applies for $E'$, and we conclude that $$ \displaystyle \sum_{j=0}^{3}(-1)^j\dim \ext^j(E',E') = - 8c_2(E) + 4 + 2c_1(E)^2. $$ Therefore, it is enough to prove that $$ \displaystyle \sum_{j=0}^{3}(-1)^j\dim \ext^j(E,E) = \sum_{j=0}^{3}(-1)^j\dim \ext^j(E',E'), $$ which, by Lemma \ref{exts general}, is equivalent to showing that $$ \displaystyle \chi(\lhom(E,E)) - \chi(\lext^1(E,E)) + h^0(\lext^2(E,E)) = \chi(\lhom(E^{\prime},E^{\prime})) - \chi(\lext^1(E^{\prime},E^{\prime})). $$
\noindent To see this, note that applying the functor $\lhom(E^{\prime},-)$ to the sequence (\ref{puremixed}) we obtain:
\begin{align} & \chi(\lhom(E^{\prime},E)) - \chi(\lhom(E^{\prime},E^{\prime})) + \chi(\lhom(E^{\prime},Z_E)) - \nonumber \\ & \chi(\lext^1(E^{\prime},E)) + \chi(\lext^1(E^{\prime},E^{\prime}))-\chi(\lext^1(E^{\prime},Z _E))=0. \nonumber \end{align} Next, applying the functor $\lhom(-,E)$ to the sequence (\ref{puremixed}) we have \begin{align}
& \chi(\lhom(E^{\prime},E)) - \chi(\lhom(E,E)) + \chi(\lext^1(Z_E,E)) - \nonumber \\
& \chi(\lext^1(E^{\prime},E)) +
\chi(\lext^1(E,E))-\chi(\lext^2(Z_E,E))= 0. \nonumber \end{align} Taking the difference between these last two equations we obtain $$ \chi(\lhom(E^{\prime},E^{\prime})) - \chi(\lext^1(E^{\prime},E^{\prime})) = \chi(\lhom(E,E)) - \chi(\lext^1(E,E)) + $$ $$ - \chi(\lext^1(Z_E,E)) + \chi(\lext^2(Z_E,E)) + \chi(\lhom(E^{\prime},Z_E)) - \chi(\lext^1(E^{\prime},Z_E)) = $$ $$ \chi(\lhom(E,E))-\chi(\lext^1(E,E))+\chi(\lext^3(Z_E,E)), $$ with the second equality following from applying the formula established in Lemma \ref{Technic} to the sheaves $E$ and $E'$. Applying the functor $\lhom(-,E)$ to the sequences $$ 0 \to E' \to E^{\vee\vee} \to T_E \to 0 ~~{\rm and}~~ 0 \to Z_E \to Q_E \to T_E \to 0 $$ we conclude that $\lext^3(T_E,E)=0$ and $\lext^3(Q_E,E)\simeq\lext^3(Z_E,E)$. We already noticed in the proof of Lemma \ref{exts general} that $\lext^3(Q_E,E)\simeq\lext^2(E,E)$, thus $$ \chi(\lext^3(Z_E,E))=\chi(\lext^2(E,E))=h^0(\lext^2(E,E)), $$ as desired. \end{proof}
Gathering the above results we are in a position to prove Theorem \ref{dimext1}. \begin{proof}[Proof of Theorem \ref{dimext1}] By Lemma \ref{Lemamixed}, it is enough to show that $\dim \Hom(E,E) = 1$ and $\ext^3(E,E) = 0$, but these follow easily from the stability of $E$. \end{proof}
The following proposition will be a technical tool that will help us to compute explicitly the dimension of $\ext^1(E,E)$ for certain torsion free sheaves.
\begin{Prop}\label{important} Let $F$ be a stable rank 2 reflexive sheaf on $\mathbb{P}^3$, with $\dim \ext^2(F,F) = 0$. Let $Z$ be an artinian sheaf, and $T$ be a sheaf of pure dimension 1 such that $H^1(\lhom(F,T))=0$; set $Q :=Z \oplus T$ and assume also that $\operatorname{Sing}(F)\cap\supp(Q)=\emptyset$. If $\varphi: F \to Q$ is an epimorphism, then, for $E := \ker\varphi$, \begin{itemize} \item[(i)] $E$ is a stable rank 2 torsion free sheaf; \item[(ii)] $c_1(E) = c_1(F)$ and $c_2(E) = c_2(F) + \mult(T)$, where $\mult(T)$ denotes the multiplicity of the sheaf $T$; \item[(iii)] $\ext^2(E,E) = \mathrm{H}^0(\lext^3(Z,E)) \oplus \ext^3(T,E)$. \end{itemize} \end{Prop} \begin{proof} The items (i) and (ii) are straightforward calculations; we will prove (iii). First we will show that the spectral sequence map $$d_2^{01}: \mathrm{H}^0(\lext^1(E,E)) \to \mathrm{H}^2(\lhom(E,E))$$ \noindent is an epimorphism. Consider the exact sequence: \begin{equation}\label{mainmixed}
0\to E \to F \to Q \to 0. \end{equation} Applying the functor $\lhom(F,-)$ to \eqref{mainmixed}, since $\coker\{\lhom(F,E)\to \lhom(E,E)\}$ is supported in dimension 1, we have \begin{equation}\label{toprow} \mathrm{H}^2(\lhom(F,E)) \to \mathrm{H}^2(\lhom(F,F)) \to 0. \end{equation} Next, applying $\Hom(F,-)$ to the sequence (\ref{mainmixed}) and using the hypothesis that $\ext^2(F,F) = 0$, we have $$\ext^1(F,Q) \to \ext^2(F,E) \to 0. $$ To see that $\ext^1(F,Q)$ vanishes, note that $\ext^p(F,Q) = 0$ for $p=2,3$ because $F$ is reflexive. $\lext^1(F,Q) = 0$ because $\operatorname{Sing}(F) \cap \supp(Q) = \emptyset$. In addition, $\mathrm{H}^p(\lhom(F,Q)) = 0$ for $p=2,3$ because $\dim Q = 1$. From the spectral sequence, $\ext^1(F,Q) = \mathrm{H}^1(\lhom(F,Q))$ which vanishes by hypothesis. Therefore $d_2^{01}: \mathrm{H}^0(\lext^1(F,E)) \to \mathrm{H}^2(\lhom(F,E))$ is surjective. Then we have \begin{equation} \label{d012-diagram} \xymatrix{ \mathrm{H}^0(\lext^1(F,E)) \ar[d]\ar[r]^{d^{01}_2} & \mathrm{H}^2(\lhom(F,E)) \ar[d] \\ \mathrm{H}^0(\lext^1(E,E)) \ar[r]^{d^{01}_2} & \mathrm{H}^2(\lhom(E,E)), }\end{equation} \noindent where the vertical arrow on the left is the natural map coming from the exact sequence (\ref{mainmixed}), and the horizontal maps come from the spectral sequence. Since the top row map and the right vertical map are surjective, we have that the bottom map is surjective as we wanted. Now, applying $\lhom(-,E)$ to the sequence (\ref{mainmixed}) we have $$\lext^2(E,E) \simeq \lext^3(Q,E) \simeq \lext^3(Z,E) \oplus \lext^3(T,E).$$ Furthermore, there is an exact sequence \begin{equation*} \xymatrix{ \lext^1(F,E) \ar[r] & \lext^1(E,E) \ar[r]^{f}& \lext^2(Q,E) \ar[r]& 0, } \end{equation*} where $\dim \ker f = 0$, since $\dim\lext^1(F,E)=0$. Thus \begin{equation}\label{almost} \ext^2(E,E) = \mathrm{H}^0(\lext^3(Z,E)) \oplus \mathrm{H}^0(\lext^3(T,E)) \oplus \mathrm{H}^1(\lext^2(T,E)). 
\end{equation} \noindent Since $F$ is reflexive, from \cite[Proposition 1.1.6]{HL}, we have $\lext^p(T,F) = 0$ for $p=0,1$ and $\mathrm{codim}~\lext^p(T,F)\geq p$ for $p = 2,3$. Clearly, $\dim\lext^p(T,E) \le 1$ for $p>0$, while $\lhom(T,E) = 0$; using these facts, we obtain from the spectral sequence for $\ext^{\cdot}(T,E)$ that \begin{equation}\label{ext3}
\ext^3(T,E) = \mathrm{H}^0(\lext^3(T,E)) \oplus \mathrm{H}^1(\lext^2(T,E)). \end{equation} Putting together the equations (\ref{almost}) and (\ref{ext3}) we obtain item (iii). \end{proof}
\begin{Remark} Item (iii) of Proposition \ref{important} also holds when $T=0$ without assuming that $\ext^2(F,F)=0$; see the proof of Main Theorem \ref{main2}, starting on page \pageref{pf mthm 2} below. \end{Remark}
An important ingredient of Proposition \ref{important} is a family of stable reflexive sheaves that fills out an irreducible component of the moduli space with the expected dimension. A priori, it is not clear why such a family should exist. In \cite{JMT} the authors proved that, indeed, such families exist for infinitely many values of the second Chern class, provided that the first Chern class is even. Below we state a theorem that shows that this happens also for sheaves with odd first Chern class. For simplicity of notation, we define $$ G_{(a,b,c)}:=a\cdot\mathcal{O}_{\mathbb{P}^3}(-3) \oplus b\cdot\mathcal{O}_{\mathbb{P}^3}(-2) \oplus c\cdot\mathcal{O}_{\mathbb{P}^3}(-1). $$
\begin{Teo}\label{thm newclass} For each triple $(a,b,c)$ of positive integers such that $3a+2b+c$ is odd, the family of rank 2 reflexive sheaves $F$ obtained as the cokernel of the maps $\alpha$ below \begin{equation}\label{newclass} 0 \to a\cdot\mathcal{O}_{\mathbb{P}^3}(-3) \oplus b\cdot\mathcal{O}_{\mathbb{P}^3}(-2) \oplus c\cdot\mathcal{O}_{\mathbb{P}^3}(-1) \stackrel{\alpha}{\longrightarrow} (a+b+c+2)\cdot\mathcal{O}_{\mathbb{P}^3} \to F(k) \to 0, \end{equation} where $k:=(3a+2b+c+1)/2$, fills out a nonsingular irreducible component $\mathcal{S}(a,b,c)$ of $\mathcal{R}(-1,n,m)$ of expected dimension $8n-5$, with $n$ and $m$ given by the expressions: \begin{equation}\label{m(a,b,c)} \begin{split} & n = \frac{1}{4}(3a+2b+c+1)^2 + 3a + b, \\ & m(a,b,c) = 27{{a+2}\choose{3}} + 8{{b+2}\choose{3}} + {{c+2}\choose{3}} + 3(3a+2b+5)ab +\\ & + \frac{3}{2}(3a+c+4)ac + (2b+3c+3)bc + 6abc. \end{split} \end{equation} More precisely, let $\widetilde{\s}(a,b,c)\subset \Hom\left(G_{(a,b,c)},(a+b+c+2)\cdot\mathcal{O}_{\mathbb{P}^3}\right)$ be the open subset consisting of monomorphisms with 0-dimensional degeneracy loci; then $$ \s(a,b,c) = \widetilde{\s}(a,b,c) / ((\mathrm{Aut}(G_{(a,b,c)})\times GL(a+b+c+2))/\mathbb{C}^*) . $$ \end{Teo} \begin{proof} Let $a,b,c \in \mathbb{Z}$, such that $3a+2b+c$ is odd and nonzero, and consider morphisms of the form $$ \alpha: G_{(a,b,c)} \to (a+b+c+2)\cdot\mathcal{O}_{\mathbb{P}^3}. $$ If the degeneracy locus
$$ \Delta(\alpha) := \{ x\in\mathbb{P}^3 ~|~ \alpha(x) ~~ \text{is not injective} \} $$ is 0-dimensional, then the cokernel of $\alpha$ is a rank 2 reflexive sheaf $F$ on $\mathbb{P}^3$, which we twist by $k:=(3a+2b+c+1)/2$, so that $c_1(F)=-1$, and the exact sequence in display (\ref{newclass}) is satisfied. The dimension of this family of rank 2 reflexive sheaves is given by $$ \dim\Hom\left(G_{(a,b,c)}, (a+b+c+2)\cdot\mathcal{O}_{\mathbb{P}^3}\right) - \dim\mathrm{Aut}\left(G_{(a,b,c)}\right) - (a+b+c+2)^2 + 1 = $$ $$8k^2 + 24a + 8b - 5 = 8c_2(F) -5. $$
Note that any such sheaf $F$ satisfies $h^0(F(-1)) = 0$; thus $F$ is always stable. It only remains for us to check that $\dim \ext^2(F,F) = 0$. This follows from applying the functor $\Hom(\cdot,F(k))$ to the exact sequence in display (\ref{newclass}), and observing that $\mathrm{H}^1(F(t))=0$ for every $t\in\mathbb{Z}$ and that $\mathrm{H}^2(F(k))=0$. Therefore the family of sheaves given by (\ref{newclass}) provides a component of the moduli space of stable rank 2 reflexive sheaves on $\mathbb{P}^3$. \end{proof}
The case that deserves special attention is $a = b = 0$ and $c = 1$, which gives us $c_1(F) = -1$, $c_2(F) = c_3(F) = 1$. In \cite[Lemma 9.4]{Harshorne-Reflexive} it is shown that every reflexive sheaf in $\mathcal{R}(-1,1,1)$ admits a resolution of the form: \begin{equation}\label{resR(-1,1,1)} 0 \to \mathcal{O}_{\mathbb{P}^3}(-2) \stackrel{\alpha}{\longrightarrow} 3\cdot\mathcal{O}_{\mathbb{P}^3}(-1) \to F \to 0. \end{equation}
From this sequence we can easily deduce the splitting behaviour of a sheaf $F$ in ${\mathcal R}(-1,1,1)$. Indeed, each one of the $3$ rows of the map $\alpha$ can be viewed as the equation of a hyperplane in $\mathbb{P}^3$; since $\alpha$ is injective, these hyperplanes must intersect in exactly one point $p$, which coincides with the singularity of the sheaf $F$. Thus, if $l \subset \mathbb{P}^3$ is a line and $p \notin l$, then the restriction
of $F$ on $l$, $F|_{l}$, is isomorphic to $\mathcal{O}_l(-1) \oplus \mathcal{O}_l$. On the other hand, if $p \in l$, from
sequence (\ref{resR(-1,1,1)}), we have that $F|_{l} \simeq \mathcal{O}_{p} \oplus 2 \mathcal{O}_l(-1)$. Summarizing, we have:
\begin{equation}\label{splitingF}
F|_l=\begin{cases} \mathcal{O}_l(-1) \oplus \mathcal{O}_l,& \text{if } p \notin l,\\ \mathcal{O}_{p} \oplus 2\cdot\mathcal{O}_l(-1),& \text{if } p \in l. \end{cases} \end{equation}
\begin{Remark}\label{R(-1,1,1)} From \cite[Example 4.2.1] {Harshorne-Reflexive} it follows that $\mathcal{R}(-1,1,1)$ is irreducible non-singular and rational of dimension $3$. Moreover, there is an isomorphism $\mathcal{R}(-1,1,1)\xrightarrow{\simeq}\mathbb{P}^3,\ [F]\mapsto \operatorname{Sing}(F)$, and every sheaf $[F]\in\mathcal{R}(-1,1,1)$ fits in an exact triple $0\to\op3(-2)\to3\cdot\op3(-1)\to F\to0$. This yields that $\ext^2(F,F)=0$. Also Theorem \ref{thm newclass} implies that $\s(0,0,1)= \mathcal{R}(-1,1,1)$. Besides, under the isomorphism $\mathcal{R}(-1,1,1)\simeq\mathbb{P}^3$, the above exact triple globalizes to the exact triple over $\mathbb{P}^3\times\mathbb{P}^3$: \begin{equation}\label{univ F} 0\to\op3(-2)\boxtimes\op3\xrightarrow{\alpha}\op3(-1) \boxtimes T_{\mathbb{P}^3}(-1)\to\mathbf{F}\to0, \end{equation} where $\mathbf{F}$ is the universal family of reflexive sheaves over ${\mathcal R}(-1,1,1)$, the morphism $\alpha$ is the composition $\op3(-2)\boxtimes\op3\xrightarrow{i(-1) \boxtimes\mathrm{id}}4\cdot\op3(-1)\boxtimes\op3 \xrightarrow{\mathrm{id}\boxtimes\epsilon}\op3(-1)\boxtimes T_{\mathbb{P}^3}(-1)$, and $i,\epsilon$ are the morphisms in the Euler exact triple $0\to\op3(-1)\xrightarrow{i}4\cdot\op3 \xrightarrow{\epsilon}T_{\mathbb{P}^3}(-1)\to0$. \end{Remark}
\section{Sheaves with 0-dimensional and mixed singularities}\label{New Irreducible Components}
In \cite{JMT} the authors produced examples of irreducible components with $0$-dimensional singularities and pure $1$-dimensional singularities in the moduli space of rank 2 stable torsion free sheaves with first Chern class equal to 0, and in \cite{IT} the authors proved the existence of irreducible components in ${\mathcal M}(0,3,0)$ whose generic point is a sheaf with mixed singularities, again with first Chern class equal to 0. The first natural question that arises is whether similar constructions can be made for sheaves with odd first Chern class, and whether similar irreducible components exist for nonzero third Chern class.
We will explicitly construct examples of irreducible components of the moduli space of torsion free sheaves with mixed singularities. We refer the reader to \cite{IT} for some examples in ${\mathcal M}(0,3,0)$.
For the rest of this work, let $e \in \{-1,0\}$, and $n$, $m$ be two integers such that $en \equiv m (\mathrm{mod}~2)$. Let \begin{equation}\label{def R*}
{\mathcal R}^*(e,n,m):=\{[F]\in{\mathcal R}(e,n,m)\ |\ \ext^2(F,F)=0 \}. \end{equation} By semicontinuity, ${\mathcal R}^{*}(e,n,m)$ is an open smooth subset of ${\mathcal R}(e,n,m)$ such that, in view of Theorem \ref{dimext1} and Corollary \ref{dim calm}, \begin{equation}\label{dim R*} \dim_{[F]}{\mathcal R}^{*}(e,n,m)=\dim\ext^1(F,F)=8n-3+2e,\ \ \ \ \ \ \ \ [F]\in{\mathcal R}^{*}(e,n,m). \end{equation} (Here and below by the dimension $\dim_xX$ of a given scheme $X$ locally of finite type at a point $x\in X$ we mean the maximum of dimensions of irreducible components of $X$ passing through the point $x$.)
Let $(\mathbb{P}^3)^s_0$ be the open dense subset of $(\mathbb{P}^3)^s$ consisting of disjoint unions of $s$ distinct points in $\mathbb{P}^3$. For any closed point $[F]\in{\mathcal R}^{*}(e,n,m)$, define the sets \begin{equation}\label{PiF}
{\Pi}_{[F]}:=\{S\in(\mathbb{P}^3)^s_0~|~S\cap\operatorname{Sing}(F)=\emptyset\}, \end{equation} \begin{equation}\label{X_F} \mathcal{X}_{[F]}:=\{(l,S)\in\mathrm{G}(2,4)\times
(\mathbb{P}^3)^s_0~|~l\cap S=\emptyset,\ (l\cup S)\cap\operatorname{Sing}(F)
=\emptyset, ~\mathrm{and}~F|_l=\mathcal{O}_l(e)\oplus \mathcal{O}_l\}. \end{equation} Note that, since any reflexive sheaf $F$ from ${\mathcal R}^{*}(e,n ,m)$ has 0-dimensional singularities, the set \begin{equation}\label{R*times P3s} \left({\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)_0^s\right)_0:= \{( [F],S)\in
{\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)_0^s ~|~S\in{\Pi}_{[F]}\} \end{equation} is a dense open subset in ${\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)^s$, hence by \eqref{dim R*} it is smooth equidimensional of dimension \begin{equation}\label{dim product 1} \dim\left({\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)_0^s\right)_0=8n+3s+2e-3. \end{equation} Respectively, by the Grauert--M\"ulich Theorem, the set \begin{equation}\label{R*times G times GP3s} \left({\mathcal R}^{*}(e,n,m)\times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0:= \{( [F],(l,S))\in{\mathcal R}^{*}(e,n,m)\times G(2,4)\times(\mathbb{P}^3)_0^s
~|~(l,S)\in\mathcal{X}_{[F]}\} \end{equation} is a dense open subset in ${\mathcal R}^{*}(e,n,m)\times G(2,4) \times(\mathbb{P}^3)_0^s$, hence by \eqref{dim R*} it is smooth equidimensional of dimension \begin{equation}\label{dim product} \dim\left({\mathcal R}^{*}(e,n,m)\times G(2,4)\times(\mathbb{P}^3)_0^s \right)_0=8n+3s+2e+1. \end{equation} For a pair $([F],S)\in\left({\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)_0^s \right)_0$, consider the $2s$-dimensional vector space $\Hom(F,{\mathcal O}_S)$ and its open dense subset $\Hom(F,{\mathcal O}_S)_e$ of epimorphisms $F\twoheadrightarrow {\mathcal O}_S$. By construction, the group $\mathrm{Aut}({\mathcal O}_S)$ acts on $\Hom(F,{\mathcal O}_S)_e$, and it follows that the quotient space $\Hom(F,{\mathcal O}_S)_e/\mathrm {Aut}({\mathcal O}_S)$ is a smooth irreducible scheme isomorphic to a product of projective spaces, where $S=(q_1,...,q_s)$: \begin{equation}\label{descr fibre 1} \begin{split} &\Hom(F,{\mathcal O}_S)_e/\mathrm{Aut}({\mathcal O}_S)\simeq\mathbf \prod_{i=1}^{s}\mathbb{P}^1_{q_i},\\ & \dim\Hom(F,{\mathcal O}_S)_e/\mathrm{Aut}({\mathcal O}_S)=s, \end{split} \end{equation} and where $\mathbb{P}^1_{q_i}=\Hom(F,{\mathcal O}_{q_i})_e/\mathrm {Aut}({\mathcal O}_{q_i})$, $i=1,...,s$.
Now, for any element $\phi\in\Hom(F,{\mathcal O}_S)_e$ the torsion free sheaf $E_{\phi}:=\ker(\phi:F\twoheadrightarrow{\mathcal O}_S)$ is stable, and defines a closed point in $\mathcal{M}(e,n, m-2s)$. Furthermore, $E_{\phi}\simeq E_{\phi'}$ if, and only if, there is a $g\in\mathrm{Aut}({\mathcal O}_S)$ such that $\phi=g\circ\phi'$. Denote by $[\phi]$ the equivalence class of $\phi$ modulo $\mathrm{Aut}({\mathcal O}_S)$ and consider the set \begin{equation}\label{tilde tau}
\tilde{{\mathcal T}}(e,n,m,s):=\{x=([F],S,[\phi_x])~|~([F],S) \in\left({\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)_0^s\right)_0, \phi_x\in\Hom(F,{\mathcal O}_S)_e\}. \end{equation} By definition, $\tilde{{\mathcal T}}(e,n,m,s)$ is fibered over $\left({\mathcal R}^{*}(e,n,m)\times(\mathbb{P}^3)_0^s\right)_0$ with fiber \linebreak $\Hom(F,{\mathcal O}_S)_e/\mathrm{Aut}({\mathcal O}_S)$ over a given point $([F],S)$. Thus by \eqref{dim R*} and \eqref{descr fibre} we conclude that $\tilde{{\mathcal T}}(e,n,m,s)$ is naturally endowed with a structure of smooth equidimensional scheme of dimension $$ \dim\tilde{{\mathcal T}}(e,n,m,s)=8n+4s+2e-3, $$ and the number of irreducible components of $\tilde{{\mathcal T}} (e,n,m,s)$ is equals to the number of those of ${\mathcal R}^{*}(e,n,m)$. Furthermore, for any point $t=([F],S),[\phi_x])\in\tilde {{\mathcal T}}(e,n,m,s)$, the sheaf $E(t):=\ker\{ \phi: F \twoheadrightarrow{\mathcal O}_S\}$ is a stable sheaf from $\mathcal{M}(e,n,m-2s)$. Hence there is a well-defined modular morphism $$ \Phi:\ \tilde{{\mathcal T}}(e,n,m,s)\hookrightarrow \mathcal{M}(e,n,m-2s),\ \ \ \ \ \ t\mapsto[E(t)]. $$ $\Phi$ is clearly an embedding, since the data $x=([F],(l,S), [\phi_x])$ are recovered uniquely from $[E(t)]: F\simeq E(t)^{\vee\vee},\ S=\supp(E(t)^{\vee\vee}/E(t))$ and $\phi$ is the canonical quotient morphism $E(t)^{\vee\vee}\twoheadrightarrow{\mathcal O}_S\simeq E(t)^{\vee\vee}/E(t)$. We thus set \begin{equation} {\mathcal T}(e,n,m,s):=\Phi(\tilde{{\mathcal T}}(e,n,m,s)) \simeq\tilde{{\mathcal T}}(e,n,m,s). \end{equation} Let $\mathrm{T}(e,n,m,s)$ be the closure $\overline {{\mathcal T}(e,n,m,s)}$ of ${\mathcal T}(e,n,m,s)$ in ${\mathcal M}(e,n,m-2s)$. Formula \eqref{dim R*} yields: \begin{equation}\label{dimfamilymixed1} \dim\mathrm{T}(e,n,m,s)=\dim{\mathcal T}(e,n,m,s)=8n+4s+2e-3. \end{equation}
Respectively, let $r\geq e$. For a triple $([F],(l,S))\in \left({\mathcal R}^{*}(e,n,m)\times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0$, set \begin{equation}\label{QlSr} Q_{(l,S),r}:={\mathcal O}_S\oplus i_{*}{\mathcal O}_l(r), \end{equation} where $i: l\hookrightarrow\mathbb{P}^3$ is a closed immersion. Consider the $(2r+2s+2-e)$-dimensional vector space $\Hom(F,Q_{(l,S),r})$ and its open dense subset $\Hom(F,Q_{(l,S),r})_e$ of epimorphisms $F\twoheadrightarrow Q_{(l,S),r} $. By construction, the group $\mathrm{Aut}(Q_{(l,S),r})$ acts on \linebreak $\Hom(F,Q_{(l,S),r})_e$, and it follows that the quotient space $$ \Hom(F,Q_{(l,S),r})_e/\mathrm{Aut}(Q_{(l,S),r}) $$ is a smooth irreducible scheme isomorphic to a product of projective spaces: \begin{equation}\label{descr fibre} \begin{split} &\Hom(F,Q_{(l,S),r})_e/\mathrm{Aut}(Q_{(l,S),r})\simeq\mathbf {P}^{2r+1-e}_l\times\prod_{i=1}^{s}\mathbb{P}^1_{q_i},\\ & \dim\Hom(F,Q_{(l,S),r})_e/\mathrm{Aut}(Q_{(l,S),r})= 2r+s+1-e, \end{split} \end{equation} and where \begin{equation}\label{Pl} \mathbf{P}^{2r+1-e}_l=\Hom(F,i_{*}{\mathcal O}_l (r))_e/\mathrm{Aut}(i_{*}{\mathcal O}_l(r)) \end{equation} and $\mathbb{P}^1_{q_i}$ are the same as in \eqref{descr fibre 1}.
For any element $\phi\in\Hom(F,Q_{(l,S),r})_e$ the torsion free sheaf $E_{\phi}:=\ker\phi$ is stable, and defines a closed point in $\mathcal{M}(e,n+1,m-2r-2s-2-e)$. Furthermore, $E_{\phi}\simeq E_{\phi^{\prime}}$ if, and only if, there is a $g\in\mathrm{Aut}(Q_{(l,S),r})$ such that $\phi=g\circ\phi^{\prime}$. Denote by $[\phi]$ the equivalence class of $\phi$ modulo $\mathrm{Aut} (Q_{(l,S),r})$ and consider the set \begin{equation}\label{familyX} \begin{split} & \tilde{\mathcal{X}}(e,n,m,r,s):=\{x=
([F],(l,S),[\phi_x])~|~([F],(l,S))\in\left({\mathcal R}^{*}(e,n,m) \times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0,\\ &[\phi_x]\in\Hom(F,Q_{(l,S),r})_e/\mathrm{Aut}(Q_{(l,S),r})\}. \end{split} \end{equation} By definition, $\tilde{\mathcal{X}}(e,n,m,r,s)$ is fibered over $\left({\mathcal R}^{*}(e,n,m)\times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0$ with fiber\\ $\Hom(F,Q_{(l,S),r})_e/\mathrm{Aut}(Q_{(l,S),r})$ over a given point $([F],(l,S))$. Thus by \eqref{dim R*} and \eqref{descr fibre} $\tilde{\mathcal{X}}(e,n,m,r,s)$ is naturally endowed with a structure of smooth equidimensional scheme of dimension \begin{equation}\label{dim tilde X} \dim\tilde{\mathcal{X}}(e,n,m,r,s)=8n+4s+2r+e+2, \end{equation} and the number of irreducible components of $\tilde {\mathcal{X}}(e,n,m,r,s)$ equals the number of those of ${\mathcal R}^{*}(e,n,m)$. Furthermore, for any point $$ x=([F],(l,S),[\phi_x])\in\tilde {\mathcal{X}}(e,n,m,r,s), $$ the sheaf $E(x):=\ker\{ \phi: F \twoheadrightarrow Q_{(l,s),r}\}$ is a stable sheaf from $\mathcal{M}(e,n+1,m+2+e-2r-2s)$. Hence there is a well-defined modular morphism $$ \Psi:\ \tilde{\mathcal{X}}(e,n,m,r,s)\hookrightarrow \mathcal{M}(e,n+1,m+2+e-2r-2s),\ \ \ \ \ \ x\mapsto[E(x)]. $$ $\Psi$ is clearly an embedding, since the data $x=([F],(l,S), [\phi_x])$ are recovered uniquely from $[E(x)]: F\simeq E(x)^{\vee\vee},\ l\sqcup S=\supp(E(x)^{\vee\vee} /E(x))$ and $\phi$ is the canonical quotient morphism $E(x)^{\vee\vee}\twoheadrightarrow Q_{(l,S),r}\simeq E(x)^{\vee\vee}/E(x)$. We thus set \begin{equation}\label{def calX} \mathcal{X}(e,n,m,r,s):=\Psi(\tilde{\mathcal{X}}(e,n,m,r,s)) \simeq\tilde{\mathcal{X}}(e,n,m,r,s). \end{equation} Let $\mathrm{X}(e,n,m,r,s)$ be the closure $\overline{\mathcal {X}(e,n,m,r,s)}$ of $\mathcal{X}(e,n,m,r,s)$ in ${\mathcal M}(e,n+1, m+2+e-2r-2s)$. Formula \eqref{dim tilde X} yields: \begin{equation}\label{dimfamilymixed} \dim\mathrm{X}(e,n,m,r,s)=\dim\mathcal{X}(e,n,m,r,s)= 8n+4s+2r+2+e. 
\end{equation} \begin{Remark}\label{X(-1,1,1,-1,0)} By Remark \ref{R(-1,1,1)}, ${\mathcal R}^*(-1,1,1)= {\mathcal R}(-1,1,1)$ is smooth irreducible of the expected dimension 3. Thus \eqref{dimfamilymixed} yields that $\mathrm{X}(-1,1,1, -1,0)$ is an irreducible scheme of dimension 7.
\end{Remark}
We now prove the following general result about the schemes $\mathrm{T}(e,n,m,s)$.
\begin{Teo}\label{0dcomp} Given $s>0$, we have: \begin{itemize} \item[(i)] For any nonsingular irreducible component ${\mathcal R}^*$ of of ${\mathcal R}(e,n,m)$ there corresponds an irreducible component of $\mathrm{T}(e,n,m,s)$ of dimension $8n-3+2e+4s$ which is also an irreducible component of ${\mathcal M}(e,n,m-2s)$. In particular, if ${\mathcal R}(e,n,m)$ is irreducible, then $\mathrm{T}(e,n,m,s)$ is also irreducible. \item[(ii)] The generic sheaf $[E]$ of any irreducible component of $\mathrm{T}(e,n,m,s)$ satisfies the conditions that $[E^{\vee\vee}]\in{\mathcal R}^*(e,n,m)$ and $Q_E=E^{\vee\vee}/E$ is an artinian sheaf of length $s$. \end{itemize} \end{Teo} \begin{proof} For the statement (i) of Theorem, it is enough to prove that, for each $[E(t)]\in{\mathcal T}(e,n,m,s)$, $\ext^2(E(t),E(t))=4s$. Indeed, in this case Theorem \ref{dimext1} yields that $\dim \ext^1(E(t),E(t))=8n-3+2e+4s=$, and this dimension coincides with the dimension of $\mathrm{T}(e,n,m,s)$ by \eqref{dimfamilymixed1}, and therefore by the deformation theory any irreducible component of $\mathrm{T}(e,n,m,s)$ is an irreducible component of ${\mathcal M}(e,n,m-2s)$.
From Proposition \ref{important} we have $$ \dim\ext^2(E(t), E(t))=h^0(\lext^3(Q,E(t))), $$ where $Q=E(t)^{\vee\vee}/E(t)$. To compute this group, note that, since, by the definition of ${\mathcal T}(e,n,m,s)$, $Q={\mathcal O}_S$, where $S=\{q_1,\dots,q_s\}\in (\mathbb{P}^3)_0^s$ and $S\cap\operatorname{Sing}(E(t)^{\vee\vee})=\emptyset$, we have \begin{equation}\label{sum of exts} \ext^2(E(t),E(t)) \simeq \mathrm{H}^0(\lext^3(Q, E(t)))\simeq\bigoplus_{q_i\in S} \ext^3_{{\mathcal O}_{\mathbb{P}^3,q_i}}({\mathcal O}_{q_i},E(t)_{q_i}). \end{equation} Take a point $q=q_j$ for some $1\le j\le s$, and an open subset $U$ in $\mathbb{P}^3$ not containing other points of $\operatorname{Sing}(E(t))$. Consider the exact sequence $0\to E(t)\to E(t)^{\vee\vee}\to Q\to0$ and restrict it onto $U$. We then obtain the following exact sequence of sheaves on $U$: $$ 0\to{\mathcal O}_U\oplus{\mathcal I}_{q,U}\to2\cdot{\mathcal O}_{U}\to{\mathcal O}_q\to0, $$ where ${\mathcal I}_{q,U}$ denotes the ideal sheaf of the point $p\in U$ and ${\mathcal O}_q$ denotes the structure sheaf of the point
$q$ as a subscheme of $U$. In particular, $E(t)|_U\simeq {\mathcal O}_U\oplus {\mathcal I}_{q,U}$, so that \begin{equation}\label{dir sum} \ext^3_{{\mathcal O}_{\mathbb{P}^3,q}}({\mathcal O}_q,E(t)_q)\simeq\mathrm{H}^0( \lext^3_{{\mathcal O}_{U}}({\mathcal O}_q, {\mathcal I}_{q,U}))\oplus \mathrm{H}^0(\lext^3_{{\mathcal O}_{U}}({\mathcal O}_q,{\mathcal O}_U)). \end{equation} Applying the functor $\lhom(-,{\mathcal I}_{q,U})$ to the sequence $0 \to {\mathcal I}_q\to{\mathcal O}_{U}\to{\mathcal O}_q\to 0$, we obtain: $\lext^3_{{\mathcal O}_{U}}({\mathcal O}_q, {\mathcal I}_{q,U}) \simeq \lext^2_ {{\mathcal O}_{U}}({\mathcal I}_{q,U}, {\mathcal I}_{q,U})$. The last sheaf is an artinian sheaf of length $3$ by the proof of \cite [Proposition 6]{JMT}. Thus, since $\lext^3_{{\mathcal O}_{U}} ({\mathcal O}_q,{\mathcal O}_U))\simeq {\mathcal O}_q $, it follows from \eqref{dir sum} that each point in S contributes with 4 to the dimension of $\ext^2(E(t), E(t))$, hence, by \eqref{sum of exts}, $\dim \ext^2 (E(t),E(t))=4s$. The other claims in the statement of Theorem are clear from the definition of $\mathrm{T}(e,n,m,s)$. \end{proof}
We next proceed to a general result about the schemes $\mathrm{X} (e,n,m,r,s)$.
\begin{Teo}\label{NewComponentsmixed}
Let $e, n, m, r, s$ be integers such that $e\in\{-1,0\}$, $n,\ m>0$, $r\ge e$ and $s\ge0$. Then the scheme $\mathrm{X} (e,n,m,r,s)$ is equidimensional of dimension $8n+4s+2r+2+e$, and the number of irreducible components of $\mathrm{X} (e,n,m,r,s)$ is the same as that of ${\mathcal R}^{*}(e,n,m)$. Furthermore, $\mathrm{X}(e,n,m,r,s)$ contains a dense open subset $\mathcal{X}(e,n,m,r,s)$ such that, for $[E]\in \mathcal{X}(e,n,m,r,s)$, the following statements hold. \begin{itemize} \item[(i)] If $r\ge1$, then $\dim\ext^1(E,E)=8n+4s+2r+2+e= \dim\mathrm{X}(e,n,m,r,s)$. Hence, if ${\mathcal R}^{*}(e,n,m)$ is irreducible, then $\mathrm{X}(e,n,m,r,s)$ is an irreducible $(8n+4s+2r+2+e)$-dimensional component of ${\mathcal M}(e,n+1, m+2+e-2r-2s)$. \item[(ii)] If $0\ge r\ge e$, then $\dim\ext^1(E,E)=8n+4s+5+ 2e>\dim\mathrm{X}(e,n,m,r,s)$. \end{itemize} \end{Teo} \begin{proof} The first claim follows from \eqref{dimfamilymixed} and the above considerations. For the statements (i) and (ii), consider any sheaf $[E]\in\mathcal {X}(e,n,m,r,s)$. By definition $[E]$ defines a line $l$ and a set of $s$ points $S$ considered as a reduced scheme as: $l\sqcup S=\supp(E^{\vee\vee}/E)$. Note that, by Proposition \ref{important}.(iii) in which we set $T=i_* {\mathcal O}_l(r),\ Z={\mathcal O}_S$, where $i:Z\hookrightarrow\mathbb{P}^3$ is the embedding, one has \begin{equation}\label{dim ext2} \dim\ext^2(E,E)=h^0(\lext^3(\mathcal{O}_S,E))+\dim \ext^3(i_{*}\mathcal{O}_l(r),E). \end{equation} First, one has \begin{equation}\label{h0=} h^0(\lext^3(\mathcal{O}_S,E))=4s. \end{equation} The proof of this equality is given in \cite[Proof of Prop. 6]{JMT} in the case $e=0$. However, since $\lext^2(E,E) $ is 0-dimensional, the computation of $h^0(\lext^2(E,E))$ is purely local, and gives the same result for $e=-1$. 
Next, $\ext^3(i_{*}\mathcal{O}_l(r),E)\simeq \Hom(E,i_{*}\mathcal{O}_l(r-4))^{\vee}$ by Serre duality, and $$ \Hom(E,i_{*}\mathcal{O}_l(r-4))\simeq\mathrm{H}^0(\lhom(E,i_{*} \mathcal{O}_l(r-4))). $$ To compute $h^0(\lhom(E,i_{*} \mathcal{O}_l(r-4)))$, apply the functor $i^{*}(-\otimes i_{*}\mathcal{O}_l)$ to the triple $$ 0\to E\to E^{\vee\vee}\to{\mathcal O}_S\oplus i_*{\mathcal O}_l(r)\to0. $$
Using the fact that $E^{\vee\vee}|_{l} \simeq{\mathcal O}_l\oplus{\mathcal O}_l(e)$ we obtain the exact sequence $$ 0\to i^{*}Tor_1(i_{*}\mathcal{O}_l(r),i_{*}\mathcal{O}_l)\to
E|_l\xrightarrow{f}\mathcal{O}_l(e)\oplus\mathcal{O}_l \xrightarrow{g}\mathcal{O}_l(r)\to 0. $$ Whence $\ker g\simeq {\mathcal O}_l(-r+e)$, and since $i^{*}Tor_1(i_{*}\mathcal{O}_l(r),i_{*}\mathcal{O}_l)\simeq N_{l/\mathbb{P}^3}^{\vee}\otimes\mathcal{O}_l(r) \simeq 2\cdot \mathcal{O}_l(r-1)$, we obtain an exact sequence
$0\to2\cdot \mathcal{O}_l(r-1)\to E|_l\to{\mathcal O}_l(-r+e)\to0$. This triple easily implies that $h^0(\lhom(E,i_{*} \mathcal{O}_l(r-4)))$ equals $2r-3-e$ for $r\ge1$ and, respectively, equals 0 for $0\ge r\ge e$. This together with \eqref{dim ext2}, \eqref{h0=} and Theorem \ref{dimext1} with $c_2(E)=n+1$ yields the statements (i) and (ii) of Theorem. \end{proof}
We conclude this section with our first application of Theorem \ref{NewComponentsmixed}.
The case of moduli spaces ${\mathcal M}(-1,n,0)$ is interesting from the point of view that it contains, among others, all those irreducible components that have locally free sheaves (i. e., vector bundles) as their generic points. Ein had shown in \cite{Ein} that the number of these components for given number $n$ is unbounded as $n$ grows infinitely. Therefore, it is important to understand whether the components of ${\mathcal M}(-1, n,0)$ with non-locally free sheaves as their generic points satisfy the similar property. In this section we give an affirmative answer to this question in Theorem \ref{einmixed} below. Namely, the components $\mathrm{X}(-1,n,m,r,s)$ described in Theorem \ref{NewComponentsmixed} will serve for this purpose, with the numbers $n,m,r,s$ chosen appropriately.
\begin{Teo}\label{einmixed} Let $\eta_n$ and $\xi_n$ denote the number of irreducible components of $\mathcal{M}(-1,n,0)$ whose generic point corresponds to a non-locally free sheaf with mixed singularities and with 1-dimensional singularities, respectively. Then $$\lim\sup_{n\to\infty}\eta_n = \lim\sup_{n\to\infty}\xi_n = \infty. $$ \end{Teo} \begin{proof} For any odd integer $q\ge1$ set $n_q=9q^2-9q+1$, and for any integer $i$ such that $0\le i\le q-1,$ set $a_{q,i}=i,\ b_{q,i}=3q-3i-3,\ c_{q,i}=3i+1$. Then, according to Theorem 8 of \cite{JMT}, for an odd integer $m_{q,i}=m(a_{q,i},b_{q,i}, c_{q,i})$ given by (\ref{m(a,b,c)}) the scheme ${\mathcal R}^*(-1;n_q, m_{q,i})$ defined in \eqref{def R*} is nonempty. Thus, by Theorem \ref{NewComponentsmixed}, to any integers $s$ and $r$ such that $0\leq s\leq n_q-1$, $r=\frac{1}{2}(m_{q,i}+1-2s)$, there corresponds an equidimensional union $\mathrm{X}(-1, n_q-1,m_{q,i},r, s)$ of irreducible components of ${\mathcal M} (-1,n_q,0)$ of dimension $8n_q+2s+m_{q,i}+3$, where the number of irreducible components of $\mathrm{X}(-1,n_q-1,m_ {q,i},r,s)$ is the same as that of ${\mathcal R}^*(-1;n_q,m_{q,i})$. Therefore, since $0\le i\le q-1$, for each odd $q$ we obtain at least $q$ different irreducible components of $\mathrm{X} (-1,n_q-1,m_{q,i},r,s),\ i=0,...,q-1,$ which are irreducible components of ${\mathcal M}(-1,n_q,0)$, generic points of which are sheaves with mixed singularities. Taking $s=0$ we obtain the claim about sheaves with 1-dimensional singularities. \end{proof}
A similar result also holds for sheaves with 0-dimensional singularities. The proof is very similar to \cite[Theorem 9]{JMT}, using the series of components of ${\mathcal R}(-1,n,m)$ produced in Theorem \ref{thm newclass}. More precisely:
\begin{Teo}\label{ein0} Let $\zeta_n$ denote the number of irreducible components of $\mathcal{M}(-1,n,0)$ whose generic point corresponds to a non-locally free sheaf with 0-dimensional singularities. Then $$\lim\sup_{n\to\infty}\zeta_n = \infty. $$ \end{Teo}
\section{Infinite collections of rational moduli components} \label{Rational Components}
In this section we will construct an infinite collection of rational moduli components of the spaces ${\mathcal M}(-1,n,m)$ and ${\mathcal M}(0,n,m)$ with generic sheaves $E$ satisfying $\dim\operatorname{Sing}(E)=0$. This collection will include all previously known rational moduli components of ${\mathcal M}(-1,n,m)$ and ${\mathcal M}(0,n,m)$ whose generic sheaf has the property above. As a consequence, we can conclude that the moduli schemes $\mathcal{M}(-1,n,m)$ and $\mathcal{M}(0,n,m)$ contain at least one rational irreducible components for all $n\ge1$ and all admissible $m$.
The desired collection will be constructed via elementary transformations at sets of points from certain moduli components of stable reflexive rank 2 sheaves on $\mathbb{P}^3$. For this, we invoke results of Chang \cite{Chang,Chang2}, Mir\'o-Roig \cite{Maria-Miro}, Okonek--Spindler \cite{OS1985} and Schmidt \cite{S2018} on the moduli spaces of reflexive sheaves $\mathcal{R}(e,n,m)$ , $e\in\{0,1\}$. These are the results concerning the moduli spaces of reflexive sheaves with Chern classes belonging to the set of triples of integer numbers \label{sigmas} \begin{equation}\label{def sigma} \Sigma:=\Sigma_{-1}\cup\Sigma_{0},
\end{equation} where $\Sigma_{-1}$ and $\Sigma_0$ being respectively given by \begin{equation}\label{def sigma -1}
\{(-1,n,n^2)\ |\ n\ge1\} ~\bigcup~
\{(-1,n,n^2-2rn+2r(r+1))\ |\ n\ge5,\ 1\le r\le(\sqrt{4n-7}-1)/2\} \end{equation} and \begin{equation}\label{def sigma 0}
\{(0,n,n^2-n+2)\ |\ n\ge3\} ~\bigcup~ \{(0,n,n^2-n)\ |\
n\ge4\}~\bigcup~ \{(0,n,n^2-3n+8)\ |\ n\ge5\}.
\end{equation} According to \cite{Chang2,Maria-Miro,OS1985,S2018}, for each triple $(e,n,m)\in\Sigma$, the moduli space of stable rank 2 reflexive sheaves $\mathcal{R}(e,n,m)$ satisfies the following properties:\\ (I) Each $R=\mathcal{R}(e,n,m)$ is an irreducible, nonsingular and rational scheme, and it is a dense open subset of an irreducible component of $\mathcal{M}(e,n,m)$;\\ (II) $R$ is a fine moduli space, i. e., there exists a universal family of reflexive sheaves $\boldsymbol{\mathcal{F}}$ on $R\times\mathbb{P}^3$. (In the case $e=-1$ this a well-known property the moduli spaces of rank 2 sheaves on $\mathbb{P}^3$ with odd determinant - see for instance \cite[Thm. 4.6.5]{HL}. In the case $e=0$ this follows from the explicit constructions of reflexive sheaves from $R$ as extensions of standard sheaves. These constructions are provided in \cite{Chang2,Maria-Miro,OS1985,S2018}.) \\ (III) The dimension of each $R$ is given by: \begin{equation}\label{dim R -1} \begin{split} & \dim\mathcal{R}(-1,n,n^2)=n^2+3n+1\ \ \ {\rm if}\ \ \ n\ge2,\ \ \ {\rm resp.},\ \ \ 3\ \ \ {\rm if}\ \ \ n=1,\\ & \dim\mathcal{R}(-1,n,n^2-2rn+2r(r+1))= n^2+(3-2r)n+2r^2+5,\ \ \ if\ \ \ r\ge2,\ \ n\ge5,\\ & \dim\mathcal{R}(-1,n,n^2-2rn+2r(r+1))=n^2+n+6,\ \ \ {\rm if}\ \ \ r=1,\ \ n\ge5,\\ \end{split} \end{equation} \begin{equation}\label{dim R 0} \begin{split} & \dim\mathcal{R}(0,n,n^2-n+2)=n^2+2n+5,\ \ \ n\ge4,\ \ \ {\rm resp.},\ \ \ 21\ \ \ {\rm if}\ \ \ n=3, \\ & \dim\mathcal{R}(0,n,n^2-n)=n^2+2n+5,\ \ \ n\ge4,\\ & \dim\mathcal{R}(0,n,n^2-3n+8)=n^2+11\ \ \ {\rm if}\ \ \ n\ge6,\ \ \ {\rm resp.},\ \ \ 37\ \ \ {\rm if}\ \ \ n=5. \end{split} \end{equation} (IV) For $e=-1$ and arbitrary integer $n\ge1$, the maximal possible $m$ such that ${\mathcal M}(-1,n,m)\ne\emptyset$ equals $n^2$; note that $(-1,n,n^2)\in\Sigma_{-1}$ for $n\ge1$. 
For $e=0$ and arbitrary integer $n\ge1$, the maximal possible $m$ such that ${\mathcal M}(0,n,m)\ne\emptyset$ equals $n^2-n+2$; note that $(0,n,n^2-n+2)\in\Sigma_0$.
Now take an arbitrary scheme $R=\mathcal{R}(e,n,m)$ for $(e,n,m)\in\Sigma$ and, similarly to \eqref{R*times P3s}, set \begin{equation}\label{R times P3s} \left(R\times(\mathbb{P}^3)_0^s\right)_0:= \{([F],S)\in
R\times(\mathbb{P}^3)_0^s ~|~S\in{\Pi}_{[F]}\}, \end{equation} \begin{equation}\label{R times G times P3s} \left(R\times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0:= \{( [F],(l,S))\in R\times G(2,4)\times(\mathbb{P}^3)_0^s
~|~(l,S)\in\mathcal{X}_{[F]}\}, \end{equation} where ${\Pi}_{[F]}$ is defined in \eqref{PiF} for any reflexive sheaf $[F]\in R$. In particular, for $s=1$ we have \begin{equation}\label{minus Sing}
(R\times\mathbb{P}^3)_0=\{([F],x)\in R\times\mathbb{P}^3\ |\ x\not\in \operatorname{Sing}(F)\}. \end{equation} By property (III) above there is a universal sheaf $\boldsymbol{\mathcal{F}}$ on $R\times\mathbb{P}^3$, and the definition \eqref{minus Sing} yields that $$
\mathbf{F}:=\boldsymbol{\mathcal{F}}|_{(R\times\mathbb{P}^3)_0} $$ is a locally free rank 2 sheaf. Hence, there exists an open subset $U$ of $(R\times\mathbb{P}^3)_0$ such that \begin{equation}
\mathbf{P}(\mathbf{F}|_U)\simeq U\times\mathbb{P}^1, \end{equation} and we have dense open inclusions \begin{equation}\label{two embeddings} \mathbf{P}(\mathbf{F})\overset{\mathrm{open}}{\hookleftarrow} U\times\mathbb{P}^1\overset{\mathrm{open}}{\hookrightarrow} R\times\mathbb{P}^3\times\mathbb{P}^1. \end{equation} Now introduce a piece of notation. Let $s$ be a positive integer and let $f:X\to Y$ be an arbitrary morphism of schemes. The symmetric group $G=\mathcal{S}_s$ acts on $W=\prod_{1}^{s}X$ by permutations of factors, and the $s$-fold fibered product $X\times_Y\cdots\times_YX$ naturally embeds in $W$ as a $G$-invariant subscheme. We will denote by $\mathrm{Sym}^s(X/Y)$ the quotient scheme $(X\times_Y\cdots \times_YX)/G$ and call this quotient the fibered symmetric product of $X$ over $Y$.
Fix an integer $s\ge1$. The composition of projections $$ f:\ \mathbf{P}(\mathbf{F})\xrightarrow{\pi}(R\times\mathbb{P}^3)_0 \hookrightarrow R\times\mathbb{P}^3\xrightarrow{pr_1}R, $$ where $\pi$ is the structure morphism, defines the fibered symmetric product $\mathrm{Sym}^s(\mathbf{P} (\mathbf{F})/R)$ together with the projection $f_s:\mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R)\to R$ which factorizes as $$ f_s:\mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R)\xrightarrow {\pi_s}R\times\mathrm{Sym}^s(\mathbb{P}^3)\xrightarrow{pr_1}R. $$ The open embedding $(\mathbb{P}^3)_0^s\hookrightarrow \mathrm{Sym}^s(\mathbb{P}^3)$ together with the above projection $\pi_s$ defines an open dense embedding of the fibered product \begin{equation}\label{Y R} Y_R:=\mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R)\times _{\mathrm{Sym}^s(\mathbb{P}^3)}(\mathbb{P}^3)_0^s\overset{\mathrm{open}} {\hookrightarrow}\mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R). \end{equation} By the definition of $Y_R$, its set-theoretic description is the same as that of $\tilde{{\mathcal T}}(e,n,m,s)$, with ${\mathcal R}^*(e,n,m)$ substituted by $R$: \begin{equation}\label{Y_R as a set}
Y_R:=\{y=([F],S,[\varphi_y])~|~([F],S)\in\left(R\times(\mathbb{P}^3) _0^s\right)_0, \varphi_y\in\Hom(F,{\mathcal O}_S)_e\}, \end{equation} where $\left(R\times(\mathbb{P}^3)_0^s\right)_0$ is defined in \eqref{R times P3s}.
Now define a new set $X_R$ by the formula similar to \eqref{familyX}, with ${\mathcal R}^*(e,n,m)$ substituted by $R$: \begin{equation}\label{X_R as a set} \begin{split}
& X_R:=\{x=([F],(l,S),[\varphi_x])~|~([F],(l,S))\in \left(R\times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0,\\ &[\varphi_x]\in\Hom(F,Q_x)_e/\mathrm{Aut}(Q_x),\ Q_x:= Q_{(l,S),r}\}, \end{split} \end{equation} where $\left(R\times G(2,4)\times(\mathbb{P}^3)_0^s\right)_0$ and $Q_{(l,S),r}$ are defined in \eqref{R times G times P3s} and \eqref{QlSr}, respectively. There is a well-defined projection \begin{equation}\label{rho=} \rho:\ X_R\to Y_R\times G(2,4),\ \ \ ([F],(l,S),[\varphi_x])
\mapsto\big(([F],S,[\varphi_x|_{{\mathcal O}_S}]),l\big) ~~, \end{equation} such that $$ \mathcal{V}=\rho(X_R) $$ is a dense open subset of $Y_R\times G(2,4)$, and \begin{equation}\label{rho-1} \rho^{-1}(y,l)=\mathbf{P}^{2r+1-e}_l,\ \ \ \ (y,l)\in \mathcal{V}, \end{equation} where $\mathbf{P}^{2r+1-e}_l$ is described in \eqref{Pl} and \eqref{QlSr}. Let $\Gamma\subset G(2,4)\times\mathbb{P}^3$ be the graph of incidence with projections $G(2,4)\leftarrow\Gamma \to\mathbb{P}^3$. Consider the natural projections $$ \mathcal{V}\xleftarrow{v}\mathcal{V}\times_G\Gamma\times_{\mathbb{P}^3} (R\times\mathbb{P}^3)_0\xrightarrow{g}(R\times\mathbb{P}^3)_0\xrightarrow{h}\mathbb{P}^3 $$ and a locally free sheaf $\mathbf{E}$ defined as $$ \mathbf{E}=(v_*(g^*\mathbf{F}\otimes h^*\op3(r))),\ \ \ \ \ \rk\mathbf{E}=2r+2-e. $$ Then \begin{equation}\label{rho again} \rho:\ X_R=\mathbf{P}(\mathbf{E})\to\mathcal{V} \hookrightarrow Y_R\times G(2,4), \end{equation} is a locally trivial $\mathbf{P}^{2r+1-e}$-fibration with fibre $\mathbf{P}^{2r+1-e}_l$ described in \eqref{rho-1}.
From the definition \eqref{R times P3s} of $\left(R\times(\mathbb{P}^3) _0^s\right)_0$ it follows that $\pi_s(Y_R)\subset\left (R\times(\mathbb{P}^3)_0^s\right)_0$. We thus consider the composition \begin{equation}\label{prn pi s} f_Y:Y_R\xrightarrow{\pi_s}(R\times(\mathbb{P}^3)_0^s)_0\hookrightarrow R\times(\mathbb{P}^3)_0^s\xrightarrow{pr_1}R. \end{equation} Note that the open embeddings in \eqref{two embeddings} commute with the natural projections $f:\mathbf{P}(\mathbf{F})\to R$,\ $f':U\times\mathbb{P}^1\to R$ and $f'':R\times\mathbb{P}^3\times\mathbb{P}^1\to R$ and therefore define the induced dense open embeddings \begin{equation}\label{two induced emb} \mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R)\overset{\mathrm{open} }{\hookleftarrow}\mathrm{Sym}^s(U\times\mathbb{P}^1/R) \overset{\mathrm{open}}{\hookrightarrow}\mathrm{Sym}^s (R\times\mathbb{P}^3\times\mathbb{P}^1/R) \end{equation} which commute with the induced projections $f_s:\mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R)\to R$,\ $f'_s:\mathrm{Sym}^s(U\times\mathbb{P}^1/R)\to R$ and $f''_s:\mathrm{Sym}^s(R\times\mathbb{P}^3\times\mathbb{P}^1/R)\to R$. The diagram \eqref{two induced emb} yields a birational isomorphism $\mathrm{Sym}^s(\mathbf{P}(\mathbf{F})/R) \overset{\mathrm{bir}}{\dashleftarrow\dashrightarrow} \mathrm{Sym}^s(R\times\mathbb{P}^3\times\mathbb{P}^1/R)$, hence the dense open embedding \eqref{Y R} leads to a birational isomorphism \begin{equation}\label{bir isom} Y_R\overset{\mathrm{bir}}{\dashleftarrow\dashrightarrow} \mathrm{Sym}^s(R\times\mathbb{P}^3\times\mathbb{P}^1/R). \end{equation} On the other hand, from the definition of $\mathrm{Sym}^s(R\times\mathbb{P}^3\times\mathbb{P}^1/R)$ follows an isomorphism \begin{equation}\label{isom 1} \mathrm{Sym}^s(R\times\mathbb{P}^3\times\mathbb{P}^1/R)\simeq R\times\mathrm{Sym}^s(\mathbb{P}^3\times\mathbb{P}^1/R). \end{equation} Since any symmetric product of a rational variety is also rational (see for instance \cite[Ch. 4, Thm. 
2.8]{GKZ}), it follows from \eqref{isom 1} and the property (I) that $\mathrm{Sym}^s(R\times\mathbb{P}^3\times \mathbb{P}^1/R)$ is a rational irreducible scheme of dimension $4s+\dim R$. Hence by \eqref{bir isom} \begin{equation}\label{rational scheme} Y_R\ \ \textrm{is a rational irreducible scheme of dimension}\ \ 4s+\dim R. \end{equation} This together with the the description \eqref{rho again} yields: \begin{equation}\label{rational scheme2} X_R\ \ \textrm{is a rational irreducible scheme of dimension}\ \ 4s+2r+5-e+\dim R. \end{equation}
\begin{Teo}\label{Tmain2} For any $(e,n,m)\in\Sigma_{-1}\cup\Sigma_0$ and $R$ irreducible component of ${\mathcal R}(e,n,m)$, we have: \begin{itemize} \item[(i)] for any integer $s$ such that $0\le 2s\le m$ there exists a rational, generically reduced, irreducible component $\overline{Y}_R$ of the moduli space ${\mathcal M}(e,n,m-2s)$ having the dimension $4s+\dim{\mathcal R}(e,n,m)$, where $\dim{\mathcal R}(e,n,m)$ is given by one of the corresponding formulas \eqref{dim R -1}-\eqref{dim R 0}. A generic sheaf from $\overline{Y}_R$ has 0-dimensional singularities;
\item[(ii)] for any integers $r,s$ such that $s\ge0$, $r\ge3$ and $2r+2s\le m+2+e$ there exists a rational, generically reduced, irreducible and component $\overline{X}_R$ of the moduli space ${\mathcal M}(e,n+1,m+2+e-2r-2s)$ having the dimension $4s+2r+5-e+ \dim{\mathcal R}(e,n,m)$, with $\dim{\mathcal R}(e,n,m)$ given by the formulas mentioned above. A generic sheaf from $\overline{X}_R$ has singularities of pure dimension 1 for $s=0$, and, respectively, of mixed dimension for $s\ge1$. \end{itemize} \end{Teo}
\begin{proof}
\noindent{\bf Item (i).} We are going to show that $Y_R$ is an open dense subset of an irreducible component $\overline{Y}_R$ of ${\mathcal M}(e,n,m-2s)$, where $R={\mathcal R}(e,n,m)$. To this aim, we first construct a family of sheaves on $\mathbb{P}^3$ with base $Y_R$ which are obtained from reflexive sheaves $[F]\in R$ via elementary transformations along sets of $s$ points. Let $H={\rm Hilb}^s(\mathbb{P}^3)$ be the Hilbert scheme of 0-dimensional subschemes of length $s$ in $\mathbb{P}^3$, together with the universal family of 0-dimensional schemes $Z_H\hookrightarrow H\times\mathbb{P}^3$. We have an open embedding $(\mathbb{P}^3)^s_0\hookrightarrow H$ and the induced family $Z=Z_H\times_H(\mathbb{P}^3)^s_0\hookrightarrow(\mathbb{P}^3)^s_0\times\mathbb{P}^3$. Given a point $\{S\}\in(\mathbb{P}^3)^s_0$, we will denote also by $S$ the corresponding 0-dimensional subscheme $Z\times_{H(\mathbb{P}^3)^s _0}\{S\}$ in $\mathbb{P}^3$. (This will not cause an ambiguity since $S$ is a reduced scheme by the definition of $(\mathbb{P}^3)^s_0$.) According to \cite[Ch. II, Prop. 7.12]{H}, for a given sheaf $[F]\in R$ and the above point $\{S\}$ such that $S\cap\operatorname{Sing} (F)=\emptyset$, choosing a class $[\varphi]$ modulo $\mathrm{Aut}(\mathcal{O}_S)$ of an epimorphism $$ \varphi:F\twoheadrightarrow\mathcal{O}_S $$ is equivalent to choosing a section $[\varphi]$ of the
structure morphism $\pi:\mathbf{P}(F|_S)\to S$, $$
[\varphi]:S\hookrightarrow\mathbf{P}(F|_S). $$ By the construction of $Y_R$ (see \eqref{Y R}), the section $[\varphi]$ is just a point of $Y_R$ lying in the fiber $\pi_s^{-1}([F],\xi)$ of the projection $\pi_s:Y_R\to (R\times(\mathbb{P}^3)_0^s)_0$ defined in \eqref{prn pi s}. Using this description of points of $Y_R$, define a family \begin{equation}\label{family for YR}
\{[E_y]\in{\mathcal M}(e,n,m-2s)\ |\ E_y=\ker(\varphi_y:F \twoheadrightarrow\mathcal{O}_S),\ y=([F],S,[\varphi_y])\in Y_R\}. \end{equation} Here, by definition, for each $y=([F],S,[\varphi_y])\in Y_R$, the sheaf $E=E_y$ satisfies the exact triple \begin{equation}\label{triple for E} 0\to E\to F\xrightarrow{\varphi_y}\mathcal{O}_S\to0, \ \ \ \ \ F=E^{\vee\vee}. \end{equation} In particular, this triple, together with the stability of $F$, yields by usual argument the stability of $E$, i. e., the definition of the family \eqref{family for YR} is consistent. The family $\{E_y\}_{y\in Y_R}$ globalizes in a standard way to a sheaf $\mathbf{E}$ on $Y_R\times\mathbb{P}^3$ such that, for any
$y\in Y_R$, $\mathbf{E}|_{\{y\}\times\mathbb{P}^3}\simeq E_y$. We thus have a natural modular morphism \begin{equation}\label{modular morphism for YR}
\Psi:Y_R\to{\mathcal M}(e,n,m-2s),\ y\mapsto\left[\mathbf{E}|_{\{y\}\times\mathbb{P}^3}\right]. \end{equation} The morphism $\Psi$ is clearly an embedding, since a point $\{y\}$ is recovered from $E=\ker(\varphi_y)$ as the (class of the) quotient map $F=E^{\vee\vee}\twoheadrightarrow E^{\vee\vee}/E$. We therefore identify $Y_R$ with its image in ${\mathcal M}(e,n,m-2s)$. Let $\overline{Y}_R$ be the closure of $Y_R$ in ${\mathcal M}(e,n,m-2s)$.
We have to show that $\overline{Y}_R$ is an irreducible rational component of ${\mathcal M}(e,n,m-2s)$, where $R={\mathcal R}(e,n, m)$. Here the rationality and the dimension of $\overline{Y} _R$ are given in display \eqref{rational scheme}. Since $\overline{Y}_R$ is irreducible, to prove that $\overline{Y}_R$ is an irreducible and generically reduced component of ${\mathcal M}(e,n,m-2s)$, it is enough to show that, for an arbitrary point $y\in Y_R$ the sheaf $E=E_y$ satisfies the equality \begin{equation}\label{dim ext=dim Y} \dim\ext^1(E,E)=\dim Y_R=4s+\dim{\mathcal R}(e,n,m). \end{equation} (Note that the equality \eqref{dim ext=dim Y} is beyond the scope of Theorem \ref{0dcomp}, since we cannot assume that $\ext^2(F,F)=0$ here).
Indeed, let $E$ satisfy the triple \eqref{triple for E}. Then, since $S$ is 0-dimensional and $F$ is reflexive, it follows that $\dim\operatorname{Sing}(E)=\dim\operatorname{Sing}(F)=0$, and therefore $\dim\lext^1 (E,E)=\dim\lext^1(F,E)=0.$ Thus, \begin{equation}\label{vanish H1} \mathrm{H}^1(\lext^1(E,E))=0,\ \ \ \ \mathrm{H}^1(\lext^1(F,E))=0, \end{equation} \begin{equation}\label{vanish H2} \mathrm{H}^2(\lext^1(E,E))=0. \end{equation} The first equality in \eqref{vanish H1} and Lemma \ref{Extisomixed}.(ii) yield the equality $\ext^2(E,E)=\mathrm{H}^0(\lext^2(E,E))\oplus\coker d^{01}_2$ where $d^{01}_2$ is the differential $d^{01}_2:\mathrm{H}^0(\lext^1 (E,E))\to\mathrm{H}^2(\lhom(E,E))$ in the spectral sequence of local-to-global Ext’s. Moreover, this spectral sequence together with \eqref{vanish H2} yields an exact sequence \begin{equation}\label{exact 1} \mathrm{H}^0(\lext^1(E,E))\xrightarrow{d_2^{01}}\mathrm{H}^2(\lhom(E,E))\to \ext^2(E,E)\to\mathrm{H}^0(\lext^2(E,E))\to0. \end{equation} Note that, since the reflexive sheaf $F$ has homological dimension 1, it follows that \begin{equation}\label{Ext2,3=0} \lext^2(F,E)=0=\lext^3(F,E). \end{equation} Therefore, applying to the triple \eqref{triple for E} the functor $\lext^2(-,E)$ we obtain \begin{equation} \lext^2(E,E)=\lext^3(\mathcal{O}_S,E), \end{equation} Here, as in \eqref{h0=}, one has $h^0(\lext^3(\mathcal{O}_S,E) )=4s$, so that \begin{equation}\label{h0(ext2)} h^0(\lext^2(E,E))=4s. \end{equation} Next, the equality $\lext^2(F,E)=0$ (see \eqref{Ext2,3=0}) together with the second equality in \eqref{vanish H1} yields an exact sequence similar to \eqref{exact 1}: \begin{equation}\label{exact 2} \mathrm{H}^0(\lext^1(F,E))\xrightarrow{d_2^{01}}\mathrm{H}^2(\lhom(F,E))\to \ext^2(F,E)\to0. 
\end{equation} The exact sequences \eqref{exact 1} and \eqref{exact 2} fit in a commutative diagram extending \eqref{d012-diagram} \begin{equation} \label{d012-diagram extended} \xymatrix{ \mathrm{H}^0(\lext^1(F,E)) \ar[d]\ar[r]^{d^{01}_2} & \mathrm{H}^2(\lhom(F,E)) \ar[d]^-{\simeq}\ar[r] & \ext^2(F,E) \ar[r] \ar[d] & 0 \ar[d] & \\ \mathrm{H}^0(\lext^1(E,E)) \ar[r]^{d^{01}_2} & \mathrm{H}^2(\lhom(E,E))\ar[r] & \ext^2(E,E) \ar[r] & \mathrm{H}^0(\lext^2(E,E)) \ar[r] & 0.} \end{equation} Here the second vertical map is an isomorphism. Indeed, applying to the exact triple \eqref{triple for E} the functor $\lhom(-,E)$ we obtain an exact sequence $0\to\lhom(F,E)\to \lhom(E,E)\to\mathcal{A}\to0$, where $\dim\mathcal{A}\le0$ since $\mathcal{A}$ is a subsheaf of the sheaf $\lext^1( {\mathcal O}_S,E)$ of dimension $\le0$. Now passing to cohomology of the last exact triple we obtain the desired isomorphism. The diagram \eqref{d012-diagram extended} together with \eqref{h0(ext2)} yields the relation \begin{equation}\label{dim=dim+4s} \dim\ext^2(E,E)=\dim\ext^2(F,E)+4s. \end{equation} Now applying to \eqref{triple for E} the functor $\Hom(F,-)$ we obtain the exact sequence \begin{equation}\label{Ext2(F,E)} \ext^1(F,{\mathcal O}_S)\to\ext^2(F,E)\to\ext^2(F,F)\to\ext^2 (F,{\mathcal O}_S). \end{equation} Since $\supp({\mathcal O}_S)\cap\operatorname{Sing}(F)=\emptyset$ and $F$ is reflexive, it is easy to show that \begin{equation}\label{extj(F,OS)=0} \ext^j(F,{\mathcal O}_S)=0,\ \ \ \ \ j>0, \end{equation} - see, e. g., \cite[Proof of Prop. 6]{JMT} for the case $e=0$; in case $e=-1$ this argument goes on without changing. Thus, from \eqref{Ext2(F,E)} it follows that $\dim\ext^2(F,E)=\dim\ext^2(F,F)$, and the relation \eqref{dim=dim+4s} yields \begin{equation}\label{dim+4s} \dim\ext^2(E,E)=\dim\ext^2(F,F)+4s. \end{equation} Next, since in \eqref{triple for E} $\dim{\mathcal O}_S=0$, it follows that $c_i(E)=c_i(F),\ i=1,2$. 
Thus, since both $E$ and $F$ are stable, Theorem \ref{dimext1} implies that $\dim\ext^1(E,E)-\dim\ext^2(E,E)=\dim\ext^1(F,F)- \dim\ext^2(F,F)$. Hence, by \eqref{dim+4s} $\dim\ext^1(E,E)=\dim\ext^1(F,F)+4s$. This together with \eqref{dim=dim Ext} implies \eqref{dim ext=dim Y}.
\noindent{\bf Item (ii).} We have to show that $X_R$, where $R={\mathcal R}(e,n,m)$, is an open dense subset of an irreducible component $\overline{X} _R$ of ${\mathcal M}(e,n+1,m+2+e-2r-2s)$. Using the pointwise description \eqref{X_R as a set} of $X_R$, we consider similarly to \eqref{family for YR} a family \begin{equation}\label{family for XR}
\{[E_x]\in{\mathcal M}(e,n+1,m+2+e-2r-2s)\ |\ E_x=\ker(\varphi_x:F \twoheadrightarrow Q_x),\ x=([F],(l,S),[\varphi_x])\in X_R\}. \end{equation} The rest of the argument below is parallel to that in the proof of statement (i) above. The difference is due to the fact that the triple \eqref{triple for E} is modified as \begin{equation}\label{triple for E new} 0\to E\to F\xrightarrow{\varphi_x}Q_x\to0,\ \ \ \ \ Q_x=\mathcal{O}_S\oplus i_*{\mathcal O}_l(r),\ \ \ \ \ F=E^{\vee \vee},\ \ \ \ \ E=E_x. \end{equation} As for the sheaf $E$ in \eqref{triple for E}, a standard argument for the sheaf $E$ in the last triple in view of the stability of $F$ yields the stability of $E$, i. e., the definition of the family \eqref{family for XR} is consistent. This family $\{E_x\}_{x\in X_R}$ globalizes in a standard way to a sheaf $\mathbf{E}$ on $X_R\times\mathbb{P}^3$ such that, for any
$x\in X_R$, $\mathbf{E}|_{\{x\}\times\mathbb{P}^3}\simeq E_x$. We thus have a natural modular morphism similar to \eqref{modular morphism for YR}: \begin{equation}\label{modular morphism for XR}
\Psi:X_R\to{\mathcal M}(e,n+1,m+2+e-2r-2s),\ x\mapsto\left[\mathbf{E}|_{\{x\}\times\mathbb{P}^3}\right]. \end{equation} The morphism $\Psi$ is clearly an embedding, since a point $\{x\}$ is recovered from $E=\ker(\varphi_x)$ as the (class of the) quotient map $F=E^{\vee\vee}\twoheadrightarrow E^{\vee\vee}/E$. We therefore identify $X_R$ with its image in ${\mathcal M}(e,n+1,m+2+e-2r-2s)$. Let $\overline{X}_R$ be the closure of $X_R$ in ${\mathcal M}(e,n+1,m+2+e-2r-2s)$.
We have to prove that, for $R={\mathcal R}(e,n,m)$, the scheme $\overline{X}_R$ is an irreducible rational component of ${\mathcal M}(e,n+1,m+2+e-2r-2s)$. Here the rationality and the dimension of $\overline{X}_R$ are given in display \eqref{rational scheme2}. Since $\overline{X}_R$ is irreducible, to prove that $\overline{X}_R$ is an irreducible and generically reduced component of ${\mathcal M}(e,n+1,m+2+e-2r -2s)$, it is enough to show that, for an arbitrary point $x\in X_R$ the sheaf $E=E_x$ satisfies the equality \begin{equation}\label{dim ext=dim X} \dim\ext^1(E,E)=\dim\overline{X}_R=4s+2r+5-e+\dim{\mathcal R}(e,n,m). \end{equation} (Remark that the equality \eqref{dim ext=dim X} is beyond the scope of Theorem \ref{0dcomp}, since we cannot assume that $\ext^2(F,F)=0$ here).
Indeed, let $E$ satisfy the triple \eqref{triple for E new}. This triple and the definition \eqref{X_R as a set} of $X_R$ yield that \begin{equation}\label{emptyset0} \mathrm{Supp}(Q_x)=S\sqcup l,\ \ \ \ \ \operatorname{Sing}(E)=\mathrm{Supp} (Q_x)\sqcup\operatorname{Sing}(F),\ \ \ \ \ \mathrm{i.\ e.}\ \ \ \ \ \mathrm{Supp}(Q_x)\cap\operatorname{Sing}(F)=\emptyset. \end{equation} Hence, since $F$ is reflexive, \begin{equation}\label{lext1(F,E)} \dim\lext^1(F,E)=0,\ \ \ \ \mathrm{Supp}(\lext^1(F,E))= \operatorname{Sing}(F), \end{equation} \begin{equation}\label{Exti(F,E)restr=0}
\lext^i(F,E)|_{\mathbb{P}^3\smallsetminus\operatorname{Sing}(F)}=0,\ \ \ i\ge1. \end{equation}
Since $F$ is locally free along $l$ (see \eqref{emptyset0})
the isomorphisms $F|_l\simeq{\mathcal O}_l(e)\oplus{\mathcal O}_l$ and $\lext^2(i_*{\mathcal O}_l,\op3)\simeq{\mathcal O}_l(2)$ imply that \begin{equation}\label{lext2(Ol(r),F)} \lext^2(i_*{\mathcal O}_l(r),F)\simeq{\mathcal O}_l(2-r+e)\oplus{\mathcal O}_ l(2-r), \end{equation} \begin{equation}\label{lext1,3=0} \lext^1(i_*{\mathcal O}_l(r),F)=\lext^3(i_*{\mathcal O}_l(r),F)=0. \end{equation} By the same reason, $\lext^j(F,i_*{\mathcal O}_l(r))=0,\ j>0,$ so that, since $r\ge3$, we have \begin{equation}\label{Extj(F,Ol(r))=0} \ext^j(F,i_*{\mathcal O}_l(r))=\mathrm{H}^j(\lhom(F,i_*{\mathcal O}_l(r)))\simeq \mathrm{H}^j({\mathcal O}_l(r-e)\oplus{\mathcal O}_l(r))=0,\ \ \ j>0. \end{equation} Applying to the triple \eqref{triple for E new} the functor $\lhom(i_*{\mathcal O}_l(r),-)$ and using \eqref{lext2(Ol(r),F)}, \eqref{lext1,3=0} and the isomorphisms $$ \lext^1(i_*{\mathcal O}_l(r), i_*{\mathcal O}_l(r))=N_{l/\mathbb{P}^3}\simeq2{\mathcal O}_l(1) ~~{\rm and}~~ \lext^2(i_*{\mathcal O}_l(r),i_*{\mathcal O}_l(r))\simeq\det N_{l/\mathbb{P}^3} \simeq{\mathcal O}_l(2), $$ we obtain an exact sequence \begin{equation}\label{exact on l} 0\to2{\mathcal O}_l(1)\to\lext^2(i_*{\mathcal O}_l(r),E)\to{\mathcal O}_l(2-r+e) \oplus{\mathcal O}_l(2-r)\to{\mathcal O}_l(2)\xrightarrow{\gamma} \lext^3(i_*{\mathcal O}_l(r),E)\to0. \end{equation} Here one easily sees that $\mathrm{Supp}(\lext^3(i_*{\mathcal O}_l (r),E))=l$, hence $\gamma$ is an isomorphism \begin{equation}\label{=Ol(2)} \lext^3(i_*{\mathcal O}_l(r),E)\simeq{\mathcal O}_l(2), \end{equation} and \eqref{exact on l} yields an exact triple $0\to2{\mathcal O}_l(1)\to\lext^2(i_*{\mathcal O}_l(r),E)\to{\mathcal O}_l(2-r+e) \oplus{\mathcal O}_l(2-r)\to0$. Passing to cohomology of this triple and using the condition $r\ge3$ we get \begin{equation}\label{h1(...)=} h^1(\lext^2(i_*{\mathcal O}_l(r),E))=2r-e-6. 
\end{equation} Next, applying to the triple \eqref{triple for E new} the functor $\lhom(-,E)$ we obtain a long exact sequence \begin{equation}\label{Hom(-,E)} 0\to\lhom(F,E)\to\lhom(E,E)\xrightarrow{\partial}\lext^1(Q_x, E)\to...\to\lext^3(F,E)\to\lext^3(E,E)\to0. \end{equation} Denoting $\mathcal{A}=\mathrm{im}(\partial)$ we obtain an exact triple \begin{equation}\label{Hom(-,E)1} 0\to\lhom(F,E)\to\lhom(E,E)\to\mathcal{A}\to0. \end{equation} Since $\mathcal{A}$ is a subsheaf of $\lext^1(Q_x,E)$ and by \eqref{emptyset0} $\dim\lext^1(Q_x,E)\le1$, it follows that $\mathrm{H}^2(\mathcal{A})=0$, and the triple \eqref{Hom(-,E)1} yields an epimorphism \begin{equation}\label{h2 epi} \mathrm{H}^2(\lhom(F,E))\twoheadrightarrow\mathrm{H}^2(\lhom(E,E)). \end{equation} Next, restricting the sequence \eqref{Hom(-,E)} onto $\mathbb{P}^3\smallsetminus\operatorname{Sing}(F)$ and using \eqref{Exti(F,E)restr=0} we obtain the isomorphism \begin{equation}\label{lext(E,E)restr}
\lext^1(E,E)|_{\mathbb{P}^3\smallsetminus\operatorname{Sing}(F)}\simeq\lext^1 (i_*{\mathcal O}_l(r),E). \end{equation} Since by \eqref{triple for E new} the sheaves $E$ and $F$ coincide outside $\mathrm{Supp}(Q_x)$, it follows from \eqref{lext(E,E)restr} and the reflexivity of $F$ that \begin{equation}\label{lext(E,E)} \lext^1(E,E)\simeq\lext^2(i_*{\mathcal O}_l(r),E)\oplus \lext^1(F,F),\ \ \ \ \ \ \dim\lext^1(F,F)=0. \end{equation} These equalities together with \eqref{h1(...)=} imply \begin{equation}\label{h1(lext1)=} h^1(\lext^1(E,E))=h^1(\lext^2(i_*{\mathcal O}_l(r),E))=2r-e-6. \end{equation} From \eqref{lext1(F,E)} and \eqref{lext(E,E)} we find \begin{equation}\label{vanish H1 new} \mathrm{H}^1(\lext^1(F,E))=\mathrm{H}^2(\lext^1(F,E))=0, \end{equation} \begin{equation}\label{vanish H2 new} \mathrm{H}^2(\lext^1(E,E))=0. \end{equation} The spectral sequence of local-to-global Ext’s for the pair $(E,E)$ together with \eqref{vanish H2 new} yields the exact sequences \begin{equation}\label{exact 1 new} \mathrm{H}^0(\lext^1(E,E))\xrightarrow{d_2^{01}}\mathrm{H}^2(\lhom(E,E))\to \coker d^{01}_2\to0, \end{equation} \begin{equation}\label{exact 11 new} 0\to\ker{\varepsilon}\to\ext^2(E,E)\xrightarrow{\varepsilon} \mathrm{H}^0(\lext^2(E,E))\to0. \end{equation} \begin{equation}\label{exact 12 new} 0\to\coker d^{01}_2\to\ker{\varepsilon}\to\mathrm{H}^1(\lext^1(E,E)) \to0. \end{equation} Note that, since the sheaf $F$ is reflexive, the equalities \eqref{Ext2,3=0} are still true, so that the rightmost part of the long exact sequence \eqref{Hom(-,E)} yields the isomorphisms \begin{equation}\label{lext2(E,E)} \lext^2(E,E)\simeq\lext^3(Q_x,E)=\lext^3({\mathcal O}_S,E)\oplus \lext^3(i_*{\mathcal O}_l(r),E). \end{equation} Here, as in \eqref{h0=}, one has $h^0(\lext^3(\mathcal{O}_S,E)) =4s$, and by \eqref{=Ol(2)} we have $h^0(\lext^3(i_*{\mathcal O}_l(r),E)) =3$, so that \eqref{lext2(E,E)} implies \begin{equation}\label{h0(ext2) new} h^0(\lext^2(E,E))=4s+3. 
\end{equation} Besides, similar to \eqref{exact 1 new}-\eqref{exact 12 new}, the spectral sequence of local-to-global Ext’s for the pair $(F,E)$ together with \eqref{vanish H1 new} and the first equality \eqref{Ext2,3=0} yields the exact sequence \begin{equation}\label{exact 2 new} \mathrm{H}^0(\lext^1(F,E))\xrightarrow{d_2^{01}}\mathrm{H}^2(\lhom(F,E))\to \ext^2(F,E)\to0. \end{equation} The exact sequences \eqref{exact 1 new} and \eqref{exact 2 new} in view of \eqref{h2 epi} fit in a commutative diagram \begin{equation} \label{d012-diagram extended new} \xymatrix{ \mathrm{H}^0(\lext^1(F,E)) \ar[d]\ar[r]^{d^{01}_2} & \mathrm{H}^2(\lhom(F,E)) \ar@{>>}[d]\ar[r] & \ext^2(F,E) \ar[r] \ar@{>>}[d] & 0 \ar[d] & \\ \mathrm{H}^0(\lext^1(E,E)) \ar[r]^{d^{01}_2} & \mathrm{H}^2(\lhom(E,E))\ar[r] & \coker d^{01}_2 \ar[r] &\ 0. &} \end{equation} Now applying to \eqref{triple for E new} the functor $\Hom(F,-)$ we obtain the exact sequence \begin{equation}\label{Ext2(F,E) new} \ext^1(F,Q_x)\to\ext^2(F,E)\to\ext^2(F,F)\to\ext^2(F,Q_x). \end{equation} Recall that $Q_x={\mathcal O}_S\oplus i_*{\mathcal O}_l(r)$, and the equalities \eqref{extj(F,OS)=0} are still true. This together with \eqref{Extj(F,Ol(r))=0} yields $\ext^i(F,Q_x)=0,\ i=1,2,$ and \eqref{Ext2(F,E) new} implies the isomorphism \begin{equation}\label{Ext2=...} \ext^2(F,E)\simeq\ext^2(F,F). \end{equation} Now \eqref{exact 11 new}, \eqref{exact 12 new}, diagram \eqref{d012-diagram extended new} and \eqref{Ext2=...} imply the inequality $$ \dim\ext^2(E,E)\le\dim\ext^2(F,F)+h^0(\lext^2(E,E))+ h^1(\lext^1(E,E)) $$ which in view of \eqref{h1(lext1)=} and \eqref{h0(ext2) new} can be rewritten as \begin{equation}\label{dim+...new} \dim\ext^2(E,E)\le\dim\ext^2(F,F)+4s+2r-e-3. \end{equation} Next, since $c_1(Q_x)=0,\ c_2(Q_x)=-1$, it follows from \eqref{triple for E new} that $c_i(E)=c_i(F),\ i=1,2$. 
Thus, since both $E$ and $F$ are stable, Theorem \ref{dimext1} implies that $\dim\ext^1(E,E)-\dim\ext^2(E,E)=\dim\ext^1(F,F)- \dim\ext^2(F,F)+8$. Hence, by \eqref{dim+...new} $\dim\ext^1(E,E)\le\dim\ext^1(F,F)+4s+2r+5-e$. This inequality in view of \eqref{rational scheme2} and \eqref{dim=dim Ext} can be rewritten as $$ \dim\ext^1(E,E)\le4s+2r+5-e+\dim R=\dim\overline{X}_R. $$ On the other hand, since in \eqref{modular morphism for XR} the modular morphism $\Psi:X_R\to{\mathcal M}(e,n+1,m+2+e-2r-2s)$ is an embedding, it follows that $\dim\ext^1(E,E)\ge\dim \overline{X}_R$. Thus, the last inequality is in fact an equality, and we obtain \eqref{dim ext=dim X}. \end{proof}
We are finally in position to give the proof of Main Theorem \ref{main2}. \label{pf mthm 2}
\noindent{\it Proof of Main Theorem \ref{main2}.} {\bf Item (i).} It follows from Theorem \ref{Tmain2} (i) and the property (IV). Namely, take $R={\mathcal R}(-1,2n,(2n)^2)$. If $n\ge1$, then take $s=2n^2$, so that, for this $s$, $Y_R$ is a rational generically reduced component of ${\mathcal M}(-1,2n,0)$, with generic sheaf having 0-dimensional singularities. If $n\ge3$, then: a) for $(s,r)=(0,2(n^2-n-1))$, the scheme $X_R$ is a rational generically reduced component of ${\mathcal M}(-1,2n,0)$, with generic sheaf having purely 1-dimensional singularities; b) for each pair $(s,r)$ such that $1\le s\le 2(n^2-n-1)$ and $r=2(n^2-n)+1-s$, the scheme $X_R$ is a rational generically reduced component of ${\mathcal M}(-1,2n,0)$, with generic sheaf having singularities of mixed dimension.
\noindent{\bf Item (ii).} It follows from Theorem \ref{Tmain2} (i) and the property (IV). Take $R={\mathcal R} (0,n,n^2-n+2)$. If $n\ge1$, then for $s=\frac{n^2-n+2}{2}$, the scheme $Y_R$ is a rational generically reduced component of ${\mathcal M}(0,n,0)$, with generic sheaf having 0-dimensional singularities. If $n\ge3$, then for $(s,r)=(0,\frac{n(n-3)}{2} +3)$, the scheme $X_R$ is a rational generically reduced component of ${\mathcal M}(0,n,0)$, with generic sheaf having purely 1-dimensional singularities. If $n\ge4$, then for each pair $(s,r)$ such that $1\le s\le\frac{n(n-3)}{2}$ and $r=\frac{n(n-3)}{2}+3-s$, the scheme $X_R$ is a rational generically reduced component of ${\mathcal M}(0,n,0)$, with generic sheaf having singularities of mixed dimension. Main Theorem \ref{main2} is proved. ~
$\Box$
\begin{Remark} As it was shown by Le Potier in \cite{LeP1993}, the moduli scheme ${\mathcal M}(0,2,0)$ consists of three irreducible components: one is the closure of locally free sheaves, while the other two have, as a general point, sheaves with 0-dimensional singular locus obtained via elementary transformations of reflexive sheaves in ${\mathcal R}(0,2,2)$ and ${\mathcal R}(0,2,4)$ at points. While the results of this section do not cover these irreducible components, Le Potier has shown that they are also rational, via a different method. \end{Remark}
\section{Irreduciblility of ${\mathcal M}(-1,2,4)$} \label{irreducible of M(-1,2,4)}
In the previous sections our results ensured the existence of irreducible components of the moduli spaces of torsion free sheaves with prescribed singularities, without focusing on the description of all irreducible components of the moduli space for given Chern classes. The aim of this and subsequent sections is to consider this problem for smallest value $c_2=2$ of the second Chern class. Namely, in Sections \ref{irreducible of M(-1,2,4)}-\ref{irreducible of M(-1,2,0)} we will obtain the complete characterization of the moduli spaces ${\mathcal M}(-1,2,c_3)$ for all possible values of $c_3$. These results will illustrate why this study becomes too complicated for large values of $c_2$.
More precisely, in this section we will describe the irreducible components of the moduli spaces ${\mathcal M}(-1,2,c_3)$ for possible values $c_3 = 0,2,4$ of the third Chern class. For the convenience of the reader, in the following proposition we will fix some numerical invariants of torsion free sheaves that we will use in this section.
\begin{Prop}\label{ChernClasses} Let $E$ be a torsion free sheaf, $E^{\vee\vee}$ its double dual and $Q_E : = E^{\vee \vee}/E$. The following holds: \begin{itemize} \item[(i)] $\dim Q_E\le1$ and $c_1(E^{\vee \vee}) = c_1(E)$; \item[(ii)] if $\dim Q_E=1$ then $c_2(E^{\vee \vee}) = c_2(E)-\mult(Q_E)$, $c_3(E^{\vee\vee})=c_3(E)+c_3(Q_E)-c_1(E)\cdot\mult Q_E$, where $\mult(Q_E)$ is the multiplicity of the sheaf $Q_E$; \item[(iii)] if $\dim Q_E=0$ then $c_2(E^{\vee \vee})=c_2(E)$, $c_3(E^{\vee \vee})=c_3(E)+2 \cdot \mathrm{length}(Q_E)$. \end{itemize} If, in addition, $E$ is stable with $c_1(E)=-1$, then \begin{itemize} \item[(iv)] $E^{\vee\vee}$ is stable; \item[(v)] if $\dim Q_E=1$ then $c_2(E) \geq \mult Q_E\ge1$. \end{itemize} \end{Prop} \begin{proof} Since $E$ is torsion free, it fits in the following exact sequence: \begin{equation}\label{fundamentalcomponents} 0 \to E \to E^{\vee \vee} \xrightarrow{\varepsilon} Q_E \to 0. \end{equation} The statement (i) is clear, since $E$ is torsion free. Therefore, computing the Chern classes we have the items (ii) and (iii). To show (iv), it is enough to consider the triple $0\to A\xrightarrow{i} E^{\vee\vee}\to B\to0$ where both $A$ and $B$ are rank-1 torsion free sheaves with $c_1(A)+c_1(B)= c_1(E^{\vee\vee})=-1$. Since $\dim Q_E\le1$, it follows that $\dim\mathrm{im}(\varepsilon\circ i)\le1$, where $\varepsilon$ is the epimorphism in \eqref{fundamentalcomponents}. Therefore, the rank 1 sheaf $A'=\ker(\varepsilon\circ i)$ satisfies the equality $c_1(A')=c_1(A)$. On the other hand, since $A'$ is a subsheaf of the stable sheaf $E$, it follows that $c_1(A')\le c_1(E)\le-1$. Hence, $c_1(A)\le c_1(E^{\vee\vee})=-1$ and $c_1(B)\ge0$, which implies that the reduced Hilbert polynomial of $A$ is less than that of $E^{\vee\vee}$, that is $E^{\vee \vee}$ is stable. In particular, $c_2(E^{\vee \vee}) \geq 1$, see \cite[Cor. 3.3]{Harshorne-Reflexive}. Thus, if $\dim Q_E=1$ then, by (iv), $c_2(E)\ge\mult Q_E\ge1$. \end{proof}
The next Lemma is an easy technical result that we use later in this section. \begin{Lema}\label{nonemptyfamily} For each $F \in {\mathcal R}(-1,1,1)$, consider the set $Y_F:=\{ l \in G(2,4);~ \operatorname{Sing}(F) \subset l\}$, and the set \begin{equation}\label{Y(r)}
Y(r) := \{(F,l,\varphi)|~ (F,l) \in {\mathcal R}(-1,1,1)\times Y_F,~ \varphi \in \Hom(F,i_{*}\mathcal{O}_l(r))_e / \mathrm{Aut}(i_{*}\mathcal{O}_l(r))\}. \end{equation} Then, for each $r \in \{-1,0,1\}$, the set $Y(r)$ is an irreducible scheme of dimension $8+2r$. In addition, the closure in $ {\mathcal M}(-1,2,2-2r)$ of the image of the morphism $Y(r)\to{\mathcal M}(-1,2,2-2r),\ (F,l,\varphi) \mapsto[\ker\varphi]$ is never an irreducible component of ${\mathcal M}(-1,2,2-2r)$.
\begin{proof} For each $[F] \in {\mathcal R}(-1,1,1)$, $\operatorname{Sing}(F)$ is a unique point, so that the set $Y_F$ is a surface in the Grassmannian $G(2,4)$ isomorphic to $\mathbb{P}^2$. Therefore it is irreducible of dimension $2$. To see that the dimension of $\Hom(F,i_{*}\mathcal{O}_l(r))_e /\mathrm{Aut}(i_{*} \mathcal{O}_l(r))$ is $3+2r$, apply the functor $\lhom(-,i_{*}\mathcal{O}_l(r))$ to the sequence \eqref{resR(-1,1,1)}, and recall that $\dim\mathrm{H}^0(\lext^1(F,i_{*}\mathcal{O}_l(r))) = 1$. Putting all these data together, we define the set $Y(r)$ by \eqref{Y(r)}. By construction it is an irreducible scheme of dimension $8+2r$. Indeed one has the surjective projection $$Y(r) \twoheadrightarrow {\mathcal R}(-1,1,1)\times Y_F ~~, ~~ ([F],l,\varphi) \mapsto ([F],l) ~~, $$ onto an irreducible scheme ${\mathcal R}(-1,1,1)\times Y_F$ of dimension 5 (see Remark \ref{R(-1,1,1)}), with fibers $$ \Hom(F,i_{*}\mathcal{O}_l(r))_e/ \mathrm{Aut}(i_{*}\mathcal{O}_l(r)) \overset{\textrm{open}}{\hookrightarrow} \Hom(F,i_{*}\mathcal{O}_l(r))/\mathrm{Aut}(i_{*} \mathcal{O}_l(r))$$ which have dimension $3+2r$. \end{proof} \end{Lema} With the previous Lemma, we are already in position to prove the first main result of this section.
\begin{Teo}\label{M(-1,2,4)} The moduli space ${\mathcal M}(-1,2,4)$ of rank 2 stable sheaves on $\mathbb{P}^3$ with Chern classes $c_1=-1,\ c_2=2,\ c_3= 4$ is the closure $\overline{{\mathcal R}(-1,2,4)}$ of the moduli space ${\mathcal R}(-1,2,4)$ of the rank 2 reflexive sheaves with Chern classes $c_1=-1,\ c_2=2,\ c_3=4$. Hence ${\mathcal M}(-1,2,4)$ is irreducible, rational, generically smooth, and of dimension 11. Moreover, \begin{equation}\label{dim Sing=0}
\{[F]\in{\mathcal M}(-1,2,4)|\ \dim\operatorname{Sing}(F)=0\}={\mathcal R}(-1,2,4). \end{equation} \end{Teo} \begin{proof} By \cite[Thm 9.2]{Harshorne-Reflexive}, ${\mathcal R}(-1,2,4)$ is irreducible of dimension $11$, and $\overline{{\mathcal R}(-1,2,4)}$ is an irreducible component of ${\mathcal M}(-1,2,4)$. Consider $[E] \in {\mathcal M}(-1,2,4) \setminus {\mathcal R}(-1,2,4)$. By Proposition \ref{ChernClasses}, $E^{\vee\vee}$ is stable, and either $\dim Q_E=1$ and $1\leq \mult Q_E \leq c_2(E)=2$, or $\dim Q_E=0$. We will study the possibilities for $\dim Q_E$ and $\mult Q_E$.
i) If $\dim Q_E=1,\ \mult Q_E = 2$, then by Proposition \ref{ChernClasses}(ii) $c_2(E^{\vee \vee}) = 0$, and by \cite[Thm 8.2]{Harshorne-Reflexive}, \begin{equation}\label{c3 le} c_3(E^{\vee\vee}) \leq c_2(E^{\vee \vee})^2, \end{equation} since $E^{\vee\vee}$ is stable. Therefore $c_3(E^{\vee \vee})=0$, that is $E^{\vee \vee}$ is a stable locally free rank 2 sheaf. Since $c_1(E^{\vee \vee})=-1,\ c_2(E^{\vee \vee})=0$, this contradicts \cite[Cor. 3.5]{H1978}.
ii) If $\dim Q_E=\mult Q_E = 1$, then $c_2(E^{\vee \vee})=1$ and, as above, $0\le c_3(E^{\vee \vee})\le c_2^2(E^{\vee \vee})=1$. Moreover, the equality $\mult Q_E = 1$ implies that $Q_E$ is supported on a line, say, $l$ and it fits in an exact sequence of the form: \begin{equation}\label{extensionQ_E} 0 \to Z_E\to Q_E\to i_{*}\mathcal{O}_l(r) \to 0, \end{equation} where $Z_E$ is the maximal $0$-dimensional subsheaf of $Q_E$ of length $s\ge0$, and $\mathcal{O}_{l}$ is the structure sheaf of the line $l$. This sequence and Proposition \ref{ChernClasses}(ii) yield \begin{equation} \label{chi(Q_E(t))} \chi(Q_E(t)) = t+r+s+1,\ \ \ c_3(Q_E)=2(r+s-1), \end{equation} and $c_2(E^{\vee \vee})=1,\ \ \ c_3(E^{\vee\vee})=2r+2s+3 \ge0$. (Here the inequality $c_3(E^{\vee \vee})\ge0$ follows from \cite[Prop. 2.6]{Harshorne-Reflexive}.) Thus from \eqref{c3 le} we obtain $r+s=-1$, i. e. \begin{equation}\label{c2,c3} c_2(E^{\vee \vee})=c_3(E^{\vee \vee})=1. \end{equation} As $E^{\vee\vee}$ is stable, this means that $[E^{\vee\vee}]\in{\mathcal M}(-1,1,1)$. Note that, by \eqref{extensionQ_E}, there is an epimorphism $E^{\vee \vee} \twoheadrightarrow Q_E$, so that from \eqref{extensionQ_E} and the formula \eqref{splitingF} in which we set $F=E^{\vee \vee}$ it follows that $r\ge-1$. This together with the relation $r+s=-1$ and the inequality $s\ge0$ shows that the only possible values for $r$ and $s$ are $r=-1,\ s=0$. We thus have $Q_E/Z_E=i_{*}\mathcal {O}_l(-1)$. This together with \eqref{c2,c3} yields that, if $l\cap\operatorname{Sing}(E^{\vee\vee})=\emptyset$, then, since $[E^{\vee\vee}]\in{\mathcal M}(-1,1,1)$, it follows that $[E]$ belongs to the scheme $\mathcal{X}(-1,1,1,-1,0)$ defined in display \eqref{def calX}. Note that $\dim\mathcal{X}(-1,1,1,-1,0)=7$ by Remark \ref{X(-1,1,1,-1,0)}. 
Since by the deformation theory (see Theorem \ref{dimext1}) any irreducible component of ${\mathcal M}(-1,2,4)$ has dimension at least 11, the last equality shows that the dimension of $\mathcal{X}(-1,1,1,-1,0)$ is too small to fill an irreducible component of ${\mathcal M}(-1,2,4)$.
iii) If $\dim Q_E=0$, then $s=\mathrm{length}(Q_E)>0$ and, by Proposition \ref{ChernClasses}.c), $c_2(E)= c_2(E^{\vee\vee})=2$, $c_3(E^{\vee\vee})=c_3(E)+2s= 4+2s\ge6$. Therefore, $4=c_2^2(E^{\vee\vee})<c_3(E^{\vee\vee })$. But this inequality contradicts the stability of $E^{\vee \vee}$ by \cite[Thm. 8.2(d)]{Harshorne-Reflexive}.
In conclusion, we have proved that ${\mathcal M}(-1,2,4)= \overline{{\mathcal R}(-1,2,4)}$, and the equality in display \eqref{dim Sing=0} follows from iii) above. The rationality of ${\mathcal R}(-1,2,4)$ is known from \cite{Chang2}. Hence, ${\mathcal M}(-1,2,4)$ is rational. \end{proof}
As a by product of the previous proof, we obtain the following interesting result.
\begin{Cor} The complement of ${\mathcal R}(-1,2,4)$ in ${\mathcal M}(-1,2,4)$ is precisely $\mathcal{X}(-1,1,1,-1,0)$. \end{Cor}
\section{Description of families with 0-dimensional singularities} \label{irreducible of M(2)}
In this section we describe explicitly the sheaves in the families $\mathrm{T}(-1,2,2,1)$, $\mathrm{T}(-1,2,4,1)$ and $\mathrm{T}(-1,2,4,2)$. This description will be used later in the study of irreducible components of the moduli spaces $\mathcal{M} (-1,2,c_3)$ for $c_3=2$ and $c_3=4$. Everywhere below for a coherent sheaf $F$ on a given scheme $X$ we denote by $\mathbf{P}(F)$ the projective spectrum of the symmetric algebra $\mathrm{Sym}_{{\mathcal O}_X}(F)$. Besides, as before, for any point $p\in\mathbb{P}^3$ we denote $A_p=\mathrm{Aut} ({\mathcal O}_p)\simeq\mathbf{k}$.
We start with the following theorem describing, for $i=1$ and $i=2$, the irreducible families $\mathrm{T}(-1,2,2i,1)$ defined in Section \ref{New Irreducible Components} as the closures in ${\mathcal M}(-1,2,2i-2)$ of their open subsets $\mathcal{T} (-1,2,2i,1)$. For these $i$, consider the moduli spaces $R_i:={\mathcal R}(-1,2,2i)$ and the universal ${\mathcal O}_{\mathbb{P}^3\times R_i}$-sheaves $\mathbf{F}_i$, respectively.
\begin{Teo}\label{T(-1,2,2i,1)} The scheme $\mathrm{T}(-1,2,2i,1)$, for $i\in\{1,2\}$, is an irreducible 15-dimensional component of ${\mathcal M}(-1,2,2i-2)$. This component contains an open subset of $\mathrm{T}(-1,2,2i,1)$, isomorphic to $\mathbf{P}(\mathbf{F}_i)$, which consists of all the points $[E]\in{\mathcal M}(-1,2,2i-2)$ such that $E^{\vee\vee}/E$ is a 0-dimensional scheme of length 1. This subset $\mathbf{P} (\mathbf{F}_i)$ contains the open subset $\mathcal{T} (-1,2,2i,1)$.
\end{Teo} \begin{proof} Let $i\in\{1,2\}$. For any point $y\in R_i$ we denote
$F_{i,y}=\mathbf{F}_i|_{\mathbb{P}^3\times\{y\}}$. By \cite[Lemma 4.5]{St} $\mathbf{P}(F_{i,y})$ is an irreducible 4-dimensional scheme for any $y\in R_i$. Hence, since $\dim R_i=11$, $i=1,2,$ it follows that $\mathbf{P} (\mathbf{F}_i)$ is an irreducible 15-dimensional scheme. Consider the structure morphisms $\pi_i: \mathbf{P} (\mathbf{F}_i)\to\mathbb{P}^3\times R_i$ and the compositions $\theta_i=pr_1\circ\pi_i: \mathbf{P}(\mathbf{F}_i)\to\mathbb{P}^3$. By the functorial property of projective spectra \cite[Ch. II, Prop. 7.12]{H} we have for $i=1,2$: \begin{equation}\label{descrn of P(Fi)} \mathbf{P}(\mathbf{F}_i)=\{z=(p,[F_i],[\psi]=\psi\
\mathrm{mod}A_p)|\ (p,[F_i])=\pi_i(z),\ \psi:\ F_i \to{\mathcal O}_p\ \mathrm{is\ an\ epimorphism}\}. \end{equation} Hence, each point $z=(p,[F_i],[\psi])\in \mathbf{P}(\mathbf{F}_i)$ defines an exact triple \begin{equation}\label{triple in T(-1,2,2i,1)} 0\to E_i\to F_i\xrightarrow{\psi}{\mathcal O}_p\to0,\ \ \ \ \ \ [E_i=E_{i,z}:=\ker\psi]\in{\mathcal M}(-1,2,2i-2),\ \ \ F_i=E_{i,z}^{\vee\vee},\ \ \ \ \ \ i=1,2. \end{equation} This triple is globalized to an ${\mathcal O}_{\mathbb{P}^3\times R_i }$-triple in the following way. Namely, let $\mathbf{ \tilde{F}}_i=\mathbf{F}_i\otimes_{{\mathcal O}_{R_i}}{\mathcal O}_{ \mathbf{P}(\mathbf{F}_i)}$ and consider the "diagonal" embedding $j:\mathbf{P}(\mathbf{F}_i)\hookrightarrow\mathbb{P}^3 \times\mathbf{P}(\mathbf{F}_i),\ z\mapsto(\theta_i(z),z)$. By construction, $j^*\mathbf{\tilde{F}}_i=\pi_i^* \mathbf{F}_i$ and we obtain the composition of surjections $\boldsymbol{\psi}:\ \mathbf{\tilde{F}}_i \twoheadrightarrow j_*j^*\mathbf{\tilde{F}}_i =j_*\pi_i^*\mathbf{F}_i\twoheadrightarrow j_* {\mathcal O}_{\mathbf{P}(\mathbf{F}_i)}(1)$ which yields an exact ${\mathcal O}_{\mathbb{P}^3\times\mathbf{P}(\mathbf{F}_i)}$- triple, where $\mathbf{E}_i:=\ker\boldsymbol{\psi}$: \begin{equation}\label{bold Ei} 0\to\mathbf{E}_i\to\mathbf{\tilde{F}}_i\xrightarrow {\boldsymbol{\psi}}j_*{\mathcal O}_{\mathbf{P}(\mathbf{F}_i)}(1)\to0, \ \ \ \ \ \ i=1,2. \end{equation} By construction, the sheaves in this triple are flat over $R_i$, hence its restriction onto $\mathbb{P}^3\times\{z\}$ for any $z=(p, [F_i],[\psi])\in\mathbf{P}(\mathbf{F}_i)$ yields the triple \eqref{triple in T(-1,2,2i,1)} with $E_{i,z}=
\mathbf{E}_i|_{\mathbb{P}^3\times\{z\}}$. Thus we obtain the modular morphism \begin{equation}\label{modular fi} f_i:\ \mathbf{P}(\mathbf{F}_i)\to{\mathcal M}(-1,2,2i-2),\ \ \ z\mapsto[E_{i,z}],\ \ \ \ \ \ \ i=1,2. \end{equation} This morphism is clearly an embedding, since the data $([F_i],p,[\psi])$ in the triple \eqref{triple in T(-1,2,2i,1)} are uniquely recovered from the point $[E=E_{i,z}]\in{\mathcal M}(-1,2,2i-2)$; namely, $F_i:= E^{\vee\vee},\ p:=\supp(Q_E)$, where $Q_E:= E^{\vee\vee}/E\simeq{\mathcal O}_p$ since $\mathrm{length}~Q_E=1$ and $\psi:F_i\twoheadrightarrow {\mathcal O}_p$ is the quotient epimorphism. We therefore identify $\mathbf{P}(\mathbf{F}_i) $ with its image under the morphism $f_i$.
Last, under the description \eqref{descrn of P(Fi)} of $\mathbf{P}(\mathbf{F}_i)$ we have, by the definition of $\mathcal{T}(-1,2,2i,1)$, that $\mathcal{T}(-1,2,2i,1)
=\{z=(p,[F_i],[\psi])\in\mathbf{P}(\mathbf{F}_i)|\ p\not\in\operatorname{Sing}(F_i)\}$ is an open subset of $\mathbf{P}(\mathbf{F}_i)$ which is dense since $\mathbf{P} (\mathbf{F}_i)$ is irreducible. Hence, by definition, its closure in ${\mathcal M}(-1,2,2i-2)$ coincides with $\mathrm{T}(-1,2,2i,1)$. In addition, it is an irreducible component of ${\mathcal M}(-1,2, 2i-2)$ by Theorem \ref{0dcomp}. \end{proof}
Let us introduce one more piece of notation. For any
$y\in R_2$, let $F_y:=\mathbf{F}_2|_{\mathbb{P}^3\times\{y\}}$ and let $pr_2:\mathbb{P}^3\times R_2\to R_2$ be the projection. Besides, for an arbitrary ${\mathcal O}_{\mathbb{P}^3\times R_2}$-sheaf $A$ and an integer $m\in\mathbb{Z}$ let $A(m):=A\otimes(\op3(m) \boxtimes{\mathcal O}_{R_2})$. The following remark will be important below in the study of the scheme $\mathbf{P} (\mathbf{E}_2)$ for the ${\mathcal O}_{\mathbb{P}^3\times\mathbf{P} (\mathbf{F}_2)}$-sheaf $\mathbf{E}_2$ defined in \eqref{bold Ei} for $i=2$. \begin{Remark}\label{resolution for F2} From \cite[Lemma 9.6 and Proof of Lemma 9.3] {Harshorne-Reflexive} it follows that, for any $y\in R_2$, the sheaf $F_y$ fits in an exact triple \begin{equation}\label{triple for Fy} 0\to\op3(-3)\to2\cdot\op3(-1)\oplus\op3(-2) \xrightarrow{\phi} F_y\to0. \end{equation} This triple clearly globalizes to a locally free ${\mathcal O}_{\mathbb{P}^3\times R_2}$-resolution of the universal sheaf $\mathbf{F}_2$: \begin{equation}\label{triple for F} 0\to\mathbf{L}_2\to\mathbf{L}_1\xrightarrow{\Phi}\mathbf{F}_2 \to0,\ \ \ \ \ \ \rk\mathbf{L}_1=3,\ \ \ \rk\mathbf{L}_2=1. \end{equation} Explicitly, $\mathbf{L}_1$ fits in the exact triple $0\to\op3(-1)\boxtimes M_0\to\mathbf{L}_1\to\op3(-2) \boxtimes M_1\to0$ and $\mathbf{L}_2=\op3(-3)\boxtimes M_2$, where $M_0,\ M_1,\ M_2$ are locally free ${\mathcal O}_{R_2}$-sheaves of ranks 2, 1, 1, respectively, which are determined by $\mathbf{F}_2$ as: $M_0=pr_{2*}(\mathbf{F}_2(1))$, $M_1=pr_{2*}(\mathbf{F}_2(2))/pr_{2*}(\mathrm{im}(ev))$, where $ev:\op3(1)\boxtimes M_0\to\mathbf{F}_2(2)$ is the evaluation morphism, and $M_2=\ker(pr_{2*}\mathbf{L}_1(3) \xrightarrow{pr_{2*}\Phi}pr_{2*}\mathbf{F}_2(3))$. \end{Remark}
Consider the structure morphism $\pi_2:\mathbf{P} (\mathbf{F}_2)\to\mathbb{P}^3\times R_2$. Note that the triple \eqref{triple for Fy} immediately yields that $\pi_2^{-1} (p,y)$ equals to $\mathbb{P}^1$ if $p\not\in\operatorname{Sing}(F_y )$, respectively, equals $\mathbb{P}^2$ if $p\in\mathrm {Sing}(F_y)$. As $\mathrm{codim}(\operatorname{Sing}(F_y),\mathbb{P}^3)=3$, it follows by the definition of $\mathcal{T}(-1,2,2i,1)$ that \begin{equation}\label{codim-tau} \mathrm{codim}_{\mathbf{P}(\mathbf{F}_2)} (\mathbf{P}(\mathbf{F}_2)\smallsetminus\mathcal{T} (-1,2,4,1))=2. \end{equation}
Now proceed to the study of the scheme $\mathbf{P} (\mathbf{E}_2)$ endowed with the structure morphism $\pi: \mathbf{P}(\mathbf{E}_2)\to\mathbb{P}^3\times\mathbf{P}(\mathbf{F}_2)$ and consider the composition $\tau=pr_1\circ\pi:\mathbf{P} (\mathbf{E}_2)\to\mathbb{P}^3$. Similarly to \eqref{descrn of P(Fi)}, in view of the functorial property of projective spectra \cite[Ch. II, Prop. 7.12]{H} we obtain the following description of the scheme $\mathbf{P}(\mathbf{E}_2)$: \begin{equation}\label{descrn of P(E2)} \mathbf{P}(\mathbf{E}_2)=\{w=(q,[E_2],[\varphi]=\varphi\
\mathrm{mod}A_q)|\ (q,[E_2])=\pi(w),\ \varphi:\ E_2\to{\mathcal O}_q\ \mathrm{is\ an\ epimorphism}\}. \end{equation} It follows now that each point $w=(q,[E_2],[\varphi])\in \mathbf{P}(\mathbf{E}_2)$ defines an exact triple \begin{equation}\label{triple in T(-1,2,4,2)} 0\to E_w\to E_2\xrightarrow{\varphi}{\mathcal O}_q\to0,\ \ \ \ \ \ [E_w:=\ker\varphi]\in{\mathcal M}(-1,2,0). \end{equation} This triple is globalized to an ${\mathcal O}_{\mathbb{P}^3\times\mathbf{P} (\mathbf{F}_2)}$-triple which is constructed completely similar to the triples \eqref{bold Ei}. Namely, let $\mathbf{\tilde{E}}_2 =\mathbf{E}_2\otimes_{{\mathcal O}_{\mathbf{P}(\mathbf{F}_2)}}{\mathcal O}_{ \mathbf{P}(\mathbf{E}_2)}$ and consider the "diagonal" embedding $j:\mathbf{P}(\mathbf{E_2})\hookrightarrow\mathbb{P}^3\times \mathbf{P}(\mathbf{E}_2),\ w\mapsto(\tau(w),w)$. Then $j^*\mathbf{\tilde{E}}_2=\pi^*\mathbf{E}_2$ and we obtain the composition of surjections $\boldsymbol{\varphi}:\ \mathbf{\tilde{E}}_2 \twoheadrightarrow j_*j^*\mathbf{\tilde{E}}_2 =j_*\pi^*\mathbf{E}_2\twoheadrightarrow j_*{\mathcal O}_{\mathbf{P} (\mathbf{E}_2)}(1)$ which yields an exact ${\mathcal O}_{\mathbb{P}^3\times \mathbf{P}(\mathbf{E}_2)}$-triple, where $\mathbf{E}:= \ker\boldsymbol{\varphi}$: \begin{equation}\label{bf E} 0\to\mathbf{E}\to\mathbf{\tilde{E}}_2\xrightarrow {\boldsymbol{\varphi}}j_*{\mathcal O}_{\mathbf{P}(\mathbf{E}_2)}(1) \to0. \end{equation} By construction, the restriction of this triple onto $\mathbb{P}^3\times\{w\}$ for any $w=(p,[E_2],[\varphi])\in\mathbf{P} (\mathbf{E}_2)$ yields the triple \eqref{triple in
T(-1,2,4,2)}, where $E_w=\mathbf{E}|_{\mathbb{P}^3\times \{w\}}$ and where $[E_2]\in\mathbf{P}(\mathbf{F}_2)$ by Theorem \ref{T(-1,2,2i,1)},(ii) fits in the triple \eqref{triple in T(-1,2,2i,1)} for $i=2$: $0\to E_2\to F_2\xrightarrow{\psi}{\mathcal O}_p\to0$, $F_2=E_2^{\vee\vee}$. Combining this triple with \eqref{triple in T(-1,2,4,2)}, we obtain the equality $F_2=E_w^{\vee\vee}$ and two exact triples, where $E=E_w$: \begin{equation}\label{QE of length 2} 0\to E\to E^{\vee\vee}\to Q_E\to0,\ \ \ \ \ \ \ \ \ \ 0\to{\mathcal O}_q\to Q_E\to{\mathcal O}_p\to0. \end{equation} Besides, we have a modular morphism $f:\ \mathbf{P}(\mathbf{E}_2)\to{\mathcal M}(-1,2,0),\ \ \ w\mapsto [E_w]$. From \eqref{QE of length 2} and the definition of the family $\mathcal{T}(-1,2,4,2)$ given in Theorem \ref{0dcomp} it follows that \begin{equation}\label{open tau(-1,2,4,2)} \mathcal{T}(-1,2,4,2)=\{[E]\in f(\mathbf{P}(\mathbf{E}_2))\
|\ \supp(Q_E)=p\sqcup q,\ \supp(Q_E)\cap
\operatorname{Sing}(E^{\vee\vee})=\emptyset \}. \end{equation}
\begin{Teo}\label{T(-1,2,4,2)}
The scheme $\mathrm{T}(-1,2,4,2)$ is an irreducible
19-dimensional component of ${\mathcal M}(-1,2,0)$. This component
contains a dense subset, isomorphic to
$f(\mathbf{P}(\mathbf{E}_2))$, which consists of all the points
$[E]\in{\mathcal M}(-1,2,0)$ such that $E^{\vee\vee}/E$ is a
0-dimensional scheme of length 2. This subset
$f(\mathbf{P}(\mathbf{E}_2))$ contains $\mathcal{T}(-1,2,4,2)$ as
the dense open subset described in \eqref{open tau(-1,2,4,2)}.
\end{Teo}
\begin{proof}
We have to prove the irreducibility of $\mathbf{P}(\mathbf{E}_2)$.
Since $\mathbf{P}(\mathbf{F}_2)$ is irreducible, it is enough to
prove that, for an arbitrary point
$z=(p,[F_2],[\psi])\in\mathbf{P}(\mathbf{F}_2)$, the fiber
$p_E^{-1}(z)$ of the composition
$p_E:\mathbf{P}(\mathbf{E}_2)\xrightarrow{\pi}\mathbb{P}^3\times
\mathbf{P}(\mathbf{F}_2)\xrightarrow{pr_2}\mathbf{P}(\mathbf{F}_2)$
is irreducible of dimension 4. Note that the sheaves $F_2$ and
$E_2=\mathbf{E}_2|_{\mathbb{P}^3\times\{z\}}$ fit in the exact triple \eqref{triple in T(-1,2,2i,1)} for $i=2$. Besides, $F_2$ fits in the exact triple \eqref{triple for Fy} in which we set $F_y= F_2$. These two triples are included in a commutative diagram \begin{equation}\label{comm diagram0} \xymatrix{& & 0 \ar[d] & 0 \ar[d] & \\ 0\ar[r]& \op3(-3)\ar@{=}[d] \ar[r] & \mathcal{G} \ar[d]\ar[r] & E_2\ar[d]\ar[r] & 0\\ 0\ar[r]& \op3(-3) \ar[r] & 2\cdot\op3(-1)\oplus\op3(-2) \ar[r]^-{\phi} \ar[d]^{\lambda} & F_2\ar[r]\ar[d]^{\psi} & 0\\ & &\mathcal{O}_p\ar@{=}[r]\ar[d] & \mathcal{O}_p \ar[d] & & \\ & & 0 & 0, & & } \end{equation} where $\lambda:=\psi\circ\phi$ and $\mathcal{G}=\ker (\lambda)$. Here, the surjection $\lambda$ induces an embedding of a point $$ {}^{\sharp}\lambda:\ w=\mathbf{P}({\mathcal O}_p) \hookrightarrow W:=\mathbf{P}(2\cdot\op3(-1)\oplus\op3(-2)), $$ and from standard properties of projective spectra it follows that $\mathbf{P}(\mathcal{G})$ is a small birational modification of $W$. More precisely, this modification as the composition of the blowing up $\sigma_w$ of $W$ at the point $w$ and the contraction of the proper preimage of the fiber $\pi_W^{-1}(p,z)$ under $\sigma_w$, where $\pi_W:W\to\mathbb{P}^3$ is the structure morphism. In particular,$\mathbf{P}(\mathcal{G} )$ is an irreducible projective scheme of dimension $\dim\mathbf{P}(\mathcal{G})=5$. By the same reason, from the rightmost vertical triple of \eqref{comm diagram0} it follows that, if $p\not\in\operatorname{Sing}(F_2)$, the scheme $\mathbf{P}(E_2)$ is a small birational modification of $\mathbf{P}(F_2)$. Namely, this modification is the composition of the blowing up $\sigma_p$ of $\mathbf{P}(F_2)$ at its smooth point $\mathbf{P}({\mathcal O}_p)$, and the contaraction of the proper preimage of the fiber $\pi^{-1}(p,z)$ under $\sigma_p$. 
Therefore, since by \cite[Lemma 4.5]{St} $\mathbf{P}(F_2)$ is
irreducible, $\mathbf{P}(E_2)$ is an irreducible scheme of
dimension $\dim\mathbf{P}(E_2)=4$, if
$p\not\in\operatorname{Sing}(F_2)$, i.e.\ when
$z\in\mathcal{T}(-1,2,4,1)$. This implies that the scheme
$\mathbf{P}(\mathbf{E}_2)_0:=p_E^{-1}(\mathcal{T}(-1,2,4,1))$ is
irreducible of dimension 19, since by Theorem \ref{0dcomp}
$\mathcal{T}(-1,2,4,1)$ is irreducible of dimension 15.
Next, an easy computation with the diagram \eqref{comm diagram0}
yields: $(E_2\otimes{\mathcal O}_p)^{\vee}\subset
(\mathcal{G}\otimes{\mathcal O}_p)^{\vee}=\mathbf{k}^5$, hence
\begin{equation}\label{fibre in P4}
\pi_E^{-1}(z,p)=\mathbb{P}((E_2\otimes{\mathcal O}_p)^{\vee})\subset
\mathbb{P}((\mathcal{G}\otimes{\mathcal O}_p)^{\vee})=\mathbb{P}^4.
\end{equation}
Now, according to Remark \ref{resolution for F2}, the middle
horizontal triple in \eqref{comm diagram0} globalizes to the exact
${\mathcal O}_{\mathbb{P}^3\times\mathbf{P}(\mathbf{F}_2)}$-triple
$0\to\mathbf{\tilde{L}}_2\to\mathbf{\tilde{L}}_1\to
\mathbf{\tilde{F}}_2\to0$ obtained by lifting the exact triple
\eqref{triple for F} from $\mathbb{P}^3\times R_2$ onto
$\mathbb{P}^3\times\mathbf{P}(\mathbf{F}_2)$. Similarly, the rightmost
vertical, the middle vertical and the upper horizontal triples in
\eqref{comm diagram0} globalize, respectively, to the triple
\eqref{bold Ei} for $i=2$, the triple $0\to\mathbf{G}\to
\mathbf{\tilde{L}}_1\to j_*{\mathcal O}_{\mathbf{P}(\mathbf{F}_2)}(1)\to0$
and the triple
\begin{equation}\label{triple for G,E}
0\to\mathbf{\tilde{L}}_2\to\mathbf{G}\to\mathbf{E}\to0,
\end{equation}
where $\mathbf{G}$ is an ${\mathcal O}_{\mathbb{P}^3\times\mathbf{P}
(\mathbf{F}_2)}$-sheaf such that
$\mathbf{G}|_{\mathbb{P}^3\times\{z\}}=\mathcal{G}$. Consider the
composition
$$
p_G:\mathbf{P}(\mathbf{G})
\xrightarrow{\pi_G}\mathbf{P}(\mathbf{F}_2)\times\mathbb{P}^3
\xrightarrow{pr_1}\mathbf{P}(\mathbf{F}_2)
$$
where $\pi_G$ is the structure morphism of
$\mathbf{P}(\mathbf{G})$. Note that the sheaf
$\mathbf{\tilde{L}}_2$ in the triple \eqref{triple for G,E} is
invertible, hence this triple shows that $\mathbf{P}(\mathbf{E})$
is a Cartier divisor in $\mathbf{P}(\mathbf{G})$ defined as the
zero-set of some section
$0\ne s\in\mathrm{H}^0({\mathcal O}_{\mathbf{P}(\mathbf{G})}(1)
\otimes p_G^*\mathbf{\tilde{L}}_2^{\vee})$. On the other hand, the
fibers $p_G^{-1}(z)=\mathbf{P}(\mathcal{G})$ of $p_G$ are
irreducible projective schemes, that is, $p_G$ is a projective
morphism with irreducible 5-dimensional fibers over the
irreducible 15-dimensional scheme $\mathbf{P}(\mathbf{F}_2)$. It
follows that, if $\mathbf{P}(\mathbf{E}_2)$ is reducible, then any
of its irreducible components $U$ has dimension
\begin{equation}\label{dim U}
\dim U=\dim\mathbf{P}(\mathbf{G})-1=19.
\end{equation}
Note that, for any $z=(p,[F_2],\psi\ \mathrm{mod}A_p)\in
\mathbf{P}(\mathbf{F}_2)$, we have $F_2|_{\mathbb{P}^3\smallsetminus
\{p\}}=E_2|_{\mathbb{P}^3\smallsetminus\{p\}}$, hence, since $\dim
\mathbf{P}(F_2)=4$, $\mathbf{P}(E_2|_{\mathbb{P}^3\smallsetminus\{p\}} )$ is a 4-dimensional scheme. On the other hand, by definition, $$ \mathbf{P}(E_2)=p_E^{-1}(z)=\pi_E^{-1}(\{z\}
\times\mathbb{P}^3)=\mathbf{P}(E_2|_{\mathbb{P}^3\smallsetminus\{p\}})\cup \pi_E^{-1}(z,p).$$ Hence, by \eqref{fibre in P4}, for any $z\in\mathbf{P}(\mathbf{F}_2)$, we have \begin{equation}\label{dim fibre} \dim\mathbf{P}(E_2)=4. \end{equation}
This together with \eqref{codim-tau} implies that $\mathbf{P}(\mathbf{E}_2)\smallsetminus \mathbf{P}(\mathbf{E}_2)_0$ has codimension 2 in $\mathbf{P} (\mathbf{E}_2)$. Therefore, by \eqref{dim U}, $\mathbf{P} (\mathbf{E}_2)$ is irreducible and contains $\mathbf{P} (\mathbf{E}_2)_0$ as a dense open subset.
Finally, remark that, by the description given in display \eqref{open tau(-1,2,4,2)}, the set $\mathcal{T}(-1,2,4,2)$ is a nonempty open subset of $\mathbf{P}(\mathbf{E}_2)_0$, hence it is dense in $\mathbf{P}(\mathbf{E}_2)$. \end{proof}
\section{Description of families with mixed singularities}\label{descr of X}
We now proceed to the description of the sets $X(-1,1,1,-1,1 )$ and $X(-1,1,1,0,1)$. Our aim is to construct explicitly certain open dense subsets of them, together with a universal family of sheaves over these subsets, which will be used in our further results. We start with the following lemma. \begin{Lema}\label{two irred} Let $G:=G(2,4)$ be the Grassmannian of lines in $\mathbb{P}^3$ and $M={\mathcal M}(-1,1,1)\simeq\mathbb{P}^3$ (see Remark \ref{R(-1,1,1)}). Consider $[F]\in M$, and let $\mathcal{E}$ and $E$ be the sheaves on $\mathbb{P}^3$ fitting in the exact triples \begin{equation}\label{triple F, calE} 0\to\mathcal{E}\to F\xrightarrow{\varepsilon}{\mathcal O}_l(-1)\to0, \end{equation} \begin{equation}\label{triple calE,E} 0\to E\to\mathcal{E}\xrightarrow{\gamma}{\mathcal O}_p\to0, \end{equation} for some line $l\in G$ and some point $p\in\mathbb{P}^3$. Then $\mathbf{P}(\mathcal{E})$ and $\mathbf{P}(E)$ are irreducible generically smooth schemes of dimension 4. \end{Lema} \begin{proof} We first show that $\mathcal{E}$ fits in the exact triple: \begin{equation}\label{resoln calE} 0\to\op3(-3)\to\op3(-2)\oplus2\cdot\op3(-1)\xrightarrow{e} \mathcal{E}\to0. \end{equation} Let $x_0:=\operatorname{Sing}(F)$ (see Remark \ref{R(-1,1,1)}). Consider the two possible cases (a) $x_0\in l$ and (b) $x_0\not\in l$.\\ Case (a): $x_0\in l$. Note that from the definition of the sheaf $F$ it follows easily that $F$ fits in the exact triple $0\to\op3 (-1)\to F\xrightarrow{\delta}{\mathcal I}_{l,\mathbb{P}^3}\to0$. Since ${\mathcal I}_ {l,\mathbb{P}^3}\otimes{\mathcal O}_l=N^{\vee}_{l/\mathbb{P}^3}\simeq2\cdot{\mathcal O}_l(-1) $, it follows that there exists an epimorphism $\beta:{\mathcal I}_{l,\mathbb{P}^3} \twoheadrightarrow{\mathcal O}_l(-1)$ such that $\beta\circ\delta= \varepsilon$, where $\varepsilon$ is the epimorphism in \eqref{triple F, calE}. 
Besides,$\ker\beta\simeq {\mathcal I}_{C,\mathbb{P}^3}$, where $C$ is a nonreduced conic supported on $l$, and we obtain exact triples $$ 0\to\op3(-1)\to\mathcal{E}\to{\mathcal I}_{C,\mathbb{P}^3}\to0,\ \ \ \ \ \ 0\to\op3(-3)\to\op3(-1)\oplus\op3(-2)\to{\mathcal I}_{C,\mathbb{P}^3}\to0. $$ These two triples yield the resolution \eqref{resoln calE} by push-out, since $\ext^1(\op3(-1)\oplus\op3(-2),\op3(-1))=0$.\\ Case(b): $x_0\not\in l$. Note that, by Remark \eqref{R(-1,1,1)}, $F$ fits in the exact triple $0\to\op3(-2)\to3\cdot\op3(-1) \xrightarrow{\alpha}F\to0$. Since clearly $\ker(\varepsilon \circ\alpha:3\cdot\op3(-1)\twoheadrightarrow{\mathcal O}_l(-1))\cong 2\cdot\op3(-1)\oplus{\mathcal I}_{l,\mathbb{P}^3}(-1)$, the last triple together with the triple \eqref{triple F, calE} yields the exact triple $$ 0\to\op3(-3)\xrightarrow{i}2\cdot\op3(-1)\oplus{\mathcal I}_{l,\mathbb{P}^3} (-1)\to\mathcal{E}\to0. $$ Let $c:2\cdot\op3(-1)\oplus{\mathcal I}_{l,\mathbb{P}^3}(-1)\twoheadrightarrow {\mathcal I}_{l,\mathbb{P}^3}(-1)$ be the canonical epimorphism and consider the composition $c\circ i:\op3(-2)\to{\mathcal I}_{l,\mathbb{P}^3}(-1)$. If this composition is the zero map, then $\mathrm{im}(i)\subset2 \cdot\op3(-1)$ and $\coker(i)\subset\mathcal{E}$. Since $\mathcal{E}$ is torsion free, it follows that $\coker(i)= {\mathcal I}_{m,\mathbb{P}^3}$ for some line $m$ distinct from $l$, and $\mathcal{E}$ fits in the exact triple $0\to{\mathcal I}_{m,\mathbb{P}^3} \to\mathcal{E}\to{\mathcal I}_{l,\mathbb{P}^3}(-1)\to0$. This triple implies that $m\subset\operatorname{Sing}(\mathcal{E})$, contrary to the evident equality $\operatorname{Sing}(\mathcal{E})=x_0\sqcup l$. Hence the composition $c\circ i$ is a nonzero morphism, so that $\coker(c\circ i)\cong{\mathcal O}_{\mathbb{P}^2}(-2)$ for some projective plane $\mathbb{P}^2$ in $\mathbb{P}^3$. We thus obtain an exact triple $0\to2\cdot\op3(-1)\to\mathcal{E}\to {\mathcal O}_{\mathbb{P}^2}(-2)\to0$. 
This triple and the exact triple
$0\to\op3(-3)\to\op3(-2)\to{\mathcal O}_{\mathbb{P}^2}(-2)\to0$ by push-out
yield \eqref{resoln calE}, since
$\ext^1(\op3(-2),2\cdot\op3(-1))=0$.
Now from \eqref{resoln calE} it follows that
$\mathbf{P}(\mathcal{E})$ is a Cartier divisor in
$W:=\mathbf{P}(\op3(-2)\oplus2\cdot\op3(-1))$, and the same
argument as in the proof of Theorem \ref{T(-1,2,4,2)} shows that
$\mathbf{P}(\mathcal{E})$ is irreducible. Next, the triples
\eqref{triple calE,E} and \eqref{resoln calE} yield exact triples
$$
0\to\op3(-3)\to\mathcal{G}\to E\to0,\ \ \ \ \ \
0\to\mathcal{G}\to\op3(-2)\oplus2\cdot\op3(-1)
\xrightarrow{\gamma\circ e}{\mathcal O}_p\to0.
$$
The second triple here shows that $\mathbf{P}(\mathcal{G})$ is a
small birational modification of the scheme $W$ defined above,
hence it is irreducible. On the other hand, the first triple shows
that $\mathbf{P}(E)$ is a Cartier divisor in
$\mathbf{P}(\mathcal{G})$, and again the same argument as in the
proof of Theorem \ref{T(-1,2,4,2)} yields the irreducibility of
$\mathbf{P}(E)$.
\end{proof}
Now let $\Gamma=\{(x,l)\in\mathbb{P}^3\times G\ |\ x\in l\}$ be the
incidence graph, and let $\mathbf{F}$ be the universal
${\mathcal O}_{\mathbb{P}^3\times M}$-sheaf (see Remark \ref{R(-1,1,1)}).
For $l\in G$, denote $A_l:=\mathrm{Aut}({\mathcal O}_l(-1))$ and
$A'_l:=\mathrm{Aut}({\mathcal O}_l)$; note that
$A_l\simeq\mathbf{k}^*\simeq A'_l$. Define the sets
\begin{equation}\label{B}
B:=\{(l,[F],\epsilon\ \mathrm{mod}A_l)\ |\ (l,[F])\in G\times M,\
\epsilon:F\to{\mathcal O}_l(-1)\ \mathrm{is\ an\ epimorphism}\}.
\end{equation}
\begin{equation}\label{B'}
B':=\{(l,[F],\epsilon'\ \mathrm{mod}A'_l)\ |\ (l,[F])\in G
\times M,\ \epsilon':F\to{\mathcal O}_l\ \mathrm{is\ an\ epimorphism}\}.
\end{equation}
We have the following proposition.
\begin{Prop}\label{descriptn of B,B'} The following claims are true. \begin{itemize} \item[(i)] $B$, respectively, $B'$ is the set of closed points of an irreducible scheme of dimension 7, respectively, of dimension 9. \item[(ii)] There is an ${\mathcal O}_{\mathbb{P}^3\times B}$-sheaf $\boldsymbol {\mathcal{E}}$ and an invertible ${\mathcal O}_{\mathbf{\Gamma}}$-sheaf $\mathbf{L}$ fitting in the exact triple $0\to\boldsymbol{\mathcal{E}}\to\mathbf{F}_B \xrightarrow{\varepsilon}\mathbf{L}\to0$, where $\mathbf{F}_B=\mathbf{F}\underset{{\mathcal O}_M}{\otimes}{\mathcal O}_B$ and $\mathbf{\Gamma}=\Gamma\times_MB$. Respectively, there is an ${\mathcal O}_{\mathbb{P}^3\times B'}$-sheaf $\boldsymbol{\mathcal{E'}}$ and an invertible ${\mathcal O}_{\mathbf {\Gamma'}}$-sheaf $\mathbf{L'}$ fitting in the exact triple $0\to\boldsymbol{\mathcal{E'}}\to\mathbf{F}_{B'} \xrightarrow{\varepsilon}\mathbf{L'}\to0$, where $\mathbf{F}_{B'}=\mathbf{F}\underset{{\mathcal O}_M}{\otimes}{\mathcal O}_{B '}$ and $\mathbf{\Gamma'}=\Gamma\times_M{B'}$. These triples, being restricted onto $\mathbb{P}^3\times\{b\}$, respectively, onto $\mathbb{P}^3\times\{b'\}$ for any points $b=(l,[F],\epsilon\ \mathrm{mod}A_l)\in B$, $b'=(l,[F],\epsilon'\ \mathrm{mod}A'_l)\in B$, yield: \begin{equation}\label{triple with cal E} 0\to\mathcal{E}_b\xrightarrow{\iota}F\xrightarrow{\epsilon} {\mathcal O}_l(-1)\to0,\ \ \ \ \ \
\mathcal{E}_b\cong\boldsymbol{\mathcal{E}}|_{\mathbb{P}^3\times\{b\}}. \end{equation} \begin{equation}\label{triple with cal E'} 0\to\mathcal{E'}_{b'}\xrightarrow{\iota'}F\xrightarrow {\epsilon'}{\mathcal O}_l\to0,\ \ \ \ \ \ \mathcal{E'}_{b'}\cong\boldsymbol
{\mathcal{E'}}|_{\mathbb{P}^3\times\{b'\}}. \end{equation} \item[(iii)] $\mathbf{P}(\boldsymbol{\mathcal{E}})$, respectively, $\mathbf{P}(\boldsymbol{\mathcal{E'}})$ is an irreducible generically smooth scheme of dimension 11, respectively, of dimension 13. \end{itemize} \end{Prop} \begin{proof} It is enough to argue fiberwise over $M$, i.e. for a fixed sheaf $[F]\in M$. Let $y=\operatorname{Sing}(F)$ and consider the sets $B_y=B\times_M\{y\}$ and $B'_y=B'\times_M\{y\}$. Any points $b=(l,[F],\epsilon)\in B_y$ and $b'=(l,[F],\epsilon') \in B'_y$ define the exact triples \eqref{triple with cal E} and \eqref{triple with cal E'} with $\mathcal{E}_b=\ker(\epsilon)$ and $\mathcal{E'}_{b'}=\ker(\epsilon')$, respectively. These triples, together with the exact triple $0\to\op3(-2)\to3\cdot \op3(-1)\xrightarrow{\delta} F\to0$ from Remark \ref{R(-1,1,1)}, yield commutative diagrams with $\mathcal{G}=\ker(\epsilon\circ \delta)$ and $\mathcal{G'}=\ker(\epsilon'\circ\delta)$, respectively: \begin{equation}\label{comm diagram1} \xymatrix{\op3(-2)\ar@{=}[d]\ar@{>->}[r] & \mathcal{G} \ar@{>->}[d]\ar@{->>}[r] & \mathcal{E}_b\ar@{>->}[d] \\ \op3(-2) \ar@{>->}[r][r] & 3\cdot\op3(-1)\ar@{->>}[r]^-{\delta} \ar@{->>}[d]^{\epsilon\circ\delta} & F \ar@{->>}[d]^{\epsilon}\\ & {\mathcal O}_l(-1) \ar@{=}[r] & \mathcal{O}_l(-1), & } \xymatrix{\op3(-2)\ar@{=}[d]\ar@{>->}[r] & \mathcal{G'} \ar@{>->}[d]\ar@{->>}[r] & \mathcal{E'}_{b'}\ar@{>->}[d] \\ \op3(-2) \ar@{>->}[r][r] & 3\cdot\op3(-1)\ar@{->>}[r]^-{\delta} \ar@{->>}[d]^{\epsilon'\circ\delta} & F\ar@{->>}[d]^{\epsilon'}\\ & {\mathcal O}_l\ar@{=}[r] & \mathcal{O}_l. & } \end{equation} Consider the scheme $\Pi:=\mathbf{P}(3\cdot\op3(-1))\cong\mathbb{P}^3 \times \mathbb{P}^2$. To the epimorphism $\epsilon\circ\delta$ in the left diagram \eqref{comm diagram1} there corresponds an injective morphism $i:\mathbf{P}({\mathcal O}_l(-1))\hookrightarrow \Pi$ which defines a point $x\in\mathbb{P}^2$ such that $\mathrm{im}(i)=l_x:=\{x\}\times l$. 
Respectively, to the epimorphism $\epsilon'\circ\delta$ in the right diagram \eqref{comm diagram1} there corresponds an injective morphism $i':\mathbf{P}({\mathcal O}_l)\hookrightarrow \Pi$ which defines a point $x'\in(\mathbb{P}_l)_e$, where $(\mathbb{P}_l)_e$ is the set of epimorphisms $3\cdot\op3(-1) \twoheadrightarrow{\mathcal O}_l\ \mathrm{mod}A'_l$ considered as a dense open subset of the projective 5-space $\mathbb{P}(\Hom(3\cdot\op3(-1),{\mathcal O}_l))$. For this point $x'$, we denote $l_{x'}:=\mathrm{im}(i')$. Besides, to the epimorphism $\delta$ in both diagrams there corresponds an injective morphism $i_{\delta}:\mathbf{P}(F) \hookrightarrow\Pi$. From now on we will identify $\mathbf{P}(F)$ with its image under $i_{\delta}$. Now by \eqref{comm diagram1} the condition $b\in B_y$ and the condition $b'\in B'_y$, yield the inclusions \begin{equation}\label{condn on x} l_x\subset\mathbf{P}(F),\ \ \ {\mathrm{respectively,}}\ \ \ l_{x'}\subset\mathbf{P}(F). \end{equation} Next, by the middle horizontal triple in diagrams \eqref{comm diagram1}, $\mathbf{P}(F)$ is a Cartier divisor on $\Pi$ such that ${\mathcal O}_{\Pi}(\mathbf{P}(F))\cong\op3(2)\boxtimes {\mathcal O}_{\mathbb{P}^2}\otimes{\mathcal O}_{\Pi}(1)\cong\op3(1)\boxtimes {\mathcal O}_{\mathbb{P}^2}(1))$. Hence \begin{equation}\label{section s} \mathbf{P}(F)=(s)_0,\ \ \ \ 0\ne s\in\mathrm{H}^0(\op3(1)\boxtimes {\mathcal O}_{\mathbb{P}^2}(1)), \end{equation}
and the conditions \eqref{condn on x} mean that $s|_{l_x}=0$,
respectively, $s|_{l_{x'}}=0$.
Consider the first of these conditions $s|_{l_x}=0$. Let
$\Pi=\mathbb{P}^3\times\mathbb{P}^2\xleftarrow{p}\Gamma\times\mathbb{P}^2
\xrightarrow{q}G\times\mathbb{P}^2$ be the projections. Then by
construction the sheaf
$q_*p^*(\op3(1)\boxtimes{\mathcal O}_{\mathbb{P}^2}(1))$ is isomorphic to the
sheaf $\mathcal{A}={\mathcal O}_{\mathbb{P}^2}(1)\boxtimes\mathcal{Q}$, where
$\mathcal{Q}$ is the universal quotient rank 2 bundle on $G$. In
addition, under the natural isomorphism of spaces of sections
$\mathrm{H}^0(\op3(1)\boxtimes{\mathcal O}_{\mathbb{P}^2}(1))\cong
\mathrm{H}^0(\mathcal{A})$, the section $s$ from \eqref{section s}
corresponds to the section $\tilde{s}\in\mathrm{H}^0(\mathcal{A})$.
The above condition $s|_{l_x}=0$ then means that the section $\tilde{s}$ vanishes at the point $(l,x)\in G\times\mathbb{P}^2$. On the other hand, by the universal property of $\mathbf{P}(F)$ (see \cite[Ch. II, Prop. 7.12]{H}) it follows that to give an epimorphism $\epsilon:F\twoheadrightarrow{\mathcal O}_l(-1))\ \mathrm{mod}A_l$ is equivalent to give an embedding $l_x\hookrightarrow\mathbf{P}(F)$ in \eqref{condn on x}. This together with the condition $(l,x)\in(\tilde{s})_0$ yields a natural isomorphism of schemes \begin{equation}\label{By=} B_y\simeq(\tilde{s})_0. \end{equation} Under this isomorphism the fiber of the projection $B_y\simeq(\tilde{s})_0\to G,\ (l,x)\mapsto l$ is naturally identified with $\mathbb{P}(\Hom(F,{\mathcal O}_l(-1)))$. By \eqref{splitingF} this projective space is a point if
$l\not\in Z_y:=\{l\in G\ |\ y\in l\}$, respectively, is $\mathbb{P}^1$ if $l\in Z_y$. This together with the universal property of blowing ups \cite[Ch. II, Prop. 7.14]{H} implies that $B_y$ is isomorphic to the blow-up of $G$ along the smooth center $Z_y\simeq\mathbb{P}^2$. In particular, $B_y$ is irreducible, of dimension 4. Hence $B$ is irreducible of dimension 7.
Now proceed to the second condition $s|_{l_{x'}}=0$. For this, consider the scheme $G'=\{(l,x')|\ l\in G,\ x'\in(\mathbb{P}_l) _e\}$ with the projection $\psi:G'\to G,\ (l,x')\mapsto l$,
and the graph of incidence $\Gamma'=\{(z,l,x')\in\Pi\times G'|\ z\in l_{x'}\}$ with the projections $\Pi\xleftarrow{p'}\Gamma' \xrightarrow{q'}G'$. One checks that ${\mathcal O}_{\Pi}(\mathbf{P}(F))
|_{l_{x'}}\cong{\mathcal O}_{\mathbb{P}^1}(2)$. This implies that, applying
the functor $q'_*{p'}^*$ to the section $s$ from
\eqref{section s}, we obtain the section
$\tilde{s}'\in\mathrm{H}^0(\psi^*S^2\mathcal{Q}
\otimes\mathcal{D})$ for some invertible ${\mathcal O}_{G'}$-sheaf
$\mathcal{D}$ such that the condition $s|_{l_{x'}}=0$ is equivalent to the condition $(l,x')\in(\tilde{s}')_0$. This similarly to \eqref{By=} yields $B'_y\simeq(\tilde{s}')_0.$
Under this isomorphism the fiber of the projection
$\psi|_{B'_y}:B'_y\simeq(\tilde{s}')_0\to G,\ (l,x')\mapsto l$ is
naturally identified with $\mathbb{P}(\Hom(F,{\mathcal O}_l))$. By
\eqref{splitingF} this projective space is $\mathbb{P}^2$ if
$l\not\in Z_y$, respectively, is $\mathbb{P}^3$ if $l\in Z_y$. This
implies that $\tilde{s}'$ as a section of a rank 3 vector bundle
is regular, and its zero locus $B'_y$ is irreducible. Hence $B'$
is irreducible of dimension 9. We thus have proved the statement
(i) of the proposition.
The statement (ii) is clear. To prove the statement (iii), it is
also enough to argue fiberwise over $M$. For the above point
$b\in B_y$, we have to prove the irreducibility and generic
smoothness of the scheme $\mathbf{P}(\mathcal{E}_b)$. This is just
the statement of Lemma \ref{two irred} in which we set
$\mathcal{E}=\mathcal{E}_b$. The irreducibility and generic
smoothness of $\mathbf{P}(\mathcal{E}'_{b'})$ for $b'\in B'_y$ is
completely similar.
\end{proof}
Let $\rho:\mathbf{P}(\boldsymbol{\mathcal{E}})\to\mathbb{P}^3\times B$ be the structure morphism, and consider the compositions $\theta= pr_1\circ\rho:\mathbf{P}(\boldsymbol{\mathcal{E}})\to\mathbb{P}^3$ and $\tau=pr_2\circ\rho:\mathbf{P}(\boldsymbol{\mathcal{E}})\to B$. Set $\boldsymbol{\mathcal{\tilde{E}}}:=(\mathrm{id}_{\mathbb{P}^3}\times \tau)^*\boldsymbol{\mathcal{E}}=\boldsymbol{\mathcal{E}} \otimes_{{\mathcal O}_B}{\mathcal O}_{\mathbf{P}(\boldsymbol{\mathcal{E}})}$ and consider the "diagonal" embedding $j:\mathbf{P}(\boldsymbol{\mathcal{E}})\hookrightarrow\mathbb{P}^3\times \mathbf{P}(\boldsymbol{\mathcal{E}}),\ z\mapsto(\theta(z),z)$. By construction, $j^*\boldsymbol{\mathcal{\tilde{E}}}=\rho^* \boldsymbol{\mathcal{E}}$ and we obtain the composition of surjections $\mathbf{e}:\ \boldsymbol{\mathcal{\tilde{E}}}\twoheadrightarrow j_*j^*\boldsymbol{\mathcal{\tilde{E}}}=j_*\rho^* \boldsymbol{\mathcal{E}}\twoheadrightarrow j_* {\mathcal O}_{\mathbf{P}(\boldsymbol{\mathcal{E}})}(1)$ which yields an exact ${\mathcal O}_{\mathbb{P}^3\times\mathbf{P}(\boldsymbol{\mathcal{E}})}$- triple, where $\mathbf{E}:=\ker\mathbf{e}$: \begin{equation}\label{bold E} 0\to\mathbf{E}\to\boldsymbol{\mathcal{\tilde{E}}} \xrightarrow{\mathbf{e}}j_*{\mathcal O}_{\mathbf{P}(\boldsymbol {\mathcal{E}})}(1)\to0. 
\end{equation} In a similar way we define the morphisms $\rho':\mathbf{P} (\boldsymbol{\mathcal{E'}})\to\mathbb{P}^3\times B'$, $\theta'= pr_1\circ\rho':\mathbf{P}(\boldsymbol{\mathcal{E}'})\to\mathbb{P}^3$, $j':\mathbf{P}(\boldsymbol{\mathcal{E'}})\hookrightarrow\mathbb{P}^3 \times \mathbf{P}(\boldsymbol{\mathcal{E'}}),\ z\mapsto(\theta'(z),z)$, the sheaf $\boldsymbol{\mathcal{\tilde{E}'}}:=\boldsymbol {\mathcal{E}'}\otimes_{{\mathcal O}_{B'}}{\mathcal O}_{\mathbf{P}(\boldsymbol {\mathcal{E}'})}$, and the surjection $\mathbf{e'}:\ \boldsymbol {\mathcal{\tilde{E}'}}\twoheadrightarrow j'_*{\mathcal O}_{\mathbf{P} (\boldsymbol{\mathcal{E}'})}(1)$ which yields an exact ${\mathcal O}_{\mathbb{P}^3\times\mathbf{P}(\boldsymbol{\mathcal{E}'})}$- triple, where $\mathbf{E'}:=\ker\mathbf{e'}$: \begin{equation}\label{bold E'} 0\to\mathbf{E'}\to\boldsymbol{\mathcal{\tilde{E}'}}\xrightarrow {\mathbf{e'}}j'_*{\mathcal O}_{\mathbf{P}(\boldsymbol{\mathcal{E}'})} (1)\to0. \end{equation}
Below we will also consider extensions of $\op3$-sheaves of the form \begin{equation}\label{extn Q} 0\to{\mathcal O}_q\to Q\xrightarrow{\gamma}i_*{\mathcal O}_l(-1)\to0,\ \ \ \ \ \end{equation} \begin{equation}\label{2extn Q} 0\to{\mathcal O}_q\to Q\xrightarrow{\gamma}i_*{\mathcal O}_l\to0,\ \ \ \ \ \end{equation} where $(q,l)\in\mathbb{P}^3\times G$ and $i:l\hookrightarrow\mathbb{P}^3$ is the embedding. Below we also set $A_Q:=\mathrm{Aut}(Q)$ for $Q$ in \eqref{extn Q} and \eqref{2extn Q}. \begin{Prop}\label{P(E)=X} The following are true. \begin{itemize} \item[(i)] There are isomorphisms of schemes $\Phi:\mathbf{P} (\boldsymbol{\mathcal{E}})\xrightarrow{\simeq}X$ and $\Phi': \mathbf{P}(\boldsymbol{\mathcal{E}'})\xrightarrow{\simeq}X'$, where \begin{equation}\label{X}
X=\{([F],Q,\delta\ \mathrm{mod}A_Q)|\ [F]\in M,\ Q\ \mathrm{fits\ in}\ \eqref{extn Q},\ \delta:F\to Q\ \mathrm{is\ surjective}\}. \end{equation} \begin{equation}\label{'}
X'=\{([F],Q,\delta\ \mathrm{mod}A_Q)|\ [F]\in M,\ Q\
\mathrm{fits\ in}\ \eqref{2extn Q},\ \delta:F\to Q\
\mathrm{is\ surjective}\}.
\end{equation}
\item[(ii)] There are inclusions of dense open subschemes
$$
\mathcal{X}(-1,1,1,-1,1)\hookrightarrow\mathbf{P}(\boldsymbol
{\mathcal{E}})
~~ \textit{and} ~~
\mathcal{X}(-1,1,1,0,1)\hookrightarrow
\mathbf{P}(\boldsymbol{\mathcal{E'}}).
$$
The modular morphisms
$$
f:\mathbf{P}(\boldsymbol{\mathcal{E}})\to
{\mathcal M}(-1,2,2),\ z\mapsto[\mathbf{E}|_{\mathbb{P}^3\times\{z\}}] ~~ {\it and} ~~ f':\mathbf{P}(\boldsymbol{\mathcal{E'}})\to
{\mathcal M}(-1,2,0),\ z\mapsto[\mathbf{E'}|_{\mathbb{P}^3\times\{z\}}] $$ are injective, and the closures of their images are $\mathrm{X} (-1,1,1,-1,1)$ and $\mathrm{X}(-1,1,1,0,1)$, respectively. \end{itemize} \end{Prop} \begin{proof} (i) It is enough to consider $\mathbf{P}(\boldsymbol{\mathcal {E}})$, since the argument with $\mathbf{P}(\boldsymbol {\mathcal{E'}})$ is similar. For any point $z\in\mathbf{P}(\boldsymbol{\mathcal{E}})$ let $(q,b)=\rho(z)$. By definition the triple \eqref{bold E} resricted onto $\mathbb{P}^3\times\{z\}$ is the triple \begin{equation} 0\to E_z\to\mathcal{E}_b\xrightarrow{e_z}{\mathcal O}_q\to0. \end{equation} On the other hand, by Proposition \ref{descriptn of B,B'}.(ii), $b=(l,[F],\epsilon\ \mathrm{mod}A_l)$ and $\mathcal{E}_b$ fits in the triple \eqref{triple with cal E} in which $F=\mathcal{E}_b^{\vee\vee}$, $\iota:\mathcal{E}_b\to \mathcal{E}_b^{\vee\vee}$ is the canonical morphism and $\epsilon:\mathcal{E}_b^{\vee\vee}\twoheadrightarrow{\mathcal O}_l(-1)$ is the quotient morphism. Since $\mathcal{E}_b^{\vee\vee}= E_z^{\vee\vee}$, the composition $\tau:E_z\to\mathcal{E}_b \xrightarrow{\iota}E_z^{\vee\vee}$ is the canonical morphism of the sheaf $E_z$ into its reflexive hull, and $Q:=\coker(i)$ fits in the triple \eqref{extn Q}. We thus have an exact triple \begin{equation} 0\to E_z\xrightarrow{\tau}E_z^{\vee\vee}\xrightarrow{\delta}Q \to0, \end{equation} where $\delta$ is the quotient morphism. This defines a morphism $$ \Phi:\mathbf{P}(\boldsymbol{\mathcal{E}})\xrightarrow{\simeq}X, \ z\mapsto([E_z^{\vee\vee}],Q=E_z^{\vee\vee}/E_z,\delta\ \mathrm{mod}A_Q).$$ To construct the inverse morphism $\Phi^{-1}$, take a point $x=([F],Q,\delta\ \mathrm{mod}A_Q)$ and set $\mathcal{E}=\ker(\gamma\circ\delta:F\twoheadrightarrow {\mathcal O}_l(-1))$, where $\gamma$ in \eqref{extn Q} is the morphism of factorization of $Q$ by its maximal artinian subsheaf ${\mathcal O}_q$. 
We thus obtain the induced epimorphism
$e:\mathcal{E}\twoheadrightarrow\ker(\gamma)={\mathcal O}_q$, hence a point
$[e]=e\ \mathrm{mod}A_q\in\mathbf{P}(\boldsymbol{\mathcal{E}})$.
This yields the desired morphism $\Phi^{-1}:\
X\xrightarrow{\simeq}\mathbf{P}(\boldsymbol{\mathcal{E}}),\
x\mapsto[e]$.\\
(ii) The injectivity of the modular morphism $f$ is clear from the
above. In addition, under the description \eqref{X}, the scheme
$\mathcal{X}(-1,1,1,-1,1)$ is the set of those points
$z=([F],Q,\delta\ \mathrm{mod}A_Q)\in\mathbf{P}(\boldsymbol
{\mathcal{E}})$, with $(q,l)=\rho(z)$, for which $q\not\in l$,
$q\ne\operatorname{Sing}(F)\not\in l$. This is clearly a nonempty open
subset of the scheme $\mathbf{P}(\boldsymbol{\mathcal{E}})$ which
is dense since $\mathbf{P}(\boldsymbol{\mathcal{E}})$ is
irreducible by Proposition \ref{descriptn of B,B'}.(iii).
\end{proof}
Consider the sheaf $\mathbf{E}$ defined in \eqref{bold E}. Let
$\mathbf{r}:\mathbf{P}(\mathbf{E})\to\mathbb{P}^3\times\mathbf{P}
(\boldsymbol{\mathcal{E}})$ be the structure morphism, and
consider the composition $\mathbf{t}=pr_1\circ\mathbf{r}:
\mathbf{P}(\mathbf{E})\to\mathbb{P}^3$ and the ``diagonal'' embedding
$\mathbf{j}:\mathbf{P}(\mathbf{E})\hookrightarrow\mathbb{P}^3\times
\mathbf{P}(\mathbf{E}),\ w\mapsto(\mathbf{t}(w),w)$. Set
$\mathbf{\tilde{E}}:=\mathbf{E}
\otimes_{{\mathcal O}_{\mathbf{P}(\boldsymbol{\mathcal{E}})}}
{\mathcal O}_{\mathbf{P}(\mathbf{E})}$. By construction, $\mathbf{j}^*
\mathbf{\tilde{E}}=\mathbf{r}^*\mathbf{E}$ and we obtain the
composition of surjections $\mathbf{\tilde{e}}:\
\mathbf{\tilde{E}}\twoheadrightarrow
\mathbf{j}_*\mathbf{j}^*\mathbf{\tilde{E}}=\mathbf{j}_*
\mathbf{r}^*\mathbf{E}\twoheadrightarrow \mathbf{j}_*
{\mathcal O}_{\mathbf{P}(\mathbf{E})}(1)$ which yields an exact
${\mathcal O}_{\mathbb{P}^3\times\mathbf{P}(\mathbf{E})}$-triple, where
$\mathbf{\hat{E}}:=\ker\mathbf{\tilde{e}}$:
\begin{equation}\label{hat bold E}
0\to\mathbf{\hat{E}}\to\mathbf{\tilde{E}}\xrightarrow{\mathbf
{\tilde{e}}}\mathbf{j}_*{\mathcal O}_{\mathbf{P}(\mathbf{E})}(1)\to0.
\end{equation}
We will also consider the exact triples of the form
\begin{equation}\label{new extn Q}
0\to Z\to Q\to i_*{\mathcal O}_l(-1)\to0,\ \ \ \ \
\dim Z=0,\ \ \ \mathrm{length}(Z)=2,
\end{equation}
where $i:l\hookrightarrow\mathbb{P}^3$ is the embedding of a line
$l\in G$.
\begin{Prop}\label{X(-1,1,1,-1,2)}
The sheaf $\mathbf{\hat{E}}$ defined in \eqref{hat bold E}
determines the modular morphism
$$ \hat{\Phi}:\mathbf{P}(\mathbf{E})\to\mathcal{M}(-1,2,0),\
w\mapsto\left[\mathbf
{\hat{E}}|_{\mathbb{P}^3\times\{w\}}\right] , $$ and the closure $\overline {\hat{\Phi}(\mathbf{P}(\mathbf{E}))}$ of its image in $\mathcal{M}(-1,2,0)$ coincides with the scheme $\mathrm{X} (-1,1,1,-1,2)$. In particular, $\mathrm{X}(-1,1,1,-1,2)$ contains all the points $[E]$ such that $Q=E^{\vee\vee}/E$ fits in the triple of the form \eqref{new extn Q}. \end{Prop} \begin{proof} First note that, by the definition of the sheaf $\mathbf{E}$, the scheme $\mathbf{P}(\mathbf{E})$ is fibered over the scheme $\mathbf{P}(\boldsymbol{\mathcal{E}})$ with fibers of the form $\mathbf{P}(E)$ described in Lemma \ref{two irred}. Hence by that Lemma, these fibers are irreducible of dimension 4. Besides, the scheme $\mathbf{P}(\boldsymbol{\mathcal{E}})$ is also irreducible of dimension 11 by Proposition \ref{descriptn of B,B'}.(iii). Hence the scheme $\mathbf{P}(\mathbf{E})$ is irreducible of dimension 15. The fact that it contains the scheme $\mathcal{X}(-1,1,1,-1,2)$ as a dense open subset is proved in the same way as the statement of item (ii) of Proposition \ref{P(E)=X}, based on the universal property of $\mathbf{P}(\mathbf{E})$ from \cite[Ch. II, Prop. 7.12]{H}. \end{proof}
\begin{Prop}\label{X in T} The following claims hold. \begin{itemize} \item[(i)] The scheme $\mathrm{X}(-1,1,1,-1,1)$ is contained in $\mathrm{T}(-1,2,4,1)$. \item[(ii)] The scheme $\mathrm{X}(-1,1,1,-1,2)$ is contained in $\mathrm{T}(-1,2,4,2)$. \item[(iii)] The scheme $\mathrm{X}(-1,1,1,0,1)$ is contained in $\mathrm{T}(-1,2,2,1)$. \end{itemize} \end{Prop} \begin{proof} (i) We will construct a flat family $\mathbb{E}=\{E_t\}_{t\in \mathbb{P}^1}$ of sheaves from $\mathrm{T}(-1,2,4,1)$ such that, for a certain point $t_0\in\mathbb{P}^1$, $E_{t_0}$ is a smooth point of ${\mathcal M}(-1,2,2)$ lying in $\mathrm{X}(-1,1,1,-1,1)\cap \mathrm{T}(-1,2,4,1)$. From this the statement (i) will follow. Fix a plane $\mathbb{P}^2$ in $\mathbb{P}^3$ and choose a pencil of conics in $\mathbb{P}^2$ considered as a divisor $D$ in $\mathbb{P}^2\times\mathbb{P}^1$, with the projection $D\xrightarrow{p}\mathbb{P}^1$, and, for $t\in\mathbb{P}^1$, denote $C_t=p^{-1}(t)$. We choose the pencil $D$ in such a way that, for two distinct marked points $t_0,t_1\in\mathbb{P}^1$, $C_{t_0}=l_1\cup l_2$ is a union of two distinct lines and $C_{t_1}$ is a smooth conic intersecting $C_{t_0}$ at 4 distinct points. Fix a point $q\in\mathbb{P}^3\smallsetminus\mathbb{P}^2$, and on $\Sigma=\mathbb{P}^3\times\mathbb{P}^1$ consider the line $L=\{q\} \times\mathbb{P}^1$ and the extension of sheaves, where $\mathcal{A}=\op3(-1)\boxtimes{\mathcal O}_{\mathbb{P}^1}(-1)$:
\begin{equation}\label{bbE} 0\to\mathcal{A}\otimes{\mathcal I}_{L,\Sigma}\to\mathbb{E}\to {\mathcal I}_{D,\Sigma}\to0. \end{equation} The extension group corresponding to \eqref{bbE} is $V:=\ext^1({\mathcal I}_{D,\Sigma},\mathcal{A}\otimes{\mathcal I}_{L,\Sigma}) \simeq\ext^1({\mathcal I}_{D,\Sigma},\mathcal{A})\simeq\\ \mathrm{H}^0(\lext^1({\mathcal I}_{D,\Sigma},\mathcal{A}))\simeq \mathrm{H}^0(\lext^2({\mathcal O}_{D},\mathcal{A}))\simeq\mathrm{H}^0(\op3(2)\boxtimes
{\mathcal O}_{\mathbb{P}^1}|_D)\simeq\mathrm{H}^0({\mathcal O}_{\mathbb{P}^2}(2))$. Thus the element of $V$ defining the extension \eqref{bbE} is understood as a section $0\ne s\in\mathrm{H}^0({\mathcal O}_{\mathbb{P}^2} (2))$. Now pick the section $s$ such that it vanishes on the line $l_1$ and doesn't vanish on the line $l_2$; hence it also doesn't vanish on the conic $C_{t_1}$. Then by the Serre construction we obtain the following properties of sheaves
$E_{t}=\mathbb{E}|_{\mathbb{P}^3\times\{t\}}$.\\ (i.a) Under the generic choice of the conic $C_{t_1}$, for generic $t\in\mathbb{P}^1$, the sheaf $[E_{t}]$ is a generic sheaf from $\mathrm{T}(-1,2,4,1)$. In other words, $\mathbb{P}^1\subset \mathrm{T}(-1,2,4,1)$. In particular, $[E_{t_0}]\in \mathrm{T}(-1,2,4,1)$.\\ (i.b) The sheaf $E_{t_0}$ fits in the exact triple $0\to E_{t_0}\xrightarrow{\mathrm{can}}E_{t_0}^{\vee\vee}\to {\mathcal O}_l(-1)\oplus{\mathcal O}_q\to0$, where $[E_{t_0}^{\vee\vee}]\in M$ and, by the construction of $E_{t_0}$, $q\not\in \operatorname{Sing}(E_{t_0}^{\vee\vee})\cup l$. This means that $[E_{t_0}]\in\mathcal{X}(-1,1,1,-1,1)$, and Theorem \ref{NewComponentsmixed}.(iii) implies that $\dim\ext^1(E_{t_0},E_{t_0})=15$. Hence, since $\dim \mathrm{T}(-1,2,4,1)=15$ (see Theorem \ref{0dcomp}), it follows that $E_{t_0}$ is a smooth point of $\mathrm{T} (-1,2,4,1)$ and of ${\mathcal M}(-1,2,2)$ as well. \\ (ii) This is completely similar to the statement (i) above. The only difference is that, instead of fixing a point $q\in\mathbb{P}^3\smallsetminus\mathbb{P}^2$, we fix two distinct points $q_1,q_2\in\mathbb{P}^3\smallsetminus\mathbb{P}^2$, and on $\Sigma=\mathbb{P}^3 \times\mathbb{P}^1$ consider the two corresponding lines $L_i=\{q_i\}\times\mathbb{P}^1$ and the extension of sheaves similar to \eqref{bbE}: $0\to\mathcal{A}\otimes{\mathcal I}_{L_1\sqcup L_2,\Sigma}\to\mathbb{E}\to{\mathcal I}_{D,\Sigma}\to0$. Respectively, for $V$ we take the group $\ext^1({\mathcal I}_{D,\Sigma},\mathcal{A} \otimes{\mathcal I}_{L_1\sqcup L_2,\Sigma})\simeq\mathrm{H}^0 ({\mathcal O}_{\mathbb{P}^2}(2))$. The rest of the argument is literally the same as in (i).
(iii) Similar to the above we construct a flat family $\mathbb{E} =\{E_t\}_{t\in\mathbb{A}^1}$ of sheaves from $\mathrm{T}(-1,2,2,1)$ such that, for a certain point $t_0\in\mathbb{A}^1$, $E_{t_0}$ is a smooth point of ${\mathcal M}(-1,2,0)$ lying in $\mathrm{X}(-1,1,1,0,1)\cap \mathrm{T}(-1,2,2,1)$. From this the statement (iii) will follow. We will use the description of sheaves from ${\mathcal M}(-1,2,2)$ given in \cite[Lemma 2.4]{Chang}. Thus, instead of the above family of conics $D=\{C_t \}_{t\in\mathbb{P}^1}$ we take for $C_t,\ t\in\mathbb{A}^1$, a fixed union $Y=l_1\sqcup l_2$ of two disjoint lines in $\mathbb{P}^3$, fix a point $q\in\mathbb{P}^3\smallsetminus Y$, set $D=Y\times\mathbb {A}^1$, $L=\{q\}\times\mathbb{A}^1$. For these data consider the extension \eqref{bbE}, where we set $\mathcal{A}=\op3(-1)\boxtimes{\mathcal O}_{\mathbb{A}^1}$, and then define the extension group $V=\ext^1({\mathcal I}_{D,\Sigma},\mathcal{A}\otimes {\mathcal I}_{L,\Sigma})$ as above. One easily sees that $V\cong \mathrm{H}^0({\mathcal O}_{l_1})\oplus\mathrm{H}^0({\mathcal O}_{l_2})$. For $i=1,2$ pick a nonzero vector $v_i\in\mathrm{H}^0({\mathcal O}_{l_i})$ and identify the base
$\mathbb{A}^1$ of the family $\{C_t\},\ t\in\mathbb{A}^1$, with the subset $\{(v_1,tv_2)|t\in\mathbf{k}\}$. By the Serre construction we obtain the following properties of sheaves
$E_{t}=\mathbb{E}|_{\mathbb{P}^3\times\{t\}}$.\\ (a) For $t\in\mathbb{A}^1\smallsetminus\{0\}$, the sheaf $[E_{t}]$ by definition belongs to $\mathcal{T}(-1,2,2,1)$. It follows that $\mathbb{A}^1\subset\mathrm{T}(-1,2,2,1)$. In particular, $[E_0]\in\mathrm{T}(-1,2,2,1)$.\\ (b) The sheaf $E_0$ fits in the exact triple $0\to E_0\xrightarrow{\mathrm{can}}E_0^{\vee\vee}\to {\mathcal O}_{l_2}\oplus{\mathcal O}_q\to0$, where $[E_0^{\vee\vee}]\in M$ and, by the construction of $E_0$, $q\not\in\operatorname{Sing} (E_0^{\vee\vee})\cup l_2$. This means that $[E_0]\in\mathcal{X} (-1,1,1,0,1)$, and Theorem \ref{NewComponentsmixed}.(iii) implies that $\dim\ext^1(E_0,E_0)=15$. Hence, since $\dim\mathrm {T}(-1,2,2,1)=15$ (see Theorem \ref{0dcomp}), it follows that $E_0$ is a smooth point of $\mathrm{T}(-1,2,2,1)$ and of ${\mathcal M}(-1,2,0)$ as well. \end{proof}
\section{Irreducible components of ${\mathcal M}(-1,2,2)$} \label{irreducible of M(-1,2,2)}
Now we are in a position to prove the next main result of this paper.
\begin{Teo}\label{M(-1,2,2)} The moduli space ${\mathcal M}(-1,2,2)$ of rank 2 stable sheaves on $\mathbb{P}^3$ with Chern classes $c_1 = -1, c_2 = 2, c_3 = 2$, has exactly $2$ irreducible rational components, namely: \begin{itemize} \item[(i)] the closure $\overline{{\mathcal R}(-1,2,2)}$ of the family of reflexive sheaves ${\mathcal R}(-1,2,2)$, of dimension $11$; \item[(ii)] the irreducible component $\mathrm{T}(-1,2,4,1)$ given by Theorem \ref{0dcomp}, of dimension $15$, whose generic element is a torsion free sheaf $E$ such that $E^{\vee \vee} \in {\mathcal R}(-1,2,4)$ and $Q_E$ is a sheaf of length 1; \item[(iii)] in addition, ${\mathcal M}(-1,2,2)\smallsetminus{\mathcal R}(-1,2,2)= \mathrm{T}(-1,2,4,1)$. \end{itemize} \end{Teo} \begin{proof} By \cite[Thm 2.5]{Chang}, ${\mathcal R}(-1,2,2)$ is irreducible, nonsingular of dimension $11$, and its closure $\overline{{\mathcal R}(-1,2,2)}$ in ${\mathcal M}(-1,2,2)$ is an irreducible component of ${\mathcal M}(-1,2,2)$ of dimension $11$. Consider $E \in {\mathcal M}(-1,2,2) \setminus {\mathcal R}(-1,2,2)$. By Proposition \ref{ChernClasses}, either $\dim Q_E=1$ and $1\leq\mult Q_E\leq 2$, or $\dim Q_E=0$. Consider all the possibilities for $\dim Q_E$ and $\mult Q_E$.\\ i) If $\dim Q_E=1$ and $\mult Q_E = 2$, then $c_2(E^{\vee \vee}) = 0$, and, as in the case i) of the proof of Theorem \ref{M(-1,2,4)}, we are led to a contradiction.\\ ii) If $\dim Q_E=1$ and $\mult Q_E = 1$, then $c_2(E^{\vee \vee})=1$ and $c_3(E^{\vee\vee})=1$ and $Q_E$ is supported on a line. Then $Q_E$ fit in an exact sequence of the form (\ref{extensionQ_E}) where $Z_E$ is the maximal artinian subsheaf of $Q_E$. Then the Euler characteristic of $Q_E(t)$ is given by formula (\ref{chi(Q_E(t))}) which together with (\ref{fundamentalcomponents}) yields: $-1=\chi(E)=\chi(E^{\vee\vee})-\chi(Q_E)=-1-r-s$. Hence $-r-s=0$ and, since we have an epimorphism $\delta:E^{\vee\vee} \to Q_E$, from equation (\ref{splitingF}) it follows that $r\geq -1$. 
This implies that the possible values for $r$ and $s$ are $r=s=0$ or $r=-1$, $s=1$.\\ Case ii.1) Assume that $r=s=0$. In this case, $Q_E \simeq i_{*}\mathcal{O}_{l}$, for some line $l$, where $i:l\hookrightarrow\mathbb{P}^3$ is the embedding. If $l\cap\operatorname{Sing} E^{\vee\vee}=\emptyset$, then $E$ is a sheaf in $\mathcal{X}(-1,1,1,0,0)$, i. e. a generic sheaf in $\mathrm{X}(-1,1,1,0,0)$, where $\dim\mathrm{X}(-1,1,1,0,0)=9$ by \eqref{dimfamilymixed}. However, this dimension is too small for $\mathrm{X}(-1,1,1,0,0)$ to be an irreducible component of ${\mathcal M}(-1,2,2)$. Next, if $l\cap\operatorname{Sing} ~E^{\vee\vee}\neq\emptyset$, then $E \in Y(0)$ and by Lemma \ref{nonemptyfamily} also $Y(0)$ does not fill an irreducible component of ${\mathcal M}(-1,2,2)$. \\ Case ii.2) Assume that $r=-1$ and $s=1$. In this case, $Q_E$ fits into the exact triple \eqref{extn Q} for some pair $(q,l)\in\mathbb{P}^3\times G$, where $i:l \hookrightarrow\mathbb{P}^3$ is the embedding and $Q=Q_E$. Since $[E^{\vee\vee}]\in M={\mathcal M}(-1,1,1)$, Proposition \ref{P(E)=X}.(i),(ii) yields that $[\delta:E^{\vee\vee}\twoheadrightarrow Q]=\delta\ \mathrm {mod}A_Q$ is the point in $\mathbf{P}(\boldsymbol{\mathcal{E}})$, i. e., $[E]\in \mathrm{X}(-1,1,1,-1,1)$. This together with Proposition \ref{X in T} implies that $[E]\in\mathrm{T}(-1,2,4,1)$. \\ iii) If $\dim Q_E=0$, then $s=\mathrm{length}(Q_E)>0$. By Proposition \ref{ChernClasses}, $c_2(E)=c_2(E^{\vee\vee})=2$ and $c_3(E^{\vee\vee})=c_3(E)+2s=2+2s\ge4$. On the other hand, $c_3(E^{\vee\vee})\le c_2(E^{\vee\vee})^2=4$ by \eqref{c3 le} since $E^{\vee\vee}$ is stable. Hence $s = 1$, $c_3(E^{\vee\vee})=4$, i. e. $Q_E\simeq\mathcal{O}_p$ for some point $p\in\mathbb{P}^3$. Then by Theorem \ref{T(-1,2,2i,1)}.(ii) $[E]$ belongs to the irreducible component $\mathrm{T} (-1,2,4,1)$ of ${\mathcal M}(-1,2,2)$.
In conclusion, we have proved that ${\mathcal M}(-1,2,2) = \overline{{\mathcal R}(-1,2,2)}\cup\mathrm{T}(-1,2,4,1)$. Finally, remark that the rationality of $\overline{{\mathcal R}(-1,2,2)}$ is known from \cite{Chang2}, and the rationality of $\mathrm{T}(-1,2,4,1)$ follows from Main Theorem \ref{main2}. \end{proof}
\section{Irreducible components of ${\mathcal M}(-1,2,0)$} \label{irreducible of M(-1,2,0)}
We are now ready to describe all the irreducible components of ${\mathcal M}(-1,2,0)$. \begin{Teo}\label{M(-1,2,0)} The moduli space ${\mathcal M}(-1,2,0)$ of rank 2 stable sheaves on $\mathbb{P}^3$ with Chern classes $c_1 = -1, c_2 = 2, c_3 = 0$, has exactly 4 irreducible rational components, namely: \begin{itemize} \item[(i)] The closure of the family of stable rank $2$ locally free sheaves ${\mathcal B}(-1,2)$, of dimension $11$; \item[(ii)] The irreducible component $\mathrm{X}(-1,1,1,1,0)$ of dimension $11$, described by Theorem \ref{NewComponentsmixed}, whose generic element is a torsion free sheaf $E$ such that $E^{\vee \vee} \in {\mathcal R}(-1,1,1)$ and $Q_E=i_*{\mathcal O}_l(1)$ for some line $i:l\hookrightarrow\mathbb{P}^3$. \item[(iii)] The irreducible component $\mathrm{T}(-1,2,2,1)$ of dimension $15$ described in Theorem \ref{0dcomp}, whose generic sheaf is a torsion free sheaf $E$ such that $E^{\vee \vee}\in{\mathcal R}(-1,2,2)$ and $Q_E$ is a sheaf of length 1. \item[(iv)] The irreducible component $\mathrm{T}(-1,2,4,2)$ of dimension $19$ described by Theorem \ref{0dcomp}, whose generic sheaf is a torsion free sheaf $E$ such that $E^{\vee \vee}\in{\mathcal R}(-1,2,4)$ and $Q_E$ is a sheaf of length 2, supported at two distinct points. \end{itemize} \end{Teo} \begin{proof} By \cite[Proposition 4.1]{H1978}, $\overline{{\mathcal B}(-1,2)}$ is an irreducible component of ${\mathcal M}(-1,2,0)$ of dimension $11$. Consider $[E]\in {\mathcal M}(-1,2,0) \setminus {\mathcal B}(-1,2)$; again, by Proposition \ref{ChernClasses}, either $\dim Q_E=1$ and $1\leq\mult Q_E \leq 2$, or $\dim Q_E=0$. We will study the possibilities for $\dim Q_E$ and $\mult Q_E$. \\ i) If $\dim Q_E=1$, $\mult Q_E = 2$, then $c_2(E^{\vee\vee})=0$, and by \eqref{c3 le} $c_3(E^{\vee\vee})=0$. Thus $E^{\vee \vee}$ is a stable rank 2 vector bundle with $c_1(E^{\vee \vee})=-1,\ c_2(E^{\vee \vee})=0$, contrary to \cite[Cor.
3.5]{H1978}.\\ ii) If $\dim Q_E=\mult Q_E = 1$, then $c_2(E^{\vee \vee})=1$. Hence $Q_E$ is supported on a line $i:l\hookrightarrow \mathbb{P}^3$ and, possibly, isolated points and fits in the exact sequence \ref{extensionQ_E}, where $\dim Z_E\le0,\ \mathrm{length}Z_E=s$. Then the second formula \eqref{chi(Q_E(t))} and Proposition \ref{ChernClasses}.b) yield $c_3(E^{\vee \vee})=2(r+s)-1$. This together with \eqref{c3 le} implies that $0\le c_3(E^{\vee\vee}) =2(r+s)-1\le1$. Hence, $c_3(E^{\vee\vee})=r+s=1$, i. e. $[E^{\vee\vee}]\in{\mathcal M}(-1,1,1)$. Since we have an epimorphism $E^{\vee \vee} \overset{\delta}{\twoheadrightarrow}Q_E$, it follows from \eqref{splitingF} that $r\ge-1$. This together with the inequality $s\ge0$ implies that the possible values for $r$ and $s$ are: $r=1$ and $s=0$, or $r=0$ and $s=1$, or $r=-1$ and $s=2$. Consider these three cases.\\ Case ii.1): $r = 1$ and $s = 0$. In this case, $Q_E \simeq i_{*}\mathcal{O}_{l}(1)$. If $l\cap\operatorname{Sing}(E^{\vee\vee}) = \emptyset$, then by definition $[E]\in\mathcal{X}(-1,1,1,1,0)$, that is $E$ is a generic sheaf in $\mathrm{X}(-1,1,1,1,0)$. Here by Theorem \ref{NewComponentsmixed}.(i) $\mathrm{X}(-1,1,1,1,0)$ is the irreducible component of dimension $11$ in ${\mathcal M}(-1,2,0)$. If $l \cap \operatorname{Sing}(E) \neq \emptyset$, then by the Lemma \ref{nonemptyfamily} the family of all such $E$ cannot constitute an irreducible component of ${\mathcal M} (-1,2,0)$.\\ Case ii.2): $r=0$ and $s=1$. In this case, the triple \eqref{extensionQ_E} is: $0\to\mathcal{O}_p\to Q_E\xrightarrow{\mathrm{can}} i_{*}\mathcal{O}_{l}\to0$, where $p$ is some point in $\mathbb{P}^3$. This together with the Snake Lemma implies that the sheaf $\mathcal{E}$ defined as the kernel of the composition $E^{\vee\vee}\overset{\mathrm{can}}{\twoheadrightarrow}Q_E \overset{\delta}{\twoheadrightarrow}i_{*}\mathcal{O}_{l}$ fits in the exact triple $0\to E\to\mathcal{E}\to{\mathcal O}_p\to0$. 
This triple and the stability of $E$ imply the stability of $\mathcal{E}$, hence $[\mathcal{E}]\in{\mathcal M}(-1,2,2)$. Since $\dim\operatorname{Sing}(E)=1$, we have by definition that $[E]\in \mathcal{X}(-1,1,1,0,1)$. From Proposition \ref{X in T}.(iii) it follows now that $[E]\in\mathrm{T}(-1,2,2,1)$.\\ Case ii.3): $r=-1$ and $s=2$. In this case, $Q_E$ fits into an exact sequence of the form: $0 \to Z_E \to Q_E \to i_{*}\mathcal{O}_{l}(-1) \to 0$, where $Z_E$ has length $2$ and where $i:l\hookrightarrow\mathbb{P}^3$ is the embedding of some line $l$. Therefore, by Proposition \ref{X(-1,1,1,-1,2)}, $[E]\in\mathrm{X}(-1,1,1,-1,2)$. Then from Proposition \ref{X in T}.(ii) it follows that $[E]\in \mathrm{T}(-1,2,4,2)$.
iii) If $\dim Q_E = 0$, let $s = \mathrm{length}(Q_E)$; since we are assuming that $E$ is properly torsion free, it follows that $s>0$. By Proposition \ref{ChernClasses}, $c_2(E) = c_2(E^{\vee \vee})$ and $c_3(E^{\vee \vee}) = c_3(E)+2s$. Therefore, either $s=1$, $c_3(E^{\vee \vee})=2$, or $s=2$, and $c_3(E^{\vee \vee})=4$. Consider both these cases.\\ Case 1: $s= 1$, then $c_3(E^{\vee \vee})=2$. In this case $Q_E \simeq\mathcal{O}_p$ for some point $p\in\mathbb{P}^3$, so that $[E]\in\mathrm{T}(-1,2,2,1)$ by Theorem \ref{T(-1,2,2i,1)}.(i).\\ Case 2: $s = 2$, then $Q_E$ has length $2$, and $[E]\in \mathrm{T}(-1,2,4,2)$ by Theorem \ref{T(-1,2,4,2)}.
In conclusion, we have proved that ${\mathcal M}(-1,2,0)= \overline{{\mathcal B}(-1,2)}\cup\mathrm{T}(-1,2,2,1)\cup \mathrm{T}(-1,2,4,2)\cup\mathrm{X}(-1,1,1,1,0)$. Remark also that the rationality of $\overline{{\mathcal B}(-1,2)}$ is proved in \cite{Hart2}, the rationality of $\mathrm{T}(-1,2,2,1)$ and $\mathrm{T}(-1,2,4,2)$ follows from Main Theorem \ref{main2}, and the rationality of $\mathrm{X}(-1,1,1,1,0)$ also follows from Main Theorem \ref{main2} with a small additional argument due to the elementary transformations of sheaves along the line $l$. \end{proof}
\begin{Remark} Meseguer, Sols and Str\o mme proved in \cite{MSS} that ${\mathcal M}(-1,2,0)$ contains, besides ${\mathcal B}(-1,2)$, at least two families of non locally free sheaves containing sheaves that are not limits of locally free sheaves. Zavodchikov then proved in \cite{Z} that these families of sheaves form irreducible components of dimension 15 and 19; they coincide with the components we denoted by $\mathrm{T}(-1,2,2,1)$ and $\mathrm{T}(-1,2,4,2)$, respectively. Later, Zavodchikov proved in \cite{Za} that ${\mathcal M}(-1,2,0)$ consists of exactly 4 irreducible components; this article, however, is only available in Russian.
We emphasize that our proof of Theorem \ref{M(-1,2,0)} is completely independent from the results in \cite{MSS,Z,Za}, treating ${\mathcal M}(-1,2,c_3)$ in a uniform manner for all 3 possible values of $c_3$. Additionally, it also provides further information on the generic element of each component. \end{Remark}
\section{Connectedness of the spaces ${\mathcal M}(-1,2,c_3)$}\label {Connectedness of M(2)}
Since the space ${\mathcal M}(-1,2,4)$ is irreducible, it is obviously connected. In this section we will prove that the spaces ${\mathcal M}(-1,2,2)$ and ${\mathcal M}(-1,2,0)$ are also connected.
\begin{Teo}\label{connected1} The moduli space ${\mathcal M}(-1,2,2)$ is connected. \end{Teo} \begin{proof} First, remark that one easily constructs a flat family of curves $\mathcal{Z}$ in $\mathbb{P}^3$ with base $\mathbb{A}^1$, i. e., a subscheme $\mathcal{Z}$ of $\mathbb{P}^3\times\mathbb{A}^1$ with the projection $\pi:\ \mathcal{Z}\to\mathbb{P}^3\times\mathbb{A}^1 \xrightarrow{pr_2}\mathbb{A}^1$ satisfying the properties:\\ a) for $t\in\mathbb{A}^1\smallsetminus\{0\}$, the fiber $Z_t :=\pi^{-1}(t)$ is a disjoint union $l_{1t}\sqcup l_{2t}$ of two lines;\\ b) the zero fiber $Z_0:=\pi^{-1}(0)$ as a set is the union of two distinct lines $l_{10}$ and $l_{20}$ meeting at a point, say, $p$ such that $(Z_0)_{red}=l_{10}\cup l_{20}$ and $Z_0$ as a scheme has $p$ as an embedded point; more precisely, there is an exact triple: \begin{equation}\label{pointp} 0\to{\mathcal O}_p\to{\mathcal O}_{Z_0}\to{\mathcal O}_{(Z_0)_{red}} \to 0. \end{equation} Indeed, to construct the family $\mathcal{Z}$, consider the projective space $\mathbb{P}^4$ with coordinates $(u:x:y:z:w)$ and the affine line $\mathbb{A}^1$ with coordinate $t$. In $\mathbb{P}^4$ consider a reduced subscheme $W$ given by the equations $\{xz=xw=yz=yw=0\}$. (This $W$ is just a union of two projective planes in $\mathbb{P}^4$ intersecting at the point $\tilde{p}=(1:0:0:0:0)$.) Next, in $\mathbb{P}^4\times \mathbb{A}^1$ take a divisor $D=\{tu=x+y+z+w\}$, and let $\tilde{\mathcal{Z}}=D\cap W\times\mathbb{A}^1$. Furthermore, in $\mathbb{P}^4$ take a hyperplane $\mathbb{P}^3_0=\{x+y+z+w=0\}$, fix some isomorphism $f:\ D\xrightarrow{\simeq}\mathbb{P}^3_0\times \mathbb{A}^1\simeq\mathbb{P}^3\times\mathbb{A}^1$ and set $p=f(\tilde {p})$. (For instance, one can take for $f$ a morphism $((u:x:y:z:w),t)\mapsto((u:x:y:z:-(x+y+z)),t)$.) Then the subscheme $\mathcal{Z}=f(\tilde{\mathcal{Z}})$ satisfies the above properties a) and b).
Let $p_2:\mathbb{P}^3\times\mathbb{A}^1\to\mathbb{A}^1$ be the projection. One checks that, for $t\in\mathbb{A}^1$, $\ext^1({\mathcal I}_{Z_t},\op3(-1))$ has fixed dimension 4 while the higher Ext-groups of this pair vanish, hence the base change for relative Ext-sheaves (see, e. g., \cite[Thm. 1.4]{L}) shows that the sheaf $\mathcal{A}=\lext^1_{p_2}({\mathcal I}_{\mathcal{Z},\mathbb{P}^3 \times{\mathbb{A}^1}},\mathcal{O}_{\mathbb{P}^3}(-1)\boxtimes{\mathcal O}_ {\mathbb{A}^1})$ is a locally free ${\mathcal O}_ {\mathbb{A}^1}$-sheaf and there exists a nowhere vanishing section $s\in\mathrm{H}^0(\mathcal{A})$. Furthermore, by the spectral sequence of global-to-relative Ext we may consider $s$ as an element of the group $\ext^1_{p_2}({\mathcal I}_{\mathcal{Z},\mathbb{P}^3 \times{\mathbb{A}^1}},\mathcal{O}_{\mathbb{P}^3}(-1)\boxtimes{\mathcal O}_ {\mathbb{A}^1})$. This element defines an extension of ${\mathcal O}_ {\mathbb{P}^3\times\mathbb{A}^1}$-sheaves \begin{equation}\label{flat over A1} 0\to\mathcal{O}_{\mathbb{P}^3}(-1)\boxtimes{\mathcal O}_{\mathbb{A}^1}\to \mathbf{E}\to {\mathcal I}_{\mathcal{Z},\mathbb{P}^3\times{\mathbb{A}^1}}\to0. \end{equation} The sheaf $\mathbf{E}$ is flat over $\mathbb{A}^1$and, by construction, for $t\in \mathbb{A}^1$, the restriction of \eqref{flat over A1} is a nonsplitting extension of $\op3$-sheaves $0\to\mathcal{O}_{\mathbb{P}^3}(-1)\to E_t\to{\mathcal I}_{Z_t,
\mathbb{P}^3}\to0$, where $E_t:=\mathbf{E}|_{\mathbb{P}^3\times\{t\}}$. Hence, $[E_t]\in{\mathcal M}(-1,2,2)$, i. e., we obtain a modular morphism $\Phi:\ \mathbb{A}^1\to{\mathcal M}(-1,2,2),\ \ t\mapsto [E_t]$.
Note that, for $t\ne0$, $[E_t]\in{\mathcal R}(-1,2,2)$ by \cite [Lemma 2.4]{Chang}, i. e. $\Phi(\mathbb{A}^1\setminus\{0\}) \subset{\mathcal R}(-1,2,2)$. It follows that $[E_0]\in\overline {{\mathcal R}(-1,2,2)}$. Besides, $E_0$ fits in the exact sequence \begin{equation}\label{E0} 0\to\mathcal{O}_{\mathbb{P}^3}(-1)\xrightarrow{r}E_0\to{\mathcal I}_{Z_0,\mathbb{P}^3} \to0. \end{equation} From \eqref{pointp} and \eqref{E0} we deduce the following exact sequences: \begin{equation}\label{E0p} 0\to E_0\to E_0^{\vee\vee}\to\mathcal{O}_p\to 0, \end{equation} \begin{equation}\label{stability} 0\to \mathcal{O}(-1) \stackrel{s}{\to} E_0^{\vee\vee} \to {\mathcal I}_{(Z_0)_{red},\mathbb{P}^3}\to 0, \end{equation} where $s$ is the composition of the morphism $r$ in the sequence \eqref{E0} with the canonical monomorphism $E_0\to E_0^{\vee \vee}$. From the sequence \eqref{stability} and \cite [Proposition 4.2]{Harshorne-Reflexive} we conclude that $E_0^{\vee \vee}$ is stable. Moreover, by \eqref{E0p} and Theorem \ref{T(-1,2,2i,1)}.(ii), $[E_0]\in\mathrm{T}(-1,2,4, 1)$. This yields the proof since, by Theorem \ref{M(-1,2,2)}, ${\mathcal M}(-1,2,2)=\overline{{\mathcal R}(-1,2,2)}\cup\mathrm{T} (-1,2,4,1)$. \end{proof}
\begin{Teo}\label{connected2} The moduli space ${\mathcal M}(-1,2,0)$ is connected. \end{Teo} \begin{proof} By Theorem \ref{M(-1,2,0)}, $$ {\mathcal M}(-1,2,0)=\overline{{\mathcal B}(-1,2)}\cup\mathrm{T}(-1,2,2, 1)\cup\mathrm{T}(-1,2,4,2)\cup\mathrm{X}(-1,1,1,1,0). $$ We are going to prove that: (i) the component $\overline{ {\mathcal B}(-1,2)}$ intersects the components $\mathrm{T}(-1,2,2, 1)$ and $\mathrm{T}(-1,2,4,2)$; (ii) the component $\mathrm{X} (-1,1,1,1,0)$ intersects the component $\mathrm{T}(-1,2,4,2 )$. This will imply the connectedness of ${\mathcal M}(-1,2,0)$.
(i) By \cite[Example 3.1.2] {Harshorne-Reflexive}, the generic sheaf $[E]\in{\mathcal B}(-1,2)$ fits into an exact triple of the form \begin{equation}\label{conicextension} 0 \to \mathcal{O}_{\mathbb{P}^3}(-2) \to E \to {\mathcal I}_{Z}(1) \to 0 ~~, \end{equation} where $I_Z$ is the ideal sheaf of a disjoint union of two conics in $\mathbb{P}^3$. The proof is similar to the proof of Theorem \ref{connected1}, with minor changes, that we will include here for completeness.
Consider the following two $1$-dimensional flat families of curves $\mathcal{Z}^1$ and $\mathcal{Z}^2$ in $\mathbb{P}^3$, with base $U$ open subset in $\mathbb{A}^1$ containing the point $0$, and with projections $\pi_i: \mathcal{Z}^i\hookrightarrow \mathbb{P}^3\times U\xrightarrow{pr_2}U$, $i=1,2,$ such that $\mathcal {Z}^i$ satisfies the conditions ($a$) and ($b_i$), $i=1,2$, where:\\ ($a$) for $t\ne0$, the fiber $Z_t^i:=\pi_i^{-1}(t)$ is a disjoint union of two conics;\\ ($b_1$) the fiber $Z^1_0$ at $0$ as a set is the union of two smooth conics, $C_1$ and $C_2$ meeting in a unique point, say $p$, i. e., $(Z_0^1)_{red}=C_1\cup C_2$ and $p=C_1\cap C_2$, and as a scheme $Z_0^1$ has an embedded point $p$ such that the following triple is exact: \begin{equation}\label{pointpnovo} 0\to {\mathcal O}_p\to{\mathcal O}_{Z_0^1}\to{\mathcal O}_{(Z_0^1)_{red}}\to0. \end{equation} ($b_2$) the fiber $Z^2_0$ at $0$ is a union of two conics, $C_1$ and $C_2$ meeting in two distinct fat points of multiplicity $2$. That is, $(Z_0^2)_{red} =C_1\cup C_2 $, and $\{p_1,p_2\}=C_1\cap C_2$, and as scheme $Z_0^2$ has two embedded points $p_1$ and $p_2$: \begin{equation}\label{2pointp} 0\to{\mathcal O}_{p_1}\oplus{\mathcal O}_{p_2}\to{\mathcal O}_{Z_0^2}\to {\mathcal O}_{(Z_0^2)_{red}}\to0. \end{equation} Now similarly to \eqref{flat over A1} we obtain the exact triples: \begin{equation}\label{flat over U} 0\to\mathcal{O}_{\mathbb{P}^3}(-2)\boxtimes{\mathcal O}_U\to \mathbf{E}^i\to {\mathcal I}_{\mathcal{Z}^i,\mathbb{P}^3\times U}\otimes \mathcal{O}_{\mathbb{P}^3}(1)\boxtimes{\mathcal O}_U\to0,\ \ \ \ i=1,2, \end{equation} such that, for $t\in U$, the restriction of \eqref{flat over U} is a nonsplitting extension of $\op3$-sheaves $0\to\mathcal{O}_{\mathbb{P}^3}(-2)\to E^i_t\to {\mathcal I}_{Z^i_t,\mathbb{P}^3}(1)\to0$, where
$E^i_t:=\mathbf{E}^i|_{\mathbb{P}^3\times\{t\}}$. Hence, $[E^i_t]\in{\mathcal M}(-1,2,0)$, i. e., we obtain modular morphisms $\Phi_i:\ U\to{\mathcal M}(-1,2,0),\ \ t\mapsto[E^i_t]$. Note that, for $t\ne0$, each $[E_t^i]\in{\mathcal B}(-1,2)$ by \cite[Example 3.1.2]{H1978}. Hence, also $[E_0^i]\in\overline {{\mathcal B}(-1,2)}$, $i=1,2$. Besides, $E_0^i$ fit in the following exact triples: \begin{equation}\label{E0i} 0 \to \mathcal{O}_{\mathbb{P}^3}(-2) \stackrel{r_i}{\to} E_0^i\to {\mathcal I}_{Z_0^i,\mathbb{P}^3}(1)\to0,\ \ \ i=1,2. \end{equation} The triples \eqref{pointpnovo},\eqref{2pointp} and \eqref{E0i}, yield the following exact sequences: \begin{equation}\label{E0p1} 0\to E_0^1\to (E_0^1)^{\vee\vee}\to\mathcal{O}_p\to 0, \end{equation} \begin{equation}\label{E0p2} 0\to E_0^2\to (E_0^2)^{\vee\vee}\to \mathcal{O}_{p_1} \oplus \mathcal{O}_{p_2} \to 0, \end{equation} \begin{equation}\label{stabilityi} 0\to \mathcal{O}(-2) \stackrel{s_i}{\to} (E_0^i)^{\vee\vee} \to {\mathcal I}_{(Z_0^i)_{red},\mathbb{P}^3}(1)\to 0,\ \ \ \ \ i=1,2, \end{equation} where $s_i$ is the composition morphism $r_i$ from \eqref{E0i} with the canonical monomorphism $E_0^i\to(E_0^i)^ {\vee\vee}$. From sequence \eqref{stabilityi} and \cite[Proposition 4.2] {Harshorne-Reflexive} we conclude that $(E_0^i)^{\vee\vee}$ is stable, i. e. $[(E_0^i)^{\vee\vee}]\in{\mathcal M}(-1,2,2i)$, $i=1,2$. Thus, \eqref{E0p1} and Theorem \ref{M(-1,2,0)}.(c) yield $[E_0^1]\in\mathrm{T}(-1,2,2,1)$; respectively, \eqref{E0p2} and Theorem \ref{M(-1,2,0)}.(d) yield $[E_0^2] \in\mathrm{T}(-1,2,4,2)$. Since, by the above, $[E_0^1], [E_0^2]\in\overline{{\mathcal B}(-1,2)}$, $i=1,2$, it follows, that $\overline{{\mathcal B}(-1,2)}\cap\mathrm{T}(-1,2,2,1)\ne \emptyset$ and $\overline{{\mathcal B}(-1,2)}\cap\mathrm{T} (-1,2,4,2)\ne\emptyset$, as stated.
(ii) Fix a sheaf $[F]\in{\mathcal R}^*(-1,1,1)$, a line $l$ in $\mathbb{P}^3$ such that $l\cap\operatorname{Sing}(F)=\emptyset$, and two distinct points $p_1,p_2\in l$. Consider the surface $S=l\times\mathbb {A}^1$ with the projection $pr _2:S\to\mathbb{A}^1$. The points $p_1,p_2$ define two points $\tilde{p}_i=(p_i,0)\in
pr_2^{-1}(0),\ i=1,2$. Since $F|_l\cong{\mathcal O}_l\oplus {\mathcal O}_l(-1)$ (see \eqref{splitingF}), it follows that there exists an epimorphism $\mathbf{e}:F\boxtimes{\mathcal O}_{\mathbb {A}^1}\twoheadrightarrow{\mathcal B}:={\mathcal I}_{\tilde{p}_1\sqcup \tilde{p}_2,S}\otimes{\mathcal O}_l(1)\boxtimes{\mathcal O}_{\mathbb{A}^1}$. Consider an ${\mathcal O}_{\mathbb{P}^3\times\mathbb{A}^1}$-sheaf $\mathbf{E}= \ker\mathbf{e}$, flat over $\mathbb{A}^1$ and, for $t\in
\mathbb{A}^1$, set $E_t:=\mathbf{E}|_{\mathbb{P}^3\times\{t\}}$. By construction, the restriction of the exact triple $0\to \mathbf{E}\to F\boxtimes{\mathcal O}_{\mathbb{A}^1}\to{\mathcal B}\to0$ onto $\mathbb{P}^3\times\{t\}$ yields the exact sequences \begin{equation}\label{seq 1} 0\to E_t\to F\to{\mathcal O}_l(1)\to0,\ \ \ \ \ \ t\in\mathbb{A}^1 \smallsetminus\{0\}, \end{equation} \begin{equation}\label{seq 2} 0\to E_0\to F\to{\mathcal O}_l(-1)\oplus{\mathcal O}_{p_1}\oplus{\mathcal O}_{p_2} \to0, \end{equation} and there is a modular morphism $\Psi:\mathbb{A}^1\to {\mathcal M}(-1,2,0),\ t\mapsto[E_t]$. By the definition of $\mathcal{X}(-1,1,1,1,0)$ and $\mathrm{X}(-1,1,1,1,0)$ (see \eqref{familyX} and \eqref{def calX}), it follows from \eqref{seq 1} that $[E_t]\in\mathcal{X}(-1,1,1,1,0)$ for $t\in\mathbb{A}^1\smallsetminus\{0\}$, i. e. $\Psi(\mathbb {A}^1\smallsetminus\{0\})\subset \mathcal{X}(-1,1,1,1,0) \subset\mathrm{X}(-1,1,1,1,0)$. Hence, $[E_0]\in\mathrm{X} (-1,1,1,1,0)$. On the other hand, by the definition of $\mathcal{X}(-1,1,1,-1,2)$, $[E_0]\in\mathcal{X}(-1,1,1,-1,2) \subset\mathrm{X}(-1,1,1,-1,2)$. Since by Theorem \ref{X in T}.(ii) $\mathrm{X}(-1,1,1,-1,2)$ lies in $\mathrm{T} (-1,2,4,2)$, it follows that $\mathrm{X}(-1,1,1,1,0)\cap \mathrm{T}(-1,2,4,2)\ne\emptyset$. \end{proof}
The first part of the previous proof can also be regarded as a proof of the following claim. Let $E$ be a stable torsion free sheaf with $(c_1(E),c_2(E),c_3(E))=(-1,2,0)$ such that \begin{itemize} \item[(i)] there exists a nontrivial section in $H^0(E^{\vee\vee}(2))$ that vanishes along the union of two conics intersecting in a point $p$, and \item[(ii)] $E^{\vee\vee}/E=\mathcal{O}_p$. \end{itemize} Then $E$ is smoothable. Indeed, these two hypotheses imply that $E$ fits into the following exact sequence $$ 0 \to \op3(-2) \to E \to {\mathcal I}_Z(1) \to 0, $$ where $Z$ coincides with the scheme $Z^1_0$ described in item ($b_1$) above. Deforming $Z$ into a union of disjoint conics, we obtain a deformation of $E$ into a locally free sheaf $F$ with $[F]\in{\mathcal B}(-1,2)$.
Similarly, if $E$ satisfies the following two hypotheses \begin{itemize} \item[(i')] there exists a nontrivial section in $H^0(E^{\vee\vee}(2))$ that vanishes along the union of two conics intersecting in two points $p$ and $q$; \item[(ii')] $E^{\vee\vee}/E=\mathcal{O}_p\oplus\mathcal{O}_q$, \end{itemize} then $E$ is smoothable.
\end{document} |
\begin{document}
\author{ \footnotesize L. Biasco \& L. Chierchia \\ \footnotesize Dipartimento di Matematica e Fisica \\ \footnotesize Universit\`a degli Studi Roma Tre \\ \footnotesize Largo San Leonardo Murialdo 1 - 00146 Roma, Italy \\ {\footnotesize luca.biasco@uniroma3.it, luigi.chierchia@uniroma3.it} \\ }
\title{
Global properties of generic real--analytic nearly--integrable Hamiltonian systems
}
\begin{abstract}\noindent We introduce a new class ${\mathbb G}^n_{s}$ of generic real analytic potentials on $\mathbb T^n$ and study global analytic properties of
natural nearly--integrable Hamiltonians $\frac12 |y|^2+\varepsilon f(x)$, with potential $f\in {\mathbb G}^n_{s}$, on the phase space $ {\mathcal M} =B\times \mathbb T^n$ with $B$ a given ball in $\mathbb R^n$. The phase space $ {\mathcal M} $ can be covered by three sets: a `non--resonant' set, which is filled up to an exponentially small set of measure $e^{-c {\mathtt K}}$ (where ${\mathtt K}$ is the maximal size of resonances considered) by primary maximal KAM tori; a `simply resonant set' of measure $\sqrt{\varepsilon} {\mathtt K}^a$ and a third set of measure $\varepsilon {\mathtt K}^b$ which is `non perturbative', in the sense that the $\ham$--dynamics on it can be described by a natural system which is {\sl not} nearly--integrable. We then focus on the simply resonant set -- the dynamics of which is particularly interesting (e.g., for Arnol'd diffusion, or the existence of secondary tori) -- and show that on such a set the secular (averaged) 1 degree--of--freedom Hamiltonians (labelled by the resonance index $k\in {\mathbb Z} ^n$) can be put into a universal form (which we call `Generic Standard Form'), whose main analytic properties are controlled by {\sl only one parameter, which is uniform in the resonance label $k$}. \end{abstract}
\allowdisplaybreaks
\section*{Introduction}\label{maindefinitions}
{
\noindent} The paper is divided into three parts.
{
\noindent} {\bf 1.} In the first part we discuss generic properties of (multi--periodic) analytic functions introducing a new class ${\mathbb G}^n_{s}$ of functions
real analytic on the complex neighborhood of $\mathbb T^n$ given by
\[
{\mathbb T} ^n_s:=\{x=(x_1,...,x_n)\in {\mathbb C} ^n:\ \ |\, {\rm Im}\, x_j|<s\}/(2\pi {\mathbb Z} ^n) \,. \]
Such a class -- related to, but smaller than, the sets $ {\mathcal H} _{s,\t}$ introduced in \cite{BCnonlin} -- is generic, as
it contains an open and dense set in the norm $\|f\|_s=\sup_k |f_k|e^{s\noruno{k}}$, and has full probability measure with respect to a natural weighted product probability measure on Fourier coefficients. \\ The class ${\mathbb G}^n_{s}$ may be described as follows. Consider a real--analytic zero average function $f$ and consider its projection $ \pi_{\!{}_{\integer k}} f$ onto the Fourier modes proportional to a given $k\in {\mathbb Z} ^n\, \backslash\, \{0\}$ (with components with no common divisors), which is given by
\[ \sa\in {\mathbb T} \mapsto \pi_{\!{}_{\integer k}} f (\sa):=\sum_{j\in {\mathbb Z} } f_{jk} e^{\ii j\sa}\,.
\] These one dimensional projections arise naturally, e.g., in averaging theory, where they are the leading terms of the averaged (`secular') Hamiltonians around simple resonances $\{y|\, y\cdot k= \sum y_j k_j=0\}$. Denoting by $\gen$ the set of generators of 1--dimensional maximal lattices in $\mathbb{Z}^n$,
the class ${\mathbb G}^n_{s}$ is formed by real--analytic zero average functions $f$ with $\|f\|_s\le 1$, which satisfy \[
\d_{\!{}_f}=\varliminf_{\sopra{\noruno{k}\to+{\infty}}{k\in\gen}} |f_k| e^{\noruno{k}s} \noruno{k}^n>0 \,, \] and such that the Fourier--projection $\pi_{\!{}_{\integer k}} f$ is a Morse function with distinct critical values for all $k\in\gen$ with $\noruno{k}\le \mathtt N$, where $\mathtt N$ is an {\sl a priori} Fourier cut--off depending only on $n,s$ and $\d_{\!{}_f}$. \\ A remarkable feature of this class of functions is that the Fourier projection $\pi_{\!{}_{\integer k}} f$ is close (in a large analytic norm) to a shifted rescaled cosine, \[
\pi_{\!{}_{\integer k}} f(\sa)\sim |f_k|\cos (\sa+\sa_k)\,; \qquad \forall k\in\gen\,,\noruno{k}\ge \mathtt N\,, \] (Proposition~\ref{pollaio} below), allowing to have uniform control of the analytic properties of secular Hamiltonians as $\noruno{k}\to+{\infty}$.
{
\noindent}
We believe that the class ${\mathbb G}^n_{s}$ is a good candidate to address analytic problems in dynamical systems whenever generic results -- such as generic existence of secondary tori in nearly--integrable Hamiltonian systems\footnote{I.e., tori which are not a continuous deformation of integrable $(\varepsilon=0$) flat tori; for references, see
\cite{Nei89},
\cite{MNT},
\cite{Simo}, \cite{AM},
\cite{BCnonlin},
\cite{FHL},
\cite{BClin2}.}
or Arnol'd diffusion\footnote{\label{AD}See
\cite{A64}; compare also, e.g., \cite{CG}, \cite{DH}, \cite{Z}, \cite{Ma}, \cite{T},
\cite{DLS},
\cite{BKZ},
\cite{DS},
\cite{KZ},
\cite{GPSV},
\cite{CdL22}, \cite{CFG}.} -- are considered.
{
\noindent} {\bf 2.}
In the rest of the paper, we consider natural nearly--integrable Hamiltonian systems with $n\ge 2$ degrees of freedom with Hamiltonian $\ham=\frac12 |y|^2 +\varepsilon f(x)$ ($n\ge 2$),
with potential $f$ in the class ${\mathbb G}^n_{s}$ with a fixed $s>0$, on a bounded phase space $ {\mathcal M} =\DD\times \mathbb T^n\subset \mathbb R^n\times \mathbb T^n$; in fact, in view of the model, it is not restrictive to simply consider $\DD=\{y\in \mathbb R^n\st |y| <1\}$.
\\ We, then, introduce a covering of the action space $\DD= \Rz\cup\Ru\cup \Rd$, depending on two `Fourier cut--offs' ${\mathtt K}>{\mathtt K}_{{}_{\rm o}}>\mathtt N$ ($\mathtt N$ as above), so that: $\Rz$ is a non--resonant set up to order ${\mathtt K}_{{}_{\rm o}}$; $\Ru$ is union of neighborhoods $\Ruk$ of simple resonances $\{y\in \DD: y\cdot k=0\}$ of maximal order ${\mathtt K}_{{}_{\rm o}}$, which are non resonant modulo $ {\mathbb Z} k$ up to order ${\mathtt K}$, and $\Rd$ is a set of measure proportional to $\varepsilon {\mathtt K}^b$ for a suitable $b>1$ (which depends only on $n$); similar `geometry--of--resonances' analysis is typical of Nekhoroshev's theory\footnote{Compare \cite{Nek}, \cite{BGG}, \cite{poschel}, \cite{DG}, \cite{Nie00},
\cite{DH}, \cite{GCB},
\cite{BFN},
\cite{NPR}. }. \\
The set $\Rd$ is {\sl a non perturbative set}, namely, it is a set where the $\ham$--dynamics is equivalent to the dynamics of a Hamiltonian, which is {\sl not nearly--integrable}: indeed, in the simplest non trivial case $n=2$ such a Hamiltonian is given by $|y|^2/2 + f(x)$.
\\
On the other hand the set $(\Rz\cup \Ru)\times \mathbb T^n$ is suitable for high order perturbation theory, and,
following the averaging theory developed in \cite{BCnonlin}, we construct high order normal forms
(Theorem~\ref{normalform}) so that on $\Rz\times\mathbb T^n$ the above Hamiltonian $\ham$ is conjugated, up to an exponentially small term of $O(e^{-{\mathtt K}_{{}_{\rm o}} s/3})$, to an integrable Hamiltonian, which depends only on action variables and it is close to $|y|^2/2$. By classical KAM theory, it then follows that this set is filled by primary\footnote{Primary tori are smooth deformation of the flat Lagrangian integrable ($\varepsilon=0$)
tori.} KAM tori up to a set of measure of order $O(e^{-{\mathtt K}_{{}_{\rm o}} s/7})$. Actually, here there is a delicate point: the symplectic map realizing the above mentioned conjugation moves the boundary of the phase space $\DD\times \mathbb T^n$ by a quantity much larger than
$O(e^{-{\mathtt K}_{{}_{\rm o}} s/7})$, therefore, in order to get the exponentially small measure estimate on the `non--torus set' one needs to introduce a second covering which takes care of the dynamics close to the boundary: this is done in Lemma~\ref{sedformosa} below. \\ The analysis on the dynamics in $\Ru\times\mathbb T^n$ is {\sl much} more complicated. In each of the neighborhoods $\Ruk$, which cover the set $\Ru$ as $\noruno{k}\le {\mathtt K}_{{}_{\rm o}}$, one can perform resonant averaging theory so as to conjugate $\ham$ to still an integrable system, which however depends on the resonant angle $\ttx_1=k\cdot x$. The averaged systems with secular Hamiltonians $\hamsec_k(\tty,\ttx_1)$ are therefore 1D--Hamiltonian systems (one degree--of--freedom systems in the symplectic variables $(\tty_1,\ttx_1)$ depending also on adiabatic actions $\tty_2,...,\tty_n$), which are close to natural systems with potentials $\pi_{\!{}_{\integer k}} f$. Such potentials, for low $k$'s, are rather general: for instance, they may have an arbitrarily large number of separatrices depending on the particular structure of $f$. The global analytic properties of the Hamiltonians $\hamsec_k(\tty,\ttx_1)$ are the subject of the third (and main) part of this paper.
{
\noindent} {\bf 3.} In the third part we prove that the secular Hamiltonians $\hamsec_k(\tty,\ttx_1)$ described in the previous item {\sl can be symplectically conjugated, for all $\noruno{k}\le {\mathtt K}_{{}_{\rm o}}$, to 1D--Hamiltonians in the standard form introduced in} \cite{BCaa23} (see, also, Definition~\ref{morso} below). In a few words, a standard 1D--Hamiltonian (which depends on $(n-1)$ external parameters) is a one degree--of--freedom Hamiltonian system close to a natural system with a generic potential, which may be controlled essentially by {\sl only one parameter}, namely, the parameter $\kappa$ appearing in Eq. \equ{alce} below; here, `essentially' means, roughly speaking, that $\kappa$ governs the main scaling properties of the Hamiltonian $\hamsec_k$. What is particularly relevant is that the $\kappa$ parameter of the secular Hamiltonians $\hamsec_k$ is shown to be {\sl independent of $k$}, as it depends only on $n$, $s$, the above parameter $\d_{\!{}_f}$, and on a fourth parameter $\b$, which measures the Morse properties of the potentials $\pi_{\!{}_{\integer k}} f$ with $\noruno{k}\le \mathtt N$; compare Eq. \equ{kappa} and Remark~\ref{rampulla}--(i).\\ This uniformity allows one to analyze global analytic properties: for example,
the action--angle map for standard Hamiltonians, as discussed in \cite{BCaa23}, depends only on the parameter $\kappa$ of the standard Hamiltonian and therefore can be used {\sl simultaneously} for all the secular Hamiltonians $\hamsec_k$, allowing for a nearly--integrable description of $\ham$ on $\Ruk\times {\mathbb T} ^n$ with uniformly exponentially small perturbations.
{
\noindent} The results presented in this paper may be useful in attacking some of the fundamental open problems in the analytic theory of nearly--integrable Hamiltonian systems such as Arnol'd diffusion for generic real analytic systems, and provide indispensable tools to develop a `singular KAM Theory', namely a KAM theory dealing simultaneously with primary and secondary persistent Lagrangian tori in the full phase space, except for the non--perturbative set $\Rd$. In particular, Theorem~\ref{sivori} below is the starting point for, e.g., the following result, which (up to the logarithmic correction and in the case of natural systems) proves a conjecture by Arnold, Kozlov and Neishtadt\footnote{See, \cite[Remark~6.18, \S~6.3--C]{AKN}.},
{
\noindent} {\bf Theorem (\cite{BClin2})} {\sl Fix $n\ge 2$, $s>0$, $f\in {\mathbb G}^n_{s}$, $B$ an open ball in $\mathbb R^n$, and let
$\displaystyle \ham(y,x;\varepsilon):=\frac12 |y|^2 +\varepsilon f(x)$. Then, there exists a constant $c>1$ such that, for all $0<\varepsilon<1$, all points in $B\times \mathbb T^n$
lie on a maximal KAM torus for $\ham$, except for
a subset of measure bounded by
$
\, c\, \varepsilon |\ln \varepsilon|^{\g}$ with $\g:=11 n +4$. }
{
\noindent} Let us remark that, since it is well known that the asymptotic (as $\varepsilon \to 0$) density of non--integrable {\sl primary} tori is $1-c\sqrt\varepsilon$ (see \cite{Nei}, \cite{P82}), the difference of order of the density of invariant maximal tori in the above theorem must come from secondary tori, i.e., the tori in $\Ru\times\mathbb T^n$ whose leading dynamics is governed by the secular Hamiltonians $\hamsec_k(\tty,\ttx_1)$ discussed in this paper.
\section{Generic real analytic periodic functions} \label{gennaro}
We begin with a few definitions.
\dfn{pelle}{\bf (Norms on real analytic periodic functions)}\\ For $s>0$ and $n\in {\mathbb N} =\{1,2,3...\}$,
consider the Banach space of zero average real analytic periodic functions $\displaystyle f:x\in {\mathbb T} ^n:= {\mathbb R} ^n/(2\pi \mathbb{Z}^n)\mapsto \sum_{ k\in {\mathbb Z} ^n}f_k e^{\ii k\cdot x}$, $f_0=0$,
with finite norm\footnote{As usual $\noruno{k}:=\sum |k_j|$.} \[
\|f\|_s:=\sup_{k\in {\mathbb Z} ^n} |f_k| e^{\noruno{k}s}\,,
\] and denote by ${\mathbb B}^n_s$ the closed unit ball of functions $f$ with $\|f\|_s\le 1$. \edfn
{
\noindent} Besides the norm $\|\cdot\|_s$, we shall also use the following two (non equivalent) norms $$
{\modulo}f{\modulo}_{s}:=\sup_{\mathbb T^n_s}|f|\,,\qquad \,\thickvert\!\!\thickvert\, f\,\thickvert\!\!\thickvert\,_{s}:= \sum_{k\in\mathbb{Z}^n}
|f_k| e^{|k|_{{}_1}s}\,. $$ Such norms satisfy the relations
$$\|f\|_{s}\leq {\modulo}f{\modulo}_{s} \leq \,\thickvert\!\!\thickvert\, f\,\thickvert\!\!\thickvert\,_{s}\,. $$ Notice also the following
`smoothing property' of the norm $\,\thickvert\!\!\thickvert\, \cdot\,\thickvert\!\!\thickvert\,_{r,s}$: {\sl if $s'\leq s$, then for any $N\ge 1$, one has} \begin{equation}\label{lesso} f(y,x)=\sum_{\noruno{k}\geq N}f_k(y)e^{\ii k\cdot x} \qquad \Longrightarrow \qquad \,\thickvert\!\!\thickvert\, f \,\thickvert\!\!\thickvert\,_{r,s'}\leq e^{-(s-s')N} \,\thickvert\!\!\thickvert\, f \,\thickvert\!\!\thickvert\,_{r,s}\,. \end{equation}
\dfn{generators}{\bf (Generators and Fourier projectors)}\\ {\rm (i)} Let $ {\mathbb Z} ^n_\varstar$ be the set of integer vectors $k\neq 0$ in $ {\mathbb Z} ^n$ such that the first non--null component is positive: \beq{iguazu}
{\mathbb Z} ^n_\varstar:=
\big\{ k\in {\mathbb Z} ^n:\ k\neq 0\ {\rm and} \ k_j>0\ {\rm where}\ j=\min\{i: k_i\neq 0\}\big\}\,,
\end{equation} and denote by $\gen$ the set of {\sl generators of 1d maximal lattices} in $ {\mathbb Z} ^n$, namely, the set of vectors $k\in {\mathbb Z} ^n_\varstar$ such that the greatest common divisor ({\rm gcd}) of their components is 1: \[ \gen:=\{k\in {\mathbb Z} ^n_\varstar:\ {\rm gcd} (k_1,\ldots,k_n)=1\}\,. \] Let us also denote by ${\cal G}^n_{K}$ the generators of size not exceeding $K\ge 1$, \[ {\cal G}^n_{K}:=\gen \cap \{\noruno{k} \leq K \}\,, \] {\rm (ii)} Given a zero average real analytic periodic function $f$ and $k\in \gen$, we define
\beq{kproj} \sa\in {\mathbb T} \mapsto \pi_{\!{}_{\integer k}} f (\sa):=\sum_{j\in {\mathbb Z} } f_{jk} e^{\ii j\sa}\,. \end{equation} \edfn Notice that $f$ can be uniquely written as \[ f(x)= \sum_{k\in \gen} \pi_{\!{}_{\integer k}} f (k\cdot x)\,. \]
\begin{definition}\label{buda} Let $\b>0$. A function $F\in C^2(\mathbb T,\mathbb R)$ is called a {\bf $\b$--Morse function} if \[
\min_{\sa\in\mathbb T} \ \big( |F'(\sa)|+|F''(\sa)|\big) \geq\beta \,, \quad \min_{i\neq j }
|F(\sa_i)-F(\sa_j)| \geq \beta \,, \] where $\sa_i\in\mathbb T$ are the critical points of $F$. \end{definition}
\dfn{pigro}{\bf (Cosine--like functions)}
Let $0<\ttg< 1/4$.
We say that a real analytic function $G:\mathbb T_1\to\mathbb{C}$ is $\ttg$--cosine--like if, for some $\eta>0$ and
$\sa_0\in\mathbb R$, one~has
\[
{\modulo}G(\sa)-\eta\cos (\sa+\sa_0) {\modulo}_1
:= \sup_{|\, {\rm Im}\, \sa|<1}{\modulo}G(\sa)-\eta\cos (\sa+\sa_0) {\modulo}
\leq \eta\ttg\,. \]
\edfn Notice that this notion is invariant by rescalings: $G$ is $\ttg$--cosine--like if and only if $\l G$ is $\ttg$--cosine--like for any $\l>0$. Beware of the usage of $|\cdot|_1$ as sup norm on $ {\mathbb T} _1$, the complex strip of width~2.
{
\noindent} Now, the main definition.
\dfn{sicuro} {\bf (The analytic class ${\mathbb G}^n_{s}$)} We denote by ${\mathbb G}^n_{s}$ the subset of functions $f\in {\mathbb B}^n_s$ such that the following two properties hold: \beqa{P1} &&
\varliminf_{\sopra{\noruno{k}\to+{\infty}}{k\in\gen}} |f_k| e^{\noruno{k}s} \noruno{k}^n>0\,, \\ &&
\forall \ k\in\gen\,,\ \pi_{\!{}_{\integer k}} f\ {\rm is \ a\ Morse\ function\ with \ distinct \ critical\ values}\,.
\nonumber \end{eqnarray} \edfn
\rem\label{posizione} (i)
If $f\in {\mathbb B}^n_s$, then the function $\pi_{\!{}_{\integer k}} f$ belongs to $ \hol_{|k|_{{}_1}s}^1$ and therefore has a domain of analyticity which increases with the norm of $k$.
{
\noindent} (ii) A simple example of function in ${\mathbb G}^n_{s}$ is given by
\[ f(x):=2 \sum_{k\in\gen} e^{-s\noruno{k}} \cos k\cdot x\,. \] Indeed, one checks immediately that
\[
\|f\|_s=1\,,\qquad
\varliminf_{\sopra{\noruno{k}\to+{\infty}}{k\in\gen}} |f_k| e^{\noruno{k}s} \noruno{k}^n=+{\infty}\,,\qquad \pi_{\!{}_{\integer k}} f (\sa)=2 e^{-s\noruno{k}} \cos \sa\,. \] (iii) The critical points of an analytic Morse function on $ {\mathbb T} $, by compactness, cannot accumulate, hence, there are a finite, even number of them, which are, alternately, a relative strict maximum and a relative strict minimum. In particular, if $G$ is $\b$--Morse, then the number of its critical points can be bounded by
$\pi\sqrt{2\max |G''|/\b}$. Indeed, if $\sa\neq \sa'$ are critical points of $G$, then, by \equ{cimabue} one has
$$\textstyle\b\le|G(\sa)-G(\sa')|\le \frac12 (\max|G''|) \,{\rm dist}(\sa,\sa')^2\,,$$ which implies that the minimal distance between two critical points is $\sqrt{2\b/\max|G''|}$ and the claim follows. \erem
\subsection{Uniform behaviour of large-mode Fourier projections}\label{ironman}
If a function $f\in {\mathbb B}^n_s$ satisfies \equ{P1}, then, {\sl apart from a finite number of Fourier modes, its Fourier projections $\pi_{\!{}_{\integer k}} f$ are close to a shifted rescaled cosine}, a fact that allows, e.g., to have a uniform analytic theory of high order perturbation theory.
{
\noindent} To discuss this matter, let us first point out that for any sequence of real numbers $\{a_k\}$ and for any function $N(\d)$ such that $\lim_{\d\downarrow 0} N(\d)=+\infty$ one has \beq{analisi1} \varliminf a_k>0 \quad \iff \quad \exists \ \d>0\ \ {\rm s.t.}\ \ a_k\ge \d\,,\ \forall\ k\ge N(\d)\,. \end{equation} We shall apply this remark to the minimum limit in \equ{P1} with a particular choice of the function $N(\d)$, namely, we define $\mathtt N(\d)=\mathtt N(\d;n,s)$ as \beq{enne} \mathtt N(\d):=2\, \max\Big\{1\,,\,\frac1s \, \log \frac{c_{{}_{\rm o}}}{s^n\, \d}\Big\} \,,\qquad {c_{{}_{\rm o}}}:= 2^{44}\ (2n/e)^n\,. \end{equation} For later use, we point out that\footnote{In fact, if $s\ge 1$ then $\mathtt N\ge 2\ge 2/s$, while if $s<1$ then the logarithm in \equ{enne} is larger than one, so that $\mathtt N\ge 2/s$ also in this case.} \beq{bollettino1} \mathtt N\ge2 \ttcs\,,\quad {\rm where}\quad \ttcs:=\max\big\{1, 1/s\big\}\,. \end{equation} From \equ{analisi1} it follows that if $f$ satisfies \equ{P1}, one can find $0<\d\le 1$ such that \beq{P1+}
|f_k|\geq \delta \noruno{k}^{-n}\, e^{-\noruno{k} s}\,,\qquad \forall \ k\in\gen\,,\ \noruno{k}\ge\mathtt N\,. \end{equation} The main feature of the above choice of $\mathtt N$ is that, for $\noruno{k}\ge\mathtt N$, $\pi_{\!{}_{\integer k}} f$ is very close to a shifted rescaled cosine function:
\begin{proposition}\label{pollaio} Let $\d>0$, $f\in{\mathbb B}^n_s$ and assume \equ{P1+}.
Then,
for any $k\in \gen $ with $ \noruno{k}\geq \mathtt N$,
$\pi_{\!{}_{\integer k}} f$ is $2^{-40}$--cosine--like (Definition~\ref{pigro}). \end{proposition} \noindent{\bf Proof\ } We shall prove something slightly stronger, namely, that there exists $\sa_k\in[0,2\pi)$ so that \begin{equation}\label{alfacentauri}
\pi_{\!{}_{\integer k}} f(\sa)=2 |f_k| \big(\cos(\sa+ \sa_k)+F^k_\star(\sa)\big)\,,\quad F^k_{\! \varstar}(\sa):=\frac{1}{2|f_k|}\sum_{|j|\geq 2}f_{jk}e^{\ii j \sa}\,, \end{equation} with $F^k_{\! \varstar}\in\hol_1^1$ and (recall the definition of the norms in \equ{norme})
\begin{equation}\label{gallina} \modulo F^k_{\! \varstar} \modulo_1\le \,\thickvert\!\!\thickvert\, F^k_{\! \varstar}\,\thickvert\!\!\thickvert\,_{1}\leq 2^{-40}\,. \end{equation} Indeed, by definition of $\pi_{\!{}_{\integer k}} f$, \[ \pi_{\!{}_{\integer k}} f(\sa):= \sum_{j\in {\mathbb Z} \, \backslash\, \{0\}} f_{jk} e^{\ii j \sa}
= \sum_{|j|=1} f_{jk}e^{\ii j\sa} + \sum_{|j| \ge 2} f_{jk}e^{\ii j\sa} \,,
\] and, defining $\sa_k\in[0,2\pi)$ so that $e^{{\rm i} \sa_k}= f_k/|f_k|$, one has $$
\frac{1}{2|f_k|}\sum_{|j|=1}f_{jk}e^{\ii j \sa}=\, {\rm Re}\, \Big( \frac{f_k}{|f_k|} e^{{\rm i} \sa}\Big)=\, {\rm Re}\, e^{{\rm i} (\sa+\sa_k)} =\cos (\sa+\sa_k)\,, $$ which yields \equ{alfacentauri}. Now, since $f\in{\mathbb B}^n_s$ it is
$|f_k|\le e^{-\noruno{k}s}$
and, by \equ{P1+}, $|f_k|\geq \delta \noruno{k}^{-n}\, e^{-\noruno{k} s}$.
Therefore, for $\noruno{k}\ge \mathtt N$, one has \beqa{onlyyou} \,\thickvert\!\!\thickvert\, F^k_{\! \varstar}\,\thickvert\!\!\thickvert\,_{{}_1}&\stackrel{\equ{alfacentauri}}=&
\frac{1}{2|f_k|}\sum_{|j|\geq 2}|f_{jk}|e^{|j|} \leq
\frac{\noruno{k}^n e^{\noruno{k}s}}{2\d}\sum_{|j|\geq 2}|f_{jk}|e^{|j|}\nonumber \\ &\le&
\frac{\noruno{k}^n e^{\noruno{k}s}}{2\d}\sum_{|j|\geq 2}e^{-|j|(\noruno{k}s-1)} \nonumber \\ &\le& \frac{2 e^2 \noruno{k}^n}{\d} \ e^{-\noruno{k}s} =\frac{2^{n+1} e^2 }{s^n\d} e^{-\frac{\noruno{k}s}2}\ \ \Big(\frac{\noruno{k}s }2\Big)^n e^{-\frac{\noruno{k}s}2} \nonumber \\ &\le& \Big(\frac{2n}{e s}\Big)^n\, \frac{2e^2}{\d} \, e^{-\frac{\mathtt N s}2} \le 2^{-40}\,, \end{eqnarray} where the geometric series converges since $\noruno{k}s\ge \mathtt N s\ge2 $ (by \equ{bollettino1}) and last inequality follows by definition of $\mathtt N$ in \equ{enne}. \qed
\rem In fact, the particular form of $\mathtt N$ is used {\sl only} in the last inequality in \equ{onlyyou}. \erem
\noindent Next, we need an elementary calculus lemma:
\begin{lemma}\label{pennarello}
Assume that $F\in C^2(\mathbb T,\mathbb R)$, $\bar\sa$ and $0<\mathtt c<1/2$ are such that
$$\|F-\cos (\sa+\bar \sa)\|_{C^2}\le \mathtt c\,,$$
where $\|F\|_{C^2}:=\max_{0\leq k\leq 2}\sup|F^{(k)}|$.
Then, $F$ has only two critical points and it is $(1-2 \mathtt c)$--Morse (Definition~\ref{buda}).
\end{lemma} \noindent{\bf Proof\ } By considering the translated function $\sa\to F(\sa-\bar\sa)$, one can reduce oneself to the case $\bar\sa=0$ ($F$ is $\b$--Morse, if and only if $\sa\to F(\sa-\bar\sa)$ is $\b$--Morse).\\
Thus, we set $\bar \sa=0$, and note that, by assumption $|F'|=|F'+\sin\sa-\sin\sa|\ge|\sin \sa|-\mathtt c $, and, analogously, $|F''|\ge |\cos\sa|-\mathtt c $. Hence, $|F'|+|F''|\ge|\sin\sa|+|\cos\sa|-2\mathtt c \ge 1-2\mathtt c $. Next, let us show that $F$ has a unique strict maximum $\sa_0\in I:=(-\pi/6,\pi/6)$ (mod $2\pi$). Writing $F=\cos\sa+g$, with $g:=F-\cos \sa$, one has that $F'(-\pi/6)=1/2+g'(-\pi/6)\ge 1/2 - \mathtt c >0$, and, similarly $F'(\pi/6)\le -1/2 +\mathtt c $, thus $F$ has a critical point in $I$, and, since $-F''=\cos\sa -g''\ge \cos\sa-\mathtt c \ge \sqrt3/2-\mathtt c >0$, $F$ is strictly concave in $I$, showing that such critical point is unique and it is a strict local maximum. In fact, similarly one shows that $F$ has a second critical point $\sa_1\in (\pi-\pi/6,\pi+\pi/6)$ where $F$ is strictly convex, so that $\sa_1$ is a strict local minimum; but, since
in the complementary of these intervals $F$ is strictly monotone (as it is easy to check), it follows that $F$ has a unique global strict maximum and a unique global strict minimum. Finally, $F(\sa_0)-F(\sa_1)\ge \sqrt3-2\mathtt c >1-2\mathtt c $ and the claim follows. \qed
{
\noindent} From Proposition~\ref{pollaio} and Lemma~\ref{pennarello} one gets immediately:
\begin{proposition}\label{punti} Let $\d>0$, $f\in{\mathbb B}^n_s$ and assume \equ{P1+}. Then, for every $k\in \gen$ with $\noruno{k}\ge \mathtt N$, the function
$\pi_{\!{}_{\integer k}} f$ is $|f_k|$--Morse. \end{proposition} \noindent{\bf Proof\ } As in the proof of Proposition~\ref{pollaio}, we get \beq{derby}
\Big|\frac{\pi_{\!{}_{\integer k}} f}{2f_k} - \cos(\sa+\sa^k)\Big|_1\stackrel{\equ{alfacentauri}}=
|F^k_\star|_1\leq
\,\thickvert\!\!\thickvert\, F^k_\star\,\thickvert\!\!\thickvert\,_1\stackrel{\equ{gallina}} \leq 2^{-40}\,, \end{equation} which implies that the function $F:=\pi_{\!{}_{\integer k}} f/(2f_k)$ is $C^2$--close to a (shifted) cosine: Indeed, by Cauchy estimates
$\|\cdot\|_{C^2}\leq 2 |\cdot|_1$, so that \[
\|F-\cos(\sa+\sa^k)\|_{C^2}=\max_{0\le j\le 2} \max_\mathbb T |\partial_\sa^j(F-\cos(\sa+\sa^k))|\le
2|F^k_\star|_1 \stackrel{\equ{derby}} \leq 2^{-39} \,. \] By Lemma~\ref{pennarello} we see that $F$ is $(1-2^{-38})$--Morse, and the claim follows by rescaling.~\qed
\subsection{Genericity}
In this section we prove that ${\mathbb G}^n_{s}$ is a generic set in ${\mathbb B}^n_s$.
\dfn{sicuro2} Given $n,s>0$, $0<\d\le 1$ and $\b>0$ and $\mathtt N$ as in \equ{enne} we call ${\mathbb G}^n_{s}(\d,\b)$ the set of functions in ${\mathbb B}^n_s$ which satisfy \equ{P1+} together with:
\beq{P2+} \pi_{\!{}_{\integer k}} f\ {\rm is \ \b\!-\!Morse}\,,\qquad \, \ \ \forall \ k\in\gen\,,\ \noruno{k}\le \mathtt N\,. \end{equation} \edfn Then, the following lemma holds:
\begin{lemma}
\label{telaviv} Let $n,s>0$. Then, $\displaystyle {\mathbb G}^n_{s}=\bigcup_{\sopra{\d\in (0,1]}{\b>0}}{\mathbb G}^n_{s}(\d,\b)$. \end{lemma}
\noindent{\bf Proof\ } Assume $f\in {\mathbb G}^n_{s}$ and
let $0<\d_0\le 1$ be smaller than the limit inferior in \equ{P1}. Then, there exists $N_0$ such that $|f_k|>\d_0 \noruno{k}^{-n} e^{-\noruno{k}s}$, for any $\noruno{k}\ge N_0$, $k\in\gen$. Since $\lim_{\d\to 0} \mathtt N=+\infty$, there exists $0<\d<\d_0$ such that $\mathtt N>N_0$. Hence, if $\noruno{k}\ge \mathtt N$ and $k\in\gen$, \equ{P1+} holds. \\ Since $\pi_{\!{}_{\integer k}} f$ is, for any $\noruno{k}\le \mathtt N$, a Morse function with distinct critical values one can, obviously, find a $\b>0$ for which \equ{P2+} holds. Hence $f\in {\mathbb G}^n_{s}(\d,\b)$.
{
\noindent} Now, let $f\in \bigcup {\mathbb G}^n_{s}(\d,\b)$. Then, there exist $\d\in (0,1]$ and $\b>0$ such that \equ{P1+} and \equ{P2+} hold. Then, \equ{P1} follows immediately from \equ{P1+}. By Proposition~\ref{pollaio}, for any $k\in \gen$ with $\noruno{k}> \mathtt N$,
$\pi_{\!{}_{\integer k}} f$ is $2^{-40}$--cosine--like, showing (Lemma~\ref{pennarello}) that $\pi_{\!{}_{\integer k}} f$ is Morse with distinct critical values also for $\noruno{k}\ge \mathtt N$. The proof is complete. \qed
\begin{proposition}\label{adso} ${\mathbb G}^n_{s}$ contains an open and dense set in ${\mathbb B}^n_s$. \end{proposition}
{
\noindent} To prove this result we need a preliminary elementary result on real analytic periodic functions:
\begin{lemma}\label{trifolato} Let $F=\sum F_j e^{\ii j \sa}$ be a real analytic function on $ {\mathbb T} $.
There exists a compact set $\G\subseteq\mathbb{C}$ (depending on $F_j$ for $|j|\ge 2$) of zero Lebesgue measure such that if the Fourier coefficient $F_1$ does not belong to $\G$, then $F$ is a Morse function with distinct critical values. \end{lemma} \noindent{\bf Proof\ } Without loss of generality we may assume that $F$ has zero average. Then, letting $z:=F_1\in {\mathbb C} $, we write $F$ as \begin{equation}\label{efesta} F(\sa)= z e^{{\rm i} \sa} + \bar z e^{-{\rm i} \sa} + G(\sa):=z e^{{\rm i} \sa} + \bar z e^{-{\rm i} \sa} +
\sum_{|j|\geq 2} F_je^{\ii j\sa} \,. \end{equation}
When $G\equiv 0$ the claim is true with $\G=\{0\}$.\\ Assume that $G\not\equiv 0$. Observe that, since $G$ is real--analytic, the equations $ F'(\sa)=0= F''(\sa)$ are equivalent to the single equation $z=\frac12 e^{-{\rm i} \sa} \big( \ii G'(\sa)+G''(\sa) \big)$, which, as $\sa\in {\mathbb T} $, describes a smooth closed `critical' curve $\Gamma_1$ in $ {\mathbb C} $. \\ Observe also that $F$ has distinct critical points $\sa_1,\sa_2\in\mathbb T$ with the same critical values if and only if the following three real equations are satisfied: \beq{odessa} F'(\sa_1)=0\,,\qquad F'(\sa_2)=0\,,\qquad F(\sa_1)-F(\sa_2)=0\,. \end{equation} We claim that if $z,\sa_1,\sa_2$ satisfy \equ{odessa} then
\begin{equation}\label{celebration}
z=\zeta(\sa_1,\sa_2)\,,\qquad
g(\sa_1,\sa_2)=0\,, \end{equation}
with $\zeta$ and $g$ real analytic on $\mathbb T^2$ given by
\begin{eqnarray*} &&\zeta(\sa_1,\sa_2):= \left\{\begin{array}l \frac{\ii}{2(e^{{\rm i} \sa_1}-e^{{\rm i} \sa_2})} \big( G'(\sa_1)-G'(\sa_2) +\ii G(\sa_1) -\ii G(\sa_2) \big)\,,\quad \mbox{for}\ \ \sa_1\neq\sa_2\,; \\ \ \\ \frac1{2e^{{\rm i} \sa_1}} \big( G''(\sa_1)+\ii G'(\sa_1) \big)\,,\phantom{AAAAAAAAAAAAAaa} \mbox{for}\ \ \sa_1=\sa_2\,, \end{array}\right. \\ \ \\ &&
g(\sa_1,\sa_2) := \big(1-\cos(\sa_1-\sa_2)\big)\big( G'(\sa_1)+G'(\sa_2) \big) - \sin (\sa_1-\sa_2)\big( G(\sa_1)-G(\sa_2) \big)\,. \end{eqnarray*} Indeed, summing up the third equation in \eqref{odessa} with the difference of the first two equations multiplied by $-\ii$, we get $$ 2(e^{{\rm i} \sa_1}-e^{{\rm i} \sa_2}) z-{\rm i} \big( G'(\sa_1)-G'(\sa_2) +\ii G(\sa_1) -\ii G(\sa_2) \big)=0\,, $$ which is equivalent to $z=\zeta(\sa_1,\sa_2)$. Then, by definition $g(\sa_1,\sa_1)=0$, while if $\sa_1\neq \sa_2$, substituting $z=\zeta(\sa_1,\sa_2)$ in the first equation in \eqref{odessa} and multiplying by $1-\cos(\sa_1-\sa_2)$ we get $g(\sa_1,\sa_2)=0$ also for $\sa_1\neq\sa_2$. Thus, \eqref{celebration} holds. \\ Next, we claim that the real analytic function $ g(\sa_1,\sa_2)$ is not identically zero. Assume by contradiction that $g$ is identically zero. Then $g(\sa_2+t,\sa_2)\equiv 0$ for every $\sa_2$ and $t$, and taking the fourth derivative with respect to $t$ evaluated at $t=0$, we see that $ G'''(\sa_2)+G'(\sa_2)=0$, for all $\sa_2$. The general (real) solution of such a differential equation is given by $G(\sa_2)= c e^{{\rm i} \sa_2} + \bar c e^{-{\rm i} \sa_2}+c_0,$ with $c\in\mathbb{C},$ $c_0\in\mathbb R, $
which contradicts the fact that, by definition, $G_j=0$ for $|j|\le 1$. Thus, $ g(\sa_1,\sa_2)$ is not identically zero and, therefore, the set $\mathcal Z\subseteq\mathbb T^2$ of its zeros is compact and has zero Lebesgue measure\footnote{Compare, e.g., Corollary 10, p. 9 of \cite{GR}.}. Clearly, also the set $\Gamma_2:=\zeta(\mathcal Z)\subseteq\mathbb{C}$ is compact and has zero measure, and, therefore, if we define $\G=\Gamma_1\cup\Gamma_2$, we see that the lemma holds also in the case $G\not\equiv 0$. \qed
\noindent{\bf Proof\ } {\bf of Proposition~\ref{adso}} Let $\tilde{\mathbb G}^n_{\! s}(\d,\b)$ denote the subset of functions in ${\mathbb G}^n_{s}(\d,\b)$ satisfying the (stronger) condition\footnote{Here, we explicitly indicate the dependence on $\d$, while $n$ and $s$ are fixed. Recall that $\mathtt N(\d)$ is decreasing.} \beq{starstar}
|f_k|> \delta \, e^{-\noruno{k} s}\,,\qquad \forall \ k\in\gen\,,\ \noruno{k}\ge\mathtt N =\mathtt N(\d)\,, \end{equation} and let $\displaystyle \tilde{\mathbb G}^n_{\! s}=\bigcup_{\stackrel{0<\d\le 1}{\b>0}} \tilde{\mathbb G}^n_{\! s}(\d,\b)\,. $ We claim that $\tilde{\mathbb G}^n_{\! s}$ is an open subset of ${\mathbb B}^n_s$.
Let $f\in\tilde{\mathbb G}^n_{\! s}(\d,\b)$ for some ${0<\d\le 1},{\b>0}$ and let us show that there exists $0<\d'\leq\d/2$ such that if $g\in {\mathbb B}^n_s$ with
$\|g-f\|_s<\d'\leq \d/2$, then $g\in\tilde{\mathbb G}^n_{\! s}(\d',\b')$ with $\b':=\min\{\b,\d e^{-s\mathtt N(\d/2)}\}/2$. Indeed $$
|g_k|e^{|k|_{{}_1}s}\geq |f_k| e^{|k|_{{}_1}s} -\|g-f\|_s > \d-\d'\geq\d/2\,, \qquad \forall \ k\in\gen\,,\ \noruno{k}\ge\mathtt N(\d)\,, $$ namely $g$ satisfies \equ{starstar} with $\d/2$ instead of $\d$. We already know that
$\pi_{\!{}_{\integer k}} f$ is $\b\!-\!$Morse $ \forall \ k\in\gen,\, \noruno{k}<\mathtt N(\d)$. Moreover, by Proposition~\ref{punti}, we know that $\pi_{\!{}_{\integer k}} f$ is $|f_k|$--Morse for $k\in \gen$ with $\noruno{k}\ge \mathtt N(\d)$. In conclusion, by \eqref{starstar}, we get that
$\pi_{\!{}_{\integer k}} f$ is $2\b'\!-\!$Morse $ \forall \ k\in\gen,\, \noruno{k}<\mathtt N(\d/2)$. Since the $\|\cdot\|_s$--norm is stronger than the $C^2$--one, taking $\d'$ small enough we get that $\pi_{\!{}_{\integer k}} g$ is $\b'\!-\!$Morse $ \forall \ k\in\gen,\, \noruno{k}<\mathtt N(\d/2)$.
{
\noindent} Let us now show that $\tilde{\mathbb G}^n_{\! s}$ is dense in ${\mathbb B}^n_s$. Fix $f$ in ${\mathbb B}^n_s$ and $0<\loge <1$. We have to find $g\in\tilde{\mathbb G}^n_{\! s}$
with $\|g-f\|_s\leq \loge $. Let $\d:=\loge /4$ and denote by $f_k$ and $g_k$ (to be defined) the Fourier coefficients of, respectively, $f$ and $g$. It is enough to define $g_k$ only for $k\in {\mathbb Z} ^n_\varstar$ since, for $k\in - {\mathbb Z} ^n_\varstar$, we set $g_k:=\bar{g}_{-k}$, $g$ being real analytic. Set $g_k:=f_k$ for
$k\in {\mathbb Z} ^n_\varstar\setminus\gen$.
For
$k\in\gen$, $|k|_{{}_1}\geq \mathtt N(\d)$,
we set $g_k:=f_k$ if
$|f_k|e^{|k|_{{}_1}s}>\d$ and
$g_k:= 2\delta e^{-|k|_{{}_1}s}$ otherwise.
Consider now $k\in\gen$, $|k|_{{}_1}< \mathtt N(\d)$.
We make use of Lemma~\ref{trifolato}
with $F=\pi_{\!{}_{\integer k}} g$, $z=F_1=g_k$. Thus, by Lemma~\ref{trifolato}, there exists a compact set $\G_k\subseteq\mathbb{C}$ (depending on $F_j$ for $|j|\ge 2$) of zero measure such that if $g_k\notin \G_k$ the function $\pi_{\!{}_{\integer k}} g$ is a Morse function with distinct critical values. We conclude the proof of density by choosing
$|g_k|<e^{-|k|_{{}_1}s}$, $|f_k-g_k|\leq \loge e^{-|k|_{{}_1}s}$ with $g_k\notin \Gamma_k$. \qed
\subsection{Full measure} Here we show that {\sl ${\mathbb G}^n_{s}$ is a set of probability 1 with respect to the standard product probability measure on ${\mathbb B}^n_s$}. More precisely, consider the space\footnote{$ {\mathbb Z} ^n_\varstar$ was defined in \eqref{iguazu}.}
${\rm D}^{{ {\mathbb Z} ^n_\varstar}}$, where ${\rm D}:=\{w\in {\mathbb C} :\ |w|\le 1\}$, endowed with the product topology\footnote{
By Tychonoff's Theorem, ${\rm D}^{{ {\mathbb Z} ^n_\varstar}}$ with the product topology is a compact Hausdorff space.
}. The product $\sigma$-algebra of the Borel sets of ${\rm D}^{{ {\mathbb Z} ^n_\varstar}}$ is the $\sigma$--algebra generated by the cylinders $\bigotimes_{k\in{ {\mathbb Z} ^n_\varstar}} A_k$, where $A_k$ are Borel sets of ${\rm D}$, which differ from ${\rm D}$ only for a finite number of $k$. The probability product measure $\mu_{{}_\otimes}$ on
${\rm D}^{{ {\mathbb Z} ^n_\varstar}}$ is then defined by letting $$ \mu_{{}_\otimes} \big(\bigotimes_{k\in{ {\mathbb Z} ^n_\varstar}} A_k \big):= \prod_{k\in{ {\mathbb Z} ^n_\varstar}}
|A_k |\,,
$$
where $|\cdot|$ denotes the normalized ($|{\rm D}|=1$) Lebesgue measure on ${\rm D}$. The (weighted) Fourier bijection\footnote{$f$ is real analytic so that $f_{-k}=\bar f_k.$} \begin{equation}\label{odisseo} \mathcal F:f(x)=\sum_{k\in {\mathbb Z} ^n_\varstar} f_k e^{\ii k\cdot x}+\bar f_k e^{-\ii k\cdot x}
\in {\mathbb B}^n_s \to \big\{f_k e^{|k|_{{}_1}s}\big\}_{k\in { {\mathbb Z} ^n_\varstar}}\in \ell^{\infty}({ {\mathbb Z} ^n_\varstar}) \end{equation} induces a product topology on ${\mathbb B}^n_s$ and a {\sl probability product measure} $\mu$ on the product $\sigma$-algebra $ {\mathcal B} $ of the Borellians in ${\mathbb B}^n_s=\mathcal F^{-1} \big({\rm D}^{{ {\mathbb Z} ^n_\varstar}}\big)$ (with respect to the induced product topology), i.e., given $B\in {\mathcal B} $, we set $\mu(B):=\mu_{{}_\otimes}(\mathcal F(B))$. Then one has:
\begin{proposition}\label{melk}
${\mathbb G}^n_{s}\in {\mathcal B} $ and $\mu({\mathbb G}^n_{s})=1$.
\end{proposition}
\noindent{\bf Proof\ }
First note that, for every $\d,\b>0$ the set
${\mathbb G}^n_{s}(\d,\b)$ is
closed with respect to the product topology.
Indeed for every $ k\in\gen$
the set $\{f\in {\mathbb B}^n_s\ \mbox{s.t.}\
|f_k|\geq \delta \noruno{k}^{-n}\, e^{-\noruno{k} s}\}$
is a closed cylinder. Moreover also
the set $\{f\in {\mathbb B}^n_s\ \mbox{s.t.}\ \pi_{\!{}_{\integer k}} f \ \mbox{is}\ \b\mbox{--Morse}\}$
is closed w.r.t the product topology. In fact we prove that
its complement $E:=\{f\in {\mathbb B}^n_s\ \mbox{s.t.}\ \pi_{\!{}_{\integer k}} f \ \mbox{is not}\ \b\mbox{--Morse}\}$ is open w.r.t.\ the product topology.
Indeed if $f^*\in E$ there exists a $r>0$ small enough such that
$E_r:=\{f\in {\mathbb B}^n_s\ \mbox{s.t.}\ \|\pi_{\!{}_{\integer k}} f-\pi_{\!{}_{\integer k}} f^*\|_{C^2}<r \}\subseteq E.$
Define the open cylinder
$$
E_{\rho,J}:=\{
f\in {\mathbb B}^n_s\ \mbox{s.t.}\
|f_{jk}-f_{jk}^*|<\frac{\rho}{\noruno{j}^2}e^{-\noruno{jk}s}\ \mbox{for}\ j\in\mathbb{Z}\,,\ 0<\noruno{j}\leq J \}\,.
$$
We claim that $E_{\rho,J}\subseteq E_r$ for suitably small $\rho$
and large $J$ (depending on $r$ and $s$).
Indeed, when $f\in E_{\rho,J}$
$$
\|\pi_{\!{}_{\integer k}} f-\pi_{\!{}_{\integer k}} f^*\|_{C^2}
\leq
3\sum_{j\neq 0} \noruno{j}^2 |f_{jk}-f_{jk}^*|
\leq
3\rho\sum_{0<\noruno{j}\leq J}e^{-\noruno{jk}s}
+
6\sum_{\noruno{j}> J}\noruno{j}^2 e^{-\noruno{jk}s}
<r
$$
for suitably small $\rho$
and large $J$.
Therefore $E_{\rho,J}\subseteq E_r\subseteq E$ and $E$ is open in
the product topology.
In conclusion, taking the intersection over $k\in\gen$,
we get that ${\mathbb G}^n_{s}(\d,\b)$ is
closed with respect to the product topology. \\ Recalling Lemma~\ref{telaviv}, we note that ${\mathbb G}^n_{s}$ can be written as $\displaystyle {\mathbb G}^n_{s}=\bigcup_{h\in\mathbb N} {\mathbb G}^n_{s}(1/h,1/h)$. Thus ${\mathbb G}^n_{s}$ is Borellian.
{
\noindent} Let us now prove that $\mu({\mathbb G}^n_{s})=1$. Fix $0<\d\le 1$ and denote by ${\mathbb G}^n_{s}(\d)$ the subset of functions in ${\mathbb B}^n_s$ satisfying \equ{P1+} and such that $\pi_{\!{}_{\integer k}} f$
is a Morse function with distinct critical values for every $ k\in\gen$. Recall \eqref{odisseo} and define $$ \mathbb P_\d:=\mathcal F({\mathbb G}^n_{s}(\d))\subseteq \ell^{\infty}({ {\mathbb Z} ^n_\varstar})\,. $$ Fix $\hat g=(g_k)_{k\in {\mathbb Z} ^n_\varstar\setminus\gen}\in
\ell^{\infty}({ {\mathbb Z} ^n_\varstar\setminus\gen})$ with $|g_k|\leq 1$ for every $k\in {\mathbb Z} ^n_\varstar\setminus\gen$.
Consider the section \[
\mathbb P_\d^{\hat g}:=\big\{ \check g=(g_k)_{k\in\gen},\ |g_k|\leq 1 \ \ \mbox{s.t}\ \ |g_k|\geq \delta \noruno{k}^{-n} \ \mbox{if}\ \noruno{k}\geq \mathtt N\,,\ \ g_k e^{-\noruno{k}s}\notin \G_k\,, \ \mbox{if}\ \noruno{k}< \mathtt N \big\}, \] where the sets $\G_k$ (depending on $\hat g$)
were defined in the proof of Proposition \ref{adso}
so that, for every $k\in\gen,$ $|k|_{{}_1}< \mathtt N$,
if $g_k e^{-\noruno{k}s}\notin \G_k$ then the function\footnote{Recall \eqref{efesta}.} $$
g_k e^{-\noruno{k}s} e^{{\rm i} \sa} + \bar g_k e^{-\noruno{k}s} e^{-{\rm i} \sa} +
\sum_{|j|\geq 2} \hat g_{jk} e^{-\noruno{jk}s} e^{\ii j\sa}
=\pi_{\!{}_{\integer k}} f\,,\ \ \mbox{with}\ \ f:=\mathcal F^{-1}(g)\,,\ \ g=(\check g,\hat g)\,, $$
is a Morse function with distinct critical values. Then, since every $\G_k$ has
zero measure $$
\mu_{{}_\otimes}|_{\ell^{\infty}({\gen})}(\mathbb P_\d^{\hat g})=
\prod_{k\in\gen, |k|_{{}_1}\geq \mathtt N} (1-
\d^2\, |k|_{{}_1}^{-2{n}})\geq 1-c\d^2\,, $$ for a suitable constant $c=c(n)$. Since the above estimate holds for every $\hat g\in \ell^{\infty}({ {\mathbb Z} ^n_\varstar\setminus\gen})$,
by Fubini's Theorem we get $$
\mu_{{}_\otimes}|_{\ell^{\infty}({\gen})}(\mathbb P_\d^{\hat g})=
\mu_{{}_\otimes}(\mathbb P_\d)=\mu({\mathbb G}^n_{s}(\d)) \geq 1-c\d^2\,. $$ Then, $$ \mu({\mathbb G}^n_{s})=\lim_{\d\to 0^+} \mu({\mathbb G}^n_{s}(\d))=1\,. \qedeq $$
\section{Averaging, coverings and normal forms}
In the rest of the paper we consider {\sl real--analytic, nearly--integrable natural Hamiltonian systems} \beqa{ham}\textstyle &&\left\{ \begin{array}{l} \dot y = -\ham_x(y,x)\\ \dot x= \ham_y(y,x) \end{array}\right.\,, \phantom{AAAAAAA}(y,x)\in {\mathbb R} ^n\times {\mathbb T} ^n\,,\nonumber \\
&&\textstyle \ham(y,x;\varepsilon):=\frac12 |y|^2 +\varepsilon f(x)\,,\phantom{AAA} n\ge 2\,,\ 0<\varepsilon<1\,.
\end{eqnarray} As usual, `dot' denotes derivative with respect to `time' $t\in {\mathbb R} $; $\ham_y$ and $\ham_x$ denote the gradients with respect to $y$ and $x$; $|y|^2:=y\cdot y:=\sum_j|y_j|^2$; $ {\mathbb T} ^n$ denotes the standard flat torus $ {\mathbb R} ^n/(2\pi {\mathbb Z} ^n)$, and the phase space $\mathbb R^n\times {\mathbb T} ^n$ is endowed with the standard symplectic form $dy\wedge dx=\sum_j dy_j\wedge dx_j$.
{
\noindent} In this section, we discuss the high order normal forms of generic natural systems, especially in neighbourhoods of simple resonances.
{
\noindent} As standard in perturbation theory, we consider a bounded phase space $ {\mathcal M} \subseteq \mathbb R^n\times\mathbb T^n$. By translating actions and rescaling the parameter $\varepsilon$, it is not restrictive to take \begin{equation}\label{bada}
{\mathcal M} :=\DD\times \mathbb T^n\,,\qquad {\rm with}\qquad \DD:=B_1(0):=\{y\in \mathbb R^n\st |y| <1\}\,. \end{equation}
{
\noindent} The first step in averaging theory is to construct suitable coverings so as to control resonances where small divisors appear. Let us recall that {\sl a resonance} ${\cal R}_k$
(with respect to the free Hamiltonian $\frac12 |y|^2$) is the hyperplane
$\{y\in {\mathbb R} ^n: y\cdot k=0\}$, where $k\in\gen$, and its order is given by $\noruno{k}$; a {\sl double resonance} ${\cal R}_{k,\ell}$ is the intersection of two resonances: ${\cal R}_{k,\ell}={\cal R}_k\cap {\cal R_\ell}$ with $k\neq\ell$ in
$\gen$; the order of ${\cal R}_{k,\ell}$ is given by $\max\{\noruno{k},\noruno{\ell}\}$.
\subsubsection*{Notations} The real or complex (open) balls of radius $r>0$ and center $y_0\in \mathbb R^n$ or $z_0\in {\mathbb C} ^n$ are denoted by \beq{palle}
B_r(y_0):= \{y\in\mathbb R^n: |y-y_0|<r\}\,,\qquad D_r(z_0):= \{z\in {\mathbb C} ^n: |z-z_0|<r\}\,;
\end{equation} if $V\subset {\mathbb R} ^n$ and $r>0$, $V_r$ denotes the complex neighborhood of $V$ given by\footnote{$\displaystyle |u|:=\sqrt{u\cdot \bar u}$ denotes the standard Euclidean norm on vectors $u\in {\mathbb C} ^n$ (and its subspaces); `bar', as usual, denotes complex--conjugated.} \beq{dico} V_r := \bigcup_{y\in V}\ D_r(y)\,. \end{equation} We shall also use the notation $\, {\rm Re}\,(V_r)$ to denote the {\sl real} $r$--neighbourhood of $V\subset {\mathbb R} ^n$, namely, \beq{dire} \, {\rm Re}\,(V _r) := V_r\cap {\mathbb R} ^n= \bigcup_{y\in V}\ B_r(y)\,. \end{equation} For a set $V\subseteq \mathbb R^n$ and for $r,s>0$, given a function $f:(y,x) \in V_r\times {\mathbb T} ^n_s\to f(y,x)$, we denote \beq{norme}
{\modulo}f{\modulo}_{V,r,s} = {\modulo}f{\modulo}_{r,s}:=\sup_{V_r\times \mathbb T^n_s}|f|\,, \qquad \,\thickvert\!\!\thickvert\, f\,\thickvert\!\!\thickvert\,_{V,r,s}=\,\thickvert\!\!\thickvert\, f\,\thickvert\!\!\thickvert\,_{r,s}:= \sup_{y\in V_r}\sum_{k\in\mathbb{Z}^n}
|f_k(y)| e^{|k|_{{}_1}s}\,, \end{equation} where $f_k(y)$ denotes the $k$--th Fourier coefficient of $x\in {\mathbb T} ^n\mapsto f(y,x)$; for a function depending only on $y\in V_r$ we set ${\modulo}f{\modulo}_{V,r}={\modulo}f{\modulo}_{r}:=
\sup_{V_r}|f|$.
\subsection{Non--resonant and simply--resonant sets}
Denote by $\pk$ and $\pko$ the orthogonal projections \beq{orto}
\pk y:=(y\cdot e_k)\, e_k\,,\qquad \pko y:=y-\pk y\,,\qquad e_k:=k/|k|\,, \end{equation} and, for any ${\mathtt K}\geq {\mathtt K}_{{}_{\rm o}}\geq 2$ and $\a>0$, define the following sets: \beqa{neva}
&&\Rz:=\{y\in \DD: \min_{k\in {\cal G}^n_{\KO}}|y\cdot k|>\textstyle \frac\a2 \}\,, \ \\ \label{sonnosonnoBIS} && \left\{\begin{array}{l} \Ruk:= \big\{y\in \DD:
|y\cdot k|<\textstyle\a;\, |\pko y\cdot \ell|> \frac{3 \alpha {\mathtt K}}{|k|}, \forall \ell\in {\mathcal G^n_{{\mathtt K}}}\, \backslash\, \mathbb{Z} k\big\}\,,\quad (k\in{\cal G}^n_{\KO}); \\ \Ru:=\bigcup_{k\in {\cal G}^n_{\KO}} \Ruk\,; \end{array}\right. \end{eqnarray} where, as above, $\DD=B_1(0)$.
{
\noindent} Eq. \equ{neva} implies that $\Rz$ is a $(\a/2)$--non--resonant set up to order ${\mathtt K}_{{}_{\rm o}}$, i.e., \beq{ovvio}
|y\cdot k|>\frac\a2\,,\qquad \forall \ y\in \Rz\,,\ \forall \ 0<|k|\le {\mathtt K}_{{}_{\rm o}}\,.
\end{equation} Indeed, fix $y\in \Rz$ and $k\in {\mathbb Z} ^n$ with $0<|k|\le {\mathtt K}_{{}_{\rm o}}$. Then, there exists $\bar k\in {\cal G}^n_{\KO}$ and $j\in {\mathbb Z} \, \backslash\, \{0\}$ such that $k=j\bar k$, so that $$
|y\cdot k|=|j|\ |\bar k\cdot y|\ge |\bar k\cdot y|>\a/2\,. $$ From \equ{sonnosonnoBIS} it follows that $\Ruk$
is $(2 \alpha {\mathtt K}/|k|)$--non resonant modulo $ {\mathbb Z} k$ up to order ${\mathtt K}$, namely: \begin{equation}\label{cipollotto2}
|y\cdot \ell |\ge 2\a{\mathtt K}/|k|\,, \ \ \ \forall k \in {\cal G}^n_{\KO}\,,\ \forall y\in \Ruk\,,\
\forall \ell\notin \mathbb{Z} k\,, \ |\ell|\leq {\mathtt K}\ . \end{equation}
Indeed, fix $y\in \Ruk$, $k\in{\cal G}^n_{\KO}$, $\ell\notin \mathbb{Z} k$ with $|\ell|\le {\mathtt K}$. Then, there exist $j\in\mathbb{Z}\setminus\{ 0\}$ and $\bar\ell\in{\cal G}^n_{\K}$ such that $\ell=j\bar\ell$. Hence, \begin{eqnarray*}
|y\cdot \ell|&=&|j|\, |y\cdot \bar\ell| \ge |y\cdot \bar\ell| =| \pko y \cdot\bar\ell+ \pk y\cdot \bar\ell|
\ge |\pko y\cdot \bar\ell|- \frac{\alpha {\mathtt K}}{|k|}\\
&>& \frac{3 \alpha {\mathtt K}}{|k|} - \frac{\alpha {\mathtt K}}{|k|} = \frac{2 \alpha {\mathtt K}}{|k|}\ . \end{eqnarray*} Relations \equ{ovvio} and \equ{cipollotto2} yield quantitative control on the small divisors that appear in perturbation theory allowing for {\sl high} order averaging theory as we now proceed to show.
\subsubsection*{Averaging} To perform averaging, we need to introduce a few parameters (Fourier cut--offs, a small divisor threshold, radii of analyticity) and some notation.
{
\noindent} Let \beq{dublino} \left\{\begin{array}{l} {\mathtt K}\ge 6 {\mathtt K}_{{}_{\rm o}}\ge 12\,,\quad
\nu:= \textstyle\frac92n+2\,, \quad \a:= \sqrt\varepsilon {\mathtt K}^\nu\,, \quad r_{\rm o}:=\frac{\a}{16 {\mathtt K}_{{}_{\rm o}}}\,, \quad r_{\rm o}':= \frac{r_{\rm o}}2\,, \\ \textstyle s_{\rm o}:=s\big(1-\frac1{{\mathtt K}_{{}_{\rm o}}}\big)\,, \ s_{\rm o}':=s_{\rm o}\big(1-\frac1{{\mathtt K}_{{}_{\rm o}}}\big)\,, \ \textstyle s_{\varstar}:=s\big(1-\frac1{{\mathtt K}}\big)\,, \ s_{\varstar}':=s_{\varstar}\big(1-\frac1{{\mathtt K}}\big)\,,\\
r_k:={\a}/{ |k|}=\sqrt\varepsilon {\mathtt K}^\nu/|k|\,,\quad r_k':=\frac{r_k}2\,,\ s'_k:=|k|_{{}_1}s_{\varstar}'\,,\qquad (\forall\ k\in{\cal G}^n_{\KO})\,. \end{array}\right. \end{equation} \rem (i) The action space $\DD$ can be trivially covered by three sets as follows $$ \DD=\Rz\cup\Ru \cup \Rd\,,\qquad \Rd:=\DD\, \backslash\, (\Rz\cup\Ru)\,. $$ As just pointed out, on the set $(\Rz\cup\Ru)\times \mathbb T^n$ one can construct detailed, high order normal forms, while $\Rd$ is a {\sl small set of measure of order $\varepsilon^2 {\mathtt K}^\g$} (compare \equ{teheran4} below).
{
\noindent} (ii) It is important to notice that $\Rd$, which is a neighborhood of double resonances of order ${\mathtt K}$, is a {\sl non perturbative set}, as pointed out in \cite{AKN}. Indeed, consider for simplicity the case $n=2$, where the only double resonance is the origin $y=0$. Rescaling variables and time by setting $y =\sqrt\varepsilon {\rm y}$, ${\rm x}=x$,
${\rm t}=\sqrt{\varepsilon}t$, the Hamiltonian $t$--flow of $\frac12 |y|^2+\varepsilon f(x)$ on $\{y: |y|<\sqrt\varepsilon\}\times \mathbb T^2\subseteq \Rd\times \mathbb T^2$ is equivalent to the ${\rm t}$--flow on $\{|{\rm y}|<1\} \times \mathbb T^2$ of the Hamiltonian $\frac12 |{\rm y}|^2+f({\rm x})$, which {\sl does not depend upon $\varepsilon$}. \erem
{
\noindent} Next result is based on
`refined Averaging Theory' as presented in \cite{BCnonlin}. The main technical point in this approach is the minimal loss of regularity in the angle analyticity domain and the usage of two Fourier cut--offs; for a discussion on these fine points, we refer to the Introduction in \cite{BCnonlin}.
\begin{lemma}[Averaging Lemma] \label{averaging} Let $\ham$ be as in \equ{ham} with $f\in{\mathbb B}^n_s$ and let \equ{dublino} hold. There exists a constant $\bco=\bco(n,s)>1$ such that if ${\mathtt K}_{{}_{\rm o}}\ge \bco$ the following holds.
{
\noindent} {\rm (a)} There exists a real analytic symplectic map \begin{equation}\label{trota} \Psi_{\rm o}: \Rz_{r_{\rm o}'}\times \mathbb T^n_{s_{\rm o}'} \to \Rz_{r_{\rm o}}\times \mathbb T^n_{s_{\rm o}} \,, \end{equation} such that, denoting by $\langle \cdot \rangle$ the average over angles $x$, \beq{prurito} \hamo(y,x) := \big(\ham\circ\Psi_{\rm o}\big)(y,x)
=\frac{|y|^2}2+\varepsilon\big( g^{\rm o}(y) +
f^{\rm o}(y,x)\big)\,,\quad \langle f^{\rm o}\rangle=0\,, \end{equation} where $g^{\rm o}$ and $f^{\rm o}$ are real analytic on $\Rz_{r_{\rm o}'}\times \mathbb T^n_{s_{\rm o}'}$ and satisfy \beq{552}
| g^{\rm o}|_{r_{\rm o}'} \leq \vartheta_{\rm o}:= \frac{1}{{\mathtt K}^{6n+1}}\,, \qquad \,\thickvert\!\!\thickvert\, f^{\rm o} \,\thickvert\!\!\thickvert\,_{r_{\rm o}',s_{\rm o}'} \leq e^{-{\mathtt K}_{{}_{\rm o}} s/3}\,. \end{equation} {\rm (b)} For each $k\in {\cal G}^n_{\KO}$, there exists a real analytic symplectic map \begin{equation}\label{canarino} \Psi_k: \Ruk_{r_k'} \times \mathbb T^n_{s_{\varstar}'} \to \Ruk_{r_k} \times \mathbb T^n_{s_\varstar} \,, \end{equation} such that \beqa{hamk} \hamk(y,x) &:=& \big(\ham\circ\Psi_k\big)(y,x)\\
&=&\frac{|y|^2}2+\varepsilon \big( g^k_{\rm o}(y)+ g^k(y,k\cdot x) + f^k (y,x)\big)\,,\qquad\pi_{\!{}_{\integer k}} f^k=0\,, \nonumber \end{eqnarray} where $g^k_{\rm o}$ is real analytic on $\Ruk_{r_k'}$;
$g^k(y,\cdot)\in\hol_{s'_k}^1$ for every $y\in \Ruk_{r_k'}$ (in particular, $\langle g^k(y,\cdot)\rangle=0$); $f^k $ is real analytic on $\Ruk_{r_k'} \times \mathbb T^n_{s_{\varstar}'} $, and: \begin{equation}\label{cristina}
|g^k_{\rm o}|_{r_k'} \leq \vartheta_{\rm o}\,,\qquad \,\thickvert\!\!\thickvert\, g^k-\pi_{\!{}_{\integer k}} f\,\thickvert\!\!\thickvert\, _{r_k',s'_k} \leq \vartheta_{\rm o}\,,\qquad \,\thickvert\!\!\thickvert\, f^{k} \,\thickvert\!\!\thickvert\, _{r_k',\frac{s_{\varstar}}2} \le
e^{- {\mathtt K} s/3}\,. \end{equation} {\rm (c)} Finally, denoting by $\pi_y$ and $\pi_x$ the projections onto, respectively, the action variable $y$ and the angle variable $x$, one has \begin{equation}\label{dunringill}\textstyle
|\pi_y\Psi_{\rm o}-y|_{r_{\rm o}',s_{\rm o}'}\leq \frac{r_{\rm o}}{2^7 {\mathtt K}_{{}_{\rm o}}}\,,\quad
|\pi_y\Psi_k-y|_{r_k', s_{\varstar}'}\leq \frac{r_k}{2^7 {\mathtt K}} \,, \end{equation} and, for every fixed $y$, $\pi_x \Psi_{\rm o}(y,\cdot)$, and $\pi_x \Psi_k(y,\cdot)$ are diffeomorphisms on $\mathbb T^n$. \end{lemma}
\noindent{\bf Proof\ } The statements follow from Theorem~6.1 in \cite[p. 3553]{BCnonlin} with obvious notational changes, which we proceed to spell out. The correspondence of symbols between this paper and \cite{BCnonlin} is the following\footnote{In these identities, the first symbol is the one used here, the second one is that used in \cite{BCnonlin}}:
\begin{eqnarray*} &&
\Rz=\O^0\,;\ \ \Ruk=\O^{1,k}\,,\ \ \textstyle \frac{|y|^2}2=h(y)\,; \ \ {\mathtt K}_{{}_{\rm o}}={\mathtt K_{{}_1}}\,,\ \ {\mathtt K}={\mathtt K}_{{}_2}\,, \\ && g^{\rm o}= {\rm g}^{\rm o}\,; \ \ f^{\rm o}=f^{\rm o}_{\varstar\varstar}\,;\ \ g^k_{\rm o}(y)+g^k(y,k\cdot x)={\rm g}^k(y,x)\,;\ \ f^k=f^k_{\varstar\varstar}\,; \end{eqnarray*} the constants $\bar L$ and $L$ in Definition~2.1 in \cite[p. 3532]{BCnonlin} in the present case are $\bar L=L=1$ (since the frequency map here is the identity map); the projection ${\rm p}_{\!{}_{ {\mathbb Z} k}}$ introduced in \cite[p. 3529]{BCnonlin} is different from the projection $\pi_{\!{}_{\integer k}}$ defined here, the relation between the two being: $\pi_{\!{}_{\integer k}} f(k\cdot x)={\rm p}_{\!{}_{ {\mathbb Z} k}}f(x)$;
finally, the norm $|\cdot|_{D,r,s}$ in \cite[p. 3534]{BCnonlin} corresponds here to the norm $\,\thickvert\!\!\thickvert\,\cdot\,\thickvert\!\!\thickvert\,_{D,r,s}$, hence:
$$|g^k_{\rm o}|_{r_k'}+\,\thickvert\!\!\thickvert\, g^k-\pi_{\!{}_{\integer k}} f\,\thickvert\!\!\thickvert\, _{r_k',s'_k} = \,\thickvert\!\!\thickvert\, {\rm g}^k - {\rm p}_{\!{}_{ {\mathbb Z} k}} f\,\thickvert\!\!\thickvert\,_{D^{1,k},r_k/2,s_\star}\,.$$ Now, Assumption~A in \cite[p. 3533]{BCnonlin} holds. Indeed: \begin{itemize} \item[\footnotesize $\bullet$]
the action--analyticity radii are the same as in \cite{BCnonlin} (compare \equ{dublino} with Eq.~(140) in \cite{BCnonlin}); \item[\footnotesize $\bullet$]
the angle--analyticity radii defined here are the same as in Eq.s~(144) and (147) in \cite{BCnonlin} (with different names); \item[\footnotesize $\bullet$] In \cite{BCnonlin} it is assumed that ${\mathtt K}\ge 3{\mathtt K}_{{}_{\rm o}}\ge 6$ (see Eq. (139) in \cite{BCnonlin}), which in view of \equ{dublino}, is satisfied. Also $\nu$ in \cite{BCnonlin} is assumed to satisfy $\nu\ge n+2$, which in \equ{dublino} is defined as $\nu=\frac92n+2$.
\item[\footnotesize $\bullet$] By taking $\bco$ big enough condition (143) is satisfied.
\item[\footnotesize $\bullet$] finally, to meet the smallness condition~(40) in \cite{BCnonlin}, namely $\varepsilon\le r^2/{\mathtt K}^{2\nu}$ (where $r$ is the analyticity radius of the unperturbed Hamiltonian, which here is a free parameter), one can take $r={\mathtt K}^\nu$ so that condition~(40) in \cite{BCnonlin} becomes simply $\varepsilon\le 1$. \end{itemize} Thus, Theorem~6.1 of \cite{BCnonlin} can be applied, and \equ{prurito} and \equ{hamk} are immediately recognized as, respectively, Eq.'s (145) and (148) in \cite{BCnonlin}. Since $\bar\vartheta$ and $\vartheta$ in Eq. 141 of \cite{BCnonlin} are of the form $c(n,s) /{\mathtt K}^{7n+1}$, we see that, by taking $\bco$ big enough, they can be bounded by $\vartheta_{\rm o}=1/{\mathtt K}^{6n+1}$ in \equ{552}. Analogously, the exponential estimates on the perturbation functions in (146) and (150) of \cite{BCnonlin} are, respectively, of the form $c(n,s)\, {\mathtt K}_{{}_{\rm o}}^n e^{-{\mathtt K}_{{}_{\rm o}} s/2}$ and $c(n,s)\, {\mathtt K}^n e^{-{\mathtt K} s/2}$, which, by taking $\bco$ big enough, can be bounded, respectively, by $e^{-{\mathtt K}_{{}_{\rm o}} s/3}$ and $e^{-{\mathtt K} s/3}$ as claimed. Thus (a) and (b) are proven. Finally, from (71) and (69) in \cite[p. 3541]{BCnonlin} it follows at once \equ{dunringill} and the injectivity of the angle maps.
\qed
{
\noindent} For high Fourier modes, a more precise and uniform normal form can be achieved\footnote{This lemma should be compared with Theorem 2.1 in \cite{BCnonlin}.}:
\begin{lemma}[Cosine--like Normal forms] \label{coslike} Let $\ham$ be as in \equ{ham} with $f\in{\mathbb B}^n_s$ satisfying \eqref{P1+} and let \equ{dublino} hold. There exists a constant $\bfco=\bfco(n,s,\d)\ge \max\{\mathtt N\,,\,\bco\}$ such that if ${\mathtt K}_{{}_{\rm o}}\ge \bfco$ then the following holds. For any
$k\in {\cal G}^n_{\KO}$ such that $\noruno{k}\ge \mathtt N$, the Hamiltonian $\hamk$ in \equ{hamk} takes the form: \begin{equation}\label{hamkc} \hamk =
\frac{|y|^2}2 + \varepsilon g^k_{\rm o}(y)+
2|f_k|\varepsilon\ \big[\cos(k\cdot x +\sa_k)+ F^k_{\! \varstar}(k\cdot x)+ g^k_{\! \varstar}(y,k\cdot x)+ f^k_{\! \varstar} (y,x)
\big]\,, \end{equation} where $\sa_k$ and $F^k_{\! \varstar}$ are as in Proposition~\ref{pollaio} and: \begin{equation}\label{cate}
g^k_{\! \varstar}:=\frac{1}{2|f_k|}\, \big(g^k- \pi_{\!{}_{\integer k}} f\big)\,,
\qquad f^k_{\! \varstar} :=\frac{1}{2|f_k|} f^k\,. \end{equation} Furthermore,
$g^k_{\! \varstar}(y,\cdot )\in\hol_1^1$ (for every $y\in \Ruk_{r_k'}$), $\pi_{\!{}_{\integer k}} f^k_{\! \varstar}=0$, and one has: \beq{martinaTE} \,\thickvert\!\!\thickvert\, g^k_{\! \varstar}\,\thickvert\!\!\thickvert\,_{r_k',1}\le \vartheta :=\frac{1}{{\mathtt K}^{5n}}\,,\qquad\quad \,\thickvert\!\!\thickvert\, f^k_{\! \varstar} \,\thickvert\!\!\thickvert\, _{r_k',\frac{s_\varstar}2} \leq
e^{-{\mathtt K} s/7}\,.
\end{equation} \end{lemma} Observe that, under the assumptions of Lemma~\ref{coslike}, by \equ{dublino} and \equ{bollettino1} it is \beq{bollettino3} {\mathtt K}\ge 6{\mathtt K}_{{}_{\rm o}}\ge6\mathtt N\ge 12\ttcs\ge 12\,. \end{equation}
\noindent{\bf Proof\ } First of all observe that the hypotheses of Lemma~\ref{coslike} imply those of Lemma~\ref{averaging} so that the results of Lemma~\ref{averaging} hold.\\ From \equ{cate} it follows that
$g^k(y,\sa)=\pi_{\!{}_{\integer k}} f(\sa)+ 2 |f_k| g^k_\star(y,\sa)$, which together with \equ{alfacentauri} and \equ{hamk} of Lemma~\ref{averaging}, implies immediately the relations \equ{hamkc}. To prove the first estimate in \equ{martinaTE}, we observe that, since $\noruno{k}\ge \mathtt N$, recalling \equ{dublino} and \equ{bollettino3} one has \beq{bad}\textstyle s'_k =
|k|_{{}_1} s\, \big(1-\frac1{\mathtt K}\big)^2 > \mathtt N s\, \frac45> 1\,. \end{equation} Thus, $g^k_{\! \varstar}(y,\cdot)$
is bounded on a `large' angle--domain of size larger than 1 and has zero average (since $g^k_{\! \varstar}(y,\cdot)\in\hol_{|k|_{{}_1}s_{\varstar}'}^1$). Now, recall the smoothing property \equ{lesso} (with $N=1$), recall that ${\mathtt K}_{{}_{\rm o}}\le {\mathtt K}/6$, and take $\bfco$ large enough. Then, \begin{align*} \,\thickvert\!\!\thickvert\, g^k_{\! \varstar}\,\thickvert\!\!\thickvert\,_{r_k',1}&\stackrel{\equ{cate}}{:=}
\frac{1}{2|f_k|}\, \,\thickvert\!\!\thickvert\, g^k- \pi_{\!{}_{\integer k}} f\,\thickvert\!\!\thickvert\,_{r_k',1} \stackrel{\equ{P1+}}\le \frac{\noruno{k}^n e^{\noruno{k}s}}{2\d}\, \,\thickvert\!\!\thickvert\, g^k- \pi_{\!{}_{\integer k}} f\,\thickvert\!\!\thickvert\,_{r_k',1} \\ &\stackrel{(\ref{lesso},\ref{bad})}\le \frac{\noruno{k}^n e^{\noruno{k}s}}{2\d}\, \,\thickvert\!\!\thickvert\, g^k- \pi_{\!{}_{\integer k}} f\,\thickvert\!\!\thickvert\,_{r_k',s'_k} \cdot e^{-(s'_k-1)} \stackrel{\equ{cristina}}\le \frac{\noruno{k}^n e}{2\d}\, \vartheta_{\rm o}\ e^{\noruno{k}(s-s_\varstar')}\\ &\stackrel{\equ{dublino}}= \frac{\noruno{k}^n e}{2\d}\, \vartheta_{\rm o}\ e^{\frac{\noruno{k}}{{\mathtt K}} s \big(2-\frac1{\mathtt K}\big)} \stackrel{\equ{552}}\le \frac{{\mathtt K}_{{}_{\rm o}}^n e}{2\d}\, \frac{1}{{\mathtt K}^{6n+1}}\ e^{2s \frac{{\mathtt K}_{{}_{\rm o}}}{{\mathtt K}}}\le \frac{1}{{\mathtt K}^{5n}}\stackrel{\equ{martinaTE}}=\vartheta \,. \end{align*} Furthermore, possibly increasing $\bfco$, one also has \begin{align*} \,\thickvert\!\!\thickvert\, f^k_{\! \varstar}\,\thickvert\!\!\thickvert\,_{r_k',\frac{s_\varstar}2}&\stackrel{\equ{cate}}=
\frac{1}{2|f_k|}\, \,\thickvert\!\!\thickvert\, f^k\,\thickvert\!\!\thickvert\,_{r_k',\frac{s_\varstar}2} \stackrel{ \equ{P1+}}\le \frac{\noruno{k}^n e^{\noruno{k}s}}{2\d}\, \,\thickvert\!\!\thickvert\, f^k\,\thickvert\!\!\thickvert\,_{r_k',\frac{s_\varstar}2} \stackrel{\equ{cristina}}\le \frac{\noruno{k}^n e^{\noruno{k}s}}{2\d}\, e^{-\frac{{\mathtt K} s}3} \\ &\le \frac{{\mathtt K}_{{}_{\rm o}}^n}{2\d}\ e^{-{\mathtt K} s\big(\frac13-\frac{{\mathtt K}_{{}_{\rm o}}}{\mathtt K}\big)} \le \frac{{\mathtt K}^n}{2\d\cdot 6^n}\ e^{-{\mathtt K} s/6} \le e^{-{\mathtt K} s/7}\,. \qquad \qedeq & \end{align*}
\subsection{Coverings}
As mentioned in the Introduction, the averaging symplectic maps $\Psi_{\rm o}$ and $\Psi_k$ of Lem\-ma~\ref{averaging} may displace boundaries by $\sqrt\varepsilon{\mathtt K}^\nu$ (compare \equ{dublino} and \equ{dunringill}) so one cannot use the secular Hamiltonians to describe the dynamics all the way up to the boundary of $\DD\times \mathbb T^n$. Such a problem -- which is essential, for example, in achieving the results described at the end of the Introduction -- may be overcome by introducing {\sl a second covering}, as we proceed now to explain.
{
\noindent} Recall the definitions of $\Rz$ and $\Ruk$ in \equ{neva} and \equ{sonnosonnoBIS}; recall {\equ{dublino}, the notation in \equ{dire} and define \beq{rocket} \Rzt:= \, {\rm Re}\, (\Rz_{r'_{\rm o}/2})\,,\qquad \Rukt:= \, {\rm Re}\,(\Ruk_{r'_k/2})\,,\phantom{AAAAa} (k\in{\cal G}^n_{\KO})\,. \end{equation} Then, the following result holds: \begin{lemma}\label{sedformosa} {\bf (Covering Lemma)} \beqa{surge} &&\Rz\times \mathbb T^n \subseteq \Psi_{\rm o}\big(\Rzt\times \mathbb T^n\big)\,,\\ &&\label{surge2} \Ruk\times\mathbb T^n\subseteq \Psi_k\big(\Rukt\times \mathbb T^n\big)\,,\qquad \forall k\in {\cal G}^n_{\KO}\,,\\ \label{zucchina} && \label{sonnosonnosonno} \Rd:=\DD\, \backslash\, (\Rz\cup\Ru)\subseteq\bigcup_{k\in {\cal G}^n_{\KO}} \bigcup_{ \sopra{\ell\in \mathcal G^n_{{\mathtt K}}}{\ell\notin \mathbb{Z} k}} \Rd_{k,\ell}\,, \end{eqnarray} where \beq{defi} \Rd_{k\ell}:= \big\{y\in \DD:
|y\cdot k|<\textstyle\a;\, |\pko y\cdot \ell|\le \frac{3 \alpha {\mathtt K}}{|k|}\big\}\,,\qquad (k\in{\cal G}^n_{\KO}\,,\ \ell\in {\cal G}^n_{\K}\, \backslash\, \mathbb{Z} k)\,. \end{equation} \end{lemma}
\rem (i) From the definition of $\Rd$ in \equ{sonnosonnosonno} it follows trivially that $\{{\cal R}^i\}$ is a covering of $\DD$ so that $\DD= \Rz\cup\Ru\cup \Rd$.
{
\noindent} (ii) Notice that from the definition of $\Rukt$ in \equ{rocket}, one has that \beq{rocket2} \Rukt_{r'_k/2}\subseteq \Ruk_{r'_k}\,. \end{equation}
{
\noindent} (iii) Relations \equ{surge} and \equ{surge2} allow to map back the dynamics of the averaged Hamiltonians \equ{prurito} and \equ{hamk} so as to describe the dynamics also {\sl arbitrarily close to the boundary of the starting phase space}. \erem
{
\noindent} For the proof of the Covering Lemma we shall use the following immediate consequence of the Contraction Lemma\footnote{Recall the definitions in \equ{palle}; as usual $\overline{A}$ denotes the closure of the set $A$.}:
\begin{lemma}\label{DAD} Fix $y_0\in\mathbb R^n$, $r>0$ and let $\phi:D_{2r}(y_0)\to\mathbb{C}^n$ be a real analytic map satisfying \beq{trasloco}
\sup_{D_{2r}(y_0)}|\phi(y)-y|\le M \end{equation}
for some $0<M<r$. Then, $y_0\in \phi(\overline{B_r(y_0)})$. \end{lemma} \noindent{\bf Proof\ } Let $V_0:=\overline{B_r(0)}$. Solving the equation $\phi(y)=y_0$ for some $y\in \overline{B_r(y_0)}$ is equivalent to solving the fixed point equation $w=\psi_0(w):=-{\psi}(y_0+w)$ for $w\in V_0$ having set ${\psi}(y):=\phi(y)-y$. By \equ{trasloco} it follows that $\psi_0: V_0 \to V_0$ and by the mean value theorem and Cauchy estimates we get that, for every $w,w'\in V_0$,
$$|\psi_0(w)-\psi_0(w')|=|{\psi}(y_0+w)-{\psi}(y_0+w')|
\leq \frac{M}{r} |w-w'|\,,$$
showing that $\psi_0$ is a contraction on $V_0$ (since $M/r<1$) and the claim follows by the standard Contraction Lemma. \qed
\noindent{\bf Proof\ } {\bf of \equ{surge}} We start by proving that \beq{amicamea} \forall\ (y_0,x)\in \Rz\times\mathbb T^n\,,\ \exists! \ (y,x_0)\in \Rzt\times\mathbb T^n\!:\ \ \Psi_{\rm o}(y,x)=(y_0,x_0)\,. \end{equation} Define \beq{zoccolo} M:=\frac{r_{\rm o}}{2^7 {\mathtt K}_{{}_{\rm o}}}
\stackrel{\eqref{dublino}}= \frac{\a}{2^{11} {\mathtt K}_{{}_{\rm o}}^2}
<
\frac{\a}{2^{10}{\mathtt K}_{{}_{\rm o}}^2}=:r<
\frac{\a}{2^7 {\mathtt K}_{{}_{\rm o}}}
\stackrel{\eqref{dublino}}=
\frac{r_{\rm o}'}4
\,.
\end{equation}
Fix $(y_0,x)\in \Rz\times\mathbb T^n$ and
let $\phi(y):=\pi_y\Psi_{\rm o}(y,x)$. Then, by \equ{zoccolo},
$$\sup_{D_{2r}(y_0)}|\phi(y)-y|
\le \sup_{D_{r'_{\rm o}}(y_0)}|\phi(y)-y|
\le |\pi_y\Psi_{\rm o}-y|_{r_{\rm o}',s_{\rm o}'} \stackrel{\equ{dunringill}}\le M\,.$$ Thus, by Lemma~\ref{DAD}, since by \equ{zoccolo} $2r<r'_{{\rm o}}/2$, by definition of $\Rzt$, we have that $$y_0\in \pi_y\Psi_{\rm o}\big(\overline{B_r(y_0)}\times \{x\}\big) \subseteq \pi_y\Psi_{\rm o}\big(\Rzt\times \{x\})\,, $$ which implies that $\Psi_{\rm o}(y,x)=(y_0,x_0)$ with $x_0\in\mathbb T^n$ proving \eqref{amicamea}. Now, observe that the map $ (y_0,x)\in \Rz\times\mathbb T^n \mapsto (y,x_0)\in\Rzt\times \mathbb T^n$ in \equ{amicamea} is nothing else than the diffeomorphism associated to the near--to--identity generating function $y_0\cdot x+\psi_0(y_0,x)$ of the near--to--identity symplectomorphism $\Psi_{\rm o}$. Thus, for each $y_0\in\Rz$, the map $x \in\mathbb T^n\mapsto x_0=x+\partial_{y_0} \psi_0(y_0,x)$ is a diffeomorphism of $\mathbb T^n$ with inverse given by $x_0\in\mathbb T^n\mapsto x=x_0+\chi(y_0,x_0)$ for a suitable (small) real analytic map $\chi$. Therefore, given $(y_0,x_0)\in\Rz\times \mathbb T^n$, if we take $x= x_0+\chi(y_0,x_0)$ in \equ{amicamea} we obtain that there exist $(y,x)\in \Rzt\times\mathbb T^n$ such that $(y_0,x_0)=\Psi_0(y,x)$, proving \equ{surge}. \qed
\noindent{\bf Proof\ } {\bf of \equ{surge2}} The argument is completely analogous: Again, we start by proving that \beq{amicomeo}
\forall\ k\in {\cal G}^n_{\KO}\,,\ \forall\ (y_0,x)\in \Ruk\times\mathbb T^n\,,\ \exists! \ (y,x_0)\in \Rukt\times\mathbb T^n\!:\ \ \Psi_k(y,x)=(y_0,x_0)\,. \end{equation} Fix $k\in{\cal G}^n_{\KO}$ and define \beq{zoccoli} M:=\frac{r_k}{2^7 {\mathtt K}}
\stackrel{\eqref{dublino}}= \frac{\a}{2^7|k|\, {\mathtt K}}
<
\frac{\a}{2^6 |k| {\mathtt K}}=:r<\frac{r_k'}4 \stackrel{\eqref{dublino}}= \frac{\a}{8|k|}
\,.
\end{equation} Fix $(y_0,x)\in\Ruk\times \mathbb T^n$, and
let $\phi(y):=\pi_y\Psi_k(y,x)$. By \equ{zoccoli},
$$\sup_{D_{2r}(y_0)}|\phi(y)-y|
\le \sup_{D_{r'_k}(y_0)}|\phi(y)-y|
\le |\pi_y\Psi_k-y|_{r_k',s_\varstar} \stackrel{\equ{dunringill}}\le M\,.$$ Thus, by Lemma~\ref{DAD} we have $$y_0\in \pi_y\Psi_k\big(\overline{B_r(y_0)}\times \{x\}\big) \subseteq \pi_y\Psi_k\big(\Rukt\times \{x\}\big)\,, $$ which implies that $\Psi_k(y,x)=(y_0,x_0)$ for some $x_0\in\mathbb T^n$ proving \eqref{amicomeo}. Now, the argument given in the non--resonant case applies also in this case. \qed
\noindent{\bf Proof\ } {\bf of \equ{sonnosonnosonno}} If $y\in \Rd$ then, since $y\notin \Rz$, there exists $k\in {\cal G}^n_{\KO}$ such that $|y\cdot k|<\a$, in which case, since $y\notin \Ru$, there exists $\ell\in {\cal G}^n_{\K}\, \backslash\, \mathbb{Z} k$
such that $ |\pko y\cdot \ell|\le \frac{3 \alpha {\mathtt K}}{|k|}$, hence $y\in \Rd_{k,\ell}$ for some $k\in {\cal G}^n_{\KO}$ and $\ell\in {\cal G}^n_{\K}\, \backslash\, \mathbb{Z} k$. \qed
{
\noindent} Next, we show that
the measure of $\Rd$ is proportional to\footnote{A similar result can be found in \cite[p. 3533]{BCnonlin}.} $\a^2$: \begin{lemma}\label{coverto}\ There exists a constant $\cdr=\cdr(n)>1$ such that: \begin{equation}\label{teheran44} {\rm\, meas\, } (\Rd) \le \cdr\, \a^2\ {\mathtt K}^{2n} \,. \end{equation} \end{lemma}
\noindent{\bf Proof\ } Let us estimate the measure of $\Rd_{k,\ell}$ in \equ{defi}. Denote by $v\in\mathbb R^n$ the projection of $y$ onto the plane generated by $k$ and $\ell$ (recall that, by hypothesis, $k$ and $\ell$ are not parallel). Then, \begin{equation}\label{soldatino}
|v\cdot k|=|y\cdot k|<\a\,, \qquad |\proiezione_k^\perp v \cdot \ell|
=|\proiezione_k^\perp y \cdot \ell|
\le
3\a{\mathtt K} /|k|\,. \end{equation} Set \beq{bacca}
h:=\pk \ell= \ell -\frac{\ell\cdot k}{|k|^2} k\,. \end{equation} Then, $v$ decomposes in a unique way as $v=a k+ b h$ for suitable $a,b\in\mathbb R$. By \eqref{soldatino}, \beq{goja}
|a|<\frac{\a}{|k|^2}\,,\qquad
|\pk v\cdot\ell|
=|bh\cdot \ell| \le 3\a{\mathtt K} /|k|\,,
\end{equation} and, since $ |\ell|^2 |k|^2-(\ell\cdot k)^2$ is a positive integer (recall, that $k$ and $\ell$ are integer vectors not parallel), $$
|h\cdot \ell| \eqby{bacca}
\frac{ |\ell|^2 |k|^2-(\ell\cdot k)^2 }{|k|^2}
\ge \frac1{|k|^2}\,. $$ Hence, \beq{velazquez}
|b|\le 3 \alpha {\mathtt K} |k| \,. \end{equation} Then, write $y\in \Rd_{k,\ell}$ as $y=v+v^\perp$ with
$v^\perp$ in the orthogonal complement of the plane generated by $k$ and $\ell$. Since $|v^\perp |\le |y|< 1$ and $v$ lies in the plane spanned by $k$ and $\ell$ inside a rectangle with sides of length $2\a/|k|^2$ and $6 \alpha {\mathtt K} |k|$ (compare \equ{goja} and \equ{velazquez}) we find \[ \textstyle
{\rm\, meas\, }(\Rd_{k,\ell})\le \frac{2\a}{|k|^2}\, (6 \alpha {\mathtt K} |k|)\ 2^{n-2}=3\cdot 2^n \, \a^2 \frac{{\mathtt K}}{|k|}\,,\quad \forall \left\{\begin{array}{l} k\in{\cal G}^n_{\KO}\,,\\
\ell\in \mathcal G^n_{{\mathtt K}}\, \backslash\, \mathbb{Z} k\,. \end{array}\right.
\] Since $\sum_{k\in{\cal G}^n_{\KO}}|k|^{-1}\le c\, {\mathtt K}_{{}_{\rm o}}^{n-1}$ for a suitable $c=c(n)$, and ${\mathtt K}_{{}_{\rm o}}\le {\mathtt K}/6$, \equ{teheran44} follows. \qed
\rem In view of \equ{teheran44} and \eqref{dublino}, we have \begin{equation}\label{teheran4} {\rm\, meas\, } (\Rd) \le \cdr \, \varepsilon\, {\mathtt K}^\gamma \,,\qquad \gamma:=11n+4 \,. \end{equation} Thus, if ${\rm V}_{\! n}=\pi^{\frac{n}2}/\G(1+\frac{n}2)$ denotes the volume of the Euclidean unit ball in $\mathbb R^n$, we have that \beq{piccoletto} \varepsilon<\frac{{\rm V}_{\! n}}{\cdr {\mathtt K}^\gamma}\quad\implies \quad {\rm\, meas\, } (\Rd) <{\rm\, meas\, } \DD\,. \end{equation} \erem
\subsection{Normal Form Theorem} In the normal form around simple resonances the `averaged Hamiltonian' in \equ{hamk} (i.e., the Hamiltonian obtained disregarding the exponentially small term $f^k$) depends on angles through the linear combination $k\cdot x$, which, since $k\in\gen$ defines {\sl a new well--defined angle $\ttx_1\in {\mathbb T} $}. This fact calls for a linear symplectic change of variables:
\begin{lemma}\label{Fiu} Let the hypotheses of Lemma~\ref{coslike} hold. \\ {\rm (i)} For any $k\in {\cal G}^n_{\KO}$ there exists a matrix $\hAA\in\mathbb{Z}^{(n-1)\times n}$ such that\footnote{${\rm SL}(n,\mathbb{Z})$ denotes the group of $n\times n$ matrices with entries in $ {\mathbb Z} $ and determinant 1;
$|M|_{{}_\infty}$, with $M$ matrix (or vector), denotes the maximum norm $\max_{ij}|M_{ij}|$ (or $\max_i |M_i|$).} \beq{scimmia} \begin{array}l \displaystyle \AA:=\binom{k}{\hAA} =\binom{k_1\cdots k_n}{\hAA}\in\ \ {\rm SL}(n,\mathbb{Z})\,,\\
|\hAA|_{{}_\infty}\leq |k|_{{}_\infty}\,,\ \
|\AA|_{{}_\infty}=|k|_{{}_\infty}\,,\ \
|\AA^{-1}|_{{}_\infty}\leq
(n-1)^{\frac{n-1}2} |k|_{{}_\infty}^{n-1}\,.\phantom{\displaystyle\int} \end{array} \end{equation} {\rm (ii)} Let $\Fio$ be the linear, symplectic map on $\mathbb R^n\times {\mathbb T} ^n$ onto itself defined by \begin{equation}\label{talktothewind} \Fio: (\tty,\ttx) \mapsto (y,x)= (\AA^T\tty, \AA^{-1} \ttx) \,. \end{equation} Then, \beq{azz} \ttx_1=k\cdot x\,,\qquad\quad y=\tty_1 k+\hAA^T \hat \tty\,,\phantom{AAAAAAA} \big[\hat \tty:=(\tty_2,...,\tty_{n})\big]\,. \end{equation} Furthermore, letting\footnote{$\Rukt$ is defined in \equ{rocket}; recall, also, \equ{dublino}.} \begin{equation}\label{formentera}\textstyle \DDD^k:= \AA^{-T}\Rukt\,,\quad \left\{\begin{array}l
\tilde r_k:=\frac{{r_k}}{ \itcu |k|}\\ \tilde s_k:=
\frac{s}{\itcu |k|^{n-1}} \end{array}\right. \,,\quad \itcu:=5n(n-1)^{\frac{n-1}2}\,, \end{equation} with $\AA$ as in {\rm (i)}, we find \begin{equation}\label{fare2} \Fio: \DDD^k_{\tilde r_k}\times \mathbb T^n_{\tilde s_k} \to \Rukt_{r_k'/2}\times \mathbb T^n_{s_\varstar/2}\,, \qquad \Fio(\DDD^k\times {\mathbb T} ^n)=\Rukt\times {\mathbb T} ^n\,. \end{equation} {\rm (iii)} $\hamk$ in \equ{hamk}, in the symplectic variables $(\tty,\ttx)=\big((\tty_1,\hat\tty),\ttx\big)$, takes the form: \beq{dopomedia} {\mathcal H} _k(\tty,\ttx):=\hamk\circ \Fio(\tty,\ttx)=\hamsec_k(\tty,\ttx_1)+ \varepsilon \bar f^k(\tty,\ttx) \,,\quad (\tty,\ttx) \in \DDD^k_{\tilde r_k}\times \mathbb T^n_{\tilde s_k}\,,
\end{equation}
where the `secular Hamiltonian'
\beq{hamsec}
\hamsec_k(\tty,\ttx_1):= \frac12 |\AA^T\tty|^2+\varepsilon g^k_{\rm o}(\AA^T\tty)+ \varepsilon g^k(\AA^T\tty,\ttx_1)\,,\quad \bar f^k(\tty,\ttx):=f^k (\AA^T\tty,\AA^{-1} \ttx) \end{equation} is a real analytic function for $\tty\in \DDD^k_{\tilde r_k}$ and\footnote{Recall \equ{dublino}.} $\ttx_1\in {\mathbb T} _{s'_k}$. \end{lemma}
\rem\label{ariazz} In the above Lemma~\ref{Fiu} (and also often in what follows), to simplify symbols, we may omit the dependence upon $k$ in the notation, but of course $\AA$, $\hAA$ and $\Fio$ {\sl do depend upon the simple resonance label $k\in {\cal G}^n_{\KO}$}. \erem
\noindent{\bf Proof\ } {\bf of Lemma \ref{Fiu}} (i) From B\'ezout's lemma it follows that\footnote{See Appendix~A of \cite[p. 3564]{BCnonlin} for a detailed proof.}:
{
\noindent} {\sl given $k\in {\mathbb Z} ^n$, $k\neq 0$ there exists a matrix $\AA=(\AA_{ij})_{1\le i,j\le n}$ with integer entries such that $\AA_{nj}=k_j$ $\forall$ $1\le j\le n$, $\det\AA={\rm gcd}(k_1,...,k_n)$, and
$|\AA|_{{}_\infty}=|k|_{{}_\infty}$.}
{
\noindent} Hence,
since $k\in \gen$, ${\rm gcd}(k_1,...,k_n)=1$, and \equ{scimmia} follows\footnote{Notice that the bound on $|\AA^{-1}|_{{}_\infty}$ follows from D'Alembert expansion of determinants, observing that for any $m\times m$ matrix ${\rm M}$, one has
$|\det {\rm M}|\leq m^{m/2} |{\rm M}|_{{}_\infty}^m$}.
{
\noindent} (ii) $\Fio$ is symplectic since it is generated by the generating function $\tty\cdot \AA x$. \\ The relations in \equ{azz} follow at once from the definition of $\Fio$. \\
Let us prove \equ{fare2}: $\tty\in\DDD^k_{\tilde r_k}$ if and only if $\tty=\tty_0+z$ with $\tty_0\in\DDD^k$ and $|z|<\tilde r_k$. Thus,
$$|\AA^Tz|\stackrel{\equ{scimmia}}\le n |k| |z|<n |k| \tilde r_k\stackrel{ \equ{formentera}}< \frac{r_k}4
\stackrel{ \equ{dublino}}=\frac{r'_k}2\,.
$$ Since, by definition of $\DDD^k$, $\AA^T \tty_0\in \Rukt$, we have that
$\AA^T\tty\in \Rukt_{r_k'/2}$.
\\ Let, now, $\ttx$ belong to $\mathbb T^n_{\tilde s_k}$. Then, for any $1\le j\le n$, recalling the definitions of $s_\varstar$ and $s_\varstar'$ in \equ{dublino}, we find $$
\Big|\, {\rm Im}\, (\AA^{-1} \ttx)_j\Big|= \Big| \sum_{i=1}^n (\AA^{-1})_{ji} \, {\rm Im}\, \ttx_i\Big|\stackrel{\equ{scimmia}}{<}
n(n-1)^{\frac{n-1}2} |k|^{n-1} \tilde s_k \stackrel{\equ{formentera}}{\le }\frac{s_\varstar}{2}< s_\varstar'\,. $$ Thus, $\AA^{-1} \ttx$ belongs to $\mathbb T^n_{s_\varstar'}$, and \equ{fare2} follows.
{
\noindent} (iii) Eq.'s \equ{dopomedia}--\equ{hamsec} follow immediately from the definition of the symplectic map $\Fio$ in \equ{talktothewind} and \equ{azz}. The statement on the angle--analyticity domain of $\hamsec_k$ follows from part (b) of Lemma~\ref{averaging}. \qed
{
\noindent} We summarize the above lemmata in the following
\begin{theorem}[Normal Form Theorem] \label{normalform} Let $\ham$ be as in \equ{ham} with $f\in{\mathbb B}^n_s$ satisfying \eqref{P1+} with $\mathtt N$ as in \equ{enne}, and let \equ{dublino} hold. There exists a constant\footnote{$\bco$ is defined in Lemma~\ref{averaging}.} $\bfco=\bfco(n,s,\d)\ge \max\{\mathtt N\,,\,\bco\}$ such that, if ${\mathtt K}_{{}_{\rm o}}\ge \bfco$, $k\in{\cal G}^n_{\KO}$, and $\DDD^k$, $\tilde r_k$, $\tilde s_k$ are as in \equ{formentera}, then there exist
real analytic symplectic maps \beq{trota2} \Psi_{\rm o}: \Rz_{r_{\rm o}'}\times \mathbb T^n_{s_{\rm o}'} \to \Rz_{r_{\rm o}}\times \mathbb T^n_{s_{\rm o}} \,, \qquad \Psi^k: \DDD^k_{\tilde r_k}\times \mathbb T^n_{\tilde s_k} \to \Ruk_{r_k} \times \mathbb T^n_{s_\varstar} \end{equation} having the following properties.
{
\noindent} {\rm (i)} $ \hamo(y,x) := \big(\ham\circ\Psi_{\rm o}\big)(y,x)
=\frac{|y|^2}2+\varepsilon\big( g^{\rm o}(y) +
f^{\rm o}(y,x)\big)$, with $g^{\rm o}$ and $f^{\rm o}$ satisfying \equ{552} and $\langle f^{\rm o}\rangle=0 $.
{
\noindent} {\rm (ii)} \beq{colosseum3cippa} {\mathcal H} _k(\tty,\ttx):=\ham\circ \Psi^k(\tty,\ttx)=\hamsec_k(\tty,\ttx_1)+ \varepsilon \bar f^k(\tty,\ttx) \,,\quad (\tty,\ttx) \in \DDD^k_{\tilde r_k}\times \mathbb T^n_{\tilde s_k}\,,
\end{equation}
where
\beq{hamseccippa}
\hamsec_k(\tty,\ttx_1):= \frac12 |\AA^T\tty|^2+\varepsilon \mathtt g^k_{\rm o}(\tty)+ \varepsilon \mathtt g^k(\tty,\ttx_1) \end{equation} is a real analytic function for $\tty\in \DDD^k_{\tilde r_k}$ and $\ttx_1\in {\mathbb T} _{s'_k}$. In particular
$\mathtt g^k(y,\cdot)\in\hol_{s'_k}^1$ for every $y\in \DDD^k_{\tilde r_k}$. Furthermore, the following estimates hold: \begin{equation}\label{cristinacippa}
|\mathtt g^k_{\rm o}|_{\tilde r_k} \leq \vartheta_{\rm o}=\frac{1}{{\mathtt K}^{6n+1}}\,,\qquad \,\thickvert\!\!\thickvert\, \mathtt g^k-\pi_{\!{}_{\integer k}} f\,\thickvert\!\!\thickvert\, _{{\tilde r_k},s'_k} \leq \vartheta_{\rm o}\,,\qquad \,\thickvert\!\!\thickvert\, \bar f^{k} \,\thickvert\!\!\thickvert\, _{{\tilde r_k},{\tilde s_k}} \le
e^{- {\mathtt K} s/3}\,. \end{equation} {\rm (iii)} If $ \noruno{k}\geq \mathtt N$, there exists $\sa_k\in[0,2\pi)$ such that \begin{equation}\label{hamkccippa} {\mathcal H} _k =
\frac12 |\AA^T\tty|^2+\varepsilon \mathtt g^k_{\rm o}(\tty)+
2|f_k|\varepsilon\ \big[\cos(\ttx_1 +\sa_k)+ F^k_{\! \varstar}(\ttx_1)+ \mathtt g^k_{\! \varstar}(\tty,\ttx_1)+ \mathtt f^k_{\! \varstar} (\tty,\ttx)
\big]\,, \end{equation} where $F^k_{\! \varstar}$ is as in Proposition~\ref{pollaio} and satisfies $F^k_{\! \varstar}\in\hol_1^1$ and $ \modulo F^k_{\! \varstar} \modulo_1\leq 2^{-40}$.\\ Moreover,
$\mathtt g^k_{\! \varstar}(y,\cdot )\in\hol_1^1$ (for every $y\in \DDD^k_{\tilde r_k}$), $\pi_{\!{}_{\integer k}}\mathtt f^k_{\! \varstar}=0$, and one has \beq{martinaTEcippa}\textstyle \,\thickvert\!\!\thickvert\, \mathtt g^k_{\! \varstar}\,\thickvert\!\!\thickvert\,_{\tilde r_k,1}\le \vartheta =\frac{1}{{\mathtt K}^{5n}} \,,\quad\qquad \,\thickvert\!\!\thickvert\, \mathtt f^k_{\! \varstar} \,\thickvert\!\!\thickvert\, _{\tilde r_k,\tilde s_k} \leq e^{-{\mathtt K} s/7}\,. \end{equation} \end{theorem}
\noindent{\bf Proof\ } The first relation in \equ{trota2} is \equ{trota}. Define \beq{pippala} \Psi^k:= \Psi_k\circ \Fio\,. \end{equation} Then, since $s_\varstar/2<s_\varstar'$ (compare \equ{dublino}), by
\equ{fare2}, \equ{rocket2} we get the second relation in \equ{trota2}.
{
\noindent} {\rm (i)} follows from point {\rm (a)} of Lemma~\ref{averaging}.
{
\noindent} {\rm (ii)} \equ{colosseum3cippa}, \equ{hamseccippa} and \equ{cristinacippa} follow from, respectively, \equ{dopomedia}, \equ{hamsec}, \equ{cristina} and point (ii) of Lemma~\ref{Fiu} setting \beq{ggg} \mathtt g^k_{\rm o}(\tty):= g^k_{\rm o}(\AA^T\tty)\,,\qquad \mathtt g^k(\tty,\ttx_1) := g^k(\AA^T\tty,\ttx_1)\,. \end{equation} {\rm (iii)} follows by Proposition~\ref{pollaio} and Lemma~\ref{coslike}. In particular \equ{hamkccippa} follows from \equ{hamkc}. Furthermore, \begin{equation}\label{catecippa}
\mathtt g^k_{\! \varstar}:=\frac{1}{2|f_k|}\, \big(\mathtt g^k- \pi_{\!{}_{\integer k}} f\big)\,, \qquad
\mathtt f^k_{\! \varstar} :=\frac{1}{2|f_k|} \bar f^k \end{equation} and noting that $\mathtt g^k_{\! \varstar}(\tty,\ttx_1) = g^k_{\! \varstar}(\AA^T\tty,\ttx_1)$ and that, by \equ{hamsec}, $\mathtt f^k_{\! \varstar}(\tty,\ttx)=f^k_{\! \varstar}(\AA^T\tty,\AA^{-1} \ttx) $, we see that \equ{martinaTEcippa} follows from \equ{martinaTE} and \equ{fare2}. \qed
\section{Generic Standard Form at simple resonances}
In this final section we show that {\sl the secular Hamiltonians $\hamsec_k$ \equ{hamsec} in Theorem~\ref{normalform} can be symplectically put into a suitable standard form, uniformly in $k\in{\cal G}^n_{\KO}$}.
{
\noindent} The precise definition of `standard form' is taken from \cite{BCaa23}, where the analytic properties of action--angle variables of such Hamiltonian systems are discussed.
\begin{definition}\label{morso} Let $\hat D \subseteq \mathbb R^{n-1}$ be a bounded domain, ${\mathtt R}>0$ and $D:= (-{\mathtt R} ,{\mathtt R} ) \times\hat D $. We say that the real analytic Hamiltonian ${\ham}_{\flat}$ is in Generic Standard Form with respect to the symplectic variables $(p_1,q_1)\in (-{\mathtt R} ,{\mathtt R} )\times {\mathbb T} $ and `external actions' $$\hat p=(p_2,...,p_n)\in \hat D$$ if ${\ham}_{\flat}$ has the form
\beq{pasqua}
{\ham}_{\flat}(p,q_1)= \big(1+ \cin(p,q_1)\big) p_1^2
+\Gm(\hat p, q_1)
\,, \end{equation} where:
\begin{itemize}
\item[\bolla] $\cin$ and $ \Gm$ are real analytic functions defined on, respectively, $D_{\mathtt r}\times\mathbb T_{\mathtt s}$ and $\hat D_{\mathtt r}\times \mathbb T_{\mathtt s}$ for some $0<{\mathtt r}\leq{\mathtt R}$ and ${\mathtt s}>0$;
\item[\bolla] $\Gm$ has zero average and there exists a
function $\GO$ (the `reference potential') depending only on $q_1$ such that, for some\footnote{Recall Definition~\ref{buda}.} $\morse>0$,
\begin{equation}\label{A2bis} \GO\ \ \mbox{is} \ \ \morse {\rm \text{--}Morse}\,,\qquad \langle \GO\rangle=0\,; \end{equation} \item[\bolla]
the following estimates hold:
\beq{cimabue}
\left\{\begin{array}{l} \displaystyle \sup_{ {\mathbb T} ^1_{\mathtt s}}|\GO|\le \upepsilon\,,\\
\displaystyle \sup_{\hat D_{\mathtt r}\times {\mathbb T} ^1_{\mathtt s}}|\Gm-\GO| \leq \upepsilon
\lalla \,,\quad{\rm for\ some}\quad 0<\upepsilon\le {\mathtt r}^2/2^{16} \,,\ \ 0\le \lalla<1\,, \\
\displaystyle \sup_{D_{\mathtt r}\times {\mathbb T} ^1_{\mathtt s}}|\cin| \leq \lalla\,. \end{array}\right. \end{equation} \end{itemize} \end{definition} We shall call $(\hat D,{\mathtt R},{\mathtt r},{\mathtt s},\morse,\upepsilon,\lalla)$ {\sl the analyticity characteristics of ${\ham}_{\flat}$ with respect to the unperturbed potential $\GO$}.
\rem\label{trivia}
If ${\ham}_{\flat}$ is in Generic Standard Form, then the parameters $\morse$ and $\upepsilon$ satisfy the relation\footnote{By \equ{cimabue}, $\morse \le |\bar \Gm(\sa_i)- \bar \Gm(\sa_j)|\le 2 \max_ {\mathbb T} |\bar \Gm|\le 2\upepsilon$.} \beq{sucamorse}\textstyle \frac\upepsilon\morse\ge \frac12\,. \end{equation} Furthermore, one can always fix $\varpi\geq 4$ such that:
\begin{equation}\label{alce}\textstyle \frac{1}{\varpi}\leq {\mathtt s}\leq 1\,,\qquad 1\leq \frac{{\mathtt R}}{{\mathtt r}}\leq \varpi\,,\qquad \frac{1}{2}\leq
\frac{\upepsilon}{\morse } \leq \varpi \,. \end{equation} Such a parameter $\varpi$ rules the main scaling properties of these Hamiltonians. \erem
\subsection{Main theorem}
{
\noindent} In the following we shall often use the following notation: If $w$ is a vector with $n$ or $2n$ components, $\hat w=(w)^{\widehat{}}$ denotes the last $(n-1)$ components; if $w$ is vector with $2n$ components, $\check w=(w)^{{\!\!\widecheck{\phantom{a}}}}$ denotes the first $n+1$ components. Explicitly: \beq{checheche} w=(y,x)=\big((y_1,...,y_n),(x_1,...x_n)\big)\quad \Longrightarrow\quad \left\{ \begin{array}{l} \hat w=(w)^{\widehat{}}=(x_2,...,x_n)=\hat x\,,\\ \hat y=\,(y)^{\widehat{}}=(y_2,...,y_n)\,,\\ \check w=(w)^{{\!\!\widecheck{\phantom{a}}}}=(y,x_1)\,,\\ w=(\check w,\hat w)\,. \end{array} \right. \end{equation}
\dfn{dadaumpa} Given a domain $\hat {\rm D}\subseteq \mathbb R^{n-1}$, we denote by
$\Gdag$ the abelian group of symplectic diffeomorphisms $\Psi_{\! \ta}$ of $(\mathbb R\times\hat {\rm D})\times \mathbb R^n$ given by \[ (p,q)\in(\mathbb R\times\hat {\rm D})\times \mathbb R^n\stackrel{\Psi_{\! \ta}}\mapsto (P,Q)= (p_1+\ta(\hat p),\hat q,q_1,\hat q-q_1\partial_{\hat p} \ta(\hat p))\in {\mathbb R} ^{2n}\,, \] with $\ta:\hat {\rm D}\to \mathbb R$ smooth.
\edfn
\rem \label{alice} The group properties of $\Gdag$ are trivial: $$ {\rm id}_{\Gdag}=\Psi_{\! 0}\,,\qquad \Psi_{\! \ta}^{-1}=\Psi_{\! -\ta }\,,\qquad \Psi_{\! \ta}\circ\Psi_{\! \ta'}=\Psi_{\! \ta+\ta'}\,. $$ Notice, however, that, unless $\partial_{\hat p} \ta\in {\mathbb Z} ^{n-1}$, maps in $\Psi_{\! \ta}\in \Gdag$ {\sl do not induce well defined angle maps}
$q\in {\mathbb T} ^n\mapsto (q_1,\hat q-q_1\partial_{\hat p} \ta(\hat p))\in {\mathbb T} ^n$. \erem Now, let $f\in{\mathbb G}^n_{s}$ satisfy\footnote{Recall that by Lemma~\ref{telaviv} such $\d$ and $\b$ always exist.} \eqref{P1+} and \equ{P2+} for some $0<\d\le 1$ and $\b>0$ with $\mathtt N$ defined in \equ{enne}, let $k\in{\cal G}^n_{\KO}$, recall \equ{dublino} and define the following parameters\footnote{Here and in what follows we shall not always indicate explicitly the dependence upon $k$. Recall the definitions of $\itcu$, $\hAA$ and $\ttcs$ in, respectively, \equ{formentera}
Lemma~\ref{Fiu} and \equ{bollettino1}.} \beq{cerbiatta} \begin{array}l
{\mathtt R}={\a}/{|k|^2}={\sqrt\varepsilon {\mathtt K}^\nu}/{|k|^2}\,,\quad \itcd=4\, n^{\frac32} \itcu\,, \quad {\mathtt r}= {{\mathtt R}}/{\itcd}\,,
\quad\varepsilon_k=\frac{2\varepsilon}{|k|^2}\,, \phantom{\displaystyle\int} \\ \hat D = \big\{ \hat\act\in\mathbb R^{n-1}: \
|\proiezione_k^\perp \hAA^T \hat\act|<1\,, \ \displaystyle \min_{\sopra{\ell\in {\cal G}^n_{\K}}{\ell \notin \mathbb{Z} k}}
\big| \big(\proiezione_k^\perp \hAA^T \hat\act\big)\cdot \ell\big|
\geq {\textstyle \frac{3\a{\mathtt K}}{|k|}} \big\}\,,\ D=(-{\mathtt R},{\mathtt R})\times \hat D\,, \\
\morse=\casitwo{\varepsilon_k \b,}{\noruno{k}<\mathtt N}{\varepsilon_k |f_k|,}{\noruno{k}\ge \mathtt N}\,,\,\qquad
\chk=\casitwo {1 \,,}{\noruno{k}<\mathtt N}
{ |f_k|\,,}{\noruno{k}\ge \mathtt N}\,, \quad \upepsilon=\textstyle \ttcs \varepsilon_k\, \chk\,, \\ {\mathtt s}=\casitwo{\min\{\frac{s}2,1\}\,,}{\noruno{k}<\mathtt N}{1\,,}{\noruno{k}\ge \mathtt N}\,,\quad \textstyle \chs:=\casitwo{s'_k\,,}{\noruno{k}<\mathtt N\,,}{1\,,}{\noruno{k}\ge \mathtt N}\,, \quad \displaystyle \lalla=\frac{1}{{\mathtt K}^{5n}}\,. \end{array} \end{equation}
\begin{theorem}[Generic Standard Form at simple resonances]\label{sivori}\ \\ Let $\ham$ be as in \equ{ham} with $f\in{\mathbb G}^n_{s}$ satisfying \eqref{P1+} and \equ{P2+} for some $0<\d\le 1$ and $\b>0$ with $\mathtt N$ defined in \equ{enne}. Assume that\footnote{$\bfco$ is defined in Theorem~\ref{normalform}.} ${\mathtt K}_{{}_{\rm o}}\ge \max\{\itcd,\bfco\}$. Then, with the definitions given in \equ{cerbiatta}, the following holds for all $k\in{\cal G}^n_{\KO}$.
{
\noindent} {\rm (i)} There exists a real analytic
symplectic transformation \beq{diamond} \Phi_\diamond:(\ttp,\ttq)\ \in D\times \mathbb R^n \to (\tty,\ttx)=\Phi_\diamond(\ttp,\ttq)\in \mathbb R^{2n}\,, \end{equation} such that: $\Phi_\diamond$ fixes $\hat\ttp$ and\footnote{I.e., in \equ{diamond} it is $\tty=\hat\ttp,\ttx_1=\ttq_1$.} $\ttq_1$; for every $\hat\ttp\in\hat D$ the map $(\ttp_1,\ttq_1)\mapsto (\tty_1,\ttx_1)$ is symplectic; the $(n+1)$--dimensional map\footnote{Recall the notation in \equ{checheche}.} $\check\Phi_\diamond$ depends only on the first $n+1$ coordinates $(\ttp,\ttq_1)$, is $2\pi$--periodic in $\ttq_1$ and, if $\DDD^k= \AA^{-T}\Ruk$ and $\hamsec_k$ are as in Theorem~\ref{normalform}, one has\footnote{ $r_k$ and $s'_k$ are defined in \equ{dublino}, $\tilde r_k$ in \equ{formentera}.} \beq{tikitaka} \begin{array}l
\check\Phi_\diamond:
D_{\chr}\times {\mathbb T} _\chs\ \, \to \DDD^k_{\tilde r_k}\times \mathbb T_\chs\,, {\phantom{\displaystyle \int}} \\ \hamsec_k\circ \check\Phi_\diamond(\ttp,\ttq)=:
\textstyle{\frac{|k|^2}{2}}( {\ham}_{{}_k}(\ttp,\ttq_1)+ \hzk(\hat\ttp))\,, {\phantom{\displaystyle \int}} \\ \sup_{\hat\ttp\in \hat D_{2{\mathtt r}}}
\big|\textstyle\hzk(\hat\ttp)- \htk(\hat\ttp)
\big| \leq
\textstyle 6\, \varepsilon_k\lalla\,,\qquad\ \htk(\hat\ttp):={\textstyle \frac{1}{|k|^2}} | \proiezione^\perp_k \hAA^T \hat\ttp|^2\,. \end{array} \end{equation} {\rm (ii)} ${\ham}_{{}_k}$ in \equ{tikitaka} is in Generic Standard Form according to Definition~\ref{morso}: \[ {\ham}_{{}_k}(\ttp,\ttq_1)=\big(1+\cins(\ttp,\ttq_1)\big)\, \ttp_1^2 + \Gf(\hat\ttp,\ttq_1)\,, \] having reference potential
\beq{paranoia} \GO=\bGf:= \varepsilon_k\, \pi_{\!{}_{\integer k}} f\,, \end{equation} analyticity characteristics given in \equ{cerbiatta}, and $\upkappa$ verifying \equ{alce} with \beq{kappa}\textstyle \upkappa=\upkappa(n,s,\b):=\max\big\{\itcd\,, 4\ttcs\,, \ttcs/\beta \big\}\,. \end{equation} {\rm (iii)} Finally, $\Phi_\diamond=\Fiuno\circ\Fidue\circ\Fitre$, where\footnote{Recall Definition~\ref{dadaumpa}.}: $\Fiuno:=\Psi_{\!\giuno}\in\Gdag$ with
$\giuno(\hat \ttp):=-\textstyle \frac1{|k|^2}{(\hAA k)\cdot \hat \ttp}$; $\Fitre:=\Psi_{\!\gitre}\in\Gdag$ for a suitable real analytic function $\gitre(\hat\ttp)$ satisfying \[ \textstyle
|\gitre|_{4 \chr}< \frac{\varepsilon_k\chk}{\chr}\lalla\,, \] and $\Fidue(\ttp,\ttq)=(\ttp_1+\upeta_{{}_2},\hat \ttp, \ttq_1,\hat \ttq+\upchi_{{}_2})$ for suitable real analytic functions $\upeta_{{}_2}=\upeta_{{}_2}(\hat \ttp,\ttq_1)$ and $\upchi_{{}_2}=\upchi_{{}_2}(\hat \ttp,\ttq_1)$
satisfying \[ \textstyle
|\upeta_{{}_2}|_{4 \chr,\chs}< \frac{\varepsilon_k\chk}{\chr}\lalla\,,\qquad |\upchi_{{}_2}|_{2 \chr,\chs}< \frac{4\varepsilon_k\chk}{\chr^2}\,\lalla \,. \] \end{theorem}
\rem\label{rampulla} (i) One of the main points of the above theorem is that the parameter $\upkappa$ in \equ{kappa} {\sl does not depend on $k$}. Incidentally, we point out that $\upkappa$ depends (indirectly) also on $\d$, since $\d$ appears in the definition of $\mathtt N$ and $\b$ is the uniform Morse constant of the first $\mathtt N$ reference potentials.
{
\noindent} (ii) Note that by \equ{cerbiatta}, \equ{tikitaka}, \equ{dublino} and \equ{bollettino1} \begin{equation}\label{fangorn}\textstyle \min\big\{\frac{s}2,1\big\}\le {\mathtt s}\leq \chs\le {s'_k}\,. \end{equation} In particular, the composition $\hamsec_k\circ \check\Phi_\diamond$ is well defined; compare Theorem~\ref{normalform}--(ii).\\ As for the action analyticity radii, notice that, by the definitions in \equ{dublino}, \equ{formentera} and \equ{cerbiatta}, one has \beq{bollettino2}
r_k={\mathtt R}\, |k|\,,\qquad \tilde r_k= \frac{{\mathtt R}}\itcu\,. \end{equation} (iii) The three maps which define $\Phi_\diamond$ have the following purposes: The first one is needed to decouple the `kinetic energy' of the 1--d.o.f. secular system; the second one is introduced so as to get a purely positional 1--dimensional potential; finally, the third one puts the momentum coordinate of the equilibria in 0.
{
\noindent} (iv) The proof is fully constructive and the explicit definition of ${\ham}_{{}_k}$ is given in \equ{cins}, \equ{fpe}, \equ{guaito}, \equ{limone}, \equ{pontediferro} and \equ{hamsecu} below. \erem
\subsection{Proof of the main theorem} The proof is articulated in three lemmata. \\ The first lemma shows how to `block--diagonalize' the kinetic energy. For $k\in {\cal G}^n_{\KO}$, recall the definition of the matrices
$\AA$ and $\hAA$ in \eqref{scimmia}, and define\footnote{ ${\rm I}_{m}$ denotes the $(m\times m)$--identity matrix and recall the notation in \equ{checheche}.} \beqa{centocelle} && \tty= {\rm U}\ttY:= \left(\begin{matrix}
1 & - \frac1{|k|^2}(\hAA k)^T \cr 0 & \quad{\rm I}_{{}_{n-1}} \cr \end{matrix}\right)\ttY \,,\qquad {\rm i.e.}\qquad
\casi{\tty_1=\ttY_1 - \frac1{|k|^2}{\hAA k\cdot \hat \ttY}\,,} {\hat \tty=\hat \ttY\,.} \end{eqnarray} Then, one has
\begin{lemma}\label{phi1} {\rm (i)}
Let $\Fiuno$ be the map $\Fiuno(\ttY,\ttX)=({\rm U}\ttY,{\rm U}^{-T}\ttX)$. Then, $\Fiuno$ is symplectic and \beq{finocchio} \DDD^k = {\rm U} \ZZ \,,\qquad \Fiunoc: \ZZ_{4 \chr}\times\mathbb T_\chs\to \DDD^k_{\tilde r_k}\times \mathbb T_\chs\,. \end{equation} {\rm (ii)} Let \beq{hamsecu} \left\{ \begin{array}{l} \Guo:= {\textstyle \varepsilon_k} g^k_{\rm o}(\AA^T {\rm U} \ttY)\,,\quad \Gu(\ttY,\ttX_1):= {\textstyle \varepsilon_k} g^k(\AA^T {\rm U} \ttY,\ttX_1)\,,\\ \ \\
\hamsecu(\ttY,\ttX_1):=
\ttY_1^2+ \Guo(\ttY)+\Gu(\ttY,\ttX_1)\,,\qquad \langle \Gu(\ttY,\cdot)\rangle=0\,.
\end{array}\right. \end{equation} Then, if $\hamsec_k$ is as in \equ{hamsec}, one has \beq{spigolak}
\hamsec_k\circ \Fiunoc(\ttY,\ttX_1)= {\textstyle\frac{|k|^2}{2}} \, \hamsecu(\ttY,\ttX_1)+ {\textstyle \frac12} | \proiezione^\perp_k \hAA^T \hat \ttY| ^2\,, \end{equation} with $\hamsecu$ real analytic on $\ZZ_{4 \chr}\times\mathbb T_\chs$ and $\langle \Gu(\ttY,\cdot)\rangle=0$, and the following estimates~hold\footnote{$\vartheta $ is defined in \equ{martinaTE}. Notice that, by \equ{cerbiatta}, $\chi_{{}_k}\le 1$ for all $k$.}: \beq{betta}
|\Guo|_{4 \chr}\le \bettao:=2 \varepsilon_k \vartheta = \frac{2\varepsilon_k}{{\mathtt K}^{5n}}\,,\qquad
| \Gu-\bGf|_{4 \chr,\chs}\le \betta:=\chi_{{}_k}\bettao\le\bettao \,. \end{equation} \end{lemma}
\noindent{\bf Proof\ } (i) $\Fiuno$ is symplectic since it is generated by the generating function ${\rm U}\ttY\cdot \ttx$.\\ From the definitions of $\AA$ and ${\rm U}$ in, respectively, \equ{scimmia} and \equ{centocelle}, it follows \beq{perpieta}
(\AA^T{\rm U}) \ttY=\ttY_1 k + \hAA^T\hat \ttY- {\textstyle \frac{(\hAA k)\cdot \hat \ttY}{|k|^2}k}=
\ttY_1 k + \hAA^T\hat \ttY- {\textstyle \frac{ \hAA^T \hat \ttY \cdot k}{|k|^2}k} =\ttY_1 k + \proiezione_k^\perp \hAA^T \hat \ttY\,. \end{equation}
Thus, $\tty= (\AA^T{\rm U}) \ttY$ if and only if $\tty\cdot k=\ttY_1 |k|^2$ and $\proiezione_k^\perp \tty=\proiezione_k^\perp \hAA^T \hat \ttY$, which is equivalent to say $(\AA^T{\rm U}) \ZZ =\Ruk$, which in view of \equ{formentera}, is equivalent to $\DDD^k = {\rm U} \ZZ$. Now, by~\equ{scimmia}, \beq{UU}
|{\rm U}|, |{\rm U}^{-1}|\le n^{\frac32}\,, \end{equation} where, as usual, for a matrix $M$ we denote by
$\displaystyle |M|=\sup_{u\neq 0} |Mu|/|u|$ the standard operator norm. Thus, by \equ{cerbiatta} and \equ{bollettino2} we have (for complex $z$) \beq{chitikaka}
|z|<4{\mathtt r} \ \ \implies \ \
|{\rm U} z|< n^{\frac32} 4 {\mathtt r} = 4 n^{\frac32}\, \frac{{\mathtt R}}{\itcd}= \frac{{\mathtt R}}{\itcu}= \tilde r_k\,, \end{equation} which, since $\ttX_1=\ttx_1$, implies that $\Fiunoc: \ZZ_{4 \chr}\times\mathbb T_\chs\to \DDD^k_{\tilde r_k}\times \mathbb T_\chs$, proving \equ{finocchio}.
{
\noindent} (ii) By the previous item, the composition
$\hamsec_k\circ \Fiunoc$ is well defined and analytic on $\ZZ_{4 \chr}\times\mathbb T_\chs$. From \equ{perpieta} it follows that
$|\AA^T{\rm U} \ttY|^2=|k|^2 \ttY_1^2 + | \proiezione^\perp_k \hAA^T \hat \ttY| ^2$, and \equ{spigolak} follows. Notice that since $g^k(y,\cdot)\in\hol_{s'_k}^1$ (compare Lemma~\ref{averaging}), $\Gu$ has zero average over $\mathbb T$. \\ By the definition of $\Guo$ and $\Gu$ in \equ{hamsecu}, by \equ{chitikaka}, \equ{cristina} in\footnote{Recall that, by \equ{dublino}, \equ{formentera}, $\tilde r_k<r'_k=r_k/2$. Recall also the definitions of $\vartheta_{\rm o}$ and $\vartheta $ in \equ{552} and \equ{martinaTE}.} Lemma~\ref{averaging}, the estimates
on $|\Guo|_{4 \chr}$ and on $| \Gu-\bGf|_{4 \chr,\chs}$ for $\noruno{k}<\mathtt N$ in \equ{betta} follow. The estimate for $\noruno{k}\ge\mathtt N$ in \equ{betta} follows from Lemma~\ref{coslike}: see in particular \equ{cate}, \equ{martinaTE} and \equ{alfacentauri}. \qed
{
\noindent} Next lemma shows how one can remove the dependence on $\ttY_1$ in the potential $\Gu$.
\begin{lemma}\label{avogado} If ${\mathtt K}\ge \itcd$ then, \beq{tettapic}\textstyle
\frac{\bettao}{\chr^2}
<\frac1{2^{10}}\ \frac\chs{\pi+\chs}<1
\,, \end{equation} and the following statements hold. \\ {\rm (i)} The fixed point equation \beq{fpe} \pp = -{\textstyle \frac12} \, \partial_{\ttY_1} \Guo(\pp,\hat\ttP) -{\textstyle \frac12} \, \partial_{\ttY_1}\Gu(\pp,\hat\ttP,\ttQ_1) \end{equation} has a unique solution $\pp:(\hat \ttP,\ttQ_1)\in \hat\ZZ\times {\mathbb T} \mapsto \pp(\hat\ttP,\ttQ_1)\in {\mathbb R} $ real analytic on $\hat\ZZ_{4 \chr}\times {\mathbb T} _\chs$, satisfying \beq{pitale}\textstyle
|\pp|_{4 \chr,\chs}<\frac{\bettao}{3 \chr}\,. \end{equation} Furthermore, if we define \beq{guaito} \left\{ \begin{array}{l} \pp_{\rm o}(\hat \ttP):=\langle \pp(\hat\ttP,\cdot)\rangle \\ \tilde\pp:=\pp-\pp_{\rm o} \end{array}\right.\,,\qquad \left\{ \begin{array}{l} \displaystyle \phi(\hat \ttP,\ttX_1):= \int_0^{\ttX_1} \tilde\pp(\hat \ttP,\sa)d\sa\\ \hat\qq(\hat\ttP,\ttQ_1):=-\partial_{\hat\ttP} \, \phi(\hat\ttP,\ttQ_1) \end{array}\right. \end{equation} then, $\ttQ_1\to\hat\qq(\hat\ttP,\ttQ_1)$ is a real analytic periodic function, and one has \beq{tess}\textstyle
|\pp_{\rm o}|_{4 \chr}< \frac13\, \frac{\bettao}{\chr}\,,\qquad\quad
|\tilde\pp|_{4 \chr,\chs}< \frac13\, \frac{\betta}{\chr}\,,\qquad |\hat \qq|_{2 \chr,\chs}< \frac{\betta}{6 \chr^2}\,(\pi+\chs) \,. \end{equation} {\rm (ii)} The real analytic symplectic map $\Fidue$ generated by $ \ttP\cdot\ttX+ \phi(\hat\ttP,\ttX_1)$, namely, \beq{Fidue} \Fidue:(\ttP,\ttQ)\mapsto (\ttY,\ttX) \quad{\rm with}\quad \casi{\ttY_1=\ttP_1+ \tilde \pp(\hat \ttP,\ttQ_1)}{\hat \ttY=\hat \ttP}\,, \quad \casi{\ttX_1=\ttQ_1}{\hat \ttX=\hat \ttQ + \hat\qq(\hat\ttP,\ttQ_1)}
\,, \end{equation} satisfies: \beq{elficheck} \Fiduec: \ZZ_{2 \chr}\times {\mathbb T} _\chs\to \ZZ_{3 \chr}\times {\mathbb T} _\chs\,, \end{equation} and \beqa{hamsecd} \hamsecd(\ttP,\ttQ_1)&:=&\hamsecu\circ \Fiduec(\ttP,\ttQ_1)\\ &=& \big(1+\tilde\cin(\ttP,\ttQ_1)\big)\, \big(\ttP_1-\pp_{\rm o}(\hat \ttP)\big)^2 + \Gfo(\hat\ttP)+ \Gf(\hat\ttP,\ttQ_1)\,, \nonumber \end{eqnarray} for suitable functions $\tilde\cin$, $\Gfo$ and $\Gf$ (explicitly defined in \equ{limone} below, with $\langle\Gf\rangle=0$) real analytic on, respectively,
$\ZZ_{2\chr}\times {\mathbb T} _\chs$, $\hat \ZZ_{2\chr}$ and $\hat \ZZ_{2\chr}\times {\mathbb T} _\chs$ , which satisfy the bounds: \beq{cima}\textstyle
|\tilde \cin|_{2\chr,\chs}\leq \frac{\bettao}{4\chr^2}\,, \qquad
|\Gfo|_{2\chr}\leq 3 \bettao\,\qquad \quad
|\Gf-\bGf|_{2\chr,\chs}\leq 2\betta\,. \end{equation} \end{lemma}
\noindent{\bf Proof\ } We start by proving \equ{tettapic}. Recalling \equ{fangorn}, \equ{bollettino1} and \equ{bollettino3}, we have \beq{chenoia}\textstyle \frac{\pi+\chs}\chs\stackrel{}\le 1+ 2\pi \ttcs < 8 \ttcs<{\mathtt K}\,.
\end{equation} Now, by the definitions in \equ{betta}, \equ{spigolak}, \equ{cerbiatta}, \equ{dublino}, we find \[ \textstyle \frac{\bettao}{\chr^2} =
4 \itcd^2 \, \frac{|k|^2}{{\mathtt K}^{14n+4}} \stackrel{\equ{chenoia}}\le 4 \itcd^2 \, \frac{1}{{\mathtt K}^{14n+1}} \frac\chs{\pi+\chs}\,, \] which yields \equ{tettapic} since, by assumption, ${\mathtt K}>{\mathtt K}_{{}_{\rm o}}\ge \itcd$.
{
\noindent} (i) Let us denote by ${\bf X}:= \hat\ZZ_{4 \chr,\chs}\times {\mathbb T} _\chs$ and by
$\mathcal X$ the complete metric space formed by the real analytic complex--valued functions $u: {\bf X}\to \{z\in {\mathbb C} :|z|\le \chr/2\}$, equipped with the metric given by the distance in sup--norm on $\bf X$. Let us also denote: \beq{pontediferro} \ttG^\sharp:=\Guo+\Gu\,,\qquad\quad \tilde \ttG^\sharp := \Gu- \bGf\,. \end{equation} Note that $\Gu$ and $\tilde \ttG^\sharp$ have zero average. Consider the operator $F:u\in {\mathcal X} \mapsto F(u)$, where $F(u)(\hat \ttP,\ttQ_1):=-{\textstyle \frac12} \partial_{\ttY_1} \ttG^\sharp (u(\hat\ttP,\ttQ_1),\hat\ttP, \ttQ_1)$. If $u\in {\mathcal X}$, then, by Cauchy estimate we get \beqa{onam1}
\sup_{\bf X} |F(u)|&=&{\textstyle \frac12} \sup_{\bf X}
\big| \partial_{\ttY_1} \ttG^\sharp(u(\hat\ttP,\ttQ_1),\hat\ttP, \ttQ_1)\big| \nonumber\\ &=&{\textstyle \frac12} \sup_{\bf X}
\big| \partial_{\ttY_1} \big[\ttG^\sharp(u(\hat\ttP,\ttQ_1),\hat\ttP, \ttQ_1)- \bGf(\ttQ_1)\big]\big| \nonumber\\ &\le &
\frac12\ \frac{\big| \ttG^\sharp-\bGf\big|_{4 \chr,\chs}}{4 \chr- \frac\chr2} \nonumber\\ &\stackrel{\equ{betta}}\le& \frac12\, \frac{\bettao+\betta }{4 \chr- \frac\chr2} \le \frac27\, \frac\bettao{\chr} \stackrel{\equ{tettapic}}{< } \frac27\, \chr<\frac\chr2\,.
\end{eqnarray} Thus, $F:{\mathcal X}\to {\mathcal X}$. Let us check that $F$ is, in fact, a contraction on ${\mathcal X}$. If $u,v\in{\mathcal X}$, then, again, by Cauchy estimate, \equ{betta} and \equ{tettapic}, we get\footnote{$u$ and $v$, in the r.h.s. of the first inequality, are evaluated at $(\ttQ_1,\hat\ttP)$. } \beqa{onam2}
\sup_{\bf X} |F(u)-F(v)| &\le & {\textstyle \frac12} \sup_{\bf X}
\big| \partial_{\ttY_1}\big( \ttG^\sharp(u,\hat \ttP,\ttQ_1)- \ttG^\sharp(v,\hat \ttP,\ttQ_1)\big)\big| \nonumber\\
&\le& \frac12\ \big| \partial_{\ttY_1}^2(\ttG^\sharp-\bGf\big)|_{\frac\chr2,\chs}\cdot \sup_{\bf X} |u-v| \nonumber\\ &\le &
\frac12\ \frac{\big| \ttG^\sharp-\bGf\big|_{4 \chr,\chs}}{\big({4 \chr}- \frac\chr2\big)^2}\cdot \sup_{\bf X} |u-v| \nonumber\\
&\stackrel{\equ{betta}}{\le}& \frac{4}{49}\ \frac{\bettao}{\chr^2} \cdot \sup_{\bf X} |u-v|
\stackrel{\equ{tettapic}}{<} \frac18\ \cdot \sup_{\bf X} |u-v|\,,
\end{eqnarray} showing that $F$ is a contraction on $\mathcal X$. Thus, by the standard Contraction Lemma, it follows that there exists a unique $\pp\in {\mathcal X}$ solving \equ{fpe}.
{
\noindent} Since $\pp=F(\pp)$, one sees that \equ{pitale} follows from \equ{onam1}.\\ The first bound in \equ{tess} follows immediately from \equ{pitale}. \\ To prove the second estimate in \equ{tess}, write\footnote{To simplify notation, we drop, here, from the notation the explicit dependence on $\hat\ttP$ and $\ttQ_1$ of $\ttG^\sharp$.} \beq{deangelis}
\partial_{\ttY_1} \ttG^\sharp (\pp)=
\partial_{\ttY_1} \ttG^\sharp (\pp_{\rm o}+\tilde\pp) =
\partial_{\ttY_1} \ttG^\sharp (\pp_{\rm o}) + w \tilde\pp\,,\quad {\rm with} \quad w:= \int_0^1 \partial^2_{\ttY_1} \ttG^\sharp (\pp_{\rm o}+t \tilde\pp)dt\,. \end{equation} As above, by Cauchy estimates, \beq{marcore}
|w|_{4\chr,\chs}\le \frac2{49} \, \frac{\bettao+\betta}{\chr^2}<\frac18\,. \end{equation} Thus, by \equ{deangelis}, Cauchy estimates, and \equ{marcore}, observing that\footnote{Recall that $\langle \Gu(\ttY,\cdot)\rangle=0$ as stated in Lemma \ref{phi1}.} $\langle \partial_{\ttY_1} \Gu(\pp_{\rm o})\rangle=0$, one finds \begin{eqnarray*}
|\tilde \pp|=|\pp-\pp_{\rm o}|&\stackrel{\equ{deangelis}}=&{\textstyle \frac12} \big| \partial_{\ttY_1} \ttG^\sharp (\pp_{\rm o}) - \langle \partial_{\ttY_1} \ttG^\sharp (\pp_{\rm o})\rangle +
w \tilde \pp - \langle w \tilde \pp \rangle\big|\\ &=&
{\textstyle \frac12} \big| \partial_{\ttY_1} \Gu (\pp_{\rm o}) - \langle \partial_{\ttY_1} \Gu(\pp_{\rm o})\rangle +
w \tilde \pp - \langle w \tilde \pp \rangle\big|\\ &=&
{\textstyle \frac12} \big| \partial_{\ttY_1} \big(\Gu (\pp_{\rm o})-\bGf\big)+
w \tilde \pp - \langle w \tilde \pp \rangle\big|\\
&\stackrel{\equ{betta},\equ{marcore}}\le& \frac12 \Big(\, \frac27 \, \frac\betta\chr\Big) + \frac12 |\tilde \pp|\,, \end{eqnarray*} which yields immediately the second bound in \equ{tess}. \\ Next, since $\tilde \pp$ has zero average over the torus, the function $\phi$ defined in \equ{guaito} defines a (real analytic) periodic function such that $\partial_{\ttX_1}\phi=\tilde \pp$. Furthermore, by the second estimate in \equ{tess}, one has\footnote{$\pi+\chs$ is an estimate of the length of the integration path in \equ{guaito}, as the real part of
$\ttQ_1$ can be taken in $[-\pi,\pi)$.} $
|\phi|_{4 \chr,\chs}< \frac{\betta}{3 \chr}\ (\pi+\chs)\,, $ so that, by Cauchy estimates, also last bounds in \equ{tess} follow.
{
\noindent} (ii) By the definition of $\Fidue$ in \equ{Fidue}, by \equ{tess} and \equ{tettapic}, the relations in \equ{elficheck} follow at once. \\ Now, define\footnote{Here, $\pp_{\rm o}=\pp_{\rm o}(\hat\ttP)$.} \beq{limone} \left\{\begin{array}{l} \displaystyle \tilde\cin(\ttP,\ttQ_1):=\int_0^1 (1-t)\partial_{\ttY_1}^2 \ttG^\sharp\big(\pp_{\rm o}+t (\ttP_1-\pp_{\rm o}),\hat\ttP,\ttQ_1\big)dt\,,\\ \Gfo(\hat\ttP):= \langle \pp(\hat \ttP,\cdot)^2\rangle +\langle \ttG^\sharp\big( \pp(\hat \ttP,\cdot),\hat\ttP,\cdot)\rangle \,,\\ \displaystyle \Gf(\hat\ttP, \ttQ_1):= \pp(\hat \ttP,\ttQ_1)^2 + \ttG^\sharp\big( \pp(\hat \ttP,\ttQ_1),\hat\ttP,\ttQ_1) - \Gfo(\hat\ttP) \,, \end{array}\right. \end{equation} then, by Taylor's formula, \equ{hamsecu}, \equ{Fidue}, \equ{pontediferro} and \equ{fpe}, one finds\footnote{$\displaystyle g(t_0+\t)=g(t_0)+g'(t_0)\t+\Big(\int_0^1 (1-t) g''\big(t_0+t\t\big)dt\Big) \t^2$ with $g=\Gu(\cdot,\ttQ_1)$, $t_0=\pp$ and $\t=\ttP_1-\pp_{\rm o}$. For ease of notation we drop the (dumb) dependence upon $\hat \ttP$ in these formulae.} \beqa{onam3} \hamsecd(\ttP_1,\ttQ_1)&:=&\hamsecu\circ \Fiduec(\ttP_1,\ttQ_1) = (\ttP_1+\tilde\pp)^2+\ttG^\sharp(\ttP_1+\tilde\pp,\ttQ_1)\nonumber\\ &\stackrel{\equ{guaito}}=&\big(\pp+(\ttP_1-\pp_{\rm o})\big)^2+ \ttG^\sharp\big(\pp+(\ttP_1-\pp_{\rm o}),\ttQ_1\big)\nonumber\\ &\stackrel{\equ{limone}}{=}& (\ttP_1-\pp_{\rm o})^2+2(\ttP_1-\pp_{\rm o})\pp + \pp^2+ \ttG^\sharp(\pp,\ttQ_1)+\partial_{\ttY_1}\ttG^\sharp(\pp,\ttQ_1) (\ttP_1-\pp_{\rm o}) \nonumber \\ && + (\ttP_1-\pp_{\rm o})^2 \tilde \cin\nonumber\\ &\stackrel{\equ{fpe}}{=}& (1+\tilde \cin) (\ttP_1-\pp_{\rm o})^2 + \pp^2+ \ttG^\sharp(\pp,\ttQ_1)\nonumber\\ &\stackrel{\equ{limone}}{=}& (1+\tilde \cin) (\ttP_1-\pp_{\rm o})^2+ \Gfo+ \Gf(\ttQ_1)\,, \end{eqnarray} proving \equ{hamsecd}. \\ Let us now prove \equ{cima}. 
Observe that for $\ttP\in \ZZ_{2\chr}$ by \equ{tettapic} and \equ{tess} the segment $\big(\pp_{\rm o}+t (\ttP_1-\pp_{\rm o}),\hat \ttP\big)$, $t\in[0,1]$, still belongs to $\ZZ_{2\chr}$, hence, by definition of $\tilde \cin$ in \equ{limone}, by Cauchy estimate\footnote{Compare, also, the estimates done in \equ{onam2}.} and \equ{betta} one obtains the first estimate in \equ{cima}. \\
By the definition of $\Gfo$, by \equ{pitale} and \equ{betta}, observing that $|\ttG^\sharp|\le \bettao+\betta\le 2\bettao$, one gets immediately the second estimate in \equ{cima}. \\ As for the third estimate in \equ{cima}, by the definitions given, one has that\footnote{Dropping, again, in the notation the dumb variable $\hat \ttP$.} \beq{proietti} \Gf-\bGf = \big(\pp^2-\langle \pp^2\rangle\big)+\big(\ttG^\sharp(\pp,\cdot) - \langle \ttG^\sharp(\pp,\cdot)\rangle-\bGf\big) \,. \end{equation} Let us estimate the terms in brackets separately. For $\hat\ttP\in \hat\ZZ_{2\chr}$ and $\ttQ_1\in {\mathbb T} _{\chs}$, one finds \beqa{fiorini}
|\pp^2-\langle \pp^2\rangle|=|2\tilde \pp\pp_{\rm o} +\tilde\pp^2-\langle \tilde \pp^2\rangle|\le
(2|\pp_{\rm o}|+2|\tilde \pp|)\, |\tilde\pp| \stackrel{\equ{tess}}{\le} \frac49 \bettao \frac\betta{\chr^2}\stackrel{\equ{tettapic}}{<} \frac\betta2\,. \end{eqnarray} To estimate the second term in \equ{proietti}, we define $$\z(t):= \ttG^\sharp(\pp_{\rm o}+t \tilde \pp,\ttQ_1) - \langle \ttG^\sharp(\pp_{\rm o}+t \tilde \pp,\cdot)\rangle\,, $$ and observe that (recall \equ{pontediferro})
$\z(0)= \Gu(\pp_{\rm o},\ttQ_1)$ and that, by Cauchy estimates, we get\footnote{Reasoning as in \equ{onam1}.} \beqa{oji}
|\z'(s)|&\le& |\tilde \pp|\, \int_0^1\big| \partial_{\ttY_1}\big( \ttG^\sharp(\pp_{\rm o}+t \tilde \pp,\ttQ_1) - \langle \ttG^\sharp(\pp_{\rm o}+t \tilde \pp,\cdot)\rangle\big)\big|dt\nonumber\\
&\le& |\tilde \pp|\, \frac{2|\ttG^\sharp|_{4\chr ,\chs}}{4\chr -\frac\chr2}
\le |\tilde \pp|\, \frac{4\bettao}{4\chr -\frac\chr2} \stackrel{\equ{tess}}{<}\frac8{21} \frac{\bettao}{\chr^2}\ \, \betta\stackrel{\equ{tettapic}}{<}\frac12 \betta\,. \end{eqnarray} Thus, \[
\big|\ttG^\sharp(\pp,\cdot) - \langle \ttG^\sharp(\pp,\cdot)\rangle-\bGf\big|\le |\z(0)-\bGf|+\int_0^1|\z'(t)|dt \le
| \Gu(\pp_{\rm o},\cdot)-\bGf|+\frac12 \betta\stackrel{\equ{betta}}{\le} \frac32 \betta\,. \] Putting together this estimate and \equ{fiorini} one gets also the third estimate in \equ{cima}. \qed
{
\noindent} The final transformation is again just a translation, which is done so that
{\sl all equilibria of the secular system will lie on the angle--axis in its 2--dimensional phase space}.
\begin{lemma}\label{platano} The real analytic symplectic map $\Fitre\in\Gdag$ defined as \beq{Fitre} \Fitre: (\ttp,\ttq) \mapsto (\ttP,\ttQ)
\quad{\rm with}\quad \casi{\ttP_1=\ttp_1 + \pp_{\rm o}(\hat\ttp)}{\hat \ttP=\hat \ttp}\,, \qquad \casi{\ttQ_1=\ttq_1}{\hat\ttQ=\hat\ttq -\ttq_1\partial_{\hat \ttp} \pp_{\rm o}(\hat\ttp) \, }
\,, \end{equation} satisfies \beq{giovannino}\textstyle \Fitrec: \ZZ_{\chr}\times {\mathbb T} _\chs\to \ZZ_{2\chr}\times {\mathbb T} _\chs\,. \end{equation} Furthermore, one has: \beq{Hsharp-} \hamsecd\circ \Fitrec(\ttp,\ttq_1)= \big(1+\cins(\ttp,\ttq_1)\big)\, \ttp_1^2 + \Gfo(\hat\ttp)+ \Gf(\hat\ttp,\ttq_1)\,, \end{equation} where \beq{cins} \cins(\ttp,\ttq_1):= \tilde\cin\big(\pp_{\rm o}(\hat\ttp)+\ttp_1, \hat\ttp,\ttq_1\big)\,, \end{equation} and the following bounds hold: \beq{cimadue}
|\cins|_{\chr,\chs}\leq \frac{\bettao}{4\chr^2}\,, \qquad
|\Gfo|_{2\chr}\leq 3 \bettao\,,\qquad\quad
|\Gf-\bGf|_{2\chr,\chs}\leq 2\betta\,. \end{equation} \end{lemma}
\noindent{\bf Proof\ } Just observe that, if $|\ttp_1|<\chr$, then, by \equ{tess} and \equ{tettapic}, it follows that, for all $\ttp\in\ZZ_{\chr}$, $$\textstyle
|\pp_{\rm o}(\hat\ttp)+\ttp_1|< \frac{\bettao}{3 \chr}+\chr\le \frac\chr3+\chr=\frac43 \chr<2 \chr\,, $$ so that \equ{giovannino} holds. Finally, by \equ{cima}, we get\footnote{{\sl $\Gf$ and $\bGf$ are the same} as in \equ{cima} of Lemma~\ref{avogado}.} \equ{cimadue}. \qed
We are ready for the
{
\noindent} \noindent{\bf Proof\ } {\bf of Theorem \ref{sivori}}\\ Recall the definitions of $\Phi_{\!{}_j}$, $1\le j\le 3$, in, respectively, Lemma~\ref{phi1}, \equ{Fidue} and \equ{Fitre} and define $\Phi_\diamond:=\Fiuno\circ\Fidue\circ \Fitre$, and $\hzk(\hat\p):=
\frac{1}{|k|^2} | \proiezione^\perp_k \hAA^T \hat\p| ^2 +\Gfo(\hat\p)$. Then, the expression for ${\ham}_{{}_k}$ in \equ{tikitaka}
follows by \equ{spigolak}, \equ{hamsecd} and \equ{Hsharp-}. \\ By \equ{giovannino} and Lemma~\ref{platano}, the Hamiltonian function ${\ham}_{{}_k}$ is real analytic on $\ZZ_\chr\times {\mathbb T} _\chs$, where $\ZZ=(-{\mathtt R},{\mathtt R})\times\hat \ZZ$ (compare \equ{cerbiatta}). \\ By \equ{P2+} and Proposition~\ref{punti} we have that $\bGf$ in \eqref{paranoia} is $\morse$--Morse with $\morse$ as in \equ{cerbiatta}. \\
Let us, now, estimate $|\bGf|_{\mathtt s}$. Consider, first, $\noruno{k}<\mathtt N$. Then, estimating $|f_{jk}|$ by $e^{-|j|\noruno{k}s}$, by the definition of ${\mathtt s}$, we get \begin{eqnarray*}
|\bGf|_{\mathtt s}&\stackrel{\equ{paranoia}}=& \varepsilon_k |\pi_{\!{}_{\integer k}} f|_{{\mathtt s}}
\le
\varepsilon_k |\pi_{\!{}_{\integer k}} f|_{s/2}=\varepsilon_k \sum_{j\neq 0} |f_{jk}|e^{\frac{|j|\noruno{k}s}{2}} \\
&\le& \frac{8\varepsilon}{|k|^2} \, \frac{e^{-s/2}}{2(1-e^{-s/2})}< \frac{8\varepsilon}{|k|^2} \, \frac{1}{s}\,. \end{eqnarray*} If $\noruno{k}\ge\mathtt N$ one has \[
|\bGf|_{\mathtt s}=|\bGf|_1
\stackrel{\equ{alfacentauri}}=\frac{4\varepsilon}{|k|^2}|f_k||\cos(\sa+ \sa_k)+F^k_\star(\sa)|_1
\stackrel{\equ{gallina}}{\le}\frac{4\varepsilon}{|k|^2} \, |f_k|\, (\cosh 1+2^{-40})<\frac{8\varepsilon}{|k|^2} |f_k|\,. \] Thus, by definitions of $\chi_{{}_k}$ in \equ{cerbiatta} and $\bttcd$, one gets \beq{crusca}
|\bGf|_{\mathtt s}\le \upepsilon \,, \end{equation} with $\upepsilon$ as in \equ{cerbiatta}. Next, since $\chi_{{}_k}\le 1\le \bttcd$, \beq{mappo}
|\Gf-\bGf|_{{\mathtt r},{\mathtt s}}\stackrel{\equ{cima}}\le 2\betta\stackrel{\equ{betta}}=\frac{8\varepsilon}{|k|^2} \chi_{{}_k}\vartheta \stackrel{\equ{cerbiatta}}\le \upepsilon \lalla\,.
\end{equation} By \equ{cimadue}, \equ{cerbiatta}, \equ{betta}, using the inequalities $|k|\le {\mathtt K}_{{}_{\rm o}}\le {\mathtt K}/6$, recalling \equ{tikitaka}, \equ{dublino}, and the hypothesis ${\mathtt K}_{{}_{\rm o}}\ge \itcd$ (in the last inequality),
one sees that \beq{mappo2}
|\cins|_{{\mathtt r},{\mathtt s}}\leq \itcd^2\, \frac{|k|^2}{{\mathtt K}^{2\nu}}\, \vartheta \le \frac{\itcd^2}{36}\, \frac{1}{{\mathtt K}^{2(\nu-1)}} \, \vartheta < \vartheta =\lalla\,. \end{equation} Then \equ{cimabue} follows by \equ{crusca}, \equ{mappo} and \equ{mappo2}. \\ Finally, observe that, by the definitions in \equ{cerbiatta} and \equ{betta} one has \beq{stent}\textstyle {\upepsilon}/{\morse}= \casitwo{ \frac{4 \bttcd}{\b} \,,}{\noruno{k}<\mathtt N\,,}{4 \bttcd \,,\phantom{\displaystyle \int} }{\noruno{k}\ge \mathtt N\,.} \end{equation} Then, \equ{alce} with $\varpi$ as in \equ{kappa}, follows immediately by the definitions in \equ{cerbiatta}, \equ{sucamorse} and \equ{stent}. \qed
\small
\end{document}
\begin{document}
\title{Quasi-Polynomial Local Search for Restricted Max-Min Fair Allocation\thanks{This research was supported by ERC Advanced investigator grants 228021 and 226203.}}
\begin{abstract} The restricted max-min fair allocation problem (also known as the restricted Santa Claus problem) is one of few problems that enjoys the intriguing status of having a better estimation algorithm than approximation algorithm. Indeed, Asadpour et al.~\cite{AFS08} proved that a certain configuration LP can be used to estimate the optimal value within a factor ${1}/{(4+\epsilon)}$, for any $\epsilon>0$, but at the same time it is not known how to efficiently find a solution with a comparable performance guarantee.
A natural question that arises from their work is if the difference between these guarantees is inherent or because of a lack of suitable techniques. We address this problem by giving a quasi-polynomial approximation algorithm with the mentioned performance guarantee. More specifically, we modify the local search of~\cite{AFS08} and provide a novel analysis that lets us significantly improve the bound on its running time: from $2^{O(n)}$ to $n^{O(\log n)}$. Our techniques also have the interesting property that although we use the rather complex configuration LP in the analysis, we never actually solve it and therefore the resulting algorithm is purely combinatorial. \end{abstract}
\section{Introduction}
We consider the problem of indivisible resource allocation in the following classical setting: a set \ensuremath{\mathcal{R}}\xspace of available resources shall be allocated to a set \ensuremath{\mathcal{P}}\xspace of players where the value of a set of resources for player $i$ is given by the function $f_i : 2^\ensuremath{\mathcal{R}}\xspace \mapsto \mathbb{R}$.
This is a very general setting and dependent on the specific goals of the allocator several different objective functions have been studied.
One natural objective, recently studied in~\cite{DS06,Feige06,FV06,Vondrak08}, is to maximize the social welfare, i.e., to find an allocation $\pi: \ensuremath{\mathcal{R}}\xspace \mapsto \ensuremath{\mathcal{P}}\xspace$ of resources to players so as to maximize $\sum_{i\in \ensuremath{\mathcal{P}}\xspace} f_i( \pi^{-1}(i))$. However, this approach is not suitable in settings where the property of ``fairness'' is desired. Indeed, it is easy to come up with examples where an allocation that maximizes the social welfare assigns all resources to even a single player. In this paper we address this issue by studying algorithms for finding ``fair'' allocations. More specifically, fairness is modeled by evaluating an allocation with respect to the satisfaction of the least happy player, i.e., we wish to find an allocation $\pi$ that maximizes $\min_{i\in \ensuremath{\mathcal{P}}\xspace} f_i(\pi^{-1}(i))$. In contrast to maximizing the social welfare, the problem of maximizing fairness is already $\mathsf{NP}$-hard when players have linear value functions. In order to simplify notation for such functions we denote $f_i({j})$ by $v_{i,j}$ and hence we have that $f_i(\pi^{-1}(i)) = \sum_{j\in
\pi^{-1}(i)} v_{i,j}$. This problem has recently received considerable attention in the literature and is often referred to as the \emph{max-min fair allocation} or the \emph{Santa Claus} problem.
One can observe that the max-min fair allocation problem is similar to the classic problem of scheduling jobs on unrelated machines to minimize the makespan, where we are given the same input but wish to find an allocation that minimizes the maximum instead of one that maximizes the minimum. In a classic paper~\cite{LST90}, Lenstra, Shmoys \& Tardos gave a $2$-approximation algorithm for the scheduling problem and proved that it is $\mathsf{NP}$-hard to approximate the problem within a factor less than $1.5$. The key step of their $2$-approximation algorithm is to show that a certain linear program, often referred to as the assignment LP, yields an additive approximation of $v_{\max} = \max_{i,j} v_{i,j}$. Bez\'akov\'a and Dani~\cite{BD05} later used these ideas for max-min fair allocation to obtain an algorithm that always finds a solution of value at least $OPT-v_{\max}$, where $OPT$ denotes the value of an optimal solution. However, in contrast to the scheduling problem, this algorithm and more generally the assignment LP gives no approximation guarantee for max-min fair allocation in the challenging cases when $v_{\max} \geq OPT$.
In order to overcome this obstacle, Bansal \& Sviridenko~\cite{BS06} proposed a stronger linear program relaxation, known as the configuration LP, for the max-min fair allocation problem. The configuration LP that we describe in detail in Section~\ref{sec:CLP} has been vital to the recent progress on better approximation guarantees. Asadpour \& Saberi~\cite{AS07} used it to obtain a
$\Omega(1/\sqrt{|\ensuremath{\mathcal{P}}\xspace|} (\log |\ensuremath{\mathcal{P}}\xspace|)^3)$-approximation algorithm which was later improved by Bateni et al.~\cite{Bateni09}
and Chakrabarty et al.~\cite{Chakrabarty09} to algorithms that return a solution of value at least $\Omega(OPT/|\ensuremath{\mathcal{P}}\xspace|^\epsilon)$ in time
$O(|\ensuremath{\mathcal{P}}\xspace|^{1/\epsilon})$.
The mentioned guarantee $\Omega(OPT/|\ensuremath{\mathcal{P}}\xspace|^\epsilon)$ is rather surprising because the integrality gap of the configuration LP is no better than $O(OPT/\sqrt{|\ensuremath{\mathcal{P}}\xspace|})$~\cite{BS06}. However, in contrast to the general case, the configuration LP is significantly stronger for the prominent special case where values are of the form $v_{i,j} \in \{v_j, 0\}$. This case is known as the \emph{restricted} max-min fair allocation or the restricted Santa Claus problem and is the focus of our paper. The worst known integrality gap for the restricted case is $1/2$ and it is known~\cite{BD05} that it is $\mathsf{NP}$-hard to beat this factor (which is also the best known hardness result for the general case).
Bansal \& Sviridenko~\cite{BS06} first used the configuration LP to obtain an $O(\log\log \log |\ensuremath{\mathcal{P}}\xspace| / \log \log
|\ensuremath{\mathcal{P}}\xspace|)$-approximation algorithm for the restricted max-min fair allocation problem. They also proved several structural properties that were later used by Feige~\cite{Feige08} to prove that the integrality gap of the configuration LP is in fact constant in the restricted case. The proof is based on repeated use of Lov\'{a}sz local lemma and was turned into a polynomial time algorithm~\cite{HSS10}.
The approximation guarantee obtained by combining~\cite{Feige08} and~\cite{HSS10} is a large constant and is far away from the best known analysis of the configuration LP by Asadpour et al.~\cite{AFS08}. More specifically, they proved in~\cite{AFS08} that the integrality gap is lower bounded by $1/4$ by designing a beautiful local search algorithm that eventually finds a solution with the mentioned approximation guarantee, but is only known to converge in exponential time. As the configuration LP can be solved up to any precision in polynomial time, this means that we can approximate the value of an optimal solution within a factor $1/(4+\epsilon)$ for any $\epsilon >0$ but it is not known how to efficiently find a solution with a comparable performance guarantee. Few other problems enjoy this intriguing status (see e.g. the overview article by Feige~\cite{FeigeSurv08}). One of them is the restricted assignment problem\footnote{Also here the restricted version of the problem is the special case where $v_{ij} \in \{v_j, \infty\}$ ($\infty$ instead of $0$ since we are minimizing).},
for which the second author in \cite{SME11} developed the techniques from~\cite{AFS08} to show that the configuration LP can be used to approximate the optimal makespan within a factor $33/17 + \epsilon$ improving upon the $2$-approximation by Lenstra, Shmoys \& Tardos~\cite{LST90}. Again it is not known how to efficiently find a schedule of the mentioned approximation guarantee. However, these results indicate that an improved understanding of the configuration LP is likely to lead to improved approximation algorithms for these fundamental allocation problems.
In this paper we make progress that further substantiates this point. We modify the local search of~\cite{AFS08} and present a novel analysis that allows us to significantly improve the bound on the running time from an exponential guarantee to a quasi-polynomial guarantee. \begin{theorem} \label{thm:main}
For any $\epsilon \in (0,1]$, we can find a
$\frac{1}{4+\epsilon}$-approximate solution to restricted
max-min fair allocation in time $n^{O\left(\frac{1}{\epsilon}
\log n\right)}$, where $n= |\ensuremath{\mathcal{P}}\xspace| + |\ensuremath{\mathcal{R}}\xspace|$. \end{theorem}
In Section~\ref{sec:algodesc}, we give an overview of the local search of~\cite{AFS08} together with our modifications. The main modification is that at each point of the local search, we carefully select which step to take in the case of several options, whereas in the original description~\cite{AFS08} an arbitrary choice was made. We then use this more stringent description with a novel analysis (Section~\ref{sec:algoanal}) that uses the dual of the configuration LP as in~\cite{SME11}. The main advantage of our analysis (of the modified local search) is that it allows us to obtain a better upper bound on the search space of the local search and therefore also a better bound on the run-time.
Furthermore, our techniques have the interesting property that although we use the rather complex configuration LP in the analysis, we never actually solve it. This gives hope to the interesting possibility of a polynomial time algorithm that is purely combinatorial and efficient to implement (in contrast to solving the configuration LP) with a good approximation ratio.
Finally, we note that our approach currently has a similar dependence on $\epsilon$ as in the case of solving the configuration LP since, as mentioned above, the linear program itself can only be solved approximately. However, our hidden constants are small and for a moderate $\epsilon$ we expect that our combinatorial approach is already more attractive than solving the configuration LP.
\section{The Configuration LP} \label{sec:CLP} The intuition of the configuration linear program (LP) is that any allocation of value $T$ needs to allocate a bundle or configuration $C$ of resources to each player $i$ so that $f_i(C) \geq T$. Let $\conf{i,T}$ be the set of those configurations that have value at least $T$ for player $i$. In other words, $\conf{i,T}$ contains all those subsets of resources that are feasible to allocate to player $i$ in an allocation of value $T$. For a guessed value of $T$, the configuration LP therefore has a decision variable $x_{i, C}$ for each player $i\in \ensuremath{\mathcal{P}}\xspace$ and configuration $C \in \conf{i,T}$ with the intuition that this variable should take value one if and only if the corresponding set of resources is allocated to that player. The configuration LP $CLP(T)$ is a feasibility program and it is defined as follows:
\begin{equation*} \boxed{
\begin{minipage}{10cm}
\begin{align*}
\sum_{C\in \conf{i,T}} x_{i,C} &\geq 1 & i \in \ensuremath{\mathcal{P}}\xspace \\[1mm]
\sum_{i,C: j\in C, C\in \conf{i,T}} x_{i,C} &\leq 1 & j \in \ensuremath{\mathcal{R}}\xspace \\[1mm]
x & \geq 0
\end{align*}
\end{minipage} } \end{equation*}
The first set of constraints ensures that each player should receive at least one bundle and the second set of constraints ensures that a resource is assigned to at most one player.
If $CLP(T_0)$ for some $T_0$ is feasible, then $CLP(T)$ is also feasible for all $T\leq T_0$, because $\conf{i, T_0}\subseteq \conf{i, T}$ and thus a solution to $CLP(T_0)$ is a solution to $CLP(T)$ as well. Let $T_{OPT}$ be the maximum of all such values. Every feasible allocation is a feasible solution of the configuration LP, hence $T_{OPT}$ is an upper bound on the value of the optimal allocation.
We note that the LP has exponentially many constraints; however, it is known that one can approximately solve it to any desired accuracy by designing a polynomial time (approximate) separation algorithm for the dual~\cite{BS06}. Although our approach does not require us to solve the linear program, the dual shall play an important role in our analysis. By associating a variable $y_i$ with each constraint in the first set of constraints, a variable $z_j$ with each constraint in the second set of constraints, and letting the primal have the objective function of minimizing the zero function, we obtain the dual program:
\begin{equation*} \boxed{
\begin{minipage}{10cm}
\begin{align*}
\max & \mbox{ } \sum_{i\in \ensuremath{\mathcal{P}}\xspace} y_i - \sum_{j\in \ensuremath{\mathcal{R}}\xspace} z_j && \\[2mm]
y_i &\leq \sum_{j\in C} z_j & i\in \ensuremath{\mathcal{P}}\xspace, C \in
\conf{i, T}\\[1mm]
y,z & \geq 0
\end{align*}
\end{minipage} } \end{equation*}
\section{Local Search with Better Run-time Analysis}
In this section we modify the algorithm by Asadpour et al.~\cite{AFS08} in order to significantly improve the run-time analysis: we obtain a $1/(4+\epsilon)$-approximate solution in run-time bounded by $n^{O(1/\epsilon \log n)}$ whereas the original local search is only known to converge in time $2^{O(n)}$. For better comparison, we can write $n^{O(1/\epsilon \log n)} = 2^{O(1/\epsilon
\log^2 n)}$. Moreover, our modification has the nice side effect that we actually never solve the complex configuration LP --- we only use it in the analysis.
\subsection{Description of Algorithm} \label{sec:algodesc} Throughout this section we assume that $T$ --- the guessed optimal value --- is such that $CLP(T)$ is feasible. We shall find a $1/\alpha$-approximation where $\alpha$ is a parameter such that $\alpha>4$. As we will see, the selection of $\alpha$ has the following trade-off: the closer $\alpha$ is to $4$, the worse bound on the run-time we get.
We note that if $CLP(T)$ is not feasible and thus $T$ is more than $T_{OPT}$, our algorithm makes no guarantees. It might fail to find an allocation, which means that $T>T_{OPT}$. We can use this for a standard binary search on the interval $[0, \frac{1}{|\ensuremath{\mathcal{P}}\xspace|}\sum_i v_i]$ so that in the end we find an allocation with a value at least $T_{OPT}/\alpha$.
\subsubsection{Max-min fair allocation is a bipartite hypergraph problem.} Similar to~\cite{AFS08}, we view the max-min fair allocation problem as a matching problem in the bipartite hypergraph $G=(\ensuremath{\mathcal{P}}\xspace, \ensuremath{\mathcal{R}}\xspace, E)$. Graph $G$ has an hyperedge $\{i\} \cup C$ for each player $i\in \ensuremath{\mathcal{P}}\xspace$ and configuration $C \subseteq \ensuremath{\mathcal{R}}\xspace$ that is feasible with respect to the desired approximation ratio $1/\alpha$, i.e., $f_i(C) \geq T/\alpha$, and minimal in the sense that $f_i(C') < T/\alpha$ for all $C' \subset C$. Note that the graph might have exponentially many edges and the algorithm therefore never keeps an explicit representation of all edges.
From the construction of the graph it is clear that a matching covering all players corresponds to a solution with value at least $T/\alpha$. Indeed, given such a matching $M$ in this graph, we can assign matched resources to the players and everyone gets resources with total value of at least $T/\alpha$.
\subsubsection{Alternating tree of ``add'' and ``block'' edges.}
The algorithm of Asadpour et al.~\cite{AFS08} can be viewed as follows. In the beginning we start with an empty matching and then we increase its size in every iteration by one, until all players are matched. In every iteration we build an alternating tree rooted in a currently unmatched player $p_0$ in the attempt to find an alternating path to extend our current matching $M$. The alternating tree has two types of edges: edges in the set $A$ that we wish to \emph{add} to the matching and edges in the set $B$ that are currently in the matching but intersect edges in $A$ and therefore \emph{block} them from being added to the matching. While we are building the alternating tree to find an alternating path, it is important to be careful in the selection of edges, so as to guarantee eventual termination. As in~\cite{AFS08}, we therefore define the concept of addable and blocking edges.
Before giving these definitions, it will be convenient to introduce the following notation. For a set of edges $F$, we denote by $F_\ensuremath{\mathcal{R}}\xspace$ all resources contained in edges in $F$ and similarly $F_\ensuremath{\mathcal{P}}\xspace$ denotes all players contained in edges in $F$. We also write $e_\ensuremath{\mathcal{R}}\xspace$ instead of $\{e\}_\ensuremath{\mathcal{R}}\xspace$ for an edge $e$ and use $e_\ensuremath{\mathcal{P}}\xspace$ to denote the player in $e$.
\begin{definition} We call an edge $e$ addable if $e_\ensuremath{\mathcal{R}}\xspace\cap (A_\ensuremath{\mathcal{R}}\xspace\cup B_\ensuremath{\mathcal{R}}\xspace) = \emptyset$ and $e_\ensuremath{\mathcal{P}}\xspace\in \{p_0\}\cup A_\ensuremath{\mathcal{P}}\xspace\cup B_\ensuremath{\mathcal{P}}\xspace$. \end{definition} \begin{definition} An edge $b$ in the matching $M$ is blocking $e$ if $e_\ensuremath{\mathcal{R}}\xspace\cap b_\ensuremath{\mathcal{R}}\xspace \not= \emptyset$. \end{definition} Note that an addable edge matches a player in the tree with resources that currently do not belong to any edge in the tree and that the edges blocking an edge $e$ are exactly those in the matching that prevent us from adding $e$. For a more intuitive understanding of these concepts see Figure \ref{fig:alter-tree} in Section \ref{sec:example}.
The idea of building an alternating tree is similar to standard matching algorithms using augmenting paths. However, one key difference is that the matching can be extended once an alternating path is found in the graph case, whereas the situation is more complex in the considered hypergraph case, since a single hyperedge might overlap several hyperedges in the matching. It is due to this complexity that it is more difficult to bound the running time of the hypergraph matching algorithm of~\cite{AFS08} and our improved running time is obtained by analyzing a modified version where we carefully select in which order the edges should be added to the alternating tree and drop edges from the tree beyond certain distance.
We divide resources into 2 groups. \emph{Fat resources} have value at least $T/\alpha$ and \emph{thin resources} have value less than $T/\alpha$. Thus any edge containing a fat resource contains only one resource and is called a \emph{fat edge}. Edges containing thin resources are called \emph{thin edges}. Our algorithm always selects an addable edge of minimum distance to the root $p_0$ according to the following convention. The length of a thin edge in the tree is one and the length of a fat edge in the tree is zero. Edges not in the tree have infinite length. Hence, the \emph{distance of a vertex} from the root is the number of thin edges between the vertex and the root and, similarly, the \emph{distance of an edge $e$} is the number of thin edges on the way to $e$ from $p_0$ including $e$ itself. We also need to refer to the distance of an addable edge that is not yet in the tree. In that case we take the distance as if it were in the tree. Finally, by the \emph{height of the alternating tree} we refer to the maximum distance of a resource from the root.
\subsubsection{Algorithm for extending a partial matching.} \begin{algorithm}[h!] \DontPrintSemicolon \caption{Increase the size of the matching} \label{increase-match} \SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output} \Input{A partial matching $M$} \Output{A matching of increased size assuming that $T$ is at most $T_{OPT}$} \BlankLine
Find an unmatched player $p_0\in \ensuremath{\mathcal{P}}\xspace$, make it a root of the alternating tree\;
\While{there is an addable edge within distance $2\log_{(\alpha-1)/3}(|\ensuremath{\mathcal{P}}\xspace|)+1$}{
\Indp
Find an addable edge $e$ of minimum distance from the root\;
$A\leftarrow A\cup \{e\}$\;
\eIf {$e$ has blocking edges $b_1, \dots, b_k$} {
\Indp
$B\leftarrow B\cup \{b_1, \dots, b_k\}$\;
\Indm
}(\tcp*[h]{collapse procedure})
{
\Indp
\While{$e$ has no blocking edges} {
\Indp
\eIf {there is an edge $e'\in B$ such that $e'_\ensuremath{\mathcal{P}}\xspace=e_\ensuremath{\mathcal{P}}\xspace$} {
\Indp
$M \leftarrow M\setminus \{e'\}\cup \{e\}$\;
$A\leftarrow A\setminus \{e\}$\;
$B \leftarrow B\setminus \{e'\}$\;
Let $e''\in A$ be the edge that $e'$ was blocking\;
$e\leftarrow e''$\;
\Indm
}
{
\Indp
$M\leftarrow M\cup\{e\}$\;
\Return $M$\;
\Indm
}
\Indm
}
Let $e'$ be the blocking edge that was last removed from $B$\;
Remove all edges in $A$ of greater distance than $e'$ and the edges in $B$ that blocked these edges\;
\Indm
} } \Return $T_{OPT}$ is less than $T$ \end{algorithm}
Algorithm \ref{increase-match} summarizes the modified procedure for increasing the size of a given matching by also matching a previously unmatched player $p_0$. For better understanding of the algorithm, we included an example of an algorithm execution in Figure \ref{fig:alter-tree} in Section \ref{sec:example}.
Note that the algorithm iteratively tries to find addable edges of minimum distance to the root. On the one hand, if the picked edge $e$ has blocking edges that prevent it from being added to the matching, then the blocking edges are added to the alternating tree and the algorithm repeatedly tries to find addable edges so as to make progress by removing the blocking edges.
On the other hand, if edge $e$ has no blocking edges, then this means that the set of resources $e_\ensuremath{\mathcal{R}}\xspace$ is free, so we make progress by adding $e$ to the matching $M$. If the player was not previously matched, it is the root $p_0$ and we increased the size of the matching. Otherwise the player $e_\ensuremath{\mathcal{P}}\xspace$ was previously matched by an edge $e'\in B$ such that $e'_\ensuremath{\mathcal{P}}\xspace=e_\ensuremath{\mathcal{P}}\xspace$, so we remove $e'$ from $M$ and thus it is not a blocker anymore and can be removed from $B$. This removal has decreased the number of blockers for an edge $e''\in A$. If $e''$ has $0$ blockers, we recurse and repeat the same procedure as with $e$. Note that this situation can be seen on Figure \ref{fig:alt-2} and \ref{fig:alt-3} in Section \ref{sec:example}.
\subsection{Example of Algorithm Execution} \label{sec:example} \begin{figure}
\caption{Alternating tree visualization. The right part of every picture
is the alternating tree and to the left we display the positions of edges in
the tree in the bipartite graph. Gray edges are in the set $A$ and white edges
are in the set $B$.}
\label{fig:alt-1}
\label{fig:alt-2}
\label{fig:alt-3}
\label{fig:alt-4}
\label{fig:alter-tree}
\end{figure}
Figure \ref{fig:alter-tree} is a visualization of an execution of Algorithm \ref{increase-match}. The right part of every picture is the alternating tree and to the left we display the positions of edges in the tree in the bipartite graph. Gray edges are $A$-edges and white are $B$-edges.
In Figure \ref{fig:alt-1} we start by adding an $A$-edge to the tree. There are 2 edges in the matching intersecting this edge, so we add them as blocking edges. Then in Figure \ref{fig:alt-2} we add a fat edge that has no blockers, so we add it to the matching and thus remove one blocking edge, as we can see in Figure \ref{fig:alt-3}. Then in Figure \ref{fig:alt-4} we add a thin edge which has no blockers. Now the $A$ and $B$ edges form an alternating path, so by swapping them we increase the size of the matching and the algorithm terminates.
Note that the fat edge in step 2 is added before the thin edge from step 4, because it has shorter distance from the root $p_0$. Recall that the distance of an edge $e$ is the number of thin edges between $e$ and the root including $e$, thus the distance of the fat edge is 2 and the distance of the thin edge is 3.
\subsection{Analysis of Algorithm}
\label{sec:algoanal}
Let the parameter $\alpha$ of the algorithm equal $4+\epsilon$ for some
$\epsilon\in (0, 1]$. We first prove that Algorithm~\ref{increase-match} terminates in time $n^{O\left(\frac{1}{\epsilon}\log n\right)}$ where $n=|\ensuremath{\mathcal{P}}\xspace| + |\ensuremath{\mathcal{R}}\xspace|$ and, in the following subsection, we show that it returns a matching of increased size if $CLP(T)$ is feasible.
Theorem~\ref{thm:main} then follows from the fact that, for each guessed value of $T$, Algorithm~\ref{increase-match} is invoked at most $n$ times and we can find the maximum value $T$ for which our algorithm finds an allocation by binary search on the interval $[0, \frac{1}{|\ensuremath{\mathcal{P}}\xspace|}\sum_i v_i]$. Since we can assume that the numbers in the input have bounded precision, the binary search only adds a polynomial factor to the running time.
\subsubsection{Run-time Analysis.} We bound the running time of Algorithm~\ref{increase-match} using that the alternating tree has height at most
$O(\log_{(\alpha-1)/3}|\ensuremath{\mathcal{P}}\xspace|) = O\left(\frac{1}{\epsilon} \log
|\ensuremath{\mathcal{P}}\xspace|\right)$. The proof is similar to the termination proof in~\cite{AFS08} in the sense that we associate a signature vector with each tree and then show that its lexicographic value decreases. However, one key difference is that instead of associating a value with \emph{each edge} of type $A$ in the tree, we associate a value with each ``layer'' that consists of \emph{all edges} of a certain distance from the root. This allows us to directly associate the run-time with the height of the alternating tree.
When considering an alternating tree it is convenient to partition $A$ and $B$ into $A_0, A_1, \dots, A_{2k}$ and $B_0, B_1, \dots, B_{2k}$ respectively by the distance from the root, where $2k$ is the maximum distance of an edge in the alternating tree (it is always an even number). Note that $B_i$ is empty for all odd $i$. Also, $A_{2i}$ contains only fat edges and $A_{2i+1}$ only thin edges. For a set of edges $F$ we denote by $F^t$ all the thin edges in $F$ and by $F^f$ all the fat edges in $F$. We also use $\ensuremath{\mathcal{R}}\xspace^t$ to denote thin resources and $\ensuremath{\mathcal{R}}\xspace^f$ to denote fat resources.
\begin{lemma} \label{lemma:termination} For a desired approximation guarantee of $1/\alpha = 1/(4+\epsilon)$, Algorithm~\ref{increase-match} terminates in time $n^{O\left(\frac{1}{\epsilon}\log n\right)}$. \end{lemma} \begin{proof} We analyze the run-time of Algorithm~\ref{increase-match} by associating a signature vector with the alternating tree of each iteration. The signature vector of an alternating tree is then defined to be \begin{align*}
( -|A^f_0|, |B^f_0|, &-|A^t_1|, |B^t_2|, -|A^f_2|, |B^f_2|, \\
&-|A^t_3|, |B^t_4|, -|A^f_4|, |B^f_4|, \\ & \vdots \\
& -|A^t_{2k-1}|, |B^t_{2k}|, -|A^f_{2k}|, |B^f_{2k}|, \infty). \end{align*}
We prove that each addition of an edge decreases the lexicographic value of the signature or increases the size of the matching.
On the one hand, if we add an edge with no blocking edges, we either completely collapse the alternating tree or collapse only a part of it. If we completely collapse the tree then the algorithm terminates. Otherwise, let $e'$ be the last blocking edge that was removed from $B$ by the algorithm during the collapse procedure. Also let $B'$ and $A'$ be the sets of blocking edges and addable edges obtained after the collapse procedure. Note that $e'$ is a thin edge because otherwise $e'$ was blocking a fat edge $e$ that after the removal of $e'$ had no more blocking edges which in turn contradicts that $e'$ was the last blocking edge removed from $B$. Let $2\ell$ be the distance of $e'$, i.e., $e' \in B^t_{2\ell}$. As the algorithm drops all edges in $A$ of distance at least $2 \ell+1$ and all edges in $B$ that blocked these edges, the partial collapse of the tree changes its
signature to $$( -|A'^f_0|, |B'^f_0|, -|A'^t_1|, |B'^t_2|, -|A'^f_2|, |B'^f_2|, \dots, -|A'^t_{2\ell-1}|,
|B'^t_{2\ell}|, -|A'^f_{2\ell}|, |B'^f_{2\ell}|, \infty),$$ which equals
$$( -|A^f_0|, |B^f_0|, -|A^t_1|, |B^t_2|, -|A^f_2|, |B^f_2|, \dots, -|A^t_{2\ell-1}|,
|B^t_{2\ell}|-1, -|A'^f_{2\ell}|, |B'^f_{2\ell}|, \infty).$$ Thus we either increase the size of the matching or decrease the signature of the alternating tree.
On the other hand, if the added edge $e$ has blocking edges, there are two cases. We either open new layers $A_{2k+1}=\{e\}$ and $B_{2k+2}$ where $e$ is a thin edge and the signature gets smaller, since $-|A^t_{2k+1}|<\infty$. If we do not open a new layer, we increase the size of some $A_\ell$ by either a thin or fat edge and
$-(|A_\ell|+1)<-|A_\ell|$, so in this case the signature decreases too.
The algorithm only runs as long as the height of the alternating tree is at most
$O(\log_{(\alpha-1)/3}|\ensuremath{\mathcal{P}}\xspace|)=O(\log_{1+\epsilon/3} |\ensuremath{\mathcal{P}}\xspace|)$. This can be rewritten as
$
O\left(\frac{\log |\ensuremath{\mathcal{P}}\xspace|}{\log (1+\epsilon/3)}\right)=
O\left(\frac{\log |\ensuremath{\mathcal{P}}\xspace|}{\epsilon}\right) $
where the equality follows from $x\leq 2\log(1+x)$ for $x\in(0,1]$ and we only consider $\epsilon\in(0, 1]$. There are at most
$|\ensuremath{\mathcal{P}}\xspace|$ possible values for each position in a signature, so the total number of signatures encountered during the execution of Algorithm~\ref{increase-match} is $|\ensuremath{\mathcal{P}}\xspace|^{O\left(\frac{1}{\epsilon} \log
|\ensuremath{\mathcal{P}}\xspace|\right)}$. As adding an edge happens in polynomial time in $n =
|\ensuremath{\mathcal{P}}\xspace| + |\ensuremath{\mathcal{R}}\xspace|$, we conclude that Algorithm~\ref{increase-match} terminates in time $n^{O\left(\frac{1}{\epsilon}\log n\right)}$. \qed \end{proof}
\subsubsection{Correctness of Algorithm~\ref{increase-match}.} \label{sec:correctness} We show that Algorithm~\ref{increase-match} is correct, i.e., that it returns an increased matching if $CLP(T)$ is feasible.
We have already proved that the algorithm terminates in Lemma~\ref{lemma:termination}. The statement therefore follows from proving that the condition of the while loop always is satisfied assuming that the configuration LP is feasible. In other words, we will prove that there always is an addable edge within the required distance from the root. This strengthens the analogous statement of~\cite{AFS08} that states that there always is an addable edge (but without the restriction on the search space that is crucial for our run-time analysis). We shall do so by proving that the number of thin blocking edges increases quickly with respect to the height of the alternating tree and, as there cannot be more than $|\ensuremath{\mathcal{P}}\xspace|$ blocking edges, this in turn bounds the height of the tree.
We are now ready to state the key insight behind the analysis that shows that the number of blocking edges increases as a function of $\alpha$ and the height of the alternating tree. \begin{lemma} Let $\alpha >4$. Assuming that $CLP(T)$ is feasible, if there is no addable edge $e$ within distance $2D+1$ from the root for some integer $D$, then \begin{equation*}
\frac{\alpha-4}{3} \sum_{i=1}^D |B^t_{2i}| < |B^t_{2D+2}|. \end{equation*} \label{lem:combinatorial} \end{lemma}
Before giving the proof of Lemma \ref{lem:combinatorial}, let us see how it implies that there always is an addable edge within distance $2
\log_{(\alpha-1)/3}(|\ensuremath{\mathcal{P}}\xspace|)+1$ from the root assuming the configuration LP is feasible, which in turn implies the correctness of Algorithm~\ref{increase-match}.
\begin{corollary}
\label{cor:height}
If $\alpha>4$ and $CLP(T)$ is feasible, then there is always an addable edge
within distance $2D+1$ from the root, where $D =
\log_{(\alpha-1)/3}|\ensuremath{\mathcal{P}}\xspace|$. \end{corollary} \begin{proof}
The proof of the corollary follows from the fact that Lemma~\ref{lem:combinatorial} says that the number of blocking edges increases exponentially in terms of the height of the tree and therefore, as there are at most $|\ensuremath{\mathcal{P}}\xspace|$ blocking edges, the height must be $O_\alpha(
\log|\ensuremath{\mathcal{P}}\xspace|)$. We now proceed with the formal proof.
Let us first consider the case when $|B_2^t|=0$, i.e., there are no thin edges in the alternating tree, so its height is $0$. Then there must be an addable edge (of distance at most $1$), since otherwise, by the above lemma, we get a contradiction $0=(\alpha-4)/3|B_2^t|<|B_4^t|=0$.
From now on assume that $|B_2^t|\geq 1$ and suppose toward contradiction that there is no addable edge within distance $2D+1$. Let \begin{equation*}
\text{$b_i=\sum_{j=1}^i |B_{2j}^t|$ and $q=(\alpha-4)/3$.} \end{equation*} By Lemma~\ref{lem:combinatorial}, \begin{equation*}
\text{$q b_i<|B^t_{2i+2}|$ for $i\leq D$ and $b_{i+1}=b_i+|B^t_{2i+2}|$,} \end{equation*} so $b_{i+1}>(1+q)b_i$ for all $i\leq D$, which in turn implies \begin{equation*}
b_{D+1} = \sum_{j = 1}^{D+1} |B^t_{2j}| >(1+q)^{D}b_1 \geq (1+q)^D = |\ensuremath{\mathcal{P}}\xspace|, \end{equation*}
where the last equality follows by the selection of $D$. However, this is a contradiction since the number of blocking edges and hence $b_{D+1}$ is at most the number of players $|\ensuremath{\mathcal{P}}\xspace|$. \qed
\end{proof} We complete the correctness analysis of the algorithm by presenting the proof of the key lemma.
\begin{proof}[Proof of Lemma~\ref{lem:combinatorial}] Let $H_{2D+1}$ be the tree formed from the original alternating tree by taking all edges of distance at most $2D+1$ plus edges in the set $B^t_{2D+2}$. The following invariant holds throughout the execution of Algorithm \ref{increase-match} and plays an important role in the analysis: If there is an addable edge $e$ with respect to $H_{2D+1}$ within distance $2D+1$, then $e$ is an addable edge within distance $2D+1$ with respect to the original tree. Hence, in the proof of this lemma we only need to consider edges in $H_{2D+1}$. The invariant trivially holds at the beginning of the algorithm and is preserved when adding an edge with blockers, because an edge of minimum distance is selected. The situation is more complex when an edge has no blockers. Dropping off the edges beyond a certain distance in Algorithm \ref{increase-match} ensures that the invariant remains true even in this case.
Suppose toward contradiction that there is no addable edge within distance $2D+1$ and \begin{equation*}
\frac{\alpha-4}{3} \sum_{i=1}^D |B^t_{2i}| \geq |B^t_{2D+2}|. \end{equation*} We show that this implies that the dual of the configuration LP is unbounded, which in turn contradicts the assumption that the primal is feasible. Recall that the objective function of the dual is $\max \sum_{i\in \ensuremath{\mathcal{P}}\xspace} y_i - \sum_{j\in \ensuremath{\mathcal{R}}\xspace} z_j$. Furthermore, as each solution $(y,z)$ of the dual can be scaled by a scalar $c$ to obtain a new solution $(c\cdot y, c \cdot z)$, any solution with positive objective implies unboundedness.
We proceed by defining such a solution $(y^*, z^*)$, which is determined by the alternating tree. More precisely, we take \begin{equation*}
y^*_i = \begin{cases}
\frac{\alpha-1}{\alpha} & \mbox{if $i\in\ensuremath{\mathcal{P}}\xspace$ is within distance $2D$
from the root},\\
0 & \mbox{otherwise,}
\end{cases} \end{equation*} and \begin{equation*}
z_j^* = \begin{cases}
(\alpha-1)/\alpha & \mbox{if $j\in\ensuremath{\mathcal{R}}\xspace$ is fat and within distance $2D$
from the root,}\\
v_j/T & \mbox{if $j\in\ensuremath{\mathcal{R}}\xspace$ is thin and within distance $2D+2$ from the
root,} \\
0 & \mbox{otherwise.}
\end{cases} \end{equation*}
Let us first verify that $(y^*, z^*)$ is indeed a feasible solution. We have chosen all $y_i, z_j$ to be non-negative, so it only remains to check the first condition of the dual. Let $i\in \ensuremath{\mathcal{P}}\xspace$ and let $C$ be such that $f_i(C)\geq T$, i.e., $C\in \conf{i, T}$. We distinguish between the two cases when $y_i = 0$ and $y_i = (\alpha-1)/\alpha$. On the one hand, if $y_i = 0$ we have that $y_i \leq \sum_{j\in C}z_j$, since $\sum_{j \in C} z_j$ is always non-negative.
On the other hand, if $y_i=(\alpha-1)/\alpha$, then we have two sub-cases. Either there is $z_j=(\alpha-1)/\alpha$ for some $j\in C$ and we have $\sum_{j\in C} z_j\geq y_i$. Otherwise $\sum_{j\in C} z_j=\sum_{j\in C\cap F} v_j/T\geq\sum_{j\in C\cap F} v_{i,j}/T$, where $F$ is the set of resources which are assigned positive value $z_j$. Suppose $\sum_{j\in C\cap F} v_j/T<(\alpha-1)/\alpha$, then there is a set $R=C\setminus F\subseteq \ensuremath{\mathcal{R}}\xspace$ with $f_i(R)\geq T/\alpha$ and thus $\{i\}\cup R$ is an addable edge in $H_{2D+1}$ and hence an addable edge within distance $2D+1$ in the original tree. This contradicts the assumption that no such addable edges exist, so $\sum_{j\in C} z_j\ge y_i$.
Having proved that $(y^*,z^*)$ is a feasible solution, the proof is now completed by showing that the value of the solution is positive. We have \begin{equation*} \sum_{i\in \ensuremath{\mathcal{P}}\xspace}y_i=\frac{\alpha-1}{\alpha} \left(1+\sum_{i=0}^D
|B_{2i}|\right), \end{equation*} since each player in the alternating tree has its unique blocking edge leading to it except the root. For fat resources we have \begin{equation*}
\sum_{j\in \ensuremath{\mathcal{R}}\xspace^f}z_j\leq\frac{\alpha-1}{\alpha}\sum_{i=0}^D|B_{2i}^f|, \end{equation*} since every fat edge contains only one fat resource by minimality.
For thin resources, \begin{equation*} \sum_{j\in \ensuremath{\mathcal{R}}\xspace^t}z_j\leq \frac{2}{\alpha} \sum_{i=1}^{D+1}
|A^t_{2i-1}| + \frac{1}{\alpha} \sum_{i=1}^{D+1} |B^t_{2i}|, \end{equation*} since the size of each thin edge is at most $2T/\alpha$ and the part of each blocking edge not contained in any other $A$-edge is at most of size $T/\alpha$, because otherwise the set of resources in the blocking edge would not be a minimal set.
We also have $|A^t_{2i-1}|\leq |B^t_{2i}|$ for any $i$, since each such added edge has to have at least one blocking edge. This implies
\begin{equation*}
\sum_{j\in \ensuremath{\mathcal{R}}\xspace}z_j\leq
\frac{\alpha-1}{\alpha} \sum_{i=0}^D |B_{2i}^f|
+ \frac{3}{\alpha} \sum_{i=1}^{D+1} |B^t_{2i}|. \end{equation*} By the assumption toward contradiction, \begin{equation*}
\text{$|B^t_{2D+2}|\leq \frac{\alpha-4}{3} \sum_{i=1}^D |B^t_{2i}|$, so
$3\sum_{i=1}^{D+1} |B^t_{2i}| \leq (\alpha-1)\sum_{i=1}^D |B^t_{2i}|$.} \end{equation*} This implies \begin{equation*}
\sum_{j\in \ensuremath{\mathcal{R}}\xspace}z_j\leq
\frac{\alpha-1}{\alpha} \sum_{i=0}^D|B_{2i}^f|
+ \frac{\alpha-1}{\alpha} \sum_{i=1}^D|B^t_{2i}| <
\frac{\alpha-1}{\alpha} \left(1+\sum_{i=0}^D |B_{2i}|\right) =
\sum_{i\in \ensuremath{\mathcal{P}}\xspace} y_i, \end{equation*} so the dual is unbounded and we get a contradiction. \qed \end{proof}
\section{Conclusions} Asadpour et al. \cite{AFS08} raised as an open question whether their local search (or a modified variant) can be shown to run in polynomial time. We made progress toward proving this statement by showing that a modified local search procedure finds a solution in quasi-polynomial time. Moreover, based on our findings, we conjecture the stronger statement that there is a local search algorithm that does not use the LP solution, i.e., it is combinatorial, and it finds a $1/(4+\epsilon)$-approximate solution in polynomial time for any fixed $\epsilon >0$.
\end{document}
\begin{document}
\title{ReLU to the Rescue: Improve Your On-Policy Actor-Critic with Positive Advantages}
\begin{abstract}
This paper introduces a novel method for enhancing the effectiveness of on-policy Deep Reinforcement Learning (DRL) algorithms. Three surprisingly simple modifications to the A3C algorithm: (1) processing advantage estimates through a ReLU function, (2) spectral normalization, and (3) dropout, serve to not only improve efficacy but also yield a ``cautious'' DRL algorithm. Where on-policy algorithms such as Proximal Policy Optimization (PPO) and Asynchronous Advantage Actor-Critic (A3C) do not explicitly account for cautious interaction with the environment, our method integrates caution in two critical ways: (1) by maximizing a lower bound on the value function plus a constant, thereby promoting a \textit{conservative value estimation}, and (2) by incorporating Thompson sampling for cautious exploration. In proving that our algorithm maximizes the lower bound, we also ground Regret Matching Policy Gradients (RMPG), a discrete-action on-policy method for multi-agent reinforcement learning. Our rigorous empirical evaluations across various benchmarks demonstrate our approach's improved performance against existing on-policy algorithms. This research represents a substantial step towards efficacious and cautious DRL algorithms, which are needed to unlock applications to complex, real-world problems. \end{abstract}
\section{Introduction} \label{sec:introduction}
Deep Reinforcement Learning (DRL) is a paradigm to approximate solutions to complex sequential decision-making problems in domains such as robotics \citep{ibarz2021train}, autonomous driving \citep{kiran2021deep}, strategy games \citep{mnih2015human,silver2017mastering,arulkumaran2019alphastar}, and human-computer interaction \citep{ziegler2019fine}. In recent years, DRL algorithms have achieved state-of-the-art performance on many challenging benchmarks \citep{young19minatar,gymnax2022github,todorov2012mujoco,brockman2016openai}. However, their success in real-world applications does not only depend on their capacity to execute tasks while simultaneously refining the equations defining their action policy. It also hinges on cautious policy execution in the face of finite observations of a world in flux to avoid catastrophic results.
On-policy algorithms, such as Proximal Policy Optimization (PPO) \citep{schulman2017proximal} or Asynchronous Advantage Actor-Critic (A3C) \citep{mnih2016asynchronous}, incorporate differentiable policies that are updated based on recent interactions with the environment. Such recency bias, and their potential to actively sample informative observations, make on-policy approaches compelling candidates for applications in real-world non-stationary environments. However, neither PPO nor A3C explicitly accounts for cautious environmental interaction. In response, we propose a novel method that explicitly incorporates caution in decision-making in two significant ways: (1) by maximizing a lower-bound on the value function plus a constant to promote algorithmic decision-making under a conservative estimate of value \citep{kumar2020conservative}; and (2) by integrating careful exploration around action values with higher estimated value via Thompson sampling \citep{thompson1933likelihood}. Only three surprisingly simple modifications to the A3C algorithm are needed to achieve this: (1) the lower-bound on value is realized by processing advantage estimates through a ReLU function, (2) the additive constant is regularized by applying spectral normalization to promote conservative estimates of value, and (3) Thompson sampling is enabled by adopting dropout and weight normalization.
Through our thorough empirical assessments on the Gymnasium and Brax MuJoCo benchmarks for continuous control \citep{brockman2016openai,brax2021github}, we show that our approach consistently outperforms existing on-policy algorithms such as PPO and A3C. Furthermore, our method shows competitive performance to these state-of-the-art on-policy methods in environments found in the MinAtar and ClassicControl benchmarks \citep{gymnax2022github,young19minatar}. Consequently, this paper offers a novel enhancement to boost the efficacy of on-policy DRL algorithms, underpinned by comprehensive theoretical proof and extensive empirical evidence of its effectiveness. While sufficiently cautious algorithmic interaction with the world is still a distant goal, we hope this research will catalyze the development of further efficacious and careful applications of DRL for solving complex, real-world problems.
\section{Background} \label{sec:background}
\textbf{Notation.} We consider a discounted, $\mathrm{T}$-horizon Markov Decision Process (MDP) defined by the tuple $(\mathcal{S}, \mathcal{A}, \mathrm{P}, \mathrm{r}, \gamma)$, where $\mathcal{S}$ is the state space, $\mathcal{A}$ is the action space, $\mathrm{P}$ is the state transition probability, $\mathrm{r}$ is the immediate reward upon transitioning from state $\mathbf{s}$ to state $\mathbf{s}^{\prime}$, and $\gamma \in [0, 1]$ is the discount factor. MDPs provide a framework for modeling sequential decision-making problems, where an agent interacts with an environment over discrete time steps to achieve a goal \citep{puterman2014markov}. Following the notation of \citet{sutton2018reinforcement}, we define states at time $\mathrm{t} \in \mathrm{T}$ by the $d$-dimensional, real-valued, random variable, $\mathbf{S}_{\mathrm{t}}: \Omega \to \mathcal{S} \subseteq \mathbb{R}^d$, with observable instances $\mathbf{s}_{\mathrm{t}} = \mathbf{S}_{\mathrm{t}}(\omega_{\mathrm{t}}): \forall \omega_{\mathrm{t}} \in \Omega$. We define actions by the $m$-dimensional random variable $\mathbf{A}_{\mathrm{t}}: \Omega \to \mathcal{A}$, with observable instances, $\mathbf{a}_{\mathrm{t}} = \mathbf{A}_{\mathrm{t}}(\omega_{\mathrm{t}}): \forall \omega_{\mathrm{t}} \in \Omega$. Rewards are defined by the continuous-valued random variable, $\mathrm{R}_{\mathrm{t}}:\Omega \to \mathcal{R} \subseteq \mathbb{R}$, with observable instances, $\mathrm{r}_{\mathrm{t}} = \mathrm{R}_{\mathrm{t}}(\omega_{\mathrm{t}}): \forall \omega_{\mathrm{t}} \in \Omega$. Let the random variable, $\mathrm{G}_{\mathrm{t}} \coloneqq \sum_{\mathrm{k} = \mathrm{t} + 1}^{\mathrm{T}} \gamma^{\mathrm{k} - 1 - \mathrm{t}} \mathrm{R}_{\mathrm{k}}$, denote the discounted return. 
We use the standard definitions for the conditional action distribution/density (policy), $\pi(\mathbf{a} \mid \mathbf{s})$, the state value function under the policy, $v_{\pi}(\mathbf{s}) \coloneqq \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}} = \mathbf{s} \right]$, and state-action value function under the policy, $q_{\pi}(\mathbf{s}, \mathbf{a}) \coloneqq \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}} = \mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}\right]$.
\textbf{On-policy, actor-critic reinforcement learning.} On-policy, actor-critic approaches to reinforcement learning are called \emph{policy-gradient} methods, in that they seek to optimize a policy function, $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$, differentiable with respect to its parameters, $\bm{\theta}$, to maximize the expected discounted return under the policy, $v_{\pi}(\mathbf{s})$. On-policy approaches differ from off-policy approaches in that they only use recent observations from the current policy to achieve this objective. Actor-critic methods differ from other policy-gradient methods because they fit an approximate value function (critic), $v(\mathbf{s}, \mathbf{w})$, to the data collected under the policy, in addition to optimizing the policy function (actor). The critic is typically used in actor optimization but not generally for decision-making.
Deep reinforcement learning implements the actor and critic using neural network architectures, where the function parameters correspond to network weights. We denote the parameters of the actor and critic networks as $\bm{\theta}$ and $\mathbf{w}$, respectively. The output likelihood of the actor makes distributional assumptions informed by characteristics of the action space, $\mathcal{A}$. For continuous action spaces, the likelihood is commonly an independent multivariate normal distribution with homogeneous noise variance, $\pi(\mathbf{a}_{\mathrm{t}} \mid \mathbf{s}_{\mathrm{t}}, \bm{\theta}) \sim \mathcal{N}(\mathbf{a} \mid \bm{\mu}(\mathbf{s}, \bm{\theta}), \mathrm{I}\bm{\sigma}^2(\bm{\theta}))$, where $\bm{\sigma}^2(\bm{\theta}) = (\sigma^2_1, \dots, \sigma^2_m)$ is the vector of inferred action noise variances. For discrete action spaces, the likelihood is often a categorical distribution, $\pi(\mathbf{a}_{\mathrm{t}} \mid \mathbf{s}_{\mathrm{t}}, \bm{\theta}) \sim \mathrm{Categorical}(\mathbf{a} \mid \bm{\mu}(\mathbf{s}, \bm{\theta}))$. In both cases, the mean parameter of the likelihood, $\bm{\mu}(\mathbf{s}, \bm{\theta})$, is the $m$-dimensional, vector-valued output of a neural network architecture with parameters, $\bm{\theta}$. Critic networks are commonly fit using a mean squared error objective, which corresponds to a univariate normal output likelihood with unit variance, $p(\mathrm{g} \mid \mathbf{s}, \mathbf{w}) \sim \mathcal{N}(\mathrm{g} \mid v(\mathbf{s}, \mathbf{w}), 1)$, where the mean parameter is the approximate value function, $v(\mathbf{s}, \mathbf{w})$, and is given by the scalar-valued output of any neural network architecture with parameters, $\mathbf{w}$.
The baseline on-policy, actor-critic policy gradient algorithm seeks to perform gradient ascent with respect to the ``performance'' function, $J(\bm{\theta}) \coloneqq v_{\pi}(\mathbf{s}_0, \bm{\theta})$, where $v_{\pi}(\mathbf{s}_0, \bm{\theta})$ is the value function with respect to the parameters $\bm{\theta}$. By the policy gradient theorem \citep{sutton1999policy}, we have: $\nabla_{\bm{\theta}} J(\bm{\theta}) = \nabla_{\bm{\theta}} v_{\pi}(\mathbf{s}_{0}) \propto \int_{\mathcal{S}} \rho(\mathbf{s}) \int_{\mathcal{A}} q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla_{\bm{\theta}} \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) d\mathbf{a} d\mathbf{s}$. \citet{sutton2018reinforcement} show that a generalization of this result includes a comparison of the state-action value function, $q_{\pi}(\mathbf{s}, \mathbf{a})$, to an arbitrary baseline that does not vary with the action, $\mathbf{a}$. When the baseline is the state value function, $v_{\pi}(\mathbf{s})$, we have an objective in terms of the \emph{advantage function} \citep{schulman2015high}, $h_{\pi}(\mathbf{s}, \mathbf{a}) \coloneqq q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi}(\mathbf{s})$, namely: $\nabla_{\bm{\theta}} J(\bm{\theta}) \propto \int_{\mathcal{S}} \rho(\mathbf{s}) \int_{\mathcal{A}} h_{\pi}(\mathbf{s}, \mathbf{a}) \nabla_{\bm{\theta}} \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) d\mathbf{a} d\mathbf{s}$. This formulation in terms of \emph{all actions} can be further simplified in terms of observed actions and states as: $\nabla_{\bm{\theta}} J(\bm{\theta}) \propto \mathbb{E}_{\pi} \left[ h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla_{\bm{\theta}} \log{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} \right]$. We use $\mathbb{E}_{\pi}$ to denote an expectation over states $\mathbf{S}_{\mathrm{t}}$ and actions $\mathbf{A}_{\mathrm{t}}$ collected under the policy $\pi(\mathbf{a} \mid \mathbf{s})$.
In general, because neither the state-action, $q_{\pi}(\mathbf{s}, \mathbf{a})$, nor the state value, $v_{\pi}(\mathbf{s})$, functions are known, we need an estimator for the advantage function. For compactness, we will focus on the generalized advantage estimator (GAE) proposed by \citet{schulman2015high}: $h(\mathbf{s}_{\mathrm{t}}, \mathrm{r}_{\mathrm{t}}, \mathbf{w}) = \sum_{\mathrm{k} = \mathrm{t} + 1}^{\mathrm{T}} (\gamma \lambda)^{\mathrm{k} - 1 - \mathrm{t}} \delta_{\mathrm{k} - 1}^{\mathbf{w}},$ where $0 < \lambda \leq 1$, and $\delta_{\mathrm{t}}^{\mathbf{w}} = \mathrm{r}_{\mathrm{t}} + \gamma v(\mathbf{s}_{\mathrm{t} + 1}; \mathbf{w}) - v(\mathbf{s}_{\mathrm{t}}; \mathbf{w})$ is the temporal difference (TD) residual of the value function with discount, $\gamma$ \citep{sutton2018reinforcement}. The GAE then yields a low-variance gradient estimator for the policy function: $\widehat{\nabla_{\bm{\theta}} J}(\bm{\theta}) \coloneqq \mathbb{E}_{\pi} \left[ h(\mathbf{S}_{\mathrm{t}}, \mathrm{R}_{\mathrm{t}}, \mathbf{w}) \nabla_{\bm{\theta}} \log{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} \right]$. Finally, the actor and critic networks are generally optimized by using mini-batch stochastic gradient descent \cite{robbins1951stochastic} to fit the functions induced by the network weights to a batch of data collected under the current policy, $\mathcal{D}^{b}_{\pi} = \{\mathbf{s}_{i}, \mathbf{a}_{i}, \mathrm{r}_{i}\}_{i=1}^b$.
\section{Methods} \label{sec:methods}
In this section, we develop our cautious, on-policy actor-critic algorithm. As a reminder, we realize this algorithm by making three simple changes to the A3C algorithm: first, we process advantage estimates through a ReLU function; second, we regularize network weights using spectral normalization; and third, we implement the actor and critic networks as Bayesian Neural Networks to enable Thompson sampling. We provide the theoretical grounding to prove that clipping the advantages during policy optimization results in optimizing a lower bound on the value function plus a constant. We show that under standard assumptions, the constant is equal to the expected, clipped difference in the state value function, $\gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s})$, over all actions, $\mathbf{a}$, and next-states, $\mathbf{s}'$, under the policy given state, $\mathbf{s}$, and that we can regularize it using spectral normalization. And finally, we detail how to enable cautious exploration via Thompson sampling by adding dropout and weight decay. The following theorem formalizes the main result of our paper.
\begin{theorem}
\label{th:main}
Let, $\mathrm{G}_{\mathrm{t}} \coloneqq \sum_{\mathrm{k} = \mathrm{t} + 1}^{\mathrm{T}} \gamma^{\mathrm{k} - 1 - \mathrm{t}} \mathrm{R}_{\mathrm{k}}$, denote the discounted return.
Let $q_{\pi}(\mathbf{s}, \mathbf{a}) = \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}} = \mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}\right]$, denote the state-action value function, and
$v_{\pi}(\mathbf{s}) = \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}} = \mathbf{s} \right]$, denote the state value function, under policy $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$.
Let $\big(x\big)^+ \coloneqq \max(0, x)$.
Assume, without loss of generality, that rewards, $\mathrm{R}_{\mathrm{t}}$, are non-negative.
Assume that the gradient of the policy, $\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$, is a conservative vector field.
Then, performing gradient ascent with respect to,
\begin{equation}
\nabla_{\bm{\theta}} J(\bm{\theta}) = \mathbb{E}_{\pi}\left[ \Bigl(q_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) - v_{\pi}(\mathbf{S}_{\mathrm{t}}) \Bigr)^+ \nabla_{\bm{\theta}} \log \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right],
\label{eq:main-objective}
\end{equation}
maximizes a lower-bound, $v_{\pi}^*(\mathbf{s})$, on the state value function, $v_{\pi}(\mathbf{s})$, plus a constant:
\begin{equation}
v_{\pi}^*(\mathbf{s}) \leq v_{\pi}(\mathbf{s}) + C(\mathbf{s}),
\label{eq:main-bound}
\end{equation}
where, $C(\mathbf{s}) = \iint \Big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \Big)^{+} d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s})$, is the expected, clipped difference in the state value function, $\gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s})$, over all actions, $\mathbf{a}$, and next states, $\mathbf{s}'$, under the policy given state, $\mathbf{s}$.
Here, we use $\int \dots d\Pi(\mathbf{a} \mid \mathbf{s})$ to denote $\sum_{\mathbf{a}} \dots \pi(\mathbf{a} \mid \mathbf{s})$ for discrete action spaces and $\int \dots \pi(\mathbf{a} \mid \mathbf{s})d\mathbf{a}$ for continuous action spaces.
Similarly, we use $\int \dots d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})$ to denote $\sum_{\mathbf{s}'} \dots p(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})$ for discrete state spaces and $\int \dots p(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\mathbf{s}'$ for continuous state spaces.
Proof is provided in \Cref{app:constant-proof}. \end{theorem}
\textbf{Bounding the constant $C(\mathbf{s})$.} Consider the value function, $v_{\pi}(\mathbf{s})$, as $K$-Lipschitz continuous and assume that the expected value of the value function, $v_{\pi}(\mathbf{s}')$, over next-states, $\mathbf{s}'$, is equal to the value function evaluated at the current state, $v_{\pi}(\mathbf{s})$. Then, when $\gamma =1$, the constant is bounded proportional to the expected absolute difference between states. \begin{equation}
\begin{split}
C(\mathbf{s})
&= \iint \Big( v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \Big)^{+} d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}) \\
&= \frac{1}{2} \iint \Big( v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) + \big| v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big| \Big) d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}) \\
&= \frac{1}{2} \iint \big| v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big| d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}) \\
&\leq \frac{1}{2} \iint K\big|\big| \mathbf{s}' - \mathbf{s} \big|\big| d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}).
\end{split} \end{equation}
This interpretation motivates using spectral normalization \citep{miyato2018spectral} of the value function estimator weights, $v(\mathbf{s}, \mathbf{w})$, which regulates the Lipschitz constant, $K$, of the estimator and can improve performance in the off-policy reinforcement learning setting \citep{bjorck2021towards,gogianu2021spectral}. Moreover, when using the generalized advantage estimator with the same assumptions, the constant is given by: $C(\mathbf{s}) = \frac{1}{2} \iint \big| \gamma \lambda v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big| d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s})$. Since $\gamma \lambda < 1$, the GAE also serves to regularize the constant.
\textbf{Cautious exploration.}
We propose Bayesian inference over the actor and critic parameters to enable cautious exploration via Thompson sampling \citep{thompson1933likelihood}. This involves introducing posterior distributions over the policy parameters, $q(\bm{\Theta} \mid \mathcal{D}_{n-1})$, and value function estimator parameters, $q(\mathbf{W} \mid \mathcal{D}_{n-1})$. Here, $\mathcal{D}_{n-1} = \{\mathbf{s}_{i}, \mathbf{a}_{i}, \mathrm{r}_{i}\}_{i=1}^{|\mathcal{T}_{n-1}|}$ is data collected under the policy, $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\Theta}_{n-1})$, over a set of horizons, $\mathcal{T}_{n-1} = \mathrm{T}^{n-1}_1 \cup \mathrm{T}^{n-1}_2 \cup \dots$. In general, any inference technique is permissible. In \Cref{alg:vsop}, we outline the procedure for the case of approximate inference using dropout Bayesian Neural Networks (BNNs) following \citet{gal2016dropout}. For a dropout BNN, the posterior distribution for the policy parameters is of the form $q(\bm{\theta} \mid \widehat{\bm{\theta}}, p)$, where $\widehat{\bm{\theta}}$ is the expected value of the parameters, and $p$ is the dropout rate. Similarly, the posterior distribution for the value function parameters is of the form $q(\mathbf{w} \mid \widehat{\mathbf{w}}, p)$, where $\widehat{\mathbf{w}}$ is the expected value of the parameters, and $p$ is the dropout rate. We optimize each dropout BNN by minimizing the Kullback–Leibler divergence between a prior distribution and its approximate posterior.
We term this method VSOP: for Variational [b]ayes, Spectral-normalized, On-Policy reinforcement learning. \Cref{alg:vsop} details VSOP for dropout BNNs.
\begin{algorithm}
\caption{VSOP for Dropout Bayesian Neural Networks \label{alg:vsop}}
\begin{algorithmic}[1]
\Require{initial state, $\mathbf{s}'$, environment, $p(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a})$, rollout buffer, $\mathcal{D}$, initial actor parameters, $\widehat{\bm{\theta}}$, initial critic parameters, $\widehat{\mathbf{w}}$, dropout rate, $p$, learning rate, $\eta$, minibatch size, $b$.}
\Statex
\While{true}
\State $\mathcal{D} \gets \emptyset$ \Comment{reset rollout buffer}
\While{acting} \Comment{interact with the environment}
\State $\mathbf{s} \gets \mathbf{s}'$ \Comment{update current state}
\State $\bm{\theta} \sim q(\bm{\theta} \mid \widehat{\bm{\theta}}, p)$ \textbf{if} TS \textbf{else} $\bm{\theta} \gets \widehat{\bm{\theta}}$ \Comment{sample actor params if Thompson sampling (TS)}
\State $\mathbf{a} \sim \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$ \Comment{sample action from policy}
\State $\mathbf{s}', \mathrm{r} \sim p(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a})$ \Comment{sample next state and reward from environment}
\State $\mathcal{D} \gets \mathcal{D} \cup \{(\mathbf{s}, \mathbf{a}, \mathrm{r})\}$ \Comment{update rollout buffer}
\EndWhile
\State $\mathbf{w}^* \gets \widehat{\mathbf{w}}$ \Comment{freeze critic weights for advantage estimates}
\State $\beta \gets (1 - p) / \left(2|\mathcal{D}|\right)$ \Comment{set parameter precision}
\While{fitting} \Comment{update actor and critic}
\State $\{\mathbf{s}_i, \mathbf{a}_i, \mathrm{r}_i\}_{i=1}^{b} \sim \mathcal{D}$ \Comment{sample minibatch from rollout buffer}
\State $\widetilde{\mathbf{w}} \sim q(\mathbf{w} \mid \mathbf{w}^*, p)$ \textbf{if} TS \textbf{else} $\widetilde{\mathbf{w}} \gets \mathbf{w}^*$ \Comment{sample advantage params if TS}
\State $\bm{\theta} \sim q(\bm{\theta} \mid \widehat{\bm{\theta}}, p)$ \Comment{sample actor parameters}
\State $\widehat{\bm{\theta}}\gets \widehat{\bm{\theta}} - \eta \frac{1}{b} \sum_{i = 1}^{b} h^+(\mathbf{s}_{i}, \mathrm{r}_{i}, \widetilde{\mathbf{w}}) \nabla_{\bm{\theta}} \log{\pi(\mathbf{a}_{i} \mid \mathbf{s}_{i}, \bm{\theta})} + 2\beta\bm{\theta}$ \Comment{update actor}
\State $\mathbf{w} \sim q(\mathbf{w} \mid \widehat{\mathbf{w}}, p)$ \Comment{sample critic parameters}
\State $\widehat{\mathbf{w}} \gets \widehat{\mathbf{w}} - \eta \frac{1}{b} \sum_{i = 1}^{b} \nabla_{\mathbf{w}} \log{p(\mathrm{g}(\mathbf{s}_{i}, \mathrm{r}_{i}, \widetilde{\mathbf{w}}) \mid \mathbf{s}_{i}, \mathbf{w})} + 2\beta\mathbf{w}$ \Comment{update critic}
\EndWhile
\EndWhile
\end{algorithmic} \end{algorithm}
\section{Related Works} \label{sec:related-works}
\subsection{On-policy methods}
VSOP is an on-policy RL algorithm. \Cref{table:performance metrics} compares the gradient of the performance function, $\nabla J(\bm{\theta})$, for VSOP with those for relevant on-policy algorithms. We discuss each algorithm below.
\begin{table}[ht]
\caption{Comparison of performance functions for on-policy methods}
\label{table:performance metrics}
\centering
\begin{tabular}{@{}lll@{}}
\toprule
Method & \multicolumn{2}{c}{$\nabla J(\bm{\theta})$} \\ \midrule
A3C & $\mathbb{E}_{\pi}\left[ h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla \log \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right];$ & $h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) = q_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) - v_{\pi}(\mathbf{S}_{\mathrm{t}})$ \\
\textbf{VSOP} & $\mathbb{E}_{\pi}\left[ h_{\pi}^+(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla \log \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right];$ & $h_{\pi}^+(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) = \max\big(0, h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}})\big)$ \\
RMPG & $\mathbb{E}_{\pi}\left[ \int h_{\pi}^+(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right]$ & \\
TRPO & $\mathbb{E}_{\pi}\left[ h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla \rho(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}, \bm{\theta}) \right];$ & $\rho(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}, \bm{\theta}) = \frac{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})}{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}_{\mathrm{old}})}$ \\
PPO & \multicolumn{2}{l}{$\mathbb{E}_{\pi}\left[ \min \bigg( h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla \rho(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}, \bm{\theta}), \textrm{clip}\Big( h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla \rho(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}, \bm{\theta}), 1-\epsilon, 1 + \epsilon \Big) \bigg) \right]$} \\
DPO & \multicolumn{2}{l}{
$\mathbb{E}_{\pi}\left[ \nabla \begin{cases}
\big( h_{\pi}(\rho(\bm{\theta}) - 1) - a \tanh( h_{\pi}(\rho(\bm{\theta}) - 1)/a ) \big)^+ & h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \geq 0 \\
\big( h_{\pi}\log(\rho(\bm{\theta})) - b \tanh(h_{\pi}\log(\rho(\bm{\theta})) / b) \big)^+ & h_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) < 0 \\
\end{cases} \right]$
} \\
CVaR & $\mathbb{E}_{\pi}\left[ \big( \nu_{\alpha} - \mathrm{G}_{\mathrm{t}} \big)^+ \nabla \log \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right];$ & $\nu_{\alpha} \coloneqq \alpha\text{-quantile of return, } \mathrm{G}_{\mathrm{t}}$ \\
RSPG & $\mathbb{E}_{\pi}\left[ \big( \mathrm{G}_{\mathrm{t}} - \nu_{\alpha} \big)^+ \nabla \log \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right];$ & $\mathrm{G}_{\mathrm{t}} \coloneqq \sum_{\mathrm{k} = \mathrm{t} + 1}^{\mathrm{T}} \gamma^{\mathrm{k} - 1 - \mathrm{t}} \mathrm{R}_{\mathrm{k}}$ \\
EPOpt & $\mathbb{E}_{\pi}\left[ \mathds{1}\big(\mathrm{G}_{\mathrm{t}} \leq \nu_{\alpha}\big) \nabla J(\theta, \mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \right];$ & $J(\theta, \mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}})$ on-policy perf. function \\
\bottomrule
\end{tabular} \end{table}
\textbf{Mirror Learning.} \emph{Trust Region Policy Optimization (TRPO)} \citep{schulman2015trust} is an on-policy, actor-critic method that improves upon the baseline policy gradient method by incorporating a constraint on the maximum size of policy updates. TRPO takes small steps toward improvement and limits the step size to ensure that the new policy does not deviate significantly from the old policy. TRPO achieves this by optimizing a surrogate objective function that approximates the expected reward under the new policy while imposing a constraint on the KL divergence between the new and old policies. TRPO is effective in various high-dimensional and continuous control tasks. \emph{Proximal Policy Optimization (PPO)} \citep{schulman2017proximal}, like TRPO, improves upon the baseline policy gradient method by constraining the maximum size of policy updates. However, instead of using a KL divergence constraint, PPO employs a clipped surrogate objective function to limit the size of policy updates. PPO simplifies the optimization procedure compared to TRPO, making it more computationally efficient and easier to implement. While TRPO and PPO constrain policy updates based on the ratio between the new and old policies, VSOP constrains policy updates according to the sign of the estimated advantage function. As such, PPO and TRPO are instances of the \emph{mirror learning} framework \cite{kuba2022mirror}, whereas VSOP does not inherit the same theoretical guarantees. \citet{lu2022discovered} explores the Mirror Learning space by meta-learning a ``drift'' function. They term their immediate result Learned Policy Optimization (LPO). Through its analysis, they arrive at \emph{Discovered Policy Optimisation (DPO)}, a novel, closed-form RL algorithm.
\textbf{Regret Matching Policy Gradient (RMPG)} \citep{srinivasan2018actor} is inspired by an objective called regret policy gradient (RPG), which maximizes a lower-bound on the advantages: $h(\mathbf{s}, \mathbf{a}) \leq (h(\mathbf{s}, \mathbf{a}))^{+}$. RPG directly optimizes the policy for an estimator of the advantage lower-bound, denoted as $\nabla_{\bm{\theta}} J^{\textrm{RPG}}(\bm{\theta})$. RMPG, being inspired by RPG, has a different objective, $\nabla_{\bm{\theta}} J^{\textrm{RMPG}}(\bm{\theta})$. In both cases, $q(\mathbf{s}, \mathbf{a}, \mathbf{w})$ is a parametric estimator of the state-action value function, $q_{\pi}(\mathbf{s}, \mathbf{a})$. RMPG has demonstrated improved sample efficiency and stability in learning compared to standard policy gradient methods. VSOP is closely related to RMPG; however, we provide the missing theoretical foundations to ground RMPG (\Cref{app:constant-proof}), extend RMPG from the \emph{all actions} formulation making it more suitable for continuous control (\Cref{sec:rmpg-to-vsop}), and employ the GAE rather than the state-action value function estimator, $q(\mathbf{s}, \mathbf{a}, \mathbf{w})$.
\textbf{Risk Sensitive Reinforcement Learning.} Instead of optimizing expected value, risk-sensitive RL methods optimize a measure of risk. \citet{tamar2015optimizing} propose the risk-averse \emph{CVaR-PG} which seeks to minimize the Conditional Value at Risk (CVaR), $\Phi(\theta) \coloneqq \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathrm{G}_{\mathrm{t}} \leq \nu_{\alpha} \right]$, where $\nu_{\alpha}$ is the $\alpha$-quantile of the return, $\mathrm{G}_{\mathrm{t}}$, distribution under the policy, $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$. Relatedly, \citet{tang2020worst} have used the CVaR as a baseline function for standard policy updates. By focusing only on the worst-case trajectories, CVaR-PG is susceptible to ``blindness to success,'' thus \citet{greenberg2022efficient} propose a Cross-entropy Soft-Risk algorithm (CeSoR) to address this. \citet{kenton2019generalizing} and \citet{filos2022model} also propose uncertainty aware, risk-averse methods. For model-based policy gradient methods, \citet{rajeswaran2016epopt} propose \emph{Ensemble Policy Optimization (EPOpt)}, which incorporates restricting policy updates to be risk-averse based on the CVaR and uses ensembles to sample hypothesized models. In contrast to the above risk-averse methods, \citet{petersen2019deep} present \emph{Risk Seeking Policy Gradient (RSPG)} which focuses on maximizing best-case performance by only performing gradient updates when rewards exceed a specified quantile of the reward distribution. \citet{prashanth2022risk} provide a comprehensive discussion on risk-sensitive RL.
\subsection{Off-policy methods}
\emph{Self Imitation Learning (SIL)} \citep{oh2018self} is a hybrid method that uses clipped advantage estimates to improve the performance of on-policy algorithms such as PPO and A2C by learning from its successful off-policy trajectories. By leveraging experience replay, SIL encourages the agent to imitate its high-reward actions. \emph{Self Imitation Advantage Learning (SIAL)} \citep{ferret2020self} extends SIL to the off-policy domain. SIAL uses the clipped advantage function to weigh the importance of different actions during self-imitation, enabling the agent to focus on actions that yield higher long-term rewards. Importantly, even though SIL and SIAL only update policies when advantage estimates are positive, they differ from VSOP in that they are off-policy algorithms that learn from successful past trajectories and optimize different objectives based on max-entropy reinforcement learning \citep{aghasadeghi2011maximum,haarnoja2018soft}.
\subsection{Thompson Sampling in Deep Reinforcement Learning} Thompson sampling has been extensively explored in conventional and Deep Q-Learning \citep{strens2000bayesian,wang2005bayesian,osband2016deep,moerland2017efficient,azizzadenesheli2018efficient} to improve exploration and sample efficiency. \citet{clements2019estimating} and \citet{nikolov2018information} propose similar sampling-based exploration strategies for Deep Q-Learning. \citet{jiang2022uncertainty} propose a Thompson sampling strategy based on an ensemble of quantile estimators of the state-action value distribution. In the context of \emph{policy gradient} methods, related Upper Confidence Bound (UCB) \citep{ciosek2019better} and Hamiltonian Monte-Carlo (HMC) \citep{xu2022improving} approaches are proposed for off-policy Soft Actor-Critic (SAC) \citep{haarnoja2018soft}, and \citet{henaffexploration} proposes an elliptical episodic reward for general use. \citet{igl2019generalization} propose Selective Noise Injection using fixed dropout masks to sample policies and then actions, but stop short of formalizing this as Thompson sampling. Similarly for \citet{hausknecht2022consistent}. We believe our work is the first to formalize and show the benefit of Thompson sampling for on-policy actor-critic methods.
\section{Experiments} We comprehensively evaluate VSOP against on-policy RL methods across various domains, including continuous and discrete action spaces and diverse dimensionalities in both the action and observation spaces. Furthermore, we evaluate our method using both PyTorch \citep{paszke2019pytorch} and JAX \citep{jax2018github} frameworks. In \Cref{sec:exp-gymnasium}, we compare VSOP to baseline implementations of PPO, A3C, and RMPG on the Gymnasium \citep{brockman2016openai} implementation of MuJoCo \citep{todorov2012mujoco} for continuous control (\Cref{sec:exp-gymnasium-baseline}). In this setting, we further ablate the effect that positive advantages, spectral normalization, and Thompson sampling each has on performance (\Cref{sec:exp-gymnasium-mech}), investigate the relationship between Thompson sampling and asynchronous parallelization (\Cref{app:exploration}), show that spectral normalization and Thompson sampling also have non-negligible positive effects for PPO (\Cref{sec:exp-gymnasium-ppo}), and offer comparison to off-policy approaches like SAC \citep{haarnoja2018soft} and Twin Delayed DDPG (TD3) \citep{fujimoto2018addressing} (\Cref{sec:exp-gymnasium-off}). In \Cref{sec:exp-gymnax}, we exploit the fast iteration cycles offered by vectorized JAX implementations and the gymnax framework \citep{gymnax2022github} to perform fair comparisons of VSOP, PPO, A2C, and DPO under equal hyper-parameter search budgets.
\subsection{Gymnasium MuJoCo} \label{sec:exp-gymnasium} For this evaluation, we build off of \citet{huang2022cleanrl}'s \href{https://github.com/vwxyzjn/cleanrl}{CleanRL} package which provides reproducible, user-friendly implementations of state-of-the-art reinforcement learning algorithms using PyTorch \citep{paszke2019pytorch}, Gymnasium \citep{brockman2016openai,todorov2012mujoco}, and Weights \& Biases \citep{wandb}. Overall, several code-level optimizations for PPO reproducibility \citep{engstrom2020implementation,andrychowicz2021matters} are superfluous for our method in this setting. For example, we omit advantage normalization, value loss clipping \citep{schulman2017proximal}, gradient clipping, and modification of the default Adam \citep{kingma2014adam} epsilon parameter as they either do not lead to an appreciable difference in performance or have a slightly negative effect. However, we find that orthogonal weight initialization, learning rate annealing, reward scaling/clipping, and observation normalization/clipping have non-negligible positive effects on performance \cite{engstrom2020implementation, andrychowicz2021matters}. In addition to adding dropout, weight decay regularization, and spectral normalization, we also look at model architecture modifications not present in the CleanRL implementation: layer width, number of hidden layers, layer activation, layer normalization \cite{ba2016layer}, and residual connections. We find that ReLU activation functions \citep{nair2010rectified}, increasing layer width to 256, and a dropout rate of 0.01-0.04 are beneficial. We find that network depth and residual connections are benign overall. In contrast to recent findings for offline data in off-policy reinforcement learning \citep{ball2023efficient}, layer normalization — whether applied to the actor, the critic, or both — is detrimental to performance. We give full details in \Cref{app:implementation-details-gym}.
\begin{figure}
\caption{
Gymnasium-MuJoCo. Comparing VSOP to on-policy baseline algorithms.
Here, VSOP improves over baseline PPO in 5 environments, matches its performance in 4 environments, and is worse in just 1 environment. VSOP improves over A2C in all environments but Pusher, where performance is statistically equal. Finally, VSOP improves over RMPG in all environments.
}
\label{fig:mujoco-baselines}
\end{figure}
\subsubsection{Comparison to on-policy baselines.} \label{sec:exp-gymnasium-baseline} First, we compare tuned VSOP to baseline implementations: PPO, A2C, and RMPG. We use the CleanRL \citep{huang2022cleanrl} implementation of PPO, the StableBaselines3 \citep{stable-baselines3} hyper-parameter settings for A2C, and the VSOP optimal hyper-params for RMPG. \Cref{fig:mujoco-baselines} summarizes these results. VSOP improves over baseline PPO in five environments, matches its performance in four environments, and is worse in just one environment, Pusher. VSOP improves over A2C in all environments but Pusher, where performance is statistically equal. Finally, VSOP improves over RMPG in all environments.
\subsubsection{Ablation of mechanisms.} \label{sec:exp-gymnasium-mech} Next, we investigate the influence of our four proposed mechanisms on the performance of VSOP. For reference, the mechanisms are positive advantages, single-action setting, spectral normalization, and Thompson sampling. \Cref{fig:mujoco-hyper} summarizes these results, where we see that positive advantages and operating in the single-action regime impact performance on MuJoCo significantly. Spectral normalization and Thompson sampling also influence performance on MuJoCo positively, especially in high-dimensional action and observation space settings such as Humanoid, Humanoid Stand-Up, and Ant. The performance gains for spectral normalization align with results given by \citet{bjorck2021towards} and \citet{gogianu2021spectral} for DDPG \citep{lillicrap2015continuous}, DRQ \citep{kostrikov2020image}, Dreamer \citep{hafner2019dream}, DQN \citep{wang2016dueling} and C51 \citep{bellemare2017distributional}.
\begin{figure}
\caption{
Comparing the effect of VSOP mechanisms on Mujoco continuous control performance.
Using the single action framework and updating the policy only on positive advantage estimates have the largest effects, followed by spectral normalization, and finally Thompson sampling.
Green solid lines (VSOP) show proposed, optimized method.
Yellow dashed lines (no Thomp. samp.) show VSOP without Thompson sampling.
Red dash dot lines (no spect. norm.) show VSOP without spectral normalization.
Blue dotted lines (RMPG) show the ``all actions'' approach.
Purple dash dot dot lines (with neg. advantages) show VSOP without restricting policy updates to positive advantages.
}
\label{fig:mujoco-hyper}
\end{figure}
\subsubsection{Closing the gap to off-policy methods} \label{sec:exp-gymnasium-off} Interestingly, we see that applying spectral normalization and dropout to PPO also yields an improvement. We call this augmentation VSPPO and provide detailed analysis in \Cref{app:ppo}. In \Cref{fig:mujoco-sac}, we compare VSOP and VSPPO to SAC and TD3. We close the performance gap significantly for environments such as Humanoid, Half-Cheetah, Ant, and Humanoid Stand-up.
\begin{figure}
\caption{Mujoco continuous control benchmark comparison to SAC and TD3}
\label{fig:mujoco-sac}
\end{figure}
\subsection{Gymnax Environments} \label{sec:exp-gymnax} PureJaxRL \citep{lu2022discovered} uses Gymnax \citep{gymnax2022github} and Jax \citep{jax2018github} to enable vectorization, which facilitates principled hyper-parameter tuning. Using it, we explore several environments and compare VSOP, PPO, A3C, and DPO. We use Bayesian hyper-parameter optimization \citep{snoek2012practical} and give each algorithm a search budget of 100 steps. We search over hyper-parameters such as the learning rate, number of update epochs, number of mini-batches in an update epoch, the GAE $\lambda$ parameter, the max gradient norm, and the width of the network. We give full implementation details in \Cref{app:implementation-details-gymnax}. \Cref{tab:gymnax-ranking} shows the overall ranking of each method. VSOP is competitive with DPO and improves over PPO and A3C.
\begin{table}[t]
\caption{Rank scores (lower is better) for VSOP, DPO, PPO, and A3C on Brax-MuJoCo, MinAtar, and Classic Control. Methods are ranked from 1 to 4 based on statistically significant differences (paired t-test with p-value 0.1) between mean last episode returns. Ties are given the same rank, and the proceeding score will be the last rank plus the number of additional methods.}
\label{tab:gymnax-ranking}
\centering
\begin{tabular}{l|ccc|c}
\toprule
\textbf{Method} & \textbf{Brax-MuJoCo} & \textbf{MinAtar} & \textbf{Classic Control} & \textbf{Avg. Rank} \\
\midrule
DPO & \textbf{1.33} & \textbf{1.75} & 1.25 & 1.44 \\
VSOP (Ours) & 1.78 & 2.50 & \textbf{1.00} & 1.76 \\
PPO & 2.00 & 2.25 & 1.25 & 1.83 \\
A3C & 4.00 & 2.25 & 1.25 & 2.50 \\
\bottomrule
\end{tabular} \end{table}
\Cref{fig:classic-control} summarizes the results for \textbf{Classic Control}. Performance of each method is in general statistically equal, but VSOP shows significant gain on MountainCar Continuous.
\begin{figure}
\caption{Acrobot}
\label{fig:classic-control-acrobot}
\caption{CartPole}
\label{fig:classic-control-cartpole}
\caption{MountainCar Cont.}
\label{fig:classic-control-mountaincar}
\caption{Pendulum}
\label{fig:classic-control-pendulum}
\caption{
Classic Control Environments \citep{gymnax2022github}. Mean episodic return and 68\% CI over 20 random seeds are shown for VSOP (Blue), PPO (Orange), A3C (Green), and DPO (Red). Each method is hyper-parameter tuned using Bayesian Optimization with 100 search steps. Paired t-test p-values for last episode with respect to VSOP shown in brackets. Significant improvement is seen for VSOP compared to all other methods on MountainCar Continuous.
}
\label{fig:classic-control}
\end{figure}
\Cref{fig:minatar} summarizes the results for \textbf{MinAtar} \citep{bellemare2013arcade,young19minatar}. VSOP shows significant improvement over PPO and A3C in Space Invaders. We see marginal improvement over PPO and DPO in Breakout, with significant improvement over A3C. VSOP trails the baselines significantly in Asterix and Freeway.
\begin{figure}
\caption{Asterix}
\label{fig:minatar-asterix}
\caption{Breakout}
\label{fig:minatar-breakout}
\caption{Freeway}
\label{fig:minatar-freeway}
\caption{SpaceInvaders}
\label{fig:minatar-spaceinvaders}
\caption{
MinAtar Environments \citep{young19minatar}. Mean episodic return and 68\% CI over 20 random seeds are shown for VSOP (Blue), PPO (Orange), A3C (Green), and DPO (Red). Methods are hyper-parameter tuned using Bayesian Optimization with 100 search steps. p-values for last episode with respect to VSOP shown in brackets. VSOP performs well on Breakout and SpaceInvaders.
}
\label{fig:minatar}
\end{figure}
\Cref{fig:brax} summarizes the results for \textbf{Brax MuJoCo} \citep{todorov2012mujoco,brax2021github}. We perform paired t-tests for the last episode between each method and VSOP. We threshold at a p-value of 0.1 to indicate significance. VSOP significantly outperforms A3C in all environments. VSOP significantly outperforms PPO in four of nine environments (InvertedDoublePendulum, Pusher, Reacher, and Walker2d), is statistically equivalent in two environments (Hopper and HumanoidStandUp), and is significantly less effective in three environments (Ant, HalfCheetah, and Humanoid). VSOP outperforms DPO on Ant, is statistically equivalent in four environments (HumanoidStandUp, Pusher, Reacher, and Walker2d), but is significantly less effective in four environments (HalfCheetah, Hopper, Humanoid, and InvertedDoublePendulum). Overall, VSOP outperforms A3C and PPO and is competitive with DPO.
\begin{figure}
\caption{Brax-ant}
\label{fig:Brax-ant}
\caption{Brax-halfcheetah}
\label{fig:Brax-halfcheetah}
\caption{Brax-hopper}
\label{fig:Brax-hopper}
\caption{Brax-humanoid}
\label{fig:Brax-humanoid}
\caption{Brax-humanoidstandup}
\label{fig:Brax-humanoidstandup}
\caption{Brax-inverteddoublependulum}
\label{fig:Brax-inverted_double_pendulum}
\caption{Brax-pusher}
\label{fig:Brax-pusher}
\caption{Brax-reacher}
\label{fig:Brax-reacher}
\caption{Brax-walker2d}
\label{fig:Brax-walker2d}
\caption{
Brax-MuJoCo Environments \citep{brax2021github,todorov2012mujoco}. Mean episodic return and 68\% CI over 20 random seeds are shown for VSOP (Blue), PPO (Orange), A3C (Green), and DPO (Red). Each method is hyper-parameter tuned using Bayesian Optimization \citep{snoek2012practical} with a budget of 100 search steps. Paired t-test p-values for last episode with respect to VSOP shown in brackets. VSOP generally outperforms PPO and A3C and is competitive with DPO.
}
\label{fig:brax}
\end{figure}
\section{Conclusion}
We have presented a novel approach for improving the performance of on-policy DRL algorithms by incorporating cautious interaction. Our method, realized through simple modifications to the A3C algorithm, optimizes a lower bound on value plus a constant and integrates exploration via Thompson sampling. We provide a theoretical justification for our approach by demonstrating that our algorithm optimizes this lower bound. Our empirical evaluations across several diverse benchmarks confirm our approach's improved performance compared to existing on-policy algorithms. Although achieving sufficiently cautious algorithmic interaction with the world remains a distant goal, our research constitutes a significant stride toward this objective. We trust that our work will catalyze further advancements in the field, propelling the development of more cautious and efficacious DRL applications in resolving complex, real-world problems.
\section{Broader Impact}
Algorithmic decision-making is becoming increasingly present in many areas of our life. While this has the potential for benefit, it is also known to automate and perpetuate historical patterns that are often unjust and discriminatory \citep{buolamwini2018gender,noble2018algorithms,benjamin2020race,birhane2021algorithmic}. We believe that cautious interaction is a necessary feature for the type of deployed algorithmic decision-making systems the RL community envisions, but that technological solutions alone will not suffice.
\appendix \section{Theoretical Results}
\subsection{Proof of Theorem 1} \label{app:constant-proof}
\begin{theorem}
Let, $\mathrm{G}_{\mathrm{t}} \coloneqq \sum_{\mathrm{k} = \mathrm{t} + 1}^{\mathrm{T}} \gamma^{\mathrm{k} - 1 - \mathrm{t}} \mathrm{R}_{\mathrm{k}}$, denote the discounted return.
Let $q_{\pi}(\mathbf{s}, \mathbf{a}) = \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}} = \mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}\right]$, denote the state-action value function, and
$v_{\pi}(\mathbf{s}) = \mathbb{E}_{\pi} \left[ \mathrm{G}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}} = \mathbf{s} \right]$, denote the state value function, under policy $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$.
Let $\big(x\big)^+ \coloneqq \max(0, x)$.
Assume, without loss of generality, that rewards, $\mathrm{R}_{\mathrm{t}}$, are non-negative.
Assume that the gradient of the policy, $\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$, is a conservative vector field.
Then, performing gradient ascent with respect to,
\begin{equation}
\nabla_{\bm{\theta}} J(\bm{\theta}) = \mathbb{E}_{\pi}\left[ \Bigl(q_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) - v_{\pi}(\mathbf{S}_{\mathrm{t}}) \Bigr)^+ \nabla_{\bm{\theta}} \log \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right],
\label{eq:app-objective}
\end{equation}
maximizes a lower-bound, $v_{\pi}^*(\mathbf{s})$, on the state value function, $v_{\pi}(\mathbf{s})$, plus a constant:
\begin{equation}
v_{\pi}^*(\mathbf{s}) \leq v_{\pi}(\mathbf{s}) + C(\mathbf{s}),
\label{eq:app-bound}
\end{equation}
where, $C(\mathbf{s}) = \iint \Big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \Big)^{+} d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}} = \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s})$, is the expected, clipped difference in the state value function, $\gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s})$, over all actions, $\mathbf{a}$, and next states, $\mathbf{s}'$, under the policy given state, $\mathbf{s}$.
Here, we use $\int \dots d\Pi(\mathbf{a} \mid \mathbf{s})$ to denote $\sum_{\mathbf{a}} \dots \pi(\mathbf{a} \mid \mathbf{s})$ for discrete action spaces and $\int \dots \pi(\mathbf{a} \mid \mathbf{s})d\mathbf{a}$ for continuous action spaces.
Similarly, we use $\int \dots d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})$ to denote $\sum_{\mathbf{s}'} \dots p(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})$ for discrete state spaces and $\int \dots p(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\mathbf{s}'$ for continuous state spaces.
\begin{proof}
\Cref{cor:pg_decomposition} shows that the policy-gradient theorem \citep{sutton1999policy} can be expressed in terms of the clipped advantage function,
\[
\mathcolor{mypurple}{h_{\pi}^{+}(\mathbf{s}, \mathbf{a})} = \mathcolor{mypurple}{\Bigl(q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi}(\mathbf{s}) \Bigr)^+} \coloneqq \mathcolor{mypurple}{\max(0, q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi}(\mathbf{s}))},
\]
as,
\begin{equation}
\begin{split}
\nabla v_{\pi}(\mathbf{s})
&= \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathcolor{mypurple}{h_{\pi}^{+}(\mathbf{x}, \mathbf{a})} \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi) \\
&\quad\quad + \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathds{1}\big(q_{\pi}(\mathbf{x}, \mathbf{a}) > v_{\pi}(\mathbf{x}) \big) v_{\pi}(\mathbf{x}) \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi) \\
&\quad\quad\quad\quad + \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathds{1}\big(q_{\pi}(\mathbf{x}, \mathbf{a}) \leq v_{\pi}(\mathbf{x}) \big) q_{\pi}(\mathbf{x}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi),
\end{split}
\end{equation}
where, $\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi)$, is the probability of transitioning from state $\mathbf{s}$ to state $\mathbf{x}$ in $k$ steps under policy $\pi$.
The first right hand side term above defines the gradient of the lower-bound, $v_{\pi}^*(\mathbf{s})$, with respect to $\bm{\theta}$:
\begin{equation}
\nabla v_{\pi}^*(\mathbf{s}) \coloneqq \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathcolor{mypurple}{h_{\pi}^{+}(\mathbf{x}, \mathbf{a})} \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi).
\end{equation}
Letting $\nabla v_{\pi}^{*}(\mathbf{s}_0)=\int_{\mathcal{S}} \sum_{k=0}^{\infty} \gamma^k \int_{\mathcal{A}} h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) d\mathbb{P}(\mathbf{s}_0 \to \mathbf{s}; k, \pi)$, a straightforward continuation of the policy gradient theorem \citep{sutton1999policy} will show that
\[
\nabla J(\bm{\theta}) \coloneqq \nabla v_{\pi}^{*}(\mathbf{s}_0) \propto \iint h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla_{\bm{\theta}} d\Pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) d\mathrm{P}(\mathbf{s}).
\]
We then arrive at \Cref{eq:app-objective} by moving from the all-states/actions formulation to the single-state/action formulation:
\begin{subequations}
\begin{align*}
\nabla J(\bm{\theta})
&\coloneqq \nabla v_{\pi}^{*}(\mathbf{s}_0),
&& \text{by definition} \\
&\propto \iint \Bigl(q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi}(\mathbf{s}) \Bigr)^+ \nabla_{\bm{\theta}} d\Pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) d\mathrm{P}(\mathbf{s}),
&& \text{\citet{sutton1999policy}} \\
&= \mathbb{E}_{\pi} \left[ \int \Bigl(q_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) - v_{\pi}(\mathbf{S}_{\mathrm{t}}) \Bigr)^+ \nabla_{\bm{\theta}} d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right],
&& \text{} \\
&= \mathbb{E}_{\pi} \left[ \int \Bigl(q_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) - v_{\pi}(\mathbf{S}_{\mathrm{t}}) \Bigr)^+ \frac{\nabla_{\bm{\theta}} d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})}{d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) \right],
&& \text{} \\
&= \mathbb{E}_{\pi} \left[ \int \Bigl(q_{\pi}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) - v_{\pi}(\mathbf{S}_{\mathrm{t}}) \Bigr)^+ \nabla_{\bm{\theta}} \log{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} \right].
&& \text{}
\end{align*}
\end{subequations}
Now we need to show that,
\[
v_{\pi}^*(\mathbf{s}) \leq v_{\pi}(\mathbf{s}) + \iint \Big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \Big)^{+} d\mathbb{P}(\mathbf{s}' \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}, \mathbf{A}_{\mathrm{t}}=\mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}=\mathbf{s}).
\]
To do so, we will first prove that it holds for episodes, $\mathrm{T}$, of length 1, then that it holds for episodes of length 2.
These two proofs will then prove \Cref{eq:app-bound} for episodes of arbitrary length by mathematical induction and conclude the proof.
\textbf{For episodes of length 1, $|T| = 1$}, we have
\begin{equation}
\begin{split}
\nabla v_{\pi}(\mathbf{s})
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \int \nabla q_{\pi}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}), \\
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \int \bigg( \nabla \int \mathrm{r} d\mathbb{P}(\mathrm{r} \mid \mathbf{s}, \mathbf{a}) \bigg) d\Pi(\mathbf{a} \mid \mathbf{s}), \\
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}), \\
&= \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \int \Big( \mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s}) + \mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a}) \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}).
\end{split}
\end{equation}
Therefore, for $|T| = 1$,
\[
\nabla v_{\pi}^*(\mathbf{s}) = \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s})
\]
In order to recover $v_{\pi}^*(\mathbf{s})$, we need to use the work of \citet{willse2019inverse} to define an inverse function for the gradient.
Assume that the policy, $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$, is a smooth, infinitely differentiable function with respect to $\bm{\theta}$. Further, let the gradient of the policy,
\begin{equation}
\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) =
\begin{pmatrix}
\frac{\partial}{\partial\theta_1} \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}), \\
\vdots \\
\frac{\partial}{\partial\theta_k} \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})
\end{pmatrix},
\end{equation}
be a conservative vector field. We call $\tilde{\beta}\big(\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})\big)$ the inverse of the gradient operation, $\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$.
Assuming that $\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$ is a representative of $\tilde{\beta}$, we have that,
\begin{equation}
\label{eq:grad-inverse}
\begin{split}
\pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) &= \tilde{\beta}\big(\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})\big), \\
&= \int_{\gamma} \nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) \cdot d\bm{\theta}, \\
&= \int_{\gamma} \frac{\partial}{\partial\theta_1} \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) d\theta_1 + \dots + \frac{\partial}{\partial\theta_k} \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta}) d\theta_k,
\end{split}
\end{equation}
where $\gamma$ is a path from the fixed reference point, $\bm{\theta}_0$, to $\bm{\theta}$. The conservativeness of $\nabla \pi(\mathbf{a} \mid \mathbf{s}, \bm{\theta})$ guarantees that the integrals are path independent.
Now we have,
\begin{subequations}
\begin{align*}
v_{\pi}^*(\mathbf{s})
&= \tilde{\beta}\bigg( \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \bigg), \\
&= \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \tilde{\beta}\big(\nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \big),
&& \text{linearity} \\
&= \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}),
&& \text{\Cref{eq:grad-inverse}}\\
&\leq \iint \Big( \mathrm{r} + \big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big)^+ \Big) d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s}),
&& \text{\Cref{cor:lower-bound-decomp}} \\
&= v_{\pi}(\mathbf{s}) + \iint \big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big)^+ d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s}),
&& \text{|T| = 1}
\end{align*}
\end{subequations}
which concludes the proof for episodes of length 1.
\textbf{For episodes of length 2}, $|T| = 2$, we have
\begin{subequations}
\begin{align*}
\nabla v_{\pi}(\mathbf{s})
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \int \nabla q_{\pi}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}), \\
\begin{split}
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \iiint q_{\pi}(\mathbf{s}', \mathbf{a}') \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad+ \iiint \bigg( \nabla \int \mathrm{r}' d\mathbb{P}(\mathrm{r}' \mid \mathbf{s}', \mathbf{a}') \bigg) d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split} \\
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \iiint q_{\pi}(\mathbf{s}', \mathbf{a}') \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}), \\
\begin{split}
&= \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \iiint h_{\pi}^{+}(\mathbf{s}', \mathbf{a}') \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad+ \int \Big( \mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s}) + \mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a}) \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad+ \iiint \Big( \mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s}') + \mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}', \mathbf{a}') \Big) \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}')d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}).
\end{split}
\end{align*}
\end{subequations}
Therefore, for $|T| = 2$,
\[
\nabla v_{\pi}^*(\mathbf{s}) = \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \iiint h_{\pi}^{+}(\mathbf{s}', \mathbf{a}') \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}).
\]
Finally, we apply the $\tilde{\beta}$ operator:
\begin{subequations}
\begin{align*}
v_{\pi}^*(\mathbf{s})
&= \tilde{\beta}\bigg(\int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \iiint h_{\pi}^{+}(\mathbf{s}', \mathbf{a}') \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s})\bigg), \\
&= \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) \tilde{\beta}\Big(\nabla d\Pi(\mathbf{a} \mid \mathbf{s})\Big) + \iiint h_{\pi}^{+}(\mathbf{s}', \mathbf{a}') \tilde{\beta}\Big(\nabla d\Pi(\mathbf{a}' \mid \mathbf{s}')\Big) d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}),
&& \text{linearity} \\
&= \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}) + \iiint h_{\pi}^{+}(\mathbf{s}', \mathbf{a}') d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}),
&& \text{\Cref{eq:grad-inverse}} \\
\begin{split}
&\leq \iint \mathrm{r} d\mathbb{P}(\mathrm{r} \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}) + \iint \big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big)^+ d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \iiint h_{\pi}^{+}(\mathbf{s}', \mathbf{a}') d\Pi(\mathbf{a}' \mid \mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split}
&& \text{\Cref{cor:lower-bound-decomp}} \\
\begin{split}
&\leq \iint \mathrm{r} d\mathbb{P}(\mathrm{r} \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}) + \iint \big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big)^+ d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \iint \gamma v_{\pi}(\mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{a}, \mathbf{s}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split}
&& \text{\Cref{cor:lower-bound}} \\
&= v_{\pi}(\mathbf{s}) + \iint \big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big)^+ d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s}).
&& \text{rearranging terms}
\end{align*}
\end{subequations}
\end{proof} \end{theorem}
\begin{corollary}
\label{cor:pg_decomposition}
$\nabla v_{\pi}(\mathbf{s})$ can be written in terms of $h_{\pi}^{+}(\mathbf{s}, \mathbf{a})$.
\begin{proof}
\begin{subequations}
\begin{align}
\nabla v_{\pi}(\mathbf{s})
&= \nabla \bigg[ \int q_{\pi}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}) \bigg], \label{eq:three_goats-a} \\
&= \int q_{\pi}(\mathbf{s}, \mathbf{a}) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) + \int \nabla q_{\pi}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}), \label{eq:three_goats-b} \\
\begin{split}
&= \int \Big( \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{s}, \mathbf{a})} + \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s})} + \mathcolor{mypurple}{\mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a})} \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \int \nabla q_{\pi}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split} \label{eq:three_goats-c} \\
\begin{split}
&= \int \Big( \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{s}, \mathbf{a})} + \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s})} + \mathcolor{mypurple}{\mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a})} \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \int \nabla \bigg[ \int \big( \mathrm{r} + \gamma v_{\pi}(\mathbf{s}') \big) d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a}) \bigg] d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split} \label{eq:three_goats-d} \\
\begin{split}
&= \int \Big( \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{s}, \mathbf{a})} + \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s})} + \mathcolor{mypurple}{\mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a})} \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \gamma\iint \nabla v_{\pi}(\mathbf{s}') d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split} \label{eq:three_goats-e} \\
\begin{split}
&= \int \Big( \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{s}, \mathbf{a})} + \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s})} + \mathcolor{mypurple}{\mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a})} \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \gamma \iint \Bigg[ \int q_{\pi}(\mathbf{s}', \mathbf{a}') \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') \\
&\quad\quad\quad\quad + \gamma \int \nabla v_{\pi}(\mathbf{s}'') d\mathbb{P}(\mathbf{s}'' \mid \mathbf{s}', \mathbf{a}') d\Pi(\mathbf{a}' \mid \mathbf{s}') \Bigg] d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split} \label{eq:three_goats-f} \\
\begin{split}
&= \int \Big( \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{s}, \mathbf{a})} + \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s})} + \mathcolor{mypurple}{\mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}, \mathbf{a})} \Big) \nabla d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&\quad\quad + \gamma \iint \Bigg[ \int \Big( \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{s}', \mathbf{a}')} + \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi} > v_{\pi} \big) v_{\pi}(\mathbf{s}')} + \mathcolor{mypurple}{\mathds{1}\big(q_{\pi} \leq v_{\pi} \big) q_{\pi}(\mathbf{s}', \mathbf{a}')} \Big) \nabla d\Pi(\mathbf{a}' \mid \mathbf{s}') \\
&\quad\quad\quad\quad + \gamma \int \nabla v_{\pi}(\mathbf{s}'') d\mathbb{P}(\mathbf{s}'' \mid \mathbf{s}', \mathbf{a}') d\Pi(\mathbf{a}' \mid \mathbf{s}') \Bigg] d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}),
\end{split} \label{eq:three_goats-g} \\
\begin{split}
&= \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathcolor{mygreen}{h_{\pi}^{+}(\mathbf{x}, \mathbf{a})} \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi) \\
&\quad\quad + \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathcolor{myfuchsia}{\mathds{1}\big(q_{\pi}(\mathbf{x}, \mathbf{a}) > v_{\pi}(\mathbf{x}) \big) v_{\pi}(\mathbf{x})} \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi) \\
&\quad\quad\quad\quad + \int_{\mathcal{S}} \sum_{k=0}^{\infty} \Bigg[ \gamma^k \int_{\mathcal{A}} \mathcolor{mypurple}{\mathds{1}\big(q_{\pi}(\mathbf{x}, \mathbf{a}) \leq v_{\pi}(\mathbf{x}) \big) q_{\pi}(\mathbf{x}, \mathbf{a})} \nabla d\Pi(\mathbf{a} \mid \mathbf{x}) \Bigg] d\mathbb{P}(\mathbf{s} \to \mathbf{x}; k, \pi)
\end{split} \label{eq:three_goats-h}
\end{align}
\end{subequations}
\end{proof} \end{corollary}
\begin{corollary}
\label{cor:lower-bound-decomp}
\begin{equation*}
\underline{v}_{\pi}^{v_{\pi}}(\mathbf{s}) \leq \iint \mathrm{r} d\mathbb{P}(\mathrm{r} \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s}) + \iint \Big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \Big)^+ d\mathbb{P}(\mathbf{s}' \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s})
\end{equation*}
\begin{proof}
\begin{subequations}
\begin{align*}
\underline{v}_{\pi}^{v_{\pi}}(\mathbf{s})
&\coloneqq \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}) \\
&= \frac{1}{2} \int \Big( q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi} + \big|q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi} \big| \Big) d\Pi(\mathbf{a} \mid \mathbf{s})
&& \text{(}2\max(0, a) = a + |a|\text{)} \\
\begin{split}
&= \frac{1}{2} \int \bigg( \int \big( \mathrm{r} + \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big) d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a}) \\
&\quad\quad+ \Big| \int \big( \mathrm{r} + \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big) d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a}) \Big| \bigg) d\Pi(\mathbf{a} \mid \mathbf{s})
\end{split} \\
\begin{split}
&\leq \frac{1}{2}\iint \Big( \mathrm{r} + \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) + \big| \mathrm{r} + \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big| \Big) \\
&\quad\quad d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s})
\end{split}
&& \text{(Jensen's inequality)} \\
\begin{split}
&\leq \frac{1}{2}\iint \Big( 2\mathrm{r} + \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) + \big| \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big| \Big) \\
&\quad\quad d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s})
\end{split}
&& \text{(triangle inequality)} \\
&= \iint \Big( \mathrm{r} + \big( \gamma v_{\pi}(\mathbf{s}') - v_{\pi}(\mathbf{s}) \big)^+ \Big) d\mathbb{P}(\mathbf{s}', \mathrm{r} \mid \mathbf{s}, \mathbf{a})d\Pi(\mathbf{a} \mid \mathbf{s})
&& \text{(}2\max(0, a) = a + |a|\text{)}
\end{align*}
\end{subequations}
\end{proof} \end{corollary}
\begin{corollary}
\label{cor:lower-bound}
When, without loss of generality, rewards, $\mathrm{R}_{\mathrm{t}}$, are assumed to be non-negative:
\begin{equation*}
\underline{v}_{\pi}^{v_{\pi}}(\mathbf{s}) \coloneqq \int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s}) \leq v_{\pi}(\mathbf{s})
\end{equation*}
\begin{proof}
\begin{subequations}
\begin{align*}
\int h_{\pi}^{+}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s})
&= \frac{1}{2} \int \Big( q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi} + \big|q_{\pi}(\mathbf{s}, \mathbf{a}) - v_{\pi} \big| \Big) d\Pi(\mathbf{a} \mid \mathbf{s})
&& \text{( } 2\max(0, a) = a + |a| \text{ )} \\
&\leq \int q_{\pi}(\mathbf{s}, \mathbf{a}) d\Pi(\mathbf{a} \mid \mathbf{s})
&& \text{(triangle inequality)} \\
&= v_{\pi}(\mathbf{s})
\end{align*}
\end{subequations}
\end{proof} \end{corollary}
\subsection{Relation to Regret Matching Policy Gradient (RMPG)} \label{sec:rmpg-to-vsop} Here we provide a derivation starting from RMPG and arriving at our method.
\begin{equation*}
\begin{split}
\nabla J(\bm{\theta})
&= \mathbb{E}_{\pi}^{} \left[ \int_{\mathcal{A}} \left( q_{\pi}^{}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) - \int_{\mathcal{A}} \pi(\mathbf{a}^{\prime} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) q_{\pi}^{}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}^{\prime}) d\mathbf{a}^{\prime} \right)^{+} \nabla_{\bm{\theta}} \pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) d\mathbf{a} \right] \\
&= \mathbb{E}_{\pi}^{} \left[ \int_{\mathcal{A}} \left( q_{\pi}^{}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) - v_{\pi}^{}(\mathbf{S}_{\mathrm{t}}) \right)^{+} \nabla_{\bm{\theta}} \pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) d\mathbf{a} \right] \\
&= \mathbb{E}_{\pi}^{} \left[ \int_{\mathcal{A}} h_{\pi}^{+}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) \nabla_{\bm{\theta}} \pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) d\mathbf{a} \right] \\
&= \mathbb{E}_{\pi}^{} \left[ \int_{\mathcal{A}} \pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta}) h_{\pi}^{+}(\mathbf{S}_{\mathrm{t}}, \mathbf{a}) \frac{\nabla_{\bm{\theta}} \pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})}{\pi(\mathbf{a} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} d\mathbf{a} \right] \\
&= \mathbb{E}_{\pi}^{} \left[ h_{\pi}^{+}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \frac{\nabla_{\bm{\theta}} \pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})}{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} \right] \\
&= \mathbb{E}_{\pi}^{} \left[ h_{\pi}^{+}(\mathbf{S}_{\mathrm{t}}, \mathbf{A}_{\mathrm{t}}) \nabla_{\bm{\theta}} \log{\pi(\mathbf{A}_{\mathrm{t}} \mid \mathbf{S}_{\mathrm{t}}, \bm{\theta})} \right]
\end{split} \end{equation*}
\section{Implementation Details} \label{app:implementation-details}
All code is available at: \href{https://github.com/anndvision/vsop}{https://github.com/anndvision/vsop}.
\subsection{Gymnasium} \label{app:implementation-details-gym}
We build off of \citet{huang2022cleanrl}'s \href{https://github.com/vwxyzjn/cleanrl}{CleanRL} package which provides reproducible, user-friendly implementations of state-of-the-art reinforcement learning algorithms using PyTorch \citep{paszke2019pytorch}, Gymnasium \citep{brockman2016openai,todorov2012mujoco}, and Weights \& Biases \citep{wandb}. Several code-level optimizations \citep{engstrom2020implementation,andrychowicz2021matters} key to PPO reproducibility are superfluous for our method. We omit advantage normalization, value loss clipping \citep{schulman2017proximal}, gradient clipping, and modification of the default Adam \citep{kingma2014adam} epsilon parameter as they either do not lead to an appreciable difference in performance or have a slightly negative effect. However, we find that orthogonal weight initialization, learning rate annealing, reward scaling/clipping, and observation normalization/clipping remain to have non-negligible positive effects on performance \cite{engstrom2020implementation, andrychowicz2021matters}. In addition to adding dropout, weight decay regularization, and spectral normalization, we also look at model architecture modifications not present in the CleanRL implementation: layer width, number of hidden layers, layer activation, layer normalization \cite{ba2016layer}, and residual connections. We find that ReLU activation functions \citep{nair2010rectified}, increasing layer width to 256, and a dropout rate of 0.01-0.04 are beneficial. We find that network depth and residual connections are benign overall. In contrast to recent findings in the context of offline data for off-policy reinforcement learning \citep{ball2023efficient}, layer normalization — whether applied to the actor, the critic, or both — is detrimental to performance.
In \Cref{table:gymnasium-hypers}, we present the hyperparameters used for the VSOP, VSPPO, RMPG, A3C, and PPO algorithms when trained on Gymnasium MuJoCo environments. The table lists hyperparameters such as the number of timesteps, thread number, and learning rate, among others. Each algorithm may have a unique set of optimal hyperparameters. Please note that some hyperparameters: ``clip $\epsilon$'', ``norm. adv.'', and ``clip v-loss'' may not apply to all algorithms, as these are specific to certain policy optimization methods. The ``width'' and ``activation'' fields correspond to the architecture of the neural network used by the policy, and the ``weight decay'' and ``dropout'' fields pertain to the regularization techniques applied during training. In general, tuning these hyperparameters is crucial to achieving optimal performance. Note that Adam optimization \citep{kingma2014adam} is used for all algorithms except for A3C where RMSProp \citep{hinton2012rms} is used.
\begin{table}[ht]
\centering
\caption{Hyper-parameters for PPO, VSOP, RMPG, A3C, and VSPPO algorithms across Gymnasium MuJoCo environments}
\label{table:gymnasium-hypers}
\begin{tabular}{@{}l|ccccc@{}}
\toprule
& \multicolumn{5}{c}{Gymnasium MuJoCo} \\
Parameter & VSOP & VSPPO & RMPG & A3C & PPO \\ \midrule
timesteps & 3e6 & 3e6 & 3e6 & 3e6 & 3e6 \\
num. envs & 1 & 1 & 1 & 1 & 1 \\
num. steps & 2048 & 2048 & 2048 & 5 & 2048 \\
learning rate & 3e-4 & 3e-4 & 3e-4 & 7e-4 & 3e-4 \\
anneal lr & True & True & True & True & True \\
optim. $\epsilon$.& 1e-8 & 1e-8 & 1e-8 & 3e-6 & 1e-5 \\
GAE $\gamma$ & 0.99 & 0.99 & 0.99 & 0.99 & 0.99 \\
GAE $\lambda$ & 0.95 & 0.95 & 0.95 & 1.0 & 0.95 \\
num. minibatch & 32 & 32 & 32 & 1 & 32 \\
update epochs & 10 & 10 & 10 & 1 & 10 \\
norm. adv. & False & False & False & False & True \\
clip $\epsilon$ & N/A & N/A & N/A & N/A & 0.2 \\
clip v-loss & False & False & False & False & True \\
ent. coef. & 0.0 & 0.0 & 0.0 & 0.0 & 0.0 \\
v-loss coef. & 0.5 & 0.5 & 0.5 & 0.5 & 0.5 \\
max grad. norm. & $\infty$ & $\infty$ & $\infty$ & 0.5 & 0.5 \\
norm. obs. & True & True & True & True & True \\
norm. reward & True & True & True & True & True \\
width & 256 & 256 & 256 & 64 & 64 \\
activation & relu & relu & relu & tanh & tanh \\
weight decay & 2.4e-4 & 2.4e-4 & 2.4e-4 & 0.0 & 0.0 \\
dropout & 0.02 & 0.02 & 0.02 & 0.0 & 0.0 \\
\bottomrule
\end{tabular} \end{table}
We report median values and standard error measurements over ten random seeds.
\subsection{Gymnax} \label{app:implementation-details-gymnax}
\begin{table}[ht]
\centering
\begin{tabular}{|c|c|c|c|}
\hline
\textbf{Hyperparameter} & \textbf{Range} & \textbf{Transformation} & \textbf{Transformed Range} \\
\hline
num. envs & [2, 8] & $2^{x}$ where $x$ is int & \{4, 8, 16, 32, 64, 128, 256\} \\
\hline
num. steps & [2, 8] & $2^{x}$ where $x$ is int & \{4, 8, 16, 32, 64, 128, 256\} \\
\hline
$\lambda$ & [0.0, 1.0] & round to multiple of 0.002 & \{0.0, 0.002, \ldots, 1.0\} \\
\hline
learning rate & [1e-4, 1e-3] & round to multiple of 0.00005 & \{1e-4, 1.5e-4, \ldots, 1e-3\} \\
\hline
max grad. norm. & [0.2, 5.0] & round to multiple of 0.1 & \{0.2, 0.3, \ldots, 5.0\} \\
\hline
num. minibatch & [0, 6] & $2^{x}$ where $x$ is int & \{1, 2, 4, 8, 16, 32, 64\} \\
\hline
update epochs & [1, 10] & round to int & \{1, 2, 3, ..., 10\} \\
\hline
width & [6, 10] & $2^{x}$ where $x$ is int & \{64, 128, 256, 512, 1024\} \\
\hline
\end{tabular} \caption{Hyperparameter search space with transformations}
\label{table:search_space_transformed} \end{table}
We optimize the hyper-parameters for each algorithm for each set of environments using a Bayesian optimization search strategy \citep{snoek2012practical}. Each algorithm has a budget of 100 search steps. We use NVIDIA A100 GPUs. The hyperparameters we search over include learning rate, number of steps, number of environments, GAE $\lambda$, update epochs, number of minibatches, and the maximum gradient norm. We also search over the hidden layer width for Brax-MuJoCo and MinAtar environments. Each hyperparameter has a specific search space and transformation applied during the search. We summarize the search space in \Cref{table:search_space_transformed}.
For the MinAtar environments, the hyper-parameter search spaces are: the number of steps in $[2, 8]$ (transformed to $2^{x}$ where $x$ is the integer part of the sample), GAE $\lambda$ in $[0.0, 1.0]$ (rounded to the nearest multiple of $0.002$), learning rate in $[1e-4, 1e-3]$ (rounded to the nearest multiple of $0.00005$), update epochs in $[1, 10]$ (rounded to the nearest integer), maximum gradient norm in $[0.0, 5.0]$ (rounded to the nearest multiple of $0.1$), number of minibatches in $[0, 6]$ (transformed to $2^{x}$), and hidden layer width in $[6, 10]$ (transformed to $2^{x}$). We set $\gamma$ and the number of environments to fixed values of $0.99$ and $64$, respectively.
For MuJoCo-Brax, we do not search over $\gamma$, the number of environments, or the number of steps. Instead, we set them to fixed values of $0.99$, $2048$, and either $10$ or $5$, respectively. The search spaces for the remaining hyper-parameters use the same ranges as for the MinAtar environments. Further, we only optimize over the Humanoid, Hopper, and Reacher environments for 20 million steps. We test each environment for 50 million steps.
Finally, for Classic Control environments, we employ the same hyperparameter search as for MinAtar, except that we search over the number of environments in $[2, 8]$ (transformed to $2^{x}$ where $x$ is the integer part of the sample) and we do not search over the hidden layer width, instead setting it to a fixed value of $64$.
This strategy allows us to thoroughly explore the hyperparameter space and find values that generalize well across a variety of different tasks. Further it allows us to fairly compare each algorithm. \Cref{table:minatar-hypers,table:brax-mujoco-hypers,table:classic-control-hypers} report the final hyper-parameter values for PPO, VSOP, and A3C.
\begin{table}[ht]
\centering
\caption{PPO, VSOP, A3C, and DPO Hyper-parameters for MinAtar environments.}
\label{table:minatar-hypers}
\begin{tabular}{@{}l|cccc@{}}
\toprule
Parameter & PPO & VSOP & A3C & DPO \\ \midrule
learning rate & 9e-4 & 7.5e-4 & 7e-4 & 1e-3 \\
num. envs & 128 & 128 & 128 & 128 \\
num. steps & 64 & 32 & 4 & 16 \\
GAE $\gamma$ & 0.99 & 0.99 & 0.99 & 0.99 \\
GAE $\lambda$ & 0.70 & 0.82 & 0.87 & 0.70 \\
num. minibatch & 8 & 16 & 2 & 8 \\
update epochs & 10 & 9 & 1 & 6 \\
max grad. norm. & 1.9 & 2.8 & 1.3 & 0.4 \\
width & 512 & 512 & 512 & 256 \\
activation & relu & relu & relu & relu \\
clip $\epsilon$ & 0.2 & N/A & N/A & 0.2 \\
ent. coef. & 0.01 & 0.01 & 0.01 & 0.01 \\
\bottomrule
\end{tabular} \end{table}
\begin{table}[ht]
\centering
\caption{Hyper-parameters for PPO, VSOP, A3C, and DPO algorithms across Brax-MuJoCo environments}
\label{table:brax-mujoco-hypers}
\begin{tabular}{@{}l|cccc@{}}
\toprule
Parameter & PPO & VSOP & A3C & DPO \\ \midrule
learning rate & 4.5e-4 & 1e-4 & 7e-4 & 2e-4 \\
num. envs & 2048 & 2048 & 2048 & 2048 \\
num. steps & 10 & 10 & 5 & 10 \\
GAE $\gamma$ & 0.99 & 0.99 & 0.99 & 0.99 \\
GAE $\lambda$ & 0.714 & 1.0 & 0.97 & 0.942 \\
num. minibatch & 32 & 64 & 2 & 32 \\
update epochs & 3 & 2 & 1 & 6 \\
max grad. norm. & 3.3 & 3.7 & 1.0 & 0.4 \\
width & 512 & 512 & 128 & 512 \\
activation & relu & relu & relu & relu\\
clip $\epsilon$ & 0.2 & N/A & N/A & 0.2 \\
ent. coef. & 0.0 & 0.0 & 0.0 & 0.0 \\
\bottomrule
\end{tabular} \end{table}
\begin{table}[ht]
\centering
\caption{Hyper-parameters for PPO, VSOP, A3C, and DPO algorithms across Classic Control environments}
\label{table:classic-control-hypers}
\begin{tabular}{@{}l|cccc@{}}
\toprule
Parameter & PPO & VSOP & A3C & DPO \\ \midrule
learning rate & 1e-3 & 8.5e-4 & 5.5e-4 & 1e-3 \\
num. envs & 8 & 16 & 8 & 4 \\
num. steps & 8 & 64 & 4 & 4 \\
GAE $\gamma$ & 0.99 & 0.99 & 0.99 & 0.99 \\
GAE $\lambda$ & 0.54 & 0.58 & 0.13 & 1.0 \\
num. minibatch & 8 & 16 & 8 & 1 \\
update epochs & 3 & 8 & 1 & 10 \\
max grad. norm. & 3.4 & 1.9 & 3.8 & 5.0 \\
width & 64 & 64 & 64 & 64 \\
activation & tanh & tanh & tanh & tanh \\
clip $\epsilon$ & 0.2 & N/A & N/A & 0.2 \\
ent. coef. & 0.01 & 0.01 & 0.01 & 0.01 \\
\bottomrule
\end{tabular} \end{table}
All reported results for MinAtar, Classic Control, and MuJoCo-Brax respectively are given by mean values and 68\% confidence intervals over 20 random seeds. During tuning we use 2 random seeds and for testing we use a different set of 20 random seeds, as per the guidance of \citet{eimer2023hyperparameters}.
\section{Additional Results} \label{app:additional-results}
\subsection{Comparing the effects of asynchronous parallelization and Thompson sampling} \label{app:exploration}
When tuning on the MuJoCo-Brax environment, we found that the positive effect of Thompson sampling on performance became diminished. In the MuJoCo-Brax setting we used asynchronous parallelization with 2048 environments and just 10 steps per environment for 20480 steps per model update, whereas in the Gymnasium setting we use just 1 environment and 2048 steps per update. \Cref{fig:explore} summarizes an investigation to see if parallelization and/or update frequency mitigates the positive effects of Thompson sampling. This investigation is still ongoing and we will leave it for follow-up work. We do see that Thompson sampling is necessary in the single-environment setting: red-solid vs red-dashed lines. We also see that decreasing the update frequency and increasing parallelization seem to yield better results when no dropout is applied. This can be seen by comparing the smaller difference between solid and dashed purple lines (256 threads, 32768 steps per update) with the larger difference between solid and dashed orange lines (16 threads, 2048 steps per update). This is a progressive trend as we move through red, orange, yellow, green, blue, and purple. The trend is stable but more pronounced as we decrease the mini-batch size.
\begin{figure}
\caption{Mini-batch Size 64}
\label{fig:halfcheetah-64}
\caption{Mini-batch Size 128}
\label{fig:halfcheetah-128}
\caption{Mini-batch Size 256}
\label{fig:halfcheetah-256}
\caption{Mini-batch Size 64}
\label{fig:humanoid-64}
\caption{Mini-batch Size 128}
\label{fig:humanoid-128}
\caption{Mini-batch Size 256}
\label{fig:humanoid-256}
\caption{
Investigating the connection between dropout and asynchronous parallelization. Top row, HalfCheetah-v4, Bottom row, Humanoid-v4. Solid lines, VSOP. Dashed lines, VSOP without dropout. Red: 1 thread, 2048 steps. Orange: 16 threads, 128 steps. Yellow: 32 threads, 128 steps. Green: 64 threads, 128 steps. Blue: 128 threads, 128 steps. Purple: 256 threads, 128 steps.
}
\label{fig:explore}
\end{figure}
\subsection{Spectral norm and Thompson sampling improve PPO} \label{app:ppo} \label{sec:exp-gymnasium-ppo} Interestingly, we see this same trend when applying spectral normalization and dropout to PPO. In \Cref{fig:mujoco-ppo} we compare VSOP to the original PPO, and our own implementation that adds Thompson sampling and spectral normalization, VSPPO. In \Cref{fig:mujoco-vsppo} we examine how Thompson sampling and spectral normalization affect PPO.
\begin{figure}
\caption{MuJoCo continuous control benchmark comparison to PPO}
\label{fig:mujoco-ppo}
\end{figure} \begin{figure}
\caption{MuJoCo continuous control benchmark examining the effect of Thompson sampling and spectral normalization on PPO.}
\label{fig:mujoco-vsppo}
\end{figure}
\end{document} |
\begin{document}
\title[Note on expanding implicit functions]{Note on expanding implicit functions\\into formal power series by means of\\multivariable Stirling polynomials} \author{Alfred Schreiber} \address{Department of Mathematics\\
University of Flensburg\\
Auf dem Campus 1\\
24943 Flensburg, Germany}
\email{info@alfred-schreiber.de}
\begin{abstract} Starting from the representation of a function $f(x,y)$ as a formal power series with Taylor coefficients $f_{m,n}$, we establish a formal series for the implicit function $y=y(x)$ such that $f(x,y)=0$ and the coefficients of the series for $y$ depend exclusively on the $f_{m,n}$. The solution to this problem provided here relies on using partial Bell polynomials and their orthogonal companions. \end{abstract}
\keywords{Implicit function, Formal power series, Higher derivatives, Inversion, Bell polynomials, Stirling polynomials} \subjclass[2010]{Primary: 13F25, 11B83; Secondary: 05A19, 11C08}
\maketitle
\section{Introduction}
The problem of calculating the higher derivatives of a function $y=y(x)$, which is implicitly given by an equation $f(x,y)=0$, has been discussed several times already in the mathematical literature of the 19th and 20th centuries. L. Comtet has listed some of these papers in the bibliography of his famous monograph \cite{comt1974}. His own contribution to the problem can be found in \cite{comt1968,comt1974,cofi1974}. Recently, the problem has attracted renewed attention, especially with regard to some of its combinatorial aspects. The results in \cite{cofi1974} have been subjected to careful analysis by Wilde \cite{wild2008}, who also gives new proofs. Zemel \cite{zeme2019} provides an in-depth combinatorial interpretation for those binomial building blocks that appear in the closed formula he proved for the higher derivatives of $y$; the same author has also achieved a generalization to several variables \cite{zeme2022}.
\section{Preliminaries}
The procedure described in the following for calculating the higher derivatives of an implicit function starts from the problem as formulated by Comtet in \cite[p.\,152--153]{comt1974}. There, for a function $f$ given as a formal power series \[
f(x,y)=\sum_{m,n\geq0}f_{m,n}\frac{x^m y^n}{m!n!} \] (with coefficients $f_{m,n}$ from a fixed commutative field of characteristic zero) Comtet poses the somewhat modified (but equivalent) task of finding a formal power series $y=y(x)=\sum_{n\geq1}y_{n}\frac{x^n}{n!}$ such that \mbox{$f(x,y)=0$.} From this results immediately a representation of the $k$-th derivatives $D^{k}(y)$ $(k=1,2,3,\ldots)$ as \[
D^k(y)=y_k+\sum_{n\geq1}y_{k+n}\frac{x^n}{n!}. \] In order to be able to calculate $y_n=D^n(y)(0)$, we assume $f_{0,0}=0$ and $f_{0,1}\neq0$. Then, by writing
\begin{equation*}
f(x,y)=\sum_{n\geq0}\varphi_{n}\frac{y^n}{n!}, \end{equation*}
where $\varphi_n=\varphi_{n}(x)=\sum_{m\geq0}f_{m,n}\frac{x^m}{m!}$, we see that $f(x,y)=0$ is equivalent to
\begin{equation}\label{eq1}
g(y):=\sum_{n\geq1}\varphi_{n}\frac{y^n}{n!}=-\varphi_0. \end{equation}
The formal power series $g$ is compositionally invertible, since $g(0)=0$ and by assumption $g'(0)=\varphi_1=f_{0,1}+x\cdot\sum(\ldots)\neq0$. Let $\inv{g}$ denote the (unique) inverse of $g$. Then, the implicit function $y$ is obtained from \eqref{eq1} in the form
\begin{equation}\label{eq2}
y=y(x)=\inv{g}(-\varphi_0(x)). \end{equation}
Comtet \cite{comt1974} evaluates this expression using Lagrange's inversion formula and determines the coefficients $y_n$ by collecting the terms in $x^n/n!$ that occur in the process. But this only in principle! In fact, only some few \textit{ad hoc} calculations are performed that yield explicit representations for $y_1,y_2,y_3,y_4$ (see the table on p.\,153). Of course, this does not tell us what the general coefficient $y_n$ looks like.
In the following, we show how the concepts developed in \cite{schr2015,schr2021a, schr2021b} provide a complete insight into the structure of $y_n$ solely as a function of the coefficients of $f(x,y)$. This is done in two reduction steps.
\section{The first reduction step}
In the first step, we determine the $n$-th Taylor coefficient $\inv{g}_n$ of $\inv{g}$:
\begin{equation}\label{eq3}
\inv{g}_n=\inv{g}_n(x):=\left[\frac{y^n}{n!}\right]\inv{g}(y)=A_{n,1}(\varphi_1(x),\ldots,\varphi_n(x)), \end{equation}
where $A_{n,1}$ is the first member of the double-indexed family $A_{n,k}$ of multivariable Stirling polynomials in the indeterminates $X_{1}^{-1},X_1,\ldots,X_{n-k+1}$ with $0\leq k\leq n$; see \cite[Eq.\,(7.2)]{schr2015}. A fundamental (and even characteristic) property of these polynomials is their inverse relationship to the partial Bell polynomials $B_{n,k}$ \cite[Thm.\,5.1]{schr2015}, which states that $\sum_{j=k}^{n}A_{n,j}B_{j,k}=\kronecker{n}{k}$, where $\kronecker{n}{n}=1$, $\kronecker{n}{k}=0$, if $n\neq k$ (Kronecker's symbol). For further information, the reader is referred to the monograph \cite{schr2021b}.
\begin{rem} In \cite{schr2015,schr2021a,schr2021b}, the collective term `Stirling polynomials of the first and second kind' (in several indeterminates) was proposed for $A_{n,k}$ and $B_{n,k}$ because the associated coefficient sums $A_{n,k}(1,\ldots,1)$ and $B_{n,k}(1,\ldots,1)$ turn out to be just the (signed) Stirling numbers of the first and the Stirling numbers of the second kind, respectively. \end{rem}
For our purposes we need the following explicit representation of $A_{n,1}$ as a linear combination of monomial terms \cite[Cor.\,7.2]{schr2015}:
\begin{equation}\label{eq4}
A_{n,1}\mspace{-2mu}=\mspace{-2mu}X_1^{-(2n-1)}\mspace{-20mu}\sum_{\ptsind{2n-2}{n-1}}\mspace{-2mu}\frac{(-1)^{n-1-r_1}(2n-2-r_1)!}{r_2!\dotsm r_n!(2!)^{r_2}\dotsm(n!)^{r_n}}X_1^{r_1}X_2^{r_2}\dotsm X_n^{r_n}. \end{equation}
The sum has to be taken over the set $\ptsind{2n-2}{n-1}$ of all partitions of $2n-2$ elements into $n-1$ non-empty blocks, that is, of all sequences $r_1,r_2,\ldots,r_{n}$ of non-negative integers such that $r_1+r_2+\cdots+r_n=n-1$ and $r_1+2r_2+\cdots+nr_n=2n-2$.
From equations \eqref{eq2} and \eqref{eq3} we now get
\begin{align}
\label{eq5}
y(x)&=\sum_{k\geq1}\inv{g}_{k}\frac{(-\varphi_0(x))^k}{k!}\\
&=\sum_{k\geq1}(-1)^{k}A_{k,1}(\varphi_1(x),\ldots,\varphi_k(x))\frac{\varphi_0(x)^k}{k!}.\notag \end{align}
Using a well-known property of the partial Bell polynomials (see, for instance, \cite[p.\,133]{comt1974}) and observing that $D^j(\varphi_0)(0)=f_{j,0}$ we have
\begin{equation}\label{eq6}
\frac{\varphi_0(x)^k}{k!}=\sum_{n\geq k}B_{n,k}(f_{1,0},\ldots,f_{n-k+1,0})\frac{x^n}{n!}, \end{equation}
and thus from \eqref{eq3} and \eqref{eq5}
\begin{equation}\label{eq7}
y(x)=\sum_{k\geq1}\sum_{n\geq k}(-1)^{k}\inv{g}_{k}(x)B_{n,k}(f_{1,0},\ldots,f_{n-k+1,0})\frac{x^n}{n!}, \end{equation}
where $\inv{g}_k(x)=A_{k,1}(\varphi_1(x),\ldots,\varphi_k(x))$ is well-defined as a formal power series because of $\varphi_1\neq0$. Of course, the term $\inv{g}_k(x)$ hides most of the remaining complexity, which is why we do the following power series `ansatz' in a purely formal way for now: \[
A_{k,1}(\varphi_1(x),\ldots,\varphi_k(x))=\sum_{j\geq0}a_{k,j}\frac{x^j}{j!}. \] With this we obtain from \eqref{eq7}
\begin{align*}
y(x)&=\sum_{n,j\geq0}\sum_{k\geq1}(-1)^{k}a_{k,j}B_{n,k}(f_{1,0},\ldots,f_{n-k+1,0})\frac{x^{n+j}}{n!j!}\\
&=\sum_{m\geq n\geq 0}\binom{m}{n}\left\{\sum_{k\geq1}(-1)^{k}a_{k,m-n}B_{n,k}(f_{1,0},\ldots,f_{n-k+1,0})\right\}\frac{x^m}{m!}. \end{align*}
Since the coefficient of $x^m/m!$ is nonzero if and only if $m\geq n\geq k$, we get
\begin{equation}\label{eq8}
y_m=\sum_{n=1}^{m}\binom{m}{n}\left\{\sum_{k=1}^n(-1)^{k}a_{k,m-n}B_{n,k}(f_{1,0},\ldots,f_{n-k+1,0})\right\}. \end{equation}
This preliminary result is already suitable to calculate the first coefficients.
\begin{exms} Let us consider the cases $m=1$ and $m=2$. --- It follows from \eqref{eq8} $y_1=(-1)^{1}a_{1,0}B_{1,1}(f_{1,0})=-a_{1,0}f_{1,0}$. Observing $A_{1,1}=X_{1}^{-1}$ we thus obtain $a_{1,0}=\inv{g}_1(0)=A_{1,1}(\varphi_1)(0)=\varphi_1(0)^{-1}=f_{0,1}^{-1}$ and hence $y_1=-f_{1,0}f_{0,1}^{-1}$ which corresponds to the familiar identity $y'(x)=-f_{x}f_{y}^{-1}$.
Already for $m=2$ the computational effort increases noticeably. We have
\begin{align*}
y_2&=\binom{2}{1}\sum_{k=1}^1(-1)^{k}a_{k,1}B_{1,k}(f_{1,0},\ldots,f_{2-k,0})\\
&+\binom{2}{2}\sum_{k=1}^2(-1)^{k}a_{k,0}B_{2,k}(f_{1,0},\ldots,f_{3-k,0})\\
&=-2a_{1,1}B_{1,1}(f_{1,0})-a_{1,0}B_{2,1}(f_{1,0},f_{2,0})+a_{2,0}B_{2,2}(f_{1,0}). \intertext{Now recall $B_{2,1}=X_2$, $B_{2,2}=X_1^{2}$, $A_{2,1}=-X_1^{-3}X_2$, and observe that $\inv{g}'_1(x)=-\varphi'_1(x)\varphi_1(x)^{-2}$. This yields }
y_2&=-2\inv{g}'_1(0)f_{1,0}-f_{0,1}^{-1}f_{2,0}+\inv{g}_2(0)f_{1,0}^{2}\\
&=2\frac{\varphi'_1(0)}{\varphi_1(0)^2}f_{1,0}-f_{0,1}^{-1}f_{2,0}-\frac{\varphi_2(0)}{\varphi_1(0)^3}f_{1,0}^{2}\\
&=2f_{0,1}^{-2}f_{1,0}f_{1,1}-f_{0,1}^{-1}f_{2,0}-f_{0,1}^{-3}f_{0,2}f_{1,0}^2, \end{align*}
which of course also follows immediately from $y''(x)=2f_{y}^{-2}f_{x}f_{xy}-f_{y}^{-1}f_{xx}-f_{y}^{-3}f_{yy}f_{x}^2$ if we take $x=0$. \end{exms}
\begin{rem} The number of distinct monomials in $D^n(y)$ grows rapidly; it is 9 for $y_3$, 24 for $y_4$, and 91159 for $y_{15}$. Comtet \cite[p.\,175]{comt1974} established a generating function for this sequence and gave a table with some of its values. See also Comtet/Fiolet \cite{cofi1974} and the correction made by Wilde \cite{wild2008}. \end{rem}
\section{The second reduction step}
In the next and final step, we will show how the general Taylor coefficient $a_{k,l}$ of $\inv{g}_k(x)$ which appears in equation \eqref{eq8} can be represented by a polynomial expression depending solely on the $f_{m,n}$. Since the derivative $D^l$ of $l$-th order is a linear operator for any integer $l\geq0$, we obtain from equation \eqref{eq4}
\begin{align}
\label{eq9}
a_{k,l}&=D^l(a_k)(0)=D^l(A_{k,1}(\varphi_1,\ldots,\varphi_k))(0)\\
&=\sum_{\ptsind{2k-2}{k-1}}\mspace{-2mu}\frac{(-1)^{k-1-r_1}(2k-2-r_1)!}{r_2!\dotsm r_k!(2!)^{r_2}\dotsm(k!)^{r_k}}D^l(\varphi_1^{s_1}\varphi_2^{r_2}\dotsm \varphi_k^{r_k})(0),\notag \end{align} where $s_1:=r_1-2k+1$. We evaluate the term $D^l(\ldots)$ by means of the general Leibniz rule as follows: \begin{align}
\label{eq10}
D^l(\varphi_1^{s_1}\varphi_2^{r_2}\dotsm \varphi_k^{r_k})=\sum_{\substack{j_1+j_2+\cdots+j_k=l \\
j_1,j_2,\ldots,j_k\geq0}}\,
\frac{l!}{j_1!j_2!\cdots j_k!}\;D^{j_1}(\varphi_1^{s_1})D^{j_2}(\varphi_2^{r_2})\cdots D^{j_k}(\varphi_k^{r_k}). \end{align}
Therefore, only the expressions like $D^{j_{\nu}}(\varphi_{\nu}^{r_{\nu}})(0)$ remain to be reduced. Recall that \eqref{eq6} describes the fact that $D^n(\varphi_0^k)(0)=k!B_{n,k}(D^1(\varphi_0)(0),\ldots,D^{n-k+1}(\varphi_0)(0))$ is the $n$-th Taylor coefficient of $\varphi_0(x)^k$. Accordingly
\begin{align}\label{eq11}
D^{j_{\nu}}(\varphi_{\nu}^{r_{\nu}})(0)&=r_{\nu}!B_{j_\nu,r_\nu}(D^1(\varphi_{\nu})(0),\ldots,D^{j_\nu-r_\nu+1}(\varphi_{\nu})(0))\\
&=r_{\nu}!B_{j_\nu,r_\nu}(f_{1,\nu},f_{2,\nu},\ldots,f_{j_\nu-r_\nu+1,\nu}).\notag \end{align}
Finally, we obtain an explicit formula for the coefficients $a_{k,m-n}$ in \eqref{eq8} by putting $l=m-n$ in \eqref{eq9} and \eqref{eq10} and combining this with \eqref{eq11}:
\begin{align}\label{eq12} a_{k,m-n}=&\sum_{\ptsind{2k-2}{k-1}}\mspace{-2mu}\frac{(-1)^{k-1-r_1}(2k-2-r_1)!}{r_2!\dotsm r_k!(2!)^{r_2}\dotsm(k!)^{r_k}}\Biggr\lbrace\sum_{j_1+\cdots+j_k=m-n}\frac{(m-n)!}{j_1!\cdots j_k!}\\
&\quad\times (r_1-2k+1)!B_{j_1,r_1-2k+1}(f_{1,1},f_{2,1},\ldots,f_{j_1-r_1+2k,1})\notag\\
&\quad\times \prod_{\nu=2}^{k}r_\nu!B_{j_\nu,r_\nu}(f_{1,\nu},f_{2,\nu},\ldots,f_{j_\nu-r_\nu+1,\nu})\Biggr\rbrace.\notag \end{align}
In summary, we have reached the following result:
\begin{thm} Under the assumptions made in Section 2 for the function $f(x,y)$ and its Taylor coefficients $f_{m,n}$, the implicit function $y=y(x)$ with $f(x,y)=0$ can be represented as a formal Taylor series whose coefficients are given by the equations \eqref{eq8} and \eqref{eq12} solely as a function of the $f_{m,n}$. \end{thm}
\nocite*
\end{document} |
\begin{document}
\title[Global Lipschitz stability for polygonal inclusions]{Global Lipschitz stability estimates for polygonal conductivity inclusions from boundary measurements }
\author[E.~Beretta et al.]{Elena~Beretta} \address{Dipartimento di Matematica ``Brioschi'', Politecnico di Milano \& New York University Abu Dhabi} \email{eb147@nyu.edu} \author[]{Elisa~Francini} \address{Dipartimento di Matematica e Informatica ``U. Dini'', Universit\`{a} di Firenze} \email{elisa.francini@unifi.it}
\keywords{polygonal inclusions, conductivity equation, stability, inverse problems}
\subjclass[2010]{35R30, 35J25}
\begin{abstract} We derive Lipschitz stability estimates for the Hausdorff distance of polygonal conductivity inclusions in terms of the Dirichlet-to-Neumann map. \end{abstract}
\maketitle
\section{Introduction}
In this paper we establish Lipschitz stability estimates for a certain class of discontinuous conductivities $\gamma$ in terms of the Dirichlet-to-Neumann map.\\ More precisely, we consider the following boundary value problem \begin{equation}\label{conductivity}
\left\{\begin{array}{rcl}
\textrm{ div }((1+(k-1)\chi_{\mathcal{P}})\nabla u) & = & 0\mbox{ in }\Omega\subset\mathbb{R}^2, \\
u & = & \phi \mbox{ on }\partial\Omega,
\end{array}
\right. \end{equation} where $\phi\in H^{1/2}\left(\partial\Omega\right)$, $\mathcal{P}$ is a polygonal inclusion strictly contained in a planar, bounded domain $\Omega$ and $k\neq 1$ is a given, positive constant. \\Our goal is to determine the polygon $\mathcal{P}$ from the knowledge of the Dirichlet-to-Neumann map \begin{equation*}
\Lambda_{\gamma}: H^{1/2}\left(\partial\Omega\right)\to H^{-1/2}\left(\partial\Omega\right) \end{equation*} with \begin{equation*}
\Lambda_{\gamma}(f):=\gamma{\frac{\partial u}{\partial \nu}}\in H^{-1/2}\left(\partial\Omega\right). \end{equation*} This class of conductivity inclusions is quite common in applications, like for example in geophysics exploration, where the medium (the earth) under inspection contains heterogeneities in the form of rough bounded subregions (for example subsurface salt bodies) with different conductivity properties \cite{ZK}.
Moreover, polygonal inclusions represent a class in which Lipschitz stable reconstruction from boundary data can be expected \cite{BdHFV}. In fact, it is well known that the determination of an arbitrary (smooth) conductivity inclusion from the Dirichlet-to-Neumann map is exponentially ill-posed \cite{DiCR}. On the other hand, restricting the class of admissible inclusions to a compact subset of a finite dimensional space regularizes the inverse problem and allows to establish Lipschitz stability estimates and stable reconstructions (see \cite{BV},\cite{BMPS}, \cite{AS}, \cite{H}). In order to show our main result we follow a similar approach as the one in \cite{BdHFV} and take advantage of a recent result obtained by the authors in \cite{BFV17} where they prove Fr\'echet differentiability of the Dirichlet-to-Neumann map with respect to affine movements of vertices of polygons and where they establish an explicit representation formula for the derivative.\\ We would like to mention that our result relies on the knowledge of infinitely many measurements though one expects that finitely many measurements should be enough to determine a polygonal inclusion. In fact, in \cite{BFS} the authors show that if the inclusion is a convex polyhedron, then one suitably assigned current at the boundary of the domain $\Omega$ and the corresponding measured boundary potential are enough to uniquely determine the inclusion (see also \cite{S} for the unique determination of an arbitrary polygon from two appropriately chosen pairs of boundary currents and potentials and also \cite{KY} where a convex polygon is uniquely determined in the case of variable conductivities). Unfortunately in the aforementioned papers, the choice of the current fields is quite special and the proof of uniqueness is not constructive. In fact, to our knowledge, no stability result for polygons from few boundary measurements has been derived except for the local stability result obtained in \cite{BFI}. 
On the other hand, in several applications, like the geophysical one, many measurements are at disposal justifying the use of the full Dirichlet-to-Neumann map, \cite{BCFLM}.\\ The paper is organized as follows: in Section 2 we state our main assumptions and the main stability result. Section 3 is devoted to the proof of our main result and finally, Section 4 is devoted to concluding remarks about the results and possible extensions.
\section{Assumptions and main result}
Let $\Omega\subset{\mathbb{R}}^2$ be a bounded open set with $diam(\Omega)\leq L$. We denote a point in ${\mathbb{R}}^2$ either by $x=(x_1,x_2)$ or by $P$. We assume that $\partial\Omega$ is of Lipschitz class with constants $r_0$ and $K_0>1$, which means that for every point $P$ in $\partial\Omega$ there exists a coordinate system such that $P=0$ and \[\Omega\cap \left([-r_0,r_0]\times[-K_0r_0,K_0r_0]\right)=\left\{(x_1,x_2)\,:\, x_1\in[-r_0,r_0], x_2>\phi(x_1)\right\}\] for a Lipschitz continuous function $\phi$ with Lipschitz norm smaller than $K_0$.
We denote by $dist(\cdot,\cdot)$ the Euclidean distance between points or subsets in ${\mathbb{R}}^2$. Later on we will also define the Hausdorff distance $d_H(\cdot,\cdot)$.
Let ${\mathcal{A}}$ be the set of closed, simply connected, simple polygons $\mathcal{P}\subset \Omega$ such that: \begin{equation}\label{lati} \mathcal{P}\mbox{ has at most }N_0\mbox{ sides each one with length greater than }d_0; \end{equation} \begin{equation}\label{lip}\partial\mathcal{P}\mbox{ is of Lipschitz class with constants }r_0\mbox{ and }K_0,\end{equation} there exists a constant $\beta_0\in (0,\pi/2]$ such that the angle $\beta$ in each vertex of $\mathcal{P}$ satisfies the conditions \begin{equation}\label{angoli}
\beta_0\leq\beta\leq 2\pi-\beta_0\mbox{ and } |\beta-\pi|\geq\beta_0, \end{equation} and \begin{equation}\label{distanza} dist(\mathcal{P},\partial\Omega)\geq d_0. \end{equation}
Notice that we do not assume convexity of the polygon.
Let us consider the problem \begin{equation*}
\left\{\begin{array}{rcl}
\text{\normalfont div}(\gamma\nabla u) & = & 0\mbox{ in }\Omega, \\
u&=&\phi\mbox{ on }\partial\Omega,\\
\end{array}
\right. \end{equation*} where $\phi\in H^{1/2}(\partial\Omega)$ and \begin{equation}\label{gamma} \gamma=1+(k-1)\chi_{\mathcal{P}}, \end{equation} for a given $k>0$, $k\neq 1$ and for $\mathcal{P}\in{\mathcal{A}}$. The constants $k$, $r_0$, $K_0$, $L$, $d_0$, $N_0$ and $\beta_0$ will be referred to as the \textit{a priori data}.\\ In the sequel we will introduce a number of constants depending only on the \textit{a priori data} that we will always denote by $C$. The values of these constants might differ from one line to the other.
Let us consider the Dirichlet to Neumann map \[\begin{array}{rcl}\Lambda_\gamma: H^{1/2}(\partial\Omega)&\to& H^{-1/2}(\partial\Omega)\\
\phi&\to&\gamma{\frac{\partial u}{\partial n}}_{|_{\partial\Omega}},\end{array}\] whose norm in the space of linear operators $\mathcal{L}(H^{1/2}(\partial\Omega), H^{-1/2}(\partial\Omega))$ is defined by
\[\|\Lambda_\gamma\|_*=\sup\left\{\|\Lambda_\gamma\phi\|_{H^{-1/2}(\partial\Omega)}/\|\phi\|_{H^{1/2}(\partial\Omega)}\,:\,\phi\neq 0\right\}. \] \begin{teo}\label{mainteo} Let $\mathcal{P}^0,\mathcal{P}^1\in{\mathcal{A}}$ and let \[\gamma_0=1+(k-1)\chi_{\mathcal{P}^0}\mbox{ and }\gamma_1=1+(k-1)\chi_{\mathcal{P}^1}.\] There exist $\varepsilon_0$ and $C$ depending only on the a priori data such that, if
\[\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\leq \varepsilon_0,\] then $\mathcal{P}^0$ and $\mathcal{P}^1$ have the same number $N$ of vertices $\left\{P_j^0\right\}_{j=1}^N$ and $\left\{P_j^1\right\}_{j=1}^N$ respectively. Moreover,
\begin{equation}\label{stab1} d_{H}\left( \partial \mathcal{P}^{0},\partial \mathcal{P}^{1}\right)\leq C
\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*. \end{equation} \end{teo} \vskip 8truemm \begin{rem} Observe that our stability estimate is a global one. In fact, if $\left \Vert \Lambda _{\gamma_0}-\Lambda _{\gamma_1}\right \Vert _{\ast }>\varepsilon _{0}$, since the following trivial inequality holds \[ d_{H}\left( \partial \mathcal{P}^{0},\partial \mathcal{P}^{1}\right) \leq 2L, \] we have \begin{equation} d_{H}\left( \partial \mathcal{P}^{0},\partial \mathcal{P}^{1}\right) \leq 2L\leq 2L\frac{\left \Vert \Lambda _{\gamma_0}-\Lambda _{\gamma_1}\right \Vert _{\ast }}{ \varepsilon _{0}}. \label{stab2} \end{equation} Therefore, in any case, by \eqref{stab1} and \eqref{stab2} we obtain the global estimate \[ d_{H}\left( \partial \mathcal{P}^{0},\partial \mathcal{P}^{1}\right) \leq \left( C+\frac{2L}{\varepsilon _{0}}\right) \left \Vert \Lambda _{\gamma_0}-\Lambda _{\gamma_1}\right \Vert _{\ast }. \]
\end{rem}
\section{Proof of the main result}
The proof of Theorem \ref{mainteo} follows partially the strategy used in \cite{BdHFV} in the case of the Helmholtz equation.
The first step of the proof is a rough stability estimate for $\|\gamma_0-\gamma_1\|_{L^2(\Omega)}$ which is stated in Section \ref{srozza} and which follows from a result by Clop, Faraco and Ruiz \cite{CFR}. Then, in Section \ref{sgeo}, we show a rough stability estimate for the Hausdorff distance of the polygons. We also show that if $\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*$ is small enough, then the two polygons have the same number of vertices and that the distance between the vertices of $\mathcal{P}^0$ and the vertices of $\mathcal{P}^1$ is small. For this reason it is possible to define a coefficient $\gamma_t$ that goes smoothly from $\gamma_0$ to $\gamma_1$ and the corresponding Dirichlet to Neumann map. We prove that the Dirichlet to Neumann map is differentiable (Section \ref{sFdiff}), its derivative is continuous (Section \ref{sderivcont}) and bounded from below (Section \ref{sboundbelow}). These results finally give the Lipschitz stability estimate of Theorem \ref{mainteo}.
\subsection{A logarithmic stability estimate}\label{srozza}
As in \cite{BdHFV}, we can show that, thanks to Lemma 2.2 in \cite{MP} there exists a constant $\Gamma_0$, depending only on the a priori data, such that, for $i=0,1$, \begin{equation}\label{hs}
\|\gamma_i\|_{H^s(\Omega)}\leq \Gamma_0\quad\forall s\in(0,1/2). \end{equation} Due to this regularity of the coefficients, we can apply Theorem 1.1 in \cite{CFR} and obtain the following logarithmic stability estimate: \begin{prop}\label{strozza} There exist $\alpha<1/2$ and $C>1$, depending only on the a priori data, such that
\begin{equation}\label{stimarozzal2}\|\gamma_1-\gamma_0\|_{L^2(\Omega)}\leq C\left|\log\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\right|^{-\alpha^2/C},\end{equation}
if $\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*<1/2$. \end{prop}
\subsection{A logarithmic stability estimate on distance of vertices}\label{sgeo} In this section we want to show that, due to the assumptions on polygons in ${\mathcal{A}}$, estimate \eqref{stimarozzal2} yields an estimate on the Hausdorff distance $d_H(\partial\mathcal{P}^0,\partial\mathcal{P}^1)$ and, as a consequence, on the distance of the vertices of the polygons.
It is immediate to get from \eqref{stimarozzal2} that \begin{equation}\label{diffsimm}
\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|\leq \frac{C}{|k-1|}\left|\log\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\right|^{-\alpha^2/C}.\end{equation} Now, we show that \eqref{diffsimm} implies an estimate on the Hausdorff distance of the boundaries of the polygons.
Let us recall the definition of the Hausdorff distance between two sets $A$ and $B$: \[d_H(A,B)=\max\{\sup_{x\in A}\inf_{y\in B}dist(x,y),\sup_{y\in B}\inf_{x\in A}dist(y,x)\}.\]
The following result holds:
\begin{lm}\label{dadiffahaus} Given two polygons $\mathcal{P}^0$ and $\mathcal{P}^1$ in ${\mathcal{A}}$, we have
\[d_H(\partial\mathcal{P}^0,\partial\mathcal{P}^1)\leq C\sqrt{\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|}\] where $C$ depends only on the a priori data. \end{lm} \begin{proof} Let $d=d_H(\partial\mathcal{P}^0,\partial\mathcal{P}^1)$. Assume $d>0$ (otherwise the thesis is trivial) and let $x_0\in\partial \mathcal{P}^0$ such that $dist(x_0,\partial\mathcal{P}^1)=d$. Then, \[B_d(x_0)\subset {\mathbb{R}}^2\setminus\partial \mathcal{P}^1.\] There are two possibilities:\\ (i) $B_d(x_0)\subset {\mathbb{R}}^2\setminus\mathcal{P}^1$ or \\ (ii) $B_d(x_0)\subset \mathcal{P}^1$.
In case (i), $B_d(x_0)\cap \mathcal{P}^0\subset \mathcal{P}^0\setminus\mathcal{P}^1$. The definition of ${\mathcal{A}}$ implies that, if $d\leq d_0$, there is a constant $C>1$ depending only on the a priori data such that
\[\left|B_d(x_0)\cap \mathcal{P}^0\right|\geq \frac{d^2}{C^2}.\] If $d\geq d_0$ we trivially have
\[\left|B_d(x_0)\cap \mathcal{P}^0\right|\geq \left|B_{d_0}(x_0)\cap \mathcal{P}^0\right|\geq \frac{d_0^2}{C^2},\] hence, in any case, for \[f(d)=\left\{\begin{array}{rl}d^2/C^2&\mbox{ if }d<d_0\\ d_0^2/C^2&\mbox{ if }d\geq d_0\end{array}\right.\] we have
\[f(d)\leq \left|B_d(x_0)\cap \mathcal{P}^0\right|\leq \left|\mathcal{P}^0\Delta\mathcal{P}^1\right| .\]
Now, if $\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|<\frac{d_0^2}{C^2}$, then $f(d)=\frac{d^2}{C^2}\leq \left|\mathcal{P}^0\Delta\mathcal{P}^1\right|$ gives $d\leq C\sqrt{\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|}$. On the other hand, if $\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|\geq \frac{d_0^2}{C^2}$ we have
\[\frac{d^2}{C^2}\leq \frac{L^2}{C^2}\leq \frac{L^2}{C^2}\frac{\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|}{d_0^2/C^2}\]
that gives $d\leq \frac{LC}{d_0}\sqrt{\left|\mathcal{P}^0\Delta\mathcal{P}^1\right|}$.
In case (ii), $B_d(x_0)\subset \mathcal{P}^1$, hence \[B_d(x_0)\setminus \mathcal{P}^0\subset \mathcal{P}^1\setminus\mathcal{P}^0\subset\mathcal{P}^1\Delta\mathcal{P}^0.\] Proceeding as above we have
\[f(d)\leq \left|B_d(x_0)\setminus \mathcal{P}^0\right|\leq \left|\mathcal{P}^0\Delta\mathcal{P}^1\right|\] and the same conclusion follows. \end{proof}
\begin{prop}\label{propvertici} Given the set of polygons ${\mathcal{A}}$ there exist $\delta_0$ and $C$ depending only on the a priori data such that, if for some $\mathcal{P}^0$, $\mathcal{P}^1\in {\mathcal{A}}$ we have \[d_H(\partial\mathcal{P}^0,\partial\mathcal{P}^1)\leq \delta_0,\] then
$\mathcal{P}^0$ and $\mathcal{P}^1$ have the same number $N$ of vertices $\{P^0_i\}_{i=1}^N$ and $\{P^1_i\}_{i=1}^N$, respectively, that can be ordered in such a way that \[dist(P^0_i,P^1_i)\leq Cd_H(\partial\mathcal{P}^0,\partial\mathcal{P}^1) \mbox{ for every }i=1,\ldots,N.\] \end{prop}
\begin{proof} Let us denote by \[\delta=d_H(\partial\mathcal{P}^0,\partial\mathcal{P}^1).\]
Assume $\mathcal{P}^0$ has $N$ vertices and that $\mathcal{P}^1$ has $M$ vertices. We will now show that for any vertex $P^0_i\in \partial\mathcal{P}^0$ there exists a vertex $P^1_j\in\partial\mathcal{P}^1$ such that $dist(P^0_i,P^1_j)<C\delta$. By assumption \eqref{lati}, this implies that $N\leq M$. Interchanging the roles of $\mathcal{P}^0$ and $\mathcal{P}^1$ we get that $M\leq N$, which implies that $M=N$.
Let $P$ be one of the vertices in $\partial\mathcal{P}^0$ and let us consider the side $l^\prime$ of $\partial\mathcal{P}^1$ that is close to $P$. Let us set the coordinate system with origin at the midpoint of $l^\prime$ and let $(\pm l/2,0)$ be the endpoints of $l^\prime$.
By definition of the Hausdorff distance, $P\in \mathcal{U}_\delta=\left\{x\in{\mathbb{R}}^2\,:\,dist(x,l^\prime)\leq\delta\right\}$.
Now we want to show that, due to the assumptions on ${\mathcal{A}}$, for sufficiently small $\delta$ there is a constant $C$ such that the distance between $P$ and one of the endpoints of $l^\prime$ is smaller than $C\delta$. The reason is that if $P$ is too far from the endpoints, assumption \eqref{angoli} on $\mathcal{P}^0$ cannot be true.
Let us choose $\delta$ small enough to have: \begin{equation}\label{cond1}
\delta<K_0 r_0 \end{equation} (this guarantees that the $\delta$-neighborhood of each side of $\mathcal{P}^1$ does not intersect the $\delta$-neighborhood of a non adjacent side), and \begin{equation}\label{cond2}
\delta<\frac{d_0\sin\beta_0}{16}. \end{equation}
Notice that, by assumption \eqref{angoli} and by \eqref{cond1}, the rectangle \[R=\left[-\frac{l}{2}+\frac{2\delta}{\sin\beta_0},\frac{l}{2}-\frac{2\delta}{\sin\beta_0}\right]\times[-\delta,\delta]\] does not intersect the $\delta$-neighborhood of any other side of $\mathcal{P}^1$.
Let us now show that $P$ cannot be contained in a slightly smaller rectangle \[R^\prime=\left[-\frac{l}{2}+\lambda,\frac{l}{2}-\lambda\right]\times[-\delta,\delta],\] where $\lambda=\frac{6\delta}{\sin\beta_0}$.
Let us assume by contradiction that $P\in R^\prime$ and consider the two sides of $\partial\mathcal{P}^0$ with an endpoint at $P$. These sides have length greater than $d_0$, hence they intersect $\partial B_{\lambda/2}(P)$ in two points $Q_1$ and $Q_2$ in $R$ (because $\lambda/2<\lambda-\frac{2\delta}{\sin\beta_0}$).
Since $\lambda/2>2\delta$ the intersection $\partial B_{\lambda/2}(P)\cap R$ is the union of two disjoint arcs. We estimate the angle of $\mathcal{P}^0$ at $P$ in the two alternative cases:\\ (i) $Q_1$ and $Q_2$ are on the same arc or\\ (ii) $Q_1$ and $Q_2$ are on different arcs.
In case (i), the angle at $P$ is smaller than $\arcsin\left(\frac{4\delta}{\lambda}\right)$ (the angle is smaller than $\arcsin\left(\frac{2(\delta-b)}{\lambda}\right)+\arcsin\left(\frac{2(\delta+b)}{\lambda}\right)$, where $b$ is the $y$-coordinate of $P$, that is maximum for $b=\pm\delta$).
In order for \eqref{angoli} to be true we should have \[\arcsin\left(\frac{4\delta}{\lambda}\right)=\arcsin\left(\frac{2}{3}\sin\beta_0\right)\leq \beta_0\] that is not possible for $\beta_0\in (0,\pi/2)$.
In case (ii), the angle differs from $\pi$ at most by $\arcsin\left(\frac{4\delta}{\lambda}\right)$, which is again too small for \eqref{angoli} to be true.
Since neither of cases (i) and (ii) can be true, it is not possible that $P\in R^\prime$; hence $P\in \mathcal{U}_\delta\setminus R^\prime$, which implies that one of the endpoints of $l^\prime$, let us call it $P^\prime$, is such that \[dist(P,P^\prime)\leq \delta \sqrt{1+\frac{16}{\sin^2\beta_0}}.\] \end{proof} \begin{prop}\label{strozzavert} Under the same assumptions of Theorem \ref{mainteo}, there exist positive constants $\varepsilon_0$, $\alpha$ and $C>1$, depending only on the a priori data, such that, if
\[\varepsilon:=\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*<\varepsilon_0,\] then $\mathcal{P}^0$ and $\mathcal{P}^1$ have the same number $N$ of vertices $\left\{P_j^0\right\}_{j=1}^N$ and $\left\{P_j^1\right\}_{j=1}^N$ respectively. Moreover, the vertices can be ordered so that \begin{equation}\label{stimarozzavertici}
dist\left(P_j^0,P_j^1\right)\leq \omega(\varepsilon) \mbox{ for every }j=1,\ldots,N, \end{equation}
where $\omega(\varepsilon)=C \left|\log \varepsilon\right|^{-\alpha^2/C}$. \end{prop} \begin{proof}It follows by the combination of Proposition \ref{strozza}, Lemma \ref{dadiffahaus} and Proposition \ref{propvertici}.\end{proof}
\subsection{Definition and differentiability of the function $F$}\label{sFdiff}
Let us denote by $\{P^j_i\}_{i=1}^N$ the vertices of polygon $\mathcal{P}^j$ for $j=0,1$ numbered in such a way that $dist(P^0_i,P^1_i)\leq \omega(\varepsilon)\mbox{ for }i=1,\ldots,N$, for $\omega(\varepsilon)$ as in Proposition \ref{strozzavert}, and the segment $P^j_iP^j_{i+1}$ is a side of $\mathcal{P}^j$ for $j=0,1$ and $i=1,\ldots,N$.
Let us consider a deformation from $\mathcal{P}^0$ to $\mathcal{P}^1$: for $t\in[0,1]$ let \[P_i^t=P^0_i+tv_i,\mbox{ where }v_i=P^1_i-P^0_i,\mbox{ for }i=1,\ldots,N\] and denote by $\mathcal{P}^t$ the polygon with vertices $P^t_j$ and sides $P^t_jP^t_{j+1}$.
Let $\gamma_t=1+(k-1)\chi_{\mathcal{P}^t}$ and let $\Lambda_{\gamma_t}$ be the corresponding DtoN map.
As we proved in \cite[Corollary 4.5]{BFV17} the DtoN map $\Lambda_{\gamma_t}$ is differentiable with respect to $t$.
The function \[F(t,\phi,\psi)=<\Lambda_{\gamma_t}(\phi), \psi>,\] for $\phi,\psi\in H^{1/2}(\partial\Omega)$, is a differentiable function from $[0,1]$ to ${\mathbb{R}}$ and we can write explicitly its derivative.
Let $u_t,v_t\in H^1(\Omega)$ be the solutions to \[ \left\{\begin{array}{rcl}
\text{\normalfont div}(\gamma_t\nabla u_t) & = & 0\mbox{ in }\Omega, \\
u_t&=&\phi\mbox{ on }\partial\Omega,\\
\end{array}
\right. \mbox{ and } \left\{\begin{array}{rcl}
\text{\normalfont div}(\gamma_t\nabla v_t) & = & 0\mbox{ in }\Omega, \\
v_t&=&\psi\mbox{ on }\partial\Omega,\\
\end{array}
\right. \] and denote by $u_t^e$ and $v_t^e$ their restrictions to $\Omega\setminus\mathcal{P}^t$ (and by $u_t^i$ and $v_t^i$ their restrictions to $\mathcal{P}^t$).
Let us fix an orthonormal system $(\tau_t, n_t)$ in such a way that $n_t$ represents almost everywhere the outward unit normal to $\partial \mathcal{P}_t$ and the tangent unit vector $\tau_t$ is oriented counterclockwise. Denote by $M_t$ a $2\times 2$ symmetric matrix valued function defined on $\partial\mathcal{P}_t$ with eigenvalues $1$ and $1/k$ and corresponding eigenvectors $\tau_t$ and $n_t$.
Let $\Phi_t^v$ be a map defined on $\partial\mathcal{P}_t$, affine on each side of the polygon and such that \[\Phi_t^v(P_i^t)=v_i\mbox{ for }i=1,\ldots,N.\]
Then, it was proved in \cite[Corollary 2.2]{BFV17} that, for all $t\in[0,1]$, \[\frac{d}{dt}F(t,\phi,\psi)=(k-1)\int_{\partial \mathcal{P}^t}M_t\nabla u_t^e\nabla v_t^e (\Phi_t^v\cdot n_t). \]
\subsection{Continuity at zero of the derivative of $F$}\label{sderivcont}
\begin{lm}\label{lcontder} There exist constants $C$ and $\beta$, depending only on the a priori data, such that
\begin{equation}\label{contder}\left|\frac{d}{dt}F(t,\phi,\psi)-{\frac{d}{dt}F(t,\phi,\psi)}_{|_{t=0}} \right|\leq C\|\phi\|_{H^{1/2}(\partial\Omega)}\|\psi\|_{H^{1/2}(\partial\Omega)}|v|^{1+\beta}t^\beta.\end{equation} \end{lm} \begin{proof}
This result corresponds to Lemma 4.4 in \cite{BFV17}. The dependence on $|v|$ is obtained by refining estimate (3.5) in \cite[Proposition 3.4]{BFV17} to get
\[\|u_t-u_0\|_{H^1(\Omega)}\leq C\|\phi\|_{H^{1/2}(\partial\Omega)}\left|\mathcal{P}^t\Delta\mathcal{P}^0\right|^\theta\leq C_1\|\phi\|_{H^{1/2}(\partial\Omega)}|v|^\theta t^\theta,\] and by noticing that
\[\left|\Phi_t^v\right|\leq C|v|.\] \end{proof}
\subsection{Bound from below for the derivative of $F$}\label{sboundbelow}
In this section we want to obtain a bound from below for the derivative of $F$ at $t=0$. \begin{prop}\label{p3.3} There exist a constant $m_1>0$, depending only on the a priori data, and a pair of functions $\tilde{\phi}$ and $\tilde{\psi}$ in $H^{1/2}(\partial\Omega)$ such that
\begin{equation}\label{tesibasso}\left|{\frac{d}{dt}F(t,\tilde{\phi},\tilde{\psi})}_{|_{t=0}}\right|\geq m_1 |v| \|\tilde{\phi}\|_{H^{1/2}(\partial\Omega)}\|\tilde{\psi}\|_{H^{1/2}(\partial\Omega)}. \end{equation} \end{prop} \begin{proof} Let us first normalize the length of vector $v$ and introduce \[H(\phi,\psi)=\int_{\partial\mathcal{P}_0} M_0\nabla u_0^e\nabla v_0^e\tilde{\Phi}_0^{v}\cdot n_0,\] where
\[\tilde{\Phi}_0^{v}=\Phi_0^{v/|v|}.\]
By linearity, we have that ${\frac{d}{dt}F(t,\phi,\psi)}_{|_{t=0}}=|v|H(\phi,\psi)$.
Let $m_0=\|H\|_*=\sup\left\{\frac{H(\phi,\psi)}{\|\phi\|_{H^{1/2}(\partial\Omega)}\|\psi\|_{H^{1/2}(\partial\Omega)}}\,:\,\phi,\psi\neq 0\right\}$ be the operator norm of $H$, so that
\begin{equation}\label{normaH}\left|H(\phi,\psi)\right|\leq m_0\|\phi\|_{H^{1/2}(\partial\Omega)}\|\psi\|_{H^{1/2}(\partial\Omega)}\mbox{ for every }\phi,\psi\in H^{1/2}(\partial\Omega).\end{equation}
Let $\Sigma$ be an open non empty subset of $\partial\Omega$ and let us extend $\Omega$ to a open domain $\Omega_0=\Omega\cup D_0$ that has Lipschitz boundary with constants $r_0/3$ and $K_0$ and such that $\Sigma$ is contained in $\Omega_0$ (see \cite{AV} for a detailed construction). Let us extend $\gamma_0$ by $1$ in $D_0$ (and still denote it by $\gamma_0$).
We denote by $G_0(x,y)$ the Green function corresponding to the operator $\text{\normalfont div}(\gamma_0 \nabla\cdot)$ and to the domain $\Omega_0$. The Green function $G_0(x,y)$ behaves like the fundamental solution of the Laplace equation $\Gamma(x,y)$ for points that are far from the polygon. For points close to the sides of the polygon but far from its vertices, the asymptotic behaviour of the Green function has been described in \cite[Theorem 4.2]{AV} or \cite[Proposition 3.4]{BF11}: Let $y_r=Q+rn(y_0)$, where $Q$ is a point on $\partial\mathcal{P}^0$ whose distance from the vertices of the polygons is greater than $r_0/4$ and $n(y_0)$ is the unit outer normal to $\partial\mathcal{P}^0$. Then, for small $r$, \begin{equation}\label{stimagreen}
\left\|G_0(\cdot,y_r)-\frac{2}{k+1}\Gamma(\cdot,y_r)\right\|_{H^1(\Omega_0)}\leq C, \end{equation} where $C$ depends only on the a priori data.
Let us take $u_0=G_0(\cdot,y)$ and $v_0=G_0(\cdot,z)$ for $y,z\in K$, where $K$ is a compact subset of $D_0$ such that $dist(K,\partial\Omega)\geq r_0/3$ and $K$ contains a ball of radius $r_0/3$. The functions $u_0$ and $v_0$ are both solutions to the equation $\text{\normalfont div}(\gamma_0\nabla\cdot)=0$ in $\Omega$.
Define the function \[S_0(y,z)=\int_{\partial\mathcal{P}_0}M_0\nabla G_0(\cdot,y)\nabla G_0(\cdot,z)(\tilde{\Phi}_0^v\cdot n_0)\] that, for fixed $z$, solves $\text{\normalfont div}(\gamma_0 \nabla S_0(\cdot,z))=0$ in $\Omega\setminus\mathcal{P}^0$ and, for fixed $y$ it solves $\text{\normalfont div}(\gamma_0 \nabla S_0(y,\cdot))=0$ in $\Omega\setminus\mathcal{P}^0$.
For $y,z\in K$, $S_0(y,z)=H(u_0,v_0)$, hence, by \eqref{normaH} \begin{equation}\label{12.1}
|S_0(y,z)|\leq \frac{C_0m_0}{r_0^2}\mbox{ for }y,z\in K, \end{equation} where $C_0$ depends on the a priori data.
Moreover, by \eqref{stimagreen}, there exist $\rho_0$ and $E$ depending only on the a priori data such that \begin{equation}
\label{13.1}
|S_0(y,z)|\leq E(d_yd_z)^{-1/2} \mbox{ for every } y,z\in \Omega\setminus\left(\mathcal{P}^0 \cup_{i=1}^NB_{\rho_0}(P_i^0)\right), \end{equation} where $d_y=dist(y,\mathcal{P}^0)$.
Since $S_0$ is small for $y,z\in K$ (see \eqref{12.1} and consider $m_0$ small), bounded for $y,z\in \Omega\setminus\mathcal{P}^0$ far from the vertices of the polygon, and since it is harmonic in $\Omega\setminus\mathcal{P}^0$, we can use a three balls inequality on a chain of balls in order to get a smallness estimate close to the sides of the polygon.
To be more specific, let $l_i$ be a side of $\mathcal{P}^0$ with endpoints $P^0_i$ and $P^0_{i+1}$. Let $Q^0_i$ be the midpoint of $l_i$ and let $y_r=Q^0_i+rn_i$ where $n_i$ is the unit outer normal to $\partial\mathcal{P}^0$ at $Q^0_i$ and $r\in(0,K_0r_0)$.
\begin{lm}\label{small} There exist constants $C>1$, $\beta$, and $r_1<r_0/C$ depending only on the a priori data, such that, for $r<r_1$ \begin{equation}
\label{1.3.1}
\left|S_0(y_r,y_r)\right|\leq C \left(\frac{\varepsilon_0}{\varepsilon_0+E}\right)^{\beta\tau^2_r}(\varepsilon_0+E)r^{-1}, \end{equation} where $\varepsilon_0=m_0C_0r_0^{-2}$ and $\tau_r=\frac{1}{\log(1-r/r_1)}$. \end{lm} \begin{proof}For the proof of Lemma \ref{small} see \cite[Proposition 4.3]{BF11} where the estimate of $\tau_r$ is slightly more accurate.\end{proof}
Now, we want to estimate $\left|S_0(y_r,y_r)\right|$ from below. In order to accomplish this, let us take $\rho=\min\{d_0/4,r_0/4\}$ and write \begin{align}\label{6.1}
\left|\frac{S_0(y_r,y_r)}{k-1}\right|\geq
&\left|\int_{\partial\mathcal{P}_0\cap B_\rho(Q_i^0)}M_0\nabla G_0(\cdot,y_r)\nabla G_0(\cdot,y_r)(\tilde{\Phi}_0^v\cdot n_0)\right|\\&-\left|\int_{\partial\mathcal{P}_0\setminus B_\rho(Q_i^0)}M_0\nabla G_0(\cdot,y_r)\nabla G_0(\cdot,y_r)(\tilde{\Phi}_0^v\cdot n_0)\right| \\&:=I_1-I_2.\end{align} The behaviour of the Green function (see \cite{AV}) gives immediately that, for $r<\rho/2$, \begin{equation}\label{6.2} I_2\leq C_1, \end{equation} for some $C_1$ depending only on the a priori data.
In order to estimate $I_1$, we add and subtract $\Gamma(\cdot,y_r)$ to $G_0(\cdot,y_r)$, then by Young inequality, \eqref{stimagreen}, and by the properties of $M_0$, we get \begin{equation}\label{I1}
I_1\geq C_2\left|\int_{\partial\mathcal{P}_0\cap B_\rho(Q_i^0)}\left|\nabla \Gamma(\cdot,y_r)\right|^2(\tilde{\Phi}_0^v\cdot n_0^i)\right|-C_3, \end{equation} where $C_2$ and $C_3$ depend only on the a priori data.
By definition of $\tilde{\Phi}_0^v$ we have
\[\left|\tilde{\Phi}_0^v(x)-\tilde{\Phi}_0^v(Q_i^0)\right|\leq C_4|x-Q^0_i|,\] so, by adding and subtracting $\Phi^v_0(Q_i^0)$ into the integral of \eqref{I1}, we can write \begin{align*}
\left|\int_{\partial\mathcal{P}_0\cap B_\rho(Q_i^0)}\left|\nabla \Gamma(\cdot,y_r)\right|^2(\tilde{\Phi}_0^v\cdot n_0^i)\right|\geq
& \,\overline{\alpha} \int_{\partial\mathcal{P}_0\cap B_\rho(Q_i^0)}\left|\nabla \Gamma(\cdot,y_r)\right|^2\\&-
C_4\int_{\partial\mathcal{P}_0\setminus B_\rho(Q_i^0)}\left|\nabla \Gamma(\cdot,y_r)\right|^2|x-Q^0_i|, \end{align*}
where $\overline{\alpha}=|\tilde{\Phi}_0^v(Q_i^0)\cdot n_0^i|$. By straightforward calculations one can see that \begin{equation}\label{8.2}
\int_{\partial\mathcal{P}_0\cap B_\rho(Q_i^0)}\left|\nabla \Gamma(\cdot,y_r)\right|^2\geq \frac{C_5}{r} \end{equation} and \begin{equation}\label{9.1}
\int_{\partial\mathcal{P}_0\setminus B_\rho(Q_i^0)}\left|\nabla \Gamma(\cdot,y_r)\right|^2|x-Q^0_i| \leq C_6\left|\log (\rho/r)\right|. \end{equation} By putting together \eqref{6.1}, \eqref{6.2}, \eqref{8.2} and \eqref{9.1}, we get \begin{equation}\label{9.1star}
\left|S_0(y_r,y_r)\right|\geq \frac{C_6 \overline{\alpha}}{r}-C_7\left|\log (\rho/r)\right|-C_8.\end{equation}
By comparing \eqref{1.3.1} and \eqref{9.1star} we get \begin{equation}\label{9.2}
C_6\overline{\alpha}\leq C \left(\frac{\varepsilon_0}{\varepsilon_0+E}\right)^{\beta\tau^2_r}(\varepsilon_0+E) +C_7r|\log(\rho/r)|+C_8r. \end{equation} By an easy calculation one can see that $\beta\tau_r^2\geq r^2/C_9$, hence \begin{equation}\label{9.3} C_6\overline{\alpha}\leq C \left(\frac{\varepsilon_0}{\varepsilon_0+E}\right)^{r^2/C_9}(\varepsilon_0+E) +C_{10}\sqrt{r}. \end{equation}
By choosing $r=\left|\log\left(\frac{\varepsilon_0}{\varepsilon_0+E}\right)\right|^{-1/4}$ and recalling that $\varepsilon_0=C_0m_0r_0^{-2}$ we have
\[|\tilde{\Phi}_0^v(Q_i^0)\cdot n_0^i|=\overline{\alpha}\leq \omega_0(m_0),\] where $\omega_0(t)$ is an increasing concave function such that $\lim_{t\to 0^+}\omega_0(t)=0$.
This estimate can also be obtained for $\tilde{\Phi}_0^v(y)\cdot n_0^i$ for every $y\in B_{\rho}(Q_i^0)\cap l_i$. Since $\tilde{\Phi}_0^v$ is linear on the bounded side $l_i$,
\[|\tilde{\Phi}_0^v(y)\cdot n_0^i|\leq\omega_0(m_0)\quad\quad \mbox{for every }y\in l_i,\] and, in particular
\begin{equation}\label{vec1}\left|\frac{v_i}{|v|}\cdot n_0^i\right|=|\tilde{\Phi}_0^v(P_i)\cdot n_0^i|\leq\omega_0(m_0)\end{equation} Repeating the same argument on the adjacent side, $l_{i+1}$, containing $P_i$ we obtain in particular that \begin{equation}\label{vec2}
\left|\frac{v_i}{|v|}\cdot n_0^{i+1}\right|=|\tilde{\Phi}_0^v(P_i)\cdot n_0^{i+1}|\leq\omega_0(m_0)\end{equation} Then, there exists a constant $C>0$ depending on the a priori constants only such that
\[\left|\frac{v_i}{|v|}\right|\leq C\omega_0(m_0)\] and since one can apply the same procedure on each side of the polygon we have
\[\left|\frac{v_i}{|v|}\right|\leq C\omega_0(m_0)\mbox{ for }i=1,\ldots,N\]
that yields \[1\leq NC\omega_0(m_0)\Rightarrow m_0\geq \omega_0^{-1}(1/CN).\] By definition of the operator norm of $H$, there exist $\tilde{\phi}$ and $\tilde{\psi}$ in $H^{1/2}(\partial\Omega)$ such that
\[|H(\tilde{\phi},\tilde{\psi})|\geq \frac{m_0}{2}\|\tilde{\phi}\|_{H^{1/2}(\partial\Omega)}\|\tilde{\psi}\|_{H^{1/2}(\partial\Omega)}\] and \eqref{tesibasso} is true for $m_1=\frac{\omega_0^{-1}(1/CN)}{2}$.
\end{proof} \begin{rem} Note that the lower bound for the derivative of $F$ in Proposition \ref{p3.3} holds for functions $\tilde{\phi}$ and $\tilde{\psi}$ with compact support on an open portion of $\partial\Omega$. \end{rem}
\subsection{Lipschitz stability estimate} In this section we conclude the proof of Theorem \ref{mainteo}. Let $\tilde{\phi}$ and $\tilde{\psi}$ be the functions that satisfy \eqref{tesibasso} in Proposition \ref{p3.3}.
By \eqref{tesibasso} and by \eqref{contder} we have
\begin{eqnarray*}\label{fine}
\left|<\left(\Lambda_{\gamma_1}-\Lambda_{\gamma_0}\right)(\tilde{\phi}),\tilde{\psi}>\right|\!\!&=\!\!&\left|F(1,\tilde{\phi},\tilde{\psi})-F(0,\tilde{\phi},\tilde{\psi})\right|=\left|\int_0^1\frac{d}{dt}F(t,\tilde{\phi},\tilde{\psi})dt\right|\\
\!\! &\geq\!\!& \left|\frac{d}{dt}F(t,\tilde{\phi},\tilde{\psi})_{|_{t=0}}\right| \!-\!\int_0^1\!\left| \frac{d}{dt}F(t,\tilde{\phi},\tilde{\psi})-\frac{d}{dt}F(t,\tilde{\phi},\tilde{\psi})_{|_{t=0}}\right|dt\\
&\geq&\left(m_1-C|v|^\beta\right)|v|\|\tilde{\phi}\|_{H^{1/2}(\partial\Omega)}\|\tilde{\psi}\|_{H^{1/2}(\partial\Omega)}, \end{eqnarray*} that implies \begin{equation}\label{fine2}
\varepsilon=\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\geq \left(m_1-C|v|^\beta\right)|v|. \end{equation} From \eqref{stimarozzavertici}, since
$|v|\leq N\max_jdist(P^0_j,P^1_j)$ it follows that there exists $\varepsilon_0>0$ such that, if
\[\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\leq\varepsilon_0,\] then
\[\left(m_1-C|v|^\beta\right)\geq m_1/2\] and
\[|v|\leq \frac{2}{m_1}\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*.\] Finally, since
\[d_{H}\left( \partial \mathcal{P}^{0},\partial \mathcal{P}^{1}\right)\leq C| v|\]
the claim follows.
$\square$
Finally, as a byproduct of Theorem \ref{mainteo} and of Proposition \ref{propvertici} we have the following
\begin{coro} Let $\mathcal{P}^0,\mathcal{P}^1\in{\mathcal{A}}$ and let \[\gamma_0=1+(k-1)\chi_{\mathcal{P}^0}\mbox{ and }\gamma_1=1+(k-1)\chi_{\mathcal{P}^1}.\] There exist $\varepsilon_0$ and $C$ depending only on the a priori data such that, if
\[\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\leq \varepsilon_0,\] then $\mathcal{P}^0$ and $\mathcal{P}^1$ have the same number $N$ of vertices $\left\{P_j^0\right\}_{j=1}^N$ and $\left\{P_j^1\right\}_{j=1}^N$ respectively. Moreover, the vertices can be ordered so that
\begin{equation} dist\left(P_j^0,P_j^1\right)\leq C
\|\Lambda_{\gamma_0}-\Lambda_{\gamma_1}\|_*\quad \mbox{ for every }j=1,\ldots,N. \end{equation} \end{coro}
\section{Final remarks and extensions} We have derived Lipschitz stability estimates for polygonal conductivity inclusions in terms of the Dirichlet-to-Neumann map using differentiability properties of the Dirichlet-to-Neumann map. \\ The result extends also to the case where finitely many conductivity polygonal inclusions are contained in the domain $\Omega$ assuming that they are at controlled distance one from the other and from the boundary of $\Omega$.\\ We expect that the same result holds also when only local data are available. In fact, as we observed at the end of Proposition 3.6, the lower bound for the derivative of $F$ is obtained using solutions with compact support in an open subset of $\partial\Omega$ and a rough stability estimate of the Hausdorff distance of polygons in terms of the local Dirichlet-to-Neumann map could be easily derived following the ideas contained in \cite{MR}.\\ Finally, it is relevant for the geophysical application we have in mind to extend the results of stability and reconstruction to the 3-D setting possibly considering an inhomogeneous and/or anisotropic medium. This case is not at all straightforward since differentiability properties of the Dirichlet-to-Neumann map in this case are not known.
\textbf{Acknowledgment}
The paper was partially supported by GNAMPA - INdAM.
\end{document} |
\begin{document}
\global\long\def\mathbf{N}{\mathbf{N}} \global\long\def\mathbf{Z}{\mathbf{Z}} \global\long\def\mathbf{R}{\mathbf{R}} \global\long\def{\color{blue}\mathcal{B}}{{\color{blue}\mathcal{B}}} \global\long\def{\color{red}\mathcal{R}}{{\color{red}\mathcal{R}}} \global\long\def\mathcal{Q}{\mathcal{Q}} \global\long\def{\color{magenta}??}{{\color{magenta}??}} \global\long\def\text{if }{\text{if }} \global\long\def\text{ and }{\text{ and }} \global\long\def\text{ and}{\text{ and}} \global\long\def\text{otherwise}{\text{otherwise}} \global\long\def\mathcal{A}{\mathcal{A}} \global\long\def\mathcal{C}{\mathcal{C}} \global\long\def\mathcal{F}{\mathcal{F}} \global\long\def\textnormal{\ensuremath{\frownie}}{\textnormal{\ensuremath{\frownie}}} \global\long\def\operatorname{Im}{\operatorname{Im}} \global\long\def\operatorname{Dom}{\operatorname{Dom}} \global\long\def\mathcomment#1{} \global\long\def\Padded#1{\padded{#1}}
\makeatletter \global\long\def\labelifpresent#1{\ifpresent{#1}{}[\nonumber\@gobbletwo]} \makeatother \title{Ramsey numbers of Boolean lattices} \author{D\'aniel Gr\'osz\thanks{Department of Mathematics, University of Pisa, Pisa. e-mail: \protect\href{mailto:groszdanielpub@gmail.com}{groszdanielpub@gmail.com}} \and Abhishek Methuku\thanks{School of Mathematics, University of Birmingham, Birmingham. e-mail: \protect\href{mailto:abhishekmethuku@gmail.com}{abhishekmethuku@gmail.com}} \and Casey Tompkins\thanks{Discrete Mathematics Group, Institute for Basic Science (IBS), Daejeon. e-mail: \protect\href{mailto:ctompkins496@gmail.com}{ctompkins496@gmail.com}}} \maketitle \begin{abstract} \noindent
The \emph{poset Ramsey number} $R(\mathcal{Q}_{m},\mathcal{Q}_{n})$ is the smallest integer $N$ such that any blue-red coloring of the elements of the Boolean lattice $\mathcal{Q}_{N}$ has a blue induced copy of~$\mathcal{Q}_{m}$ or a red induced copy of~$\mathcal{Q}_{n}$. The \emph{weak poset Ramsey number} $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})$ is defined analogously, with weak copies instead of induced copies. It is easy to see that $R(\mathcal{Q}_{m},\mathcal{Q}_{n})\ge R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})$.
\noindent Axenovich and Walzer~\cite{AxenovichWalzer} showed that $n+2\le R(\mathcal{Q}_{2},\mathcal{Q}_{n})\le2n+2$. Recently, Lu and Thompson~\cite{LuThompson} improved the upper bound to $\frac{5}{3}n+2$. In this paper, we solve this problem asymptotically by showing that $R(\mathcal{Q}_{2},\mathcal{Q}_{n})=n+O(n/\log n)$.
\noindent In the diagonal case, Cox and Stolee~\cite{CoxStolee} proved $R_{w}(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$ using a probabilistic construction. In the induced case, Bohman and Peng~\cite{BohmanPeng} showed $R(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$ using an explicit construction. Improving these results, we show that $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})\ge n+m+1$ for all $m\ge2$ and large~$n$ by giving an explicit construction; in particular, we prove that $R_{w}(\mathcal{Q}_{2},\mathcal{Q}_{n})=n+3$. \end{abstract}
\section{Introduction}
\partitle{Background and definitions.} The classical Ramsey theorem asserts that for any $m$ and $n$, there is an integer~$N$ such that every blue-red edge coloring of the complete graph on $N$ vertices contains a blue clique on $m$ vertices or a red clique on $n$ vertices. Determining the smallest such integer $N$, known as the Ramsey number, is a central problem in combinatorics. More generally, for any two graphs $G$ and $H$, the Ramsey number is the smallest integer $N$ such that every blue-red edge coloring of the complete graph on $N$ vertices contains a red copy of $G$ or a blue copy of $H$. Several natural variations of these problems, such as multicolor Ramsey numbers and hypergraph Ramsey numbers, are major subjects of ongoing research. For further examples, we refer the reader to the surveys~\cite{ConlonFoxSudakov,MubayiSuk}.
In this paper, we will study poset Ramsey numbers. A \emph{partially ordered set} (or a \emph{poset} for short) is a set with an accompanying relation $\le$ which is transitive, reflexive, and antisymmetric. A \emph{Boolean lattice} of dimension~$n$, denoted by $\mathcal{Q}_{n}$, is the power set of $[n]\coloneqq\{1,2,\ldots,n\}$ equipped with the inclusion relation. If $(P,\le)$ and $(Q,\le')$ are posets, then an injection $f:P\to Q$ is \emph{order-preserving} if $f(x)\le'f(y)$ whenever $x\le y$; we say that $f(P)$ is a \emph{weak copy} of~$P$ in~$Q$ and that $P$ is a \emph{weak subposet} of~$Q$. An injection $f:P\to Q$ is an \emph{order-embedding} if $f(x)\le'f(y)$ if and only if $x\le y$; we say that $f(P)$ is an \emph{induced copy} of $P$ in $Q$ and that $P$ is an \emph{induced subposet} of~$Q$.
For posets $P_{1}$ and $P_{2}$, the \emph{(induced) poset Ramsey number} $R(P_{1},P_{2})$ is defined to be the smallest integer $N$ such that every blue-red coloring of the elements of the Boolean lattice $\mathcal{Q}_{N}$ contains an induced copy of $P_{1}$ whose elements are blue or an induced copy of~$P_{2}$ whose elements are red. Similarly, the \emph{weak poset Ramsey number} $R_{w}(P_{1},P_{2})$ is defined to be the smallest integer $N$ such that every blue-red coloring of the elements of the Boolean lattice $\mathcal{Q}_{N}$ contains a weak copy of~$P_{1}$ whose elements are blue or a weak copy of~$P_{2}$ whose elements are red. (For convenience, a copy of poset $P$ all of whose elements are blue is called a blue copy of $P$, and a copy of poset~$P$ all of whose elements are red is called a red copy of~$P$.) It is easy to see that $R(P_{1},P_{2})\ge R_{w}(P_{1},P_{2})$. The focus of this paper is the natural problem when $P_{1}$ and $P_{2}$ are Boolean lattices $\mathcal{Q}_{m}$ and $\mathcal{Q}_{n}$ for $m,n\in\mathbf{N}$. Recently, variants of this problem, such as rainbow poset Ramsey numbers, have been studied in~\cite{Chang.etal,Chenetal,CoxStolee}.
\partitle{Induced poset Ramsey numbers.} For the diagonal poset Ramsey number $R(\mathcal{Q}_{n},\mathcal{Q}_{n}),$ Axenovich and Walzer~\cite{AxenovichWalzer} showed that $2n\le R(\mathcal{Q}_{n},\mathcal{Q}_{n})\le n^{2}+2n$. Walzer~\cite{WalzerThesis} improved the upper bound to $R(\mathcal{Q}_{n},\mathcal{Q}_{n})\le n^{2}+1$. Recently, Lu and Thompson~\cite{LuThompson} further improved it to $R(\mathcal{Q}_{n},\mathcal{Q}_{n})\le n^{2}-n+2$. On the other hand, Cox and Stolee~\cite{CoxStolee} showed that for $n\ge13$, $R_{w}(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$, which implies that $R(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$.
More generally, Axenovich and Walzer~\cite{AxenovichWalzer} showed that $n+m\le R(\mathcal{Q}_{m},\mathcal{Q}_{n})\le mn+n+m$ for any integers $n,m\ge1$. Lu and Thompson~\cite{LuThompson} improved this bound by showing that $R(\mathcal{Q}_{m},\mathcal{Q}_{n})\le\bigl(m-2+\frac{9m-9}{(2m-3)(m+1)}\bigr)n+m+3$ for all $n\ge m\ge4$. See \cite{AxenovichWalzer,CoxStolee,LuThompson,WalzerThesis} for several other interesting results.
For the off-diagonal poset Ramsey number $R(\mathcal{Q}_{2},\mathcal{Q}_{n})$, Axenovich and Walzer~\cite{AxenovichWalzer} showed that $n+2\le R(\mathcal{Q}_{2},\mathcal{Q}_{n})\le2n+2$. Recently, Lu and Thompson~\cite{LuThompson} improved the upper bound by proving that $R(\mathcal{Q}_{2},\mathcal{Q}_{n})\le\frac{5}{3}n+2$. In this paper, we determine $R(\mathcal{Q}_{2},\mathcal{Q}_{n})$ asymptotically by proving the following theorem. \begin{thm} \label{thm:Upperbound}For every $c>2$, there exists an integer $n_{0}$ such that for all $n\ge n_{0}$, we have \[ R(\mathcal{Q}_{2},\mathcal{Q}_{n})\le n+c\frac{n}{\log_{2}n}. \] \end{thm}
Combining \cref{thm:Upperbound} with the lower bound $R(\mathcal{Q}_{2},\mathcal{Q}_{n})\ge n+2$, we obtain that $R(\mathcal{Q}_{2},\mathcal{Q}_{n})$ is asymptotically equal to~$n$. We prove \cref{thm:Upperbound} in \cref{sec:upper}. In fact, it follows from our proof of \cref{thm:Upperbound} that for all $n\ge2$, we have $R(\mathcal{Q}_{2},\mathcal{Q}_{n})\le n+6.14\frac{n}{\log_{2}n}$.
\partitle{Weak poset Ramsey numbers.} A chain of length~$k$ is a poset of~$k$ distinct, pairwise comparable elements and is denoted by~$C_{k}$. Cox and Stolee~\cite{CoxStolee} showed that $R_{w}(C_{k},\mathcal{Q}_{n})=n+k-1$; since $\mathcal{Q}_{m}$~is a weak subposet of~$C_{2^{m}}$, this implies that $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})\le n+2^{m}-1$. The lower bound $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})\ge m+n$ is obtained by a simple ``layered'' coloring of~$\mathcal{Q}_{m+n-1}$ considered by Axenovich and Walzer~\cite{AxenovichWalzer}, which is described as follows. The collection of all subsets of~$[N]$ of a given size~$k$ is called a \emph{layer}. A coloring of~$\mathcal{Q}_{N}$ is \emph{layered} if for every layer, all sets on that layer have the same color. A layered coloring of~$\mathcal{Q}_{m+n-1}$ with $m$~blue layers and $n$~red layers does not contain a (weak) blue copy of~$\mathcal{Q}_{m}$ or a (weak) red copy of~$\mathcal{Q}_{n}$. Therefore, $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})\ge m+n$ (which implies $R(\mathcal{Q}_{m},\mathcal{Q}_{n})\ge m+n$). Despite the work of several researchers, so far this lower bound on $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})$ has not been improved except in the diagonal case: Cox and Stolee~\cite{CoxStolee} showed that $R_{w}(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$ for $n\ge13$ using a probabilistic construction. Recently, in the induced case, Bohman and Peng \cite{BohmanPeng} gave an explicit construction showing the bound $R(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$. Note that these constructions showing $R(\mathcal{Q}_{n},\mathcal{Q}_{n})\ge2n+1$ cannot be layered.
We give an explicit construction which yields a lower bound on $R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})$ for all~$m$ and $n\ge68$, thereby generalizing the results of Bohman and Peng to the weak poset case, and additionally extending their results and those of Cox and Stolee to the off-diagonal case. \begin{thm} \label{thm:Lowerbound}For any $m\ge2$ and $n\ge68$, we have \textup{ \[ R_{w}(\mathcal{Q}_{m},\mathcal{Q}_{n})\ge m+n+1. \] } \end{thm}
Note that \cref{thm:Lowerbound} shows that $R_{w}(\mathcal{Q}_{2},\mathcal{Q}_{n})=n+3$ since $R_{w}(\mathcal{Q}_{2},\mathcal{Q}_{n})\le n+2^{2}-1=n+3$ by the upper bound mentioned earlier.
We prove \cref{thm:Lowerbound} in \cref{subsec:ours-general}. The construction and the proof of \cref{thm:Lowerbound} are simpler if we restrict ourselves to the case of $m=2$ and consider induced subposets rather than weak subposets. Therefore, in order to illustrate the main ideas of our construction, we present a short proof showing the special case $R(\mathcal{Q}_{2},\mathcal{Q}_{n})\ge n+3$ (for $n\ge18$) in \cref{subsec:ours-induced}. We also give a probabilistic construction for $m\ge3$ and $n$ sufficiently large in \cref{subsec:cox stolee} by generalizing a construction of Cox and Stolee~\cite{CoxStolee}.
\section{\label{sec:upper}Upper bound: Proof of \texorpdfstring{\cref{thm:Upperbound}}{Theorem~\ref{thm:Upperbound}}}
Let $k=\left\lfloor c\frac{n}{\log_{2}n}\right\rfloor $. Assume that ${\color{blue}\mathcal{B}},{\color{red}\mathcal{R}}\subset\mathcal{Q}_{n+k}$ such that ${\color{blue}\mathcal{B}}\sqcup{\color{red}\mathcal{R}}=\mathcal{Q}_{n+k}$, and further assume that $\mathcal{Q}_{2}$~is not an induced subposet of ${\color{blue}\mathcal{B}}$, and $\mathcal{Q}_{n}$~is not an induced subposet of~${\color{red}\mathcal{R}}$.
Before continuing with the proof of \cref{thm:Upperbound}, let us provide an outline of the proof.
\emph{Outline of the proof.} We attempt to define an order-embedding $\varphi$ from $\mathcal{Q}_{n}$ into~${\color{red}\mathcal{R}}$ recursively, starting with~$\emptyset$, in such a way that the image of each set only depends on the images of its proper subsets. For every $A\subseteq[n]$, $\varphi(A)$ will be a superset of~$A$, possibly containing some additional elements from $[n+k]\setminus[n]$.
If $\emptyset\in{\color{red}\mathcal{R}}$, then we set $\varphi(\emptyset)=\emptyset$. More generally, in order for $\varphi$ to be order-preserving, for any set $A\in\mathcal{Q}_{n}$, $\varphi(A)$ must be a superset of the images of all proper subsets of~$A$; as long as the minimal set that is a superset of $A$ and also has this property is in~${\color{red}\mathcal{R}}$, we set it as $\varphi(A)$. If instead this minimal set is in~${\color{blue}\mathcal{B}}$, then we proceed to add elements of $[n+k]\setminus[n]$ to it, in an order determined by some arbitrary permutation~$\pi$ of $[n+k]\setminus[n]$, until we obtain a set that is in ${\color{red}\mathcal{R}}$. Throughout this recursive procedure, in addition to the injection~$\varphi$, we construct a function~$\alpha$ where $\alpha(A)$ records the number of elements of $[n+k]\setminus[n]$ we need to include in~$\varphi(A)$ as a result of hitting sets in~${\color{blue}\mathcal{B}}$ while attempting to embed $A$ (and its subsets, during previous steps of the recursion); and another function~$f$, where $f(A)$ records an actual chain of length~$\alpha(A)$, consisting of sets in~${\color{blue}\mathcal{B}}$ that we have encountered while trying to embed $A$ and its subsets.
For any fixed permutation~$\pi$ of $[n+k]\setminus[n]$, the above embedding procedure can only fail if, at some point, as we try to define $\varphi(A)$ for some $A\in\mathcal{Q}_{n}$, we hit a set in~${\color{blue}\mathcal{B}}$, but we have already ``used up'' all $k$ elements of $[n+k]\setminus[n]$, so there are no elements left to add. In this event, we obtain a chain of length $k+1$, contained in~${\color{blue}\mathcal{B}}$. As $\mathcal{Q}_{n}$~is not an induced subposet of ${\color{red}\mathcal{R}}$, the procedure must fail for all $k!$ permutations~$\pi$ of $[n+k]\setminus[n]$. This way, we can obtain a chain of length $k+1$ inside~${\color{blue}\mathcal{B}}$, corresponding to each of these permutations. We show that these $k!$ chains must all be distinct. We then show that the existence of $k!$ distinct chains of length $k+1$ inside ${\color{blue}\mathcal{B}}$ implies that $\mathcal{Q}_{2}$ is an induced subposet of ${\color{blue}\mathcal{B}}$, a contradiction.
Now we continue with the proof of \cref{thm:Upperbound}.
At the core of the proof is \cref{lem:functions}. We will use the following notation: for a chain of sets $\mathcal{C}$ in~$\mathcal{\mathcal{Q}}_{n+k}$ of length~$l$, we denote its sets by $\left(q_{0},q_{1},\ldots,q_{l-1}\right)$ where $q_{0}\subseteq q_{1}\subseteq\ldots\subseteq q_{l-1}$. \begin{claim} \label{lem:functions}Let $\pi:[n+k]\setminus[n]\rightarrow[n+k]\setminus[n]$ be a permutation. There exist $\varphi:\mathcal{Q}_{n}\rightarrow{\color{red}\mathcal{R}}\cup\{\textnormal{\ensuremath{\frownie}}\}$ (where $\textnormal{\ensuremath{\frownie}}$~is an arbitrary element, distinct from the members of~${\color{red}\mathcal{R}}$, and used solely to indicate failure to produce an induced map into~${\color{red}\mathcal{R}}$), $\alpha:\mathcal{Q}_{n}\rightarrow\{0,1,\ldots,k,k+1\}$ and $f:\mathcal{Q}_{n}\rightarrow\mathcal{C}^{\le k+1}({\color{blue}\mathcal{B}})$, where $\mathcal{C}^{\le k+1}({\color{blue}\mathcal{B}})$ is the family of all chains of length at most $k+1$ in~${\color{blue}\mathcal{B}}$, with the following properties:\gdef\labelwidthi{\widthof{\textbf{\textup{L0. }}}} \gdef\labeli{\textbf{\textup{P\arabic{enumi}. }}} \gdef\propertyref#1{P#1} \gdef\refi{\propertyref{\arabic{enumi}}} \begin{enumerate}[label=\labeli, ref=\refi, labelsep=0em, leftmargin=0em, labelwidth=\labelwidthi, itemindent=\labelwidth, align=left] \item \label{enu:phi containment}If $B,A\in\mathcal{Q}_{n}$ and $\varphi(B),\varphi(A)\in{\color{red}\mathcal{R}}$, then $B\subsetneqq A\Longleftrightarrow\varphi(B)\subsetneqq\varphi(A)$. (This implies that if $\textnormal{\ensuremath{\frownie}}\notin\operatorname{Im}\varphi$, then $\mathcal{Q}_{n}$~is an induced subposet of~${\color{red}\mathcal{R}}$.) \item \label{enu:alpha order}If $B\subseteq A\in\mathcal{Q}_{n}$, then $\alpha(B)\le\alpha(A)$. \item \label{enu:alpha phi}If $\alpha(A)=k+1$, then $\varphi(A)=\textnormal{\ensuremath{\frownie}}$. 
Otherwise $\varphi(A)\cap[n]=A$, and $\varphi(A)=A\cup\{\pi(n+\nobreak1),\pi(n+2),\ldots,\pi(n+\alpha(A))\}$. \item \label{enu:f chain}For every $A\in\mathcal{Q}_{n}$, $f(A)=\left(f(A)_{0},f(A)_{1},\ldots,f(A)_{\alpha(A)-1}\right)$ is a chain in~${\color{blue}\mathcal{B}}$ of length $\alpha(A)$ with the property that $f(A)_{i}\setminus[n]=\{\pi(n+1),\pi(n+2),\ldots,\pi(n+i)\}$. \item \label{enu:f subset}If $A\in\mathcal{Q}_{n}$ such that $1\le\alpha(A)\le k$, then $f(A)_{\alpha(A)-1}\subseteq\varphi(A)$. (In fact this implies that $f(A)_{\alpha(A)-1}\subsetneqq\varphi(A)$, since the elements of~$f(A)$ are in~${\color{blue}\mathcal{B}}$, while $\varphi(A)$ is in~${\color{red}\mathcal{R}}$. We do not use this observation.) \end{enumerate} \gdef\lastproperty{\propertyref{\arabic{enumi}}} \end{claim}
\begin{proof} We construct the functions $\varphi$, $\alpha$ and~$f$ recursively, and simultaneously prove the above properties by induction: we set the values of these functions on a set $A\in\mathcal{Q}_{n}$ in such a way that they only depend on the values of the functions on proper subsets of~$A$. (This includes the case of $A=\emptyset$ where no proper subsets exist, which we do not treat in a special way for most of the proof. One can also consider the proof as a recursion and induction on the size of the set $A$.) Let us fix an $A\in\mathcal{Q}_{n}$. Now we will define the values $\varphi(A)$, $\alpha(A)$ and $f(A)$, and then prove that \propertyref{1} to \lastproperty{} hold for this set~$A$ under the assumption that they hold for every proper subset of~$A$.
If there exists a $B\subsetneqq A$ such that $\varphi(B)=\textnormal{\ensuremath{\frownie}}$, then we pick such a set~$B$ arbitrarily, and set $\varphi(A)=\textnormal{\ensuremath{\frownie}}$, $\alpha(A)=k+1$ and $f(A)=f(B)$. Otherwise let \begin{align*} \beta & =\min\bigl\{ i\in\{0,1,\ldots,k\}:\left(\forall B\subsetneqq A:\alpha(B)\le i\right)\bigr\}\\
& =\begin{cases} {\displaystyle \max_{{\scriptscriptstyle B\subsetneqq A}}\alpha(B)} & \text{if } A\ne\emptyset,\\ 0 & \text{if } A=\emptyset, \end{cases} \end{align*}
and let \[ C=A\cup\{\pi(n+1),\pi(n+2),\ldots,\pi(n+\beta)\}=A\cup\left(\bigcup_{B\subsetneqq A}\varphi(B)\right) \]
(note that $\{\pi(n+1),\pi(n+2),\ldots,\pi(n+\beta)\}=\emptyset$ if $\beta=0$). We get the last equality by applying \ref{enu:alpha phi} to the proper subsets of~$A$. We want $\varphi(A)$ to be a superset of $C$. If $C\in{\color{red}\mathcal{R}}$, we set $\varphi(A)=C$. If $C\in{\color{blue}\mathcal{B}}$, we keep adding $\pi(n+\beta+1),\pi(n+\beta+2),\ldots$ to it, until the set is not in~${\color{blue}\mathcal{B}}$, if possible. That is, let \[ \alpha(A)=\begin{cases} \min\left\{ \begin{gathered}i\in\{\beta,\beta+1,\ldots,k\}:\\ C\cup\{\pi(n+\beta+1),\pi(n+\beta+2),\ldots,\pi(n+i)\}\in{\color{red}\mathcal{R}} \end{gathered} \right\} & \text{if such \ensuremath{i} exists,}\\ k+1 & \text{otherwise}. \end{cases} \] Then let \[ \varphi(A)=\begin{cases} C\cup\{\pi(n+\beta+1),\pi(n+\beta+2),\ldots,\pi(n+\alpha(A))\} & \text{if }\alpha(A)\le k,\\ \textnormal{\ensuremath{\frownie}} & \text{if }\alpha(A)=k+1. \end{cases} \]
Note that in the first case, \begin{gather*} C\cup\{\pi(n+\beta+1),\pi(n+\beta+2),\ldots,\pi(n+\alpha(A))\}\\ =A\cup\{\pi(n+1),\pi(n+2),\ldots,\pi(n+\alpha(A))\}. \end{gather*} Furthermore if $A=\emptyset$, set $f(A)=()$, an empty chain. Otherwise pick a set $B\subsetneqq A$ such that $\alpha(B)=\beta$. We set $f(A)$ to be a chain of length $\alpha(A)$ in~${\color{blue}\mathcal{B}}$: \[ f(A)_{i}=\begin{cases} f(B)_{i} & \text{if }0\le i<\beta,\\ A\cup\{\pi(n+1),\pi(n+2),\ldots,\pi(n+i)\} & \text{if }\beta\le i<\alpha(A). \end{cases} \]
Note that these definitions of $\varphi(A)$, $\alpha(A)$ and~$f(A)$ only depend on the values of these functions for proper subsets of~$A$, so our recursive definitions make sense. It is easy to check that the definitions of~$\varphi$ and~$\alpha$ satisfy \ref{enu:alpha order}~and~\ref{enu:alpha phi}, which together imply \ref{enu:phi containment}. \ref{enu:f chain}~and~\ref{enu:f subset} are also trivially satisfied when $\alpha(A)=0$. If $\alpha(A)=\beta=k+1$, we have defined $f(A)=f(B)$ for some $B\subsetneqq A$ such that $\varphi(B)=\textnormal{\ensuremath{\frownie}}$; then \ref{enu:f chain} follows because it holds for~$B$ by induction, and \ref{enu:f subset} is trivial.
Now we prove \ref{enu:f chain} and \ref{enu:f subset} when $\alpha(A)>0$ and $\beta\le k$. In the case where $\alpha(A)=\beta$ (equivalently if $C\in{\color{red}\mathcal{R}}$, $\varphi(A)=C$ and $\alpha(A)=\alpha(B)$), then $f(A)=f(B)$ is a chain satisfying \ref{enu:f chain} by induction. Since \ref{enu:f subset} holds for~$B$ by induction, we get that $f(A)_{\alpha(A)-1}=f(B)_{\alpha(B)-1}\subset\varphi(B)\subset\varphi(A)$, so \ref{enu:f subset} is satisfied for~$A$ as well. If $\alpha(A)>\beta$, then \ref{enu:f subset} follows from the definitions of $f(A)$ and~$\varphi(A)$. Furthermore, $A\cup\{\pi(n+1),\pi(n+2),\ldots,\allowbreak\pi(n+i)\}\in{\color{blue}\mathcal{B}}$ for $\beta\le i<\alpha(A)$ because $\alpha(A)$~was chosen as the smallest~$i\ge\beta$ such that $A\cup\{\pi(n+1),\pi(n+2),\ldots,\pi(n+i)\}\in{\color{red}\mathcal{R}}$; this is enough to show \ref{enu:f chain} if~$\beta=0$. Finally, if $\alpha(A)>\beta>0$, then $f(A)$~is obtained by concatenating the chains $f(B)$ and $\bigl(A\cup\{\pi(n+1),\pi(n+2),\ldots,\pi(n+i)\}\bigr)_{\beta\le i<\alpha(A)}$. By induction $f(B)$~is a chain satisfying the conditions of \ref{enu:f chain}, and $B$ satisfies \ref{enu:f subset}, so $f(B)_{\beta-1}\subset\varphi(B)$. Using that \ref{enu:alpha phi} holds for~$B$ by induction, we also have $\varphi(B)=B\cup\{\pi(n+1),\pi(n+2),\ldots,\pi(n+\alpha(B))\}\subsetneqq A\cup\{\pi(n+1),\allowbreak\pi(n+2),\ldots,\pi(n+\beta)\}$ (recall that $B\subsetneqq A$ and $\beta=\alpha(B)$). Thus $f(A)$~is indeed a chain satisfying \ref{enu:f chain}. The proof of the properties \propertyref{1} to \lastproperty{} of $\varphi$, $\alpha$ and~$f$ is now complete. \end{proof} For an arbitrary permutation $\pi:[n+k]\setminus[n]\rightarrow[n+k]\setminus[n]$, let $\varphi^{\pi}$, $\alpha^{\pi}$ and $f^{\pi}$ be the maps given by \cref{lem:functions}. 
If $\operatorname{Im}\varphi^{\pi}\subseteq{\color{red}\mathcal{R}}$, then $\varphi^{\pi}$~shows that $\mathcal{Q}_{n}$~is an induced subposet of~${\color{red}\mathcal{R}}$ by \ref{enu:phi containment}. Assume that this is not the case. Then, for some $A\in\mathcal{Q}_{n}$, $\varphi^{\pi}(A)=\textnormal{\ensuremath{\frownie}}$, $\alpha^{\pi}(A)=k+1$ by~\ref{enu:alpha phi}, and $f^{\pi}(A)$~is a chain of length $k+1$ in~${\color{blue}\mathcal{B}}$ by~\ref{enu:f chain}. By \ref{enu:f chain}, we have $\pi(n+i)=\left(f^{\pi}(A)_{i}\setminus f^{\pi}(A)_{i-1}\right)\setminus[n]$ when $1\le i\le\alpha^{\pi}(A)-1$, so if $\alpha^{\pi}(A)=k+1$, then one can recover the permutation $\pi$ from the chain~$f^{\pi}(A)$.
Under our assumption that $\mathcal{Q}_{n}$~is not an induced subposet of~${\color{red}\mathcal{R}}$, we get a distinct chain $f^{\pi}$ of length $k+1$ in~${\color{blue}\mathcal{B}}$ for each of the $k!$ permutations~$\pi$ of $[n+k]\setminus[n]$, with the property that \[ \forall\,0\le i\le k:f_{i}^{\pi}\setminus[n]=\{\pi(n+1),\pi(n+2),\ldots,\pi(n+i)\}. \] We claim that the map $\pi\mapsto\left(f_{0}^{\pi},f_{k}^{\pi}\right)$ is injective. Let $\pi_{1}$ and~$\pi_{2}$ be two different permutations of $[n+k]\setminus[n]$. Let $i=\min\left\{ j\in\{1,\ldots,k\}:\pi_{1}(n+j)\neq\pi_{2}(n+j)\right\} $. Then $\pi_{1}(n+i)\in f_{i}^{\pi_{1}}$, $\pi_{1}(n+i)\notin f_{i}^{\pi_{2}}$, $\pi_{2}(n+i)\in f_{i}^{\pi_{2}}$ and $\pi_{2}(n+i)\notin f_{i}^{\pi_{1}}$, so $f_{i}^{\pi_{1}}$ and $f_{i}^{\pi_{2}}$ are unrelated. So if $f_{0}^{\pi_{1}}=f_{0}^{\pi_{2}}$ and $f_{k}^{\pi_{1}}=f_{k}^{\pi_{2}}$, then ${\color{blue}\mathcal{B}}$~would contain an induced copy of $\mathcal{Q}_{2}$, a contradiction.
Since the map $\pi\mapsto\left(f_{0}^{\pi},f_{k}^{\pi}\right)$ is injective, \[ k!\le\left(2^{n+k}\right)^{2}=2^{2(n+k)}. \]
Approximating the left-hand side: \[ k!>\left(\frac{k}{e}\right)^{k}=2^{k(\log_{2}k-\log_{2}e)}\text{, so} \] \begin{equation} k(\log_{2}k-\log_{2}e)<2(n+k).\label{eq:2(n+k)} \end{equation}
Since $k=\left\lfloor c\frac{n}{\log_{2}n}\right\rfloor $, \begin{equation} k\log_{2}k>\left(c\frac{n}{\log_{2}n}-1\right)\left(\log_{2}c+\log_{2}n-\log_{2}\log_{2}n-1\right)=cn(1-o(1)).\label{eq:cn} \end{equation} Since $c>2$, \eqref{eq:cn} contradicts \eqref{eq:2(n+k)} for sufficiently large~$n$. This completes the proof of \cref{thm:Upperbound}. \begin{remark*} It follows from the above proof that for all $n\ge2$, we have $R(\mathcal{Q}_{2},\mathcal{Q}_{n})\le\nobreak n+\nobreak6.14\frac{n}{\log_{2}n}$. Here we give a sketch of the calculations.
For $c=6.14$, we have $k=\nobreak\bigl\lfloor6.14\frac{n}{\log_{2}n}\bigr\rfloor>5.611\frac{n}{\log_{2}n}$ for every integer $n\ge2$, and therefore we have $k\log_{2}k>5.611\frac{n}{\log_{2}n}\allowbreak\bigl(\log_{2}n\bigl(1-\frac{\log_{2}\log_{2}n}{\log_{2}n}\bigr)+\log_{2}5.611\bigr)\ge2.977n+13.96\frac{n}{\log_{2}n}$. Using \eqref{eq:2(n+k)}, it can be shown that $0.8797k\log_{2}k\le k(\log_{2}k-\log_{2}e)\overset{{\scriptscriptstyle \eqref{eq:2(n+k)}}}{<}2n+12.28\frac{n}{\log_{2}n}$ for every $n\ge2$, contradicting the lower bound on $k\log_{2}k$ shown earlier. \end{remark*}
\section{\label{sec:lower}Lower bounds}
\subsection{\label{subsec:ours-induced}An explicit construction showing $R(\protect\mathcal{Q}_{2},\protect\mathcal{Q}_{n})\ge n+3$}
In this subsection, we prove a special case of \cref{thm:Lowerbound} to illustrate the basic ideas of the construction. The fully general proof of \cref{thm:Lowerbound}, presented in \cref{subsec:ours-general}, is significantly more involved (primarily due to the fact that it is more difficult to deduce properties of a weak map $\mathcal{Q}_{n}\rightarrow\mathcal{Q}_{n+m}$). \begin{thm} \label{prop:m_is_2-1}For $n\ge18$, there exist ${\color{blue}\mathcal{B}},{\color{red}\mathcal{R}}\subset\mathcal{Q}_{n+2}$ such that ${\color{blue}\mathcal{B}}\sqcup{\color{red}\mathcal{R}}=\mathcal{Q}_{n+2}$, $\mathcal{Q}_{2}$~is not an induced subposet of~${\color{blue}\mathcal{B}}$, and $\mathcal{Q}_{n}$~is not an induced subposet of~${\color{red}\mathcal{R}}$. \end{thm}
Let $k=\left\lfloor \frac{n}{2}\right\rfloor $. Let ${\color{blue}\mathcal{B}}\supset\binom{[n+2]}{k}\cup\binom{[n+2]}{k+3}$, with some sets of size $k+1$ which we will add later. Assume for a contradiction that $\mathcal{Q}_{n}$~is an induced subposet of~${\color{red}\mathcal{R}}$. Let $\varphi:\mathcal{Q}_{n}\rightarrow{\color{red}\mathcal{R}}$ be an injection such that $\varphi(A)\subseteq\varphi(B)$ if and only if $A\subseteq B$.
For any maximal chain $\emptyset\subsetneqq A_{1}\subsetneqq\ldots\subsetneqq A_{n-1}\subsetneqq[n]$, the sets in its image satisfy $\varphi(\emptyset)\subsetneqq\varphi(A_{1})\subsetneqq\ldots\subsetneqq\varphi(A_{n-1})\subsetneqq\varphi([n])$, and none of the sets in the image are of size $k$ or $k+3$. So for every $A\subseteq[n]$, \begin{equation}
\left|\varphi(A)\right|=\begin{cases}
\left|A\right| & \text{if }\left|A\right|\le k-1,\\
\left|A\right|+1 & \text{if } k\le\left|A\right|\le k+1,\\
\left|A\right|+2 & \text{if } k+2\le\left|A\right|, \end{cases}\label{eq:levels-1-1} \end{equation}
thus the image of every singleton is a singleton (and the image of the complement of every singleton is the complement of a singleton).
For $a\in[n]$, let $\tilde{\varphi}(a)$ denote the unique element of $\varphi(\{a\})$. The map $\tilde{\varphi}:[n]\rightarrow[n+2]$ is an injection. Note that, for a set $A\subseteq[n]$, $\tilde{\varphi}[A]$ denotes the image of $A$ under $\tilde{\varphi}$, and for a set $B\subseteq[n+2]$, $\tilde{\varphi}^{-1}[B]$ denotes the preimage of $B$ under $\tilde{\varphi}$.
Let $X=\left\{ \tilde{\varphi}(a):a\in[n]\right\} $ and $Y=[n+2]\setminus X=\{y,z\}$. We have $\left|X\right|=n$ and $\left|Y\right|=2$. We claim that for every $A\subseteq[n]$, $\varphi(A)\cap X=\tilde{\varphi}[A]$. Indeed, for every $b\in X$, there is an $a\in[n]$ such that $\tilde{\varphi}(a)=b$, and we have $b=\tilde{\varphi}(a)\in\tilde{\varphi}[A]\Longleftrightarrow a\in A\Longleftrightarrow\{a\}\subseteq A\Longleftrightarrow\{\tilde{\varphi}(a)\}=\varphi(\{a\})\subseteq\varphi(A)\Longleftrightarrow b=\tilde{\varphi}(a)\in\varphi(A)$.
From~\eqref{eq:levels-1-1}, $\varphi(A)$ contains neither $y$ nor $z$ if $\left|A\right|\le k-1$, exactly one of them if $k\le\left|A\right|\le k+1$, and both if $k+2\le\left|A\right|$. \begin{claim} \label{claim:y-1-1}For every set $A\in\binom{[n]}{k}$, $\varphi(A)$ contains the same element of $Y$. \end{claim}
\begin{proof} Assume for a contradiction that some sets of the form $\varphi(A)$ contain $y$, and others contain $z$. Since the Johnson graph -- whose vertices are $\binom{[n]}{k}$, and whose edges connect sets with symmetric difference $2$ -- is connected, there would be two sets $A,B\in\binom{[n]}{k}$ with a symmetric difference of size $2$ such that $y\in\varphi(A)$ and
$z\in\varphi(B)$. Then $\left|A\cup B\right|=k+1$, and $\varphi(A\cup B)$ would contain both $y$ and $z$ as it would have to be a superset of both $\varphi(A)$ and $\varphi(B)$, contradicting that it contains exactly one of $y$ and $z$. \end{proof} Now we specify which sets of $\binom{[n+2]}{k+1}$ are added to~${\color{blue}\mathcal{B}}$ in addition to $\binom{[n+2]}{k}\cup\binom{[n+2]}{k+3}$. Our goal is to add these sets in such a way that for every map $\varphi:\mathcal{Q}_{n}\to\mathcal{Q}_{n+2}$, assuming that $\operatorname{Im}\varphi\subseteq{\color{red}\mathcal{R}}$, and $\varphi$ is an order-embedding, the above observations lead to a contradiction. (The map~$\varphi$, and the variables dependent on it such as $X$, $Y$, $y$ and $z$, are not fixed now; we have to set ${\color{blue}\mathcal{B}}$ in such a way that the existence of any order-embedding $\varphi:\mathcal{Q}_{n}\to{\color{red}\mathcal{R}}$ leads to a contradiction.) For every distinct~$y,z\in[n+2]$, pick a set $C_{y,z}\in\binom{[n+2]}{k+1}$ such that $y\in C_{y,z}$ but $z\notin C_{y,z}$, and $C_{y,z}$ is at a symmetric difference of size at least $4$ from every previously chosen set $C_{y',z'}$, and add $C_{y,z}$ to ${\color{blue}\mathcal{B}}$. We can do this by greedily picking sets~$C_{y,z}$ one-by-one: At each step, we have picked at most $(n+2)(n+1)-1$ sets so far, and each previously picked set~$C_{y',z'}$ blocks at most $1+k(n-k)$ choices (because there are at most that many sets containing $y$ but not $z$ with a symmetric difference of size at most~2 from~$C_{y',z'}$). In total, there are $\binom{n}{k}$ sets $C\in\binom{[n+2]}{k+1}$ that satisfy $y\in C$ and $z\notin C$. For $n\ge18$ and $k=\left\lfloor \frac{n}{2}\right\rfloor $, we have \[ \binom{n}{k}>\bigl((n+2)(n+1)-1\bigr)\bigl(1+k(n-k)\bigr), \] so we can always choose a set $C_{y,z}$ which satisfies the required conditions.
After adding such sets $C_{y,z}$ for every distinct $y,z\in[n+2]$, the resulting family ${\color{blue}\mathcal{B}}$ will not contain a copy of $\mathcal{Q}_{2}$. Indeed, a copy of $\mathcal{Q}_{2}$ in~${\color{blue}\mathcal{B}}$ would have to consist of a set of size~$k$, a set of size $k+3$, and two sets of size $k+1$; but the latter two sets would need to have a symmetric difference of size~$2$.
Now assume for a contradiction that ${\color{red}\mathcal{R}}=2^{[n+2]}\setminus{\color{blue}\mathcal{B}}$ contains an induced copy of~$\mathcal{Q}_{n}$. Consider an arbitrary injection $\varphi:\mathcal{Q}_{n}\rightarrow{\color{red}\mathcal{R}}$, and define $\tilde{\varphi}$, $X$ and $Y=\{y,z\}$ as before, and apply \cref{claim:y-1-1}. We can assume without loss of generality that for every $A\in\binom{[n]}{k}$, we have $y\in\varphi(A)$, and thus $\varphi(A)=\tilde{\varphi}[A]\cup\{y\}$. There is a set $C_{y,z}\in\binom{[n+2]}{k+1}\cap{\color{blue}\mathcal{B}}$ such that $y\in C_{y,z}$, but $z\notin C_{y,z}$. We have $\tilde{\varphi}^{-1}[C_{y,z}]\in{[n] \choose k}$, and \[ \varphi\left(\tilde{\varphi}^{-1}[C_{y,z}]\right)=\left\{ \tilde{\varphi}(a):a\in[n],\tilde{\varphi}(a)\in C_{y,z}\right\} \cup\{y\}=C_{y,z}\in{\color{blue}\mathcal{B}}, \] contradicting that the image of~$\varphi$ is in~${\color{red}\mathcal{R}}$.
\subsection{\label{subsec:ours-general}An explicit construction showing \textmd{\normalsize{}$R_{w}(\protect\mathcal{Q}_{m},\protect\mathcal{Q}_{n})\ge m+n+1$}}
\cref{thm:Lowerbound} will be an immediate consequence of the following \MakeLowercase{\crtcrefnamebylabel{prop:noninduced-lower-general}}. \begin{thm} \label{prop:noninduced-lower-general}Let $n,m\in\mathbf{N}$ such that $m\ge2$ and $n\ge\sqrt{32m+260}+18$. There exist ${\color{blue}\mathcal{B}},{\color{red}\mathcal{R}}\subset\mathcal{Q}_{n+m}$ such that ${\color{blue}\mathcal{B}}\sqcup{\color{red}\mathcal{R}}=\mathcal{Q}_{n+m}$, $\mathcal{Q}_{m}$~is not a weak subposet of~${\color{blue}\mathcal{B}}$, and $\mathcal{Q}_{n}$~is not a weak subposet of~${\color{red}\mathcal{R}}$. \end{thm}
To see that \cref{thm:Lowerbound} follows from \cref{prop:noninduced-lower-general}, notice that we can assume without loss of generality that $n\ge m$, so for $n\ge68$, we have $n\ge\sqrt{32n+260}+18$.
For values of $m$ less than~$67$, the threshold for~$n$ in the hypothesis of~\cref{prop:noninduced-lower-general} is smaller than $68$: for instance, it holds for $m=2$ and $n\ge36$. Also note that in the proof of \cref{lem:modp code}, we use Bertrand's postulate to obtain a prime $N\le p<2(N-1)$. By finding the smallest prime greater than or equal to $N$, one may be able to relax the requirement on $k$ in \cref{lem:modp code}, and thereby extend \cref{prop:noninduced-lower-general} to somewhat smaller values of $n$, for a given $m$.
In the proof of \cref{prop:noninduced-lower-general}, we will use \cref{lem:modp code}, which will follow from \cref{lem:modp subset}. \begin{lem}[Olson \cite{olson}]
\label{lem:modp subset}Let $p$ be a prime, and let $A\subseteq[p]$ such that $\left|A\right|\ge\sqrt{4p-3}$. Then for every $a\in\mathbf{Z}$, there is a subset $B\subseteq A$ such that \[ \sum B\equiv a\pmod p. \] \end{lem}
\begin{lem} \label{lem:modp code}Let $N,k\in\mathbf{N}$ such that $N>3$ and $k\ge\sqrt{8N-15}$. Then there is a constant-weight code $\mathcal{C}\subset\binom{[N]}{k+1}$ such that the symmetric difference between any two sets is of size at least $4$, and the following holds: \begin{numberedstatement} Let $n,m\in\mathbf{N}$ such that $n+m=N$ and $k\le n-\sqrt{8N-15}$. For every $Y\in\binom{[N]}{m}$ and $y\in Y$, there is a set $C\in\binom{[N]\setminus Y}{k}$ such that $C\cup\{y\}\in\mathcal{C}$.\label{eq:modp code statement} \end{numberedstatement} \end{lem}
\begin{proof} By Bertrand's postulate \begin{comment} There are much better estimates for sufficiently large $N$, see \href{https://en.wikipedia.org/wiki/Bertrand\%27s_postulate\#Better_results}{https://en.wikipedia.org/wiki/Bertrand\%27s\_postulate\#Better\_results} \end{comment} , there is a prime~$p$ such that $N\le p<2(N-1)$. Let $d\in[p]$ be a fixed constant, and let \[ \mathcal{C}=\left\{ S\in\binom{[N]}{k+1}:\sum S\equiv d\pmod p\right\} . \]
Let $Y\in\binom{[N]}{m}$ and $y\in Y$. We have to find a set~$C$ that satisfies the condition in the statement. Let $l=\left\lceil \sqrt{8N-15}\right\rceil $. It follows from the conditions of the lemma that $n\ge2l$. Let $[N]\setminus Y=\{x_{1},\ldots,x_{n}\}$ such that $x_{1}<x_{2}<\ldots<x_{n}$. For $i=1,\ldots,l$, let $a_{i}=x_{i}$ and $b_{i}=x_{n-i+1}$. The numbers $b_{i}-a_{i}$ are in $[p]$, and they are different because $b_{1}-a_{1}>b_{2}-a_{2}>\ldots>b_{l}-a_{l}$. Let $a=\sum_{i=1}^{l}a_{i}$. Let $E$ be a subset of $\{x_{l+1},\ldots,x_{n-l}\}$ with $k-l$ elements, and let $e=\sum E$. (It follows from the conditions that $0\le k-l\le n-2l$.)
Since $l\ge\sqrt{8N-15}\ge\sqrt{4p-3}$, by \cref{lem:modp subset}, there is a subset of $\{b_{1}-a_{1},\ldots,b_{l}-a_{l}\}$ such that its sum is congruent to $d-y-e-a$ modulo~$p$. That is, there is a set $I\subseteq[l]$ such that \[ \sum_{i\in I}(b_{i}-a_{i})\equiv d-y-e-a\pmod p. \]
Let \[ C=E\cup\left\{ \Padded{\begin{cases} a_{i} & \text{if } i\notin I\\ b_{i} & \text{if } i\in I \end{cases}:i\in[l]}\right\} . \]
We have \[ \sum\left(C\cup\{y\}\right)=\sum E+\sum_{i=1}^{l}a_{i}+\sum_{i\in I}(b_{i}-a_{i})+y\equiv d\pmod p, \]
so $C\in\mathcal{C}$. \end{proof}
Let $k$ be an integer between $\sqrt{8(n+m)-15}$ and $n-1-\sqrt{8(n+m)-15}$ inclusive. (The conditions of the proposition imply that $\sqrt{8(n+m)-15}+1\le n-1-\sqrt{8(n+m)-15}$, therefore such an integer $k$ exists.) Let ${\color{blue}\mathcal{B}}\supset\binom{[n+m]}{k}\cup\binom{[n+m]}{k+3}\cup\binom{[n+m]}{k+4}\cup\ldots\cup\binom{[n+m]}{k+m+1}\cup\mathcal{C}$, where $\mathcal{C}$~is given by \cref{lem:modp code}, using $n+m$ in the place of~$N$. First we show that the family ${\color{blue}\mathcal{B}}$ does not contain a~$\mathcal{Q}_{m}$. Indeed, any two sets in~${\color{blue}\mathcal{B}}$ of size $k+1$ have a symmetric difference of size at least~$4$. A copy of~$\mathcal{Q}_{m}$ in~${\color{blue}\mathcal{B}}$ would consist of a set of size~$k$, some sets of size $k+3,\ldots,k+m+1$ corresponding to the sets of size~$2$~to~$m$ of the~$\mathcal{Q}_{m}$, and $m$~sets of size $k+1$ corresponding to the singletons of~$\mathcal{Q}_{m}$. The latter $m$~sets would need to have a symmetric difference of size~$2$.
Assume for a contradiction that $\mathcal{Q}_{n}$~is a subposet of~${\color{red}\mathcal{R}}$. Let $\varphi:\mathcal{Q}_{n}\rightarrow{\color{red}\mathcal{R}}$ be an injection that preserves relations.
For any maximal chain $\emptyset\subsetneqq A_{1}\subsetneqq\ldots\subsetneqq A_{n-1}\subsetneqq[n]$, we have $\varphi(\emptyset)\subsetneqq\varphi(A_{1})\subsetneqq\ldots\subsetneqq\varphi(A_{n-1})\subsetneqq\varphi([n])$, and none of the sets in the image are of size $k$ or $k+3,k+4,\ldots,k+m+1$. So for every $A\subseteq[n]$, \begin{equation}
\left|\varphi(A)\right|=\begin{cases}
\left|A\right| & \text{if }\left|A\right|\le k-1,\\
\left|A\right|+1 & \text{if } k\le\left|A\right|\le k+1,\\
\left|A\right|+m & \text{if } k+2\le\left|A\right|, \end{cases}\label{eq:levels} \end{equation}
thus the image of every singleton is a singleton (and the image of the complement of every singleton is the complement of a singleton).
For $a\in[n]$, let $\varphi_{1}(a)$ denote the unique element of $\varphi(\{a\})$, and let $\varphi_{2}(a)$ denote the unique element of $[n+m]\setminus\varphi\left([n]\setminus\{a\}\right)$. Note that, for a set $A\subseteq[n]$, $\varphi_{i}[A]$ denotes the image of $A$ under $\varphi_{i}$, and for a set $B\subseteq[n+m]$, $\varphi_{i}^{-1}[B]$ denotes the preimage of $B$ under $\varphi_{i}$.
The maps $\varphi_{1}$ and $\varphi_{2}$ are injections. Furthermore, for any distinct $a,b\in[n]$, it holds that $\{a\}\subseteq[n]\setminus\{b\}$, so $\{\varphi_{1}(a)\}=\varphi(\{a\})\subseteq\varphi\left([n]\setminus\{b\}\right)=[n+m]\setminus\{\varphi_{2}(b)\}$, so $\varphi_{1}(a)\ne\varphi_{2}(b)$. Now take the sets $\left\{ \varphi_{1}(a),\varphi_{2}(a)\right\} \subseteq[n+m]$ for each $a\in[n]$; these sets have 1~or~2 elements, depending on whether $\varphi_{1}(a)=\varphi_{2}(a)$. Based on the observations in this paragraph, if $a\ne b$, we have \[ \left\{ \varphi_{1}(a),\varphi_{2}(a)\right\} \cap\left\{ \varphi_{1}(b),\varphi_{2}(b)\right\} =\emptyset. \]
Since $\bigcup_{a\in[n]}\{\varphi_{1}(a),\varphi_{2}(a)\}\subseteq[n+m]$, we have $\sum_{a\in[n]}\left|\{\varphi_{1}(a),\varphi_{2}(a)\}\right|\le n+m$, so the number of these sets which have 2~elements is at most~$m$; in other words, \[
\left|\left\{ a\in[n]:\varphi_{1}(a)\ne\varphi_{2}(a)\right\} \right|\le m. \]
Let \begin{align*} D & =\left\{ a\in[n]:\varphi_{1}(a)=\varphi_{2}(a)\right\} ,\\ E & =[n]\setminus D,\\ X_{12} & =\varphi_{1}[D]=\varphi_{2}[D],\\ X_{1} & =\varphi_{1}[E],\\ X_{2} & =\varphi_{2}[E],\\ X_{\emptyset} & =[n+m]\setminus(X_{12}\cup X_{1}\cup X_{2}). \end{align*}
Then \begin{align} [n+m] & =X_{12}\sqcup X_{1}\sqcup X_{2}\sqcup X_{\emptyset},\label{eq:disjoint}\\ \operatorname{Im}\varphi_{1} & =X_{12}\cup X_{1},\\ \operatorname{Im}\varphi_{2} & =X_{12}\cup X_{2},\nonumber \\
\left|X_{12}\right| & =\left|D\right|\ge n-m,\labelifpresent{grey}\label{eq:D ge n-m}\\
\left|X_{1}\right| & =\left|X_{2}\right|=\left|E\right|\le m,\nonumber \\
\left|X_{12}\right|+\left|X_{1}\right| & =\left|D\right|+\left|E\right|=n,\nonumber \\
\left|E\right|+\left|X_{\emptyset}\right| & =(n+m)-\left(\left|X_{12}\right|+\left|X_{1}\right|\right)=m.\label{eq:e-x0} \end{align}
We have that, for every $A\subseteq[n]$, \begin{equation} \forall a\in A:\varphi_{1}(a)\in\varphi(A)\label{eq:a in A} \end{equation}
because $a\in A\Rightarrow\{a\}\subseteq A\Rightarrow\{\varphi_{1}(a)\}=\varphi(\{a\})\subseteq\varphi(A)\Rightarrow\varphi_{1}(a)\in\varphi(A)$. (Equivalently, $\varphi_{1}[A]\subseteq\varphi(A)$.) Symmetrically, \begin{equation} \forall a\in[n]\setminus A:\varphi_{2}(a)\notin\varphi(A)\label{eq:a notin A} \end{equation}
because $a\in[n]\setminus A\Rightarrow A\subseteq[n]\setminus\{a\}\Rightarrow\varphi(A)\subseteq\varphi\left([n]\setminus\{a\}\right)=[n+m]\setminus\{\varphi_{2}(a)\}\Rightarrow\varphi_{2}(a)\notin\varphi(A)$.
For an $A\subseteq[n]$, let \[ F(A)=\left\{ \Padded{\begin{cases} \varphi_{2}(a) & \text{if } a\in A\\ \varphi_{1}(a) & \text{if } a\notin A \end{cases}:a\in E}\right\} \cup X_{\emptyset}. \]
By \eqref{eq:disjoint}, \eqref{eq:a in A} and \eqref{eq:a notin A}, we have \begin{equation} \begin{aligned}\varphi(A)\cap\left([n+m]\setminus F(A)\right) & =\varphi(A)\cap\left(X_{12}\cup\left\{ \Padded{\begin{cases} \varphi_{1}(a) & \text{if } a\in A\\ \varphi_{2}(a) & \text{if } a\notin A \end{cases}:a\in E}\right\} \right)\\
& =\varphi(A)\cap\left\{ \Padded{\begin{cases} \varphi_{1}(a) & \text{if } a\in A\\ \varphi_{2}(a) & \text{if } a\notin A \end{cases}:a\in[n]}\right\} \overset{\eqref{eq:a in A},\eqref{eq:a notin A}}{=}\varphi_{1}[A], \end{aligned} \label{eq:a-phi} \end{equation}
and therefore \begin{equation}
\left|\varphi(A)\cap\left([n+m]\setminus F(A)\right)\right|=\left|\varphi_{1}[A]\right|=\left|A\right|.\label{eq:a-phi-size} \end{equation}
Note that $\left|F(A)\right|\overset{\eqref{eq:e-x0}}{=}m$. The elements of $F(A)$ are the only elements of $[n+m]$ such that \eqref{eq:a-phi} does not determine whether they are elements of $\varphi(A)$. In particular, \begin{equation} F(A)\subseteq[n+m]\setminus\varphi_{1}[A].\label{eq:F} \end{equation}
From \eqref{eq:levels} and \eqref{eq:a-phi-size}, $\varphi(A)$ contains no element of $F(A)$ if $\left|A\right|\le k-1$, exactly one if $k\le\left|A\right|\le k+1$, and all elements of $F(A)$ if $k+2\le\left|A\right|$. For $A\in\binom{[n]}{k}\cup\binom{[n]}{k+1}$, let $f(A)$ be the single element of $\varphi(A)\cap F(A)$. \begin{claim} \label{claim:y}One of the following holds:
\gdef\labelwidthi{\widthof{\textbf{\textup{A0. }}}} \gdef\labeli{\textbf{\textup{A\arabic{enumi}. }}} \gdef\propertyref#1{A#1} \gdef\refi{\propertyref{\arabic{enumi}}} \begin{enumerate}[label=\labeli, ref=\refi, labelsep=0em, leftmargin=0em, labelwidth=\labelwidthi, itemindent=\labelwidth, align=left] \item \label{enu:X0}There is a $y\in X_{\emptyset}\cup X_{2}$ such that, for every $A\in\binom{[n]}{k}$, we have $f(A)=y$. (In fact in this case $y\in X_{\emptyset}$, since for a $y\in X_{2}$ and $A\in\binom{[n]\setminus\{\varphi_{2}^{-1}(y)\}}{k}$ we would have $y\notin F(A)$. We do not use this.) \item \label{enu:X1}There is a $y\in X_{1}$ such that, for every $A\in\binom{[n]\setminus\{\varphi_{1}^{-1}(y)\}}{k}$, we have $f(A)=y$. (Note that when $\varphi_{1}^{-1}(y)\notin A$, $y\in F(A)$ holds by the definition of $F(A)$.) \end{enumerate} \end{claim}
First we show that \cref{prop:noninduced-lower-general} follows from this claim. We use the constant-weight code $\mathcal{C}\subset{\color{blue}\mathcal{B}}$ given by \cref{lem:modp code}. If \ref{enu:X0} holds in \cref{claim:y}, then we use the statement~\eqref{eq:modp code statement} in \cref{lem:modp code} with the same $n$~and~$m$ as in \cref{prop:noninduced-lower-general}, $X_{2}\cup X_{\emptyset}$ in the place of~$Y$, and $y$ as given by \cref{claim:y}. There is a set $C\in\binom{X_{12}\cup X_{1}}{k}$ such that $C\cup\{y\}\in\mathcal{C}\subset{\color{blue}\mathcal{B}}$. Then $\varphi_{1}^{-1}[C]\in\mathcal{Q}_{n}$, and $\varphi\left(\varphi_{1}^{-1}[C]\right)=C\cup\{y\}\in{\color{blue}\mathcal{B}}$, contradicting that the image of~$\varphi$ is in~${\color{red}\mathcal{R}}$. If \ref{enu:X1} holds in \cref{claim:y}, then we use the statement~\eqref{eq:modp code statement} with $n-1$ in the place of $n$, $m+1$ in the place of $m$, $X_{2}\cup X_{\emptyset}\cup\{y\}$ in the place of $Y$, and $y$ as given by \cref{claim:y}. There is a set $C\in\binom{(X_{12}\cup X_{1})\setminus\{y\}}{k}$ such that $C\cup\{y\}\in\mathcal{C}\subset{\color{blue}\mathcal{B}}$. Then $\varphi_{1}^{-1}[C]\in\binom{[n]\setminus\{\varphi_{1}^{-1}(y)\}}{k}\subset\mathcal{Q}_{n}$, and $\varphi\left(\varphi_{1}^{-1}[C]\right)=C\cup\{y\}\in{\color{blue}\mathcal{B}}$, contradicting that the image of~$\varphi$ is in~${\color{red}\mathcal{R}}$.
To prove \cref{claim:y}, we need the following. \begin{claim} \label{obs:neighbors}If $A,B\in\binom{[n]}{k}$ with a symmetric difference of size~$2$, and $f(A)\ne f(B)$, then at least one of the following holds: \begin{itemize} \item $f(A)=\varphi_{1}(b)$ where $\{b\}=B\setminus A$. (This implies $b\in E$ and $f(A)\in X_{1}$.) \item $f(B)=\varphi_{1}(a)$ where $\{a\}=A\setminus B$. (This implies $a\in E$ and $f(B)\in X_{1}$.) \end{itemize} \begin{proof}
Indeed, $\left|A\cup B\right|=k+1$, so \begin{equation} \varphi(A\cup B)=\varphi_{1}[A\cup B]\cup\{f(A\cup B)\}=\varphi_{1}[A\cap B]\cup\{\varphi_{1}(a),\varphi_{1}(b),f(A\cup B)\}.\label{eq:a-b-phi} \end{equation} Furthermore, \begin{align} \varphi(A\cup B) & \supset\varphi(A)=\varphi_{1}[A]\cup\{f(A)\}=\varphi_{1}[A\cap B]\cup\{\varphi_{1}(a),f(A)\}\text{ and}\label{eq:phi-a}\\ \varphi(A\cup B) & \supset\varphi(B)=\varphi_{1}[B]\cup\{f(B)\}=\varphi_{1}[A\cap B]\cup\{\varphi_{1}(b),f(B)\}.\label{eq:phi-b} \end{align}
By \eqref{eq:a-b-phi}, \eqref{eq:phi-a} and~\eqref{eq:phi-b}, we have {\thickmuskip=5mu plus 3mu minus 3mu \medmuskip=2mu $\left|\{\varphi_{1}(a),f(A),\varphi_{1}(b),f(B)\}\right|\le\left|\{\varphi_{1}(a),\varphi_{1}(b),f(A\cup B)\}\right|=\nobreak3$}, therefore the elements on the left-hand side of the inequality are not distinct. We know that $\varphi_{1}$ is an injection, and by~\eqref{eq:F}, $f(S)\notin\varphi_{1}[S]$ for any $S$ of size $k$ or $k+1$. We have assumed $f(A)\ne f(B)$. It follows that $f(A)=\varphi_{1}(b)$ or $f(B)=\varphi_{1}(a)$. This completes the proof of \cref{obs:neighbors}. \end{proof} \end{claim}
\present{grey}We first prove \cref{claim:y} under the condition $m\le n-\sqrt{8(n+m)-15}-1$, as the proof is simpler than the proof for arbitrary $m$. (For large $n$, this condition holds whenever the ratio of $m$ and $n$ is not very close to 1.) \begin{boldproof}[Proof of \cref{claim:y} when \thinmuskip=2mu \medmuskip=3mu \thickmuskip=4mu $m\le n-\sqrt{8(n+m)-15}-1$] At the beginning of the proof of \cref{prop:noninduced-lower-general}, we chose an arbitrary $k$ between {\medmuskip=3mu plus 3mu minus 3mu $\sqrt{8(n+m)-15}$ and $n-1-\sqrt{8(n+m)-15}$}. Now we will assume that $k\le n-m$; this is satisfied by choosing e.g.\ $k=\sqrt{8(n+m)-15}$. Then, by~\eqref{eq:D ge n-m}, there exists a $B\in\binom{[n]}{k}$ such that $B\subseteq D$. We show that \cref{claim:y} holds with $y=f(B)$. In fact, since $B\cap E=\emptyset$, we have $f(B)\in F(B)=X_{\emptyset}$, and we show that $f(A)=f(B)$ for every $A\in\binom{[n]}{k}$.
Take an $A\in\binom{[n]}{k}$. We can get from $B$ to $A$ by replacing one element at a time, in such a way that we never add an element that is not an element of $A$, and we never remove an element of $A$ (whether it is also an element of $B$, or we have added it). In particular, we never remove an element of $E$. That is, there is a sequence $B=B_{0},B_{1},\ldots,B_{l}=A$
such that $\left|B_{i}\triangle B_{i+1}\right|=2$ and $b_{i}^{\leftarrow}\in D$ where $\{b_{i}^{\leftarrow}\}=B_{i}\setminus B_{i+1}$. Let $b_{i+1}^{\rightarrow}\in[n]$ such that $\{b_{i+1}^{\rightarrow}\}=B_{i+1}\setminus B_{i}$.
We show by induction that $f(B_{i})=f(B)$ for every $i=0,\ldots,l$. Assume that $f(B)=f(B_{i})\ne f(B_{i+1})$. By \cref{obs:neighbors}, either $f(B_{i})=\varphi_{1}(b_{i+1}^{\rightarrow})$ or $f(B_{i+1})=\varphi_{1}(b_{i}^{\leftarrow})$. The former implies $f(B_{i})=f(B)\in X_{1}$, contradicting that it is in $X_{\emptyset}$. The latter implies $b_{i}^{\leftarrow}\in E$, contradicting that it is in $D$. \end{boldproof}
\begin{boldproof}[Proof of \cref{claim:y}] The general form of \cref{claim:y} will be a consequence of the following lemma. \begin{lem}
\label{lem:y inductive}Let $n,l\in\mathbf{N}$ such that $n\ge5$ and $1\le l\le n-3$, and let $X$ and $Y$ be disjoint sets such that $\left|X\right|=n$. Let $g:\binom{X}{l}\rightarrow X\cup Y$ be a function such that for every $A\in\binom{X}{l}$, $g(A)\notin A$; and for every $A,B\in\binom{X}{l}$ with a symmetric difference of size~$2$, where $g(A)\ne g(B)$, at least one of $\{g(A)\}=B\setminus A$ and $\{g(B)\}=A\setminus B$ holds. Then there is a $y\in X\cup Y$ such that $g(A)=y$ for every $A\in\binom{X\setminus\{y\}}{l}$. \end{lem}
\begin{subproof} We prove \cref{lem:y inductive} by induction on~$l$.
If $l=1$, the sets are singletons, and every symmetric difference is of size~$2$. We define a graph on~$X$: we connect two elements $a$ and~$b$ if $g(\{a\})\ne g(\{b\})$. This graph is the complement of a graph whose components are complete graphs (with the components defined by the values of $a\mapsto g(\{a\})$). For every $a,b\in X$ such that $ab$ is an edge, we have $g(\{a\})=b$ or $g(\{b\})=a$. Direct the graph such that we have the directed edge $(a,b)$ when $g(\{a\})=b$ (we may direct some edges in both directions).
The out-degree of every vertex is at most~1. Thus the number of edges is at most~$n$. By our assumptions $n\ge5$; the only graphs with these properties on at least~5 vertices (ignoring the directions of the edges) are the empty graph and a star on $n$ vertices. If it is an empty graph, then $g(\{a\})$ is the same for every $a\in X$ (and it is necessarily in~$Y$); the statement of \cref{lem:y inductive} holds with $y=g(\{a\})$. If the graph is a star on $n$ vertices, let $a$ be the center. Since at most one edge is directed outward from~$a$, all but at most one edge is directed towards~$a$. That is, $g(\{b\})=a$ for all but at most one $b\in X\setminus\{a\}$. Since the leaves of the star are not connected, $g$~has the same values on them as singletons, so in fact $g(\{b\})=a$ for every $b\in X\setminus\{a\}$, and the lemma holds with $y=a$.
Now let $l\ge2$. If $g(A)$ is the same for every $A\in\binom{X}{l}$, the lemma holds with that value as~$y$. Assume that $g(A)$ is not the same for every $A\in\binom{X}{l}$. Since the Johnson graph is connected, there are sets with a symmetric difference of size~$2$ with different~$g$; consequently there is an~$a\in X$ such that there are sets containing~$a$ with different~$g$.
We use the induction hypothesis with $\tilde{l}=l-1$, $\tilde{n}=n-1$, $\tilde{X}=X\setminus\{a\}$, $\tilde{g}\bigl(\tilde{A}\bigr)=g\bigl(\{a\}\cup\tilde{A}\bigr)$ for $\tilde{A}\in\binom{\tilde{X}}{l-1}$, and $Y$~unchanged. Note that since $g\bigl(\{a\}\cup\tilde{A}\bigr)\notin\{a\}\cup\tilde{A}$, in fact $\tilde{g}\bigl(\tilde{A}\bigr)\in\tilde{X}\cup Y$ and $\tilde{g}\bigl(\tilde{A}\bigr)\notin\tilde{A}$, so the conditions of the induction hypothesis hold. So there is a $b\in\tilde{X}\cup Y$ such that $\tilde{g}\bigl(\tilde{A}\bigr)=b$ for every $\tilde{A}\in\binom{\tilde{X}\setminus\{b\}}{l-1}$; equivalently, $b\in(X\cup Y)\setminus\{a\}$ such that $g(A)=b$ for every $A\in\binom{X}{l}$ that contains $a$ but not~$b$. If $b$ were in~$Y$, then $g(A)$ would be the same for every $A\in\binom{X}{l}$ that contains $a$, contradicting our assumption. So $b\in X$. \begin{figure}
\caption{$n=5$, $l=2$. The label on an edge $uv$ is $g(\{u,v\})$. (The elements of $Y$ are not shown in the figure.)}
\caption{We first prove that the marked pairs are assigned either $a$ or $b$, then that they are all assigned the same value.}
\label{fig:g(au)=00003Db}
\label{fig:bullets}
\end{figure} (See \cref{fig:g(au)=00003Db}.)
Take a $C\in\binom{X\setminus\{a,b\}}{l}$. We show that $g(C)\in\{a,b\}$. Take an arbitrary $c\in C$. $(C\setminus\{c\})\cup\{a\}$
contains $a$ but not $b$, so $g\bigl((C\setminus\{c\})\cup\{a\}\bigr)=b$. Since $\left|C\triangle\bigl((C\setminus\{c\})\cup\{a\}\bigr)\right|=2$, either $g(C)=g\bigl((C\setminus\{c\})\cup\{a\}\bigr)=b$, or $g(C)=a$, or $g\bigl((C\setminus\{c\})\cup\{a\}\bigr)=c$ --- but the last option is false.
Now we show that $g(C)$ is the same for every $C\in\binom{X\setminus\{a,b\}}{l}$. (See \cref{fig:bullets}.) If this is not the case, there are $C,D\in\binom{X\setminus\{a,b\}}{l}$ such that $\left|C\triangle D\right|=2$, and $g(C)=a$ but $g(D)=b$. But this implies that $C\setminus D=\{b\}$ or $D\setminus C=\{a\}$, which is impossible because $a,b\notin C,D$.
We already know that $g(A)=b$ for every $A\in\binom{X}{l}$ that contains $a$ but not~$b$. If $g(C)=b$ for every $C\in\binom{X\setminus\{a,b\}}{l}$, then the lemma holds with $y=b$ (see \cref{fig:y=00003Db}). So assume instead that $g(C)=a$ for every $C\in\binom{X\setminus\{a,b\}}{l}$ (\cref{fig:assumption}). \begin{figure}
\caption{In this case, \cref{lem:y inductive} holds with $y=b$.}
\caption{We assume that the pairs marked in \cref{fig:bullets} are assigned $a$ instead.}
\label{fig:y=00003Db}
\label{fig:assumption}
\end{figure}
Let $B\in\binom{X}{l}$ such that it contains $b$ but not~$a$. We show that $g(B)=a$. Take two different, arbitrary elements $c,d\in X\setminus(B\cup\{a\})$. \begin{figure}
\caption{We show that \thickmuskip=3mu plus 5mu \relax $g(B)=a$ for $B=\nobreak\{b,e\}$, for arbitrary choice of $e\protect\ne a,b$.}
\caption{\cref{lem:y inductive} holds with $y=a$.}
\label{fig:g(B)=00003Da}
\label{fig:y=00003Da}
\end{figure}
(There are at least two such elements because $l\le n-3$. See \cref{fig:g(B)=00003Da}.) Since $\left|B\triangle\bigl((B\setminus\{b\})\cup\{c\}\bigr)\right|=2$, either $g(B)=g\bigl((B\setminus\{b\})\cup\{c\}\bigr)=a$, or $g(B)=c$, or $g\bigl((B\setminus\{b\})\cup\{c\}\bigr)=b$ --- but the last option is false. So if $g(B)\ne a$, then $g(B)=c$. By the same reasoning applied with $d$ in the place of $c$, if $g(B)\ne a$, then $g(B)=d$, a contradiction. So $g(B)=a$ for every $B\in\binom{X}{l}$ that contains $b$ but not~$a$. Since we already know that $g(C)=a$ for every $C\in\binom{X\setminus\{a,b\}}{l}$, this implies that the lemma holds with $y=a$ (see \cref{fig:y=00003Da}). This completes the proof of \cref{lem:y inductive}. \end{subproof} Using \cref{lem:y inductive}, we show \cref{claim:y}. Let $l=k$, $X=X_{12}\cup X_{1}$, $Y=X_{\emptyset}\cup X_{2}$, and for a $B\in\binom{X_{12}\cup X_{1}}{k}$, let $g(B)=f\left(\varphi_{1}^{-1}[B]\right)$. (Since $\varphi_{1}$~is an injection and its image is $X_{12}\cup X_{1}$, we have $\varphi_{1}^{-1}[B]\in\binom{[n]}{k}=\operatorname{Dom} f$.) The conditions of \cref{lem:y inductive} hold by \cref{obs:neighbors}. By \cref{lem:y inductive}, there is a $y\in[n+m]$ such that $f\left(\varphi_{1}^{-1}[B]\right)=g(B)=y$ for every $B\in\binom{(X_{12}\cup X_{1})\setminus\{y\}}{k}$ (where $(X_{12}\cup X_{1})\setminus\{y\}$ may coincide with $X_{12}\cup X_{1}$).
If $y\in X_{\emptyset}\cup X_{2}$, then for every $A\in\binom{[n]}{k}$, we have $\varphi_{1}[A]\in\binom{(X_{12}\cup X_{1})\setminus\{y\}}{k}=\binom{X_{12}\cup X_{1}}{k}$, and $f(A)=f\left(\varphi_{1}^{-1}\left[\varphi_{1}[A]\right]\right)=y$, so \ref{enu:X0} holds in \cref{claim:y}. If $y\in X_{12}\cup X_{1}$, then for every $A\in\binom{[n]\setminus\{\varphi_{1}^{-1}(y)\}}{k}$, we have $\varphi_{1}[A]\in\binom{(X_{12}\cup X_{1})\setminus\{y\}}{k}$, and $f(A)=f\left(\varphi_{1}^{-1}\left[\varphi_{1}[A]\right]\right)=y$. Since $f(A)\in F(A)\subseteq X_{1}\cup X_{2}\cup X_{\emptyset}$, we also have $y\in X_{1}$, so \ref{enu:X1} holds in \cref{claim:y}. \end{boldproof}
\subsection{\label{subsec:cox stolee}A probabilistic construction showing $R_{w}(\protect\mathcal{Q}_{m},\protect\mathcal{Q}_{n})\ge m+n+1$ when $m\ge3$} \begin{thm} \label{prop:CS-generalized}If $n,m\in\mathbf{N}$, $n$~is sufficiently large, and $m\ge3$, then there exist ${\color{blue}\mathcal{B}},{\color{red}\mathcal{R}}\subset\mathcal{Q}_{n+m}$ such that ${\color{blue}\mathcal{B}}\sqcup{\color{red}\mathcal{R}}=\mathcal{Q}_{n+m}$, $\mathcal{Q}_{m}$~is not a weak subposet of~${\color{blue}\mathcal{B}}$, and $\mathcal{Q}_{n}$~is not a weak subposet of~${\color{red}\mathcal{R}}$. \end{thm}
In most of this subsection, we prove \cref{prop:CS-generalized}. The core of the random construction will be in \cref{lem:random}. In the proof of \cref{lem:random} we will use the asymmetric version of the Lov\'asz Local Lemma. \begin{lem}[Asymmetric Lov\'asz Local Lemma] \label{lem:asymlocal}Let $\mathcal{A}$ be a collection of events. For $A\in\mathcal{A}$, let $\Gamma(A)$~be the set of those events in~$\mathcal{A}$, other than $A$ itself, that are not independent of~$A$. If there is a function $x:\mathcal{A}\to[0,1)$ such that for every $A\in\mathcal{A}$, we have \begin{equation} P(A)\le x(A)\prod_{B\in\Gamma(A)}(1-x(B)),\label{eq:asymlocal-ineq} \end{equation} then there is a non-zero probability that none of the events occur. \end{lem}
\begin{claim} \label{lem:random}If $n,m\in\mathbf{N}$, $n$~is sufficiently large, and $3\le m\le n$, then there is a family of sets $\mathcal{F}\subset\binom{[n+m]}{m}$ such that
\gdef\labelwidthi{\widthof{(ii)}} \gdef\labeli{\textup{(\roman{enumi})}} \gdef\refi{(\textup{\roman{enumi})}} \begin{enumerate}[label=\labeli, ref=\refi, labelwidth=\labelwidthi, itemindent=\labelwidth, align=left] \item \label{enu:supersets}for each $S\in\binom{[n+m]}{m-1}$, $\mathcal{F}$~contains at least~$2$ supersets of~$S$, and \item \label{enu:subsets}for each $T\in\binom{[n+m]}{m+1}$, $\mathcal{F}$~contains at most $m-1$ subsets of~$T$. \end{enumerate} \end{claim}
\begin{proof} Let $p=\left(4(m+1)\left(n^{2}-1\right)e\right)^{-1/m}$. Let $\mathcal{F}$ be a collection of sets given by taking each set $F\in\binom{[n+m]}{m}$ independently at random with probability~$p$.
\begin{comment} Notice that the family $\mathcal{F}$ satisfies the conditions of the lemma if and only if the family $\left\{ F\in\binom{[n+m]}{n}:[n+m]\setminus F\notin\mathcal{F}\right\} $ satisfies the conditions with $n$ and $m$ swapped. \end{comment} {} For any $S\in\binom{[n+m]}{m-1}$, let $A_{S}$ be the event in which $\mathcal{F}$ contains at most~$1$ superset of~$S$, and for any $T\in\binom{[n+m]}{m+1}$, let $B_{T}$ be the event in which $\mathcal{F}$ contains at least~$m$ subsets of~$T$. We have \begin{align*} P(A_{S}) & =(n+1)(1-p)^{n}p+(1-p)^{n+1}\text{ and}\\ P(B_{T}) & =(m+1)p^{m}(1-p)+p^{m+1}. \end{align*}
A given event $A_{S}$ is independent of an event of the form $B_{T}$ unless there is a set $F\in\binom{[n+m]}{m}$ such that $S\subset F\subset T$, i.e., if $S\subset T$. There are $\frac{(n+1)n}{2}$ such events $B_{T}$. $A_{S}$~is not independent of another event $A_{S'}$ if there is a $F\in\binom{[n+m]}{m}$ such that $S,S'\subset F$, i.e., if the symmetric difference of $S$ and $S'$ is of size~$2$. There are $(m-1)(n+1)$ such events $A_{S'}$. By symmetry, a given event $B_{T}$ is independent of all but $\frac{(m+1)m}{2}$ events~$A_{S}$, and is independent of all but $(n-1)(m+1)$ other events of the form $B_{T'}$.
We want to use \cref{lem:asymlocal} to prove that there is a non-zero probability that none of the events $A_{S}$ and $B_{T}$ occur, and thus $\mathcal{F}$ fulfills the conditions of \cref{lem:random}. Define a function \[ x:\left\{ A_{S}:S\in\binom{[n+m]}{m-1}\right\} \cup\left\{ B_{T}:T\in\binom{[n+m]}{m+1}\right\} \to[0,1)\text{ by} \] \[ x(E)=\begin{cases} y\coloneqq\frac{1}{4(m-1)(n+1)} & \text{if } E=A_{S}\text{ for some }S\in\binom{[n+m]}{m-1},\\ z\coloneqq\frac{1}{4(n-1)(n+1)} & \text{if } E=B_{T}\text{ for some }T\in\binom{[n+m]}{m+1}. \end{cases} \]
For an event~$E$ in the domain of~$x$, let $\Gamma(E)$ be the set of other events that are not independent of~$E$. We will use the bounds \begin{equation} e^{-x}\ge1-x\ge e^{-2x},\label{eq:expbound} \end{equation}
which hold when $0\le x\le\frac{1}{2}.$ For any set $T\in\binom{[n+m]}{m+1}$, we have \begin{gather*} x(B_{T})\prod_{E'\in\Gamma(B_{T})}(1-x(E'))=z\overbrace{(1-y)^{(m+1)m/2}}^{E'=A_{S}}\overbrace{(1-z)^{(n-1)(m+1)}}^{E'=B_{T'}}\\ \overset{\eqref{eq:expbound}}{\ge}ze^{-2(y(m+1)m/2+z(n-1)(m+1))}>\frac{1}{4(n-1)(n+1)e}\\ =(m+1)p^{m}>(m+1)p^{m}(1-p)+p^{m+1}=P(B_{T}). \end{gather*}
For any set~$S\in\binom{[n+m]}{m-1}$, we have \begin{gather*} x(A_{S})\prod_{E'\in\Gamma(A_{S})}(1-x(E'))=y\overbrace{(1-z)^{(n+1)n/2}}^{E'=B_{T}}\overbrace{(1-y)^{(m-1)(n+1)}}^{E'=A_{S'}}\\ \overset{\eqref{eq:expbound}}{\ge}ye^{-2(z(n+1)n/2+y(m-1)(n+1))}>\frac{1}{4(m-1)(n+1)e}\ge\frac{1}{4(n-1)(n+1)e}\text{,\quad and} \end{gather*} \begin{equation} \begin{gathered}P(A_{S})=(n+1)(1-p)^{n}p+(1-p)^{n+1}<\bigl((n+1)p+1\bigr)(1-p)^{n}\\ \overset{\eqref{eq:expbound}}{\le}\bigl((n+1)p+1\bigr)\cdot e^{-pn}\\ <\bigl((n+1)\left(4(m+1)\left(n^{2}-1\right)e\right)^{-1/m}+1\bigr)\cdot e^{-\bigl(4(m+1)e\bigr)^{-1/m}\cdot n^{1-2/m}}. \end{gathered} \label{eq:PAS} \end{equation}
On the right-hand side of \eqref{eq:PAS}, $\bigl((n+1)\left(4(m+1)\left(n^{2}-1\right)e\right)^{-1/m}+1\bigr)$ is increasing in $m$ and $e^{-\bigl(4(m+1)e\bigr)^{-1/m}\cdot n^{1-2/m}}$ is decreasing for $m\ge3$. So, by replacing $m$ with $n$ in the first factor, and $m$ with $3$ in the second factor, we have \begin{gather*} P(A_{S})\le\bigl((n+1)\left(4(n+1)\left(n^{2}-1\right)e\right)^{-1/n}+1\bigr)\cdot e^{-\bigl(16e\bigr)^{-1/3}\cdot n^{1/3}}\\ \le\frac{1}{4(n-1)(n+1)e}<x(A_{S})\prod_{E'\in\Gamma(A_{S})}(1-x(E')) \end{gather*}
when $n$~is sufficiently large. Therefore the function $x$ satisfies the inequality \eqref{eq:asymlocal-ineq} required by the asymmetric Lov\'asz Local Lemma, so $\mathcal{F}$ has the desired properties. \end{proof} Now we are ready to prove \cref{prop:CS-generalized} using the family of sets constructed in \cref{lem:random}. We may assume without loss of generality that $m\le n$. Let $\mathcal{F}\subset\binom{[n+m]}{m}$ be the family of sets given by \cref{lem:random}. Let ${\color{blue}\mathcal{B}}=\binom{[n+m]}{0}\cup\binom{[n+m]}{1}\cup\ldots\cup\binom{[n+m]}{m-2}\cup\mathcal{F}\cup\binom{[n+m]}{m+1}$, and let ${\color{red}\mathcal{R}}=\mathcal{Q}_{n+m}\setminus{\color{blue}\mathcal{B}}$.
Assume that ${\color{blue}\mathcal{B}}$ contains a weak copy of~$\mathcal{Q}_{m}$ provided by the injection $\varphi:\mathcal{Q}_{m}\to{\color{blue}\mathcal{B}}$. Note that ${\color{blue}\mathcal{B}}$ has height $m+1$ as a poset. Therefore, for $A\in\mathcal{Q}_{m}$, $\left|\varphi(A)\right|=m$ if $\left|A\right|=m-1$, and $\left|\varphi(A)\right|=m+1$ if $A=[m]$. The $m$ sets of size $m-1$ in~$\mathcal{Q}_{m}$ are mapped to subsets of $\varphi([m])$ in $\mathcal{F}=\binom{[n+m]}{m}\cap{\color{blue}\mathcal{B}}$. But, by \ref{enu:subsets} in \cref{lem:random}, only at most $m-1$ subsets of $\varphi([m])$ are in $\mathcal{F}$, a contradiction.
Similarly, assume that ${\color{red}\mathcal{R}}$ contains a weak copy of~$\mathcal{Q}_{n}$ provided by the injection $\varphi:\mathcal{Q}_{n}\to{\color{red}\mathcal{R}}$. Note that
${\color{red}\mathcal{R}}$ has height $n+1$. Therefore, for $A\in\mathcal{Q}_{n}$, $\left|\varphi(A)\right|=m-1$ if $A=\emptyset$, $\left|\varphi(A)\right|=m$
if $\left|A\right|=1$, and $\left|\varphi(A)\right|=\left|A\right|+m$ if $\left|A\right|\in\{2,3,\ldots,n\}$. The $n$ singletons of~$\mathcal{Q}_{n}$ are mapped to supersets of $\varphi(\emptyset)$ in $\binom{[n+m]}{m}\cap{\color{red}\mathcal{R}}=\binom{[n+m]}{m}\setminus\mathcal{F}$. But, by \ref{enu:supersets} in \cref{lem:random}, at least~$2$ supersets of $\varphi(\emptyset)$ are in~$\mathcal{F}$, so at most~$n-1$ are in $\binom{[n+m]}{m}\setminus\mathcal{F}$, a contradiction. \begin{remark*} The above proof of \cref{prop:CS-generalized} cannot be easily made to work for $m=2$. More precisely, the following \MakeLowercase{\crtcrefnamebylabel{claim:cox-stolee-fails}} holds. \end{remark*} \begin{claim} \label{claim:cox-stolee-fails}The conclusion of \cref{lem:random} does not hold for $m=2$, and in fact there is no $\mathcal{F}\subset\binom{[n+2]}{2}$ such that, for ${\color{blue}\mathcal{B}}=\{\emptyset\}\cup\mathcal{F}\cup\binom{[n+2]}{3}$ and ${\color{red}\mathcal{R}}=\mathcal{Q}_{n+2}\setminus{\color{blue}\mathcal{B}}$, $\mathcal{Q}_{2}$~is not a subposet of~${\color{blue}\mathcal{B}}$, and $\mathcal{Q}_{n}$~is not a subposet of~${\color{red}\mathcal{R}}$. \end{claim}
\begin{proof} A family of sets $\mathcal{F}\subset\binom{[n+2]}{2}$ that satisfies the condition \ref{enu:supersets} in \cref{lem:random} contains, for any $S\in\binom{[n+2]}{1}$, a pair of sets $A,B$ such that $S\subset A,B$. Note that $A$ and $B$ have a symmetric difference of size~$2$. Then $A,B\subset A\cup B\in\binom{[n+2]}{3}$, which contradicts the condition \ref{enu:subsets} in \cref{lem:random}. So the two conditions of \cref{lem:random} cannot be satisfied at the same time by a family of sets $\mathcal{F}\subset\binom{[n+2]}{2}$.
It is easy to check that the conditions \ref{enu:supersets}~and~\ref{enu:subsets} on~$\mathcal{F}$ in \cref{lem:random} are not only sufficient, but also necessary for the above coloring to satisfy the conditions of \cref{prop:CS-generalized}, that is, to have no $\mathcal{Q}_{2}$ as a subposet of ${\color{blue}\mathcal{B}}=\{\emptyset\}\cup\mathcal{F}\cup\binom{[n+2]}{3}$, and no $\mathcal{Q}_{n}$ as a subposet of ${\color{red}\mathcal{R}}=\mathcal{Q}_{n+2}\setminus{\color{blue}\mathcal{B}}$. \end{proof}
\phantomsection\addcontentsline{toc}{section}{Acknowledgments}\section*{Acknowledgments}
The second and third authors were supported by the grant IBS-R029-C1. The research of the second author was also partially supported by the EPSRC, grant no. EP/S00100X/1 (A. Methuku).
\end{document} |
\begin{document}
\title[On Zeros and Growth of Solutions]{On Zeros and Growth of Solutions of Second Order Linear Differential Equation} \author[S. Kumar and M. Saini]{Sanjay Kumar and Manisha Saini}
\address{Sanjay Kumar \\ Department of Mathematics \\ Deen Dayal Upadhyaya College \\ University of Delhi \\ New Delhi--110 078, India }
\email{sanjpant@gmail.com}
\address{Manisha Saini\\ Department of Mathematics\\ University of Delhi\\ New Delhi--110 007, India}
\email{sainimanisha210@gmail.com } \thanks {The research work of the second author is supported by research fellowship from University Grants Commission (UGC), New Delhi.}
\keywords{entire function, meromorphic function, order of growth, exponent of convergence, complex differential equation} \subjclass[2010]{Primary 34M10, 30D35} \begin{abstract} For a second order linear differential equation $f''+A(z)f'+B(z)f=0$, with $ A(z)$ and $B(z)$ being transcendental entire functions under some restriction, we have established that all non-trivial solutions are of infinite order. In addition, we have proved that these solutions have infinite number of zeros. Also, we have extended these results to higher order linear differential equations. \end{abstract} \maketitle
\section{Introduction} Consider a second order linear differential equation of the form \begin{equation}\label{sde} f''+A(z)f'+B(z)f=0, \quad B(z) \not \equiv 0 \end{equation} where $A(z)$ and $B(z)$ are entire functions. We have used the notion of Value Distribution Theory of meromorphic function, also known as Nevanlinna Theory \cite{yang}. For an entire function $f$, the order of $f$ and exponent of convergence of $f$ are defined, respectively, in the following manner, $$ \rho(f) = \limsup_{r \rightarrow \infty} \frac{\log^+ \log^+ M(r, f)}{\log r} , \quad \lambda(f) =\limsup_{r\rightarrow \infty} \frac{\log^+ N(r,\frac{1}{f})}{\log r} $$
where $ M(r,f)= \max\{\ |f(z)|:|z| =r \}\ $ is the maximum modulus of $f(z)$ over the circle $|z| =r$ and $N(r,\frac{1}{f})$ is the number of zeros of $f(z)$ enclosed in the disk $|z| <r$.
It is well known that all solutions of the equation (\ref{sde}) are entire functions. Using Wiman-Valiron theory, it is proved that equation (\ref{sde}) has all solutions of finite order if and only if both $A(z)$ and $B(z)$ are polynomials \cite{lainebook}. Therefore, if either $A(z)$ or $B(z)$ is a transcendental entire function, then almost all solutions of the equation (\ref{sde}) are of infinite order. So, it is natural to find conditions on the coefficients of the equation (\ref{sde}) such that all non-trivial solutions of the equation (\ref{sde}) are of infinite order. Our aim in this paper is also to find such $A(z)$ and $B(z)$. It was Gundersen \cite{finitegg} who gave a necessary condition for equation (\ref{sde}) to have a solution of finite order:
\begin{thm} A necessary condition for equation (\ref{sde}) to have a non-trivial solution $f$ of finite order is \begin{equation}\label{necc} \rho(B)\leq \rho(A). \end{equation} \end{thm} We illustrate this condition with the following examples:
\begin{example}\label{eg1} $f(z)=e^{-z}$ satisfies $f''+e^{-z}f'-(e^{-z}+1)f=0,$ where $\rho(A)=\rho(B)=1$. \end{example}
\begin{example}\label{eg2} With $A(z)=e^z+2$ and $B(z)=1$ equation (\ref{sde}) has finite order solution $f(z)=e^{-z}+1$, where $\rho(B)<\rho(A).$ \end{example} Thus if $\rho(A)<\rho(B)$, then all solutions of the equation (\ref{sde}) are of infinite order. However, the given necessary condition is not sufficient, as the following example shows:
\begin{example}\cite{heitt} If $A(z)=P(z)e^{z}+Q(z)e^{-z}+R(z)$, where $ P, Q$ and $ R$ are polynomials and $B(z)$ is an entire function with $\rho(B)<1$ then $\rho(f)$ is infinite, for all non-trivial solutions $f$ of the equation (\ref{sde}). \end{example} In the same paper \cite{finitegg}, Gundersen proved the following result: \begin{thm}\label{thm1} Let $f$ be a non-trivial solution of the equation (\ref{sde}) where either \begin{enumerate}[(i)] \item $\rho(B)< \rho(A)<\frac{1}{2}$\\ or \item $A(z)$ is a transcendental entire function with $\rho(A)=0$ and $B(z)$ is a polynomial \end{enumerate} then $\rho(f)$ is infinite. \end{thm} Hellerstein, Miles and Rossi \cite{heller} proved Theorem~\ref{thm1} for $\rho(B)<\rho(A)=\frac{1}{2}.$ In \cite{frei}, Frei showed that the second order differential equation \begin{equation}\label{Sde} f''+e^{-z}f'+B(z)f=0 \end{equation} possesses a solution of finite order if and only if $B(z)=-n^2 , \quad n \in \mathbb{N}$. Ozawa \cite{ozawa} proved that equation (\ref{Sde}) possesses no solution of finite order when $B(z)=az+b, \quad a \neq0$. Amemiya and Ozawa \cite{ame} and Gundersen \cite{ggpol} studied the equation (\ref{Sde}) for $B(z)$ being a particular polynomial. After this, Langley \cite{lang} showed that the differential equation \begin{equation} f''+Ce^{-z}f'+B(z)f=0 \end{equation} has all non-trivial solutions of infinite order, for any nonzero constant $C$ and for any nonconstant polynomial $B(z)$. \\
J. R. Long introduced the notion of the deficient value and Borel direction into the studies of the equation (\ref{sde}). For the definition of deficient value, Borel direction and function extremal for Yang's inequality one may refer to \cite{yang}. \\
In \cite{extremal}, J. R. Long proved that if $A(z)$ is an entire function extremal for Yang's inequality and $B(z)$ a transcendental entire function with $\rho(B)\neq \rho(A)$, then all solutions of the equation (\ref{sde}) are of infinite order. In \cite{jlongfab}, J.R. Long replaced the condition $\rho(B) \neq \rho(A)$ with the condition that $B(z) $ is an entire function with \emph{Fabry gaps}. \\
X. B. Wu \cite{wu}, proved that if $A(z)$ is a non-trivial solution of $w''+Q(z)w=0$, where $Q(z)= b_mz^m+\ldots +b_0, \quad b_m \neq 0$ and $B(z)$ be an entire function with $\mu(B)<\frac{1}{2}+ \frac{1}{2(m+1)}$, then all solutions of equation (\ref{sde}) are of infinite order. J.R. Long \cite{jlongfab} replaced the condition $\mu(B)< \frac{1}{2}+\frac{1}{2(m+1)}$ with $B(z)$ being an entire function with \emph{Fabry gap} such that $\rho(B) \neq \rho(A)$. \\ The main source of the problems in complex differential equation is Gundersen's \cite{problemgg}. J.R. Long \cite{jlong} gave a partial solution for a question asked by Gundersen in \cite{problemgg}. He proved that: \begin{thm}\label{jrthm}
Let $A(z)=v(z)e^{P(z)}$, where $v(z)( \not \equiv 0)$ is an entire function and $P(z)=a_nz^n +\ldots +a_0$ is a polynomial of degree $n$ such that $\rho(v)<n$. Let $B(z)=b_mz^m +\ldots +b_0$ be a non-constant polynomial of degree $m$, then all non-trivial solutions of the equation (\ref{sde}) have infinite order if one of the following condition holds: \begin{enumerate}[(i)] \item $m+2 <2n$; \item $m+2>2n$ and $m+2 \neq 2kn$ for all integers $k$; \item $m+2=2n$ and $\frac{a_n^2}{b_m}$ is not a negative real. \end{enumerate} \end{thm}
In this paper, we are assuming $B(z)$ to be a transcendental entire function in Theorem \ref{jrthm}. We now recall the notion of \emph{critical rays} and \emph{Fabry gap}:
\begin{defn}\label{def1}\cite{jlong} Let $P(z)=a_{n}z^n+a_{n-1}z^{n-1}+\ldots +a_0$, $a_n\neq0$ and $\delta(P,\theta)=\RE(a_ne^{\iota n \theta})$. A ray $\gamma = re^{\iota \theta}$ is called \emph{critical ray} of $e^{P(z)}$ if $\delta(P,\theta)=0.$ \end{defn}
It can be easily seen that there are $2n$ different critical rays of $e^{P(z)}$ which divide the whole complex plane into $2n$ distinct sectors of equal length $\frac{\pi}{n}.$ Also $\delta(P,\theta)>0$ in $n$ sectors and $\delta(P,\theta)<0$ in the remaining $n$ sectors. We note that $\delta(P,\theta)$ is alternately positive and negative in the $2n$ sectors. \begin{defn}\cite{hayman} Let $g(z)=\sum_{n=0}^{\infty}a_{\lambda_n}z^{\lambda_n}$ be an entire function. If the sequence $(\lambda_n)$ satisfies $$ \frac{\lambda_n}{n} \rightarrow \infty $$ as $ n \rightarrow \infty$, then $g(z) $ has Fabry gap. \end{defn} An entire function with Fabry gap has either positive or infinite order \cite{hayman}. We now fix some notation: \\ $ E^+ = \{ \theta \in [0,2\pi]: \delta(P,\theta)\geq 0\}$ and $E^- = \{ \theta \in [0,2\pi]: \delta(P,\theta)\leq 0 \}.$ \\ Let $\alpha>0$ and $\beta>0$ be such that $\alpha<\beta$; then \[\Omega(\alpha,\beta)= \{z\in \mathbb{C}: \alpha<\arg z <\beta \}.\]
In this paper, we will prove the following theorem: \begin{thm}\label{Main} Suppose $A(z)=v(z)e^{P(z)}$ be an entire function with $\lambda(A)<\rho(A)=n$, where $P(z)=a_nz^n+ \ldots a_0$ is a polynomial of degree $n$. Suppose that \begin{enumerate} \item B(z) be a transcendental entire function satisfying $\rho(B)\neq \rho(A)$ or \item B(z) be a transcendental entire function with Fabry gap. \end{enumerate}
Then all non-trivial solutions of the equation (\ref{sde}) are of infinite order. Moreover, all non-trivial solutions of the equation (\ref{sde}) have infinite number of zeros. \end{thm} In Theorem [\ref{Main}] part (2), $B(z)$ may be a transcendental entire function with order equal to order of entire function $A(z)$. J. R. Long have proved Theorem [\ref{Main}], for $A(z)$ being an entire function extremal for Yang's inequality in \cite{jlongfab} and \cite{extremal}. We illustrate our result with some examples \begin{example} $$f''+Q(z)e^{P(z)}f'+B(z) f=0,$$ where $Q(z)$and $P(z)$ are polynomials and $B(z)$ is any transcendental entire funcion with $\rho(B)\neq $ degree of $P(z)$. Then $\rho(f)=\infty$, for all non-trivial solutions. \end{example} \begin{example} $$f''+\sin(z) e^{P(z)}f'+ \cos(z^{\frac{n}{2}}) f=0,$$ where $P(z)$ is a polynomial of degree $m>1, m\neq \frac{n}{2}$ and $n\in \mathbb{N}$ , then all non-trivial solutions are of infinite order. \end{example}
This paper is orgnised in the following manner: in section 2, we give results which will be useful in proving our main result. In section 3, we will prove our main theorem. In section 4, we will extend our result to higher order linear differential equations.
\section{Auxiliary Result}
In this section, we present some known results, which will be useful in proving Theorem [\ref{Main}]. These results involve the logarithmic measure and logarithmic density of sets; therefore, we recall these concepts: \\
The Lebesgue linear measure of a set $E\subset [0,\infty)$ is defined as $m(E)= \int_{E} dt$. The logarithmic measure of a set $F \subset [1,\infty)$ is given by $m_1(F)= \int_{F}\frac{dt}{t}$. The upper and lower logarithmic densities of a set $F \subset [1,\infty)$ are given, respectively, by $$\overline{\log dens(F)} =\limsup_{r\rightarrow \infty}\frac{m_1(F\cap[1,r])}{\log r}$$ $$\underline{\log dens(F)} =\liminf_{r\rightarrow \infty}\frac{m_1(F\cap[1,r])}{\log r}$$ Also, the logarithmic density of a set $F\subset [1,\infty)$ is defined as
$$\log dens(F)=\overline{\log dens(F)} =\underline{\log dens(F)}.$$ The next lemma is due to Gundersen \cite{log gg} and has been used extensively over the years.
\begin{lemma}\label{gglog} Let $f$ be a transcendental entire function of finite order $\rho$, let $\Gamma= \{\ (k_1,j_1), (k_2,j_2) \ldots (k_m,j_m) \}\ $ denote finite set of distinct pairs of integers that satisfy $ k_i > j_ i \geq 0,$ for $i=1,2, \ldots m, $ and let $\epsilon>0$ be a given constant. Then the following three statements holds:
\begin{enumerate}[(i)]
\item there exists a set $E_1 \subset[0,2\pi)$ that has linear measure zero, such that if $\psi_0 \in [0,2\pi)\setminus E_1, $ then there is a constant $R_0=R_0(\psi_0)>0$ so that for all $z$ satisfying $\arg z =\psi_0$ and $|z| \geq R_0$and for all $(k,j)\in \Gamma$, we have \begin{equation} \label{guneq}
|f^{(k)}(z)/f^{(j)}(z)| \leq |z|^{(k-j)(\rho-1+\epsilon)} \end{equation}
\item there exists a set $E_2 \subset (1,\infty)$ that has finite logarithmic measure, such that for all $z$ satisfying $|z| \not \in E_2 \cup [0,1]$ and for all $(k,j) \in \Gamma$, the inequality (\ref{guneq}) holds.
\item there exists a set $E_3\subset [0,\infty)$ that has finite linear measure, such that for all $z$ satisfying $|z|\not \in E_3$ and for all $(k,j) \in \Gamma$, we have \begin{equation}
|f^{(k)}(z)/ f^{(j)}(z)| \leq |z|^{(k-j)(\rho+\epsilon)}. \end{equation} \end{enumerate} \end{lemma}
The following result gives estimates for absolute value of $ A(z)$ over all complex plane except for a negligible set. \begin{lemma}\label{implem} \cite{banklang} Let $A(z)=v(z)e^{P(z)}$ be an entire function with $\lambda(A)<\rho(A)=n$, where $P(z)$ is a polynomial of degree $n$. Then for every $\epsilon>0$ there exists $E \subset [0,2\pi)$ of linear measure zero such that
\begin{enumerate}[(i)]
\item for $ \theta \in E^+ \setminus E $ there exists $ R>1 $ such that \begin{equation}
|A(re^{\iota \theta})| \geq \exp \left( (1-\epsilon) \delta(P,\theta)r^n \right) \end{equation} for $r>R.$
\item for $\theta \in E^-\setminus E$ there exists $R>1$ such that \begin{equation}\label{eq2le}
|A(re^{\iota \theta})| \leq \exp \left( (1-\epsilon)\delta(P,\theta) r^n \right) \end{equation} for $r>R.$ \end{enumerate} \end{lemma}
The next lemma is from \cite{besi} and gives estimates for an entire function of order less than one. \begin{lemma}\label{gglemma}
Let $w(z)$ be an entire function of order $\rho$, where $0<\rho<\frac{1}{2}$and let $\epsilon>0$ be a given constant. Then there exists a set $S \subset [0,\infty)$ that has upper logarithmic density at least $1-2\rho$ such that $|w(z)| >\exp (|z|^{\rho-\epsilon})$ for all $z$ satisfying $|z| \in S.$ \end{lemma} The following lemma is from \cite{lainebook}. \begin{lemma}\label{lainebook} Let $g: (0,\infty) \rightarrow \mathbb{R}$, $h: (0,\infty)\rightarrow \mathbb{R}$ be monotone increasing functions such that $g(r) < h (r)$ outside of an exceptional set $E$ of finite logarithmic measure. Then, for any $\alpha > 1$, there exists $r_0 > 0$ such that $g(r) < h(\alpha r)$ holds for all $r > r_0$. \end{lemma} Next lemma give property of an entire function with Fabry gap and can be found in \cite{jlongfab}, \cite{zhe}. \begin{lemma}\label{fablemma}
Let $g(z)=\sum_{n=0}^{\infty} a_{\lambda_n}z^{\lambda_n}$ be an entire function of finite order with Fabry gap, and $h(z)$ be an entire function with $\rho(h)=\sigma \in (0,\infty)$. Then for any given $\epsilon\in (0,\sigma)$, there exists a set $H\subset (1,+\infty)$ satisfying $ \overline{log dense} H \geq \xi $, where $\xi\in (0,1)$ is a constant such that for all $|z| =r \in H$, one has $$ \log M(r,h) > r^{\sigma-\epsilon}, \quad \log m(r,g) > (1-\xi)\log M(r,g),$$
where $M(r,h)=\max \{\ |h(z)|: |z|=r\}\ $, $m(r,g)=\min \{\ |g(z)|: |z|=r\}\ $ and $M(r,g)= \max \{\ |g(z)|: |z|=r\}\ $. \end{lemma} The following remark follows from the above lemma. \begin{remark} \label{fabremark}
Suppose that $g(z)=\sum_{n=0}^{\infty} a_{\lambda_n}z^{\lambda_n}$ is an entire function of order $\sigma \in (0,\infty)$ with Fabry gaps. Then for any given $\epsilon >0, \quad (0<2\epsilon <\sigma)$, there exists a set $H\subset (1,+\infty)$ satisfying $\overline{\log dense}H \geq \xi$, where $\xi \in (0,1)$ is a constant such that for all $|z| =r \in H$, one has
$$ |g(z)|> M(r,g)^{(1-\xi)}> \exp{\left((1-\xi) r^{\sigma-\epsilon}\right)}>\exp{\left(r^{\sigma-2\epsilon}\right)}.$$ \end{remark} Next lemma can be found in \cite{lainebook} and can be proved by induction. \begin{lemma}\label{ind} Let $h(z)$ and $Q(z)$ be entire functions and define $f=he^{Q}$. Then $f^{(p)}$ may be represented, for each $p\in \mathbb{N}$, in the form \begin{equation}\notag f^{(p)}= \left( h^{(p)}+pQ'h^{(p-1)}+ \sum_{j=2}^{p} \left( {p \choose j} (Q')^j+H_{j-1}(Q') \right) h^{(p-j)} \right)e^Q \end{equation} where $H_{j-1}(Q')$ stands for a differential polynomial of total degree $\leq j-1$ in $Q'$ and its derivatives, with constants coefficients. \end{lemma} We are now able to prove our main result. \section{Proof of Theorem \ref{Main}} This section contains the proof of Theorem [\ref{Main}], which is as follows: \begin{proof}
If $\rho(A)< \rho(B)$ then by Theorem [\ref{thm1}], all non-trivial solutions $f$ of the equation (\ref{sde}) are of infinite order. Thus we consider that $\rho(B)\leq\rho(A)< \infty$.
Let us suppose that there exists a non-trivial solution $f$ of the equation (\ref{sde}) such that $\rho(f)<\infty$. Then by Lemma [\ref{gglog}], there exists a set $E_1 \subset[0,2\pi)$ that has linear measure zero, such that if $\psi_0 \in [0,2\pi) \setminus E_1, $ then there is a constant $R_0=R_0(\psi_0)>0$ so that for all $z$ satisfying $\arg z =\psi_0$ and $|z| \geq R_0$, we have
\begin{equation} \label{guneq1}
|f^{(k)}(z)/f(z)|\leq |z| ^{2\rho(f)}, \quad k=1, 2 \end{equation} \begin{enumerate} \item Let $B(z)$ be a transcendental entire function with $\rho(B)\neq \rho(A)$. In this case we need to consider $\rho(B)<\rho(A)$. We consider the following cases on $\rho(B)$. \begin{enumerate}[(a)]
\item Suppose that $0<\rho(B)\leq \frac{1}{2}$. Then from Lemma [\ref{gglemma}], there exists a set $S \subset [0,\infty)$ that has upper logarithmic density at least $1-2\rho(B)$ such that
\begin{equation}\label{eqB}
|B(z)|>\exp(|z| ^{\rho(B)-\epsilon}) \end{equation}
for all $z$, satisfying $|z|\in S.$
From equation (\ref{sde}), (\ref{eq2le}), (\ref{guneq1})and (\ref{eqB}), for all $z$, satisfying $\arg z=\psi_0 \in E^- \setminus (E\cup E_1) $and $|z|=r \in S$, $|z|=r >R_0(\psi_0)$ we have
\begin{align*}
\exp{(r ^{\rho(B)-\epsilon})} &< |B(z)| \\
&\leq |f''(z)/f(z)|+ |A(z)| |f'(z)/f(z)| \\
&\leq r^{2\rho(f)}(1+o(1)) \end{align*} which is a contradiction for arbitrary large $r$. \item When $\frac{1}{2}\leq \rho(B)< \infty $ then using Phragm$\acute{e}$n- Lindel$\ddot{o}$f principle, there exists a sector $\Omega(\alpha, \beta); \quad 0\leq \alpha<\beta \leq 2\pi$ with $\beta-\alpha \geq \frac{\pi}{\rho(B)}$ such that
\begin{equation}\label{Border}
\limsup_{r\rightarrow \infty} \frac{\log^+ \log^+ |B(re^{\iota \theta})|}{\log r} =\rho(B) \end{equation} for all $\theta \in \Omega(\alpha, \beta)$. Since $\rho(B) < \rho(A)$ this implies that there exists $\theta_0 \in \Omega(\alpha, \beta) \cap \left(E^- \setminus E \right)$. Thus from equation (\ref{eq2le}) and (\ref{Border}), for $\arg z =\theta_0$ we have,
\begin{equation}\label{eqAle}
|A(re^{\iota \theta_0})| \leq \exp{\left( (1-\epsilon) \delta(P,\theta_0) r^n\right)} \end{equation} and \begin{equation}\label{eqB1}
\exp{\left(r^{\rho(B)-\epsilon}\right)} \leq |B(re^{\iota \theta_0})| \end{equation}
for sufficiently large $r$. Now from equations (\ref{sde}), (\ref{guneq1}), (\ref{eqAle})and (\ref{eqB1}), for all $z=re^{\iota \theta_0}$, satisfying $\theta_0\in \Omega(\alpha, \beta)\cap E^- \setminus(E\cup E_1)$ and $|z|=r > R_0(\theta_0) $ we have, \begin{align*}
\exp{(r ^{\rho(B)-\epsilon})} &< |B(z)| \\
&\leq |f''(z)/f(z)| + |A(z)| | f'(z)/f(z)|\\
&\leq r^{2\rho(f)}(1+o(1)) \end{align*} which is a contradiction for arbitrary large $r$.
\item Now suppose that $B(z)$ is a transcendental entire function with $\rho(B)=0$, then using a result from \cite{pd}, for all $\theta \in [0,2\pi)$ one has, \begin{equation}\label{eqB2}
\limsup_{r\rightarrow \infty} \frac{\log |B(re^{\iota \theta})|}{\log r} =\infty \end{equation} this implies that for any large $G>0$ there exists $R(G)>0$ such that \begin{equation}\label{eqB3}
r^G \leq |B(re^{\iota \theta})| \end{equation}
for all $\theta \in [0,2\pi)$ and for all $r>R(G)$. From equations (\ref{sde}), (\ref{eq2le}), (\ref{guneq1})and (\ref{eqB3}), for all $z=re^{\iota \theta}$ satisfying $\arg z=\theta \in E^-\setminus \left( E \cup E_1 \right )$ and $|z| =r >R$ we have, \begin{align*}
r ^G &< |B(z)| \\
&\leq |f''(z)/f(z)|+ |A(z)| |f'(z)/f(z)| \\
&\leq r^{2\rho(f)}(1+o(1)) \end{align*} which is a contradiction for arbitrary large $r$. \end{enumerate} Thus all non-trivial solutions of the equation (\ref{sde}) are of infinite order in this case.
\item Let $B(z)$ be a transcendental entire function with Fabry gap. Then from Lemma (\ref{fablemma}), for any given $\epsilon >0, \quad (0<2\epsilon <\rho(B))$, there exists a set $H\subset (1,+\infty)$ satisfying $\overline{\log dense}H \geq \xi$, where $\xi \in (0,1)$ is a constant such that for all $|z| =r \in H$, one has \begin{equation}\label{eq2B}
|B(z)| >\exp{\left(r^{\rho(B)-2\epsilon}\right)} \end{equation}
From equation (\ref{sde}), (\ref{eq2le}), (\ref{guneq1})and (\ref{eq2B}), for all $z$ satisfying $\arg z = \psi_0 \in E^- \setminus (E\cup E_1)$and $|z|=r \in H, \quad r >R_0(\psi_0) $, we have \begin{align*}
\exp{\left( r^{\rho(B)-2\epsilon}\right)}&< |B(z)| \leq |f''(z)/f(z)| + |A(z)| |f'(z)/f(z)| \\
&\leq r^{2\rho(f)}(1+o(1)) \end{align*} which is a contradiction for arbitrary large $r$. \end{enumerate} We thus conclude that all non-trivial solutions of the equation (\ref{sde}) are of infinite order.
Now let us suppose that $f(z)=h(z)e^{Q(z)}$, where $h(z)$and $Q(z)$ are entire functions, be a non-trivial solution of the equation (\ref{sde}) and hence $\rho(f)=\infty$.
First we suppose that $\lambda(f)=\rho(h)< \rho(f)$. Substituting $f=he^{Q}$ into equation (\ref{sde}), we have \begin{equation} h''+ \left( A(z)+2 Q'(z) \right) h'+ \left( B(z) +Q''(z) +(Q')^2(z)+A(z)Q'(z) \right)h=0 \end{equation} which implies that $\rho(h)\geq \max \{\ \rho(A), \rho(B), \rho(Q) \}\ >0$. As a consequence of this, $f$ contains an infinite number of zeros.
If we suppose that $\lambda(f)=\rho(f)=\infty$ then it is clear that $f$ has infinite number of zeros. \end{proof}\section{Further Results} In this section we will extend our result to higher order linear differential equations. We consider the higher order linear differential equation as follows: \begin{equation}\label{sde1} f^{(m)}+A_{(m-1)}(z)f^{(m-1)}+\ldots +A_1(z)f'+A_0(z)f=0 \end{equation} where $m\geq 2$ and $A_0, A_1, \ldots, A_{(m-1)}$ are entire functions. Then it is well known that all solutions of the equation (\ref{sde1}) are entire functions. Moreover, if $A_0, A_1, \ldots, A_{(m-1)}$ are polynomials then all solutions of the equation (\ref{sde1}) are of finite orde and vice-versa \cite{lainebook}. Therefore, if any of the coefficient is a transcendental entire function then equation (\ref{sde1}) will possesses almost all solutions of infinite order. However, conditions on coefficients of the equation (\ref{sde1}) are found so that all solutions are of infinite order \cite{lainebook}. Here, we are giving one of such condition on the coefficients of the equation (\ref{sde1}). \begin{thm} Suppose there exist an integer $j \in \{\ 1,2, \ldots , m-1 \}\ $ such that $ \lambda(A_j)<\rho(A_j)$. Suppose that $A_0$ be a transcendental entire function satisfying $ \rho(A_i) <\rho(A_0)$ where $i=1,2,\ldots m-1, i\neq j$ with \begin{enumerate} \item $\rho(A_0)\neq \rho(A_j)$ or \item $A_0(z)$ be a transcendental entire function with Fabry gap. \end{enumerate}
Then every non-trivial solution of the equation (\ref{sde1}) is of infinite order. In addition, all non-trivial solutions of the equation (\ref{sde1}) has infinite number of zeros. \end{thm} \begin{proof}
First let us suppose that $\rho(A_j)<\rho(A_0)$. Then suppose that there exist a solution $f \not \equiv 0$ of the equation (\ref{sde1}) such that $\rho(f) < \infty$, then by Lemma [\ref{gglog}] (ii) there exists a set $ E_2 \subset (1,\infty)$ that has finite logarithmic measure, such that for all $z$ satisfying $|z|\not \in E_2\cup[0,1]$ such that \begin{equation} \label{guneqq}
|f^{(k)}(z)/f(z)| \leq |z|^{m \rho(f)} \end{equation} where $k=1,2,\ldots, m$. Using equation (\ref{sde1}) and (\ref{guneqq}), we have \begin{align*}
|A_0(z)|&\leq \big|\frac{f^{(m)}(z)}{f(z)}\big| +|A_{(m-1)}(z)|\big|\frac{f^{(m-1)}(z)}{f(z)}\big|+\ldots +|A_1(z)|\big|\frac{f'(z)}{f(z)}\big| \\
&\leq |z|^{m\rho(f)} \{\ 1+|A_{(m-1)}(z)|+\dots +|A_1(z)| \}\ \end{align*}
for all $z$ satisfying $|z| \not \in E_2\cup[0,1]$. From here we get that \begin{equation} T(r,A_0)\leq m\rho(f) \log r+(m-1) T(r,A_i) +O(1) \end{equation}
where $T(r,A_i)=\max \{\ T(r,A_k): k=1,2,\ldots, m-1\}\ $ and $|z|=r \not \in E_2\cup[0,1]$. Using Lemma [\ref{lainebook}], this implies that $\rho(A_0) \leq \rho(A_i)$, which is a contradiction. Thus all non-trivial solutions of the equation (\ref{sde1}) are of infinite order in this case.
Now consider $\rho(A_0) \leq \rho(A_j)$ and there exists a non-trivial solution $f$ of finite order then by Lemma [\ref{gglog}] (i), there exists a set $E_1 \subset [0,2 \pi)$ with linear measure zero such that if $\psi_0 \in [0,2\pi)\setminus E_1, $ then there is a constant $R_0=R_0(\psi_0)>0$ so that for all $z$ satisfying $\arg z =\psi_0$ and $|z| \geq R_0$ we have \begin{equation} \label{guneqqq}
|f^{(k)}(z)/f(z)| \leq |z|^{m\rho(f)} \qquad k=1,2, \ldots, m-1 \end{equation} Since $\rho(A_i)<\rho(A_0)$, for all $i=1,2, \ldots, m-1, i\neq j$ then for any constant $\eta>0$ such that \\ $ \max \{\ \rho(A_i): i=1,2, \ldots m-1, i\neq j \}\ <\eta< \rho(A_0)$ there exists $R_0>0$ such that \begin{equation}\label{Aieq}
|A_i(z)|\leq \exp{|z|^\eta} \end{equation}
where $i=1,2,\ldots, m-1, i\neq j$ and $|z|=r >R_0$. \\ Also $\lambda(A_j)<\rho(A_j)=n$ then $A_j(z)=v(z)e^{P(z)}$, where $v(z)$ is an entire function and $P(z)$ is a polynomial of degree $n$. \begin{enumerate} \item Let $A_0(z)$ be a transcendental entire function with $\rho(A_0)\neq \rho(A_j)$. In this case we need to consider that $\rho(A_0)<\rho(A_j)$. We will discuss following three cases: \begin{enumerate}[(a)] \item\label{casea} suppose $0<\rho(A_0)<\frac{1}{2}$ then by Lemma [\ref{gglemma}], for $0<\epsilon< (\rho(A_0)-\eta)$ there exists a set $S \subset [0,\infty)$ that has upper logarithmic density at least $1-2\rho(A_0)$ such that \begin{equation}\label{A0eq}
|A_0(z)| >\exp (|z|^{\rho(A_0)-\epsilon})
\end{equation} for all $z$ satisfying $|z| \in S.$ Now using equation (\ref{eq2le}), (\ref{sde1}), (\ref{guneqqq}), (\ref{Aieq})and (\ref{A0eq}) we have \begin{align*}
\qquad \qquad \exp{ (|z|^{\rho(A_0)-\epsilon})} &<|A_0(z)| \\
&\leq \big|\frac{f^{(m)}(z)}{f(z)}\big| +|A_{(m-1)}(z)|\big|\frac{f^{(m-1)}(z)}{f(z)}\big| \\
&+\ldots +|A_1(z)|\big|\frac{f'(z)}{f(z)}\big| \\ &\leq r^{m\rho(f)} \{\ 1+ \exp{r^\eta}\\ &+ \ldots + \exp \left( (1-\epsilon) \delta(P,\psi_0)r^n \right)+ \ldots + \exp{r^\eta} \}\ \\ &= r^{m\rho(f)}\{\ 1+ (m-2) \exp{r^\eta} + o(1)\}\ \end{align*}
for all $z$ satisfying $|z|=r \in S$ and $\arg z =\psi_0 \in E^- \setminus (E\cup E_1)$. From here we will get contradiction for sufficiently large $r$. \item Now suppose that $\rho(A_0) \geq \frac{1}{2}$, then using Phragm$\acute{e}$n- Lindel$\ddot{o}$f principle, there exists a sector $\Omega(\alpha, \beta); \quad 0\leq \alpha<\beta \leq 2\pi$ with $\beta-\alpha \geq \frac{\pi}{\rho(A_0)}$ such that
\begin{equation}\label{A0order}
\limsup_{r\rightarrow \infty} \frac{\log^+ \log^+ |A_0(re^{\iota \theta})|}{\log r} =\rho(A_0) \end{equation} for all $\theta \in \Omega(\alpha, \beta)$. Since $\rho(A_0) < \rho(A_j)$ this implies that there exists $\theta_0 \in \Omega(\alpha, \beta) \cap \left(E^- \setminus E \right)$ such that \begin{equation}\label{Ajeq}
|A_j(re^{\iota \theta_0})| \leq \exp{\left( (1-\epsilon)\delta(P,\theta_0)r^n \right)} \end{equation} and from equation (\ref{A0order}), we have \begin{equation}\label{Aoeq}
|A_0(re^{\iota \theta_0})| \geq \exp{r^{\rho(A_0)-\epsilon}} \end{equation} Thus we get contradiction using equation (\ref{sde1}), (\ref{guneqqq}), (\ref{Aieq}), (\ref{Ajeq})and (\ref{Aoeq}) for sufficiently large $r$ by using similar argument as in case (\ref{casea}). \item Suppose $A_0$ be a transcendental entire function with $\rho(A_0)=0$, then using a result from \cite{pd}, for all $\theta \in [0,2\pi)$ one has, \begin{equation}\label{eqA02}
\limsup_{r\rightarrow \infty} \frac{\log |A_0(re^{\iota \theta})|}{\log r} =\infty \end{equation} this implies that for any large $G>0$ there exists $R(G)>0$ such that \begin{equation}\label{eqA03}
r^G \leq |A_0(re^{\iota \theta})| \end{equation} for all $\theta \in [0,2\pi)$ and for all $r>R(G)$. From equations (\ref{eq2le}), (\ref{sde1}), (\ref{guneqqq}), (\ref{Aieq})and (\ref{eqA03}) we get a contradiction for sufficiently large $r$ using similar argument as in case (\ref{casea}).\\ Thus we conclude that all non-trivial solutions of the equation (\ref{sde1}) are of infinite order in this case. \end{enumerate}
\item Suppose that $A_0(z)$ is a transcendental entire function with Fabry gap. Then using Lemma [\ref{fablemma}], for any given $\epsilon >0, \quad (0<2\epsilon <\rho(A_0)-\eta)$, there exists a set $H\subset (1,+\infty)$ satisfying $\overline{\log dense}H \geq \xi$, where $\xi \in (0,1)$ is a constant such that for all $|z|=r \in H$, one has \begin{equation}\label{eq2A0}
|A_0(z)| >\exp{\left(r^{\rho(A_0)-2\epsilon}\right)} \end{equation}
From equation (\ref{eq2le}), (\ref{sde1}), (\ref{guneqqq}), (\ref{Aieq})and (\ref{eq2A0}), for all $z$ satisfying $\arg z = \psi_0 \in E^- \setminus (E\cup E_1)$and $|z| =r \in H$, $r>R_0(\psi_0) $, we have \begin{align*}
\qquad \qquad \exp{\left( r^{\rho(A_0)-2\epsilon}\right)}&< |A_0(z)| \\
&\leq \big|\frac{f^{(m)}(z)}{f(z)}\big| +|A_{(m-1)}(z)|\big|\frac{f^{(m-1)}(z)}{f(z)}\big| \\
&+\ldots +|A_1(z)|\big|\frac{f'(z)}{f(z)}\big| \\ &\leq r^{m\rho(f)} \{\ 1+ \exp{r^\eta} \\ &+ \ldots + \exp \left( (1-\epsilon) \delta(P,\psi_0)r^n \right) \\ &+ \ldots + \exp{r^\eta} \}\ \\ &= r^{m\rho(f)} \{\ 1+ (m-2) \exp{r^\eta+o(1)} \}\ \end{align*} which is a contradiction for arbitrary large $r$. \end{enumerate} Thus all solutions $f\not \equiv 0$ of the equation (\ref{sde1}) are of infinite order. Suppose that $f(z)=h(z)e^{Q(z)}$, where $h(z)$and $Q(z)$ are entire functions, be a non-trivial solution of the equation (\ref{sde1}) therefore $\rho(f)=\infty$. \\ Let us suppose that $\rho(h)=\lambda(f)<\rho(f)$. Now from equation (\ref{sde1}) and Lemma [\ref{ind}] we get \begin{equation}\label{indu} h^{m}+B_{m-1}(z)h^{(m-1)}+\ldots+ B_0(z)h=0 \end{equation} where $$ B_{m-1}=A_{m-1}+mQ'$$ \begin{align*} B_{m-j}&=A_{m-j}+(m-j+1)A_{m-j+1}Q' \\ &+ \sum_{i=2}^{j} \left( {m-j+1 \choose i}(Q')^i+H_{i-1}(Q') \right) A_{m-j+i}. \end{align*} where $j=2,3, \ldots, m$ and $A_m \equiv 1$. Equation (\ref{indu}) implies that $\rho(h)\geq \max \{\ \rho(Q), \rho(A_0), \rho(A_1), \ldots, \rho(A_{m-1} \}\ >0$. Thus $f(z)$ has infinite number of zero.\\ If $\lambda(f)=\rho(f)=\infty$ then also zeros of $f(z)$ are infinite. \end{proof}
\end{document} |
\begin{document}
\begin{center} \today\\[10pt] {\Large\bf Co-elementary Equivalence, Co-elementary Maps, and Generalized Arcs} \\[20pt] Paul Bankston\\ Department of Mathematics, Statistics and Computer Science\\ Marquette University\\ Milwaukee, WI 53233\\[20pt] \end{center} \begin{abstract} By a {\bf generalized arc\/} we mean a continuum with exactly two non-separating points; an {\bf arc} is a metrizable generalized arc. It is well known that any two arcs are homeomorphic (to the real closed unit interval); we show that any two generalized arcs are co-elementarily equivalent, and that co-elementary images of generalized arcs are generalized arcs. We also show that if $f:X \to Y$ is a function between compact Hausdorff spaces and if $X$ is an arc, then $f$ is a co-elementary map if and only if $Y$ is an arc and $f$ is a monotone continuous surjection. \end{abstract}
\section{Introduction and Outline of Results.}\label{1}
A {\bf generalized arc\/} is a continuum (i.e., a connected compact Hausdorff space) that has exactly two non-separating points; an {\bf arc\/} is a metrizable generalized arc. The class of generalized arcs is precisely the class of linearly orderable continua; each generalized arc admitting exactly two compatible linear orders. The class of (continuous images of) generalized arcs has been extensively studied over the years (see \cite{HY,NTT,Wil}); the most well-known results in this area being that any two arcs are homeomorphic (to the standard closed unit interval on the real line); and (Hahn-Mazurkiewicz) that a Hausdorff space is a continuous image of an arc if and only if that space is a locally connected metrizable continuum. In this paper, a continuation of \cite{Ban3}, we study the model-theoretic topology of generalized arcs; in particular, the ``dualized model theory'' of these spaces.
Many notions from classical first-order model theory, principally elementary equivalence and elementary embedding, may be phrased in terms of mapping conditions involving the ultraproduct construction. Because of the (Keisler-Shelah) ultrapower theorem (see, e.g., \cite{CK}), two relational structures are elementarily equivalent if and only if some ultrapower of one is isomorphic to some ultrapower of the other; a function from one relational structure to another is an elementary embedding if and only if there is an ultrapower isomorphism so that the obvious square mapping diagram commutes (see, e.g., \cite{Ban2,Ban5,Ekl}). The ultrapower construction in turn is a direct limit of direct products, and is hence capable of being transferred into a purely category-theoretic setting. In this paper we focus on the category $\bf CH$ of compact Hausdorff spaces and continuous maps, but perform the transfer into the opposite category (thus justifying the phrase ``dualized model theory'' above).
In $\bf CH$ one then constructs ultracoproducts, and talks of co-elementary equivalence and co-elementary maps. Co-elementary equivalence is known \cite{Ban2,Ban5,Gur} to preserve important properties of topological spaces, such as being infinite, being Boolean (i.e., totally disconnected), having (Lebesgue) covering dimension $n$, and being a decomposable continuum. If $f:X \to Y$ is a co-elementary map in $\bf CH$, then of course $X$ and $Y$ are co-elementarily equivalent (in symbols $X \equiv Y$). Moreover, since $f$ is a continuous surjection (see \cite{Ban2}), additional information about $X$ is transferred to $Y$. For instance, continuous surjections in $\bf CH$ cannot raise {\bf weight\/} (i.e., the smallest cardinality of a possible topological base, and for many reasons the right cardinal invariant to replace cardiality in the dualized model-theoretic setting), so metrizability (i.e., being of countable weight in the compact Hausdorff context) is preserved. Also local connectedness is preserved, since continuous surjections in $\bf CH$ are quotient maps. Neither of these properties is an invariant of co-elementary equivalence alone.
When attention is restricted to the full subcategory of Boolean spaces, the dualized model theory matches perfectly with the model theory of Boolean algebras because of Stone duality. In the larger category there is no such match \cite{Bana,Ros}, however, and one is forced to look for other (less direct) model-theoretic aids. Fortunately there is a finitely axiomatizable Horn class of bounded lattices, the so-called {\it normal disjunctive\/} lattices \cite{Ban8} (also called {\it Wallman\/} lattices in \cite{Ban5}), comprising precisely the (isomorphic copies of) lattices that serve as bases for the closed sets of compact Hausdorff spaces. We go from lattices to spaces, as in the case of Stone duality, via the {\bf maximal spectrum\/} $S(\;)$, pioneered by H. Wallman \cite{Walm}. $S(A)$ is the space of maximal proper filters of $A$; a typical basic closed set in $S(A)$ is the set of elements of $S(A)$ containing a given element of $A$. $S(\;)$ is contravariantly functorial; if $f:A \to B$ is a homomorphism of normal disjunctive lattices and $M \in S(B)$, then $f^S(M)$ is the unique maximal filter in $A$ containing the pre-image of $M$ under $f$. It is a fairly straightforward task to show, then, that $S(\;)$ converts ultraproducts to ultracoproducts, elementarily equivalent lattices to co-elementarily equivalent compact Hausdorff spaces, and elementary embeddings to co-elementary maps (see \cite{Ban2,Ban4,Ban5,Ban8,Gur}). An important consequence of this is a L\"{o}wenheim-Skolem theorem for co-elementary maps: every compact Hausdorff space maps co-elementarily onto a compact metrizable space. (This result is used in \ref{2.4} and \ref{2.6} below.)
In \cite{Ban3} we showed that any locally connected metrizable space co-elementarily equivalent to an arc is already an arc; here we present the following results. $(i)$ if $f:X \to Y$ is a co-elementary map in $\bf CH$, and if $Y$ is locally connected (in particular, a generalized arc), then $f$ is a monotone continuous surjection; $(ii)$ co-elementary images of (generalized) arcs are (generalized) arcs; $(iii)$ any two generalized arcs are co-elementarily equivalent; $(iv)$ if $X$ is a generalized arc and $f:X \to Y$ is an irreducible co-elementary map in $\bf CH$, then $f$ is a homeomorphism; $(v)$ if every locally connected co-elementary pre-image of an arc is a generalized arc, then every locally connected compact Hausdorff space co-elementarily equivalent to a generalized arc is also a generalized arc; and $(vi)$ if $X$ is an arc and $f$ is a function from $X$ to a compact Hausdorff space $Y$, then $f$ is a co-elementary map if and only if $Y$ is an arc and $f$ is a monotone continuous surjection.
Local connectedness is necessarily a part of $(v)$ above. We do not know at present whether the hypothesis in $(v)$ is true; nor do we know whether monotone surjections between generalized arcs are always co-elementary maps.
\subsection{Remark.}\label{1.1} By way of contrast, there is a Boolean analogue to some of the results above. Define a {\bf generalized Cantor set\/} to be any non-empty Boolean space with no isolated points, and a {\bf Cantor set\/} to be a metrizable generalized Cantor set. It is well known that any two Cantor sets are homeomorphic (to the standard Cantor middle thirds set in the real line), and that the generalized Cantor sets are precisely the Stone duals of the atomless Boolean algebras, constituting an elementary class whose first-order theory is $\aleph_0$-categorical, complete, and model complete. In $(ii)$ and $(iii)$, one may replace ``arc'' with ``Cantor set'' uniformly; a straightforward application of $\aleph_0$-categoricity. The analog of $(iv)$ is false (see Example 3.3.4$(iv)$ in \cite{Ban2}); the projective cover map to a generalized Cantor set is always an irreducible co-elementary map between (seldom-homeomorphic) generalized Cantor sets. As for $(v)$, it follows from the results on dimension in \cite{Ban2} that any compact Hausdorff space co-elementarily equivalent to a generalized Cantor set is itself a generalized Cantor set. Finally, regarding $(vi)$, {\it all\/} continuous surjections between generalized Cantor sets are co-elementary maps. This is a direct consequence of the model completeness of the theory of atomless Boolean algebras.\\
\section{Methods and Proofs.}\label{2} We begin with a proof of $(i)$ above. Recall that a map $f:X \to Y$ is {\bf monotone\/} (resp. {\bf strongly monotone\/}) if the inverse image of a point (resp. a closed connected subset) of $Y$ is connected in $X$.
\subsection{Proposition.}\label{2.1} Let $f:X \to Y$ be a co-elementary map in $\bf CH$, with $Y$ locally connected. Then $f$ is a strongly monotone continuous surjection.\\
\noindent {\bf Proof.\/} Assume $f:X \to Y$ is co-elementary, $Y$ is locally connected, and $f$ is not strongly monotone. Then there is a subcontinuum $S$ of $Y$ such that the inverse image $A := f^{-1}[S]$ is disconnected. Since $A$ is closed, we can write $A = A_1 \cup A_2$ where each $A_i$ is closed non-empty, and $A_1 \cap A_2 = \emptyset$. Let $U_i$ be an open neighborhood of $A_i$, with $U_1 \cap U_2 = \emptyset$. If $C$ is a subcontinuum of $X$ containing $A$, then we can pick some $x_C \in C \setminus (U_1 \cup U_2)$. Let $B$ be the closure of the set of all such points $x_C$, as $C$ ranges over all subcontinua containing $A$. Since no point $x_C$ lies in $U_1 \cup U_2$, $B$ is disjoint from $A$, but intersects every subcontinuum of $X$ that contains $A$.
Now $f[B]$ is closed in $Y$ and disjoint from $S$. Let $W$ be an open neighborhood of $S$ whose closure misses $f[B]$. Since $Y$ is locally connected, we have, for each $y \in S$, a connected open neighborhood $V_y$ of $y$ such that $V_y \subseteq W$. Since $S$ is connected, so also is $V := \bigcup_{y \in S}V_y$; and the closure $K$ of $V$ is a subcontinuum containing $S$. Since $V \subseteq W$, and the closure of $W$ is disjoint from $f[B]$, we know that $K$ is also disjoint from $f[B]$. We need a fact proved elsewhere.\\
\noindent {\bf Lemma.\/}(Lemma 2.8 in \cite{Ban5}) Let $f:X \to Y$ be a co-elementary map in $\bf CH$, with $K \subseteq Y$ a subcontinuum. Then there is a subcontinuum $C \subseteq X$ such that $K = f[C]$, and whenever $V \subseteq K$ is open in $Y$, $f^{-1}[V] \subseteq C$.\\
Using the Lemma, there exists a subcontinuum $C \subseteq X$ such that $f[C] = K$ and $f^{-1}[V] \subseteq C$. Let $x \in A$. Then there is a neighborhood $U$ of $x$ with $f[U] \subseteq V$. Thus $x \in U \subseteq f^{-1}[V] \subseteq C$, hence we infer $A \subseteq C$. Every subcontinuum of $X$ containing $A$ must intersect $B$, so $\emptyset \neq f[B \cap C] \subseteq f[B] \cap f[C] = f[B] \cap K = \emptyset$. This contradiction completes the proof. $\dashv$
\subsection{Remark.}\label{2.2} The Lemma above provides only a weak consequence of co-elementarity. Indeed, the usual projection map from the standard closed unit square in the plane onto its first co\"{o}rdinate is not co-elementary because it does not preserve topological dimension. Nevertheless, it does satisfy the conclusion of the Lemma.\\
Now we are in a position to prove $(ii)$.
\subsection{Proposition.}\label{2.3} Let $f:X \to Y$ be a co-elementary map in $\bf CH$. If $X$ is a generalized arc, then so is $Y$.\\
\noindent {\bf Proof.\/} Let $f:X \to Y$ be a co-elementary map in $\bf CH$, with $X$ a generalized arc. $Y$ is a locally connected continuum because $X$ is locally connected and $f$ is a continuous surjection. By \ref{2.1}, $f$ is monotone; it remains to show $Y$ has precisely two non-separating points.
Let $a, b \in X$ be the two non-separating points of $X$. $Y$ is non-degenerate because of co-elementarity; monotonicity then tells us that $f(a) \neq f(b)$. If $f(a)$ were to separate $Y$, we could also separate $X \setminus K$, where $K:= f^{-1}[\{f(a)\}]$ is a subcontinuum (i.e., closed subinterval) containing the endpoint $a$. This is easily seen to be impossible for generalized arcs. Now let $y \in Y \setminus \{f(a),f(b)\}$, with $K := f^{-1}[\{y\}]$. Then $K$ is a subcontinuum of $X$ containing neither endpoint. Thus $X \setminus K$ is disconnected; hence $y$ separates $Y$. We therefore conclude that $Y$ is a generalized arc. $\dashv$\\
We can very quickly settle $(iii)$.
\subsection{Proposition.}\label{2.4} Let $X$ and $Y$ be two generalized arcs. Then $X \equiv Y$.\\
\noindent {\bf Proof.\/} Let $X$ and $Y$ be generalized arcs. By the L\"{o}wenheim-Skolem theorem for co-elementary maps, there exist co-elementary maps $f:X \to X_0$ and $g:Y \to Y_0$, where $X_0$ and $Y_0$ are compact metrizable. By \ref{2.3}, the images are generalized arcs; hence they are arcs. Thus $X_0$ and $Y_0$ are homeomorphic, and we conclude $X \equiv Y$ because co-elementary equivalence is an honest equivalence relation \cite{Ban2}. $\dashv$\\
To handle $(iv)$, recall that a continuous surjection $f:X \to Y$ is {\bf irreducible\/} if $Y$ is not the image under $f$ of a proper closed subset of $X$.
\subsection{Proposition.}\label{2.5} Let $f:X \to Y$ be an irreducible co-elementary map in $\bf CH$. If $X$ is a generalized arc, then $f$ is a homeomorphism.\\
\noindent {\bf Proof.\/} It suffices to show $f$ is one-one. Let $y \in Y$, with $K := f^{-1}[\{y\}]$, a subcontinuum of $X$ by \ref{2.1}. Since $X$ is a generalized arc, $K$ is either a singleton or a closed subinterval with non-empty interior. The latter case easily contradicts the irreducibility of $f$, however. $\dashv$\\
In \cite{Gur} it is shown that every infinite compact Hausdorff space is co-elementarily equivalent to a compact Hausdorff space that is not locally connected. (See also \cite{Ban5} for refinements.) This explains the necessity of the local connectedness hypothesis in $(v)$.
\subsection{Proposition.}\label{2.6} Suppose every locally connected co-elementary pre-image of an arc is a generalized arc. Then every locally connected compact Hausdorff space co-elementarily equivalent to a generalized arc is itself a generalized arc.\\
\noindent {\bf Proof.\/} Suppose $X \in \bf CH$ is locally connected, $X \equiv Y$, and $Y$ is a generalized arc. As in the proof of \ref{2.4} above, we have co-elementary maps $f:X \to X_0$ and $g:Y \to Y_0$, where $X_0$ and $Y_0$ are metrizable. Furthermore, we know that $X_0$ is locally connected and that $Y_0$ is an arc (\ref{2.1} again). By the transitivity of co-elementary equivalence, we know $X_0 \equiv Y_0$; by the main result of \cite{Ban3}, we know $X_0$ is an arc. Our hypothesis then tells us that $X$ is a generalized arc. $\dashv$\\
We finish with a proof of $(vi)$. If $X$ is an arc and $f:X \to Y$ is a co-elementary map in $\bf CH$, then $Y$ is an arc and $f$ is a monotone continuous surjection by \ref{2.1} and \ref{2.3}. So it suffices to prove the following.
\subsection{Proposition.}\label{2.7} Every monotone continuous surjection from an arc to itself is a co-elementary map.\\
\noindent {\bf Proof. } Let us take our arc to be the standard closed unit interval ${\bf I}$ with its usual order. $f$ is either $\leq$-preserving or $\leq$-reversing, so we lose no generality in assuming $f$ to be the former.
For any topological space $X$, we denote the closed set lattice of $X$ by $F(X)$. $F(\;)$ converts continuous maps contravariantly into lattice homomorphisms, and serves as a right inverse for $S(\;)$: $S(F(X))$ is naturally homeomorphic to $X$ for any compact Hausdorff $X$.
Monotone continuous surjections from $\bf I$ to itself are strongly monotone; hence $f^F:F({\bf I}) \to F({\bf I})$ is a lattice embedding that takes closed intervals (in this case the connected elements of the lattice) to closed intervals. However, $f^F$ will take atoms to non-atoms when $f$ is not injective. Thus $f^F$ is not an elementary embedding without being an isomorphism. The idea is to restrict the domain and range of $f^F$ in such a way that the resulting lattice embedding, call it $g$, is elementary, and $g^S = f$.
Our plan is to create an elementary lattice embedding $g:{\cal A} \to {\cal B}$, where ${\cal A}$ and ${\cal B}$ are atomless lattice bases for ${\bf I}$ (i.e., both $\cal A$ and $\cal B$ are atomless, as well as meet-dense in $F({\bf I})$), and $g$ agrees with the restriction of $f^F$ to ${\cal A}$.
Since $S(\cal A)$ and $S(\cal B)$ are naturally homeomorphic to $\bf I$, and $f$ is just $g^S$ conjugated with these homeomorphisms, $f$ is a co-elementary map provided $g^S$ is.
For each $y \in \bf I$, let $\lambda (y) := \inf(f^{-1}[\{y\}])$ and $\rho (y) := \sup(f^{-1}[\{y\}])$. Then for any closed interval $[x,y] \in F(\bf I)$, $f^F([x,y]) = [\lambda (x),\rho (y)]$. Both $\lambda$ and $\rho$ are right inverses for $f$, and are hence strictly increasing (but not necessarily continuous). Of course $\lambda (0) = 0$ and $\rho (1) = 1$.
Let $L,R \subseteq \bf I$, with $0 \in L$ and $1 \in R$. If ${\cal I}(L,R)$ denotes the set of all finite unions of intervals $[x,y]$ with $x \in L$ and $y \in R$, then ${\cal I}(L,R)$ is a sublattice of $F(\bf I)$, which is atomless just in case $L \cap R = \emptyset$. If $L$ and $R$ are dense in $\bf I$, then ${\cal I}(L,R)$ is a lattice base as well.
Now fix $L,R \subseteq \bf I$ to be disjoint countable dense subsets, with $0 \in L$ and $1 \in R$, and set ${\cal A} := {\cal I}(L,R)$. Then the image of $\cal A$ under $f^F$ is ${\cal I}(\lambda [L],\rho[R])$. Clearly $\lambda [L] \cap \rho [R] = \emptyset$, $0 \in \lambda [L]$, and $1 \in \rho [R]$. Let $L', R' \subseteq \bf I$ be disjoint countable dense subsets, with $\lambda [L] \subseteq L'$, $\rho [R] \subseteq R'$, and set ${\cal B} := {\cal I}(L',R')$. Then $\cal B$ is a countable atomless lattice base for $F(\bf I)$, and we denote by $g:{\cal A} \to \cal B$ the embedding $f^F$ with its domain and range so restricted. It remains to show that $g$ is an elementary embedding, and for this it suffices to show that for each finite set $S$ in $\cal A$ and each $b \in \cal B$, there is an automorphism on $\cal B$ that fixes $g[S]$ pointwise and takes $b$ into $g[\cal A]$.
Let $x_1, ..., x_n$ be a listing, in increasing order, of the endpoints of the component intervals of $g[S] \cup \{b\}$ (so each $x_i$ is in $L' \cup R'$), with $X_i := f^{-1}[\{f(x_i)\}]$, $1 \leq i \leq n$. Each $X_i$ is either a singleton or a non-degenerate closed interval, and for $1 \leq i < j \leq n$, either $X_i = X_j$ or each element of $X_i$ is less than each element of $X_j$. Let $U_i$ be an open-interval neighborhood of $X_i$ such that $U_i \cap U_j = \emptyset$ whenever $X_i \neq X_j$. Since $f$ is a $\leq$-preserving surjection and the sets $L$ and $R$ are dense in $\bf I$, each $U_i$ has infinite intersection with both $\lambda [L]$ and $\rho [R]$. If $x_i \in \lambda [L] \cup \rho [R]$, set $x_i' := x_i$. Otherwise we know $x_i$ is an endpoint of a component interval of $b$; and we choose $x_i' \in U_i$ in such a way that $x_i' \in \lambda [L]$ if and only if $x_i \in L'$, and $x_i' < x_j'$ whenever $x_i < x_j$ and $X_i = X_j$. This procedure produces an increasing sequence $x_1',...,x_n'$ of elements of $\lambda [L] \cup \rho [R]$; $x_i' \in \lambda [L]$ if and only if $x_i \in L'$. For each $a \in g[S] \cup \{b\}$, let $a'$ be built up using the endpoints $x_i'$ in the same way as $a$ is built up using the endpoints $x_i$. Then $a' = a$ for each $a \in g[S]$, and $b' \in g[\cal A]$. Now by a classic (Cantor) back and forth argument, there is an order automorphism on $L' \cup R'$ that fixes $L'$ and $R'$ setwise and takes $x_i$ to $x_i'$ for $1 \leq i \leq n$. This order automorphism gives rise to the lattice automorphism on $\cal B$ that we require. $\dashv$\\
\end{document} |
\begin{document}
\twocolumn[ \icmltitle{On The Assumption of Shared Multitask Models for Continual Learning}
\icmlsetsymbol{equal}{*}
\begin{icmlauthorlist} \icmlauthor{Liangzu Peng}{JHU} \icmlauthor{Paris V. Giampouras}{JHU} \icmlauthor{Ren\'e Vidal}{UPenn} \end{icmlauthorlist}
\icmlaffiliation{JHU}{Mathematical Institute for Data Science, Johns Hopkins University} \icmlaffiliation{UPenn}{Institute for Data Engineering and Science, University of Pennsylvania}
\icmlcorrespondingauthor{Liangzu Peng}{lpeng25@jhu.edu}
\icmlkeywords{Continual Learning}
\vskip 0.3in ]
\printAffiliationsAndNotice{}
\begin{abstract} 1 \end{abstract}
\section{Introduction} \begin{align}\label{eq:task-t-dagger}
\mathcal{G}_t:=\mathop{\rm argmin}_{\bm{w} \in \mathcal{W}} \sum_{i=1}^{m_t} \ell_t(\bm{w};\bm{d}_{ti}). \end{align} $\mathcal{W}\subset \mathbb{R}^n$
We might assume $\mathcal{W} = \mathbb{R}^n$.
\subsection{Shared Multitask Models}
\begin{example} Two regression tasks: \begin{equation}
\begin{split}
\mathcal{G}_1&=\mathop{\rm argmin}_{\bm{w}\in\mathbb{R}^n} \|\bm{X}_1 \bm{w} - \bm{y}_1\|_2^2 \\
\mathcal{G}_2&= \mathop{\rm argmin}_{\bm{w}\in\mathbb{R}^n} \|\bm{X}_2 \bm{w} - \bm{y}_2 \|_2^2 \\
\{ \hat{\bm{w}} \} &=\mathop{\rm argmin}_{\bm{w}\in\mathbb{R}^n} \|\bm{X}_1 \bm{w} - \bm{y}_1 \|_2^2 + \|\bm{X}_2 \bm{w} - \bm{y}_2 \|_2^2 \\
\end{split} \end{equation} \end{example}
\begin{itemize}
\item All definitions of \textit{forgetting} are related to a certain type of multitask loss (e.g., the sum of errors). This implicitly assumes that the multitask loss is good.
\item In many experimental reports, the accuracy of multitask learning is used as a reference. In a certain sense, this implicitly acknowledges the assumption of shared multitask models. \end{itemize}
Without the assumption of shared multitask models: \begin{itemize}
\item There is no single model that solves all tasks. We need to define the \textit{ground-truth} for continual learning.
\item Such ground-truth might have nothing to do with the global minimizers of multitask learning. Therefore, the existing definitions of forgetting might become invalid.
\item We need to rethink multitask learning; without the assumption of shared multitask models, multitask learning might not be well-posed! \end{itemize}
\subsection{Alternative Assumption}
\begin{assumption}[Union-of-Tasks Models]\label{assumption:UoT} There are $K$ models $\hat{\bm{w}}_1,\dots,\hat{\bm{w}}_K\in \mathcal{W}$ such that, for each $t$ there exists some $\hat{\bm{w}}_k \in \{ \hat{\bm{w}}_1,\dots,\hat{\bm{w}}_K \} $ such that $\ell_t(\hat{\bm{w}}_k;\bm{d}_{ti})=0$ for each $i$. \end{assumption}
If $K=1$, \cref{assumption:UoT} asserts there is some $\hat{\bm{w}}_1\in\mathcal{W}\subset \mathbb{R}^n$ satisfying $\ell_t(\hat{\bm{w}}_1;\bm{d}_{ti})=0$ for every $i$ and $t$. This occurs when $n$ is very large (i.e., the task is sufficiently overparametrized). Furthermore, since every loss $\ell_t$ takes non-negative values, $\hat{\bm{w}}_1$ is a shared global minimizer of all tasks. Thus \cref{assumption:UoT} with $K=1$ becomes an overparametrized version of the shared multitask assumption.
The case $K=T$ corresponds to where we overparameterize the model for each individual task.
In the case $1<K<T$, there are two tasks whose losses, say $\ell_1$ and $\ell_2$, are minimized at the same model. Therefore, we can ``\textit{merge}'' the two tasks by replacing them with a new loss $\ell_1 + \ell_2$. As a result, we can assume $K=T$ without loss of generality.
\begin{problem}[Multitask Learning]\label{problem:MTL} Under \cref{assumption:UoT}, recover the number $T$ of models (tasks), and all models $\hat{\bm{w}}_1,\dots,\hat{\bm{w}}_T$ from all the data and losses. (Assume also we do not know which data sample belongs to which task.) \end{problem}
\begin{problem}[Continual Learning]
A sequential version of \cref{problem:MTL}, where data samples come in a streaming fashion and we do not know which task they belong to. \end{problem}
In other words, we need to solve the equations \begin{equation}
\prod_{t}\ell_t (\bm{w}_t; \bm{d}_{i}) =0,\ \ \forall i=1,\dots,m \end{equation} for $\bm{w}_1,\dots,\bm{w}_T$.
\subsection{11}
\appendix \onecolumn
\end{document} |
\begin{document}
\begin{abstract} Reiner, Shaw and van Willigenburg showed that if two skew Schur functions $s_A$ and $s_B$ are equal, then the skew shapes $A$ and $B$ must have the same ``row overlap partitions.'' Here we show that these row overlap equalities are also implied by a much weaker condition than Schur equality: that $s_A$ and $s_B$ have the same support when expanded in the fundamental quasisymmetric basis $F$. Surprisingly, there is significant evidence supporting a conjecture that the converse is also true.
In fact, we work in terms of inequalities, showing that if the $F$-support of $s_A$ contains that of $s_B$, then the row overlap partitions of $A$ are dominated by those of $B$, and again conjecture that the converse also holds. Our evidence in favor of these conjectures includes their consistency with a complete determination of all $F$-support containment relations for $F$-multiplicity-free skew Schur functions. We conclude with a consideration of how some other quasisymmetric bases fit into our framework. \end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
For well-documented reasons (see, for example, \cite{Ful97, Ful00, Sag01, ec2}), the Schur functions $s_\lambda$ are often considered to be the most important basis for symmetric functions. Furthermore, skew Schur functions $s_{\lambda/\mu}$ are both a natural generalization of Schur functions and a fundamental example of \emph{Schur-positive} functions, meaning that when expanded in the basis of Schur functions, all the coefficients are nonnegative. The coefficients that result are the Littlewood--Richardson coefficients, which also arise in the representation theory of the symmetric and general linear groups, in the study of the cohomology ring of the Grassmannian, and in certain problems about eigenvalues of Hermitian matrices. More information on these connections can be found in the aforementioned references.
For skew shapes $A$ and $B$, determining conditions for the expression \begin{equation}\label{equ:difference} s_A -s_B \end{equation} to be Schur-positive is a problem that has received much attention in recent years. See, for example, \cite{BBR06, FFLP05, KWvW08, Kir04, LPP07, LLT97, McN08, McvW09b, McvW12, Oko97}. It is well known that this question is currently intractable when stated in anything close to full generality. A weaker condition than $s_A-s_B$ being Schur-positive is that the Schur support of $s_B$ is contained in the Schur support of $s_A$. The \emph{Schur support} of $s_A$, also called the Schur support of $A$ and denoted $\ssupp{A}$, is defined to be the set of those $\lambda$ for which $s_\lambda$ appears with nonzero coefficient when we expand $s_A$ as a linear combination of Schur functions. Support containment for skew Schur functions is directly relevant to the results of \cite{DoPy07, FFLP05, McN08, McvW09b, McvW12}; let us give the flavor of just one beautiful result about the support of skew Schur functions. There exist Hermitian matrices $A$, $B$ and $C=A+B$, with eigenvalue sets $\mu$, $\nu$ and $\lambda$ respectively, if and only if $\nu$ is in the Schur support of $s_{\lambda/\mu}$. (See the survey \cite{Ful00} and the references therein.)
Of the aforementioned papers, the most relevant to the present work is \cite{McN08}, which gives necessary conditions on $A$ and $B$ for $s_A - s_B$ to be Schur-positive or, more generally, for the Schur support of $A$ to contain that of $B$. These conditions are in terms of dominance order on $\rowsk{k}{A}$, which are partitions first defined in \cite{RSvW07} and which count certain overlaps among the rows of $A$. We will put our new results in context below by comparing them with the results of \cite{McN08}.
Our goal is to further our understanding of the expression \eqref{equ:difference} and the $\rowsk{k}{A}$ conditions by moving to the setting of quasisymmetric functions. One starting point for information on the importance and many applications of quasisymmetric functions is \cite{Wik13} and the references therein. We will place particular emphasis on the expansion of skew Schur functions in terms of Gessel's basis of fundamental quasisymmetric functions \cite{Ges84}, whose elements we denote by $F_\alpha$ for a composition $\alpha$. Gessel's original applications of the $F$-basis were in studying $P$-partitions of posets and in enumerating certain permutations. Like Schur functions, the $F_\alpha$ have a representation-theoretic significance, arising as the characteristics of the irreducible characters of the (type $A$) 0-Hecke algebra \cite{DKLT96, KrTh97}.
By working in terms of the $F$-basis, we are able to make the advances listed in (a)--(e) below. The concepts of $F$-positivity and $F$-support are defined, as one would expect, by considering expansions of skew Schur functions in terms of the $F$-basis instead of the Schur basis. As shown by \cite[Theorem~7.19.7]{ec2} which appears as Theorem~\ref{thm:fexpansion} below, Schur functions are examples of $F$-positive functions. The diagram shown in Figure~\ref{fig:implications} summarizes implications that are central to this paper. The first two horizontal arrows are by definition of support, while the diagonal arrows are due to Schur functions being $F$-positive. The rightmost arrow is our main result, Theorem~\ref{thm:fsupport}. That this arrow could be replaced by the symbol $\Longleftrightarrow$ is Conjecture~\ref{con:fsupport}. Before giving more details, let us give examples which will be relevant to the discussion that follows. \begin{figure}
\caption{A summary of the implications most pertinent to this paper. Here and elsewhere, $A$ and $B$ are skew shapes, and $\ssupp{A}$ (respectively $\fsupp{A}$) denotes the Schur support (resp.\ $F$-support) of $A$.}
\label{fig:implications}
\end{figure} \begin{example}\label{exa:intro} The three skew shapes shown here tend to be useful for providing counterexamples. \[ \begin{array}{cccccc} & \begin{tikzpicture}[scale=0.4] \begin{scope} \draw (-1.5,1.5) node {$A_1=$}; \draw[thick] (0,0) -- (1,0) -- (1,2) -- (3,2) -- (3,3) -- (1,3) -- (1,2) -- (0,2) -- cycle; \draw (2,3) -- (2,2); \draw (0,1) -- (1,1); \end{scope} \end{tikzpicture} && \begin{tikzpicture}[scale=0.4] \begin{scope} \draw (-1.5,1.5) node {$A_2=$}; \draw[thick] (0,0) -- (1,0) -- (1,1) -- (2,1) -- (2,2) -- (3,2) -- (3,3) -- (1,3) -- (1,1) -- (0,1) -- cycle; \draw (2,3) -- (2,2) -- (1,2); \end{scope} \end{tikzpicture} && \begin{tikzpicture}[scale=0.4] \begin{scope} \draw (-1.5,1) node {$A_3=$}; \draw[thick] (0,0) -- (2,0) -- (2,2) -- (0,2) -- cycle; \draw (1,0) -- (1,2); \draw (0,1) -- (2,1); \end{scope} \end{tikzpicture}
\\ \mbox{Schur} & s_{31} + s_{211} && s_{31} + s_{22} + s_{211} && s_{22} \\ \mbox{expansion}
\\ \mbox{$F$-expansion} & F_{31} + F_{13} + F_{22} + {} && F_{31} + F_{13} + 2F_{22} + {} && F_{22} + F_{121} \\ & F_{211} + F_{121} + F_{112} && F_{211} + 2F_{121} + F_{112} \end{array} \] \end{example}
As promised, here are the full details of our advances. \begin{enumerate} \item\label{ite:equality} It is shown in \cite{RSvW07} that if $s_A = s_B$ for skew shapes $A$ and $B$, then $A$ and $B$ have equal sets of row overlap partitions. This result was strengthened in \cite{McN08} by showing that the same conclusion holds under the weaker assumption that the Schur supports of $A$ and $B$ are equal. We show in Corollary~\ref{cor:equality} that the $F$-supports of $A$ and $B$ being equal is enough to imply $A$ and $B$ have equal sets of row overlap partitions. This is a strengthening of the result from \cite{McN08} for the following reasons: if the Schur supports of $A$ and $B$ are equal, then it follows from \cite[Theorem~7.19.7]{ec2} that their $F$-supports are equal. However, the converse is not true, as shown by $A_1$ and $A_2$ of Example~\ref{exa:intro}. \item\label{ite:fpositive} In a similar vein, it is shown in \cite{McN08} that if $s_A-s_B$ is Schur-positive, then the row overlap partitions of $A$ are dominated by those of $B$. We show in Corollary~\ref{cor:fpositive} that the same conclusion can be drawn under the weaker assumption that $s_A - s_B$ is $F$-positive. Referring to Example~\ref{exa:intro}, consider $s_{A_1} - s_{A_3}$ for an expression that is $F$-positive but not Schur-positive. \item\label{ite:fsupport} The two previous advances both follow from the following stronger new result in terms of supports. It is shown in \cite{McN08} that if the Schur support of $A$ contains that of $B$, then the row overlap partitions of $A$ are dominated by those of $B$. We prove in Theorem~\ref{thm:fsupport} that the same conclusion can be drawn under the weaker assumption that the $F$-support of $A$ contains that of $B$. Again, $A_1$ and $A_3$ serve as an example.
As an application, the contrapositive of Theorem~\ref{thm:fsupport} gives a very simple way to show that the $F$-support of $A$ does not contain the $F$-support of $B$, which implies, among other things, that $s_A-s_B$ is not Schur-positive.
\item\label{ite:conjecture} As shown by $A_1$ and $A_3$ of Example~\ref{exa:intro}, it is certainly not the case that if the row overlaps of $A$ are dominated by those of $B$, then the Schur support of $A$ contains that of $B$. However, we offer Conjecture~\ref{con:fsupport}: if $A$ and $B$ have the same number of boxes, then the row overlaps of $A$ are dominated by those of $B$ if and only if the $F$-support of $A$ contains that of $B$. As a result, examining the row overlaps would give a quick way to determine containment of $F$-supports. In terminology we will define, the conjecture implies that the $F$-support poset is isomorphic to the overlaps poset. Therefore, the conjecture asserts that $F$-support containment somehow encapsulates exactly the relationship implied by dominance of row overlap partitions. Cases for which Conjecture~\ref{con:fsupport} holds include ribbons whose rows all have length at least 2, and all skew shapes with at most 12 boxes.
\item\label{ite:multfree} Bessenrodt and van Willigenburg \cite{BevW13} have classified all those skew shapes $A$ that are $F$-multiplicity-free, i.e., when $s_A$ is expanded in the $F$-basis, all coefficients are 0 or 1. In Theorem~\ref{thm:multfree}, we determine completely the $F$-positivity and $F$-support comparabilities among $F$-multiplicity-free skew shapes. The analogous relationships for the Schur multiplicity-free skew shapes are only known in special cases (for example, see \cite{McvW09b} for Schur multiplicity-free ribbons). We then show that these $F$-support comparabilities are exactly as predicted by Conjecture~\ref{con:fsupport}. \end{enumerate}
We conclude with a consideration of other quasisymmetric function bases, specifically the monomial quasisymmetric functions, the quasisymmetric Schur functions of Haglund et al.\ \cite{HLMvW11}, and the dual immaculate basis of Berg et al.\ \cite{BBSSZpr}. We augment Figure~\ref{fig:implications} by determining the positivity and support-containment implications involving these bases (see Figure~\ref{fig:moreimplications}).
The rest of the paper is organized as follows. We give the preliminaries and relevant prior results in Sections~\ref{sec:prelims} and~\ref{sec:priorresults}, respectively. Result \eqref{ite:fsupport} above and its consequences \eqref{ite:equality} and \eqref{ite:fpositive} are the topic of Section~\ref{sec:onedirection}. In Section~\ref{sec:mainconjecture}, we present the converse conjecture (Conjecture~\ref{con:fsupport}) and offer evidence in its favor. Section~\ref{sec:multfree} contains the results from \eqref{ite:multfree} about $F$-multiplicity-free skew shapes. We conclude in Section~\ref{sec:conclusion} with a consideration of how other quasisymmetric function bases fit into our framework.
\section{Preliminaries}\label{sec:prelims}
\subsection{Compositions, partitions and skew shapes}
Given a nonnegative integer $n$, a \emph{composition} of $n$ is a sequence $\alpha$ of positive integers whose sum is $n$. We call $n$ the \emph{size} of $\alpha$ and denote it $|\alpha|$. If $\alpha$ is weakly decreasing then it is said to be a \emph{partition} of $n$. Let $\varnothing$ denote the unique partition of 0.
We will follow the custom of letting $[n]$ denote the set $\{1,\ldots,n\}$. For fixed $n$, there is a well-known bijection from compositions $\alpha = (\alpha_1, \ldots, \alpha_k)$ of $n$ to subsets of $[n-1]$ that sends $\alpha$ to the set $S(\alpha)$ defined by \[ S(\alpha) = \{\alpha_1, \alpha_1+\alpha_2, \ldots, \alpha_1+\alpha_2+\cdots+\alpha_{k-1}\}. \] If $S(\alpha)=T$, then we say $\alpha$ is the composition corresponding to the set $T$, and write $\comp{T}=\alpha$ for the inverse map.
Given a partition $\lambda$, we define its \emph{Young diagram} to be a left-justified array of boxes with $\lambda_i$ boxes in the $i$th row from the top. If the Young diagram of another partition $\mu$ is contained in that of $\lambda$, then the \emph{skew shape} $\lambda/\mu$ is obtained by removing the boxes corresponding to $\mu$ from the top-left of the Young diagram of $\lambda$.
For example, the skew shapes from Example 1.1 can be expressed as $311/1$, $321/11$ and $22/\varnothing = 22$, respectively. We will typically refer to skew shapes using uppercase Roman letters. The \emph{size} of a skew shape $A$ is its number of boxes and is denoted $|A|$.
A \emph{horizontal strip} is a skew shape that has at most one box in each column, with \emph{vertical strips} defined similarly. The \emph{transpose} $\lambda^t$ of a partition $\lambda$ is the partition obtained by reading the column lengths of the Young diagram of $\lambda$ from left to right. For example $(443)^t = 3332$. The transpose of a skew shape $A=\lambda/\mu$ is $A^t = \lambda^t/\mu^t$.
For a skew shape $A$, let $\rows{A}$ (resp.\ $\cols{A}$) denote the partition consisting of the row (resp.\ column) lengths of $A$ sorted into weakly decreasing order. A \emph{ribbon} is a skew shape in which every pair of adjacent rows overlap in exactly one column. In particular, note that a ribbon is completely determined by its row lengths from top to bottom. This allows us to define the notion of $\rows{\alpha}$ and $\cols{\alpha}$ for a composition $\alpha$ as $\rows{R}$ and $\cols{R}$ respectively, where $R$ is the ribbon whose row lengths from top to bottom are given by $\alpha$. For example, $\rows{1311} = 3111$ and $\cols{1311} = 321$; in general, $\rows{\alpha}$ simply means the weakly decreasing reordering of the parts of $\alpha$. Observe that, for example, $\cols{22}=22$ when we consider 22 to be a skew shape whereas $\cols{22}=211$ when we consider 22 to be a composition; we will ensure the meaning of our notation is clear from the context.
We place a partial order on the set of all partitions according to the following definition.
\begin{definition} For partitions $\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_r)$ and $\mu = (\mu_1, \mu_2, \ldots, \mu_s)$, we define \emph{dominance order} $\preceq$ by $\lambda \preceq \mu$ if \[ \lambda_1 + \lambda_2 + \cdots + \lambda_k \leq \mu_1 + \mu_2 + \cdots + \mu_k \] for all $k=1,2,\ldots,r$, where we set $\mu_i=0$ if $i > s$. In this case, we will say that $\mu$ \emph{dominates} $\lambda$, or is \emph{more dominant} than $\lambda$. \end{definition}
Note that the above definition makes sense even if $\lambda$ and $\mu$ are partitions of different size, as can be the case later when we compare $\rowsk{k}{A}$ and $\rowsk{k}{B}$ for $k\geq 2$.
As in \cite{McN08}, we will need the following result about this extended definition of dominance order. Since it is straightforward to check, we leave the proof as an exercise.
\begin{lemma}\label{lem:dom_containment} Consider two sequences $a = (a_1, \ldots, a_r)$ and $b = (b_1, \ldots, b_s)$ of nonnegative integers such that $r \leq s$ and $a_i \leq b_i$ for $i=1,2,\ldots,r$. Let $\alpha$ and $\beta$ denote the partitions obtained by sorting the parts of $a$ and $b$ respectively into weakly decreasing order. Then $\alpha \preceq \beta$. \end{lemma}
\subsection{Quasisymmetric functions}
For a formal power series $f$ in the variables $x_1, x_2, \ldots$, let $[x_{i_1}^{a_1} x_{i_2}^{a_2} \cdots x_{i_k}^{a_k}]f$ denote the coefficient of $x_{i_1}^{a_1} x_{i_2}^{a_2} \cdots x_{i_k}^{a_k}$ in the expansion of $f$ into monomials.
\begin{definition} A quasisymmetric function in the variables $x_1, x_2,\ldots$, say with rational coefficients, is a formal power series $f \in \mathbb{Q}[[x_1, x_2, \ldots]]$ of bounded degree such that for every sequence $a_1, a_2, \ldots, a_k$ of positive integers, we have \[ [x_{i_1}^{a_1} x_{i_2}^{a_2} \cdots x_{i_k}^{a_k}]f = [x_{j_1}^{a_1} x_{j_2}^{a_2} \cdots x_{j_k}^{a_k}]f \] whenever $i_1 < i_2 < \cdots < i_k$ and $j_1 < j_2 < \cdots < j_k$. \end{definition} As an example, the formal power series \[ \sum_{1 \leq i < j} x_i^2 x_j \] is quasisymmetric but not symmetric.
For a composition $\alpha=(\alpha_1, \ldots, \alpha_k)$, we define the \emph{monomial quasisymmetric function} $M_\alpha$ by \begin{equation}\label{equ:monomials} M_\alpha = \sum_{i_1 < \cdots < i_k} x_{i_1}^{\alpha_1} \cdots x_{i_k}^{\alpha_k}. \end{equation} It is clear that the set $\{M_\alpha\}$, where $\alpha$ ranges over all compositions of size $n$, is a basis for the vector space of quasisymmetric functions of degree $n$. A more important basis for our purposes is the basis of \emph{fundamental quasisymmetric functions} $F_\alpha$ defined by \begin{equation}\label{equ:mtof} F_\alpha = \sum_{S(\alpha) \subseteq T \subseteq [n-1]} M_{\comp{T}} \end{equation} when $\alpha$ has size $n$. For example, $F_{22} = M_{22} + M_{211} + M_{112} + M_{1111}$.
For a skew shape $A$ with $n$ boxes, a \emph{standard Young tableau (SYT)} of shape $A$ is a filling of the boxes of $A$ with the numbers $1, 2, \ldots, n$, each used exactly once, so that the numbers increase down the columns and from left to right along the rows. For example, \[ \begin{tikzpicture}[scale=0.4] \draw[thick] (0,0) -- (0,1) -- (1,1) -- (1,3) -- (3,3) -- (3,1) -- (2,1) -- (2,0) -- cycle; \draw (1,0) -- (1,1) -- (2,1) -- (2,3); \draw (1,2) -- (3,2); \draw (0.5,0.5) node {6}; \draw (1.5,0.5) node {4}; \draw (1.5,1.5) node {3}; \draw (1.5,2.5) node {1}; \draw (2.5,1.5) node {5}; \draw (2.5,2.5) node {2}; \end{tikzpicture} \]
is an SYT of shape 332/11. The \emph{descent set} $S$ of an SYT $T$ of shape $A$ is the set of numbers $i$ for which $i+1$ appears in a lower row than $i$. The \emph{descent composition} of $T$, denoted $\comp{T}$, is then the composition of $|A|$ corresponding to $S$. For example, the SYT above has descent set $\{2,3,5\}$ and descent composition 2121.
Since the following result, which appears as \cite[Theorem~7.19.7]{ec2}, expresses skew Schur functions in the $F$-basis, it is crucial to this paper and is the reason why the $F$-basis is a natural choice of quasisymmetric basis when comparing skew Schur functions. Although skew Schur functions are typically defined as a sum of monomials, Theorem~\ref{thm:fexpansion} can also serve as a definition of skew Schur functions for our purposes.
\begin{theorem}[\cite{Ges84,StaThesis71,StaThesis}]\label{thm:fexpansion} For a skew shape $A$, we have \[ s_A = \sum_T F_{\comp{T}} \] where the sum is over all standard Young tableaux $T$ of shape $A$. \end{theorem}
For example, the SYT above contributes $F_{2121}$ to $s_{332/11}$.
Theorem~\ref{thm:fexpansion} tells us that $s_A$ is an example of an \emph{$F$-positive} symmetric function, meaning that it has all nonnegative coefficients when expanded in the $F$-basis. Analogously to Schur support, we define the \emph{$F$-support} of $A$, denoted $\fsupp{A}$, to be the set of compositions $\alpha$ such that $F_\alpha$ appears with positive coefficient when $s_A$ is expanded in the $F$-basis. For any other quasisymmetric basis $\{B_\alpha\}$, analogous definitions of \emph{$B$-positive} and \emph{$B$-support} are obtained by replacing $F$ with $B$.
\section{Prior results}\label{sec:priorresults}
In \cite{RSvW07}, Reiner, Shaw and van Willigenburg gave sufficient conditions for two skew shapes to yield the same skew Schur function. More relevant for the purposes of the current discussion is that they also wrote one section (Section~8) on \emph{necessary} conditions for two skew shapes $A$ and $B$ to satisfy $s_A = s_B$. Their necessary conditions are dependent on certain overlaps among the rows of a skew shape. Before discussing their work, let us first state a relevant classical result along the same lines; it can be considered a starting point for necessary conditions for skew Schur equality. A proof in our terminology can be found in \cite{McN08}, and earlier proofs can be found in \cite{Lam77, Zab}.
\begin{proposition}\label{pro:extreme_fillings} Let $A$ and $B$ be skew shapes. If $\lambda \in \ssupp{A}$, then \[ \rows{A} \preceq \lambda \preceq \cols{A}^t, \]
and both $s_{\rows{A}}$ and $s_{\cols{A}^t}$ appear with coefficient 1 in the Schur expansion of $s_A$. Consequently, if $\ssupp{A} \supseteq \ssupp{B}$, then \[ \rows{A} \preceq \rows{B} \mbox{\ \ and \ \ } \cols{A} \preceq \cols{B}. \] \end{proposition}
Reiner, Shaw and van Willigenburg generalized $\rows{A}$ and $\cols{A}$ using the following key definition.
\begin{definition} Let $A$ be a skew shape with $r$ rows. For $i = 1, \ldots, r-k+1$, define $\overlap{k}{i}$ to be the number of columns occupied in common by rows $i, i+1, \ldots, i+k-1$. Then $\rowsk{k}{A}$ is defined to be the weakly decreasing rearrangement of \[ (\overlap{k}{1}, \overlap{k}{2}, \ldots, \overlap{k}{r-k+1}). \] Similarly, we define $\colsk{k}{A}$ by looking at the overlap among the columns of $A$. \end{definition}
In particular, note that $\rowsk{1}{A} = \rows{A}$ and $\colsk{1}{A} = \cols{A}$.
\begin{example} Let $A=553111/31$ as shown here. \[ \begin{tikzpicture}[scale=0.4] \draw (-1.3,3) node {$A=$}; \draw[thick] (0,0) -- (0,4) -- (1,4) -- (1,5) -- (3,5) -- (3,6) -- (5,6) -- (5,4) -- (3,4) -- (3,3) -- (1,3) -- (1,0) -- cycle; \draw (0,1) -- (1,1); \draw (0,2) -- (1,2); \draw (0,3) -- (1,3); \draw (1,4) -- (3,4); \draw (3,5) -- (5,5); \draw (1,3) -- (1,4); \draw (2,3) -- (2,5); \draw (3,4) -- (3,5); \draw (4,4) -- (4,6); \end{tikzpicture} \] We have that $\rowsk{1}{A} = 432111$, $\rowsk{2}{A}=22111$, $\rowsk{3}{A}=11$, $\rowsk{4}{A}=1$, and $\rowsk{i}{A} = \varnothing$ otherwise. In addition, $\colsk{1}{A} = 42222$, $\colsk{2}{A}=2211$, $\colsk{3}{A}=111$, $\colsk{4}{A}=1$, and $\colsk{i}{A} = \varnothing$ otherwise. \end{example}
It turns out that knowledge of $\rowsk{k}{A}$ for all $k$ is equivalent to knowledge of $\colsk{\ell}{A}$ for all $\ell$. To show this, the natural concept of $\rects{k}{\ell}{A}$ was introduced in \cite{RSvW07}. Here is their result.
\begin{proposition}[\cite{RSvW07}]\label{pro:rects} Given a skew shape $A$, consider the doubly-indexed array \[ (\rects{k}{\ell}{A})_{k,\ell \geq 1} \] where $\rects{k}{\ell}{A}$ is defined to be the number of $k \times \ell$ rectangular subdiagrams contained inside $A$. Any one of the three forms of data \[ (\rowsk{k}{A})_{k \geq 1},\ \ (\colsk{\ell}{A})_{\ell \geq 1}, \ \ (\rects{k}{\ell}{A})_{k, \ell \geq 1} \] on $A$ determines the other two uniquely. \end{proposition}
The main necessary condition from \cite{RSvW07} for skew Schur equality is the following.
\begin{theorem}[\cite{RSvW07}]\label{thm:rsvw} Let $A$ and $B$ be skew shapes. If $s_A = s_B$, then the following three equivalent conditions are true: \begin{itemize} \item $\rowsk{k}{A} = \rowsk{k}{B}$ for all $k$; \item $\colsk{\ell}{A} = \colsk{\ell}{B}$ for all $\ell$; \item $\rects{k}{\ell}{A} = \rects{k}{\ell}{B}$ for all $k, \ell$. \end{itemize} \end{theorem}
There are two results from \cite{McN08} relevant to this section. The first extends Proposition~\ref{pro:rects} to the setting of inequalities.
\begin{proposition}\label{pro:rectsineq} Let $A$ and $B$ be skew shapes. Then the following conditions are equivalent: \begin{itemize} \item $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$; \item $\colsk{\ell}{A} \preceq \colsk{\ell}{B}$ for all $\ell$; \item $\rects{k}{\ell}{A} \leq \rects{k}{\ell}{B}$ for all $k, \ell$. \end{itemize} \end{proposition}
The second result from \cite{McN08} is the corresponding analogue of Theorem~\ref{thm:rsvw}.
\begin{theorem}\label{thm:combine} Let $A$ and $B$ be skew shapes. If $s_A - s_B$ is Schur-positive, or if $A$ and $B$ satisfy the weaker condition that $\ssupp{A} \supseteq \ssupp{B}$, then the following three equivalent conditions are true: \begin{itemize} \item $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$; \item $\colsk{\ell}{A} \preceq \colsk{\ell}{B}$ for all $\ell$; \item $\rects{k}{\ell}{A} \leq \rects{k}{\ell}{B}$ for all $k, \ell$. \end{itemize} \end{theorem}
A motivation behind \cite{McN08} was to determine easily testable conditions that would show that $s_A - s_B$ is not Schur-positive for certain skew shapes $A$ and $B$. Theorem~\ref{thm:combine} provides such conditions, as demonstrated by the following example.
\begin{example}\label{exa:incomparable} Let \[ \begin{tikzpicture}[scale=0.4] \begin{scope} \draw (-1.5,2) node {$A=$}; \draw[thick] (0,0) -- (0,3) -- (1,3) -- (1,4) -- (4,4) -- (4,3) -- (2,3) -- (2,2) -- (1,2) -- (1,0) -- cycle; \draw (0,1) -- (1,1); \draw (0,2) -- (1,2); \draw (1,2) -- (1,3); \draw (1,3) -- (2,3); \draw (2,3) -- (2,4); \draw (3,3) -- (3,4); \end{scope} \begin{scope}[xshift=60mm] \draw (0,2) node {and}; \end{scope} \begin{scope}[xshift=95mm] \draw (-1.5,2) node {$B=$}; \draw[thick] (0,0) -- (0,1) -- (1,1) -- (1,3) -- (3,3) -- (3,4) -- (4,4) -- (4,2) -- (3,2) -- (3,1) -- (1,1) -- (1,0) -- cycle; \draw (2,1) -- (2,3); \draw (1,2) -- (3,2) -- (3,3) -- (4,3); \draw (5,2) node {.}; \end{scope} \end{tikzpicture} \] We see that $\rowsk{2}{A} = 111$ and $\rowsk{2}{B} = 21$. Thus we know that $s_B - s_A$ is not Schur-positive. On the other hand, $\rowsk{3}{A} = 1$ while $\rowsk{3}{B} = \varnothing$, implying that $s_A - s_B$ is not Schur-positive. Moreover, we can conclude that $\ssupp{A}$ and $\ssupp{B}$ are incomparable under containment order. \end{example}
\section{Main result}\label{sec:onedirection}
Our goal for this section is to state and prove our main result, and deduce relevant corollaries. We begin immediately with the statement of our main result.
\begin{theorem}\label{thm:fsupport} Let $A$ and $B$ be skew shapes. If $\fsupp{A} \supseteq \fsupp{B}$, then the following three equivalent conditions are true: \begin{itemize} \item $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$; \item $\colsk{\ell}{A} \preceq \colsk{\ell}{B}$ for all $\ell$; \item $\rects{k}{\ell}{A} \leq \rects{k}{\ell}{B}$ for all $k, \ell$. \end{itemize} \end{theorem}
For example, applying this theorem in Example~\ref{exa:incomparable} shows that $\fsupp{A}$ and $\fsupp{B}$ are incomparable with respect to containment. This is a strictly stronger deduction than being incomparable with respect to Schur support containment (cf.\ $A_1$ and $A_3$ from Example~\ref{exa:intro}.) Moreover, Theorem~\ref{thm:fsupport} is more than just an incremental improvement of Theorem~\ref{thm:combine} since $\fsupp{A} \supseteq \fsupp{B}$ seems to be ``much closer'' to the overlap conditions than $\ssupp{A} \supseteq \ssupp{B}$. We will make this assertion precise in Section~\ref{sec:mainconjecture} by giving evidence in favor of our conjecture that the converse of Theorem~\ref{thm:fsupport} is also true.
\subsection{Consequences of the main result}
We postpone the proof until after we have given some consequences of Theorem~\ref{thm:fsupport}. If $s_A - s_B$ is $F$-positive, then it is clearly the case that $\fsupp{A} \supseteq \fsupp{B}$, so we get the following corollary.
\begin{corollary}\label{cor:fpositive} Let $A$ and $B$ be skew shapes. If $s_A - s_B$ is $F$-positive then the following three equivalent conditions are true: \begin{itemize} \item $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$; \item $\colsk{\ell}{A} \preceq \colsk{\ell}{B}$ for all $\ell$; \item $\rects{k}{\ell}{A} \leq \rects{k}{\ell}{B}$ for all $k, \ell$. \end{itemize} \end{corollary}
To see that Corollary~\ref{cor:fpositive} is not equivalent to Theorem~\ref{thm:fsupport}, let $A=A_1$ and $B=A_2$ from Example~\ref{exa:intro}. Then the hypothesis of Theorem~\ref{thm:fsupport} holds but that of Corollary~\ref{cor:fpositive} does not.
Next, by Theorem~\ref{thm:fexpansion}, we get that Theorem~\ref{thm:combine} is simply a consequence of Theorem~\ref{thm:fsupport} and Corollary~\ref{cor:fpositive}.
The consequence involving equalities can be captured by the following statement, which includes the content of Theorem~\ref{thm:rsvw}.
\begin{corollary}\label{cor:equality} Let $A$ and $B$ be skew shapes. If $s_A = s_B$ or $\ssupp{A}=\ssupp{B}$ or $\fsupp{A}=\fsupp{B}$, then the following three equivalent conditions are true: \begin{itemize} \item $\rowsk{k}{A} = \rowsk{k}{B}$ for all $k$; \item $\colsk{\ell}{A} = \colsk{\ell}{B}$ for all $\ell$; \item $\rects{k}{\ell}{A} = \rects{k}{\ell}{B}$ for all $k, \ell$. \end{itemize} \end{corollary}
\begin{proof} If $s_A = s_B$ then we have $\ssupp{A}=\ssupp{B}$ which, by Theorem~\ref{thm:fexpansion}, implies $\fsupp{A}=\fsupp{B}$. By Theorem~\ref{thm:fsupport}, $\fsupp{A} \supseteq \fsupp{B}$ implies that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$. Similarly, $\rowsk{k}{B} \preceq \rowsk{k}{A}$ for all $k$, and so $\rowsk{k}{A} = \rowsk{k}{B}$ for all $k$. The remainder of the result now follows from Proposition~\ref{pro:rects}. \end{proof}
\subsection{Proving the main result}
We now work towards a proof of Theorem~\ref{thm:fsupport}. The overall approach will be much like that for the proof of \cite[Corollary~3.10]{McN08}, which is our Theorem~\ref{thm:combine}, but the details change because we are now working in the $F$-basis. For example, the easiest inequality for us to show will be that $\colsk{\ell}{A} \preceq \colsk{\ell}{B}$, whereas the $\mathrm{rows}$ inequality was the one proved in \cite{McN08}.
While we can determine $\cols{\alpha}$ for a composition $\alpha$ by constructing the relevant ribbon, it will be helpful for Proposition~\ref{pro:f_extreme_fillings}(b) below to have an equivalent way to obtain $\cols{\alpha}$.
\begin{lemma}\label{lem:cols}
For a ribbon $R$ with $|R|=n$, let $\unsortedrows{R}$ (resp.\ $\unsortedcols{R}$) denote the (unsorted) composition of $n$ given by the row (resp.\ column) lengths of $R$ read from top to bottom (resp.\ right to left). Then the subsets of $[n-1]$ corresponding to $\unsortedrows{R}$ and $\unsortedcols{R}$ are complements of each other.
Consequently, for a composition $\alpha$ of $n$, to obtain $\cols{\alpha}$ from $\alpha$ follow this 4-step process: obtain the subset $S(\alpha)$ of $[n-1]$, take the complement $S(\alpha)^c$, then construct the corresponding composition $\comp{S(\alpha)^c}$ of $n$, and sort the result into weakly decreasing order. \end{lemma}
\begin{proof} Write the numbers $1, 2, \ldots, n$ in sequence from the top right box of $R$ down to the bottom left. Every box numbered $i$ for $i<n$ is either the highest-numbered box of its row or of its column, and not both. It is the highest-numbered box of its row (resp.\ column) if and only if $i$ is an element of the subset of $[n-1]$ corresponding to $\unsortedrows{R}$ (resp.\ $\unsortedcols{R}$). The first assertion of the lemma follows.
The second assertion follows from the definition of $\cols{\alpha}$. \end{proof}
Our Proposition~\ref{pro:extreme_fillings} played a key role in the proofs of \cite{McN08}. To prove Theorem~\ref{thm:fsupport}, we will need the following quasisymmetric analogue of Proposition~\ref{pro:extreme_fillings}. Although we only need part (a) in this section, it makes sense to prove parts (a) and (b) together; we need (b) because we will use~\eqref{equ:suppf} in the proof of Theorem~\ref{thm:multfree}.
\begin{proposition}\label{pro:f_extreme_fillings} Let $A$ and $B$ be skew shapes. If $\alpha \in \fsupp{A}$ then \begin{enumerate} \item $\rows{\alpha} \preceq \cols{A}^t$, \item $\cols{\alpha} \preceq \rows{A}^t$, \end{enumerate} and both inequalities are sharp. Consequently, if $\fsupp{A} \supseteq \fsupp{B}$, then \begin{equation}\label{equ:suppf} \rows{A} \preceq \rows{B} \mbox{\ \ and \ \ } \cols{A} \preceq \cols{B}. \end{equation} \end{proposition}
See Figure~\ref{fig:extreme_fillings} for examples of SYTs giving equality in (a) and (b).
\begin{figure}
\caption{For this skew shape $A$, we have $\rows{A}=433221$, $\rows{A}^t=6531$, $\cols{A}=4422111$ and $\cols{A}^t=7422$. The descents of the SYTs are shown in bold. Observe that the SYT in (a) has descent composition $\cols{A}^t$. The descent composition in (b) is $\alpha=111112111212$, giving $\cols{\alpha}=6531=\rows{A}^t$.}
\label{fig:extreme_fillings}
\end{figure} \begin{proof} By Theorem~\ref{thm:fexpansion}, we know that $\alpha \in \fsupp{A}$ if and only if there exists an SYT $T$ of shape $A$ and descent composition $\alpha$. First consider (a). By definition, $\rows{\alpha}_1$ will be the length of the longest sequence $i, i+1, \ldots, i+j$ such that none of $i, i+1, \ldots, i+j-1$ is a descent in $T$. Therefore the entries $i, i+1, \ldots, i+j$ appear from left to right in $T$ with no two in the same column. Equivalently, the boxes filled by entries $i, i+1, \ldots, i+j$ form a horizontal strip in $T$, implying that $j+1$ is at most the number of columns of $T$. In other words, $\rows{\alpha}_1 \leq (\cols{A}^t)_1$. By the same logic, the elements of the sum $\rows{\alpha}_1 + \cdots +\rows{\alpha}_k$ correspond to a set of $k$ disjoint horizontal strips in $T$. The number of boxes of any given column of $A$ contained in these $k$ horizontal strips combined is bounded by the minimum of $k$ and the height of the column. Compare this with $(\cols{A}^t)_1 + \cdots + (\cols{A}^t)_k$. Since $(\cols{A}^t)_i$ counts the number of columns of $A$ of height at least $i$, this sum counts the total number of boxes in columns of height less than $k$, plus a contribution of $k$ from each column of height at least $k$. It follows that \[ \rows{\alpha}_1 + \cdots +\rows{\alpha}_k \leq (\cols{A}^t)_1 + \cdots + (\cols{A}^t)_k\,, \] as required.
To see that the inequality in (a) is sharp, consider the SYT $T$ of shape $A$ constructed in the following manner. First, consider the top entry of each nonempty column of $A$, and fill these top entries with $1, 2, \ldots, (\cols{A}^t)_1$ from left to right. Now consider the skew shape $A^-$ consisting of the boxes that have not yet been filled. Since $(\cols{A}^t)_k$ counts the number of columns of $A$ of height at least $k$, we know that $A^-$ has $(\cols{A}^t)_2$ columns. Take the top entry of each such column and fill these top entries with $(\cols{A}^t)_1+1, (\cols{A}^t)_1+2, \ldots, (\cols{A}^t)_1 + ( \cols{A}^t)_2$ from left to right. Continue in this manner until all boxes have been filled. Because at each stage we filled from left to right and we filled a box in every nonempty column, the descent set of $T$ is \[ \{(\cols{A}^t)_1, (\cols{A}^t)_1 + ( \cols{A}^t)_2, \ldots, (\cols{A}^t)_1 +\cdots + ( \cols{A}^t)_{k-1}\}, \] where the longest column of $A$ has $k$ boxes. In other words, the descent composition $\alpha$ satisfies $\alpha = \rows{\alpha} = \cols{A}^t$, as required.
The proof of (b) is somewhat similar, except that now we work with vertical strips instead of horizontal strips and fill these vertical strips from top to bottom. By definition, $\cols{\alpha}_1$ will be the length of the longest sequence $i, i+1, \ldots, i+j$ such that \emph{each} of $i, i+1, \ldots, i+j-1$ is a descent in $T$. Therefore, the entries $i, i+1, \ldots, i+j$ fill a vertical strip in $A$ from top to bottom, implying that $\cols{\alpha}_1 \leq (\rows{A}^t)_1$. The rest of the proof is similar to (a).
To show that the inequality in (b) is sharp, work as in (a) except consider the leftmost entry of each nonempty row instead of the top entry of each column, and fill these leftmost entries from top to bottom. After completing the filling, the \emph{complement} of the descent set of $T$ in $\{1,2,\ldots,|A|-1\}$ is \begin{equation}\label{equ:complement} \begin{split} \{(\rows{A}^t)_1, (\rows{A}^t)_1 + ( \rows{A}^t)_2, \ldots,\\ (\rows{A}^t)_1 +\cdots + ( \rows{A}^t)_{k-1}\}, \end{split} \end{equation}
where the longest row of $A$ has $k$ boxes. The composition of $|A|$ corresponding to the set in~\eqref{equ:complement} is $\rows{A}^t$, which has weakly decreasing parts. By Lemma~\ref{lem:cols}, the descent composition $\alpha$ of $T$ thus satisfies $\cols{\alpha} = \rows{A}^t$, as required. See Figure~\ref{fig:extreme_fillings}(b) for an example, where the complement of the descent set is $\{6, 11, 14\}$.
The last assertion follows from (a) and (b) and the fact that the transpose operation reverses dominance order when applied to partitions of equal size. \end{proof}
We need one more concept before giving the proof proper of Theorem~\ref{thm:fsupport}. For any skew shape $A$, let $\trim{}{A}$ denote the skew shape obtained by deleting the leftmost entry of each nonempty row of $A$. We will consider $\mathrm{trim}$ to be an operation on skew shapes, meaning that $\trim{\ell}{A} = \trim{}{\trim{\ell-1}{A}}$ and $\trim{1}{A}$ is just $\trim{}{A}$. This $\mathrm{trim}$ operation was introduced in \cite{McN08} except there it was defined as deleting the top entry of each nonempty column.
\begin{lemma}\label{lem:trim} For any skew shape $A$ and $\ell\geq2$, we have \begin{enumerate} \item $\colsk{\ell-1}{\trim{}{A}} = \colsk{\ell}{A}$; \item $ \cols{\trim{\ell-1}{A}} = \colsk{\ell}{A}. $ \end{enumerate} \end{lemma}
\begin{proof} Suppose column $i$ of $A$ contributes $c$ to $\colsk{\ell}{A}$, in the sense that column $i$ of $A$ overlaps with column $i+\ell-1$ in exactly $c$ rows. We see that this is equivalent to column $i+1$ of $\trim{}{A}$ overlapping with column $i+\ell-1$ in exactly $c$ rows, thus contributing $c$ to $\colsk{\ell-1}{\trim{}{A}}$, implying the result.
Repeatedly applying (a) gives (b). \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:fsupport}] We assume that $\fsupp{A} \supseteq \fsupp{B}$ and show that \[ \colsk{\ell}{A} \preceq \colsk{\ell}{B} \mbox{\ \ for all $\ell$}. \] By Proposition~\ref{pro:rectsineq}, the $\mathrm{rows}$ and $\mathrm{rects}$ conditions will follow.
With $\ell$ fixed, we will construct a particular SYT $T$ of shape $B$ and descent composition $\alpha$. Our choice of $T$ will help us isolate $\cols{\trim{\ell-1}{B}}$ which, by Lemma~\ref{lem:trim}(b), means we will isolate $\colsk{\ell}{B}$. Roughly speaking, we will start our construction of $T$ so that $\alpha$ is as least dominant as possible, and construct the remainder of $T$ so that $\alpha$ is as dominant as possible. More precisely, follow the construction of $T$ from Proposition~\ref{pro:f_extreme_fillings}(b) by considering the leftmost box of each row and then filling these boxes by $1, \ldots, (\rows{B}^t)_1$ from top to bottom. Repeat this process with the leftmost unfilled box of each row, and continue until the $\ell-1$ leftmost boxes of each row have been filled, or a row has been completely filled if it has less than $\ell-1$ boxes. Suppose a total of $m$ boxes has been filled to this point. The shape that remains unfilled is exactly $\trim{\ell-1}{B}$. For an example, see Figure~\ref{fig:main_proof}.
\begin{figure}
\caption{An example of the fillings of $B$, $A$ and $C$ from the proof of Theorem~\ref{thm:fsupport}. Here, $\ell=3$, $m=11$, and the boxes of $\trim{2}{B}$ and $\trim{2}{A}$ are colored/shaded.}
\label{fig:main_proof}
\end{figure} We now fill this remaining shape $\trim{\ell-1}{B}$ in the most dominant way possible. Following Proposition~\ref{pro:f_extreme_fillings}(a), the descent composition of this remaining filling will be $\cols{\trim{\ell-1}{B}}^t$. By Lemma~\ref{lem:trim}(b), this equals $\colsk{\ell}{B}^t$. This might suggest, at first glance, that the descent composition $\alpha$ of $T$ consists of the concatenation of some composition of $m$ with $\colsk{\ell}{B}^t$. This is not the case since $m$ is not a descent in $T$, but this will not affect our argument.
Since $\fsupp{A} \supseteq \fsupp{B}$, there exists an SYT $T'$ of shape $A$ with descent composition $\alpha$. Remove the boxes filled with $1, 2, \ldots, m$ in $T'$ to get a filling of some shape $C$, and subtract $m$ from all the entries of $C$. This yields an SYT of shape $C$ with descent composition $\colsk{\ell}{B}^t$. By Proposition~\ref{pro:f_extreme_fillings}(a) and since $\colsk{\ell}{B}^t$ is weakly decreasing, we have $\colsk{\ell}{B}^t \preceq \cols{C}^t$. Since $\colsk{\ell}{B}^t$ and $\cols{C}^t$ are both partitions of $|B|-m$, we deduce that $\colsk{\ell}{B} \succeq \cols{C}$.
Now consider $\trim{\ell-1}{A}$. Since $T'$ has descent composition $\alpha$, the numbers $1, 2, \ldots, m$ must have formed $\ell-1$ vertical strips that filled the left ends of any rows they occupied. Therefore, $\trim{\ell-1}{A} \subseteq C$, by definition of $C$. By Lemma~\ref{lem:dom_containment} applied to column lengths, $\cols{\trim{\ell-1}{A}} \preceq \cols{C}$. Putting everything together, we get \[ \cols{\trim{\ell-1}{A}} \preceq \cols{C} \preceq \colsk{\ell}{B}. \] Applying Lemma~\ref{lem:trim}(b) yields the desired result. \end{proof}
\section{Conjecture for the converse}\label{sec:mainconjecture}
In Theorem~\ref{thm:fsupport} and its corollaries, our hypotheses on $A$ and $B$ have implied that we only consider cases where $A$ and $B$ have equal size. Along the same lines, when comparing $\rowsk{k}{A}$ and $\rowsk{k}{B}$ in this section, we will restrict to the case of $A$ and $B$ having the same size, and we can do so without our work losing any substance.
\subsection{The converse statements}
The converse of Corollary~\ref{cor:fpositive} would state that if $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$ then $s_A - s_B$ is $F$-positive, but this is certainly not true. To obtain a counterexample, one only needs to consider skew shapes of size 4: take $A = 311/1$ and $B = 32/1$; there are two SYT of shape $B$ with descent composition 22, but only one such SYT of shape $A$. The same example shows that both possibilities for the converse of Theorem~\ref{thm:combine} also fail to hold. As for the equality questions, $A_1$ and $A_2$ from Example~\ref{exa:intro} show that $\rowsk{k}{A} = \rowsk{k}{B}$ for all $k$ does not imply that $s_A = s_B$ or even that Schur supports are equal.
Given these counterexamples, one might expect the converse of Theorem~\ref{thm:fsupport} to fail for a similarly low value of $|A|$, such as 4, 5 or 6. However, we have computationally checked that the following conjecture holds for all $A$ and $B$ with $|A| \leq 12$.
\begin{conjecture}\label{con:fsupport} Skew shapes $A$ and $B$ of the same size satisfy $\fsupp{A} \supseteq \fsupp{B}$ if and only if $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$. \end{conjecture}
By Proposition~\ref{pro:rectsineq}, we could equivalently replace the $\mathrm{rows}$ condition by the appropriate $\mathrm{cols}$ or $\mathrm{rects}$ condition. A proof of Conjecture~\ref{con:fsupport} would also imply that $\rowsk{k}{A} = \rowsk{k}{B}$ for all $k$ if and only if $\fsupp{A} = \fsupp{B}$, and perhaps this latter statement would be an easier one to prove or disprove.
Obviously, the ``only if'' direction of Conjecture~\ref{con:fsupport} is Theorem~\ref{thm:fsupport}. Despite evidence in favor of the ``if'' direction, this author still remains somewhat skeptical for the following reason. Close examination of the proof of Theorem~\ref{thm:fsupport} suggests that $\fsupp{B}$ encodes more information than $\rowsk{k}{B}$ for all $k$ or equivalently $\colsk{\ell}{B}$ for all $\ell$, since only certain elements of the support were used in the proof of Theorem~\ref{thm:fsupport}. Roughly speaking, we focussed on those compositions $\alpha$ in the support that were obtained by starting our filling in the least dominant way possible, and then filling the remainder $\trim{\ell-1}{B}$ in the most dominant way possible; for each $\ell$, we only used one element of $\fsupp{B}$ to isolate $\colsk{\ell}{B}$.
It can be helpful to view Conjecture~\ref{con:fsupport} in terms of two partially ordered sets. For the first poset $\suppfn{n}$, the elements will be equivalence classes of skew shapes of size $n$, where the equivalence relation is $A \sim B$ if $\fsupp{A} = \fsupp{B}$; the order relation will be $[A] \geq_\suppfn{n} [B]$ if $\fsupp{A} \supseteq \fsupp{B}$, where $[A]$ denotes the equivalence class of $A$. For the second poset, $\ncn{n}$, the elements will be equivalence classes of skew shapes of size $n$, where the equivalence relation is $A \sim B$ if $\rowsk{k}{A} = \rowsk{k}{B}$ for all $k$. The order relation for the second poset is $[A] \geq_\ncn{n} [B]$ if $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$, where $[A]$ now denotes the equivalence class of $A$ under this second equivalence relation. It is straightforward to check that Conjecture~\ref{con:fsupport} is equivalent to the statement that the posets $\suppfn{n}$ and $\ncn{n}$ are isomorphic under the map that sends the equivalence class $[A]$ in $\suppfn{n}$ to the equivalence class $[A]$ in $\ncn{n}$. The poset for the case $n=6$ is shown in Figure~\ref{fig:fsupp_6}. \begin{figure}
\caption{$\suppfn{6} = \ncn{6}$. One representative of each equivalence class is drawn.}
\label{fig:fsupp_6}
\end{figure}
\subsection{Special cases of the conjecture}
It is simple to show that Conjecture~\ref{con:fsupport} holds for horizontal strips. Indeed, $s_A$ for a horizontal strip $A$ is completely determined by $\rowsk{1}{A}=\rows{A}$. In fact, we see that $s_A$ in this case is the complete homogeneous symmetric function $h_{\rows{A}}$. It is well known (see, for example, \cite[Example~I.7.9(b)]{Mac95}) that $h_{\rows{A}}-h_{\rows{B}}$ is Schur-positive if and only if $\rows{A} \preceq \rows{B}$. Thus, if $\rows{A} \preceq \rows{B}$, then $s_A - s_B$ is Schur-positive, which implies that $\fsupp{A} \supseteq \fsupp{B}$.
In Section~\ref{sec:multfree}, we will completely determine the poset $\suppfn{n}$ restricted to $F$-multiplicity free skew shapes (in which case $\fsupp{A} \supseteq \fsupp{B}$ is equivalent to $s_A - s_B$ being $F$-positive), from which it will follow that Conjecture~\ref{con:fsupport} holds in that case.
The remainder of this section is devoted to a proof of Conjecture~\ref{con:fsupport} for a special class of ribbons, which we now define.
\begin{definition} A ribbon is said to be \emph{elongated} if all its rows have length at least two. \end{definition}
\begin{theorem}\label{thm:ribbons} Elongated ribbons $A$ and $B$ of the same size satisfy
$\fsupp{A} \supseteq \fsupp{B}$ if and only if $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$. \end{theorem}
\begin{proof} By Theorem~\ref{thm:fsupport}, we need only prove the ``if'' direction. A key simplification for elongated ribbons is that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$ is equivalent to $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for $k=1,2$.
So first suppose $\rowsk{1}{A} = \rows{A} \preceq \rows{B}$ for elongated ribbons $A$ and $B$. This implies that $A$ has at least as many (nonempty) rows as $B$. On the other hand, $\rowsk{2}{A}$ is just a sequence of ones of length equal to one less than the number of rows of $A$. Thus $\rowsk{2}{A} \preceq \rowsk{2}{B}$ implies that $B$ has at least as many rows as $A$. So our rows condition is equivalent to the fact that $\rows{A} \preceq \rows{B}$ and that $A$ and $B$ have an equal number of rows.
Our proof is facilitated by \cite[Theorem~3.3]{KWvW08}, which considers ribbons whose row lengths from top to bottom are weakly decreasing. In this case, their theorem says that $s_A - s_B$ is Schur-positive if and only if $\rows{A} \preceq \rows{B}$ and $A$ and $B$ have equal numbers of rows. For our purposes, we get that if $A$ and $B$ are elongated ribbons with weakly decreasing row lengths and $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$, then $\fsupp{A} \supseteq \fsupp{B}$. Therefore, it suffices to show that for elongated ribbons $A$, we have $ \fsupp{A} = \fsupp{A^\geq}$, where $A^\geq$ denotes the ribbon obtained from $A$ by sorting its row lengths into weakly decreasing order from top to bottom. Moreover, it suffices to show that the $F$-support of an elongated ribbon $A$ is preserved when we switch two adjacent rows of $A$ where the lower row is longer than the upper row; this is the result for which we will now give a combinatorial proof.
Consider the setup shown in Figure~\ref{fig:ribbon_proof}. This shows two adjacent rows of an SYT $T$ with descent composition $\alpha$ and shape $A$, where $A$ is an elongated ribbon. We assume that row $i+1$ is strictly longer than row $i$, i.e., that $\ell > k$. Starting with $T$, we wish to form an SYT with descent composition $\alpha$ and shape $A'$, where $A'$ is obtained from $A$ by switching the lengths of the rows $i$ and $i+1$. Our plan is to move $\ell-k$ entries of $T$ from row $i+1$ up to row $i$ so as to preserve the descent set. After moving entries, we will sort our rows into weakly decreasing order. We will need to check that the result still has descent composition $\alpha$ and that the columns are strictly increasing at each of the places marked with the thick lines in columns $j_1$, $j_2$ and $j_3$.
\begin{figure}
\caption{The setup for the proof of Theorem~\ref{thm:ribbons}.}
\label{fig:ribbon_proof}
\end{figure}
To begin, if $b_r$ in row $i$ is a descent and $b_r+1$ appears in row $i+1$ as $c_s$, then we consider $b_r$ and $c_s$ as being \emph{paired}. Paired elements will always remain in their current rows except as described below. There are two cases to consider according to whether or not there exist paired elements.
Suppose that there is at least one pair. We know there are at least $\ell-k$ non-paired entries in row $i+1$, so move the largest $\ell-k$ non-paired entries of row $i+1$ up to row $i$. Since paired elements remain in their current rows, the descent set is preserved. Since row $i$ only gains elements, there will still be a strict increase in column $j_3$. Since there is at least one pair, there will still be a strict increase in column $j_2$. In most cases, we will keep a strict increase in column $j_1$ since we moved the largest non-paired elements out of row $i+1$. However, consider the remaining case when there is a full set of $k$ pairs and $c_1$ gets moved, resulting in the loss of the strict increase in column $j_1$. Suppose that, after this moving takes place, $c_s$ is the entry in the leftmost box of row $i+1$. Since $c_s > d$, our technique will be to switch $c_s$ and $d$. The result will clearly be an SYT. We know that $c_s$ is paired, with $c_s-1$ appearing in row $i$. Therefore, $c_s-1$ will remain a descent. Since $c_s > d$, whether or not $c_s$, $d$ or $d-1$ are descents will be unaffected by the switch of $c_s$ and $d$. We conclude that the descent set is preserved, and we have the desired SYT of shape $A'$ and descent set $\alpha$.
Now suppose there are no pairs. In this case, read along row $i+1$ from right to left. Taking the elements $c_s$ of row $i+1$ one at a time, move $c_s$ up to the appropriate spot along row $i$. However, if doing so would violate the strict increase in column $j_2$, then leave $c_s$ in row $i+1$ and move on to consider $c_{s-1}$, stopping once we have moved up $\ell-k$ elements. If $c_s$ remains in the bottom row, all subsequent elements $c_t$ will be able to move up to row $i$, since $c_t < c_s$. As before, there will still be a strict increase in column $j_3$. By design, there will be a strict increase in column $j_2$. Since $k\geq 2$, $c_1$ will remain in position, thus preserving the strict increase in column $j_1$. Since there are no paired elements, the only way we could change the descent set would be if $c_s$ stayed in row $i+1$ while $c_s-1$ moved from row $i+1$ to row $i$. This would imply that $c_s-1=c_{s-1}$, which is impossible for the following reason: when we attempted to move $c_s$ up to row $i$ and failed, it must have been because the entry $b$ in the leftmost box of row $i$ at that time was strictly between the values $c_{s-1}$ and $c_s$. We conclude that the descent set is preserved, as required. \end{proof}
One might wonder if the row overlap condition might imply something stronger than $F$-support containment in the special case of elongated ribbons. More precisely, does Theorem~\ref{thm:ribbons} still hold if we replace the condition ``$\fsupp{A} \supseteq \fsupp{B}$'' by ``$\ssupp{A} \supseteq \ssupp{B}$'' or by ``$s_A - s_B$ is $F$-positive''? The answer is ``no'' for both possibilities, as can be seen by letting $A=632/21$ and $B=652/41$.
One obvious next step would be to try to prove Conjecture~\ref{con:fsupport} for general ribbons. In that regard, we note that the method of proof above has some freedom that we did not use. First observe that the moving of elements described above also works if we want to move less than $\ell-k$ elements. Perhaps more importantly, we started with \emph{any} given $T$ of shape $A$ and descent composition $\alpha$. However, since we are only proving a result about supports, it is sufficient to choose a ``special'' or particular $T$ of shape $A$ and descent set $\alpha$, and there might be a helpful way to make this choice.
\subsection{A saturation-type consequence of the conjecture}
If Conjecture~\ref{con:fsupport} were true, we would get a version of the Saturation Theorem for skew shapes, as we now explain.
For a partition $\lambda$ and a positive integer $n$, let $n\lambda$ denote the partition obtained by multiplying all the parts of $\lambda$ by $n$. The Saturation Theorem \cite{KnTa99} (see also \cite{Buc00} and the survey \cite{Ful00}) can be stated in the following way: for partitions $\lambda$, $\mu$, $\nu$ and any positive integer $n$, we have \[ \ssupp{\lambda/\mu} \supseteq \ssupp{\nu} \mbox{\ \ if and only if\ \ } \ssupp{n\lambda/n\mu} \supseteq \ssupp{n\nu}. \] This statement is written here in an overly complicated form since $\ssupp{\nu}$ is obviously just $\{\nu\}$ and similarly for $n\nu$, but the statement is in the form we need for the following analogue. For a skew shape $A = \lambda/\mu$, we define $nA = n\lambda/n\mu$. Then, David Speyer asked the author if the following skew analogue of the Saturation Theorem could possibly be true: for skew shapes $A$ and $B$ and any positive integer $n$, \[ \ssupp{A} \supseteq \ssupp{B} \mbox{\ \ if and only if\ \ } \ssupp{nA} \supseteq \ssupp{nB}. \] This is false in the ``only if'' direction (which is the easy direction for the Saturation Theorem) since \[ \ssupp{4311/21} \supseteq \ssupp{4421/311} \] but 633 is contained in \[ \ssupp{8842/622} \setminus \ssupp{8622/42}. \] We do not know of a counterexample for the ``if'' direction.
Alejandro Morales asked about connections between the present paper and the Saturation Theorem, and there does appear to be hope of a skew analogue of the Saturation Theorem if we move to $F$-supports.
\begin{question}\label{que:saturation} For skew shapes $A$ and $B$ and any positive integer $n$, is it the case that \[ \fsupp{A} \supseteq \fsupp{B} \mbox{\ \ if and only if\ \ } \fsupp{nA} \supseteq \fsupp{nB}\ ? \] \end{question}
Since dominance order is preserved under the map that sends $\lambda$ to $n\lambda$ and the inverse map, a proof of Conjecture \ref{con:fsupport} would imply an affirmative answer to Question~\ref{que:saturation}.
\section{$F$-multiplicity-free skew shapes}\label{sec:multfree}
In \cite[Theorem~3.4]{BevW13}, Bessenrodt and van Willigenburg give a complete classification of those skew shapes $A$ that are \emph{$F$-multiplicity-free}, meaning that when $s_A$ is expanded in the $F$-basis, all the coefficients are 0 or 1. In other words, $A$ is $F$-multiplicity-free if and only if all SYTx of shape $A$ have distinct descent sets. Our first goal for this section is to completely classify those $F$-multiplicity-free $A$ and $B$ such that $s_A-s_B$ is $F$-positive. By the definition of $F$-multiplicity-free, this is equivalent to classifying those $A$ and $B$ such that $\fsupp{A} \supseteq \fsupp{B}$. Our second goal is to show that this classification implies the truth of Conjecture~\ref{con:fsupport} in the case of $F$-multiplicity-free shapes.
Let us begin with the aforementioned result from \cite{BevW13}. Let $A^\circ$ denote $A$ rotated $180^\circ$, also known as the \emph{antipodal rotation} of $A$. As is well known \cite[Exercise~7.56(a)]{ec2}, $s_A = s_{A^\circ}$. Let us use $1^\ell$ to denote a sequence of $\ell$ copies of $1$, and $A \oplus B$ to denote the skew shape obtained by positioning $A$ immediately below and to the left of $B$ in such a way that $A$ and $B$ have no rows or columns in common. For example, $(1^2) \oplus (2)$ can also be written as $311/1$.
\begin{theorem}[\cite{BevW13}]\label{thm:bevw}
A skew shape $A$ of size $n$ is $F$-multiplicity-free if and only if, up to transpose, $A$ or $A^\circ$ is one of \renewcommand{\theenumi}{\roman{enumi}} \begin{enumerate} \item $(3,3)$ if $n=6$, \item $(4,4)$ if $n=8$, \item $(n-2,2)$ if $n\geq4$, \item $(n-\ell,1^\ell)$ for $0 \leq \ell \leq n-1$, \item $(1^\ell) \oplus (n-\ell)$ for $1 \leq \ell \leq n-1$. \end{enumerate} \end{theorem}
Notice that the first four types in the list above are \emph{straight shapes}, meaning that they take the form $\lambda/\varnothing$ for some partition $\lambda$. We now state the main result of this section.
\begin{theorem}\label{thm:multfree} Let $A$ and $B$ be $F$-multiplicity-free skew shapes of size $n$. Then $s_A = s_B$ if and only if $B \in \{A, A^\circ\}$. Otherwise, $s_A - s_B$ is $F$-positive (equivalently $\fsupp{A} \supseteq \fsupp{B}$) if and only if one of the following conditions holds up to antipodal rotation of $A$ and/or $B$: \begin{enumerate} \item $A = (1^\ell) \oplus (n-\ell)$ and $B \in \{(n-\ell, 1^\ell), (n-\ell+1, 1^{\ell-1})\}$ for $1 \leq \ell \leq n-1$; \item $A = (1^2) \oplus (n-2)$ and $B = (n-2, 2)$ with $n\geq4$; \item $A= (1^{n-2}) \oplus (2)$ and $B = (2,2, 1^{n-4})$ with $n\geq4$. \end{enumerate} \end{theorem}
Observe that the skew shapes $A$ and $B$ in (c) are just the transposes of those in (b), while the transposes of $A$ and $B$ from (a) will be another pair from (a). The subposet of $\suppfn{5}$ consisting of $F$-multiplicity-free skew shapes is depicted in Figure~\ref{fig:suppfn4}.
\begin{figure}
\caption{The subposet of $\suppfn{5}$ consisting of $F$-multiplicity-free skew shapes. For each $A$ drawn, $A^\circ$ is also a member of the equivalence class.}
\label{fig:suppfn4}
\end{figure}
\begin{proof}[Proof of Theorem~\ref{thm:multfree}] Since $s_A = s_{A^\circ}$, we know that if $B \in \{A, A^\circ\}$ then $s_A = s_B$. The converse is a consequence of the analysis below that proves the bulk of the statement of the theorem.
If $\fsupp{A} \supseteq \fsupp{B}$, then Proposition~\ref{pro:f_extreme_fillings} tells us that $\rows{A} \preceq \rows{B}$ and $\cols{A} \preceq \cols{B}$. If $A$ and $B$ are straight shapes, then the latter inequality is equivalent to $\rows{A}^t \preceq \rows{B}^t$ and hence $\rows{A} \succeq \rows{B}$. Thus $\rows{A}=\rows{B}$ and so $A=B$. Therefore, the straight shapes given in (i)--(iv) of Theorem~\ref{thm:bevw} are all incomparable according to $F$-support containment.
It remains to consider comparabilities involving the skew shapes $A=(1^\ell) \oplus (n-\ell)$ for $1 \leq \ell \leq n-1$. Note that this class is mapped to itself under the transpose operation. The rest of the proof is a relatively routine checking of cases involving some explicit expansions of skew Schur functions. Taking one of our skew shapes to be of type (v) of Theorem~\ref{thm:bevw}, we will work backwards through the five possibilities for the type of the other skew shape. \begin{itemize} \item[(v)] Let us first consider the case when $A = (1^\ell) \oplus (n-\ell)$ and $B = (1^m) \oplus (n-m)$ for $\ell < m$. We have $\rows{A} \succ \rows{B}$ but $\cols{A} \prec \cols{B}$. Proposition~\ref{pro:f_extreme_fillings} then tells us that $\fsupp{A}$ and $\fsupp{B}$ are incomparable.
\item[(iv)] By the Pieri rule \cite[Theorem~7.15.7]{ec2}, \begin{equation}\label{equ:multfree} s_{(1^\ell) \oplus (n-\ell)} = s_{(n-\ell, 1^\ell)} + s_{(n-\ell+1, 1^{\ell-1})}. \end{equation} Therefore $s_A - s_B$ is Schur-positive and hence $F$-positive for $A$ and $B$ from (a) of the current theorem.
We next consider other comparabilities among those skew shapes of types (iv) and (v) of Theorem~\ref{thm:bevw}. Note that the class (iv) is also mapped to itself under the transpose operation. From \cite[Lemma~3.2]{BevW13}, we know that for $n\geq 1$ and $0 \leq \ell \leq n-1$, we have \begin{equation}\label{equ:hook} s_{(n-\ell, 1^\ell)} = \sum_\alpha F_\alpha, \end{equation} where the sum is over all compositions $\alpha$ of size $n$ with $\ell+1$ parts. Using this and~\eqref{equ:multfree}, we deduce that the only comparabilities that exist between skew shapes of types (iv) and (v) are those already given in (a) of the current theorem.
\item[(iii)] Consider $(n-2,2)$ for $n\geq4$. Again we refer to \cite[Lemma~3.2]{BevW13} which gives \[ s_{(n-2,2)} = \sum_{i=2}^{n-2} F_{(i,n-i)} + \sum_{j=3}^{n-1} \sum_{i=1}^{j-2} F_{(i, j-i, n-j)}. \] Comparing with~\eqref{equ:multfree} and~\eqref{equ:hook}, comparabilities of $(n-2,2)$ with skew shapes of type $(1^\ell) \oplus (n-\ell)$ can only occur when $\ell=2$. In this case, we have \[ s_{(1^2) \oplus (n-2)} = \sum_{i=1}^{n-1} F_{(i,n-i)} + \sum_{j=2}^{n-1} \sum_{i=1}^{j-1} F_{(i, j-i, n-j)}, \] and so \[ s_{(1^2) \oplus (n-2)}- s_{(n-2,2)} = F_{(1,n-1)} + F_{(n-1,1)} + \sum_{j=2}^{n-1} F_{(j-1,1,n-j)}. \] In particular, $s_A-s_B$ is $F$-positive for $A$ and $B$ from (b) of the current theorem.
The usual $\omega$ involution \cite[\S7.6 and Theorem~7.15.6]{ec2} can be extended to quasisymmetric functions in a way that preserves $F$-positivity; see \cite[Exercise~7.94(a)]{ec2} for one such extension, and \cite{McWapr} for further details and references. Since applying this extended $\omega$ preserves $F$-positivity, we draw the desired analogous conclusion for comparabilities involving $(n-2,2)^t = (2,2,1^{n-4})$ of (c).
\item[(ii), (i)] Finally, by direct computation with $n=6$ and $n=8$, we get that there are no comparabilities involving $(3,3)$, $(4,4)$ or their transposes. \end{itemize} \end{proof}
\begin{remark} If $A$ is $F$-multiplicity-free then Theorem~\ref{thm:fexpansion} implies that $A$ is \emph{Schur-multiplicity-free}, defined in the natural way. Thus when $A$ and $B$ are $F$-multiplicity-free, $s_A - s_B$ being Schur-positive is equivalent to $\ssupp{A} \supseteq \ssupp{B}$. It is relatively easy to determine exactly when $s_A - s_B$ is Schur-positive in the case that $A$ and $B$ are $F$-multiplicity-free, as we now describe. The straight shapes from (i)--(iv) of Theorem~\ref{thm:bevw} are obviously incomparable. Then it follows from \eqref{equ:multfree} that the conditions for $s_A - s_B$ to be Schur-positive are exactly as in Theorem~\ref{thm:multfree} but with conditions (b) and (c) deleted.
Determining conditions for $s_A - s_B$ to be Schur-positive when $A$ and $B$ are \emph{Schur}-multiplicity-free seems to be a significantly harder problem. See \cite{McvW09b} for the case of ribbons and \cite{Gut09} for the solution to $s_A = s_B$ in the Schur-multiplicity-free situation. Both of these papers rely on a classification of skew shapes that are Schur multiplicity-free, which was given in \cite{Gut10,ThYo10}. \end{remark}
With Theorem~\ref{thm:multfree} in place, we can now give our last piece of evidence in favor of Conjecture~\ref{con:fsupport}.
\begin{corollary} Conjecture~\ref{con:fsupport} holds when $A$ and $B$ are $F$-multiplicity-free skew shapes. \end{corollary}
\begin{proof} We wish to find all pairs of $F$-multiplicity-free skew shapes $A$ and $B$ of the same size satisfying $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$, and show that such $A$ and $B$ satisfy one of the conditions of Theorem~\ref{thm:multfree}. Since antipodal rotation preserves $F$-supports and row overlaps, if Conjecture~\ref{con:fsupport} holds for $A$ and $B$, it will automatically hold with $A^\circ$ in place of $A$ and/or with $B^\circ$ in place of $B$. Therefore, we only need to consider the five classes of $F$-multiplicity-free shapes listed in Theorem~\ref{thm:bevw} and not their antipodal rotations.
First suppose that $A$ and $B$ are straight shapes and that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$. By Proposition~\ref{pro:rectsineq}, we then also know that $\colsk{k}{A} \preceq \colsk{k}{B}$ for all $k$. We have $\rowsk{1}{A} = \colsk{1}{A}^t \succeq \colsk{1}{B}^t = \rowsk{1}{B}$. Thus $\rowsk{1}{A} = \rowsk{1}{B}$ and so $A=B$ up to antipodal rotation.
It remains to consider the case when $A$ and/or $B$ takes the form $(1^\ell) \oplus (n-\ell)$ for $1 \leq \ell \leq n-1$. If $A$ takes this form, then \begin{equation}\label{equ:overlaps5} (\rowsk{1}{A}, \ldots, \rowsk{\ell}{A}) = ((n-\ell,1^\ell), 1^{\ell-1}, 1^{\ell-2}, \ldots, 1). \end{equation} Let us work in the reverse order through the five possibilities from Theorem~\ref{thm:bevw} for the type of $B$. \begin{itemize} \item[(v)] If $B = (1^m) \oplus (n-m)$ for $m\neq \ell$ then it will be neither true that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$, nor that $\rowsk{k}{B} \preceq \rowsk{k}{A}$ for all $k$. In this case we will say that the \emph{row overlap sequences are incomparable}, and there is nothing to prove. \item[(iv)] If $B$ is of type (iv) from Theorem~\ref{thm:bevw}, then $B = (n-m, 1^m)$ for some $0 \leq m \leq n-1$ and we have \begin{equation}\label{equ:overlaps4} (\rowsk{1}{B}, \ldots, \rowsk{m+1}{B}) = ((n-m,1^m), 1^{m}, 1^{m-1}, \ldots, 1). \end{equation} Comparing~\eqref{equ:overlaps5} and~\eqref{equ:overlaps4}, we see that the relevant row overlap sequence comparabilities are that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$ when $m=\ell$ or $m=\ell-1$. These two possibilities for $m$ give exactly the conditions of (a) of Theorem~\ref{thm:multfree}.
\item[(iii)] Let $B = (n-2,2)$. For $\ell>2$, we have $\rowsk{1}{A} \prec \rowsk{1}{B}$, but $\rowsk{3}{A} \succ \rowsk{3}{B} = \varnothing$, so the row overlap sequences of $A$ and $B$ are incomparable. If $\ell=2$, we get that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$. In this case, (b) of Theorem~\ref{thm:multfree} is satisfied. If $\ell=1$, the row overlap sequences are again incomparable. If $B=(n-2,2)^t=(2,2,1^{n-4})$, then the analogous conclusions can be drawn by using $\mathrm{cols}$ in place of $\mathrm{rows}$ and (c) of Theorem~\ref{thm:multfree}. \item[(ii), (i)] When $B$ equals $(3,3)$, $(4,4)$ or one of their transposes, it is routine to check that the row overlap sequences of $A$ and $B$ are incomparable. \end{itemize} We conclude that in all cases where $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$, one of the conditions of Theorem~\ref{thm:multfree} is satisfied, so we have $\fsupp{A} \supseteq \fsupp{B}$, as required. \end{proof}
\section{Other quasisymmetric bases}\label{sec:conclusion}
It is natural to ask if other quasisymmetric function bases have a role to play in comparing skew Schur functions. In this section, we look at three other bases, namely \begin{itemize} \item the monomial quasisymmetric functions of Equation~\eqref{equ:monomials}; \item the \emph{quasisymmetric Schur} basis of Haglund et al.\ \cite{HLMvW11}, whose elements we denote by $S_\alpha$; \item the \emph{dual immaculate} basis of Berg et al.~\cite{BBSSZpr}, whose elements we denote by $D_\alpha$. \end{itemize} Examples of expansions in these bases appear in Table~\ref{tab:bases}. The latter two bases are both new (introduced in 2008 and 2012 respectively) and are the subject of considerable current interest.
\renewcommand{\arraystretch}{1.0} \setlength{\tabcolsep}{6pt} \begin{table} \[ \begin{array}{llll} & \begin{tikzpicture}[scale=0.4] \begin{scope} \draw (-1.5,1.5) node {$A_1=$}; \draw[thick] (0,0) -- (1,0) -- (1,2) -- (3,2) -- (3,3) -- (1,3) -- (1,2) -- (0,2) -- cycle; \draw (2,3) -- (2,2); \draw (0,1) -- (1,1); \end{scope} \end{tikzpicture} && \begin{tikzpicture}[scale=0.4] \begin{scope} \draw (-1.5,1) node {$A_3=$}; \draw[thick] (0,0) -- (2,0) -- (2,2) -- (0,2) -- cycle; \draw (1,0) -- (1,2); \draw (0,1) -- (2,1); \end{scope} \end{tikzpicture}
\\ \mbox{Schur expansion} & s_{31} + s_{211} && s_{22}
\\ \mbox{$F$-expansion} & F_{31} + F_{13} + F_{22} + {} && F_{22} + F_{121}
\\ & F_{211} + F_{121} + F_{112} &&
\\ \mbox{$M$-expansion} & M_{31} + M_{13} + M_{22} + 3M_{211} + {} && M_{22} + M_{211} + M_{121} + {} \\ & 3M_{121} + 3M_{112} + 6M_{1111} && M_{112} + 2M_{1111}
\\ \mbox{$S$-expansion} & S_{31} + S_{13} + S_{211} + S_{121} + S_{112} && S_{22}
\\ \mbox{$D$-expansion} & D_{31} + D_{211} && D_{22} - D_{13} \end{array} \] \caption{The expansion of two skew Schur functions from Example~\ref{exa:intro} in the bases of Section~\ref{sec:conclusion}.} \label{tab:bases} \end{table}
Our goals for this section are to show all the implications appearing in Figure~\ref{fig:moreimplications} that did not already appear in Figure~\ref{fig:implications}, to show that all the implications except the rightmost one are strict in the sense that the converse implications are false, and to show that the set of implications in Figure~\ref{fig:moreimplications} is complete in a certain sense. \renewcommand{\arraystretch}{0} \setlength{\tabcolsep}{0pt} \begin{figure}
\caption{A summary of the implications of Section~\ref{sec:conclusion} for skew shapes $A$ and $B$. With the exception of the rightmost implication, all the implications shown are known to be strict in the sense that the converse is false.}
\label{fig:moreimplications}
\end{figure}
There are, of course, other known bases for quasisymmetric functions, such as those in \cite{BJR09,Luo08,Sta05}. One possible first step to incorporating one of these other bases into our framework would be to determine the expansions of skew Schur functions in that new basis.
We now begin the derivation of the implications of Figure~\ref{fig:moreimplications}. The implications at the bottom of the figure involving the $M$-basis are easy to see. Indeed, the horizontal implication is by definition of support, and is strict since $\osupp{M}{3} \supseteq \osupp{M}{21}$ but $s_3 - s_{21}$ is not $M$-positive. The vertical implications involving the $M$-basis are a consequence of any $F_\alpha$ being $M$-positive, as in~\eqref{equ:mtof}, and either implication can seen to be strict by comparing $s_{31/1}$ and $s_{211/1}$.
The Schur functions have a very simple expansion in the $S$-basis: \[ s_\lambda = \sum_\alpha S_\alpha, \] where the sum is over all compositions $\alpha$ that yield $\lambda$ when sorted into weakly decreasing order. It follows that a symmetric function is Schur-positive if and only if it is $S$-positive and, similarly, Schur support containment for skew Schur functions is equivalent to $S$-support containment.
The derivation of the implications involving the $D$-basis requires a little more work. One interesting feature is that the Schur functions are not $D$-positive in general. This means that there are two possible definitions of $\osupp{D}{A}$ for a skew shape $A$: we can either say that $\alpha$ is in the support if $D_\alpha$ appears with nonzero coefficient in the $D$-expansion of $s_A$, or we can insist that the coefficient be positive. It turns out that it doesn't matter which convention we use in Figure~\ref{fig:moreimplications} or in any of the discussion that follows.
The expansion of $s_\lambda$ in the $D$-basis appears as \cite[Theorem~3.38]{BBSSZpr}: if $\lambda$ has $k$ parts, then \[ s_\lambda = \sum_{\sigma} (-1)^\sigma D_{\lambda_{\sigma_1}+1-\sigma_1,\,\lambda_{\sigma_2}+2-\sigma_2,\, \ldots,\, \lambda_{\sigma_k}+k-\sigma_k}\, , \] where the sum is over all permutations $\sigma$ of $[k]$ such that $\lambda_{\sigma_i}+i-\sigma_i > 0$ for all $i \in [k]$. Here $(-1)^\sigma$ denotes the sign of the permutation $\sigma$. Let us deduce some pertinent facts about this expansion. Letting $\sigma$ be the identity permutation, we see that $D_\lambda$ appears with coefficient $+1$ in the $D$-expansion of $s_\lambda$. Moreover, for any $\alpha$, it follows from \cite[Proposition~2.2]{BBSSZpr} that $D_\alpha$ appears with nonzero coefficient in the $D$-expansion of at most one $s_\lambda$. In particular, $D_\lambda$ is the only term indexed by a partition that appears with nonzero coefficient in the $D$-expansion of $s_\lambda$.
If $s_A-s_B$ is $D$-positive then, in particular, the terms in $s_A-s_B$ of the form $D_\lambda$ with $\lambda$ a partition must all have nonnegative coefficients. It then follows from the discussion of the previous paragraph that $s_A-s_B$ is Schur-positive. An example that shows that this implication is strict is \[ s_{32/1}-s_{31} = s_{22} = D_{22} - D_{13}. \] Since each $\alpha$ appears in the $D$-support of at most one $s_\lambda$ and since $\lambda$ is in the $D$-support of $s_\lambda$, we deduce for skew shapes $A$ and $B$ that $\osupp{D}{A} \supseteq \osupp{D}{B}$ if and only if $\ssupp{A} \supseteq \ssupp{B}$.
We can also quickly check that the implications in Figure~\ref{fig:moreimplications} inherited from Figure~\ref{fig:implications} are strict, with the possible exception of the rightmost arrow. Skew shapes $A_1$ and $A_3$ from Table~\ref{tab:bases} show that $F$-positivity of $s_A-s_B$ does not imply Schur-positivity, and similarly for support containment. Next, $A_1$ and $A_2$ from Example~\ref{exa:intro} satisfy $\fsupp{A_1} \supseteq \fsupp{A_2}$ but $s_{A_1} - s_{A_2}$ is not $F$-positive. Finally, $s_{421/2} - s_{431/21} = -s_{32}$ is not Schur-positive, even though $\ssupp{421/2} \supseteq \ssupp{431/21}$. This concludes our demonstration of all the implications of Figure~\ref{fig:moreimplications} and the desired strictness conditions.
But are there more implications that should be shown? Let us impose the condition that $|A|=|B|$ in Figure~\ref{fig:moreimplications}, which does not change the figure or the substance of the implications. Then one implication not shown in Figure~\ref{fig:moreimplications} that could be true is the implication of Conjecture~\ref{con:fsupport}. Even if Conjecture~\ref{con:fsupport} is false, it could conceivably be the case that the row overlaps condition would imply containment of $M$-supports. Apart from these exceptions, we can show that Figure~\ref{fig:moreimplications} is ``complete'' in the sense that all implications involving the various classes are implied by the implications shown. For example, we will show that it is neither the case that $s_A-s_B$ being $M$-positive implies that $\fsupp{A} \supseteq \fsupp{B}$ nor vice versa. To show completeness, there are four implications that we need to show are false; one can check that the absence of these four implications will imply the absence of any other conceivable implications within Figure~\ref{fig:moreimplications}. \begin{itemize} \item To see that $\ssupp{A} \supseteq \ssupp{B}$ does not imply that $s_A - s_B$ is $M$-positive, take $A = 421/2$ and $B=431/21$ as at the end of the previous paragraph. Then $s_A-s_B=-s_{32}$, which is not $M$-positive. \item To see that $s_A-s_B$ being $F$-positive does not imply that $\ssupp{A} \supseteq \ssupp{B}$, take $A=311/1$ and $B=22$ as in Table~\ref{tab:bases}. \item To see that $s_A-s_B$ being $M$-positive does not imply that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$, take $A=3$ and $B=111$. \item To see that $\rowsk{k}{A} \preceq \rowsk{k}{B}$ for all $k$ does not imply that $s_A-s_B$ is $M$-positive, take $A=311/1$ and $B=32/1$, in which case $s_A-s_B=m_{1111}-m_{22}$ \end{itemize}
As a final remark, we have focussed on skew Schur functions because of the recent interest on relationships among them, as we described in the introduction, and because of their connection with the overlap partitions. Of course, there may be other symmetric or quasisymmetric functions that would be worth comparing in the quasisymmetric setting. Two natural prospects are the skew quasisymmetric Schur functions \cite{BLvW11}, which generalize the $S$-basis, and the skew dual immaculate functions \cite{BBSSZpr}.
\end{document} |
\begin{document}
\title{On Gabor frames generated by sign-changing windows and B-splines}
\begin{abstract} For a class of compactly supported windows we characterize the frame property for a Gabor system $ \{E_{mb}T_{na}g \}_{m,n \in \mz},$ for translation parameters $a$ belonging to a certain range depending on the support size. We show that the obstructions to the frame property are located on a countable number of ``curves." For functions that are positive on the interior of the support these obstructions do not appear, and the considered region in the $(a,b)$ plane is fully contained in the frame set. In particular this confirms a recent conjecture about B-splines by Gr\"ochenig in that particular region. We prove that the full conjecture is true if it can be proved in a certain ``hyperbolic strip."
\noindent {\it Keywords:} Gabor frames; frame set; B-splines
\end{abstract}
\section{Introduction}
Only for quite special functions $g\in L^2(\mathbb R) $ do we know a characterization of the {\it Gabor frame set,}
${\cal F}(g):= \{(a,b)\in \mathbb R_+^2 \, \big| \, \{E_{mb}T_{na}g \}_{m,n \in \mz} \, \mbox{is a frame}\}; $ these functions include the Gaussian \cite{Ly, Se2}, the hyperbolic secant \cite{JS2}, the one-sided/two-sided exponentials \cite{Jan9}, and totally positive functions \cite{GrSt}. Common for all these functions is that they are nonnegative.
Much less is known about more general functions, e.g., functions that change sign. In this paper we consider a class of continuous compactly supported windows $g$ with $\text{supp} \, g= [ - \alpha, \alpha]$ for some $\alpha>0$ and characterize the frame property of $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ in the region $ \alpha \le a <2\alpha, b< 1/a.$ For technical reasons (and in order to avoid pathological examples of no practical interest) we assume that the function $g$ only has a finite number of zeros in $]-\alpha, \alpha[.$ The general result, to be stated in Theorem \ref{2-2-1}, shows that the zeros in the interior of the support lead to certain obstacles for the frame property that cannot be predicted from the known results for nonnegative functions. For each translation parameter a countable number of obstructions can appear, i.e., one can think about the obstructions as located on a countable number of curves in the $(a,b)$--plane. The general result also implies the existence of a compactly supported dual window if the frame property is satisfied, with an interesting interpretation in terms of the redundancy of the frame: in fact, if $\frac{M-1}{M} \le ab < \frac{M}{M+1}$ for some $M=2,3,\dots,$ i.e., if the redundancy $(ab)^{-1}$ is at least $\frac{M+1}{M}= 1+ 1/M,$ the existence of a dual window supported on $[-2\alpha M, 2\alpha M]$ is guaranteed.
In the special case of a function $g$ that is positive on $]-\alpha, \alpha[$ the general result implies that $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ is a frame for all parameters $a,b$ in the considered region $\alpha \le a < 2\alpha, b< 1/a.$ In particular, any B-spline $B_N, N\ge 2,$ generates a frame $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ whenever $N/2 \le a < N, \, b< 1/a.$ This confirms a recent conjecture by Gr\"ochenig in that particular region. Inspired by this result we prove that the full conjecture holds if it can be verified in the region determined by the inequalities $1/2 \le ab <1, \, a<N/2.$
The key result in the paper is Theorem \ref{2-2-1}, which characterizes the frame property of $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ in the aforementioned region. The proof is quite complicated and is split into several lemmas and intermediate steps. The idea of the proof was gained through the work on the special case with translation parameter $a=1$ (see the paper \cite{CKK10}), as well as the observation that the duality condition \eqref{14-20} forces a certain behavior of the window $g$ around points $x_0+a$ for which $g(x_0)=0.$ As further help to understand the idea behind the proof, we prove the steps directly in a concrete case, see Example \ref{4108e}. For more information about Gabor systems and frames we refer to the monographs [5,1].
\section{General results}
Given $ \alpha>0$, let \begin{eqnarray} \notag V_\alpha:= \{f\in C(\mathbb R) \ | \ \text{supp} \, f=[-\alpha,\alpha], \ \mbox{$f$ has a finite number of zeros on} \ [-\alpha,\alpha]\}. \\ \ \label{wi-1} \ \end{eqnarray}
We first characterize the frame property of $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ for functions $g \in V_\alpha$ and points $(a,b)$ in the region in $\mathbb R_+^2$ determined by the inequalities $\alpha \le a <2\alpha, b< 1/a.$ In order to do this, we need to introduce some parameters and other tools. Consider $(a,b)$ belonging to the described region, and choose $M\in \mathbb N$ such that $ab\in [\frac{M-1}{M},\frac{M}{M+1}[.$ Let $\kappa$ be the largest integer for which
$(1-ab)\kappa \leq b \alpha$. Then $0\leq \kappa \leq M-1$ because $$\kappa \leq \frac{b\alpha}{1-ab} \leq \frac{ab}{1-ab}<\frac{M}{M+1}\left(1- \frac{M}{M+1}\right)^{-1}=M.$$
If $\kappa \neq 0$, let $n\in\{1,2,\cdots, \kappa\},$ and define the function $R_{n}$ on (a subset of) $]a-\alpha,\alpha-(1-ab)\frac{n}{b} ] \subset ]a-\alpha, \alpha]$ by \begin{eqnarray} \label{4277a} R_{n}(y):= \frac1{g(y)} \prod_{k=1}^{n-1} \frac{ g(y+(1-ab)\frac{k}{b}-a)}{g(y+(1-ab)\frac{k}{b})} , \ \ n=1,2,\cdots, \kappa. \end{eqnarray} We use the standard convention that the empty product is $1$. It is easy to see that $R_{n}$ indeed is defined on $]a-\alpha, \alpha-(1-ab)\frac{n}{b}],$ except possibly on a finite set of points. Similarly, still if $\kappa \neq 0$, for $n\in\{1,\cdots, \kappa\}$ we define the function $L_{n}(y)$ on (a subset of) $[-\alpha + (1-ab)\frac{n}{b},\alpha-a[ \subset [-\alpha, \alpha-a[$ by $$L_{n}(y):=
\frac1{g(y)}\prod_{k=1}^{n-1} \frac{ g(y-(1-ab)\frac{k}{b}+a )}{
g(y -(1-ab)\frac{k}{b})}, \ \ n=1,2,\cdots, \kappa. $$
We now state the announced characterization of the frame property.
\begin{thm} \label{2-2-1} Let $g\in V_\alpha$ for some $\alpha>0$ and assume that $ \alpha\leq a < 2 \alpha$ and $ab\in[\frac{M-1}{M},\frac{M}{M+1}[$ for some $M\in\mathbb{N}\setminus\{1\}.$ Let $\kappa\in \{0,1,\cdots, M-1\}$ be the largest integer for which $(1-ab)\kappa \leq b\alpha$. Then $\{E_{mb} T_{na} g \}_{m,n\in \mathbb{Z}}$ is a Gabor frame if and only if the following conditions are satisfied: \begin{itemize}
\item[{\rm(i)}] $|g(x)|+|g(x+a)|>0, \ x\in [-a,0]$;
\item[{\rm(ii)}] If $\kappa\neq 0$ and if there exist $n_+\in\{1,2,\cdots, \kappa\}$ and
$y_+\in ]a-\alpha, \alpha- (1-ab)\frac{n_+}{b}]$ such that
$g(y_+)=0$ and
$ \lim_{y\rightarrow y_+} |R_{n_+}(y)|=\infty$,
then
\begin{equation*}
g(y_++ (1-ab)\frac{n_+}{b}-a)\neq 0;
\end{equation*}
\item[{\rm(iii)}] If $\kappa\neq 0$ and if there exist $n_-\in\{1,2,\cdots, \kappa\}$ and
$y_-\in[-\alpha + (1-ab)\frac{n_-}{b},\alpha-a[$ such that
$g(y_-)=0$ and $ \lim_{y\rightarrow y_-} |L_{n_-}(y)|=\infty$,
then \begin{eqnarray*} g(y_--(1-ab)\frac{n_-}{b}+a)\neq 0;\end{eqnarray*} \item[{\rm(iv)}] For $y_+,y_-, n_+, n_-$ as in {\rm(ii)} and {\rm(iii)}, $$ y_++(1-ab)\frac{n_+}{b} \neq y_--(1-ab)\frac{n_-}{b}+a. $$ \end{itemize} In the affirmative case, there exists a dual window $h$
with {\em $\text{supp}\ h\subseteq [-aM,aM]$}. \end{thm}
We remark that if $\kappa=0$ then the conditions (ii)-(iv) are trivially satisfied. We also note that Theorem \ref{2-2-1} is similar to, but significantly more general than, Theorem 2.3 in \cite{CKK10}. The main difference is that in the current paper the support size of $g$ (measured by the parameter $\alpha$) and the translation parameter $a$ can vary, subject to the restriction $ \alpha\leq a < 2 \alpha;$ on the other hand \cite{CKK10} dealt with the case $\alpha=a=1.$ This modification turns out to be instrumental for our applications to B-splines.
The proof of the necessity of the conditions in Theorem \ref{2-2-1} is similar to the proof in \cite{CKK10}, so we skip this part. On the other hand, it requires much more work to prove that $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ is a frame if the conditions (i)-(iv) are satisfied. We prove this part of the theorem in the appendix. In the subsequent example we prove directly that a certain Gabor system is a frame, following the steps from the proof of the general result; the hope is that the analysis of this concrete case will help the reader to understand the idea behind the general proof.
\begin{ex} \label{4108e} Let $\alpha=9/10$ and consider a function $ g\in V_\alpha,$ having the single zero $1/5$ within $]-\alpha, \alpha[.$ Let $a=1$
and $b=3/5$. Then $ab\in[\frac{M-1}{M},\frac{M}{M+1}[$ for $M=2$. Note that $ (1-ab)/b = 5/3 -1 = 2/3 \leq 9/10 =\alpha.$ This implies that $\kappa= 1.$ Trivially, $|g(x)|+|g(x+a)|>0, \ x\in [-a,0]$. Let $n_+:=1$ and $y_+:=1/5$. Then $y_+\in ]a-\alpha, \alpha - (1-ab)\frac{n_+}{b}]=]1/10, 7/30]$ and $g(y_+)=0.$ Furthermore, $R_{n_+}(y)= g(y)^{-1},$ so
$ \lim_{y\rightarrow y_+} |R_{n_+}(y)|=\infty.$ It is also clear that $g(y_+ +(1-ab)\frac{n_+}{b}-a)=g(-2/15) \neq 0$.
It is an easy consequence of the duality conditions for Gabor frames (see \eqref{14-20} in the Appendix) that two real valued, bounded functions $g, h\in L^2(\mathbb R)$ with $\text{supp} \ h \ \subseteq [-aM,aM]=[-2,2]$ generate dual frames $\{E_{mb}T_{na}g\}_{m,n\in \mathbb Z}$ and $\{E_{mb}T_{na}h\}_{m,n\in \mathbb Z}$ if and only if for
$n=0,\pm 1$ and
$a.e.$ $x\in [\frac{n}{b}-a,\frac{n}{b}]$, \begin{equation}\label{14-16}
g(x-\frac{n}{b})h(x) +g(x-\frac{n}{b}+a)h(x+a)=b \delta_{n,0}. \end{equation} We will check \eqref{14-16} directly following the steps in the general proof of Theorem \ref{2-2-1}. Motivated by a general result, see Lemma \ref{b-1}, we choose to put $h(x)=0$ for $x\notin [-a-\alpha,-\frac{1}{b}] \cup [-\alpha,\alpha] \cup [\frac{1}{b}, \alpha+a].$ Then $h(x)=h(x+a)=0$ for
$x\in ]\alpha, \frac{1}{b}[$, which is a subinterval of $[\frac{1}{b}-a,\frac{1}{b}]$; thus \eqref{14-16} holds for $n=1$ and $x\in ] \alpha, \frac{1}{b}[$. Similarly, \eqref{14-16} holds for $n=-1$ and $x\in] -\frac{1}{b}-a, -a -\alpha[$. Note that $g(x-\frac{1}{b}+a)=0$ if and only if
$x=y_+ + \frac{1}{b}-a $. Let us for a moment assume that $h$ is chosen on $[\frac{1}{b}-a, \alpha]$ as a bounded function such that
$h$ is continuous at $y=y_++\frac{1}{b}-a$ and \begin{equation} \label{14-17} \lim_{y\rightarrow y_+}
\left\{ h(y+(1-ab)\frac{n_+}{b})R_{n_+}(y)
\right\} \end{equation} exists; letting $x=y + \frac{1}{b}-a$, this means that \begin{equation*} \lim_{x\rightarrow y_+ +\frac{1}{b}-a}
\left\{\frac{h(x)}{g(x-\frac{1}{b}+a)}
\right\} \end{equation*} exists. Then, defining $h$ on $[\frac{1}{b}, a+\alpha]$ by $$h(x+a)=\left\{ \begin{array}{ll} -\dfrac{g(x-\frac{1}{b})h(x)}{g(x-\frac{1}{b}+a)}, & x \in [\frac{1}{b}-a, \alpha]\setminus \{ y_+ +\frac{1}{b}-a\}; \\ -\lim_{t\rightarrow y_+ +\frac{1}{b}-a} \left\{ \dfrac{h(t)}{g(t-\frac{1}{b}+a)}
\right\} g(x-\frac{1}{b}), & x = y_+ +\frac{1}{b}-a, \end{array} \right. $$ \eqref{14-16} holds for $n=1$ and $x\in [\frac{1}{b}-a, \alpha]$. Hence $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ is a frame if we can define $h$ as a bounded function on $[-\alpha, \alpha]$ such that \begin{itemize}
\item[(a)] $h$ is continuous at $y=y_+ + \frac{1}{b}-a $ and
\eqref{14-17} holds;
\item[(b)] the duality condition \eqref{14-16} holds for $n=0$ and $x\in [-a,0],$ $i.e.$,
\begin{eqnarray} \label{4925g-ex} g(x)h(x)+ g(x+a)h(x+a)=b, \, x\in [-a,0].\end{eqnarray} \end{itemize}
Let $\tilde y_+:=y_+ + (1-ab)\frac{1}{b}$ and let $B_1:= ]y_+ -\epsilon, y_+ +\epsilon[ \cup ]\tilde y_+ -\epsilon, \tilde y_+ +\epsilon[ \cup ]\alpha-\epsilon,\alpha+\epsilon[$ and $B_2:=]-\alpha-\epsilon,-\alpha+\epsilon[,$ for an $\epsilon >0$ chosen such that
\begin{itemize} \item[(i)] $|g(x)|\ge \delta > 0$ for $x\in (B_1-a)\cup (B_2+a)$ and some $\delta>0$;
\item[(ii)] $B_1\cap (B_2 +a)=\emptyset.$ \end{itemize} Note that $g(x)\neq 0, \ x\in [\alpha-a,a-\alpha].$
By continuity of $g$, $\inf_{x\in [\alpha-a,a-\alpha]} |g(x)|>0$. We define $h(x):=\frac{b}{g(x)}, \ x\in [\alpha-a,a-\alpha]$, which is thus a bounded function. Note that for $x\in [-a,-\alpha]$, we have $g(x)=0$, and therefore \begin{equation}\label{14-18} g(x)h(x)+g(x+a)h(x+a)=b. \end{equation} Similarly, \eqref{14-18} holds for $x\in [\alpha-a,0]$, $i.e.$, we have now verified (b) on the subinterval $[-a,-\alpha]\cup[\alpha-a,0]$. We now put $h=0$ on $B_1\cap[a-\alpha,\alpha]$. Then $h$ is continuous at $y=y_+ + \frac{1}{b} -a $ and
$ \lim_{y\rightarrow y_+} \left\{ h(y +(1-ab)\frac{1}{b} )R_{1}(y) \right\}=0. $ Hence (a) holds. We define $h$ on $( B_1 -a )\cap [-\alpha,\alpha-a]$ by $ h(x)= \frac{b-g(x+a)h(x+a)}{g(x)}= \frac{b}{g(x)};$ thus $h$ is bounded here by the choice of $\epsilon$, and (b) holds on $( B_1 -a )\cap [-\alpha,\alpha-a]$. Similarly, put $h=0$ on $B_2\cap [-\alpha,\alpha-a]$ and define $h$ on $(B_2+a)\cap [a-\alpha,\alpha]$ by $ h(x)= \frac{b}{g(x)};$ thus $h$ is bounded, and (b) holds on $B_2 \cap [-\alpha,\alpha-a]$. We finally put $h=0$ on $[-\alpha, \alpha-a]\setminus ((B_1-a)\cup B_2).$ Note that the zeroset of $g$ within $[-\alpha,\alpha]$ is $\{-\alpha,y_+,\alpha\}$, so $g(x)\neq 0$ for $x\in \overline{[a-\alpha,\alpha]\setminus (B_1\cup(B_2+a))};$ using the continuity of $g$ implies that $\inf_{x\in [a-\alpha,\alpha]\setminus
(B_1\cup(B_2+a))} |g(x)|>0$. We define $h(x)=\frac{b}{g(x)},\ x\in [a-\alpha,\alpha]\setminus (B_1\cup(B_2+a));$ thus, we have now defined $h$ everywhere as a bounded function, and (b) holds for $x\in [-\alpha, \alpha-a]\setminus ((B_1-a)\cup B_2).$ This completes the proof of (b), and hence the proof of $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ being a Gabor frame with a dual window supported on $[-2,2].$
$\square$\par
\end{ex}
From Theorem \ref{2-2-1} we can immediately extract the possible obstruction curves, i.e., the points $(a,b)$ for which a given function $g\in V_\alpha$ might not generate a frame $ \{E_{mb}T_{na}g \}_{m,n \in \mz}.$ Assume that $g\in V_\alpha$ satisfies the standing assumptions in Theorem \ref{2-2-1} as well as the condition \begin{equation}\label{14-10}
|g(x)|+|g(x+a)|>0,\ x\in [-a,0]. \end{equation} Then, if $\kappa \neq 0,$ the possible obstructions take place on the curves determined by the equations
\begin{eqnarray} && y_++(1-ab)\frac{n_+}{b}-a=y_- , \label{14-11}\\ && y_--(1-ab)\frac{n_-}{b}+a=y_+ , \label{14-12}\\ && y_++(1-ab)\frac{n_+}{b}= y_--(1-ab)\frac{n_-}{b}+a. \label{14-13} \end{eqnarray}for some $y_+, y_-, n_+, n_-$ as in the theorem. The equations \eqref{14-11} and \eqref{14-12} both take the form \begin{eqnarray} \label{4108a} b= \frac{n}{y_--y_++an+a}\end{eqnarray} for some $n\in\{1,2,\cdots, \kappa\},$ while \eqref{14-13} means that \begin{eqnarray} \label{4108b} b= \frac{n_-+n_+}{y_--y_++(n_-+n_+)a+a}\end{eqnarray} for some $n_-, n_+\in\{1,2,\cdots, \kappa\}.$ Note that these curves only depend on the location of the zeros of the function $g\in V_\alpha,$ not on the specific function.
Interestingly, the equations \eqref{4108a} and \eqref{4108b} show that for functions $g\in V_\alpha$ the obstructions take place on ``hyperbolic curves'': this is similar to the result in \cite{LyNes}, where Lyubarskii and Nes showed that for any odd function in the Feichtinger algebra $M^1$ (in particular, the first order Hermite function) the points $(a,b)$ for which $ab=1-1/M= \frac{M-1}{M}$ for $M=2,3,\dots$ do not belong to the frame set.
For functions $g\in V_\alpha$ with no zeroes in $]-\alpha, \alpha[$ the conditions in Theorem \ref{2-2-1} are clearly satisfied, which yields the following:
\begin{cor} \label{4714a} Let $\alpha >0$. Assume that $g$ is a continuous function with {\em $\text{supp}$} $g = [-\alpha, \alpha]$, and that $$ g(x)>0, \ \ x\in ]-\alpha, \alpha[.$$ Then $\{E_{m b}T_{n a} g\}_{m,n\in \mathbb Z}$ is a frame whenever $\alpha \le a < 2\alpha, \, 0<b< 1/a.$ \end{cor}
\section{B-splines and a conjecture by Gr\"ochenig} \label{50131a}
Let us now consider the B-splines $B_N, \, N\in \mathbb N,$ defined recursively by \begin{eqnarray*} B_1 = \chi_{[-1/2, 1/2]}, \, B_{N+1} = B_N *B_1.\end{eqnarray*} The frame properties of $\{E_{mb}T_{na}B_1\}_{m,n\in \mathbb Z}$ are well known (see the work by Janssen \cite{Jan7} and \cite{Sun} by Dai and Sun which finally solved the so-called $abc$-problem), so we focus on the case $N\ge 2,$ where $B_N$ is a continuous function supported on $[-N/2, N/2].$ Furthermore the function $B_N, \, N\ge 2,$ is strictly positive on the interval $]-N/2, N/2[,$ so Corollary \ref{4714a} implies that $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a frame whenever $N/2 \le a < N, \, 0<b< 1/a.$ Several other results about the frame set ${\cal F}(B_N)$ are known. We collect them here for easy reference:
\begin{prop} \label{new} Let $N\in \mathbb N \setminus \{1\},$ and consider $a,b>0$ such that $ab<1.$ Then the following hold:
\begin{itemize} \item[{\rm (i)}] $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is not a frame if $a\ge N.$ \item[{\rm (ii)}] $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is not a frame if $b=2,3,\dots .$ \item[{\rm (iii)}] $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a frame if $a< N, \, b\le 1/N.$ \item[{\rm (iv)}] $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a frame if there exists $k\in \mathbb N$ such that \begin{eqnarray} \label{4710d} 1/N < b < 2/N, \, N/2 \leq ak < 1/b.\end{eqnarray} \item[{\rm (v)}] $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a frame if $b\in \{1, \frac12, \dots, \frac1{N-1}\}.$ \item[{\rm (vi)}] $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a frame if $a=\frac{k}{p} $ for some $k=1, \dots, N-1, \, p\in \mathbb N,$ and $b< 1/k.$ \end{itemize} \end{prop}
{\noindent\bf Proof. \ } The results in (i) and (iii) are classical. Also (ii) is a well known result, originally due to Del Prete \cite{Del1} and rediscovered in \cite{GJ}. For $k=1,$ the statement in (iv) is a consequence of Corollary \ref{4714a}. In general, if \eqref{4710d} holds for some $k\in \mathbb N \setminus \{1\},$ then this implies that $\{E_{mb}T_{nka}B_N\}_{m,n\in \mathbb Z}$ is a frame, and we infer that the larger system $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ itself is a frame (because the upper bound holds automatically). The result in (v) was recently proved by Kloos and St\"ockler \cite{KlSt}, who also proved (vi) for $p=1;$ the case of $p\in \mathbb N$ in (vi) yields an oversampling of the case $p=1,$ and therefore a frame.
$\square$\par
Based on (i)--(iii) in Proposition \ref{new}, Gr\"ochenig formulated a conjecture about the frame set ${\cal F}(B_N)$ in \cite{G8}. Basically it says that the frame set consists of all the points $(a,b)\in \mathbb R_+^2$ that avoid the known obstructions:
\noindent {\bf Conjecture} For any $N\ge 2,$
\begin{eqnarray*} {\cal F}(B_N)= \{(a,b)\in \mathbb R_+^2 \, \big| \, a<N, \, ab <1, b \neq 2, 3, \dots\}.\end{eqnarray*}
We will now show that the conjecture is true if we can prove the frame property in a certain ``hyperbolic strip''.
\begin{figure}
\caption{The set $A$ belongs to the frame set for $B_N, N>1.$ Corollary \ref{4714a} proves that $B$ also belongs to the frame set (see also the introduction to Section \ref{50131a}); and by Proposition \ref{4710b} the conjecture by Gr\"ochenig is true if it can be verified in the regions $C1, C2, \dots.$}
\label{4108f}
\end{figure}
\begin{prop} \label{4710b} The conjecture is true if $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a frame for all $(a,b)\in \mathbb R_+^2$ for which \begin{eqnarray} \label{4710af} a<N/2, \, 1/2 \le ab <1, \, b\notin \{2,3,\dots\}.\end{eqnarray}
\end{prop}
{\noindent\bf Proof. \ } To get a geometric understanding we refer to Figure \ref{4108f}. We note that Corollary \ref{4714a} confirms the frame property in the region determined by the inequalities $N/2<a <N, ab<1;$ furthermore the frame property is satisfied for $a<N, b\leq 1/N$ (i.e., the region A on Figure \ref{4108f}). Thus, it suffices to show that the parameter region determined by the inequalities \begin{eqnarray*} 0 < ab < \frac12, \, \, \, \frac1{N} < b \notin \{2, 3, \dots\}\end{eqnarray*} is contained in the frame set ${\cal F}(B_N)$ under the given assumption. Note that $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ is a Bessel sequence for all $a,b>0,$ i.e., we only need to check the lower frame condition.
Since $0 < 2ab <1,$ choose the unique $M\in \mathbb N$ such that $\frac1{M+1} \le 2ab< \frac1{M}.$ By splitting into the cases $2Ma<N/2$ and $2Ma\ge N/2$ it follows that the system $\{E_{mb}T_{n2Ma}B_N\}_{m,n\in \mathbb Z}$ is a frame; this clearly implies that $\{E_{mb}T_{na}B_N\}_{m,n\in \mathbb Z}$ satisfies the lower frame bound as well.
$\square$\par
\section*{Appendix: Proof of Theorem \ref{2-2-1}} \label{s5-1} Let $M\in \mathbb N$, and assume that $\frac{M-1}{M}\leq ab<\frac{M}{M+1}.$ The starting point is the duality conditions by Ron \& Shen \cite{RoSh, RoSh5} and Janssen \cite{J}, which by an easy calculation implies that two real valued, bounded functions $g,h\in L^2(\mathbb R) $ with $\text{supp}\ g \subseteq [-a,a]$, $\text{supp}\ h \subseteq [-aM,aM]$, generate dual frames $\{E_{mb}T_{na}g\}_{m,n\in \mathbb Z}$ and $\{E_{mb}T_{na}h\}_{m,n\in \mathbb Z}$ if and only if for
$n=0,\pm 1,\pm 2, \cdots, \pm(M-1)$ and $a.e.$ $x\in [\frac{n}{b}-a,\frac{n}{b}]$, \begin{equation}\label{14-20}
g(x-\frac{n}{b})h(x) +g(x-\frac{n}{b}+a)h(x+a)=b \delta_{n,0}. \end{equation}
We will now consider a function $g\in V_\alpha$ that satisfies the conditions (i)--(iv) in Theorem \ref{2-2-1}. We will prove that $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ is a frame by constructing a dual window $h.$ In the following lemma, we use the insight gained from the proofs in \cite{CKK10} to define $h$ on certain intervals, in such a way that \eqref{14-20} is satisfied for some of the relevant values of $n$ and on certain intervals. After that, the subsequent lemma states conditions that yield a definition of $h$ on the remaining parts of the real line in such a way that all the duality conditions are satisfied.
\begin{figure}
\caption{The figure shows the set where $h$ is defined to vanish by \eqref{b-2}, in the case $\kappa=2$. }
\end{figure}
\begin{lemma}\label{b-1} Let $\alpha, a,b >0$ be given such that $ \alpha\leq a <2\alpha$ and $ab\in [\frac{M-1}{M},\frac{M}{M+1}[$ for some $M\in\mathbb{N}\setminus\{1\}$. Assume that \begin{equation}\label{b-2}
h(x)=0, \ \ x\notin -\left(\bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]\right)
\cup [-\alpha,\alpha]
\cup \bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]. \end{equation} Then the following hold: \begin{itemize}
\item[{\rm (a)}] $h(x)=h(x+a)=0$ for $n=1,\cdots,\kappa$ and $x\in ]\alpha+a(n-1),\frac{n}{b}[$, and for $n=-1,\cdots,-\kappa$ and $x\in ]\frac{n}{b}-a,an-\alpha[$ ;
\item[{\rm (b)}] $h(x)=h(x+a)=0$ for $n=\pm(\kappa+1), \cdots, \pm(M-1)$ and $x\in [\frac{n}{b}-a,\frac{n}{b}].$ \end{itemize} \end{lemma} {\noindent\bf Proof. \ } Note that for $n=1,2,\dots, M-1,$ $ab\ge \frac{n}{n+1};$ thus, $\frac{n}{b}-a \leq a n <\frac{n}{b}.$
\noindent (a): For $1\leq n \leq \kappa$, we note that the statement in (a) only involves the function values of $h$ for $x\in ]\alpha + a(n-1),n/b[$ and $x+a \in ]\alpha+an, n/b+a[.$ Since $\left( ]\alpha + a(n-1),n/b[\cup ]\alpha+an, n/b+a[ \right)\cap \text{supp}\ h=\emptyset$, (a) holds for $1\leq n\leq \kappa$. Similarly, (a) holds for $-\kappa \leq n\leq -1$.
\noindent (b): For $\kappa+1 \leq n \leq M-1$, the statement in (b) only involves the values of $h$ for $x\in [\frac{n}{b}-a,\frac{n}{b}]$ and $x+a \in [\frac{n}{b},\frac{n}{b}+a]$; by the definition of $\kappa$,
we have $b\alpha< (\kappa +1)(1-ab)$, $i.e.$, $a\kappa+\alpha < \frac{\kappa+1}{b}-a$; thus $\left( [\frac{n}{b}-a, \frac{n}{b}+a ] \right)\cap \text{supp} \ h =\emptyset.$ Hence \eqref{14-20} holds for $\kappa+1\leq n\leq M-1.$ Similarly, (b) holds for $-M+1\leq n \leq -\kappa-1$.
$\square$\par
Note that condition (a) in Lemma \ref{b-1} is empty if $\kappa=0$; condition (b) is empty if $\kappa=M-1$.
By Lemma \ref{b-1}, we see that \eqref{14-20} holds for $n=1,\cdots,\kappa$ and $x\in ]\alpha+a(n-1),\frac{n}{b}[$, and for $n=-1,\cdots,-\kappa$ and $x\in ]\frac{n}{b}-a,an-\alpha[$. Similarly, \eqref{14-20} holds for $n=\pm(\kappa+1), \cdots, \pm(M-1)$ and $x\in [\frac{n}{b}-a,\frac{n}{b}].$ What remains is to show that we can define $h$ on the set \begin{eqnarray*} -\left(\bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]\right)
\cup [-\alpha,\alpha]
\cup \bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]\end{eqnarray*} such that \eqref{14-20} holds for $n=1, \cdots,\kappa$ and $x\in [\frac{n}{b}-a,\alpha+a(n-1)]$, and for $n=-1, \cdots,-\kappa$ and $x\in [an-\alpha, \frac{n}{b}]$, as well as for $n=0$ and $x\in[-a,0].$ The following lemma states sufficient conditions for the first of these requirements to be satisfied. The result is a minor adaption of Lemma 3.3 in \cite{CKK10}, so the proof is omitted.
\begin{lemma}\label{b-3} Let $\alpha, a,b>0$ be given such that $\alpha \le a < 2\alpha$ and $ab\in [\frac{M-1}{M},\frac{M}{M+1}[$ for some $M\in\mathbb{N}\setminus\{1\}$. Let $g\in V_\alpha$, and assume that $g(x) \neq 0$ for $x\in [\alpha -a, a - \alpha].$
Assume further that $\kappa \neq 0$ and that $h$ is chosen
on $[-\alpha,\alpha]$ as a bounded function such that the following conditions hold: \begin{itemize}
\item[\rm{(1)}] If there exist $n_+\in\{1,2,\cdots, \kappa\}$ and $y_+\in ]a-\alpha, \alpha-(1-ab)\frac{n_+}{b}]$ such that $g(y_+)=0$ and $\lim_{y\rightarrow y_+}
|R_{n_+}(y)|=\infty $, then
$h$ is continuous at $y=y_+ + (1-ab)\frac{n_+}{b}$ and the limit \begin{equation} \label{b-4} \lim_{y\rightarrow y_+}
\left\{ h(y+(1-ab)\frac{n_+}{b})R_{n_+}(y)
\right\} \end{equation} exists;
\item[\rm{(2)}] If there exist $n_-\in\{1,2,\cdots, \kappa\}$ and
$y_-\in [-\alpha+ (1-ab)\frac{n_-}{b},\alpha-a]$ such that $g(y_-)=0$ and $\lim_{y\rightarrow y_-}
|L_{n_-}(y)|=\infty $, then
$h$ is continuous at $y=y_- - (1-ab)\frac{n_-}{b}$ and the limit \begin{equation} \label{b-5} \lim_{y\rightarrow y_-} \left\{ h(y-(1-ab)\frac{n_-}{b}) L_{n_-}(y)\right\} \end{equation}
exists. \end{itemize} Then the function $h$ can be defined on the interval
$ -(\bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha] ) \cup
\bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]$ such that the duality condition \eqref{14-20} holds for $n=1,\cdots, \kappa$ and $ x\in [\frac{n}{b}-a, \alpha+a(n-1)],$ as well as for $n=-1,\cdots,-\kappa$ and $ x\in [an-\alpha, \frac{n}{b}];$ the function $h$ is bounded, and the values of $h$ outside $ -(\bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha] ) \cup [-\alpha,\alpha]\cup \bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha] $ are irrelevant. \end{lemma}
\begin{rem}{\em In Lemma \ref{b-3}, if $y_+$ is the end point of the interval $]a-\alpha, \alpha-(1-ab)\frac{n_+}{b}]$, $i.e.$, $y_+=\alpha-(1-ab)\frac{n_+}{b}$, the limit $\lim_{y\rightarrow y_+}$ in \eqref{b-4} should be understood as the limit from the left; similarly, if $y_-=-\alpha+(1-ab)\frac{n_-}{b}$, the limit $\lim_{y\rightarrow y_-}$ in \eqref{b-5} should be understood as the limit from the right.
$\square$\par
} \end{rem}
We can now complete the proof of the sufficiency in Theorem \ref{2-2-1}:
\noindent{\bf Proof of Theorem \ref{2-2-1}:} Assume that the conditions (i)--(iv) in Theorem \ref{2-2-1} hold. Note that $g(x)=0,\ x\in [-a,-\alpha]\cup[\alpha,a]$, since $g\in V_\alpha$. This together with condition (i) in Theorem \ref{2-2-1} implies that \begin{equation}\label{14-14}
g(x)\neq 0, \ x\in [\alpha-a,a-\alpha]. \end{equation} Following \eqref{b-2}, let \begin{eqnarray*}
h(x):=0, \ \ x\notin -\left(\bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]\right)
\cup [-\alpha,\alpha]
\cup \bigcup_{k=1}^{\kappa}[\frac{k}{b},ak+\alpha]. \end{eqnarray*} Via Lemma \ref{b-3} and the comment just before the lemma, $ \{E_{mb}T_{na}g \}_{m,n \in \mz}$ is a frame if we can define $h$ as a bounded function on $[-\alpha, \alpha]$ in such a way that \begin{itemize}
\item[(a)] the conditions in Lemma \ref{b-3} (1) and (2) hold;
\item[(b)] the duality condition \eqref{14-20} holds for $n=0$ and $x\in [-a,0],$ $i.e.$,
\begin{eqnarray} \label{4925g} g(x)h(x)+ g(x+a)h(x+a)=b, \, x\in [-a,0].\end{eqnarray} \end{itemize} We will split the definition of $h$ on $[-\alpha,\alpha]$ into several intervals. In fact, we will first define $h$ on $[\alpha-a,a-\alpha]$ and then on small balls around certain shifts of the zeros. First, we need some notation. For $m,n=0,1,\cdots, \kappa$, we define the sets $Y_n$ and $W_m$ by \begin{eqnarray*} &&Y_0= \{y_{0,i}\in ]a-\alpha,\alpha ] \ : g(y_{0,i})=0\}_{i=1,2,\cdots,r_0} \\ &&Y_n=\{y_{n,i}\in ]a-\alpha,\alpha-(1-ab)\frac{n}{b} ] \ : g(y_{n,i})=0
\text{ and $\lim_{y\rightarrow y_{n,i}} |R_{n}(y)|=\infty $}\}_{i=1,2,\cdots,r_n} \end{eqnarray*} and \begin{eqnarray*} &&W_0=\{w_{0,j}\in [-\alpha,\alpha-a [ \ : g(w_{0,j})=0\}_{j=1,2,\cdots,l_0}\\ &&W_m=\{w_{m,j}\in [-\alpha+(1-ab)\frac{m}{b},\alpha-a [ \ : g(w_{m,j})=0 \text{ and
$ \lim_{y\rightarrow w_{m,j}} |L_{m}(y)|=\infty$}\}_{j=1,2,\cdots,l_m} \end{eqnarray*} where $r_n$ and $l_m$ are the cardinalities of $Y_n$ and $W_m$, respectively. In words: since $g(x)\neq 0$ for $x\in [\alpha-a,a-\alpha]$,
the sets $Y_0$ and $W_0$ yield enumerations of the zeros for $g$
within $[-\alpha,\alpha],$ split into the positive, respectively, negative part; the sets $Y_n$ and $W_n, n\ge 1,$ yield enumerations of selected zeros within certain subsets of $[-\alpha,\alpha].$
We denote the open interval of radius $r>0$ centered at $x$ by $B(x;r)= ] x- r, x+r[.$ For $y_{n,i} \in Y_n$, $w_{m,j} \in W_m$ for $n,m=0,1,\cdots, \kappa$, let $\tilde y_{n,i}:=y_{n,i}+(1-ab)\frac{n}{b},\ \hat w_{m,j}:=w_{m,j}-(1-ab)\frac{m}{b}.$
If $n,m \geq 1$, then by the conditions (ii), (iii) and (iv)
in Theorem \ref{2-2-1}, we have \begin{eqnarray} \label{4918a} g(\tilde y_{n,i}-a)\neq 0 \neq g(\hat w_{m,j}+a), \, \, \mbox{and} \, \, \tilde y_{n,i} \neq \hat w_{m,j}+a.\end{eqnarray} Note that $g(\tilde y_{0,i})=g(\hat w_{0,j})=0$. Then we also have $\tilde y_{0,i} \neq \hat w_{m,j}+a,\ \tilde y_{n,i}-a \neq \hat w_{0,j}$ for $m,n\geq 1$, and $g(\tilde y_{0,i}-a)\neq 0 \neq g(\hat w_{0,j}+a)$ by the condition (i) in Theorem \ref{2-2-1}; thus, \eqref{4918a} actually holds for all $m,n=0,1,\cdots, \kappa.$ Then we can choose $\epsilon >0$ so that
\begin{itemize} \item[(i)] $|g(x)|\ge \delta > 0$ for $x\in
B(\tilde y_{n,i}-a; \epsilon) \cup B(\hat w_{m,j}+a ;\epsilon) $ and some $\delta>0$; \item[(ii)] For $m,n=0,1,\cdots, \kappa$, and $i=1,2,\cdots,r_n, j=1,2,\cdots,l_m,$ \begin{equation}\label{epsilon2-1} B(\tilde y_{n,i}; \epsilon) \cap B(\hat w_{m,j}+a ;\epsilon)=\emptyset. \end{equation} \end{itemize}
\noindent {\bf Definition of $h$ on $[\alpha-a,a-\alpha]$:} By \eqref{14-14} and continuity of $g$,
$\inf_{x\in [\alpha-a,a-\alpha]} |g(x)|>0$. We define $h(x):=\frac{b}{g(x)}, \ x\in [\alpha-a,a-\alpha]$, which is thus a bounded function. Note that for $x\in [-a,-\alpha]$, we have $g(x)=0$, and therefore \begin{equation}\label{b-6} g(x)h(x)+g(x+a)h(x+a)=b. \end{equation} Similarly, \eqref{b-6} holds for $x\in [\alpha-a,0]$, $i.e.$, we have now verified (b) on the subinterval $[-a,-\alpha]\cup[\alpha-a,0]$.
\noindent {\bf Definition of $h$ on $B(\tilde y_{n,i};\epsilon)\cap[a-\alpha,\alpha]$:} On this interval, put $h=0$. If $1\leq n\leq \kappa$, then, $h$ is continuous at $y=\tilde y_{n,i}$ and \begin{equation}\label{b-9}
\lim_{y\rightarrow y_{n,i}} \left\{ h(y +(1-ab)\frac{n}{b} )R_{n}(y) \right\}=0. \end{equation}
Hence the condition in Lemma \ref{b-3} (1) holds.
\noindent {\bf Definition of $h$ on $B(\tilde y_{n,i}-a;\epsilon)\cap[-\alpha,\alpha-a]$:}
We define $h$ on this set by $ h(x)= \frac{b-g(x+a)h(x+a)}{g(x)}= \frac{b}{g(x)};$ thus $h$ is bounded here by the choice of $\epsilon$, and (b) holds on $B(\tilde y_{n,i}-a;\epsilon)\cap[-\alpha,\alpha-a]$.
\noindent {\bf Definition of $h$ on $B(\hat w_{m,j};\epsilon)\cap[-\alpha,\alpha-a]$:} On this interval, put $h=0$. If $1\leq m\leq \kappa$, then $h$ is continuous at $y=\hat w_{m,j}$ and \begin{equation}\label{b-10}
\lim_{y\rightarrow w_{m,j}} \left\{ h(y -(1-ab)\frac{m}{b} )L_{m}(y) \right\}=0. \end{equation}
Hence the condition in Lemma \ref{b-3} (2) holds, $i.e.$, we have now completed the proof of (a).
\noindent {\bf Definition of $h$ on $B(\hat w_{m,j}+a;\epsilon)\cap[a-\alpha,\alpha]$:} We define $h$ on this set by $ h(x)= \frac{b-g(x-a)h(x-a)}{g(x)}= \frac{b}{g(x)};$ thus $h$ is bounded here by the choice of $\epsilon$ and \eqref{epsilon2-1}, and (b) holds on $ B(\hat w_{m,j};\epsilon)\cap[-\alpha,\alpha-a].$
To summarize all these, let $B_+:=\cup_{n=0}^{\kappa} \cup_{i=1}^{r_n} \left(B(\tilde y_{n,i};\epsilon)\cap[a-\alpha,\alpha]\right),$ and $B_-:=\cup_{m=0}^{\kappa} \cup_{j=1}^{l_m} \left(B(\hat w_{m,j};\epsilon)\cap [-\alpha,\alpha-a]\right).$ We have defined $h$ as a bounded function on $B:=[\alpha-a,a-\alpha] \cup B_+ \cup (B_+ -a) \cup B_- \cup (B_- +a)$, and (b) holds on \begin{eqnarray} \label{4101a} [-a,-\alpha]\cup[\alpha-a,0]\cup (B_+ -a)\cup B_-=[-a,-\alpha] \cup \left( B \cap [-a,0]\right).\end{eqnarray}
\noindent {\bf Definition of $h$ on $[-\alpha,\alpha]\setminus B$:} Put $h=0$ on $\left([-\alpha,\alpha]\setminus B \right)\cap [-a,0]$. Note that the zeroset of $g$ within $[-\alpha,\alpha]$ consists of $Y_0$ and $W_0,$ so $g(x)\neq 0$ for
$x\in \overline{[-\alpha,\alpha]\setminus B};$ using the continuity of $g$ implies that $\inf_{x\in [-\alpha,\alpha]\setminus B} |g(x)|>0$. We define $h(x)=\frac{b}{g(x)},\ x\in \left([-\alpha,\alpha]\setminus B \right)\cap [0,a];$ thus, we have now defined $h$ everywhere as a bounded function, and we just need to complete the proof of (b). Since we have proved (b) on the set in \eqref{4101a}, we just need to verify (b) on the set $]- \alpha, 0]\setminus \left( B\cap [-a,0] \right).$ Note that $h$ vanishes on this set and that \begin{eqnarray*} ]- \alpha, 0]\setminus \left( B\cap [-a,0] \right) & = & \left([-\alpha,\alpha]\setminus B \right)\cap [-a,0] \\ & = & ]-\alpha,\alpha-a[\setminus \left((B_+-a)\cup B_-\right) \\ & = & \left( \left([-\alpha,\alpha]\setminus B \right)\cap [0,a] \right) -a, \end{eqnarray*} where we used that $-\alpha\in B_-,\ \alpha\in B_+.$
Thus, by the definition of $h$ on $\left([-\alpha,\alpha]\setminus B \right)\cap [0,a]$ (b) holds on $ ]- \alpha, 0]\setminus \left( B\cap [-a,0] \right),$ as desired.
$\square$\par
\noindent{\bf Acknowledgments:} This research was supported by Basic Science Research Program through the National Research Foundation of Korea(NRF) funded by the Ministry of Education(2013R1A1A2A10011922). The authors thank the reviewer for suggesting that we provide more insight into the intuition behind Theorem \ref{2-2-1}; this motivated us to include Example \ref{4108e}.
\end{document} |
\begin{document}
\title[phantom stable category of $n$-Frobenius categories] {phantom stable category of $n$-Frobenius categories}
\author[Bahlekeh, Fotouhi, Salarian and Sartipzadeh]{Abdolnaser Bahlekeh, Fahimeh Sadat Fotouhi, Shokrollah Salarian and Atousa Sartipzadeh}
\address{Department of Mathematics, Gonbade-Kavous University, Postal Code:4971799151, Gonbad-e-Kavous, Iran} \email{bahlekeh@gonbad.ac.ir}
\address{ School of Mathematics, Institute for Research in Fundamental Science (IPM), P.O.Box: 19395-5746, Tehran, Iran}\email{ffotouhi@ipm.ir}
\address{Department of Pure Mathematics, Faculty of Mathematics and Statistics, University of Isfahan, P.O.Box: 81746-73441, Isfahan,
Iran and \\ School of Mathematics, Institute for Research in Fundamental Science (IPM), P.O.Box: 19395-5746, Tehran, Iran}
\email{Salarian@ipm.ir}
\address{Department of Pure Mathematics, Faculty of Mathematics and Statistics, University of Isfahan, P.O.Box: 81746-73441, Isfahan, Iran}
\email{asartipz@gmail.com}
\subjclass[2010]{18E10, 18G15, 18G65, 14F08}
\keywords{$n$-Frobenius category; phantom stable category; $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism; semi-separated noetherian scheme}
\begin{abstract} Let $n$ be a non-negative integer. An exact category $\mathscr{C} $ is said to be an $n$-Frobenius category, provided that
it has enough $n$-projectives and $n$-injectives and the $n$-projectives coincide with the $n$-injectives. It is proved that any abelian category with non-zero $n$-projective objects, admits a non-trivial $n$-Frobenius subcategory. In particular, we explore several examples of $n$-Frobenius categories. Also, as a far reaching generalization of the stabilization of a Frobenius category, we define and study phantom stable category of an $n$-Frobenius category $\mathscr{C} $. Precisely, assume that $\mathcal P\subseteq\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }$ is the subfunctor consisting of all conflations of length $n$ factoring through $n$-projective objects. A couple $(\mathscr{C} _{\mathcal P}, T)$, where $\mathscr{C} _{\mathcal P}$ is an additive category and $T$ is a covariant additive functor from $\mathscr{C} $ to $\mathscr{C} _{\mathcal P}$, is a phantom stable category of $\mathscr{C} $, provided that for any morphism $f$ in $\mathscr{C} $, $T(f)=0$, whenever $f$ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism and $T(f)$ is an isomorphism in $\mathscr{C} _{\mathcal P}$, if $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$, and $T$ has the universal property with respect to these conditions. The main focus of this paper is to show that the phantom stable category of an $n$-Frobenius category always exists. Some properties of phantom stable categories that reveal the efficiency of these categories are studied.
\end{abstract} \maketitle
\tableofcontents
\section{Introduction}
Assume that $\mathcal A$ is an abelian category and $\mathscr{C} $ a full additive subcategory of $\mathcal A$ which is closed under extensions. It is known that the exact structure of $\mathcal A$ is inherited by $\mathscr{C} $; see \cite[Lemma 10.20]{buh}. {Assume that $n$ is a non-negative integer.} An extension which is obtained by splicing $n$ conflations in $\mathscr{C} $, will be called a {\em conflation of length $n$}. For arbitrary objects $A,B\in\mathscr{C} $, the equivalence classes of all conflations of length $n$ in $\mathscr{C} $, $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(A, B)$, form an abelian group with respect to the Baer sum operation. {In the case $n=0$, we set $\operatorname{{\mathsf{Ext}}}^0_{\mathscr{C} }(A, B):=\operatorname{\mathsf{Hom}}_{\mathscr{C} }(A, B)$.} Moreover, the notions of $n$-projective and $n$-injective objects in $\mathscr{C} $ are defined in terms of the vanishing of the $\operatorname{{\mathsf{Ext}}}^{n+1}$ functor. Now we call $\mathscr{C} $ an {\it $n$-Frobenius category}, provided that it has enough $n$-projectives and $n$-injectives and the $n$-projectives coincide with the $n$-injectives. Assume that $\mathscr{C} $ is an $n$-Frobenius category. Then, for any $k\geq 1$, a given object $N\in\mathscr{C} $ fits into conflations of length $k$, say $\mathsf{\Omega}^kN\rightarrow P_{k-1}\rightarrow\cdots\rightarrow P_0\rightarrow N$, where $P_i$'s are $n$-projective, which will be called {\it unit conflations}. Also $\mathsf{\Omega}^kN$ is said to be a $k$-th syzygy of $N$. We set $\mathsf{H}:=\bigcup_{M, N\in\mathscr{C} }\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, N)$ and $\operatorname{{\mathsf{Ext}}}^n:=\bigcup_{M, N\in\mathscr{C} }\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^nN)$, where $\mathsf{\Omega}^nN$ runs over all the $n$-th syzygies of $N$. 
For any $f,g\in\operatorname{\mathsf{H}}$, the pull-back and push-out of any conflation of length $n$ along $f$ and $g$ are again conflations of length $n$. These operations, which for brevity are denoted by $\operatorname{{\mathsf{Ext}}}^nf$ and $g\operatorname{{\mathsf{Ext}}}^n$, respectively, induce an $\operatorname{\mathsf{H}}$-bimodule structure on $\operatorname{{\mathsf{Ext}}}^n$. We denote by $\mathcal P$ the subfunctor of $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }$ consisting of all conflations of the form $\operatorname{{\mathsf{Ext}}}^nf$, for some $f:M\rightarrow P$ in $\operatorname{\mathsf{H}}$, where $P$ is an $n$-projective object of $\mathscr{C} $. Particularly, Proposition \ref{equal} indicates that $\mathcal P$ is a submodule of $\operatorname{{\mathsf{Ext}}}^n$, and then, $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ will be an $\operatorname{\mathsf{H}}$-bimodule. It is proved in Section 6 that if $f$ annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ from the left, then it annihilates this module from the right and vice versa. Similarly, we establish that an element $g\in\operatorname{\mathsf{H}}$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ from the left and the right, simultaneously; see Corollaries \ref{lr} and \ref{qo}. Now consider two classes of morphisms in $\operatorname{\mathsf{H}}$, as follows: \begin{itemize}\item The class of all morphisms in $\operatorname{\mathsf{H}}$ annihilating $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$, which are called $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms. For a historical remark on phantom morphisms, see \ref{s100}. \item The class of all morphisms in $\operatorname{\mathsf{H}}$ acting as invertible on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$, which will be called quasi-invertible morphisms. \end{itemize}
By a {\it phantom stable category of $\mathscr{C} $}, we mean an additive category $\mathscr{C} _{\mathcal P}$, together with a covariant additive functor $T:\mathscr{C} \longrightarrow\mathscr{C} _{\mathcal P}$ such that:\\ (1) $T(s)$ is an isomorphism in $\mathscr{C} _{\mathcal P}$, for any quasi-invertible morphism $s$. \\(2) For any $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism $\varphi$, $T(\varphi)=0$ in $\mathscr{C} _{\mathcal P}$. \\(3) Any covariant additive functor $T':\mathscr{C} \longrightarrow\mathbb{D} $ satisfying the conditions (1) and (2), factors in a unique way through $T$.
In this paper, first we provide some important examples of $n$-Frobenius categories, and then we show that for any $n$-Frobenius category $\mathscr{C} $, the phantom stable category $(\mathscr{C} _{\mathcal P}, T)$ exists; see Theorem \ref{thmst}. Our formalism reveals that the phantom stabilization of an $n$-Frobenius category, is an efficient and natural extension of the classical stable category of a Frobenius category. Indeed, in the case $n=0$, morphisms factoring through projective objects are $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms, which is an ideal of $\operatorname{\mathsf{H}}$. Particularly, in order to examine the efficiency of phantom stable categories, it is proved that for given two objects $M,N\in\mathscr{C} $ and arbitrary syzygies $\mathsf{\Omega} M$ and $\mathsf{\Omega} N$ (with respect to $n$-projectives), there is an induced map $\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(M, N)\longrightarrow\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(\mathsf{\Omega} M, \mathsf{\Omega} N)$, which is an isomorphism; see Theorem \ref{syziso}.
Our motivation in studying the $n$-Frobenius category and then introducing the concept of phantom stable category, comes from the fact that there are categories that rarely have enough projectives, or even have no projective objects at all. However, they often have enough $n$-projectives or their subcategories of $n$-projective objects are non-trivial, for some integer $n\geq 1$. For instance, there are no projective objects in the category of quasi-coherent sheaves over the projective line $\mathbf{P^1}(R)$, where $R$ is a commutative ring with identity; see \cite[Corollary 2.3]{ee} and also \cite[Exercise III 6.2(b)]{har}. However, as proved by Serre, the category of coherent sheaves over a projective scheme, has enough locally free sheaves; see \cite[Corollary 5.18]{har}. More generally, the argument given in the proof of \cite[Lemma 1.12]{or} reveals that for a semi-separated noetherian scheme $\mathsf {X}$ of finite Krull dimension, there exists a non-negative integer $n$ such that locally free sheaves of finite rank, are $n$-projective objects in the category of coherent sheaves, $\operatorname{\mathsf{coh}}(\mathsf {X})$, and in particular, the subcategory of $n$-projective objects of $\operatorname{\mathsf{coh}}(\mathsf {X})$, is non-trivial.
Assume that $\mathcal A$ is an abelian category such that its subcategory of $n$-projective objects is non-trivial, for some integer $n\geq 1$. Then it is proved that $\mathcal A$ admits an $n$-Frobenius subcategory $\mathscr{C} $; see Theorem \ref{subcat}. We should stress that this fact is conceivable, as it is known that any abelian category with non-zero projective objects admits a 0-Frobenius subcategory. Assume that $\mathsf {X}$ is a semi-separated noetherian scheme of finite Krull dimension. As we have already mentioned, the category of locally free sheaves of finite rank, $\mathcal L$, is a subcategory of $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{coh}}(\mathsf {X})$, for some non-negative integer $n$. We will see that the subcategory consisting of all syzygies of complete resolutions of locally frees of finite rank, $\mathscr{C} (\mathcal L)$, is an $n$-Frobenius subcategory of $\operatorname{\mathsf{coh}}\mathsf {X}$, and in particular, $n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathcal L)=\mathcal L$; see Proposition \ref{locally}.
In order to explore more examples of phantom stable categories, we consider the category of complexes of flat $\mathcal{O}_{\mathsf {X}}$-modules, $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$, over the scheme $\mathsf {X}$. Pursuing the argument given in \cite[page 28]{ha1}, yields that this is an exact category, with exact structure being short exact sequences of complexes. We will see in Theorem \ref{fp}, that the aforementioned category is $n$-Frobenius, for some integer $n\geq 0$, and in particular, its $n$-projective objects (and then $n$-injective objects), are exactly the flat complexes, i.e., those acyclic complexes with flat kernels; see \cite[Definition 2.5]{e}.
The paper is organized as follows. In Section 2, we study $n$-Frobenius categories and explore some examples of such categories. {We will observe that semi-separated noetherian schemes of finite Krull dimension are good venue for searching such examples.} It is shown that, any abelian category with non-zero $n$-projective objects admits an $n$-Frobenius subcategory. Assume that $\mathsf {X}$ is a semi-separated noetherian scheme of finite Krull dimension. Then we will see that $\mathscr{C} (\mathcal L)$ is an $n$-Frobenius subcategory of $\operatorname{\mathsf{coh}}(\mathsf {X})$, for some integer $n$. Also, we prove that the category $\mathscr{C} (\mathsf {Flat}\mathsf {X})$ consisting of all syzygies of complete resolution of flat sheaves, is an $n$-Frobenius subcategory of $\operatorname{\mathsf{Qcoh}}(\mathsf {X})$, for some non-negative integer $n$, and $n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathsf {Flat}\mathsf {X})=\mathsf {Flat}\mathsf {X}$. In Section 3, we study those morphisms in $\operatorname{\mathsf{H}}$ acting as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} }$. It is proved that a given morphism $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} }$ from the left if and only if it acts as invertible from the right. These morphisms will be called quasi-invertible morphisms. Section 4 is devoted to study conflations factoring through $n$-projective objects, namely, those arising from pull-back along morphisms ending at $n$-projectives. This class of conflations forms a subfunctor of $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }$ and will be denoted by $\mathcal P$. It is proved that a given conflation lies in $\mathcal P$ if and only if it is obtained from push-out along a morphism starting at an $n$-projective object, and so, $\mathcal P$ is an $\operatorname{\mathsf{H}}$-bisubmodule of $\operatorname{{\mathsf{Ext}}}^n$. 
In Section 5, we show that any conflation $\operatorname{\boldsymbol{\gamma}}$ in $\mathscr{C} $ can be represented as a pull-back as well as push-out of unit conflations. These representations will be called a right (left) unit factorization of $\operatorname{\boldsymbol{\gamma}}$. In Section 6, we investigate those morphisms annihilating the module $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} }$. It is shown that a given object $f\in\operatorname{\mathsf{H}}$ annihilates $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} }$ from the left if and only if it annihilates from the right. We call such a morphism $f$, as an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism. In Section 7, we introduce a composition operator $``\circ"$ on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$. It is shown that this operator is associative and distributive over the Baer sum on both sides. In particular, for any object $M$, $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^nM)/{\mathcal P}$ has a ring structure with identity element, and also, there exists a ring homomorphism $\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, M)\longrightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^nM)/{\mathcal P}$ sending quasi-invertible morphisms to invertible elements. In Section 8, we consider an equivalence relation on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$. It is observed that this relation is compatible with the composition $``\circ"$ as well as the Baer sum operation. In the paper's final section, we show that the phantom stable category $(\mathscr{C} _{\mathcal P}, T)$ of an $n$-Frobenius category $\mathscr{C} $, always exists.
\section{$n$-Frobenius categories}
Let $\mathcal A$ be an abelian category and let $n$ be a non-negative integer. In this section, we study subcategories of $\mathcal A$ having enough $n$-projectives and $n$-injectives and the class of $n$-projectives coincides with the class of $n$-injectives, which we call $n$-Frobenius categories. It is shown that if $\mathcal A$ has non-zero $n$-projective objects, then it admits a non-trivial $n$-Frobenius subcategory. We also explore several examples of $n$-Frobenius categories. Let us begin this section with stating our convention.
\begin{conv}Throughout the paper, $\mathcal A$ is an abelian category and $\mathscr{C} $ is a full additive subcategory of $\mathcal A$ which is closed under extensions. So, as we have mentioned in the introduction, $\mathscr{C} $ becomes an exact category. We also assume that $(\mathsf {X}, \mathcal{O}_{\mathsf {X}})$ is a semi-separated, noetherian scheme of finite Krull dimension and all locally free sheaves are assumed to be of finite rank. \end{conv}
\begin{dfn} We say that an extension of length $t\geq 1$, $0\rightarrow B\rightarrow X_{t-1}\rightarrow\cdots\rightarrow X_0\rightarrow A\rightarrow 0$ in $\mathcal A$, is a {\it conflation} of length $t$ in $\mathscr{C} $, provided that it is obtained by splicing $t$ conflations of length 1 in $\mathscr{C} $ and it will be denoted by $B\rightarrow X_{t-1}\rightarrow\cdots\rightarrow X_{0}\rightarrow A$. The set of all equivalence classes of such conflations of length $t$ will be depicted by $\operatorname{{\mathsf{Ext}}}^t_{\mathscr{C} }(A, B)$. We also set $\operatorname{{\mathsf{Ext}}}^0_{\mathscr{C} }(A, B):=\operatorname{\mathsf{Hom}}_{\mathscr{C} }(A, B)$. It is easily seen that for any $i\geq 0$, $\operatorname{{\mathsf{Ext}}}^i_{\mathscr{C} }(-, -):\mathscr{C} ^{{\rm{op}}}\times\mathscr{C} \longrightarrow\mathbf{Ab}$ is a bifunctor.
Recall that two conflations $\operatorname{\boldsymbol{\epsilon}}, \operatorname{\boldsymbol{\epsilon}}'\in\operatorname{{\mathsf{Ext}}}^t_{\mathscr{C} }(A, B)$ are equivalent, provided that there is a chain of conflations of length $t$, $\operatorname{\boldsymbol{\epsilon}}=\operatorname{\boldsymbol{\epsilon}}_0, \operatorname{\boldsymbol{\epsilon}}_1\cdots, \operatorname{\boldsymbol{\epsilon}}_k=\operatorname{\boldsymbol{\epsilon}}'$ such that for any $0\leq i\leq k-1$, we have either a morphism $\operatorname{\boldsymbol{\epsilon}}_i\rightarrow\operatorname{\boldsymbol{\epsilon}}_{i+1}$ or a morphism $\operatorname{\boldsymbol{\epsilon}}_{i+1}\rightarrow\operatorname{\boldsymbol{\epsilon}}_i$ with fixed ends; see \cite[Proposition 3.1]{mit}.
Following Keller \cite{ke,ke1}, conflations of length 1 will be called just conflations. In particular, if $B\stackrel{f}\rightarrow C\stackrel{g}\rightarrow A$ is a conflation, then $f$ (resp. $g$) is called an inflation (resp. a deflation). So if $\operatorname{\boldsymbol{\epsilon}}:B\rightarrow X_{t-1}\rightarrow\cdots\rightarrow X_0\rightarrow A$ is a conflation of length $t$, then $\operatorname{\boldsymbol{\epsilon}}=\operatorname{\boldsymbol{\epsilon}}_{t-1}\cdots\operatorname{\boldsymbol{\epsilon}}_0$, where $\operatorname{\boldsymbol{\epsilon}}_i$'s are conflations with compatible ends. \end{dfn}
\begin{dfn}Let $n$ be a non-negative integer.\\ (1) A given object $P\in\mathscr{C} $ (resp. $I\in\mathscr{C} $) is said to be an {\it $n$-projective} (resp. {\it $n$-injective}) object of $\mathscr{C} $, if $\operatorname{{\mathsf{Ext}}}^i_{\mathscr{C} }(P, X)=0$ (resp. $\operatorname{{\mathsf{Ext}}}^i_{\mathscr{C} }(X, I)=0$) for all integers $i>n$ and all objects $X\in\mathscr{C} $. The class of all $n$-projective (resp. $n$-injective) objects will be denoted by $n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ (resp. $n$-$\operatorname{\mathsf{inj}}\mathscr{C} $). \\ (2) The category $\mathscr{C} $ is said to have enough $n$-projectives, provided that each object $M$ in $\mathscr{C} $ fits into a deflation $P\rightarrow M$ with $P$ $n$-projective. Dually one has the notion of having enough $n$-injectives.\\ (3) We say that the exact category $\mathscr{C} $ is {\it $n$-Frobenius}, if
$\mathscr{C} $ has enough $n$-projectives and $n$-injectives and $n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ coincides with $n$-$\operatorname{\mathsf{inj}}\mathscr{C} $. \end{dfn}
\begin{rem}\label{remexam}(1) It follows from the definition that $0$-Frobenius categories are indeed the usual notion of Frobenius categories.\\ (2) If $\mathscr{C} $ is an $n$-Frobenius category, then it will be an $i$-Frobenius category, for any $i\ge n$. In particular, $n$-$\operatorname{\mathsf{proj}}\mathscr{C} = i$-$\operatorname{\mathsf{proj}}\mathscr{C} $. \end{rem}
\begin{s} Assume that $\mathscr{C} $ is an $n$-Frobenius category. Then, for any $k\geq 1$, a given object $N\in\mathscr{C} $ fits into conflations of length $k$;
$\mathsf{\Omega}^kN\rightarrow P_{k-1}\rightarrow\cdots\rightarrow P_0\rightarrow N$ and $N\rightarrow P^1\rightarrow\cdots\rightarrow P^k\rightarrow\mathsf{\Omega}^{-k}N$
such that $P_i, P^i$'s are $n$-projective, which will be called {\it unit conflations}. Also $\mathsf{\Omega}^kN$ is said to be a $k$-th syzygy of $N$. Clearly, unit conflations are not uniquely determined. We denote the class of all unit conflations of length $k$ ending at $N$ (resp. beginning with $N$) by $\mathcal U_k(N)$ (resp. $\mathcal U^k(N)$). Unit conflations, usually will be depicted by $\delta$. \end{s}
\begin{rem}\label{pp1} (1) Assume that $k\geq 1$ and there exists a morphism of conflations;
{\footnotesize \[\xymatrix{\alpha:N \ ~\ar[r] \ \ \ar[d]_f& \ \ X_{k-1}\ar[r] \ \ \ar[d]& \cdots \ar[r]& \ \ X_0\ar[r] \ \ \ar[d] & \ M\ar[d]_g\\ \beta:N' \ ~\ar[r] \ \ &\ \ Y_{k-1}\ar[r] \ \ &\cdots\ar[r]& \ \ Y_0\ar[r] \ \ & M'.}\]} It is evident from the definition of push-out and pull-back that $\operatorname{\boldsymbol{\alpha}}\longrightarrow f\operatorname{\boldsymbol{\alpha}}$ and $\operatorname{\boldsymbol{\beta}} g\longrightarrow\operatorname{\boldsymbol{\beta}}$ are morphisms of conflations with the right and the left fixed ends, respectively. Now using the universal properties of push-out and pull-back diagrams, one may find the morphism of conflations $f\operatorname{\boldsymbol{\alpha}}\longrightarrow\operatorname{\boldsymbol{\beta}} g$ with fixed ends. Consequently, applying \cite[Proposition 3.1]{mit} yields that $f\operatorname{\boldsymbol{\alpha}}=\operatorname{\boldsymbol{\beta}} g$. In particular, any morphism of conflations of length $k$ with the left (resp. right) fixed ends, is a pull-back (resp. push-out) diagram.\\ (2) Assume that $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} }(C, A) $ and take morphisms $h:C'\rightarrow C$ and $l:A\rightarrow A'$ in $\mathscr{C} $. It can be easily seen that there is a morphism of conflations; {\footnotesize \[\xymatrix{\gamma h: A \ \ ~\ar[r] \ \ \ar[d]_l& \ \ X_{k-1}\ar[r] \ \ \ar[d]& \cdots \ar[r]& \ \ X_0\ar[r] \ \ \ar[d] & \ C'\ar[d]_h\\ \ l\gamma:A' \ \ ~\ar[r] \ \ \ &\ \ Y_{k-1}\ar[r] \ \ &\cdots\ar[r]& \ \ Y_0\ar[r] \ \ & C.}\]} So, as we have observed just above, $l(\operatorname{\boldsymbol{\gamma}} h)=(l\operatorname{\boldsymbol{\gamma}})h$; see also \cite[Page 171, (2)]{mit}. \end{rem}
\begin{s} We set $\operatorname{\mathsf{H}}:=\bigcup_{M,N\in\mathscr{C} }\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, N)$ and $\operatorname{{\mathsf{Ext}}}^n:=\bigcup_{M,N\in\mathscr{C} }\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^nN)$, where $\mathsf{\Omega}^nN$ runs over all the $n$-th syzygies of $N$. \\ Assume that $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^n N)$, $a\in\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M', M)$ and $b\in\operatorname{\mathsf{Hom}}_{\mathscr{C} }(\mathsf{\Omega}^nN, {\mathsf{\Omega}'}^nN)$. According to Remark \ref{pp1}, we have $b(\operatorname{\boldsymbol{\gamma}} a)=(b\operatorname{\boldsymbol{\gamma}})a$. So $\operatorname{{\mathsf{Ext}}}^n$ has an $\operatorname{\mathsf{H}}$-bimodule structure. \end{s}
\begin{rem}\label{zero}Assume that $f:M\rightarrow N$ is a morphism in $\mathscr{C} $. So, for any $X\in\mathscr{C} $ and $k\geq 0$, one has the induced morphism $\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} }(X, M)\rightarrow\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} }(X, N)$ mapping each $\operatorname{\boldsymbol{\gamma}}$ to $f\operatorname{\boldsymbol{\gamma}}$, the push-out of $\operatorname{\boldsymbol{\gamma}}$ along $f$. Similarly, $\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} }(N, X)\rightarrow\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} }(M, X)$ sends each object $\operatorname{\boldsymbol{\alpha}}$ to $\operatorname{\boldsymbol{\alpha}} f$, the pull-back along $f$. Indeed, these morphisms can be interpreted as multiplication by $f$ from the left and the right, respectively. From this point of view, we will denote these morphisms again by $\mathbf f$. One should note that, in the case $k=0$, since $\operatorname{{\mathsf{Ext}}}^0_{\mathscr{C} }(-, -)=\operatorname{\mathsf{Hom}}_{\mathscr{C} }(-, -)$, $f\operatorname{\boldsymbol{\gamma}}$ and $\operatorname{\boldsymbol{\alpha}} f$ are indeed composition morphisms. \end{rem}
In the sequel, we will see that any abelian category with non-zero $n$-projective objects admits a non-trivial $n$-Frobenius subcategory. First we state a definition.\\ \begin{dfn} An acyclic complex of $n$-projective objects $\mathbf{P^{\bullet}}:\cdots\longrightarrow P^{i-1}\stackrel{d^{i-1}}\longrightarrow P^i\stackrel{d^i}\longrightarrow P^{i+1}\longrightarrow\cdots$ is said to be {\it a complete resolution of $n$-projective objects}, if $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{im} d^i, Q)=0$, for any $i\in\mathbb{Z} $ and $Q\in n$-$\operatorname{\mathsf{proj}}\mathcal A$.\\ Assume that $\mathcal{I} $ is a resolving subcategory of $n$-$\operatorname{\mathsf{proj}}\mathcal A$, that is, in any conflation $P'\rightarrow P\rightarrow P''$, with $P''\in\mathcal{I} $, we have $P'\in\mathcal{I} $ if and only if $P\in\mathcal{I} $. Assume that $\mathscr{C} (\mathcal{I} )$ is the subcategory of $\mathcal A$ consisting of all objects $M$, each of which is a syzygy of a complete resolution of objects in $\mathcal{I} $, i.e., an acyclic complex $\mathbf{P^{\bullet}}:\cdots\longrightarrow P^{i-1}\stackrel{d^{i-1}}\longrightarrow P^i\stackrel{d^i}\longrightarrow P^{i+1}\longrightarrow\cdots,$ in $\mathcal{I} $ such that $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{im} d^i, Q)=0$, for any $i\in\mathbb{Z} $ and $Q\in\mathcal{I} $.
If $\mathcal{I} =n$-$\operatorname{\mathsf{proj}}\mathcal A$, instead of $\mathscr{C} (n$-$\operatorname{\mathsf{proj}}\mathcal A)$, we write $\mathscr{C} (\mathcal A).$ It is evident that $\mathscr{C} (\mathcal{I} )$ is a full subcategory of $\mathcal A$ containing all objects in $\mathcal{I} $ and it is closed under finite direct sums. Moreover, as the next proposition indicates, $\mathscr{C} (\mathcal{I} )$ is an exact category. First, we need a couple of preliminary lemmas. In the rest of this section, we assume that the abelian category $\mathcal A$ has non-zero $n$-projective objects. \end{dfn}
\begin{lem}\label{000}{The following statements are satisfied: \begin{enumerate}\item Let $Q\rightarrow M\stackrel{f}\rightarrow N$ be a conflation in $\mathcal A$ such that $Q\in\mathcal{I} $. Then for any $X\in\mathscr{C} (\mathcal{I} )$ and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(X, N)$, there exists an object $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(X, M)$ such that $f\operatorname{\boldsymbol{\gamma}}'=\operatorname{\boldsymbol{\gamma}}$. \item Let $M\stackrel{f}\rightarrow N\rightarrow Q$ be a conflation in $\mathcal A$ with $Q\in n$-$\operatorname{\mathsf{proj}}\mathcal A$. Then for any $X\in\mathcal A$ and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(M, X)$, there exists an object $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(N,X)$ such that $\operatorname{\boldsymbol{\gamma}}'f=\operatorname{\boldsymbol{\gamma}}$. \end{enumerate}} \end{lem} \begin{proof}We only prove the statement (1), the other one is obtained dually. Since $Q\in\mathcal{I} $ and $X\in\mathscr{C} (\mathcal{I} )$, $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(X, Q)=0$, and so, making use of \cite[Chapter VII, Theorem 5.1]{mit} forces $\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(X, M)\stackrel{\mathbf f}\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(X, N)$ to be an epimorphism. Hence, there exists $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^ n_{\mathcal A}(X, M)$ such that $f\operatorname{\boldsymbol{\gamma}}'=\operatorname{\boldsymbol{\gamma}}$, as needed. \end{proof}
\begin{lem}\label{cok}The following assertions hold: \begin{enumerate}\item Let $Q\rightarrow M\stackrel{f}\rightarrow N$ be a conflation in $\mathcal A$ such that $Q\in\mathcal{I} $. If $N\in\mathscr{C} (\mathcal{I} ),$ then so does $M$.\item Let $M\stackrel{f}\rightarrow N\rightarrow Q$ be a conflation in $\mathcal A$ such that $Q\in\mathcal{I} $. If $M\in\mathscr{C} (\mathcal{I} )$, then so does $N$.\end{enumerate} \end{lem} \begin{proof}(1) Since $N\in\mathscr{C} (\mathcal{I} )$, by the definition, there exists a complete resolution of objects in $\mathcal{I} $; $\cdots\rightarrow Q^{-1}\stackrel{d^{-1}}\rightarrow Q^0\stackrel{d^0}\rightarrow Q^{1}\stackrel{d^{1}}\rightarrow Q^{2}\rightarrow\cdots,$ such that $N=\mathsf{im} d^0$. Take the conflation $\operatorname{\boldsymbol{\gamma}}:N\rightarrow Q^{1}\rightarrow\cdots\rightarrow Q^{n}\rightarrow\mathsf{\Omega}^{-n}N$ in $\mathcal A$. {Clearly, $\mathsf{\Omega}^{-n}N\in\mathscr{C} (\mathcal{I} )$. So} applying Lemma \ref{000}(1), gives us an object $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^n_{\mathcal A}(\mathsf{\Omega}^{-n}N, M)$ such that $f\operatorname{\boldsymbol{\gamma}}'=\operatorname{\boldsymbol{\gamma}}$. Namely, we have the following push-out diagram; {\footnotesize\[\xymatrix{\operatorname{\boldsymbol{\gamma}}':M~ \ \ \ar[r]\ar[d]_{f} & \ \ T\ar[d]\ar[r] \ \ & \ \ Q^{2}\ar[r]\ \ \ar@{=}[d] &\cdots \ar[r]& \ \ Q^{n}\ar@{=}[d]\ar[r] \ \ & \ \ \mathsf{\Omega}^{-n}N\ar@{=}[d]\\ \operatorname{\boldsymbol{\gamma}}:N~\ar[r] \ \ & \ \ Q^{1}\ar[r]\ \ & \ \ Q^{2}\ar[r]\ \ & \cdots \ar[r] &\ \ Q^{n}\ar[r] \ \ & \ \ \mathsf{\Omega}^{-n}N.}\]} {Next take the following pull-back diagram; \[\xymatrix{L~\ar[r]\ar@{=}[d]& T'\ar[r]\ar[d]& M\ar[d]_{f}\\ L~\ar[r] & Q^0\ar[r]& N.}\]
Since $\mathcal{I} $ is closed under extensions, one may get that $T$ and $T'$ belong to $\mathcal{I} $. In particular, we obtain the complete resolution of {objects in $\mathcal{I} $;} $\cdots\rightarrow Q^{-1}\rightarrow T'\rightarrow T\rightarrow Q^{2}\rightarrow\cdots$ such that $M=\mathsf{im}(T'\rightarrow T)$, and then, $M\in\mathscr{C} (\mathcal{I} )$.}\\ (2) This is obtained by dualizing the argument given in the first assertion, so we skip it. Thus the proof is completed. \end{proof}
\begin{prop}\label{proj1}The category $\mathscr{C} (\mathcal{I} )$ is closed under extensions and kernels of epimorphisms. \end{prop} \begin{proof}Let us first prove that $\mathscr{C} (\mathcal{I} )$ is closed under extensions. To do this, assume that $M\stackrel{f}\rightarrow N\stackrel{g}\rightarrow K$ is a conflation in $\mathcal A$ such that $M, K\in\mathscr{C} (\mathcal{I} )$. We shall prove that $N\in\mathscr{C} (\mathcal{I} )$, as well. By the hypothesis, we may take a conflation $M\rightarrow Q^{1}\rightarrow\mathsf{\Omega}^{-1} M$ in $\mathcal A$ such that $Q^{1}\in\mathcal{I} $ and $\mathsf{\Omega}^{-1}M\in\mathscr{C} (\mathcal{I} )$. So considering the push-out diagram; \[\xymatrix{M~\ar[r]\ar[d]_{f}& Q^{1}\ar[r]\ar[d]& \mathsf{\Omega}^{-1}M\ar@{=}[d]\\ N \ar[r] & T\ar[r] & \mathsf{\Omega}^{-1}M,}\] we obtain the conflation $Q^{1}\rightarrow T\rightarrow K$ in $\mathcal A$. As $K\in\mathscr{C} (\mathcal{I} )$, Lemma \ref{cok}(1) implies that $T\in\mathscr{C} (\mathcal{I} )$. This enables us to have the following commutative diagram; \[\xymatrix{N~\ar[r]\ar@{=}[d]& T\ar[r]\ar[d]& \mathsf{\Omega}^{-1}M\ar[d]\\ N~\ar[r] & P^{1}\ar[r]\ar[d]& G^1\ar[d]\\ & \mathsf{\Omega}^{-1}T~\ar@{=}[r] & \mathsf{\Omega}^{-1}T,}\] in which $P^{1}\in\mathcal{I} $ and $\mathsf{\Omega}^{-1}T\in\mathscr{C} (\mathcal{I} )$. As $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{\Omega}^{-1}M, Q)=0=\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{\Omega}^{-1}T, Q)$ for any object $Q\in\mathcal{I} $, we have that $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(G^1, Q)=0$. Now consider the conflation $\mathsf{\Omega}^{-1}M\rightarrow G^1\rightarrow\mathsf{\Omega}^{-1}T$ in $\mathcal A$. 
Since $\mathsf{\Omega}^{-1}M, \mathsf{\Omega}^{-1}T\in\mathscr{C} (\mathcal{I} )$, applying the above argument, will give us conflations $G^1\rightarrow P^{2}\rightarrow G^2$ and $\mathsf{\Omega}^{-2}M\rightarrow G^2\rightarrow\mathsf{\Omega}^{-2}T$ in $\mathcal A$ with $\mathsf{\Omega}^{-2}M, \mathsf{\Omega}^{-2}T\in\mathscr{C} (\mathcal{I} )$, $P^{2}\in\mathcal{I} $ and $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(G^2, Q)$ vanishes, for any object $Q\in\mathcal{I} $. In particular, repeating this manner, gives rise to the existence of an acyclic complex $0\rightarrow N\stackrel{\epsilon}\rightarrow P^{1}\stackrel{d^{1}}\rightarrow P^{2}\stackrel{d^{2}}\rightarrow\cdots,$ where each $P^i$ belongs to $\mathcal{I} $ and $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{im} d^i, Q)=0$ for any object $Q\in\mathcal{I} $ and $i\geq 1$. {Moreover, a dual argument gives us an acyclic complex, $\cdots\rightarrow P^{-1}\stackrel{d^{-1}}\rightarrow P^0\stackrel{\epsilon'}\rightarrow N\rightarrow 0$, where $P^i\in\mathcal{I} $ and $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{im} d^i, Q)=0$, for all $i\leq -1$. }Thus $N\in\mathscr{C} (\mathcal{I} )$, as required. Next we show that $\mathscr{C} (\mathcal{I} )$ is closed under kernels of epimorphisms. So assume that $M\stackrel{f}\rightarrow N\stackrel{g}\rightarrow K$ is a conflation in $\mathcal A$ such that $N, K\in\mathscr{C} (\mathcal{I} )$. We have to show that $M\in\mathscr{C} (\mathcal{I} )$, as well. Since $N\in\mathscr{C} (\mathcal{I} )$, we may obtain the following commutative diagram; \[\xymatrix{M~\ar[r]\ar[d]_{f}& Q^{1}\ar[r]\ar@{=}[d]& T\ar[d]\\ N \ar[r] & Q^{1}\ar[r] & \mathsf{\Omega}^{-1}N,}\] with $\mathsf{\Omega}^{-1}N\in\mathscr{C} (\mathcal{I} )$ and $Q^{1}\in\mathcal{I} $. So applying the snake lemma, gives us the conflation $K\rightarrow T\rightarrow \mathsf{\Omega}^{-1}N$ in $\mathcal A$. 
Since $\mathsf{\Omega}^{-1}N, K\in\mathscr{C} (\mathcal{I} )$, as we have already seen, $T\in\mathscr{C} (\mathcal{I} )$. Thus, by taking the right half of a complete resolution $0\rightarrow T\rightarrow P^1\stackrel{d^2}\rightarrow P^2\rightarrow\cdots$ of $T$, we will get an acyclic complex $0\longrightarrow M\longrightarrow Q^{1}\stackrel{d^{1}}\longrightarrow P^{1}\stackrel{d^{2}}\longrightarrow\cdots$, where all objects, except $M$, lie in $\mathcal{I} $ and $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{im} d^i, \mathcal{I} )=0$, for all $i$.
{Next take the following pull-back diagram; \[\xymatrix{& \mathsf{\Omega} K\ar@{=}[r]\ar[d]& \mathsf{\Omega} K\ar[d]\\ M~\ar[r]\ar@{=}[d] & L\ar[r]\ar[d]& P\ar[d]\\ M\ar[r] & N~\ar[r] & K,}\] where $P\in\mathcal{I} $. Since $N, \mathsf{\Omega} K\in\mathscr{C} (\mathcal{I} )$, by the first assertion, the same is true for $L$. So, considering the pull-back diagram; \[\xymatrix{\mathsf{\Omega} L~\ar[r]\ar@{=}[d]& G\ar[r]\ar[d]& M\ar[d]\\ \mathsf{\Omega} L~\ar[r] & P'\ar[r]\ar[d]& L\ar[d]\\ & P~\ar@{=}[r] & P,}\] and using the fact that $\mathcal{I} $ is resolving, we infer that $G\in\mathcal{I} $. This, in conjunction with $\mathsf{\Omega} L$ being in $\mathscr{C} (\mathcal{I} )$, would guarantee the existence of a left resolution of objects in $\mathcal{I} $ for $M$; $\cdots\rightarrow P^{-1}\stackrel{d^{-1}}\rightarrow P^0\stackrel{d^0}\rightarrow G\rightarrow M\rightarrow 0$, such that $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathcal A}(\mathsf{im} d^i, \mathcal{I} )=0$ for all $i\leq 0$. Consequently, $M\in\mathscr{C} (\mathcal{I} )$,} as required. \end{proof}
\begin{rem}\label{exact}Assume that $A\rightarrow B\rightarrow C$ is a conflation in $\mathscr{C} $. Then the same argument given in the proof of \cite[Chapter VII, Theorem 5.1]{mit}, yields that for any object $X$ in $\mathscr{C} $ and any $n\geq 1$, there exists an exact sequence; $$\operatorname{{\mathsf{Ext}}}^{n-1}_{\mathscr{C} }(X, C)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(X, A)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(X, B)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(X, C)\rightarrow\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} }(X, A).$$ Also, a dual argument gives us the exact sequence; $$\operatorname{{\mathsf{Ext}}}^{n-1}_{\mathscr{C} }(A, X)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(C, X)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(B, X)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(A, X)\rightarrow\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} }(C, X).$$ \end{rem}
\begin{theorem}\label{subcat}$\mathscr{C} (\mathcal{I} )$ is an $n$-Frobenius subcategory of $\mathcal A$. \end{theorem} \begin{proof}First one should note that by Proposition \ref{proj1}, $\mathscr{C} (\mathcal{I} )$ is an exact category. In view of the definition of $\mathscr{C} (\mathcal{I} )$, we only need to show that any $n$-injective object of $\mathscr{C} (\mathcal{I} )$ is also $n$-projective over $\mathscr{C} (\mathcal{I} )$ and vice versa. Take an object $N\in n$-$\operatorname{\mathsf{inj}}\mathscr{C} (\mathcal{I} )$ and consider unit conflations $\delta_N:N\rightarrow P^{1}\rightarrow\cdots\rightarrow P^{n}\longrightarrow \mathsf{\Omega}^{-n}N$ and $\mathsf{\Omega}^{-n}N\stackrel{h}\rightarrow P^{n+1}\rightarrow\mathsf{\Omega}^{-n-1} N$, where $P^i\in\mathcal{I} $, for any $i$. Since $\mathsf{\Omega}^{-n-1}N\in\mathscr{C} (\mathcal{I} )$, $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathscr{C} (\mathcal{I} )}(\mathsf{\Omega}^{-n-1}N,N)=0$, implying that, there exists an object $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} (\mathcal{I} )}( P^{n+1},N)$ such that $\operatorname{\boldsymbol{\gamma}} h=\delta_N$. Namely, we have the following pull-back diagram; {\footnotesize\[\xymatrix{\delta_N \ :N~ \ \ \ \ar[r] \ \ \ar@{=}[d] & \ \ P^{1}\ar@{=}[d]\ar[r]\ \ & \ \ \cdots \ar[r]\ \ &P^{n-1}\ar@{=}[d] \ar[r]& \ \ P^{n}\ar[d]\ar[r] \ \ & \ \ \mathsf{\Omega}^{-n}N\ar[d]^{h}\\ \operatorname{\boldsymbol{\gamma}} : \ \ \ N~ \ \ \ar[r] \ \ \ & \ \ P^{1}\ar[r]\ \ & \ \ \cdots\ar[r] \ \ & P^{n-1} \ar[r] & \ \ H\ar[r] &P^{n+1}\ \ .}\]} As $H\in\mathscr{C} (\mathcal{I} )$, there is an inflation $H\rightarrow Q$ with $Q\in\mathcal{I} $. 
So by taking the conflation $L\rightarrow H\rightarrow P^{n+1}$, one gets the following commutative diagram; \[\xymatrix{L~\ar[r]\ar@{=}[d]& H\ar[r]\ar[d]&P^{n+1} \ar[d]^{g}\\ L~\ar[r] & Q\ar[r]& \mathsf{\Omega}^{-1}L.}\] In particular, we have the following pull-back diagram of unit conflations; \begin{equation}\label{xy} {\footnotesize \xymatrix{\delta_N : N~ \ \ \ \ar[r] \ \ \ar@{=}[d] & \ \ P^{1}\ar@{=}[d] \ \ \ar[r] \ \ & \ \ \cdots\ar[r] \ \ &P^{n-1} \ar[r] \ar@{=}[d]& \ \ P^{n}\ar[d]\ar[r] \ \ & \mathsf{\Omega}^{-n}N\ar[d]^{gh}\\ \beta : \ \ \ N~ \ \ \ar[r]\ \ \ & \ \ P^{1}\ar[r]\ \ & \cdots\ar[r]\ \ & P^{n-1} \ar[r] & \ \ Q\ar[r] \ \ &{\mathsf{\Omega}}^{-1}L}.} \end{equation} One should note that, according to our notation, $\mathsf{\Omega}^{-1}L={\mathsf{\Omega}'}^{-n}N$. Hence, for any object $X\in\mathscr{C} (\mathcal{I} )$, we will have the following square;{\footnotesize \[\xymatrix{\operatorname{{\mathsf{Ext}}}_{\mathscr{C} (\mathcal{I} )}^{n+1}(N, X)~\ar[r]^{\cong}\ar@{=}[d] & \operatorname{{\mathsf{Ext}}}_{\mathscr{C} (\mathcal{I} )}^{2n+1}({\mathsf{\Omega}'}^{-n}N, X)\ar[d]^{\mathbf g\mathbf h}\\ \operatorname{{\mathsf{Ext}}}_{\mathscr{C} (\mathcal{I} )}^{n+1}(N, X)~\ar[r]^{\cong} & \operatorname{{\mathsf{Ext}}}_{\mathscr{C} (\mathcal{I} )}^{2n+1}({\mathsf{\Omega}}^{-n}N, X).}\]}As $gh$ factors through $P^{n+1}$, the right column is zero, and then, $\operatorname{{\mathsf{Ext}}}_{\mathscr{C} (\mathcal{I} )}^{n+1}(N, X)=0$, meaning that $N\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathcal{I} )$. Since the converse is obtained dually, we skip it. So the proof is finished. \end{proof}
Assume that $R$ is a commutative noetherian ring. A finitely generated $R$-module $M$ is said to be of $G$-dimension zero, if it is a syzygy of a complete resolution of projectives. The $G$-dimension of a finitely generated $R$-module $N$, $G$-$\dim_RN$, is the length of a shortest resolution of $N$ by $G$-dimension zero modules. If there is no such resolution of finite length, then we write $G$-$\dim_RN=\infty$. This invariant has been defined by Auslander and Bridger \cite{ab} and provides a refinement of the projective dimension of a module. The category of all modules of $G$-dimension zero (resp. of finite $G$-dimension), is denoted by $\mathcal{G}$ (resp. $\mathcal{G}^{<\infty}$).
\begin{example}\label{ex1} Assume that $(R, \mathfrak{m})$ is a $d$-dimensional commutative noetherian local ring. {In view of the Auslander-Buchsbaum formula, any module of finite projective dimension, is $d$-projective. Set $\mathcal{I} :=d$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{mod}} R$. Assume that $M\in\mathcal{G}^{<\infty}$ is arbitrary. Then by \cite[Proposition 1.2]{et} (see also \cite[Lemma 2.17]{cfh}), there is a short exact sequence $0\rightarrow M\rightarrow P\rightarrow X\rightarrow 0$ in which $P\in\mathcal{I} $ and $X\in\mathcal{G}$. All of these facts, would imply that $M\in\mathscr{C} (\mathcal{I} )$. Moreover, it is standard to see that any object in $\mathscr{C} (\mathcal{I} )$ lies in $\mathcal{G}^{<\infty}$. Hence by Theorem \ref{subcat}, $\mathcal{G}^{<\infty}$ is a $d$-Frobenius subcategory of $\operatorname{\mathsf{mod}} R$. In particular, if $R$ is Gorenstein, (i.e. $\id_RR$ is finite), then $\operatorname{\mathsf{mod}} R$ is indeed a $d$-Frobenius category. We also note that the category $\mathcal{G}$, is a $0$-Frobenius subcategory of $\operatorname{\mathsf{mod}} R$.} \end{example}
In the sequel, we explore more examples of categories which admit $n$-Frobenius subcategories, for some non-negative integer $n$. \begin{example}\label{ex123} (1) According to \cite[Lemma 1.12]{or}, {the category of locally free sheaves of finite rank, $\mathcal L$, is a subcategory of $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{coh}}(\mathsf {X})$, for some integer $n\geq 0$. So the category $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{coh}}(\mathsf {X})$ is non-trivial.} Thus Theorem \ref{subcat} yields that $\operatorname{\mathsf{coh}}(\mathsf {X})$ has a non-trivial $n$-Frobenius subcategory, that we denote it by $\mathscr{C} (\mathsf {X})$.\\ (2) Assume that $\operatorname{\mathsf{Qcoh}}(\mathsf {X})$ is the category of quasi-coherent sheaves over $\mathsf {X}$ and $\mathsf {Flat}\mathsf {X}$ is its subcategory of flats. Following the argument given in the proof of \cite[Lemma 1.12]{or}, gives rise to the existence of an integer $n\geq 0$ such that for any object $\mathcal{F}\in\mathsf {Flat}\mathsf {X}$, $\operatorname{{\mathsf{Ext}}}^{n+1}(\mathcal{F}, \mathcal{Q} )=0$, for all quasi-coherent sheaves $\mathcal{Q} $. Namely, $\mathsf {Flat}\mathsf {X}$ is a subcategory of $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{Qcoh}}(\mathsf {X})$. Thus $\operatorname{\mathsf{Qcoh}}(\mathsf {X})$ has enough $n$-projective objects, as it has enough flat sheaves; see \cite[Corollary 3.21]{mu}. Hence, Theorem \ref{subcat} implies that $\operatorname{\mathsf{Qcoh}}(\mathsf {X})$ admits a non-trivial $n$-Frobenius subcategory $\mathscr{C} (\mathsf {X})$. In addition, as $\mathsf {Flat}\mathsf {X}$ is a resolving subcategory of $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{Qcoh}}(\mathsf {X})$, another use of Theorem \ref{subcat} yields that $\mathscr{C} (\mathsf {Flat}\mathsf {X})$ is also an $n$-Frobenius subcategory of $\operatorname{\mathsf{Qcoh}}(\mathsf {X})$.\\
\end{example}
Assume that $\mathcal{I} $ is a resolving subcategory of $n$-$\operatorname{\mathsf{proj}}\mathcal A$. In view of the definition of $\mathscr{C} (\mathcal{I} )$, we have $\mathcal{I} \subseteq n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathcal{I} )$. The next couple of results provide examples in which the equality holds.
\begin{prop}\label{cf}Keeping the notation above, the equality $n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathsf {Flat}\mathsf {X})=\mathsf {Flat}\mathsf {X}$ holds. \end{prop} \begin{proof}Since $\mathscr{C} (\mathsf {Flat}\mathsf {X})$ is an $n$-Frobenius subcategory of $\operatorname{\mathsf{Qcoh}}(\mathsf {X})$, it suffices to prove that any object of $n$-$\operatorname{\mathsf{inj}}\mathscr{C} (\mathsf {Flat}\mathsf {X})$ is flat. Take an arbitrary object $N\in n$-$\operatorname{\mathsf{inj}}\mathscr{C} (\mathsf {Flat}\mathsf {X})$. Consider the unit conflations; $\delta:N\rightarrow P^{1}\rightarrow\cdots\rightarrow P^{n}\rightarrow\mathsf{\Omega}^{-n}N$ and $\mathsf{\Omega}^{-n}N\stackrel{h}\rightarrow P^{n+1}\rightarrow\mathsf{\Omega}^{-n-1}N$, where $P^i\in\mathsf {Flat}\mathsf {X}$, for any $i$. So, {according to the proof of Theorem \ref{subcat}, we get the diagram \ref{xy}.} In particular, for any object $X\in\operatorname{\mathsf{Qcoh}}(\mathsf {X})$, we will have the following commutative square; {\footnotesize\[\xymatrix{\operatorname{\mathsf{Tor}}_{n+1}^{\mathsf {Ch}}(X, \mathsf{\Omega}^{-n}N)~\ar[r]^{\cong}\ar[d]_{\operatorname{\mathsf{Tor}}_1(1_X, gh)} & \operatorname{\mathsf{Tor}}_{1}^{\mathsf {Ch}}(X, N)\ar@{=}[d]\\ \operatorname{\mathsf{Tor}}_{n+1}^{\mathsf {Ch}}(X, {\mathsf{\Omega}'}^{-n}N)~\ar[r]^{\cong} & \operatorname{\mathsf{Tor}}_{1}^{\mathsf {Ch}}(X, N).}\]} As $gh$ factors through the flat sheaf $P^{n+1}$, the left column is zero, implying that $\operatorname{\mathsf{Tor}}_1^{\mathsf {Ch}}(X, N)=0$, and then $N$ is flat, as needed. \end{proof}
\begin{prop}\label{locally}For an integer $n\geq 0$, $\mathscr{C} (\mathcal L)$ is an $n$-Frobenius subcategory of $\operatorname{\mathsf{coh}}(\mathsf {X})$ with $n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathcal L)=\mathcal L$. \end{prop} \begin{proof} As we have mentioned in Example \ref{ex123}(1), $\mathcal L$ is a subcategory of $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{coh}}(\mathsf {X})$, which is evidently resolving. So by Theorem \ref{subcat}, $\mathscr{C} (\mathcal L)$, is an $n$-Frobenius subcategory of $\operatorname{\mathsf{coh}}(\mathsf {X})$. Moreover, Proposition \ref{cf} and the fact that every coherent flat sheaf is locally free, leads us to deduce that $n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathcal L)=\mathcal L$. So the proof is completed. \end{proof} { \begin{rem}\label{chf}According to Example \ref{ex123}(2), there exists an integer $t\geq 0$ such that for any $F\in\mathsf {Flat}\mathsf {X}$, we have $F\in t$-$\operatorname{\mathsf{proj}}\mathsf {Flat}\mathsf {X}$. On the other hand, it is known that there is an integer $k\geq 0$ such that for any object $F\in\mathsf {Flat}\mathsf {X}$, one has an exact sequence $0\rightarrow F\rightarrow C^0\rightarrow\cdots\rightarrow C^k\rightarrow 0$, where each $C^i$ is cotorsion flat, implying that $F\in k$-$\operatorname{\mathsf{inj}}\mathsf {Flat}\mathsf {X}$, because each $C^i$ lies in $\operatorname{\mathsf{inj}}\mathsf {Flat}\mathsf {X}$. So, using the same method appeared in \cite[page 28]{ha1}, we may deduce that any complex of the form $\cdots\rightarrow 0\rightarrow F\stackrel{1}\rightarrow F\rightarrow 0\rightarrow\cdots$ with $F$ flat, is an $n$-projective and an $n$-injective object of $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$, for some integer $n$. Here $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ stands for the category of complexes of flats. We let $\mathcal{J} $ denote the subcategory consisting of all contractible complexes of flats. 
{One should note that any object of $\mathcal{J} $ is a direct sum of complexes of the form $\cdots\rightarrow 0\rightarrow F\stackrel{1}\rightarrow F\rightarrow 0\rightarrow\cdots$ with $F$ flat, and so, it belongs to $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ as well as to $n$-$\operatorname{\mathsf{inj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$.} Assume that $F^{\bullet}:\cdots\rightarrow F^{-1}\rightarrow F^0\rightarrow F^{1}\rightarrow\cdots$ is an arbitrary object of $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. So, according to the short exact sequence of complexes, $0\rightarrow F^{\bullet}\rightarrow\operatorname{\mathsf{cone}}(1_{F^{\bullet}})\rightarrow F^{\bullet}[1]\rightarrow 0$, we infer that $F^{\bullet}$ is embedded in (and also a homomorphic image of) an object in $\mathcal{J} $. It is evident that $\mathcal{J} $ is a subcategory of $\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$, the category of flat complexes. The result below indicates that $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ is an $n$-Frobenius category with $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})=\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$.
\end{rem} \begin{theorem}\label{fp}For a non-negative integer $n$, $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ is an $n$-Frobenius category, with $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})=\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$. \end{theorem} \begin{proof}{According to Remark \ref{chf}, $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ has enough $n$-projective and $n$-injective objects. So it remains to examine the validity of the equalities; $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})=\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})=n$-$\operatorname{\mathsf{inj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. In this direction, first we show that $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})=n$-$\operatorname{\mathsf{inj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. Take an arbitrary object $N^\bullet\in n$-$\operatorname{\mathsf{inj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. By using Remark \ref{chf}, we may have conflations; $N^{\bullet}\rightarrow {P^{\bullet}}^1\rightarrow\cdots\rightarrow {P^{\bullet}}^n\rightarrow\mathsf{\Omega}^{-n}N^{\bullet}$ and $\mathsf{\Omega}^{-n}N^{\bullet}\stackrel{h^{\bullet}}\rightarrow {P^{\bullet}}^{n+1}\rightarrow\mathsf{\Omega}^{-n-1}N^{\bullet}$, where $P_i^{\bullet}\in\mathcal{J} $, for any $i$. Since $\mathsf{\Omega}^{-n-1}N^{\bullet}\in\mathsf {Ch}(\mathsf {Flat}\mathsf {X}),$ we have $\operatorname{{\mathsf{Ext}}}^{n+1}_{\mathsf {Ch}}(\mathsf{\Omega}^{-n-1}N^{\bullet}, N^{\bullet})=0$. So, the argument given in the proof of Theorem \ref{subcat}, gives us the diagram similar to \ref{xy}. Take an arbitrary object $X^{\bullet}\in\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. 
As for any $i>n$, $\operatorname{{\mathsf{Ext}}}^i_{\mathsf {Ch}}(P_j^{\bullet}, X^{\bullet})=0$, applying the functor $\operatorname{{\mathsf{Ext}}}_{\mathsf {Ch}}(-, X^{\bullet})$, gives rise to the following commutative square; {\footnotesize \[\xymatrix{\operatorname{{\mathsf{Ext}}}_{\mathsf {Ch}}^{i}(N^{\bullet}, X^{\bullet})~\ar[r]^{\cong}\ar@{=}[d] & \operatorname{{\mathsf{Ext}}}_{\mathsf {Ch}}^{n+i}({\mathsf{\Omega}'}^{-n}N^{\bullet}, X^{\bullet})\ar[d]^{\mathbf g^{\bullet}\mathbf h^{\bullet}}\\ \operatorname{{\mathsf{Ext}}}_{\mathsf {Ch}}^{i}(N^{\bullet}, X^{\bullet})~\ar[r]^{\cong} & \operatorname{{\mathsf{Ext}}}_{\mathsf {Ch}}^{n+i}({\mathsf{\Omega}}^{-n}N^{\bullet}, X^{\bullet}).}\]}Since $g^{\bullet}h^{\bullet}$ factors through an object of $\mathcal{J} $, the right column will be zero, and then, $\operatorname{{\mathsf{Ext}}}_{\mathsf {Ch}}^{i}(N^{\bullet}, X^{\bullet})=0$ for any $i>n$, meaning that $N^{\bullet}\in n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. The dual method indicates that if $N^{\bullet}\in n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$, it will belong to $n$-$\operatorname{\mathsf{inj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. Next assume that $N^\bullet\in n$-$\operatorname{\mathsf{inj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ and consider the latter conflations. Take an arbitrary object $X\in\operatorname{\mathsf{Qcoh}}(\mathsf {X})$. As ${P^{\bullet}}^j$, for any $j$, is contractible, $\operatorname{\mathsf{H}}_i({P^{\bullet}}^j\otimes_{\mathcal{O}_{\mathsf {X}}}X)=0$ for all $i\in \mathbb{Z} $. 
Hence, one may obtain the following commutative square; \[\xymatrix{\operatorname{\mathsf{H}}_{n+i}(\mathsf{\Omega}^{-n}N^\bullet\otimes_{\mathcal{O}_{\mathsf {X}}} X)~\ar[r]^{\cong}\ar[d] & \operatorname{\mathsf{H}}_{i}(N^\bullet\otimes_{\mathcal{O}_{\mathsf {X}}} X)\ar@{=}[d]\\ \operatorname{\mathsf{H}}_{n+i}({\mathsf{\Omega}'}^{-n}N^\bullet\otimes_{\mathcal{O}_{\mathsf {X}}} X)~\ar[r]^{\cong} & \operatorname{\mathsf{H}}_{i}(N^\bullet\otimes_{\mathcal{O}_{\mathsf {X}}} X).}\] Since $g^{\bullet}h^{\bullet}$ factors through ${P^{\bullet}}^{n+1}$, the left column will be zero, implying that $\operatorname{\mathsf{H}}_i(N^\bullet\otimes_{\mathcal{O}_{\mathsf {X}}}X)=0$, for all $i\in\mathbb{Z} $. Since $X$ was arbitrary, we conclude that $N^\bullet\in\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$. Conversely, assume that $N^\bullet\in\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$. By \cite[Proposition 2.6]{hs}, there is a short exact sequence, $0\longrightarrow N^\bullet\longrightarrow {C^\bullet}^0\longrightarrow {P^\bullet}^1\longrightarrow 0$ in $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$ with ${P^\bullet}^1\in\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$ and
${C^\bullet}^0$ dg-cotorsion, that is, ${C^\bullet}^0\in\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})^{\bot}$.
Thus ${C^\bullet}^0\in\mathsf {Ch}_{\mathsf {P}}(\mathsf {Flat}\mathsf {X})$, as well. Repeating this manner, one may obtain an exact sequence, $0\longrightarrow N^\bullet\longrightarrow {C^\bullet}^0\longrightarrow {C^\bullet}^1\longrightarrow\cdots\longrightarrow {C^\bullet}^{k-1}\longrightarrow {C^\bullet}^k\longrightarrow 0$ in $\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$, with $k=\dim\mathsf {X}$. Since ${C^\bullet}^i$, for any $i$, is a pure acyclic complex of cotorsion flats, it is contractible by \cite[Corollary 3.1.2]{hs}, and in particular, it will belong to $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. Hence, it is easily seen that $N^{\bullet}\in n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathsf {Flat}\mathsf {X})$. So the proof is finished.}
\end{proof}
\begin{rem}\label{remfin}As we have mentioned in Example \ref{ex123}(1), $\mathcal L$ is a subcategory of $n$-$\operatorname{\mathsf{proj}}\operatorname{\mathsf{coh}}(\mathsf {X})$, for some integer $n$. On the other hand, since any locally free sheaf is flat and, as observed in Remark \ref{chf}, each flat sheaf lies in $n$-$\operatorname{\mathsf{inj}}\mathcal L$, we conclude that $\mathcal L$ will be also a subcategory of $n$-$\operatorname{\mathsf{inj}}\mathcal L$. So, similar to Remark \ref{chf}, we infer that any complex of the form $\cdots\rightarrow 0\rightarrow L\stackrel{1}\rightarrow L\rightarrow 0\rightarrow\cdots$, with $L$ locally free, is an $n$-projective and an $n$-injective object of $\mathsf {Ch}(\mathcal L)$, the category of complexes of locally free sheaves of finite rank. Assume that $\mathcal{J} $ is the subcategory consisting of all contractible complexes of locally free sheaves of finite rank. Again, similar to Remark \ref{chf}, one may observe that any object of $\mathsf {Ch}(\mathcal L)$ can be embedded in an object of $\mathcal{J} $, as well as, it is a homomorphic image of an object in $\mathcal{J} $. The same argument given in the proof of Theorem \ref{fp}, clarifies that the subcategory $\mathsf {Ch}_{\mathsf {P}}(\mathcal L)$ consisting of all acyclic complexes of locally free sheaves with locally free kernels, forms $n$-projective objects of $\mathsf {Ch}(\mathcal L)$. Precisely, we have the next interesting result. \end{rem} \begin{theorem}$\mathsf {Ch}(\mathcal L)$ is an $n$-Frobenius category, for some integer $n$. Moreover, $n$-$\operatorname{\mathsf{proj}}\mathsf {Ch}(\mathcal L)=\mathsf {Ch}_{\mathsf {P}}(\mathcal L)$. \end{theorem}
\section{Quasi-invertible morphisms} Assume that $\mathscr{C} $ is an $n$-Frobenius category. In this section, we will show that a morphism $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}_{\mathscr{C} }^{n+1}$ from the left and from the right, simultaneously. A morphism satisfying this condition will be called a quasi-invertible morphism in $\mathscr{C} $.
In the remainder of this paper, unless otherwise specified, by a conflation of length $t$, we mean a conflation of length $t$ in $\mathscr{C} $. Also, if there is no ambiguity, we drop the ``of length $t$''. Furthermore, instead of $\operatorname{{\mathsf{Ext}}}^i_{\mathscr{C} }(-,-)$, we write $\operatorname{{\mathsf{Ext}}}^i(-,-)$.
\begin{lem}\label{101}(1) Let $N\stackrel{f}\rightarrow X$ and $N\stackrel{g}\rightarrow X'$ be two morphisms in $\mathscr{C} $ such that $f$ or $g$ is an inflation. Then $N\stackrel{[f~~g]^t}\longrightarrow X\oplus X'$ is also an inflation.\\ (2) Let $X\stackrel{f}\rightarrow N$ and $X'\stackrel{g}\rightarrow N$ be two morphisms in $\mathscr{C} $ such that $f$ or $g$ is a deflation. Then $X\oplus X'\stackrel{[f~~g]}\longrightarrow N$ is a deflation. \end{lem} \begin{proof}Let us prove the first assertion. The second one is obtained dually. Without loss of generality, we may assume that $f$ is an inflation. Consider the following commutative diagram; {\footnotesize\[\xymatrix{ & X'\ar@{=}[r]\ar[d]& X'\ar[d]\\ N~\ar[r]^{{{\tiny {\left[\begin{array}{ll} f \\ g \end{array} \right]}}}}\ar@{=}[d] & X\oplus X' \ar[r]\ar[d]_{[1~0]}&L'\ar[d]_{h}\\ N\ar[r]^{f}&X\ar[r]& L.}\]}Since the bottom row is a conflation and $\mathscr{C} $ is closed under extensions, we infer that $L'\in\mathscr{C} $, and so, $h$ is a morphism in $\mathscr{C} $. Now, as $\mathscr{C} $ is closed under pull-back, the middle row will be a conflation, giving the desired result. \end{proof}
\begin{dfn}Assume that $f:M\rightarrow N$ is a morphism in $\mathscr{C} $ and $i\geq 0$ is an integer. We say that $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^i$ from the left (resp. right), if for any $X\in\mathscr{C} $, $\operatorname{{\mathsf{Ext}}}^i(X, M)\stackrel{\mathbf f}\rightarrow\operatorname{{\mathsf{Ext}}}^i(X, N)$ (resp. $\operatorname{{\mathsf{Ext}}}^i(N, X)\stackrel{\mathbf f}\rightarrow\operatorname{{\mathsf{Ext}}}^i(M, X)$) is an isomorphism. \end{dfn}
{ \begin{lem}\label{conf} Let $M\stackrel{f}\rightarrow N$ be a morphism in $\mathscr{C} $ acting as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$ from the left or from the right. Assume that $Q\stackrel{\pi}\rightarrow N$ is a deflation and $M\stackrel{l}\rightarrow P$ is an inflation such that $P, Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Then \begin{enumerate}\item $M\oplus Q\stackrel{[f~~\pi]}\rightarrow N$ is a deflation such that its kernel is $n$-projective.\item $M\stackrel{[f~~l]^{t}}\rightarrow N\oplus P$ is an inflation such that its cokernel is $n$-projective.\end{enumerate} \end{lem} \begin{proof}We only deal with the case $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$ from the left. The other case can be treated by the similar way. Since $M\stackrel{l}\rightarrow P$ is an inflation, by Lemma \ref{101}(1), we have the conflation $M\stackrel{h}\rightarrow N\oplus P\rightarrow L$ in $\mathscr{C} $, where $h={{{\tiny {\left[\begin{array}{ll} f \\ l \end{array} \right]}}}}$. {We shall prove that $L\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Take an arbitrary object $X\in\mathscr{C} $. By applying the functor $\operatorname{{\mathsf{Ext}}}(X, -)$ to this conflation and using Remark \ref{exact}, } we obtain the long exact sequence;{\footnotesize $$\cdots\rightarrow\operatorname{{\mathsf{Ext}}}^{i}(X, M)\stackrel{\mathbf h}\rightarrow\operatorname{{\mathsf{Ext}}}^{i}(X, N\oplus P)\rightarrow\operatorname{{\mathsf{Ext}}}^{i}(X, L)\rightarrow\operatorname{{\mathsf{Ext}}}^{i+1}(X, M)\stackrel{\mathbf h}\rightarrow\operatorname{{\mathsf{Ext}}}^{i+1}(X, N\oplus P)\rightarrow\cdots.$$}{As $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$, it is easily seen that it acts as invertible on $\operatorname{{\mathsf{Ext}}}^i$ for all $i>n$. 
Moreover,} since $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, $l\operatorname{{\mathsf{Ext}}}^{i}=0$ for all $i>n$, implying that $f\operatorname{{\mathsf{Ext}}}^{i}=h\operatorname{{\mathsf{Ext}}}^{i}$. In particular, $h$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{i}$, and then, $\operatorname{{\mathsf{Ext}}}^{i}(X, L)=0$ for all $i>n$, meaning that $L\in n$-$\operatorname{\mathsf{inj}}\mathscr{C} $, and so, it belongs to $n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, {giving the first assertion. For the second one,}
one may apply Lemma \ref{101}(2) to obtain the conflation $L'\rightarrow M\oplus Q\stackrel{[f~~\pi]}\rightarrow N$. So, repeating the above method, we deduce that $\operatorname{{\mathsf{Ext}}}^{i}(X, L')=0$ for all $i>n+1$. Now, as $\mathscr{C} $ is $n$-Frobenius, it is easily seen that $L'\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. So, the proof is finished. \end{proof}
\begin{cor}\label{lr}Let $f:M\rightarrow N$ be a morphism in $\mathscr{C} $. Then $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$ from the left if and only if it acts as invertible from the right. \end{cor} \begin{proof}Assume that $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$ from the left. In view of Lemma \ref{conf}, there exists a conflation $M\stackrel{h}\rightarrow N\oplus P\rightarrow P'$ in $\mathscr{C} $, where $P, P'\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Suppose that $X\in\mathscr{C} $ is arbitrary. So by applying the functor $\operatorname{{\mathsf{Ext}}}(-, X)$ to this conflation, we get the following long exact sequence; $$\cdots\rightarrow\operatorname{{\mathsf{Ext}}}^{i}(P', X)\rightarrow\operatorname{{\mathsf{Ext}}}^{i}(N\oplus P, X)\stackrel{\mathbf h}\rightarrow\operatorname{{\mathsf{Ext}}}^{i}(M, X)\rightarrow\operatorname{{\mathsf{Ext}}}^{i+1}(P', X)\rightarrow\cdots.$$ As $\operatorname{{\mathsf{Ext}}}^{i}(P', X)=0$, for any $i>n$, $h$ (and so, $f$) will act as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$ from the right. Since the sufficiency can be shown in a dual manner, we ignore it. So the proof is finished. \end{proof} }
\begin{lem}\label{epi} Let $M\stackrel{f}\rightarrow N$ be a morphism in $\mathscr{C} $ such that $\operatorname{\mathsf{ker}} f, \operatorname{\mathsf{coker}} f\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Assume that $Q\stackrel{\pi}\rightarrow N$ is a deflation and $M\stackrel{i}\rightarrow P$ is an inflation such that $P, Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Then \begin{enumerate}\item $M\oplus Q\stackrel{[f~~\pi]}\longrightarrow N$ is a deflation such that its kernel is $n$-projective.\item $M\stackrel{[f~~i]^{t}}\longrightarrow N\oplus P$ is an inflation such that its cokernel is $n$-projective. \end{enumerate} \end{lem} \begin{proof}Let us prove only the first assertion. The second one is obtained dually.
By the hypothesis, there exist conflations $P'\rightarrow M\stackrel{h}\rightarrow L$ and $L\stackrel{g}\rightarrow N\rightarrow P''$ such that $gh=f$ and $P', P''\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Consider the following commutative diagram; {\footnotesize\[\xymatrix{T~\ar[r]\ar[d]_{\varphi}& Q\ar[r]^{l\pi}\ar[d]_{\pi}& P''\ar@{=}[d]\\ L~\ar[r]^{g} & N\ar[r]^l& P'',}\]}where $\varphi$ is an induced map. One should note that since $l$ and $\pi$ are deflation, $l\pi$ is so. Thus, the top row is also a conflation. Since $Q, P''\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, the same is true for $T$. In particular, we have the conflation $T\stackrel{[-\varphi~~\alpha]^t}\longrightarrow L\oplus Q\stackrel{[g~~\pi]}\longrightarrow N$. Now consider the following pull-back diagram; {\footnotesize\[\xymatrix{P'~\ar[r]\ar@{=}[d]& T'\ar[r]\ar[d]& T\ar[d]\\ P'~\ar[r] & M\oplus Q\ar[r]^{u}\ar[d]& L\oplus Q\ar[d]\\ & N~\ar@{=}[r] & N,}\]}where $u={\tiny {\left[\begin{array}{ll} h & 0 \\ 0 & {1} \end{array} \right]}}$. Evidently, $T'$ is $n$-projective, because $P',T\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Consequently, $T'\rightarrow M\oplus Q\stackrel{[f~~\pi]}\rightarrow N$ is the desired conflation. So the proof is finished. \end{proof} As a consequence of Lemma \ref{epi} and the proof of Corollary \ref{lr}, we include the next result. \begin{cor}\label{is}Let $f:M\rightarrow N$ be a morphism in $\mathscr{C} $ such that $\operatorname{\mathsf{ker}} f, \operatorname{\mathsf{coker}} f\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Then $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$. \end{cor}
\begin{dfn}We say that a given morphism $f$ in $\mathscr{C} $ is quasi-invertible, provided that $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$. The class of all quasi-invertible morphisms will be denoted by $\mathsf{\Sigma}$. \end{dfn}
\begin{rem}\label{rems}Assume that $f:M\rightarrow N$ is a morphism in $\mathsf{\Sigma}$. As we have seen in the proof of Lemma \ref{conf}, by taking an inflation (resp. a deflation) $g:M\rightarrow P$ (resp. $g:P\rightarrow N$), we will obtain an inflation (resp. a deflation) $M\stackrel{[f~~g]^t}\longrightarrow N\oplus P$ (resp. $M\oplus P\stackrel{[f~~g]}\longrightarrow N$) such that its cokernel (resp. kernel) is $n$-projective. Moreover, since $\operatorname{{\mathsf{Ext}}}^{n+1}(-, Q)=0=\operatorname{{\mathsf{Ext}}}^{n+1}(Q, -)$, for any $Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, we may deduce that the maps $f$, ${\tiny {\left[\begin{array}{ll} f \\ g \end{array} \right]}}$ and $[f~~g]$ act identically on $\operatorname{{\mathsf{Ext}}}^{n+1}$. So, if $f$ acts as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$, without loss of generality, we may further assume that $f$ is an inflation or a deflation with cokernel and kernel $n$-projective, respectively. \end{rem}
The result below reveals that being a unit conflation is stable under the pull-back and push-out along morphisms in $\mathsf{\Sigma}$.
\begin{lem}\label{unit}Let $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k(N, \mathsf{\Omega}^k N)$ with $k\geq 1$ and let $a:X\rightarrow N$ and $b:\mathsf{\Omega}^kN\rightarrow Y$ be two morphisms in $\mathsf{\Sigma}$. Then the following assertions hold: \begin{enumerate}\item $\operatorname{\boldsymbol{\gamma}}$ is a unit conflation if and only if $\operatorname{\boldsymbol{\gamma}} a$ is so. \item $\operatorname{\boldsymbol{\gamma}}$ is a unit conflation if and only if $b\operatorname{\boldsymbol{\gamma}}$ is so. \end{enumerate} \end{lem} \begin{proof}We only prove the first assertion. The second one is obtained dually. First one should note that, by the definition of pull-back diagram, without loss of generality, we may assume that $k=1$. As $a\in\mathsf{\Sigma}$ , by Lemma \ref{conf}, there exists a conflation $Q\rightarrow X\oplus P\stackrel{[a~~\pi]}\rightarrow N$, where $P, Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. So taking the following pull-back diagram; {\footnotesize \[\xymatrix{\operatorname{\boldsymbol{\gamma}} [a~~\pi]: \mathsf{\Omega} N~\ar[r]\ar@{=}[d]& H\ar[r]\ar[d]& X\oplus P\ar[d]_{[a~~\pi]}\\ \operatorname{\boldsymbol{\gamma}}: \mathsf{\Omega} N \ar[r] & T\ar[r] & N,}\]}gives rise to the conflation, $Q\rightarrow H\rightarrow T$. We show that $H$ and $T$ are $n$-projective, simultaneously. If $T$ is $n$-projective, then the same is true for $H$, because $n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ is closed under extensions. Conversely, assume that $H$ is $n$-projective. As $\mathscr{C} $ is an $n$-Frobenius category, it is easily seen that the class $n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ is closed under cokernels of monomorphisms, implying that $T$ is $n$-projective. This means that the conflation $\operatorname{\boldsymbol{\gamma}}$ is unit if and only if $\operatorname{\boldsymbol{\gamma}}[a~~\pi]$ is so. 
Next considering the following pull-back diagram; {\footnotesize\[\xymatrix{\operatorname{\boldsymbol{\beta}}:\mathsf{\Omega} N~\ar[r]\ar@{=}[d]& L\ar[r]\ar[d]& X\ar[d]_{{{\tiny {\left[\begin{array}{ll} 1\\ 0 \end{array} \right]}}}}\\ \operatorname{\boldsymbol{\gamma}}[a~~\pi]:\mathsf{\Omega} N~\ar[r] & H\ar[r]\ar[d]& X\oplus P\ar[d]\\ & P~\ar@{=}[r] & P,}\]}we conclude that $\operatorname{\boldsymbol{\gamma}}[a~~\pi]$ and $\operatorname{\boldsymbol{\beta}}$ are unit conflations, simultaneously. Now the equality $\operatorname{\boldsymbol{\beta}}=(\operatorname{\boldsymbol{\gamma}}[a~~\pi]){{{\tiny {\left[\begin{array}{ll} 1\\ 0 \end{array} \right]}}}}=\operatorname{\boldsymbol{\gamma}} a$ completes the proof. \end{proof}
We close this section with the following result. \begin{lem}\label{sig} Let $f:M\rightarrow N$ be an inflation or a deflation in $\mathscr{C} $. Then the following assertions hold: \begin{enumerate} \item If there exists $\delta_N\in\mathcal U_n(N)$ such that $\delta_Nf\in\mathcal U_n(M)$, then $f$ lies in $\mathsf{\Sigma}$. \item If there exists $\delta_M\in\mathcal U^n(M)$ such that $f\delta_M\in\mathcal U^n(N)$, then $f$ belongs to $\mathsf{\Sigma}$. \end{enumerate} \end{lem} \begin{proof} We only prove the first assertion. The second one is obtained dually. Without loss of generality, we assume that $f$ is an inflation. By the definition of pull-back diagram, we may assume that $n=1$. By the hypothesis, there exists a pull-back diagram; {\footnotesize\[\xymatrix{\mathsf{\Omega} N~\ar[r] \ar@{=}[d]& T\ar[r]\ar[d]_{h}& M\ar_{f}[d]\\ \delta_N:\mathsf{\Omega} N~\ar[r] & P\ar[r]& N,}\]}where $P,T\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Now since $h$ is an inflation, we have that $\operatorname{\mathsf{coker}} h$ is $n$-projective, and so, the same will be true for $\operatorname{\mathsf{coker}} f$. Now Corollary \ref{is} completes the proof. \end{proof}
\section{$\mathcal P$-subfunctor of $\operatorname{{\mathsf{Ext}}}^n$} Assume that $\mathscr{C} $ is an $n$-Frobenius category. The aim of this section is to study a subfunctor of $\operatorname{{\mathsf{Ext}}}^n$ consisting of all conflations arising as a pull-back along morphisms ending at $n$-projective objects that we call a $\mathcal P$-subfunctor of $\operatorname{{\mathsf{Ext}}}^n$. We begin with the following useful observation.
\begin{rem}\label{use}Assume that $X, Z$ are arbitrary objects of $\mathscr{C} $. Consider the unit conflations $Z\rightarrow P\rightarrow\mathsf{\Omega}^{-1}Z$ and $\mathsf{\Omega} X\rightarrow Q\rightarrow X$. So, we will have the following commutative diagram with exact rows and columns;
\[\xymatrix{ \operatorname{{\mathsf{Ext}}}^n(P, Q)~\ar[r]\ar[d]& \operatorname{{\mathsf{Ext}}}^n(Z, Q)\ar[r]\ar[d]_{\beta}& \operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}Z, Q)\ar[d]\ar[r] &0\\ \operatorname{{\mathsf{Ext}}}^n(P, X)~\ar[r]^{\alpha}\ar[d]& \operatorname{{\mathsf{Ext}}}^n(Z, X)\ar[r]^{\psi}\ar[d]_{\varphi}& \operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}Z, X)\ar[d]_{\eta}\ar[r] &0\\ \operatorname{{\mathsf{Ext}}}^{n+1}(P, \mathsf{\Omega} X) \ar[r] & \operatorname{{\mathsf{Ext}}}^{n+1}(Z, \mathsf{\Omega} X)\ar[r]^{\theta} & \operatorname{{\mathsf{Ext}}}^{n+2}(\mathsf{\Omega}^{-1}Z, \mathsf{\Omega} X)\ar[r]& 0.}\] Since $P,Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, $\operatorname{{\mathsf{Ext}}}^{n+1}(P, \mathsf{\Omega} X)=0=\operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}Z, Q)$, and so, we may deduce that $\mathsf{im}\alpha=\mathsf{im}\beta$.
\end{rem}
\begin{s}\label{use1}Assume that $M,N\in\mathscr{C} $ and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(M,N)$ such that there is a morphism $f:M\rightarrow P$ with $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ and a conflation $\operatorname{\boldsymbol{\eta}}\in\operatorname{{\mathsf{Ext}}}^n(P, N)$ such that $\operatorname{\boldsymbol{\gamma}}=\operatorname{\boldsymbol{\eta}} f$. Since $\mathscr{C} $ is $n$-Frobenius, there is an inflation $i:M\rightarrow P'$, where $P'\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. According to Lemma \ref{101}(1), $[f~~i]^t:M\rightarrow P\oplus P'$ is also an inflation. Now letting $\operatorname{\boldsymbol{\eta}}'=\operatorname{\boldsymbol{\eta}}\oplus(0\rightarrow\cdots\rightarrow 0\rightarrow P'\rightarrow P')$, we have $\operatorname{\boldsymbol{\gamma}}=\operatorname{\boldsymbol{\eta}}'[f~~i]^t$. Consequently, without loss of generality, we may assume that $f$ is an inflation. Dually, if $\operatorname{\boldsymbol{\gamma}}=g\operatorname{\boldsymbol{\beta}}$, for some morphism $g:Q\rightarrow N$, with $Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ and $\operatorname{\boldsymbol{\beta}}\in\operatorname{{\mathsf{Ext}}}^n(M, Q)$, one may assume that $g$ is a deflation. \end{s}
The result below is an immediate consequence of Remarks \ref{use} and \ref{use1}. So we omit its proof.
\begin{prop}\label{equal}Let $X, Z$ be arbitrary objects of $\mathscr{C} $ and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(Z, X)$. Then the following statements are equivalent: \begin{enumerate}\item There is an object $\operatorname{\boldsymbol{\eta}}\in\operatorname{{\mathsf{Ext}}}^n(P, X)$, with $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, and a morphism $Z\stackrel{f}\rightarrow P$ such that $\operatorname{\boldsymbol{\gamma}}=\operatorname{\boldsymbol{\eta}} f$. \item There is an object $\operatorname{\boldsymbol{\eta}}'\in\operatorname{{\mathsf{Ext}}}^n(Z, Q)$, with $Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, and a morphism $Q\stackrel{f'}\rightarrow X$ such that $\operatorname{\boldsymbol{\gamma}}=f'\operatorname{\boldsymbol{\eta}}'$. \end{enumerate} \end{prop}
\begin{s}\label{pp} {\sc $\mathcal P$-subfunctor.} For every pair $X, Y$ of objects $\mathscr{C} $, we let $\mathcal P(X, Y)$ denote the additive subgroup of $\operatorname{{\mathsf{Ext}}}^n(X, Y)$ satisfying one of the equivalent conditions in Proposition \ref{equal}. It is easily seen that for given morphisms $A\stackrel{f}\rightarrow X$ and $Y\stackrel{g}\rightarrow B$ in $\mathscr{C} $, the natural transformation $\operatorname{{\mathsf{Ext}}}(f, g):\operatorname{{\mathsf{Ext}}}^n(X, Y)\longrightarrow\operatorname{{\mathsf{Ext}}}^n(A, B)$ respects $\mathcal P$. Namely, for any $\operatorname{\boldsymbol{\gamma}}\in\mathcal P(X, Y)$, $g(\operatorname{\boldsymbol{\gamma}} f)=(g\operatorname{\boldsymbol{\gamma}})f\in\mathcal P(A, B)$. Consequently, $\mathcal P$ is a subfunctor of $\operatorname{{\mathsf{Ext}}}^n$; see \cite{as, fght}. Indeed, from our point of view, $\mathcal P$ is a submodule of $\operatorname{{\mathsf{Ext}}}^n$. A given conflation $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(X, Y)$ will be called a {\em $\mathcal P$-conflation,} whenever $\operatorname{\boldsymbol{\gamma}}$ belongs to $\mathcal P(X, Y)$. It is worth noting that in the case $n=0$, $\mathcal P$-conflations are those morphisms in $\mathscr{C} $ factoring through projective objects. If there is no ambiguity, we denote $\mathcal P(-, -)$ by $\mathcal P$. \end{s} The next result is also a direct consequence of Remark \ref{use} and \ref{use1}. So we ignore its proof. \begin{cor}\label{lem2}Let $f:Z\rightarrow P$ with $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, be an inflation. Then any $\mathcal P$-conflation $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(Z, X)$, factors through $f$. \end{cor}
{
\begin{prop}\label{pprop}Let $M\stackrel{f}\rightarrow N\stackrel{g}\rightarrow K$ be a conflation in $\mathscr{C} $. Then, for any object $X\in\mathscr{C} $, there exists an exact sequence; $$\operatorname{{\mathsf{Ext}}}^n(K, X)/{\mathcal P}\stackrel{\bar{\mathbf g}}\longrightarrow\operatorname{{\mathsf{Ext}}}^n(N, X)/{\mathcal P}\stackrel{\bar{\mathbf f}}\longrightarrow\operatorname{{\mathsf{Ext}}}^n(M, X)/{\mathcal P}.$$ \end{prop} \begin{proof} Take inflations $N\stackrel{h}\rightarrow P$ and $K\stackrel{h'}\rightarrow P'$, where $P, P'\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. So, we will have the following commutative diagram; {\footnotesize \[\xymatrix{M~\ar[r]^{hf} \ar[d]_{f}& P\ar[r]\ar[d]_{[1~~0]^t}& \mathsf{\Omega}^{-1}M\ar[d]_{f'}\\ N~\ar[r]^{[h~~h'g]^t} \ar[d]_{g} & P\oplus P'\ar[r] \ar[d]_{[0~~1]}& \mathsf{\Omega}^{-1}N \ar[d]_{g'} \\ K\ar[r]^{h'} & P' \ar[r] & \mathsf{\Omega}^{-1}K,}\]}where rows and columns are conflation. Indeed, $hf$ is an inflation, because $f$ and $h$ are so. Moreover, $u:=[h~~h'g]^t$ is an inflation, thanks to Lemma \ref{101}. So, by applying the functor $\operatorname{{\mathsf{Ext}}}(-, X)$ to this diagram, gives us the following commutative diagram; {\footnotesize \[\xymatrix{\operatorname{{\mathsf{Ext}}}^n(P', X)~\ar[r] \ar[d]& \operatorname{{\mathsf{Ext}}}^n(K, X)\ar[r]\ar[d]_{\mathbf g}& \operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}K, X)\ar[d]\ar[r]&0\\ \operatorname{{\mathsf{Ext}}}^n(P\oplus P', X)~\ar[r] \ar[d] & \operatorname{{\mathsf{Ext}}}^n(N, X)\ar[r] \ar[d]_{\mathbf f}& \operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}N, X) \ar[d]\ar[r]&0 \\ \operatorname{{\mathsf{Ext}}}^n(P, X)\ar[r] & \operatorname{{\mathsf{Ext}}}^n(M, X) \ar[r] & \operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}M, X)\ar[r]&0,}\]}where rows and columns are exact. 
Now, by applying Corollary \ref{lem2}, we may obtain the exact sequence; $\operatorname{{\mathsf{Ext}}}^n(K, X)/{\mathcal P}\stackrel{\bar{\mathbf g}}\longrightarrow\operatorname{{\mathsf{Ext}}}^n(N, X)/{\mathcal P}\stackrel{\bar{\mathbf f}}\longrightarrow\operatorname{{\mathsf{Ext}}}^n(M, X)/{\mathcal P}.$ So the proof is finished. \end{proof}
\begin{s}\label{ccor}Let $M\rightarrow P\rightarrow\mathsf{\Omega}^{-1}M$ and $\mathsf{\Omega} N\rightarrow Q\rightarrow N$ be two arbitrary unit conflations in $\mathscr{C} $. According to the proof of Proposition \ref{pprop}, we may get the natural isomorphism $\operatorname{{\mathsf{Ext}}}^{n+1}(\mathsf{\Omega}^{-1}M, N)\cong\operatorname{{\mathsf{Ext}}}^n(M, N)/{\mathcal P}$. Also, a dual argument gives us the natural isomorphism $\operatorname{{\mathsf{Ext}}}^{n+1}(M, \mathsf{\Omega} N)\cong\operatorname{{\mathsf{Ext}}}^n(M, N)/{\mathcal P}$. These facts imply the result below. \end{s} \begin{cor}\label{qo}A given morphism $f:M\rightarrow N$ is quasi-invertible if and only if it acts as invertible on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$. \end{cor}
The next result indicates that being a $\mathcal P$-conflation behaves well with respect to the pull-back and push-out along morphisms in $\mathsf{\Sigma}$.
\begin{prop}\label{nul} Let $a:N\rightarrow X$ and $b:X'\rightarrow X$ be morphisms in $\mathsf{\Sigma}$. Then \begin{enumerate}\item a given conflation $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(M, N)$ is a $\mathcal P$-conflation if and only if $a\operatorname{\boldsymbol{\gamma}}$ is so. \item a given object $\operatorname{\boldsymbol{\beta}}\in\operatorname{{\mathsf{Ext}}}^n(X, N)$ is a $\mathcal P$-conflation if and only if $\operatorname{\boldsymbol{\beta}} b$ is so. \end{enumerate} \end{prop} \begin{proof}By the similarity, we prove only the first assertion. Since the `only if' part follows from the fact that the subfunctor $\mathcal P$ is closed under push-outs, we only prove the `if' part. To this end, assume that $a\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation. As $a\in\mathsf{\Sigma}$, by Corollary \ref{qo}, the morphism $\operatorname{{\mathsf{Ext}}}^n(M, N)/{\mathcal P}\longrightarrow\operatorname{{\mathsf{Ext}}}^n(M, X)/{\mathcal P}$ sending $\operatorname{\boldsymbol{\gamma}}+\mathcal P$ to $a\operatorname{\boldsymbol{\gamma}}+\mathcal P$ is an isomorphism. Now since $a\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation, injectivity of this morphism yields that $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation, as needed. \end{proof}
As a direct consequence of Proposition \ref{nul}, we include the next result. \begin{cor}\label{div}Let $a:X\rightarrow X'$ be a morphism in $\mathsf{\Sigma}$ and $Y\in\mathscr{C} $. Then \begin{enumerate}\item for any $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(X, Y)$, there exists $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^n(X', Y)$ such that $\operatorname{\boldsymbol{\gamma}}-\operatorname{\boldsymbol{\gamma}}'a$ is a $\mathcal P$-conflation. \item for a given $\operatorname{\boldsymbol{\beta}}\in\operatorname{{\mathsf{Ext}}}^n(Y, X')$, there exists $\operatorname{\boldsymbol{\beta}}'\in\operatorname{{\mathsf{Ext}}}^n(Y, X)$ such that $\operatorname{\boldsymbol{\beta}}-a\operatorname{\boldsymbol{\beta}}'$ is a $\mathcal P$-conflation. \end{enumerate} \end{cor}
\section{Unit factorizations of conflations} In this section, we show that any conflation in $\mathscr{C} $ can be represented as a pull-back, as well as, push-out of unit conflations. We begin with the following lemma.
\begin{lem}\label{gencog}Let $k\geq 1$ and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k(M, N)$. Then the following assertions hold: \begin{enumerate}\item There exists an object $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^k(M, N)$ and a morphism of conflations with fixed ends;
{\footnotesize \[\xymatrix{\operatorname{\boldsymbol{\gamma}}:N~\ar[r]\ar@{=}[d]& X_{k-1}\ar[r]\ar[d]_{a_{k-1}}& \cdots \ar[r]& X_1\ar[r]\ar[d]_{a_1} &X_0\ar[r]\ar[d]_{a_0}& M\ar@{=}[d]\\ \operatorname{\boldsymbol{\gamma}}':N~\ar[r] &Q_{k-1}\ar[r]&\cdots\ar[r]& Q_1\ar[r]& H\ar[r] & M,}\]} such that the $Q_i$'s are $n$-projective and each $a_i$ is an inflation.
\item There exists an object $\operatorname{\boldsymbol{\gamma}}''\in\operatorname{{\mathsf{Ext}}}^k(M, N)$ and a morphism of conflations with fixed ends;
{\footnotesize \[\xymatrix{\operatorname{\boldsymbol{\gamma}}'':N~\ar[r]\ar@{=}[d]& H'\ar[r]\ar[d]_{b_{k-1}}& P_{k-2}\ar[r]\ar[d]_{b_{k-2}}& \cdots \ar[r] &P_0\ar[r]\ar[d]_{b_0}& M\ar@{=}[d]\\ \operatorname{\boldsymbol{\gamma}}:N~\ar[r] &{X_{k-1}}\ar[r]& {X_{k-2}}\ar[r]& \cdots \ar[r]& X_0\ar[r] & M,}\]}
such that all the $P_i$'s are $n$-projective and each $b_i$ is a deflation. \end{enumerate} \end{lem} \begin{proof}Let us prove only statement (1), since the other will be gained dually. To this end, we argue by induction on $k$. If $k=1$, then there is nothing to prove. So assume that $k\geq 2$ and the result has been proved for all integers smaller than $k$. Assume that $\operatorname{\boldsymbol{\gamma}}=N\rightarrow X_{k-1}\rightarrow\cdots\rightarrow X_0\rightarrow M$. So letting $\operatorname{\boldsymbol{\gamma}}_k=N\rightarrow X_{k-1}\rightarrow L$ and $\operatorname{\boldsymbol{\gamma}}^{k-1}=L\rightarrow X_{k-2}\rightarrow\cdots\rightarrow X_0\rightarrow M$, we have $\operatorname{\boldsymbol{\gamma}}=\operatorname{\boldsymbol{\gamma}}_k\operatorname{\boldsymbol{\gamma}}^{k-1}$. Take the following commutative diagram; \[\xymatrix{\operatorname{\boldsymbol{\gamma}}_k:N\ar@{=}[d]\ar[r]&X_{k-1}\ar[r]\ar[d]_{a_{k-1}}&L\ar[d]_{a} \\ \delta':N\ar[r]& Q_{k-1} \ar[r]& T,}\]
where $a_{k-1}$ is an inflation with $Q_{k-1}\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Since $a\operatorname{\boldsymbol{\gamma}}^{k-1}\in\operatorname{{\mathsf{Ext}}}^{k-1}(M, T)$, by the induction hypothesis, there is a conflation $\operatorname{\boldsymbol{\gamma}}_1:T\rightarrow Q_{k-2}\rightarrow\cdots\rightarrow Q_1\rightarrow H\rightarrow M$ and a morphism of conflations $a\operatorname{\boldsymbol{\gamma}}^{k-1}\rightarrow\operatorname{\boldsymbol{\gamma}}_1$ with fixed ends. Hence, by setting $\operatorname{\boldsymbol{\gamma}}':=\delta'\operatorname{\boldsymbol{\gamma}}_1$ and using the fact that $\operatorname{\boldsymbol{\gamma}}=(\delta' a)\operatorname{\boldsymbol{\gamma}}^{k-1}$, we will have a morphism of conflations $\operatorname{\boldsymbol{\gamma}}\rightarrow\operatorname{\boldsymbol{\gamma}}'$ with fixed ends. So the proof is finished. \end{proof}
\begin{prop}\label{102}Let
$M, N\in\mathscr{C} $ and let $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k(M, \mathsf{\Omega}^kN)$ with $k\geq 1$. Then the following assertions hold: \begin{enumerate} \item There exists a unit conflation $\delta\in\mathcal U^k(\mathsf{\Omega}^kN)$ and $f\in\operatorname{\mathsf{H}}$ such that $\operatorname{\boldsymbol{\gamma}}=\delta f$. \item There exists a unit conflation $\delta \in\mathcal U_k(M)$ and $g\in\operatorname{\mathsf{H}}$ such that $\operatorname{\boldsymbol{\gamma}}=g\delta$ \end{enumerate} \end{prop} \begin{proof}We only prove the first assertion. The second one is obtained dually. By Lemma \ref{gencog} together with \cite[Proposition 3.1]{mit}, we may assume that $\operatorname{\boldsymbol{\gamma}}$ has the form $\mathsf{\Omega}^kN\rightarrow P_{k-1}\rightarrow\cdots\rightarrow P_1\rightarrow H\rightarrow M$, where $P_i\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, for any $i$. Taking the conflation $L\rightarrow H\rightarrow M$ and an inflation $H\rightarrow P_0$ with $P_0\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, one gets the following commutative diagram; \[\xymatrix{L~\ar[r]\ar@{=}[d]& H\ar[r]\ar[d]&M \ar[d]_{f}\\ L~\ar[r] & P_0\ar[r]& N'.}\] Now letting $\delta:=\mathsf{\Omega}^kN\rightarrow P_{k-1}\rightarrow\cdots\rightarrow P_1\rightarrow P_0\rightarrow N'$, one has the equality $\operatorname{\boldsymbol{\gamma}}=\delta f$, as desired. \end{proof}
\begin{dfn}Assume that $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k(M, \mathsf{\Omega}^kN)$ with $k\geq 1$, is given. Assume that there is a unit conflation $\delta\in\mathcal U^k(\mathsf{\Omega}^kN)$ and $f\in\operatorname{\mathsf{H}}$ such that $\operatorname{\boldsymbol{\gamma}}=\delta f$. Then we say $\operatorname{\boldsymbol{\gamma}}$ factors through $f$ by the unit conflation $\delta$, and the equality $\operatorname{\boldsymbol{\gamma}}=\delta f$ is said to be {\it a right unit factorization} (abb. $\operatorname{\mathsf{RUF}}$) of $\operatorname{\boldsymbol{\gamma}}$. Dually, if there exists $\delta\in\mathcal U_k(M)$ and $g\in\operatorname{\mathsf{H}}$ such that $\operatorname{\boldsymbol{\gamma}}=g\delta$, then we call this a {\it left unit factorization} (abb. $\operatorname{\mathsf{LUF}}$) of $\operatorname{\boldsymbol{\gamma}}$.\\ In view of Proposition \ref{102}, every conflation admits an $\operatorname{\mathsf{RUF}}$, as well as, an $\operatorname{\mathsf{LUF}}$. \end{dfn}
}
\begin{dfn}Assume that $a$ is a morphism in $\mathsf{\Sigma}$ and $k\geq 1$. We say that $a$ is \begin{enumerate}\item {\it induced by identity over $N$}, provided that there are unit conflations $\delta, \delta'\in\mathcal U_k(N)$ such that $\delta=a\delta'$ and $a$ is inflation.\item {\it co-induced by identity over $N$}, if there exist $\delta, \delta'\in\mathcal U^k(N)$ such that $\delta=\delta'a$ and $a$ is deflation. \end{enumerate} \end{dfn}
\begin{prop}\label{pro100}Let $k\geq 1$. Then the following statements hold: \begin{enumerate}\item For given $\delta_1, \delta_2,\delta_3\in\mathcal U^k(N)$, there exist deflations {$a_1,a_2,a_3\in\mathsf{\Sigma}$} such that $\delta_1 a_1=\delta_2a_2=\delta_3a_3$. In particular, $a_1, a_2, a_3$ are co-induced by identity over $N$. \item For given $\delta_1, \delta_2, \delta_3 \in\mathcal U_k(N)$, there exist inflations {$a_1,a_2,a_3\in\mathsf{\Sigma}$} such that $a_1\delta_1=a_2\delta_2=a_3\delta_3$. In particular, $a_1, a_2,a_3$ are induced by identity over $N$. \end{enumerate} \end{prop} \begin{proof}We deal only with the first assertion, the second one is obtained dually.
Assume that $\delta_i=N\stackrel{j_i^0}\longrightarrow P_i^1\longrightarrow P_i^2\longrightarrow\cdots\longrightarrow P_i^k\longrightarrow N_i$, for any $1\leq i\leq 3$. According to the proof of Lemma \ref{101}, one gets the conflation $N\stackrel{j^0}\longrightarrow\oplus_{i=1}^{3}P_i^1\longrightarrow L^1$, where $j^0=[j_1^0~~j_2^0~~j_3^0]^t$. So we may have the following commutative diagram; \[\xymatrix{N~\ar[r]\ar@{=}[d]&\oplus_{i=1}^{3} P_i^1\ar[r]\ar[d]_{e_i^1}& L^1\ar[d]_{b_i^1}\\ N~\ar[r] & P_i^1\ar[r]& L_i^1,}\] where $e_i^1$ is the projection. Now taking an inflation $L^1\stackrel{u^1}\longrightarrow Q^2$, we obtain a conflation $L^1\stackrel{j^1}\longrightarrow\oplus_{i=1}^3P_i^2\oplus Q^2\longrightarrow L^2$, where $j^1=[j_1^1b_1^1~~j_2^1b_2^1~~j_3^1b_3^1~~u^1]^t$ and $j_i^1:L_i^1\rightarrow P_i^2$ is inclusion, for any $1\leq i\leq 3$. Consequently, we have the following commutative diagram; \[\xymatrix{L^1~\ar[r]\ar[d]_{b_i^1}&\oplus_{i=1}^{3} P_i^2\oplus Q^2\ar[r]\ar[d]_{e_i^2}& L^2\ar[d]_{b_i^2}\\ L_i^1~\ar[r] & P_i^2\ar[r]& L_i^2,}\] where $e_i^2$ is the projection. Thus, continuing this procedure and splicing the diagrams gives us the following commutative diagram;{\footnotesize \[\xymatrix{ \delta_4:N~\ar[r]^{j^0}\ar@{=}[d] & \oplus_{i=1}^3P_i^1\ar[d]_{e_i^1}\ar[r] & \oplus_{i=1}^3P_i^2\oplus Q^2\ar[d]_{e_i^2}\ar[r]&\cdots \ar[r]& \oplus_{i=1}^3P_i^k\oplus Q^k\ar[r]\ar[d]_{e_i^k} & N_4\ar[d]_{a_i}\\ \delta_i:N~\ar[r]^{j_i^0} & P_i^1\ar[r]& P_i^2\ar[r]& \cdots \ar[r] & P_i^k\ar[r] & N_i,}\]}where each $e^j_i$ is the projection. Thus each $a_i$ is a deflation with kernel $n$-projective, and so, it belongs to $\mathsf{\Sigma}$, thanks to Corollary \ref{is}. In particular, by Remark \ref{pp1}(1), we have that $\delta_4=\delta_i a_i$, for any $1\leq i\leq 3$. So the proof is finished. \end{proof}
\begin{dfn}(1) Assume that $\delta, \delta' \in \mathcal U^k(N)$ with $k\geq 1$ and $a, a'$ are co-induced morphisms by identity over $N$ such that $\delta a=\delta'a'$. Then we say that $[\delta a, \delta'a']$
is a {\it co-angled pair}. In some cases, based on our need, we denote it by $\delta\stackrel{a}\longleftarrow\delta''\stackrel{a'}\longrightarrow\delta'$, where $\delta''=\delta a$. One should note that by Proposition \ref{pro100} such a co-angled pair exists.\\
(2) Assume that $\delta_1, \delta_2 \in\mathcal U_k(N)$ with $k\geq 1$ and $a_1,a_2$ are induced morphisms by identity over $N$. Then we say that $[a_1\delta_1, a_2\delta_2]$ is {\it an angled pair}, if $a_1\delta_1=a_2\delta_2$. Sometimes we display it by $\delta_1\stackrel{a_1}\longrightarrow\delta_3\stackrel{a_2}\longleftarrow\delta_2$, whenever $\delta_3=a_1\delta_1$. \end{dfn}
\begin{prop}\label{coin}Let $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k(M, \mathsf{\Omega}^k N)$ with $k\geq 1$ and let $\operatorname{\boldsymbol{\gamma}}=\delta f=\delta'f'$ be two $\operatorname{\mathsf{RUF}}$s of $\operatorname{\boldsymbol{\gamma}}$. Assume that
$\delta\stackrel{a}\longleftarrow\delta''\stackrel{a'}\longrightarrow\delta'$ is a co-angled pair which is obtained in Proposition \ref{pro100}. Then there is a morphism $h\in\mathscr{C} $ such that $f=ah$ and $f'=a'h$. Particularly, $\operatorname{\boldsymbol{\gamma}}=\delta''h$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. \end{prop} \begin{proof} By the hypothesis, we have the following commutative diagrams;{\footnotesize \[\xymatrix{\operatorname{\boldsymbol{\gamma}}:\mathsf{\Omega}^kN~\ar[r]\ar@{=}[d]& X_{k-1}\ar[r]\ar[d]_{f_{k-1}}& \cdots \ar[r]& X_0\ar[r]\ar[d]_{f_0}& M\ar[d]_{f}\\ \delta:\mathsf{\Omega}^kN~\ar[r] &P_{k-1}\ar[r]&\cdots\ar[r]& P_0\ar[r]& N,}\] \[\xymatrix{\operatorname{\boldsymbol{\gamma}}:\mathsf{\Omega}^kN~\ar[r]\ar@{=}[d]& X_{k-1}\ar[r]\ar[d]_{f'_{k-1}}& \cdots \ar[r]& X_0\ar[r]\ar[d]_{f'_0}& M\ar[d]_{f'}\\ \delta':\mathsf{\Omega}^kN~\ar[r] &P'_{k-1}\ar[r]&\cdots\ar[r]& P'_0\ar[r]& N'.}\]}So using the proof of Proposition \ref{pro100}, one may obtain the following commutative diagram; {\footnotesize \[\xymatrix{\operatorname{\boldsymbol{\gamma}}:\mathsf{\Omega}^kN~\ar[r]\ar@{=}[d]& X_{k-1}\ar[r]\ar[d]_{{{\tiny {\left[\begin{array}{ll} f_{k-1} \\ f'_{k-1} \end{array} \right]}}}}& \cdots \ar[r]& X_0\ar[r]\ar[d]_{{{{\tiny {\left[\begin{array}{ll} f_0 \\ f'_0 \\ 0 \end{array} \right]}}}}} & M\ar[d]_{h}\\ \delta'':\mathsf{\Omega}^kN~\ar[r] &P_{k-1}\oplus P'_{k-1}\ar[r]&\cdots\ar[r]& P_0\oplus P'_0\oplus Q_0\ar[r]& N'',}\]}namely $\delta''h=\operatorname{\boldsymbol{\gamma}}$. Now since $\delta''=\delta a=\delta'a'$, we have $\delta(ah)=\delta'(a'h)=\operatorname{\boldsymbol{\gamma}}$. Particularly, one has the commutative diagram;
{\footnotesize\[\xymatrix{N~& N''\ar[l]_{a}\ar[r]^{a'}& N'\\ & M\ar[u]^{h}\ar[ul]^{f}\ar[ur]_{f'} ,& }\]} meaning that $f=ah$ and $f'=a'h$. Thus the proof is completed. \end{proof}
The result below can be obtained by dualizing the argument given in the proof of Proposition \ref{coin}. So its proof will be omitted. \begin{prop}\label{lif}Let $k\geq 1$ and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^k(M, \mathsf{\Omega}^k N)$ and let $\operatorname{\boldsymbol{\gamma}}=f\delta=f'\delta'$ be two $\operatorname{\mathsf{LUF}}$s of $\operatorname{\boldsymbol{\gamma}}$. Assume that $\delta\stackrel{a}\longrightarrow\delta''\stackrel{a'}\longleftarrow\delta'$ is an angled pair which is obtained in Proposition \ref{pro100}. Then there exists $h\in\operatorname{\mathsf{H}}$ such that $\operatorname{\boldsymbol{\gamma}}=h\delta''$ is also an $\operatorname{\mathsf{LUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. In particular, $f=ha$ and $f'=ha'$. \end{prop}
\section{$n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms}
Assume that $\mathscr{C} $ is an $n$-Frobenius category. In this section, we will see that an object $f\in\operatorname{\mathsf{H}}$ annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ from the left and from the right, simultaneously. We call such a morphism $f$ an {\em $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism}. This notion has connections with many branches of mathematics; see \ref{s100}. We begin with the following easy observation.
\begin{s}\label{cof}Assume that $f:X\rightarrow N$ is a morphism in $\mathscr{C} $ factoring through an $n$-projective object. Then $f$ annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ from both sides. To see this, take $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ and morphisms $f_1, f_2$ such that $f:X\stackrel{f_1}\longrightarrow P\stackrel{f_2}\longrightarrow N$. So for a given object $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(N, Y)$, $\operatorname{\boldsymbol{\gamma}} f=(\operatorname{\boldsymbol{\gamma}} f_2)f_1$ will be a $\mathcal P$-conflation, because $\operatorname{\boldsymbol{\gamma}} f_2\in\operatorname{{\mathsf{Ext}}}^n(P, Y)$. Namely, $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$. The same method reveals that $f(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})=0$. \end{s}
The next interesting result says that a given morphism $f$ annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$, whenever it annihilates some unit conflation $\delta$. \begin{prop}\label{three} Let $n\geq 1$ and $f:X\rightarrow N$ be a morphism in $\mathscr{C} $. Then the following are equivalent: \begin{enumerate} \item There exists a unit conflation $\delta\in\mathcal U_n(N)$ such that $\delta f$ is a $\mathcal P$-conflation.\item
For any unit conflation $\delta'\in\mathcal U_n(N)$, $\delta' f$ is a $\mathcal P$-conflation.\item $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$. \end{enumerate} \end{prop} \begin{proof} $(1)\Rightarrow (2)$ Take an arbitrary unit conflation $\delta'\in\mathcal U_n(N)$. We intend to show that $\delta'f$ is a $\mathcal P$-conflation. To see this, consider an angled pair $\delta\stackrel{a}\rightarrow\delta''\stackrel{b}\leftarrow \delta'$. Since $\delta f$ is a $\mathcal P$-conflation, by applying Proposition \ref{nul}(1), we deduce that the same is true for $a(\delta f)=(a\delta)f$. Moreover, as $a\delta=b\delta'$, another use of Proposition \ref{nul}(1) enables us to infer that $\delta'f$ is also a $\mathcal P$-conflation, as needed.\\ $(2)\Rightarrow (3)$ Assume that $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(N, M)$ is an arbitrary conflation and $\operatorname{\boldsymbol{\gamma}}=g\delta'$ is an $\operatorname{\mathsf{LUF}}$ of $\operatorname{\boldsymbol{\gamma}}$, where $\delta'\in\mathcal U_n(N)$. By the hypothesis, $\delta'f$ is a $\mathcal P$-conflation. So using the fact that being a $\mathcal P$-conflation is preserved under push-out, we infer that $\operatorname{\boldsymbol{\gamma}} f=g(\delta'f)$ is also a $\mathcal P$-conflation, that is, $\operatorname{\boldsymbol{\gamma}} f\in\mathcal P$. Consequently, $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$ .\\ $(3)\Rightarrow (1)$ This implication is obvious. So the proof is finished. \end{proof}
As an immediate consequence of Proposition \ref{three}, we include the next result. \begin{cor}\label{ccoo}Let $n\geq 1$ and ${\operatorname{\boldsymbol{\gamma}}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n N)$ be a conflation and let $\operatorname{\boldsymbol{\gamma}}=\delta f$ be an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. Then $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation if and only if $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$. \end{cor}
\begin{lem}\label{ruf}Let $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)$ be a $\mathcal P$-conflation. Then there is an $\operatorname{\mathsf{RUF}}$ $\operatorname{\boldsymbol{\gamma}}=\delta f$ of $\operatorname{\boldsymbol{\gamma}}$ with $f$ factoring through an $n$-projective object. \end{lem} \begin{proof}Since $ \operatorname{\boldsymbol{\gamma}} $ is a $ \mathcal P $-conflation, there exist an object $\operatorname{\boldsymbol{\eta}}\in \operatorname{{\mathsf{Ext}}}^n( P , \mathsf{\Omega}^n N)$ with $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, and a morphism $ h: M\rightarrow P $ such that $\operatorname{\boldsymbol{\gamma}}= \operatorname{\boldsymbol{\eta}} h$. Taking an $\operatorname{\mathsf{RUF}}$ $\operatorname{\boldsymbol{\eta}}=\delta g$ of $\operatorname{\boldsymbol{\eta}}$, we get $\operatorname{\boldsymbol{\gamma}}=\delta(gh)$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$, in which $gh$ factors through the $n$-projective object $P$. So we are done. \end{proof}
\begin{prop}\label{ph}Let $f:M\rightarrow N$ be a morphism in $\mathscr{C} $ and $n\geq 1$. If $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$, then there are morphisms $N\stackrel{a}\leftarrow N''\stackrel{b}\rightarrow N'$ with $a, b\in\mathsf{\Sigma}$ and $h:M\rightarrow N''$ such that $f=ah$ and $bh$ factors through an $n$-projective object. \end{prop} \begin{proof}Fix a unit conflation $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$. By our hypothesis, $\delta_Nf$ is a $\mathcal P$-conflation. So, using Lemma \ref{ruf}, we may find an $\operatorname{\mathsf{RUF}}$ $\delta_Nf=\delta g$ of $\delta_Nf$ such that $g$ factors through an $n$-projective object. Consider a co-angled pair $\delta_N\stackrel{a}\leftarrow \delta''\stackrel{b}\rightarrow \delta$, as the one obtained in Proposition \ref{pro100}. Indeed, we have a pair of morphisms $N\stackrel{a}\leftarrow N''\stackrel{b}\rightarrow N'$, where $N'$ (resp. $N''$) is the right end term of the unit conflation $\delta$ (resp. $\delta''$). So applying Proposition \ref{coin} ensures the existence of a morphism $h:M\rightarrow N''$ such that $\delta_Nf=\delta'' h$ is also an $\operatorname{\mathsf{RUF}}$ of $\delta_Nf$. In particular, $f=ah$ and $bh$ factors through an $n$-projective object, because of the equality $bh=g$. So the proof is completed. \end{proof}
\begin{prop}\label{sif}Let $a:X\rightarrow N$ be a morphism in $\mathsf{\Sigma}$. Then the following are satisfied: \begin{enumerate}\item A given morphism $f:N\rightarrow M$ annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ from the right if and only if so does $fa$. \item A given morphism $g:M\rightarrow X$ annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ from the left if and only if so does $ag$. \end{enumerate} \end{prop} \begin{proof}By the similarity, we only prove the first statement.
If $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$, evidently the same is true for $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})(fa)$, because being a $\mathcal P$-conflation is closed under pull-back. Now assume that $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})(fa)=0$. So, by Proposition \ref{three}, there exists a unit conflation $\delta_M\in\mathcal U_n(M)$ such that $\delta_M(fa)$ is a $\mathcal P$-conflation. Hence, invoking Proposition \ref{nul}(2) yields that $\delta_Mf$ is a $\mathcal P$-conflation, as well. Consequently, another use of Proposition \ref{three} forces $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f$ to be zero, as required. \end{proof} Now we are ready to state the main result of this section. \begin{theorem}\label{main} Let $f:M\rightarrow N$ be a morphism in $\mathscr{C} $. Then $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$ if and only if $f(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})=0$. \end{theorem} \begin{proof}Since the result in the case $n=0$ holds obviously, we may assume that
$n\geq 1$. Suppose that $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})f=0$. So, in view of Proposition \ref{ph}, there are morphisms $N\stackrel{a}\leftarrow N''\stackrel{b}\rightarrow N'$ with $a, b{\in\mathsf{\Sigma}}$ and $h:M\rightarrow N''$ such that $f=ah$ and $bh$ factors through an $n$-projective object. So, as we have observed in \ref{cof}, $(bh)(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})=0$. As $a, b\in\mathsf{\Sigma}$, by applying Proposition \ref{sif} successively, one may get that $f(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})=0$. Since the reverse implication is obtained analogously, we omit it. So, the proof is finished. \end{proof}
\begin{dfn} A morphism $f$ in $\mathscr{C} $ is called an {\em $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism}, if it annihilates $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$. \end{dfn}
The result below follows directly from \ref{ccor}. So we skip its proof. \begin{cor}A given morphism $f$ in $\mathscr{C} $ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism if and only if $\operatorname{{\mathsf{Ext}}}^{n+1}f=0$. \end{cor}
\begin{rem}It should be noted that our notion of $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms is indeed that of $(n+1)$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms in the sense of Mao \cite{mao}. However, due to the harmony with $n$-Frobenius categories, we call them $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms. We also emphasize that a 1-$\operatorname{{\mathsf{Ext}}}$-phantom morphism is exactly a $\mathcal P$-phantom morphism in the sense of Fu et al. \cite{fght}. \end{rem}
\begin{s}\label{s100}The concept of phantom morphisms has its roots in topology in the study of maps between CW-complexes \cite{mc}. A map $f : X \rightarrow Y$ between CW-complexes is said to be a phantom map, if its restriction to each skeleton $X_n$ is null homotopic. Later, this notion has been used in various settings of mathematics. In the context of triangulated categories, phantom morphisms were first studied by Neeman \cite{ne}. The notion of phantom morphisms also was developed in the stable category of a finite group ring in a series of works of Benson and Gnacadja \cite{gn, be2, be1, be}. The definition of a phantom morphism was generalized to the category of $R$-modules over an associative ring $R$ by Herzog \cite{he}. Precisely, a morphism $f:M\rightarrow N$ of left $R$-modules is called a phantom morphism, if the natural transformation $\operatorname{\mathsf{Tor}}^R_1(-, f):\operatorname{\mathsf{Tor}}^R_1(-, M)\rightarrow\operatorname{\mathsf{Tor}}^R_1(-, N)$ is zero, or equivalently, the pullback of any short exact sequence along $f$ is pure exact. Similarly, a morphism $g:M\rightarrow N$ of left $R$-modules is said to be an $\operatorname{{\mathsf{Ext}}}$-phantom morphism \cite{hext}, if the induced morphism $\operatorname{{\mathsf{Ext}}}^1_R(B, g):\operatorname{{\mathsf{Ext}}}^1_R(B,M)\rightarrow\operatorname{{\mathsf{Ext}}}^1_R(B,N)$ is 0, for every finitely presented left $R$-module $B$. For any integer $n\geq 1$, the concepts of $n$-phantom morphism and $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms, which are higher dimensional generalization of phantom morphisms and $\operatorname{{\mathsf{Ext}}}$-phantom morphisms, respectively, have been introduced and studied by Mao \cite{mao, mao1}. \end{s}
\section{A composition operator on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$}
Assume that $\mathscr{C} $ is an $n$-Frobenius category. In this section, we introduce a composition operator $``\circ"$ on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$. It is proved that the operator $``\circ"$ is associative and distributive over Baer sum on both sides. These facts enable us to see that for any object $M\in\mathscr{C} $, $(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)/{\mathcal P}, +, \circ)$ is a ring with identity, where $+$ stands for the Baer sum operation. Surprisingly, we find a ring homomorphism $\varphi:\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, M)\longrightarrow\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)/{\mathcal P}$ such that for any quasi-invertible morphism $f$, $\varphi(f)$ is invertible and $ \varphi(f)=0$, if $f$ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism. We begin with the following notation.
\begin{s}Let $a:X\rightarrow X'$ be a quasi-invertible morphism and $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(X, Y)$. In view of Corollary \ref{div}, there exists $\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^n(X', Y)$ such that $\operatorname{\boldsymbol{\gamma}}-\operatorname{\boldsymbol{\gamma}}'a$ is a $\mathcal P$-conflation. In this case, for the simplicity, we write $\operatorname{\boldsymbol{\gamma}}'=_{_{\mathcal P}}\operatorname{\boldsymbol{\gamma}} a^{-1}$. Also, {for a given object} $\operatorname{\boldsymbol{\beta}}\in\operatorname{{\mathsf{Ext}}}^n(Y, X')$, there exists $\operatorname{\boldsymbol{\beta}}'\in\operatorname{{\mathsf{Ext}}}^n(Y, X)$ such that $\operatorname{\boldsymbol{\beta}} -a\operatorname{\boldsymbol{\beta}}'$ is a $\mathcal P$-conflation. Then we write $\operatorname{\boldsymbol{\beta}}'=_{_{\mathcal P}}a^{-1}\operatorname{\boldsymbol{\beta}}$. {From now on, a given object $\operatorname{\boldsymbol{\gamma}}+\mathcal P\in\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$, will be denoted by $\bar{\operatorname{\boldsymbol{\gamma}}}$.} \end{s}
\begin{dfn}\label{compo}Assume that $M, N, K\in\mathscr{C} $ and fix a unit conflation $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$. We define the composition
$$\circ: \operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nK)/{\mathcal P}\times\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}\longrightarrow\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nK)/{\mathcal P},$$ $(\bar{\operatorname{\boldsymbol{\beta}}}, \bar{\operatorname{\boldsymbol{\gamma}}})\longrightarrow\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}$ as follows: \\ Assume that $\operatorname{\boldsymbol{\gamma}}=\delta f$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$, where $\delta\in\mathcal U^n(\mathsf{\Omega}^nN)$ and $f\in\operatorname{\mathsf{H}}$. Now we set $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}:=\overline{((\operatorname{\boldsymbol{\beta}} a_1)b^{-1}_1)f}$, where $\delta_N\stackrel{a_1}\longleftarrow\delta_1\stackrel{b_1}\longrightarrow\delta$
is a co-angled pair. \end{dfn}
The result below allows us to assume that the co-angled pair in the above definition, as the one obtained in Proposition \ref{pro100}(1). \begin{lem}\label{ds}With the notation above, assume that $\delta_N\stackrel{a}\longleftarrow\delta''\stackrel{b}\longrightarrow\delta$ is the co-angled pair which is obtained in Proposition \ref{pro100}(1). Then $((\operatorname{\boldsymbol{\beta}} a_1)b^{-1}_1)f=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} a)b^{-1})f$. \end{lem} \begin{proof} Since $\delta_N\stackrel{a_1}\longleftarrow\delta_1\stackrel{b_1}\longrightarrow\delta$ is a co-angled pair, we have $\delta_1=\delta_Na_1=\delta b_1$. So by Proposition \ref{coin}, $\delta_1=\delta''h$, for some morphism $h$ in $\mathscr{C} $. Indeed, the argument given in the proof of Proposition \ref{coin}, gives rise to the following commutative diagram;
{\footnotesize\[\xymatrix{N~& N''\ar[l]_{a}\ar[r]^{b}& N'\\ & N_1\ar[u]^{h}\ar[ul]^{a_1}\ar[ur]_{b_1} ,& }\]}where $N', N''$ and $N_1$ stand for the right end terms of $\delta, \delta''$ and $\delta_1$, respectively. This, in particular, implies that {$((\operatorname{\boldsymbol{\beta}} a_1)b^{-1}_1)f=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} ah)(bh)^{-1})f=_{\mathcal P}((((\operatorname{\boldsymbol{\beta}} a)h)h^{-1})b^{-1})f=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} a)b^{-1})f$}, giving the desired result. \end{proof}
\begin{theorem}\label{welldef}The definition of $``\circ"$ is independent of the choice of an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. \end{theorem} \begin{proof}
Assume that $\operatorname{\boldsymbol{\gamma}}=\delta'f'$ is another $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$ and suppose that $\delta_N\stackrel{a_2}\longleftarrow\delta_2\stackrel{b_2}\longrightarrow\delta'$ is a co-angled pair. We shall prove that $((\operatorname{\boldsymbol{\beta}} a_1)b_1^{-1})f=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} a_2)b_2^{-1})f'$. Take a co-angled pair $\delta\stackrel{a_3}\longleftarrow\delta_3\stackrel{b_3}\longrightarrow\delta'$. According to Proposition \ref{pro100}(1), there exist $a_4,b_4,c_4\in\mathsf{\Sigma}$ such that $\delta_1a_4=\delta_2b_4=\delta_3c_4$ and $a_4,b_4,c_4$ are co-induced by identity over $\mathsf{\Omega}^nN$. In particular, denoting the latter equalities by $\delta_4$, we will get the following diagram of co-angled pairs; {\footnotesize \[\xymatrix{&\delta_N& \\ \delta_1~\ar[d]_{b_1}\ar[ur]^{a_1}& \delta_4\ar[l]_{a_4}\ar[r]^{b_4}\ar[d]_{c_4}& \delta_2\ar[d]_{b_2}\ar[ul]_{a_2}\\ \delta~ & \delta_3\ar[l]_{a_3}\ar[r]^{b_3} & \delta'.}\]} {Since by Lemma \ref{ds}, the co-angled pair $\delta\stackrel{a_3}\longleftarrow\delta_3\stackrel{b_3}\longrightarrow\delta'$ can be considered as the one obtained in Proposition \ref{pro100}(1), } by virtue of Proposition \ref{coin}, there is a morphism $h$ such that $\operatorname{\boldsymbol{\gamma}}=\delta_3h$, $f=a_3h$ and $f'=b_3h$. 
In particular, one may have the following diagram; {\footnotesize\[\xymatrix{\delta~& \delta_3\ar[l]_{a_3}\ar[r]^{b_3}& \delta'\\ & \operatorname{\boldsymbol{\gamma}}\ar[u]^{h}\ar[ul]^{f}\ar[ur]_{f'} .& }\]} Thus we have the following equalities modulo $\mathcal P$; $$((\operatorname{\boldsymbol{\beta}} a_2)b_2^{-1})f'=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} a_2)b_2^{-1})b_3h=_{\mathcal P}(((\operatorname{\boldsymbol{\beta}} a_2)b_2^{-1})b_3)h=_{\mathcal P}((((\operatorname{\boldsymbol{\beta}} a_2)b_2^{-1})b_3)a_3^{-1})f$$ $$=_{\mathcal P}((((\operatorname{\boldsymbol{\beta}} a_2)b_4){c_4}^{-1})a_3^{-1})f=_{\mathcal P} (((\operatorname{\boldsymbol{\beta}}(a_2b_4)){a_4}^{-1})b_1^{-1})f=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} a_1)b_1^{-1})f.$$ Here the first and third equalities hold, because the latter diagram is commutative, and the second one is clear. The validity of the fourth and fifth equalities comes from the fact that $b_3c_4=b_2b_4$ and $b_1a_4=a_3c_4$, respectively. Finally, the last one holds true, because $a_2b_4=a_1a_4$. Thus $((\operatorname{\boldsymbol{\beta}} a_1)b_1^{-1})f=_{\mathcal P}((\operatorname{\boldsymbol{\beta}} a_2)b_2^{-1})f'$, as needed. \end{proof}
Let $M, N$ be two objects of $\mathscr{C} $. For any two given objects $\bar{\operatorname{\boldsymbol{\gamma}}}, \bar{\operatorname{\boldsymbol{\gamma}}'}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P }$, we define the addition $\bar{\operatorname{\boldsymbol{\gamma}}}+\bar{\operatorname{\boldsymbol{\gamma}}'}:=\overline{\operatorname{\boldsymbol{\gamma}}+\operatorname{\boldsymbol{\gamma}}'}$, where $\operatorname{\boldsymbol{\gamma}}+\operatorname{\boldsymbol{\gamma}}'$ is the usual Baer sum operation. Evidently, this definition is well-defined.
\begin{prop}\label{srt1}The operator $``\circ"$ is distributive over $``+"$ on both sides. \end{prop} \begin{proof} Assume that $ K, M,N\in \mathscr{C} $ and fix unit conflations $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$ and $\delta_K\in\operatorname{{\mathsf{Ext}}}^n(K, \mathsf{\Omega}^nK)$. Assume that $\bar{\operatorname{\boldsymbol{\alpha}}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n N)/{\mathcal P}$, $\bar{\operatorname{\boldsymbol{\gamma}}}\in \operatorname{{\mathsf{Ext}}}^n(K, \mathsf{\Omega}^n L)/{\mathcal P}$ and $\bar{\operatorname{\boldsymbol{\beta}}}, \bar{\operatorname{\boldsymbol{\beta}}'}\in \operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^n K)/{\mathcal P}$. First, we show that $(\bar{\operatorname{\boldsymbol{\beta}}}+\bar{\operatorname{\boldsymbol{\beta}}'})\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}}+ \bar{\operatorname{\boldsymbol{\beta}}'}\circ \bar{\operatorname{\boldsymbol{\alpha}}}$. In this direction, assume that $\operatorname{\boldsymbol{\alpha}}=\delta f$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\alpha}}$ and suppose that $[\delta_Na, \delta b]$ is a co-angled pair. So, $(\bar{\operatorname{\boldsymbol{\beta}}}+\bar{\operatorname{\boldsymbol{\beta}}'})\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\overline{(((\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}')a)b^{-1})f}=\overline{((\operatorname{\boldsymbol{\beta}} a)b^{-1})f}+\overline{((\operatorname{\boldsymbol{\beta}}' a)b^{-1})f}=\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}}+ \bar{\operatorname{\boldsymbol{\beta}}'}\circ \bar{\operatorname{\boldsymbol{\alpha}}}$, where the second equality follows from the fact that pull-backs distribute over the Baer sum; see \cite[Chapter VII, Lemma 3.2]{mit}. 
Next we would like to show that $\bar{\operatorname{\boldsymbol{\gamma}} }\circ(\bar{\operatorname{\boldsymbol{\beta}}}+\bar{ \operatorname{\boldsymbol{\beta}}'})=\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}}+\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}'}$. Assume that $\operatorname{\boldsymbol{\beta}}=\delta f$ and $\operatorname{\boldsymbol{\beta}}'=\delta' f'$ are $\operatorname{\mathsf{RUF}}$s of $\operatorname{\boldsymbol{\beta}}$ and $\operatorname{\boldsymbol{\beta}}'$, respectively. Take an $\operatorname{\mathsf{RUF}}$, $\nabla(\delta\oplus\delta')=\delta''g$, where $\nabla:\mathsf{\Omega}^nK\oplus\mathsf{\Omega}^nK\stackrel{[1~~1]}\longrightarrow\mathsf{\Omega}^nK$. Namely, we have the following commutative diagram; \[\xymatrix{\nabla(\delta\oplus\delta'):\mathsf{\Omega}^n K~\ar[r]\ar@{=}[d]& T_{n-1}\ar[r]\ar[d] &\cdots\ar[r] &T_0\ar[r]\ar[d] & K'\oplus K''\ar[d]_{g}\\ \delta'':\mathsf{\Omega}^nK ~\ar[r] & {Q}_{n-1}\ar[r]& \cdots\ar[r]& Q_0\ar[r]& K_1,}\] where each $Q_i$ is $n$-projective. Now setting $g:=[g'~~g'']$,
one may easily deduce that $\operatorname{\boldsymbol{\beta}}=\delta''(g'f)$ and $\operatorname{\boldsymbol{\beta}}'=\delta''(g''f')$ are also $\operatorname{\mathsf{RUF}}$s of $\operatorname{\boldsymbol{\beta}}$ and $\operatorname{\boldsymbol{\beta}}'$, respectively. We claim that $\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}'=\delta''h$, for some morphism $h$ in $\mathscr{C} $. Since there is a morphism of conflations $\operatorname{\boldsymbol{\beta}}\oplus\operatorname{\boldsymbol{\beta}}'\longrightarrow\nabla(\delta\oplus\delta')$, by the universal property of the push-out diagram, there exists a unique morphism $\nabla(\operatorname{\boldsymbol{\beta}}\oplus\operatorname{\boldsymbol{\beta}}')\longrightarrow\nabla(\delta\oplus\delta')$. Indeed, we have $\nabla(\operatorname{\boldsymbol{\beta}}\oplus\operatorname{\boldsymbol{\beta}}')=\nabla(\delta\oplus\delta')(f\oplus f')$. As $\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}'=\nabla(\operatorname{\boldsymbol{\beta}}\oplus\operatorname{\boldsymbol{\beta}}')\Delta$, where $\Delta:N\stackrel{{{\tiny {\left[\begin{array}{ll} 1 \\ 1 \end{array} \right]}}}}\longrightarrow N\oplus N$, by letting $h=g(f\oplus f')\Delta$, we have $\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}'=\delta''h$, as claimed. Therefore, considering the co-angled pair $[\delta_Ka_1, \delta''b_1]$, we have $\bar{\operatorname{\boldsymbol{\gamma}}}\circ(\bar{\operatorname{\boldsymbol{\beta}}}+\bar{ \operatorname{\boldsymbol{\beta}}'})=\overline{((\operatorname{\boldsymbol{\gamma}} a_1)b_1^{-1})h}$. 
Now since $h=[g'~~g''](f\oplus f')\Delta=g'f+g''f'$, we infer that $\bar{\operatorname{\boldsymbol{\gamma}}}\circ(\bar{\operatorname{\boldsymbol{\beta}}}+\bar{ \operatorname{\boldsymbol{\beta}}'})=\overline{((\operatorname{\boldsymbol{\gamma}} a_1)b_1^{-1})g'f}+\overline{((\operatorname{\boldsymbol{\gamma}} a_1)b_1^{-1})g''f'}=\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}}+\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}'}$. So the proof is completed. \end{proof}
\begin{lem}\label{corwell} Let ${\operatorname{\boldsymbol{\gamma}}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)$ and ${\operatorname{\boldsymbol{\beta}}}\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nK)$. If $\operatorname{\boldsymbol{\gamma}}$ or $\operatorname{\boldsymbol{\beta}}$ is a $\mathcal P$-conflation, then $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=0$. In particular, if $\operatorname{\boldsymbol{\beta}}'$ and $\operatorname{\boldsymbol{\gamma}}'$ are two conflations such that $\bar{\operatorname{\boldsymbol{\beta}}}=\bar{\operatorname{\boldsymbol{\beta}}'}$ and $\bar{\operatorname{\boldsymbol{\gamma}}}=\bar{\operatorname{\boldsymbol{\gamma}}'}$, then $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\bar{\operatorname{\boldsymbol{\beta}}'}\circ\bar{\operatorname{\boldsymbol{\gamma}}'}$. \end{lem} \begin{proof} First assume that $ \operatorname{\boldsymbol{\beta}}$ is a $ \mathcal P $-conflation. Assume that $ \operatorname{\boldsymbol{\gamma}}= \delta f $ is an $\operatorname{\mathsf{RUF}}$ of $ \operatorname{\boldsymbol{\gamma}} $ and suppose that $[\delta_N a , \delta b]$ is a co-angled pair, where $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$ is a unit conflation. As $\operatorname{\boldsymbol{\beta}}$ is a $\mathcal P$-conflation, applying Proposition \ref{nul} ensures that $(\operatorname{\boldsymbol{\beta}} a)b^{-1}$ is a $\mathcal P$-conflation, and so, the same will be true for $((\operatorname{\boldsymbol{\beta}} a)b^{-1})f$, meaning that $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\bar{0}$.\\ Next assume that $ \operatorname{\boldsymbol{\gamma}} $ is a $ \mathcal P $-conflation. 
So, in view of Lemma \ref{ruf}, there is an $\operatorname{\mathsf{RUF}}$ $\operatorname{\boldsymbol{\gamma}}=\delta f$ of $\operatorname{\boldsymbol{\gamma}}$ such that $f$ factors through an $n$-projective object.
Consider a co-angled pair $[\delta_N a, \delta b]$. According to the proof of Theorem \ref{welldef}, $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\overline{((\operatorname{\boldsymbol{\beta}} a) b^{-1})f}$. Now since $f$ factors through an $n$-projective object, it is evident that $((\operatorname{\boldsymbol{\beta}} a) b^{-1})f$ is a $\mathcal P$-conflation, and then, $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\bar{0}$. The second assertion follows by combining the first assertion with Proposition \ref{srt1}. Thus the proof is finished.
\end{proof}
\begin{rem}\label{bimod}Let $X, Y$ be two arbitrary objects in $\mathscr{C} $ and $\operatorname{\boldsymbol{\beta}}, \operatorname{\boldsymbol{\beta}}'\in\operatorname{{\mathsf{Ext}}}^n(X, Y)$. Assume that there is a morphism of conflations $\operatorname{\boldsymbol{\beta}}\rightarrow\operatorname{\boldsymbol{\beta}}'$ with fixed ends. Then any $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\beta}}'$, will be an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\beta}}$, as well. So, for any conflation $\operatorname{\boldsymbol{\gamma}}$, we will have $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}}=\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}'}$, where the composition makes sense. On the other hand, since for a given morphism $f$, the equality $\operatorname{\boldsymbol{\beta}} f=\operatorname{\boldsymbol{\beta}}'f$ holds, one may infer that $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\bar{\operatorname{\boldsymbol{\beta}}'}\circ\bar{\operatorname{\boldsymbol{\alpha}}}$, for all conflations $\operatorname{\boldsymbol{\alpha}}\in\operatorname{{\mathsf{Ext}}}^n(Z, X)$.
These facts, in conjunction with \cite[Proposition 3.1]{mit}, guarantee that the operator $``\circ"$ is compatible with the equivalence classes in $\operatorname{{\mathsf{Ext}}}^n(X, Y)$. On the other hand, as we have mentioned in \ref{pp}, $\mathcal P$ is an $\operatorname{\mathsf{H}}$-bisubmodule of $\operatorname{{\mathsf{Ext}}}^n$. Hence $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$ will be an $\operatorname{\mathsf{H}}$-bimodule. \end{rem}
Taking all of the previous results together leads us to deduce that the composition operator $``\circ"$, introduced in Definition \ref{compo}, is well-defined. Indeed, we have the next result. \begin{theorem}\label{circ}The composition $``\circ"$ is well-defined. \end{theorem}
\begin{prop}\label{ass}The composition operator $``\circ"$ is associative. \end{prop} \begin{proof} Assume that $M, N, K, L\in\mathscr{C} $. Fix unit conflations $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$ and $\delta_K\in\operatorname{{\mathsf{Ext}}}^n(K, \mathsf{\Omega}^nK)$ and let $\bar{\operatorname{\boldsymbol{\alpha}}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}$, $\bar{\operatorname{\boldsymbol{\beta}}}\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^n K)/{\mathcal P}$ and $\bar{\operatorname{\boldsymbol{\gamma}}}\in\operatorname{{\mathsf{Ext}}}^n(K, \mathsf{\Omega}^nL)/{\mathcal P}$. We would like to show that $(\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}})\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\bar{\operatorname{\boldsymbol{\gamma}}}\circ(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}})$. Let us first compute $\bar{\operatorname{\boldsymbol{\gamma}}}\circ(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}})$. In this direction, assume that $\operatorname{\boldsymbol{\alpha}}=\delta f$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\alpha}}$ and suppose that $[\delta_Na_1, \delta b_1]$ is a co-angled pair. So by definition, $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\overline{((\operatorname{\boldsymbol{\beta}} a_1)b_1^{-1})f}$. Set, for the simplicity, $\operatorname{\boldsymbol{\beta}}':=(\operatorname{\boldsymbol{\beta}} a_1)b_1^{-1}$ and take an $\operatorname{\mathsf{RUF}}$, $\operatorname{\boldsymbol{\beta}}'=\delta' g'$ of $\operatorname{\boldsymbol{\beta}}'$, and then, $\operatorname{\boldsymbol{\beta}}'f=\delta'(g'f)$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\beta}}'f$. 
Now, assuming $[\delta_Ka_2, \delta'b_2]$ is a co-angled pair, we have $\bar{\operatorname{\boldsymbol{\gamma}}}\circ(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}})=\overline{((\operatorname{\boldsymbol{\gamma}} a_2)b_2^{-1})g'f}$. Next we calculate $(\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}})\circ\bar{\operatorname{\boldsymbol{\alpha}}}$. Suppose that $\operatorname{\boldsymbol{\beta}}=\delta''g$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\beta}}$ and $[\delta_Ka_3, \delta''b_3]$ is a co-angled pair, and so, $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}}=\overline{((\operatorname{\boldsymbol{\gamma}} a_3)b_3^{-1})g}$. By our assumption, $\operatorname{\boldsymbol{\beta}} a_1=_{\mathcal P}\operatorname{\boldsymbol{\beta}}'b_1$. {So applying Lemma \ref{corwell}, we have $0=\bar{\operatorname{\boldsymbol{\gamma}}}\circ\overline{(\operatorname{\boldsymbol{\beta}} a_1-\operatorname{\boldsymbol{\beta}}'b_1)}=\bar{\operatorname{\boldsymbol{\gamma}}}\circ\overline{\operatorname{\boldsymbol{\beta}} a_1}-\bar{\operatorname{\boldsymbol{\gamma}}}\circ\overline{\operatorname{\boldsymbol{\beta}}'b_1}$. As $\operatorname{\boldsymbol{\beta}}=\delta''g$, we have $\operatorname{\boldsymbol{\beta}} a_1=\delta''(ga_1)$. Thus, considering the co-angled pair $[\delta_Ka_3, \delta''b_3]$, one has $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\overline{\operatorname{\boldsymbol{\beta}} a_1}=\overline{((\operatorname{\boldsymbol{\gamma}} a_3)b_3^{-1})g a_1}$. Similarly, since $\operatorname{\boldsymbol{\beta}}'b_1=\delta'(g'b_1)$, we obtain that $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\overline{\operatorname{\boldsymbol{\beta}}' b_1}=\overline{((\operatorname{\boldsymbol{\gamma}} a_2)b_2^{-1})g' b_1}$.}
Consequently, $((\operatorname{\boldsymbol{\gamma}} a_3)b_3^{-1})g a_1=_{\mathcal P}{((\operatorname{\boldsymbol{\gamma}} a_2)b_2^{-1})g'b_1}$, meaning that $(\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}})a_1=\overline{(((\operatorname{\boldsymbol{\gamma}} a_2)b_2^{-1})g')b_1}$. Thus, by considering the co-angled pair $[\delta_Na_1, \delta b_1]$, one may deduce that $(\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}})\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\overline{(((\operatorname{\boldsymbol{\gamma}} a_2)b_2^{-1})g')f}$. Therefore, $(\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\beta}}})\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\bar{\operatorname{\boldsymbol{\gamma}}}\circ(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\alpha}}})$, as needed. \end{proof}
\begin{cor}\label{cor100}Let $M\in\mathscr{C} $ and fix a unit conflation $\delta_M\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$. Then $(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)/{\mathcal P}, +, \circ)$ has a ring structure with the identity element $\bar{\delta}_M$. Moreover, for any unit conflation $\delta\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$, $\bar{\delta}$ is invertible. \end{cor} \begin{proof}According to Theorem \ref{circ} and Propositions \ref{ass} and \ref{srt1}, we only need to show that $\bar{\delta}_M$ is the unit element of $\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)/{\mathcal P}$. To see this, take $\bar{\operatorname{\boldsymbol{\gamma}}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n M)/{\mathcal P}$. Since $\delta_M=\delta_M 1_M$ is an $\operatorname{\mathsf{RUF}}$ of $\delta_M$, one has $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\delta}_M =\bar{\operatorname{\boldsymbol{\gamma}}}$. Now suppose that $\operatorname{\boldsymbol{\gamma}}=\delta f $ is an $\operatorname{\mathsf{RUF}}$ of $ \operatorname{\boldsymbol{\gamma}} $ and $[\delta_Ma, \delta b]$ is a co-angled pair. Consequently, $\bar{\delta}_M\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\overline{((\delta_Ma)b^{-1})f}=\overline{\delta f}= \bar{ \operatorname{\boldsymbol{\gamma}}} $, giving the first assertion.
For the second assertion, assume that $\delta\in\mathcal U_n(M)\cap\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$. Consider a co-angled pair $\delta_M\stackrel{a}\longleftarrow\delta'\stackrel{b}\longrightarrow\delta$. Take $\operatorname{\boldsymbol{\alpha}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$ such that $(\operatorname{\boldsymbol{\alpha}} a)b^{-1}=_{\mathcal P}\delta_M$, i.e., $\operatorname{\boldsymbol{\alpha}} a=\delta_Mb$. According to Lemma \ref{unit}, $\operatorname{\boldsymbol{\alpha}}$ is a unit conflation. As $\operatorname{\boldsymbol{\alpha}}=\operatorname{\boldsymbol{\alpha}} 1_M$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\alpha}}$, $\bar{\delta}\circ\bar{\operatorname{\boldsymbol{\alpha}}}=\overline{((\delta b)a^{-1})1}_M=\bar{\delta}_M$. Similarly, since $\delta=\delta 1_M$ is an $\operatorname{\mathsf{RUF}}$ of $\delta$, by the definition, we have $\bar{\operatorname{\boldsymbol{\alpha}}}\circ\bar{\delta}=\overline{((\operatorname{\boldsymbol{\alpha}} a)b^{-1})1}_M=\bar{\delta}_M$. Hence $\bar{\operatorname{\boldsymbol{\alpha}}}$ is the inverse of $\bar{\delta}$, and so, the proof is finished. \end{proof}
We close this section with the following interesting result.
\begin{prop}\label{ringhom} Let $M\in\mathscr{C} $ and fix a unit conflation $\delta_M\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$. Then there exists a ring homomorphism $\varphi :\operatorname{\mathsf{Hom}}_{ \mathscr{C} }(M,M) \rightarrow \operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n M)/{\mathcal P} $ with $ \varphi(f)=\overline{ \delta_Mf}$ such that for any quasi-invertible morphism $f$, $\varphi(f)$ is an invertible element of $\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n M)/{\mathcal P}$, and $\varphi(f)=0$, if $f$ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism. \end{prop} \begin{proof} Assume that $f, g\in\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, M).$ According to \cite[Chapter VII, Lemma 3.2]{mit}, $ \delta_M (f+g)= \delta_Mf + \delta_Mg$, meaning that $ \varphi$ is a morphism of abelian groups. Moreover, we have $\varphi(g)\circ\varphi(f)=\overline{\delta_Mg}\circ\overline{\delta_Mf}=\overline{(\delta_Mg)f}=\overline{\delta_M(gf)}=\varphi(gf)$. Note that the second equality follows from the fact that, one may choose $[\delta_M1_M, \delta_M 1_M]$ as a co-angled pair. Finally, $\varphi(1_M)=\overline{\delta_M1}_M=\bar{\delta}_M$, and then, $\varphi$ is a ring homomorphism. Next assume that $f\in\mathsf{\Sigma}$. Since $\delta_M\in\mathcal U_n(M)$, by Lemma \ref{unit}, $\delta_Mf \in \mathcal U_n(M)$. Indeed, $\delta_Mf\in\mathcal U_n(M)\cap\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$. So Corollary \ref{cor100} implies that $\varphi(f)$ is invertible. Moreover, the last assertion holds true, because of Proposition \ref{three}. Hence the proof is finished. \end{proof}
\section{An equivalence relation on $\operatorname{{\mathsf{Ext}}}^n/{\mathcal P}$} Let $M$ and $N$ be objects of $\mathscr{C} $. This section is devoted to defining an equivalence relation on the class $\bigcup_{ \delta_N}(\operatorname{{\mathsf{Ext}}}^n( M, \mathsf{\Omega}^n N)/{\mathcal P}, \delta_N)$. We will see that this relation is compatible with the composition operator $``\circ"$, as well as the operator $``+"$.
\begin{dfn}\label{rel} Assume that $M,N\in\mathscr{C} $ and $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N), (\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)$ are arbitrary objects of $\bigcup_{ \delta_N\in\mathcal U_n(N)}(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n N)/{\mathcal P}, \delta_N)$. We write $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)$, if $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation, where $[a\delta_N, b\delta'_N]$ is an angled pair, which exists by Proposition \ref{pro100}. \end{dfn}
In the sequel, we show that $``\thicksim"$ is an equivalence relation.
\begin{theorem} $``\thicksim"$ is an equivalence relation. \end{theorem} \begin{proof} Since the reflexivity and symmetry hold trivially, we only need to show the transitivity. To this end, assume that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta)\sim (\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta')$ and $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta')\sim (\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta'')$. We shall prove that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta)\thicksim (\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta'')$. By our assumption, there are angled pairs $\delta'\stackrel{a_1}\longrightarrow\delta_1\stackrel{b_1}\longleftarrow\delta$ and $\delta'\stackrel{a_2}\longrightarrow\delta_2\stackrel{b_2}\longleftarrow\delta''$ such that $a_1\operatorname{\boldsymbol{\gamma}}'-b_1\operatorname{\boldsymbol{\gamma}}$ and $a_2\operatorname{\boldsymbol{\gamma}}'-b_2\operatorname{\boldsymbol{\gamma}}''$ are $\mathcal P$-conflations. Now considering an angled pair $\delta_1\stackrel{a_3}\longrightarrow\delta_3\stackrel{b_3}\longleftarrow\delta_2$, which exists by part (2) of Proposition \ref{pro100}, we will get the following commutative diagram of angled pairs; \[\xymatrix{&&\delta'\ar[ld]_{a_1}\ar[rd]^{a_2}&& \\ \delta\ar[r]^{b_1}&\delta_1~\ar[r]^{a_3}& \delta_3& \delta_2\ar[l]_{b_3}&\delta''\ar[l]_{b_2} .}\] Since $(a_3a_1-b_3a_2)\delta'=0$, dualizing the argument given in the proof of Proposition \ref{three}, would imply that $(a_3a_1-b_3a_2)\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation. Hence, in view of our hypothesis, we may deduce that $(a_3b_1)\operatorname{\boldsymbol{\gamma}}-(b_3b_2)\operatorname{\boldsymbol{\gamma}}''$ is a $\mathcal P$-conflation. Since the class $\mathsf{\Sigma}$ is closed under composition, $[(a_3b_1)\delta, (b_3b_2)\delta'']$ will also be an angled pair. 
Consequently, $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta)\thicksim (\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta'')$, as desired.
\end{proof}
\begin{prop}\label{ind}Let $M, N$ be objects of $\mathscr{C} $ and fix a unit conflation $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$. Then for any $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_{N})\in(\operatorname{{\mathsf{Ext}}}^n( M, {\mathsf{\Omega}'}^nN)/{\mathcal P}, \delta'_N)$, there is a unique object $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$ which is equivalent to $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)$. \end{prop} \begin{proof} Take an angled pair $\delta_N\stackrel{a}\longrightarrow\delta''\stackrel{b}\longleftarrow\delta'_N$. Since $b\operatorname{\boldsymbol{\gamma}}'\in\operatorname{{\mathsf{Ext}}}^n(M, {\mathsf{\Omega}''}^nN)$ and $(\mathsf{\Omega}^nN\stackrel{a}\rightarrow{\mathsf{\Omega}''}^nN)\in\mathsf{\Sigma}$, in view of Corollary \ref{div}(2), there exists $\operatorname{\boldsymbol{\gamma}}\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)$ such that $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation. Moreover, uniqueness of $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)$ follows from Proposition \ref{nul}. So the proof is finished. \end{proof}
\begin{rem} Assume that $M,N\in\mathscr{C} $ and fix a unit conflation $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$. Then it follows from Proposition \ref{ind} that $ (\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^n N)/{\mathcal P}, \delta_N)$ is equal to the set of all equivalence classes of $\bigcup_{{\delta'}_{N}}(\operatorname{{\mathsf{Ext}}}^n(M, {\mathsf{\Omega}'}^n N)/{\mathcal P}, \delta'_N)$ modulo the equivalence relation $``\thicksim"$. \end{rem}
The following easy observation is needed in the next proposition. \begin{lem}\label{pushco}Let $\delta, \delta'\in\mathcal U^n(\mathsf{\Omega}^nM)$ and $\delta\stackrel{a}\longleftarrow\delta''\stackrel{a'}\longrightarrow\delta'$ be a co-angled pair. Let $b:\mathsf{\Omega}^nM\rightarrow{\mathsf{\Omega}'}^nM$ be a morphism in $\mathsf{\Sigma}$. Then $b\delta\stackrel{a}\longleftarrow b\delta''\stackrel{a'}\longrightarrow b\delta'$ is also a co-angled pair. \end{lem} \begin{proof}Since $b\in\mathsf{\Sigma}$, by Lemma \ref{unit}, the push-out of any unit conflation along $b$ is also a unit conflation. By the hypothesis, $\delta a=\delta''=\delta'a'$, and so, applying Remark \ref{pp1}, we obtain the co-angled pair $b\delta\stackrel{a}\longleftarrow b\delta''\stackrel{a'}\longrightarrow b\delta'$. So we are done. \end{proof}
\begin{prop}\label{comequ}The equivalence relation $``\thicksim"$ is compatible with the composition $``\circ"$. \end{prop} \begin{proof}Let $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_M)\in(\operatorname{{\mathsf{Ext}}}^n(K, \mathsf{\Omega}^nM)/{\mathcal P}, \delta_M)$ and $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_M)\in(\operatorname{{\mathsf{Ext}}}^n(K, {\mathsf{\Omega}'}^nM)/{\mathcal P}, \delta'_M)$ such that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_M)\thicksim(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_M)$. Suppose that $(\bar{\operatorname{\boldsymbol{\beta}}}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$. We must show that $(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta_N)$. By our hypothesis, there is an angled pair $\delta_M\stackrel{a}\longrightarrow\delta''_M\stackrel{b}\longleftarrow\delta'_M$ such that $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation. Set $\operatorname{\boldsymbol{\gamma}}'':=a\operatorname{\boldsymbol{\gamma}}$. So $(\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta''_M)\in(\operatorname{{\mathsf{Ext}}}^n(K, {\mathsf{\Omega}''}^nM)/{\mathcal P}, \delta''_M)$. Hence in order to obtain the result, it suffices to show that $(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta_N)$. To do this, assume that $\operatorname{\boldsymbol{\gamma}}=\delta_1f$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. Take a co-angled pair $[\delta_M a_1, \delta_1 b_1]$. As $a\in\mathsf{\Sigma}$, by Lemma \ref{pushco}, $[(a\delta_M) a_1, (a\delta_1) b_1]$ is also a co-angled pair. 
Since $\operatorname{\boldsymbol{\gamma}}=\delta_1f$, one has $\operatorname{\boldsymbol{\gamma}}''=a\operatorname{\boldsymbol{\gamma}}=a(\delta_1f)=(a\delta_1)f$, meaning that $\operatorname{\boldsymbol{\gamma}}''=(a\delta_1)f$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}''$. Consequently, $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\overline{((\operatorname{\boldsymbol{\beta}} a_1){b_1}^{-1})f}=\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}''}$, and so, $(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta_N)$, as desired.
Next consider $(\bar{\operatorname{\boldsymbol{\beta}}'}, \delta'_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, {\mathsf{\Omega}'}^nN)/{\mathcal P}, \delta'_N)$ such that $(\bar{\operatorname{\boldsymbol{\beta}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}'}, \delta'_N)$. We would like to show that $(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}'}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta'_N)$. By the hypothesis, there is an angled pair $[a\delta_N, a'\delta'_N]$ such that $a\operatorname{\boldsymbol{\beta}}-a'\operatorname{\boldsymbol{\beta}}'$ is a $\mathcal P$-conflation, and so, $a\bar{\operatorname{\boldsymbol{\beta}}}=a'\bar{\operatorname{\boldsymbol{\beta}}'}$. Now from the definition of the composition $``\circ"$, one may deduce that $a(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}})=a'(\bar{\operatorname{\boldsymbol{\beta}}'}\circ\bar{\operatorname{\boldsymbol{\gamma}}})$, meaning that $(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}'}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta'_N)$. Thus, the proof is finished. \end{proof}
\begin{lem}The equivalence relation $``\thicksim"$ is compatible with $``+"$. \end{lem} \begin{proof}Assume that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N), (\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$ and $(\bar{\operatorname{\boldsymbol{\beta}}}, \delta'_N), (\bar{\operatorname{\boldsymbol{\beta}}'}, \delta'_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, {\mathsf{\Omega}'}^n N)/{\mathcal P}, \delta'_N)$ such that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}}, \delta'_N)$ and $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}'}, \delta'_N)$. We must show that $(\overline{\operatorname{\boldsymbol{\gamma}}+\operatorname{\boldsymbol{\gamma}}'}, \delta_N)\thicksim(\overline{\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}'}, \delta'_N)$. Since $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}}, \delta'_N)$ and $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\beta}}'}, \delta'_N)$, by the definition, $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\beta}}$ and $a\operatorname{\boldsymbol{\gamma}}'-b\operatorname{\boldsymbol{\beta}}'$ are $\mathcal P$-conflations, where $[a\delta_N, b\delta'_N]$ is an angled pair. Hence, $a(\operatorname{\boldsymbol{\gamma}}+\operatorname{\boldsymbol{\gamma}}')-b(\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}')$ is a $\mathcal P$-conflation, as well. Consequently, $(\overline{\operatorname{\boldsymbol{\gamma}}+\operatorname{\boldsymbol{\gamma}}'}, \delta_N)\thicksim (\overline{\operatorname{\boldsymbol{\beta}}+\operatorname{\boldsymbol{\beta}}'}, \delta'_N)$, as required. \end{proof}
\begin{prop}\label{val}Let $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)$. Then $\operatorname{\boldsymbol{\gamma}}$ is $\mathcal P$-conflation if and only if $\operatorname{\boldsymbol{\gamma}}'$ is so. \end{prop} \begin{proof}Assume that $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation. We show that $\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation, as well. By the hypothesis, $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation, where $[a\delta_N, b\delta'_N]$ is an angled pair. Since $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation, applying Proposition \ref{nul} yields that $a\operatorname{\boldsymbol{\gamma}}$ is also a $\mathcal P$-conflation. So using the fact that $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation, we infer that the same is true for $b\operatorname{\boldsymbol{\gamma}}'$. Hence, another use of Proposition \ref{nul}, guarantees that $\operatorname{\boldsymbol{\gamma}}'$ is also a $\mathcal P$-conflation. Since the reverse implication is obtained similarly, we skip it. So the proof is finished. \end{proof}
{ \section{Phantom stable categories}
Inspired by the stabilization of a Frobenius category, we introduce and study the notion of phantom stable category of an $n$-Frobenius category. We begin with the following motivating observation.
\begin{s}\label{s1s1} Let $\mathscr{C} '$ be a Frobenius category and let $\mathcal{I}$ be the ideal consisting of all morphisms factoring through projective objects. Assume that $\mathscr{C} '/{\mathcal{I}}$ is the stable category of $\mathscr{C} '$. So we have the natural functor $\pi:\mathscr{C} '\longrightarrow\mathscr{C} '/{\mathcal{I}}$ such that for any morphism $f$ whose kernel and cokernel are both projective, $\pi(f)$ is an isomorphism and for any $g\in\mathcal{I}$, $\pi(g)=0$. It is easily seen that the pair $(\mathscr{C} '/{\mathcal{I}}, \pi)$ has universal property with respect to these conditions. This fact is our idea to introduce the notion of {\it phantom stable category} of an $n$-Frobenius category, for any $n\geq 0$. To be precise, let $\mathscr{C} $ be an $n$-Frobenius category and let $\operatorname{{\mathsf{Ext}}}^n$ be all equivalence classes of conflations of length $n$. Assume that $\mathcal P$ is a subfunctor of $\operatorname{{\mathsf{Ext}}}^n$ consisting of all conflations of length $n$ which are obtained as pull-back of conflations along morphisms of the form $M\rightarrow P$, for some $M\in\mathscr{C} $ and $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Assume that $\mathsf{\Sigma}$ is the class of all morphisms acting as invertible on $\operatorname{{\mathsf{Ext}}}^{n+1}$. We introduce the additive category $\mathscr{C} _{\mathcal P}$ and an additive functor $T:\mathscr{C} \rightarrow\mathscr{C} _{\mathcal P}$ such that $T(s)$ is an isomorphism, for any quasi-invertible morphism $s$ and $T(f)=0$ for all $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphisms $f$, which has universal property with respect to these conditions. Indeed, we have the following definition. \end{s}
\begin{dfn}We say that a couple $(\mathscr{C} _{\mathcal P}, T)$, where $\mathscr{C} _{\mathcal P}$ is an additive category and $T:\mathscr{C} \longrightarrow\mathscr{C} _{\mathcal P}$ is a covariant additive functor, is the {\it phantom stable category of $\mathscr{C} $}, if:\\ (1) $T(s)$ is an isomorphism in $\mathscr{C} _{\mathcal P}$, for any quasi-invertible morphism $s$.\\ (2) For any $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism $\varphi$, $T(\varphi)=0$ in $\mathscr{C} _{\mathcal P}$.\\(3) Any covariant additive functor $T':\mathscr{C} \longrightarrow\mathbb{D} $ satisfying the conditions (1) and (2), factors in a unique way through $T$. \end{dfn}
In the following, we show the existence of the phantom stable category of $\mathscr{C} $. First we quote a couple of results.
\begin{lem}\label{iso}Let $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$ such that $\operatorname{\boldsymbol{\gamma}}$ is a unit conflation. Then for any $\delta_M\in\mathcal U_n(M)$, there exists $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta_M)\in(\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nM)/{\mathcal P}, \delta_M)$ such that $\bar{\operatorname{\boldsymbol{\gamma}}'}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\bar{\delta}_M$ and $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}'}=\bar{\delta}_N$. \end{lem} \begin{proof}Take a co-angled pair $[\operatorname{\boldsymbol{\gamma}} a, \delta_Nb]$ and set $\operatorname{\boldsymbol{\gamma}}':=(\delta_Ma)b^{-1}$. So considering an $\operatorname{\mathsf{RUF}}$ $\operatorname{\boldsymbol{\gamma}}=\operatorname{\boldsymbol{\gamma}} 1_M$ of $\operatorname{\boldsymbol{\gamma}}$, we have $\bar{\operatorname{\boldsymbol{\gamma}}'}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\overline{(\operatorname{\boldsymbol{\gamma}}'b)a^{-1}}=\overline{(((\delta_Ma)b^{-1})b)a^{-1}}=\bar{\delta}_M$. Next we show that $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}'}=\bar{\delta}_N$. Since $\operatorname{\boldsymbol{\gamma}}'=(\delta_Ma)b^{-1}$, {by Lemma \ref{unit}, $\operatorname{\boldsymbol{\gamma}}'$ is a unit conflation and so} we may take the co-angled pair $[\operatorname{\boldsymbol{\gamma}}'b, \delta_Ma]$. Now considering $\operatorname{\boldsymbol{\gamma}}'=\operatorname{\boldsymbol{\gamma}}' 1_N$ as an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}'$, and applying Theorem \ref{welldef}, $\bar{\operatorname{\boldsymbol{\gamma}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}'}=\overline{(\operatorname{\boldsymbol{\gamma}} a)b^{-1}}=\bar{\delta}_N$, giving the desired result. \end{proof}
\begin{rem}\label{00}Assume that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)$. So, by the definition, $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation, where $[a\delta_N, b\delta'_N]$ is an angled pair. Thus for any morphism $f$, $(a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}')f=a(\operatorname{\boldsymbol{\gamma}} f)-b(\operatorname{\boldsymbol{\gamma}}'f)$ will be also a $\mathcal P$-conflation, meaning that $(\overline{\operatorname{\boldsymbol{\gamma}} f}, \delta_N)\thicksim(\overline{\operatorname{\boldsymbol{\gamma}}'f}, \delta'_N)$. \end{rem}
The result below is the main theorem of this section. \begin{theorem}\label{thmst}The phantom stable category $(\mathscr{C} _{\mathcal P}, T)$ of $\mathscr{C} $ exists. \end{theorem} \begin{proof}We define the category $\mathscr{C} _{\mathcal P}$ as follows; the objects of $\mathscr{C} _{\mathcal P}$ are the same as objects of $\mathscr{C} $. Moreover, for any two objects $M, N\in\mathscr{C} $, first we fix a unit conflation $\delta_N\in\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nN)$ and set $\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(M, N):=(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$ modulo the equivalence relation $``\thicksim"$. Assume that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$ and $(\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K)\in(\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nK)/{\mathcal P}, \delta_K)$. We define $(\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K)\circ(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N):=(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_K)$, which is well-defined, thanks to Proposition \ref{comequ}. According to Proposition \ref{ass}, the composition operator $``\circ"$ is associative. Moreover, for a given object $M\in\mathscr{C} _{\mathcal P}$, we fix a unit conflation $\delta_M\in\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nM)$. We claim that $1_M=(\bar{\delta}_M, \delta_M)$.
Indeed, it is evident that for any $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$, $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\circ(\bar{\delta}_M, \delta_M)=(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)$. Moreover, take a morphism $(\bar{\operatorname{\boldsymbol{\alpha}}}, \delta'_M)\in(\operatorname{{\mathsf{Ext}}}^n(N, {\mathsf{\Omega}'}^nM)/{\mathcal P}, \delta'_M)$. Applying Propositions \ref{comequ} and \ref{ind}, allows us to assume that $\delta'_M=\delta_M$, and then, one may easily see that $(\bar{\delta}_M, \delta_M)\circ(\bar{\operatorname{\boldsymbol{\alpha}}}, \delta_M)=(\bar{\operatorname{\boldsymbol{\alpha}}}, \delta_M)$. Thus $1_M=(\bar{\delta}_M, \delta_M),$ as claimed. So $\mathscr{C} _{\mathcal P}$ is a category. Clearly, $\mathscr{C} _{\mathcal P}$ is closed under finite direct sums, because the same is true for $\mathscr{C} $, and for any two objects $M, N$, $\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(M, N)$ is an abelian group. Moreover, Propositions \ref{srt1} and \ref{comequ} guarantee that the composition is bilinear, that is, the composition distributes over addition. Consequently, $\mathscr{C} _{\mathcal P}$ is an additive category.
Now let us define the functor $T:\mathscr{C} \longrightarrow\mathscr{C} _{\mathcal P}$. For any object $M\in\mathscr{C} $, we let $T(M)=M$. Moreover, for given $M, N\in\mathscr{C} $, we consider a unit conflation $\delta_N$, and then we define the morphism $T_{M,N}:\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, N)\longrightarrow\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(M, N)$, as $T(f):=(\overline{\delta_Nf}, \delta_N)$, for any morphism $f\in\operatorname{\mathsf{Hom}}_{\mathscr{C} }(M, N)$. It should be pointed out that Proposition \ref{ind}, together with Remark \ref{00}, ensures that $T$ is well-defined.
By our definition, $T(1_N)=(\bar{\delta}_N, \delta_N)=1_{T(N)}$. Furthermore, it is easily seen that for any two composable morphisms $M\stackrel{f}\rightarrow N\stackrel{g}\rightarrow K$ in $\mathscr{C} $, the equalities $\overline{\delta_Kg}\circ\overline{\delta_Nf}=\overline{(\delta_Kg)f}=\overline{\delta_K(gf)}$ hold true, implying that $T(gf)=T(g)\circ T(f)$. Thus $T$ is a covariant functor. Since $\mathscr{C} $ and $\mathscr{C} _{\mathcal P}$ are additive categories and $T$ preserves finite direct sums, it will be an additive functor. Assume that $f:M\rightarrow N$ lies in $\mathsf{\Sigma}$. By virtue of Lemma \ref{unit}, $\delta_Nf$ is a unit conflation, and so, Lemma \ref{iso} forces $T(f)$ to be an isomorphism.
Suppose that $h:X\rightarrow N$ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism. So, by Proposition \ref{three}, $\delta_Nh$ will be a $\mathcal P$-conflation, implying that $T(h)=0$.
Finally, assume that $T':\mathscr{C} \longrightarrow\mathbb{D} $ is a covariant additive functor such that $T'(f)$ is an isomorphism, for any $f\in\mathsf{\Sigma}$ and for any $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism $g$, $T'(g)=0$. In order to complete the proof, we must prove that there exists a unique additive functor $F:\mathscr{C} _{\mathcal P}\longrightarrow\mathbb{D} $ such that $FT=T'$. To do this, we shall define the functor $F:\mathscr{C} _{\mathcal P}\longrightarrow\mathbb{D} $ as follows; for any object $X\in\mathscr{C} _{\mathcal P}$, write $F(X)=T'(X)$. Also, for any morphism $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(M,N)=(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$, we take an $\operatorname{\mathsf{RUF}}$ $\operatorname{\boldsymbol{\gamma}}=\delta' f'$ of $\operatorname{\boldsymbol{\gamma}}$. So, considering a co-angled pair $[\delta_Na_1, \delta' b_1]$, we define $F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))=T'(a_1)T'(b_1)^{-1}T'(f')$, with $(T'(b_1))^{-1}$ being the inverse of $T'(b_1)$. We would like to show that $F$ is well-defined. In this direction, first we prove that $F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))$ is independent of the choice of $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. Assume that $\operatorname{\boldsymbol{\gamma}}=\delta'' f''$ is also another $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$. Now taking a co-angled pair $[\delta_N a_2, \delta''b_2]$, we shall show that $T'(a_1)T'(b_1)^{-1}T'(f')=T'(a_2)T'(b_2)^{-1}T'(f'')$. 
According to the proof of Theorem \ref{welldef} and applying the notation used there, we obtain the following commutative diagram in $\mathscr{C} $; {\footnotesize \[\xymatrix{&N& \\ N_1~\ar[d]_{b_1}\ar[ur]^{a_1}& N_4\ar[l]_{a_4}\ar[r]^{b_4}\ar[d]_{c_4}& N_2\ar[d]_{b_2}\ar[ul]_{a_2}\\ N'~ & N_3\ar[l]_{a_3}\ar[r]^{b_3} & N'',}\]}where $N'$ (resp. $N''$) is the right end term of the unit conflation $\delta'$ (resp. $\delta''$), $N_i$ for any $i$, is the right end term of $\delta_i$ and all morphisms are co-induced by identity over $\mathsf{\Omega}^nN$ . Moreover, by virtue of Proposition \ref{coin} and combining with Lemma \ref{ds}, there is a morphism $h:M\rightarrow N_3$ such that $f'=a_3h$ and $f''=b_3h$. Namely, one may have the following commutative diagram in $\mathscr{C} $; {\footnotesize\[\xymatrix{N'~& N_3\ar[l]_{a_3}\ar[r]^{b_3}& N''\\ & M\ar[u]^{h}\ar[ul]^{f'}\ar[ur]_{f''} .& }\]}Since $T'$ is an additive functor, applying to the above diagrams, gives us commutative diagrams in $\mathbb{D} $. This, in conjunction with the fact that $T'(a)$ is an isomorphism, for any $a\in\mathsf{\Sigma}$, one may deduce that $T'(a_1)T'(b_1)^{-1}T'(f')=T'(a_2)T'(b_2)^{-1}T'(f'')$, as claimed. Finally, we show that if $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\thicksim(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)$, then $F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))=F((\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N))$. Assume that $\operatorname{\boldsymbol{\gamma}}=\delta f$ and $\operatorname{\boldsymbol{\gamma}}'=\delta'f'$ are $\operatorname{\mathsf{RUF}}$s of $\operatorname{\boldsymbol{\gamma}}$ and $\operatorname{\boldsymbol{\gamma}}'$, respectively. So, taking co-angled pairs $[\delta_Na_1, \delta b_1]$ and $[\delta'_Na_2, \delta'b_2]$, we have to show that $T'(a_1)T'(b_1)^{-1}T'(f)=T'(a_2)T'(b_2)^{-1}T'(f')$. 
By our hypothesis, there is an angled pair $\delta_N\stackrel{a}\longrightarrow\delta''_N\stackrel{b}\longleftarrow\delta'_N$ such that $a\operatorname{\boldsymbol{\gamma}}-b\operatorname{\boldsymbol{\gamma}}'$ is a $\mathcal P$-conflation. Set $\operatorname{\boldsymbol{\gamma}}'':=a\operatorname{\boldsymbol{\gamma}}=_{\mathcal P}b\operatorname{\boldsymbol{\gamma}}'$. So considering $(\bar{\operatorname{\boldsymbol{\gamma}}''}, \delta''_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, {\mathsf{\Omega}''}^nN)/{\mathcal P}, \delta''_N)$, we infer that $\operatorname{\boldsymbol{\gamma}}''=(a\delta)f=(b\delta')f'$ are two $\operatorname{\mathsf{RUF}}$s of $\operatorname{\boldsymbol{\gamma}}''$. According to Lemma \ref{pushco}, we may have the co-angled pairs, $[\delta''_Na_1,(a\delta)b_1]$ and $[\delta''_Na_2, (b\delta')b_2]$. Since, as we have seen just above, the definition of $F$ is independent of the choice of $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}''$, we infer that $T'(a_1)T'(b_1)^{-1}T'(f)=T'(a_2)T'(b_2)^{-1}T'(f')$, as desired. \new{For a given object $M\in\mathscr{C} _{\mathcal P}$, we clearly have the equalities, $F(1_M)=(\bar{\delta}_M, \delta_M)=T'(1_M)=1_{T'(M)}=1_{F(M)}$. Next assume that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$ and $(\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K)\in(\operatorname{{\mathsf{Ext}}}^n(N, \mathsf{\Omega}^nK)/{\mathcal P}, \delta_K)$. We have to show that $F((\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K))\circ F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))=F((\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_K))$. 
Suppose that $\operatorname{\boldsymbol{\gamma}}=\delta_{N'}f$ and $\operatorname{\boldsymbol{\beta}}=\delta_{K'}g$ are $\operatorname{\mathsf{RUF}}$s of $\operatorname{\boldsymbol{\gamma}}$ and $\operatorname{\boldsymbol{\beta}}$, respectively. Thus, taking co-angled pairs $[\delta_Na, \delta_{N'}b]$ and $[\delta_Kc, \delta_{K'}e]$, we get the equalities; $F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))=T'(a)T'(b)^{-1}T'(f)$ and $F((\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K))=T'(c)T'(e)^{-1}T'(g)$, and so, $F((\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K))\circ F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))=T'(c)T'(e)^{-1}T'(g)T'(a)T'(b)^{-1}T'(f)$. On the other hand, $\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}=\overline{((\operatorname{\boldsymbol{\beta}} a)b^{-1})f}.$ Set $\operatorname{\boldsymbol{\eta}}:=(\operatorname{\boldsymbol{\beta}} a)b^{-1}$. So one has that $(\overline{\operatorname{\boldsymbol{\beta}} a}, \delta_K)=(\overline{\operatorname{\boldsymbol{\eta}} b}, \delta_K)$. Taking an $\operatorname{\mathsf{RUF}}$ $\operatorname{\boldsymbol{\eta}}=\delta_{K''}h$ of $\operatorname{\boldsymbol{\eta}}$, one gets that $\operatorname{\boldsymbol{\eta}} f=\delta_{K''}(hf)$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\eta}} f$. Therefore, considering a co-angled pair $[\delta_K s, \delta_{K''}t]$, we have that $F(\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_K)=F(\overline{\operatorname{\boldsymbol{\eta}} f}, \delta_K)=T'(s)T'(t)^{-1}T'(h)T'(f)$. Moreover, the well-definedness of $F$ yields that $F((\overline{\operatorname{\boldsymbol{\beta}} a}, \delta_K))=F((\overline{\operatorname{\boldsymbol{\eta}} b}, \delta_K))$, and so, $T'(c)T'(e)^{-1}T'(g)T'(a)=T'(s)T'(t)^{-1}T'(h)T'(b)$. 
This would imply that $F((\bar{\operatorname{\boldsymbol{\beta}}}, \delta_K))\circ F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N))=F((\bar{\operatorname{\boldsymbol{\beta}}}\circ\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_K))$, as desired.} It is clear that $FT=T'$ and also uniqueness of $F$ is obvious. So the proof is completed. \end{proof}
Assume that $\mathscr{C} '$ is a Frobenius category (or 0-Frobenius category in our sense). Then it follows from the proof of the above theorem that the phantom stable category $(\mathscr{C} '_{\mathcal P}, T)$ is indeed $(\mathscr{C} '/{\mathcal{I} }, \pi)$, which has been mentioned in \ref{s1s1}. Namely, in the case $n=0$, the phantom stable category is actually the classical stable category of a Frobenius category.
From now on, to simplify the notation, we shall denote $\operatorname{\mathsf{Hom}}_{\mathscr{C} _{\mathcal P}}(-, -)$ by $\mathscr{C} _{\mathcal P}(-, -)$.
\begin{lem}\label{lem1}Let $\mathscr{C} '$ be a full subcategory of $\mathscr{C} $ which is {closed under extensions and kernels of epimorphisms}. Assume that: \begin{enumerate}\item $\mathscr{C} '$ is an $n$-Frobenius category. \item $n$-$\operatorname{\mathsf{proj}}\mathscr{C} '\subseteq n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. \item $\mathscr{C} $ has enough $n$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. \end{enumerate}Then for any two objects $M, N$ in $\mathscr{C} '$, ${\mathscr{C} _{\mathcal P}}(M, N)={\mathscr{C} '_{\mathcal P}}(M, N)$. \end{lem} \begin{proof} Assume that $M, N$ are arbitrary objects of $\mathscr{C} '$ and take a morphism $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in{\mathscr{C} _{\mathcal P}}(M, N)=(\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)$. {Since $n$-$\operatorname{\mathsf{proj}}\mathscr{C} '\subseteq n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, we may assume that $\mathsf{\Omega}^nN\in\mathscr{C} '$. Moreover}, using the fact that $\mathscr{C} $ has enough $n$-$\operatorname{\mathsf{proj}}\mathscr{C} '$, one may follow the argument given in the proof of Lemma \ref{gencog}, and get the following commutative diagram; {\footnotesize \[\xymatrix{\operatorname{\boldsymbol{\gamma}}':\mathsf{\Omega}^nN~\ar[r]\ar@{=}[d]& H'\ar[r]\ar[d]_{b_{n-1}}& P_{n-2}\ar[r]\ar[d]_{b_{n-2}} &\cdots \ar[r]& P_0\ar[r]\ar[d]_{b_0} & M\ar@{=}[d]\\ \operatorname{\boldsymbol{\gamma}}:\mathsf{\Omega}^nN~\ar[r] &X_{n-1}\ar[r]& X_{n-2}\ar[r]&\cdots\ar[r]& X_0\ar[r] & M,}\]}such that all $P_i$'s belong to $n$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. Since $\mathscr{C} '$ is a full subcategory of $\mathscr{C} $ which is closed under extensions and kernels of epimorphisms, we infer that $\operatorname{\boldsymbol{\gamma}}'$ is a conflation in $\mathscr{C} '$. 
This, in turn, implies that $\operatorname{\boldsymbol{\gamma}}$ can be considered as a conflation in $\mathscr{C} '$, because $\operatorname{\boldsymbol{\gamma}}=\operatorname{\boldsymbol{\gamma}}'$. Now, we should prove that if $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation in $\mathscr{C} '$, then it is a $\mathcal P$-conflation in $\mathscr{C} $ and vice versa. First, assume that $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation in $\mathscr{C} '$. Since $\mathscr{C} '\subseteq\mathscr{C} $ and any $n$-projective of $\mathscr{C} '$ lies in $n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, we infer that $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation in $\mathscr{C} $. Next, we assume that $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation in $\mathscr{C} $. Thus, there is a morphism $h:P\rightarrow\mathsf{\Omega}^nN$ with $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, and $\operatorname{\boldsymbol{\epsilon}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, P)$ such that $\operatorname{\boldsymbol{\gamma}}=h\operatorname{\boldsymbol{\epsilon}}$. Take a conflation $\mathsf{\Omega} P\rightarrow Q\stackrel{g}\rightarrow P$, where $Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. Evidently, $\mathsf{\Omega} P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. This, in turn, yields that the morphism $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, Q)\rightarrow\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, P)$ is an epimorphism, and so, there exists $\operatorname{\boldsymbol{\eta}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, Q)$ such that $g\operatorname{\boldsymbol{\eta}}=\operatorname{\boldsymbol{\epsilon}}$. As $M, Q\in\mathscr{C} ',$ similar to the above diagram, we may assume that $\operatorname{\boldsymbol{\eta}}\in\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} '}(M, Q)$. 
Consequently, $\operatorname{\boldsymbol{\gamma}}=(hg)\operatorname{\boldsymbol{\eta}}$, i.e., $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation in $\mathscr{C} '$. \end{proof}
\begin{prop}\label{l}Let $\mathscr{C} '$ be a full subcategory of $\mathscr{C} $ which is closed under extensions and kernels of epimorphisms and let $k\leq n$ be non-negative integers. Assume that: \begin{enumerate}\item $\mathscr{C} '$ is a $k$-Frobenius category. \item $k$-$\operatorname{\mathsf{proj}}\mathscr{C} '\subseteq n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. \item $\mathscr{C} $ has enough $k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. \end{enumerate}Consider the composition functor $T'':\mathscr{C} '\stackrel{i}\rightarrow\mathscr{C} \stackrel{T}\rightarrow\mathscr{C} _{\mathcal P}$, with $i$ the inclusion functor. Then there is a unique induced fully faithful functor $F:\mathscr{C} '_{\mathcal P}\longrightarrow\mathscr{C} _{\mathcal P}$ such that $FT'=T''$, where $(\mathscr{C} _{\mathcal P}, T)$ and $(\mathscr{C} '_{\mathcal P}, T')$ are phantom stable categories. \end{prop} \begin{proof} For any $M, N\in \mathscr{C} '$, we consider a unit conflation $\delta_N:\mathsf{\Omega}^nN\rightarrow P_{n-1}\rightarrow\cdots\rightarrow P_0\rightarrow N$, with $P_i\in k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$, for any $i$, and then we have $T''(f)=(\overline{\delta_Nf}, \delta_N)$, for any morphism $f:M\rightarrow N$, because the definition of $T$ is independent of the choice of the unit conflation $\delta_N$. Assume that $f\in\operatorname{\mathsf{Hom}}_{\mathscr{C} '}(M, N)$ which belongs to $\mathsf{\Sigma}$. {In view of Lemma \ref{conf}, there is a conflation $M\rightarrow N\oplus Q\rightarrow P$, in which $P, Q\in k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. Since by our assumption, $P, Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, applying Corollary \ref{is} and Remark \ref{rems} together, we deduce that $f\in\mathsf{\Sigma},$ as a morphism in $\mathscr{C} $, and then,} $T''(f)=T(f)$ will be an isomorphism in $\mathscr{C} _{\mathcal P}$. 
Now suppose that $f:M\rightarrow N$ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism in $\mathscr{C} '$. We shall prove that $T''(f)=0$ in $\mathscr{C} _{\mathcal P}$. Since $f$ is an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism, in view of Proposition \ref{ph}, there are morphisms $N\stackrel{a}\leftarrow N''\stackrel{b}\rightarrow N'$ with $a, b\in\mathsf{\Sigma}$ and $h:M\rightarrow N''$ such that $f=ah$ and $bh$ factors through an object $Q\in k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. By our assumption, $Q\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, and so $bh$ will be an $n$-$\operatorname{{\mathsf{Ext}}}$-phantom morphism in $\mathscr{C} $, implying that $T''(bh)=T(bh)=0$. Therefore, as $T''(b)$ is an isomorphism, we have $T''(h)=0$. Consequently, $T''(f)=T''(a)T''(h)=0$ in $\mathscr{C} _{\mathcal P}$, as needed. Therefore, the universal property of the phantom stable category gives rise to the existence of a unique functor $F:\mathscr{C} '_{\mathcal P}\rightarrow\mathscr{C} _{\mathcal P}$ such that $FT'=T''$.
Now we prove that $F$ is faithful. The result for the case $k=n$, follows from Lemma \ref{lem1}. So assume that $k<n$. Take a morphism $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta)\in{\mathscr{C} '_{\mathcal P}}(M, N)=(\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} '}(M, \mathsf{\Omega}^kN)/{\mathcal P}, \delta)$ such that $F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta))=0$ in $\mathscr{C} _{\mathcal P}$. Suppose that $\operatorname{\boldsymbol{\gamma}}=\delta' f$ is an $\operatorname{\mathsf{RUF}}$ of $\operatorname{\boldsymbol{\gamma}}$ and take a co-angled pair $[\delta a, \delta' b]$. Since, by the definition $F((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta))=T''(a)T''(b)^{-1}T''(f)$ and $T''(a)$ and $T''(b)^{-1}$ are isomorphisms, we get that $T''(f)\cong 0$ in $\mathscr{C} _{\mathcal P}$. So taking a unit conflation $\delta_1:\mathsf{\Omega}^nN\longrightarrow P_{n-1}\longrightarrow\cdots\longrightarrow P_k\longrightarrow\mathsf{\Omega}^kN$ with $P_i^,s\in k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$, and letting $\delta'_N:=\delta_1\delta'$, we have $T''(f)=(\overline{\delta'_Nf}, \delta'_N)$. So one has the following push-out diagram; {\footnotesize\[\xymatrix{\operatorname{\boldsymbol{\eta}}:P~\ar[r]\ar[d]_{g} & L\ar[d]\ar[r] &\cdots \ar[r]& P_1\ar[r]\ar@{=}[d]\ar[r]& H\ar@{=}[d]\ar[r] & M\ar@{=}[d]\\ \delta'_Nf:\mathsf{\Omega}^nN~\ar[r] & P_{n-1}\ar[r]& \cdots \ar[r] & P_1\ar[r]& H\ar[r] & M,}\]}where $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. Take a unit conflation $\mathsf{\Omega} P\rightarrow Q\rightarrow P$, where $Q\in k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$. Since $k<n$ and $\mathsf{\Omega} P\in n$-$\operatorname{\mathsf{inj}}\mathscr{C} $, it is easily seen that $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(-, P)=0$ over $\mathscr{C} '$. Consequently, $\operatorname{\boldsymbol{\eta}}=0$, and then, the same is true for $\delta'_Nf$. 
Now decomposing the unit conflation $\mathsf{\Omega}^nN\rightarrow P_{n-1}\rightarrow\cdots\rightarrow P_k\rightarrow\mathsf{\Omega}^kN$ into conflations of length 1, one may obtain the isomorphisms; $\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} '}(M, \mathsf{\Omega}^nN)\cong\operatorname{{\mathsf{Ext}}}^{n-1}_{\mathscr{C} '}(M, \mathsf{\Omega}^{n-1}N)\cong\cdots\cong\operatorname{{\mathsf{Ext}}}^{k+1}_{\mathscr{C} '}(M, \mathsf{\Omega}^{k+1}N)$. So, letting $\delta_2:=\mathsf{\Omega}^{k+1}N\rightarrow P_k\stackrel{h}\rightarrow\mathsf{\Omega}^kN$, we will have $\delta_2\operatorname{\boldsymbol{\gamma}}=0$. Now by considering the exact sequence $\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} '}(M, P_k)\stackrel{\mathbf h}\rightarrow\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} '}(M, \mathsf{\Omega}^kN)\stackrel{}\rightarrow\operatorname{{\mathsf{Ext}}}^{k+1}_{\mathscr{C} '}(M, \mathsf{\Omega}^{k+1}N)$, we infer that there exists $\operatorname{\boldsymbol{\alpha}}\in\operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} '}(M, P_k)$ such that $h\operatorname{\boldsymbol{\alpha}}=\operatorname{\boldsymbol{\gamma}}$, that is, $\operatorname{\boldsymbol{\gamma}}$ is a $\mathcal P$-conflation in $\mathscr{C} '$, and then, $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta)=0$. Finally, we show that the functor $F$ is full. Assume that $M, N\in\mathscr{C} '$ and fix a unit conflation $\delta_N:\mathsf{\Omega}^nN\longrightarrow P_{n-1}\rightarrow\cdots\rightarrow P_0\rightarrow N$, where $P_i\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, for any $i$. Suppose that $(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)\in{\mathscr{C} _{\mathcal P}}(M, N)= (\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/\mathcal P, \delta_N)$. In the case $k=n$, the fullness of $F$ follows from Lemma \ref{lem1}. So assume that $k<n$. 
Since any object of $k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$ is $n$-projective over $\mathscr{C} $, we may further assume that all terms of the unit conflation $\delta_N$ lie in $\mathscr{C} '$. So one may deduce that for any $Q\in k$-$\operatorname{\mathsf{proj}}\mathscr{C} '$ and $i>k$, $\operatorname{{\mathsf{Ext}}}^i_{\mathscr{C} }(-, Q)=0$ over $\mathscr{C} '$. Now, decomposing the unit conflation $\delta_1:\mathsf{\Omega}^nN\rightarrow P_{n-1}\rightarrow\cdots\rightarrow P_k\rightarrow\mathsf{\Omega}^kN$ into conflations of length 1, one may obtain the isomorphisms; $$\operatorname{{\mathsf{Ext}}}^n_{\mathscr{C} }(M, \mathsf{\Omega}^nN)\cong\operatorname{{\mathsf{Ext}}}^{n-1}_{\mathscr{C} }(M, \mathsf{\Omega}^{n-1}N)\cong\cdots\cong\operatorname{{\mathsf{Ext}}}^{k+1}_{\mathscr{C} }(M, \mathsf{\Omega}^{k+1}N)\cong \operatorname{{\mathsf{Ext}}}^k_{\mathscr{C} }(M, \mathsf{\Omega}^kN)/\mathcal P.$$ Hence, there exists $(\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N)\in{\mathscr{C} '_{\mathcal P}}(M, N)$ such that $F((\bar{\operatorname{\boldsymbol{\gamma}}'}, \delta'_N))=(\overline{\delta_1\operatorname{\boldsymbol{\gamma}}'}, \delta_1\delta'_N)=(\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)$, where $\delta'_N:\mathsf{\Omega}^kN\rightarrow P_{k-1}\rightarrow\cdots\rightarrow P_0\rightarrow N$. So the proof is finished. \end{proof}
\begin{cor}\label{p}Let $n>k$ and $\mathscr{C} $ be also a $k$-Frobenius category. Then the phantom stable categories of $\mathscr{C} $, as a $k$-Frobenius and as an $n$-Frobenius category, are equivalent. \end{cor} \begin{proof}Assume that $\mathscr{C} '_{\mathcal P}$ and $\mathscr{C} _{\mathcal P}$ denote the phantom stable categories of $\mathscr{C} $, as a $k$ and an $n$-Frobenius category, respectively. According to Proposition \ref{l}, there is a fully faithful functor $F:\mathscr{C} '_{\mathcal P}\longrightarrow\mathscr{C} _{\mathcal P}$. Evidently, $F$ is also dense, and then, the proof is finished. \end{proof}
\begin{example}\label{exfaith} (1) With the notation of Example \ref{ex1}, we set $\mathscr{C} :=\mathcal{G}^{<\infty}$ and $\mathscr{C} ':=\mathcal{G}$. Then, by Proposition \ref{l}, there exists a fully faithful functor $\mathscr{C} '_{\mathcal P}\rightarrow\mathscr{C} _{\mathcal P}$. \\ (2) Assume that $\mathscr{C} $ (resp. $\mathscr{C} '$) is the category consisting of all syzygies of complete resolutions of $n$-projectives (resp. locally free) sheaves over $\mathsf {X}$. If $\mathscr{C} $ has enough locally free sheaves, then Proposition \ref{l} ensures the existence of a fully faithful functor $\mathscr{C} '_{\mathcal P}\rightarrow\mathscr{C} _{\mathcal P}$. \end{example} }
\begin{prop} For a given object $M\in\mathscr{C} _{\mathcal P}$, the following are equivalent: \begin{enumerate} \item ${\mathscr{C} _{\mathcal P}}(-, M)=0$. \item ${\mathscr{C} _{\mathcal P}}(M, -)=0$. \item ${\mathscr{C} _{\mathcal P}}(M, M)=0$. \item $M\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $. \end{enumerate} \end{prop} \begin{proof} $(1\Rightarrow 2)$: As ${\mathscr{C} _{\mathcal P}}(-, M)=0$, we have ${\mathscr{C} _{\mathcal P}}(M, M)=0$. So $\delta_M$ is a $\mathcal P$-conflation, and in particular, the same is true for $\delta_M1_M$. So applying Corollary \ref{ccoo}, we infer that $(\operatorname{{\mathsf{Ext}}}^n/{\mathcal P})1_M=0$, and particularly, $(\operatorname{{\mathsf{Ext}}}^n(M, -)/{\mathcal P})1_M=0$, namely, $\mathscr{C} _{\mathcal P}(M, -)=0$.\\ $(2\Rightarrow 3)$: This is obvious.\\
$(3\Rightarrow 4)$: Take the identity morphism $(\bar{\delta}_M, \delta_M)\in\mathscr{C} _{\mathcal P}(M, M)$. By our assumption $\delta_M$ is a $\mathcal P$-conflation. Thus there exist a morphism $h:P\rightarrow\mathsf{\Omega}^nM$ with $P\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $ and $\operatorname{\boldsymbol{\eta}}\in\operatorname{{\mathsf{Ext}}}^n(M, P)$ such that $\delta_M=h\operatorname{\boldsymbol{\eta}}$. Taking an $\operatorname{\mathsf{LUF}}$ $\operatorname{\boldsymbol{\eta}}=g\delta'_M$ of $\operatorname{\boldsymbol{\eta}}$, we obtain the following push-out diagram; \[\xymatrix{{\delta'_M:\mathsf{\Omega}'}^nM ~\ar[r] \ar[d]_{hg}& Q\ar[r]\ar[d]& \cdots\ar[r] & P_0\ar[r] \ar@{=}[d] & M\ar@{=}[d]\\ \delta_M:\mathsf{\Omega}^n M~\ar[r] &P_{n-1}~\ar[r] & \cdots\ar[r] & P_0 \ar[r] & M.}\] Hence, for a given object $X\in\mathscr{C} $, we will have the following commutative square; {\footnotesize\[\xymatrix{\operatorname{{\mathsf{Ext}}}^{n+1}(X, M)~\ar[r]^{\cong} \ar@{=}[d]& \operatorname{{\mathsf{Ext}}}^{2n+1}(X, {\mathsf{\Omega}'}^nM)\ar[d]^{ \mathbf h\mathbf g}\\ \operatorname{{\mathsf{Ext}}}^{n+1}(X, M)~\ar[r]^{\cong} &\operatorname{{\mathsf{Ext}}}^{2n+1}(X, \mathsf{\Omega}^nM).}\]}As $hg$ factors through the $n$-projective object $P$, the right column is zero, and then $\operatorname{{\mathsf{Ext}}}^{n+1}(X, M)=0$, meaning that $M\in n$-$\operatorname{\mathsf{inj}}\mathscr{C} $. Therefore, $M\in n$-$\operatorname{\mathsf{proj}}\mathscr{C} $, because $\mathscr{C} $ is $n$-Frobenius.\\ $(4\Rightarrow 1)$: This implication is clear. So the proof is finished. \end{proof}
\begin{prop}\label{cp}(1) Let $f:M\rightarrow N$ be a morphism in $\mathsf{\Sigma}$. Then for any $X\in\mathscr{C} $, ${\mathscr{C} _{\mathcal P}}(N, X)\stackrel{\bar{\mathbf f}}\rightarrow{\mathscr{C} _{\mathcal P}}(M, X)$ is an isomorphism.\\ (2) Let $M\stackrel{f}\rightarrow N\stackrel{g}\rightarrow K$ be a conflation in $\mathscr{C} $. Then, for any object $X\in\mathscr{C} $, there exists an exact sequence; $${\mathscr{C} _{\mathcal P}}(K, X)\stackrel{\bar{\mathbf g}}\longrightarrow{\mathscr{C} _{\mathcal P}}(N, X)\stackrel{\bar{\mathbf f}} \longrightarrow{\mathscr{C} _{\mathcal P}}(M, X).$$ \end{prop} \begin{proof}The first statement is clear. The second one follows from Corollary \ref{qo} and Proposition \ref{pprop}. \end{proof}
\begin{s}Let $M,N\in\mathscr{C} $ and let $\mathsf{\Omega} M\rightarrow Q\rightarrow M$ and $\mathsf{\Omega} N\rightarrow P\rightarrow N$ be two syzygy sequences of $M$ and $N$, respectively. We would like to define an induced map $\mathsf{\Omega}:\mathscr{C} _{\mathcal P}(M, N)\rightarrow\mathscr{C} _{\mathcal P}(\mathsf{\Omega} M, \mathsf{\Omega} N)$. In this direction, we must define a map; $$\mathsf{\Omega}:(\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}, \delta_N)\longrightarrow(\operatorname{{\mathsf{Ext}}}^n(\mathsf{\Omega} M, \mathsf{\Omega}^{n+1}N)/{\mathcal P}, \delta_{\mathsf{\Omega} N}).$$ One should note that Proposition \ref{ind} allows us to take $\delta_N:=\mathsf{\Omega}^nN\rightarrow P_{n-1}\rightarrow\cdots\rightarrow P_0\rightarrow N$ and $\delta_{\mathsf{\Omega} N}:=\mathsf{\Omega}^{n+1}N\rightarrow P_n\rightarrow P_{n-1}\rightarrow\cdots\rightarrow P_0\rightarrow N.$ Consider the natural isomorphisms; $\operatorname{{\mathsf{Ext}}}^n(M, \mathsf{\Omega}^nN)/{\mathcal P}\cong\operatorname{{\mathsf{Ext}}}^{n+1}(M, \mathsf{\Omega}^{n+1}N)\cong\operatorname{{\mathsf{Ext}}}^n(\mathsf{\Omega} M, \mathsf{\Omega}^{n+1}N)/{\mathcal P}$, where the first isomorphism holds true, because of Proposition \ref{pprop} and the second one comes from \ref{ccor}. Denoting the composition of these isomorphisms by $\theta$, we define $\mathsf{\Omega}((\bar{\operatorname{\boldsymbol{\gamma}}}, \delta_N)):=(\theta(\bar{\operatorname{\boldsymbol{\gamma}}}), \delta_{\mathsf{\Omega} N})$. Indeed, we have the following result, which is analogous to the well-known result in the classical stable category of a Frobenius category. \end{s} \begin{theorem}\label{syziso}With the notation above, there is an induced map $\mathsf{\Omega}:\mathscr{C} _{\mathcal P}(M, N)\rightarrow\mathscr{C} _{\mathcal P}(\mathsf{\Omega} M, \mathsf{\Omega} N)$ which is an isomorphism. \end{theorem}
\begin{rem}Let $\mathcal L$ be the subcategory of $\operatorname{\mathsf{coh}}(\mathsf {X})$ consisting of all locally free sheaves. As observed in Proposition \ref{locally}, $\mathscr{C} (\mathcal L)$ is an $n$-Frobenius subcategory of $\operatorname{\mathsf{coh}}(\mathsf {X})$ with $n$-$\operatorname{\mathsf{proj}}\mathscr{C} (\mathcal L)=\mathcal L$. So considering the phantom stable category $\mathscr{C} (\mathcal L)_{\mathcal P}$, we have that an object $\mathcal{F}\in\mathscr{C} (\mathcal L)$ is locally free if and only if $\mathscr{C} (\mathcal L)_{\mathcal P}(\mathcal{F}, -)=0=\mathscr{C} (\mathcal L)_{\mathcal P}(-, \mathcal{F})$. Next assume that $\mathsf {X}$ is a Gorenstein scheme, i.e. all its local rings are Gorenstein local rings. Then, for any $\mathcal{F}\in\operatorname{\mathsf{coh}}(\mathsf {X})$, $\mathsf{\Omega}^d\mathcal{F}\in\mathscr{C} (\mathcal L)$, where $d=\dim\mathsf {X}$; see \cite[Theorem 2.2.3]{ajs}. Thus, we may infer that $\mathsf {X}$ is non-singular if and only if $\mathscr{C} (\mathcal L)_{\mathcal P}=0$. \end{rem}
{\bf{Acknowledgements.}} The authors are grateful to Sergio Estrada and Rasool Hafezi for their reading and suggestions on the first draft of the paper.
\end{document}
\begin{document}
\newtheorem{definicao}{Definition} \newtheorem{teorema}{Theorem} \newtheorem{lema}{Lemma} \newtheorem{corolario}{Corollary} \newtheorem{proposicao}{Proposition} \newtheorem{axioma}{Axiom} \newtheorem{observacao}{Observation}
\title{Labels for Non-Individuals}
\begin{abstract}
Quasi-set theory is a first-order theory without identity, which allows us to cope with non-individuals in a sense. A weaker equivalence relation called ``indistinguishability'' is an extension of identity in the sense that if $x$ is identical to $y$ then $x$ and $y$ are indistinguishable, although the converse is not always valid. The interesting point is that quasi-set theory provides us with a useful mathematical background for dealing with collections of indistinguishable elementary quantum particles. In the present paper, however, we show that even in quasi-set theory it is possible to label objects that are considered as non-individuals. We intend to prove that individuality has nothing to do with any labelling process at all, as suggested by some authors. We discuss the physical interpretation of our results.
\end{abstract}
\section{Introduction}
The problems raised by non-individuality in quantum mechanics have given rise to many papers in the literature. See, for example, the references in French (2004).
Elementary particles that share the same set of state-independent (intrinsic) properties are sometimes said to be {\em indistinguishable\/}. Although classical particles can share all their intrinsic properties, we might say that they `have' some kind of {\em quid\/} which makes them individuals. Hence, we are able to follow the trajectories of classical particles, at least in principle. That allows us to identify them. In quantum physics this is not possible, i.e., it is not possible, {\em a priori\/}, to keep track of individual particles in order to distinguish among them when they share the same set of intrinsic properties. In other words, it is not possible to label quantum particles. And this non-individuality plays a very important role in quantum mechanics (Sakurai, 1994).
On the possibility that collections of such indistinguishable entities should not be considered as sets in the usual sense, Manin (1976) proposed the search for axioms which should allow to deal with indiscernible objects. As he said,
\begin{quote} I would like to point out that it [standard set theory] is rather an extrapolation of common-place physics, where we can distinguish things, count them, put them in some order, etc. New quantum physics has shown us models of entities with quite different behavior. Even {\em sets\/} of photons in a looking-glass box, or of electrons in a nickel piece are much less Cantorian than the {\em sets\/} of grains of sand. \end{quote}
We are using the philosophical jargon in saying that `indistinguishable' objects are objects that share their properties, while `identical' objects means `the very same object'. Nevertheless, in considering the behavior of the ensembles of such particles, there is a fundamental difference between classical and quantum statistics. In classical statistical mechanics, particles are treated like individuals. In quantum statistics, on the other hand, the Indistinguishability Postulate asserts that if a permutation is applied to any state for an assembly of particles, then there is no way of distinguishing the resulting permuted state-function from the original one by means of any observation at any time. The Indistinguishability Postulate (IP) seems to be one of the most basic principles of quantum theory and implies that permutations of quantum particles are not regarded as an observable.
Usually, IP has been interpreted in two basic ways: the first assumes that IP implies that quantum particles cannot be regarded as `individuals', since an `individual' should be something having properties similar to those of usual (macroscopic) bodies. This interpretation is closely related to what is assumed in the context of quantum field theory, since, roughly speaking, quantum field theories do not deal with `individuals' . The second way regards particles as individuals in a sense, and the non-classical counting of quantum statistics are then viewed as resulting from the restrictions imposed to the set of the possible states accessible to the particles. In short, only symmetrical and anti-symmetrical states are available, and the initially attached individuality of particles is then `veiled' by such a criterion. Both alternatives, albeit used in current literature, present problems from the `foundational' point of view. There is some obscurity lurking in the concept of individuality in quantum physics. The idea of considering `non-individuals' is weird, and in general other metaphysical packages are used instead. For instance, that one which assumes that quantum objects are individuals of a sort, despite quite distinct from the usual objects described by classical mechanics (Sant'Anna and Krause, 1997).
Let us recall that some authors like Hermann Weyl expressed the calculation with `aggregates' so that some of the basic assumptions of quantum theory can be reached in an adequate way. Weyl's efforts were done in the sense of finding an alternative manner to express the procedure physicists implicitly use in treating indistinguishable particles, namely, the assumption that there is a {\it set\/} $S$ of (hence, distinguishable) objects (say, $n$ objects) endowed with an equivalence relation $\sim$. Then the `desired result', according to Weyl, is to obtain the {\it ordered decomposition\/} $n = n_{1} + \cdots + n_{k}$, where $n_{i}$ are the cardinalities of the equivalence classes $C_{i}$, $i = 1, \ldots, k$ of the quotient set $S/\sim$. But, as it is easy to note, this procedure `veils' the very nature of the elements of the set $S$, that is, veils the fact that they are individual objects since they are members of a {\it set\/}. We would like to emphasize that there is no escape. Classical logic and mathematics are committed to a conception of identity which does not make any distinction between identity and indistinguishability: indistinguishable things are the very same thing and conversely.
One manner to cope with the problem of non-individuality in quantum physics is by means of quasi-set theory (Krause, 1992; Krause, Sant'Anna, and Volkov, 1999; Sant'Anna and Santos, 2000), which is an extension of Zermelo-Fraenkel set theory, that allows to talk about certain indistinguishable objects that are not identical. Such indistinguishable objects are termed as non-individuals. In quasi-set theory identity does not apply to all objects. In other words, there are some kinds of terms in quasi-set theory where the sequence of symbols $x = y$ is not a well-formed-formula, i.e., it is meaningless. A weaker equivalence relation called ``indistinguishability'' is an extension of identity in the sense that it allows the existence of {\em two\/} objects that are indistinguishable. In standard mathematics, there is no sense in saying that two objects are identical. If $x = y$, then we are talking about one single object with two labels, namely, $x$ and $y$.
We want to continue our investigations on the use of quasi-set theory in the foundations of quantum mechanics, based on some questions that we consider interesting. Some of these questions have to do with the notion of {\em levels of individuality\/}, which is introduced in further detail in the next sections. Actually, our main mathematical framework is some sort of quasi-set-theoretical predicate for quantum systems, which is a natural extension of Patrick Suppes's (2002) ideas about axiomatization. We prove, e.g., that even in quasi-set theory objects without individuality (in the sense of the theory) may be labelled if certain conditions are satisfied. We want to investigate the meaning of this labelling process from the point of view of formal logic and we want to study the possibility of a new kind of quasi-set theory where this kind of labelling process cannot be performed.
\section{Quasi-sets}
This section is strongly based on other works about quasi-set theory (Krause, 1992; Krause, Sant'Anna, and Volkov, 1999; Sant'Anna and Santos, 2000). We use standard logical notation for first-order theories (Mendelson, 1997).
Quasi-set theory ${\cal Q}$ is based on Zermelo-Fraenkel-like axioms and allows the presence of two sorts of atoms ({\it Urelemente\/}), termed $m$-atoms (micro-atoms) and $M$-atoms (macro-atoms). Concerning the $m$-atoms, a weaker `relation of indistinguishability' (denoted by the symbol $\equiv$), is used instead of identity, and it is postulated that $\equiv$ has the properties of an equivalence relation. The predicate of equality cannot be applied to the $m$-atoms, since no expression of the form $x = y$ is a formula if $x$ or $y$ denote $m$-atoms. Hence, there is a precise sense in saying that $m$-atoms can be indistinguishable without being identical. This justifies what we said above about the `lack of identity' to some objects.
The universe of ${\cal Q}$ is composed by $m$-atoms, $M$-atoms and {\it quasi-sets\/} (qsets, for short). The axiomatization is adapted from that of ZFU (Zermelo-Fraenkel with {\it Urelemente\/}), and when we restrict the theory to the case which does not consider $m$-atoms, quasi-set theory is essentially equivalent to ZFU, and the corresponding quasi-sets can then be termed `sets' (similarly, if also the $M$-atoms are ruled out, the theory collapses into ZFC). The $M$-atoms play the same role of the {\it Urelemente\/} in the sense of ZFU.
In all that follows, $\exists_Q$ and $\forall_Q$ are the quantifiers relativized to quasi-sets. That is, $Q(x)$ reads as `$x$ is a quasi-set'.
In order to preserve the concept of identity for the `well-behaved' objects, an {\it Extensional Equality\/} is defined for those entities which are not $m$-atoms on the following grounds: for all $x$ and $y$, if they are not $m$-atoms, then $$x =_{E} y := \forall z ( z \in x \Leftrightarrow z \in y ) \vee (M(x) \wedge M(y) \wedge x \equiv y).$$
It is possible to prove that $=_{E}$ has all the properties of classical identity in a first order theory and so these properties hold regarding $M$-atoms and `sets'. In this text, all references to `$=$' (in quasi-set theory) stand for `$=_E$', and similarly `$\leq$' and `$\geq$' stand, respectively, for `$\leq_E$' and `$\geq_E$'. Among the specific axioms of ${\cal Q}$, few of them deserve explanation. The other axioms are adapted from ZFU.
For instance, to form certain elementary quasi-sets, such as those containing `two' objects, we cannot use something like the usual `pair axiom', since its standard formulation assumes identity; we use the weak relation of indistinguishability instead:
The `Weak-Pair' Axiom - For all $x$ and $y$, there exists a quasi-set whose elements are the indistinguishable objects from either $x$ or $y$. In symbols,
$$\forall x \forall y \exists_{Q} z \forall t (t \in z \Leftrightarrow t \equiv x \vee t \equiv y).$$
Such a quasi-set is denoted by $[x, y]$ and, when $x \equiv y$, we have $[x]$, by definition. We remark that this quasi-set {\it cannot\/} be regarded as the `singleton' of $x$, since its elements are {\it all\/} the objects indistinguishable from $x$, so its `cardinality' (see below) may be greater than $1$. A concept of {\it strong singleton\/}, which plays a crucial role in the applications of quasi-set theory, may be defined.
In ${\cal Q}$ we also assume a Separation Schema, which intuitively says that from a quasi-set $x$ and a formula $\alpha(t)$, we obtain a sub-quasi-set of $x$ denoted by $$[t\in x : \alpha(t)].$$
We use the standard notation with `$\{$' and `$\}$' instead of `$[$' and `$]$' only in the case where the quasi-set is a {\it set\/}.
It is intuitive that the concept of {\it function\/} cannot also be defined in the standard way, so we introduce a weaker concept of {\it quasi-function\/}, which maps collections of indistinguishable objects into collections of indistinguishable objects; when there are no $m$-atoms involved, the concept is reduced to that of function as usually understood. Relations (or {\em quasi-relations\/}), however, can be defined in the usual way, although no order relation can be defined on a quasi-set of indistinguishable $m$-atoms, since partial and total orders require antisymmetry, which cannot be stated without identity. Asymmetry also cannot be supposed, for if $x \equiv y$, then for every relation $R$ such that $\langle x, y \rangle \in R$, it follows that $\langle x, y \rangle =_{E} [[x]] =_{E} \langle y, x \rangle \in R$, by force of the axioms of ${\cal Q}$.
We remark that $[[x]]$ is the same ($=_{E}$) as $\langle x, x \rangle$ by the Kuratowski's definition.
It is possible to define a translation from the language of ZFU into the language of ${\cal Q}$ in such a way that we can obtain a `copy' of ZFU in ${\cal Q}$. In this copy, all the usual mathematical concepts (like those of cardinal, ordinal, etc.) can be defined; the `sets' (in reality, the `${\cal Q}$-sets' which are `copies' of the ZFU-sets) turn out to be those quasi-sets whose transitive closure (this concept is like the usual one) does not contain $m$-atoms.
Although some authors like Weyl (1949) maintain that (in what regards cardinals and ordinals) ``the concept of ordinal is the primary one'', quantum mechanics seems to present strong arguments for questioning this thesis, and the idea of presenting collections which have a cardinal but not an ordinal is one of the most basic and important assumptions of quasi-set theory.
The concept of {\it quasi-cardinal\/} is taken as primitive in ${\cal Q}$, subject to certain axioms that permit us to operate with quasi-cardinals in a similar way to that of cardinals in standard set theories. Among the axioms for quasi-cardinality, we mention those below, but first we recall that in ${\cal Q}$, $qc(x)$ stands for the `quasi-cardinal' of the quasi-set $x$, while $Z(x)$ says that $x$ is a {\it set\/} (in ${\cal Q}$). Furthermore, $Cd(x)$ and $card(x)$ mean `$x$ is a cardinal' and `the cardinal of $x$', respectively, defined as usual in the `copy' of ZFU.
Quasi-cardinality - Every qset has a unique quasi-cardinal which is a cardinal (as defined in the `ZFU-part' of the theory) and, if the quasi-set is in particular a set, then this quasi-cardinal is its cardinal {\em stricto sensu}:
$$\forall_{Q} x \exists_{Q} ! y (Cd(y) \wedge y =_{E} qc(x) \wedge (Z(x) \Rightarrow y =_{E} card(x))).$$
Then, every quasi-cardinal is a cardinal and the above expression `there is a unique' makes sense. Furthermore, from the fact that $\emptyset$ is a set, it follows that its quasi-cardinal is 0 (zero).
${\cal Q}$ still encompasses an axiom which says that if the quasi-cardinal of a quasi-set $x$ is $\alpha$, then for every quasi-cardinal $\beta \leq \alpha$, there is a sub-quasi-set of $x$ whose quasi-cardinal is $\beta$, where the concept of {\it sub-quasi-set\/} is like the usual one. In symbols,
The quasi-cardinals of sub-quasi-sets - $$\forall_{Q} x (qc(x) =_{E} \alpha \Rightarrow \forall \beta (\beta \leq_{E} \alpha \Rightarrow \exists_{Q} y (y \subseteq x \wedge qc(y) =_{E} \beta))).$$
Another axiom states that
The quasi-cardinal of the power quasi-set - $$\forall_{Q} x (qc({\cal P}(x)) =_{E} 2^{qc(x)}).$$
\noindent where $2^{qc(x)}$ has its usual meaning.
As remarked above, in ${\cal Q}$ there may exist qsets whose elements are $m$-atoms only, called `pure' qsets. Furthermore, it may be the case that the $m$-atoms of a pure qset $x$ are indistinguishable from one another, in the sense of sharing the indistinguishability relation $\equiv$. In this case, the axiomatization provides the grounds for saying that nothing in the theory can distinguish among the elements of $x$. But, in this case, one could ask what it is that sustains the idea that there is more than one entity in $x$. The answer is obtained through the above-mentioned axioms (among others, of course). Since the power qset of $x$ has quasi-cardinal $2^{qc(x)}$, then if $qc(x) = \alpha$, for every quasi-cardinal $\beta \leq \alpha$ there exists a sub-quasi-set $y \subseteq x$ such that $qc(y) = \beta$, according to the axiom about the quasi-cardinality of the sub-quasi-sets. Thus, if $qc(x) = \alpha \not= 0$, the axiomatization does not forbid the existence of $\alpha$ sub-quasi-sets of $x$ which can be regarded as `singletons'.
Of course the theory cannot prove that these `unitary' sub-quasi-sets (supposing now that $qc(x) \geq 2$) are distinct, since we have no way of `identifying' their elements, but qset theory is compatible with this idea. In other words, it is consistent with ${\cal Q}$ to maintain that $x$ has $\alpha$ elements, which may be regarded as absolutely indistinguishable objects. Since the elements of $x$ may share the relation $\equiv$, they may be further understood as belonging to a same `equivalence class' (for instance, being indistinguishable electrons) but in such a way that we cannot assert either that they are identical or that they are distinct from one another (i.e., they act as `identical electrons' in the physicist's jargon).
We define $x$ and $y$ as {\it similar\/} qsets (in symbols, $Sim(x,y)$) if the elements of one of them are indistinguishable from the elements of the other one, that is, $Sim(x,y)$ if and only if $\forall z \forall t (z \in x \wedge t \in y \Rightarrow z \equiv t)$. Furthermore, $x$ and $y$ are {\it Q-Similar\/} ($QSim(x,y)$) if and only if they are similar and have the same quasi-cardinality. Then, since the quotient qset $x/_{\equiv}$ may be regarded as a collection of equivalence classes of indistinguishable objects, then the `weak' axiom of extensionality is:
Weak Extensionality - \begin{eqnarray} \forall_{Q} x \forall_{Q} y (\forall z (z \in x/_{\equiv} \Rightarrow \exists t (t \in y/_{\equiv} \wedge \, QSim(z,t)) \wedge \forall t(t \in y/_{\equiv} \Rightarrow\nonumber\\ \exists z (z \in x/_{\equiv} \wedge \, QSim(t,z)))) \Rightarrow x \equiv y)\nonumber \end{eqnarray}
In other words, this axiom says that those qsets that have `the same quantity of elements of the same sort' (in the sense that they belong to the same equivalence class of indistinguishable objects) are indistinguishable.
\section{Some Applications}
Quasi-set theory has found its way in the sense of some applications in quantum physics. Here we list some of them:
\begin{enumerate}
\item It has been used (Krause, Sant'Anna, and Volkov, 1999) for an authentic proof of the quantum distributions. By ``authentic proof'' we mean a proof where elementary quantum particles are really considered as non-individuals right at the start. If the physicist says that some particles are indistinguishable (in a sense) and he/she still uses standard mathematics in order to cope with these particles, then something seems not to be sound. For standard mathematics is based on the concept of individuality, in the sense that it is grounded on the very notion of identity.
\item It has been proved (Sant'Anna and Santos, 2000) that even non-individuals may present a classical distribution like Maxwell-Boltzmann's. That is another way to say that a Maxwell-Boltzmann distribution in an ensemble of particles does not entail any ontological character concerning such particles, as it was previously advocated by Nick Huggett (1999).
\item Krause, Sant'Anna, and Volkov (1999) also introduced the quasi-set-theoretical version of the wave-function of the atom of Helium, which is a well known example where indistinguishability plays an important role. Other discussions may be found in the cited reference.
\end{enumerate}
\section{Individualizing Indiscernible Objects}\label{algoritmo}
This section presents the main contribution of the present paper. We introduce an algorithm which allows us to label indiscernible objects in the context of quasi-set theory. The algorithm is given below, followed by its interpretation and discussion.
\begin{description} \item[1.] INPUT $[ x ]$ \item[2.] DO $m =_E 0$ \item[3.] DO $w =_E \emptyset$ \item[4.] DO $m := m + 1$ \item[5.] DO $[ x ] := [ x ] - x'$ \item[6.] DO $w := w\cup [\langle x', m\rangle]$ \item[7.] OUTPUT $w$ \item[8.] IF $[ x ] =_E \emptyset$ THEN GO TO {\bf 10} \item[9.] GO TO {\bf 4} \item[10.] END \end{description}
In the first step, we introduce a finite weak singleton $[ x ]$, i.e., a pure quasi-set with a finite quasi-cardinality (a finite number of elements) where all its elements are indistinguishable objects. Next, we introduce a variable $m$ with an initial value equal to zero and another variable which is an empty quasi-set $w$. In the fourth step we transform $m$ into $m + 1$ (we use as attribution symbol the sign ``$:=$''). In the fifth step we subtract one of the elements of qset $[ x ]$ by means of a difference between the weak singleton $[ x ]$ and the strong singleton $x'$ (a weak singleton whose elements are indistinguishable from $x$ but such that $x'$ has actually just {\em one\/} element). Next we create an ordered pair defined by $x'$ and by the label $m$. In step seven, this ordered pair is an output. Actually the ordered pairs are stored in $w$. In a sense, this works as a data warehousing process. Such process repeats from step {\bf 4} until step {\bf 7} up to the moment when the qset $[ x ]$ is empty.
So, in a sense, it is possible to `individualize' (by means of integer labels $m$) objects that have no individuality in principle. We are seriously tempted to refer to micro-atoms as non-individuals, since the standard identity does not apply to them. Nevertheless, there is nothing within the scope of quasi-set theory that forbids us to attribute labels to micro-atoms, even in a finite collection of indistinguishable micro-atoms.
From the physical point of view this is like saying that two electrons, separated by a large distance between them, may be identified by their coordinates in spacetime. Another interpretation is the following. Consider that we have a $Na$ atom, with its 11 electrons. Consider also that we excite this atom by adding a new electron to it. When the atom emits an electron, there is no way to know if the emitted electron is the `same' one used to excite the $Na$ atom. In this sense, the electrons of the $Na$ atom are indistinguishable; they are non-individuals. But when an electron is emitted, this particle may be labelled as that one which was emitted. For some very restrictive purposes, it is possible to label even non-individuals. But the most important point is that this labelling process is possible even in a formal mathematical framework which was designed specially for applications in quantum theories, namely, quasi-set theory.
So, we advocate that we need to be very careful with respect to the notions of identity and indistinguishability.
\section{A Conclusion}
What is individuality? If we answer this, it seems reasonable to consider, at least in principle, that the notion of non-individuality is also settled. That seems to happen because we intend to consider that a given object is a non-individual if it is not an individual at all. In order to talk about the individuality of an object, do we need to talk about its properties, as suggested by some authors (French, 2004)? Or should the individuality of an object be expressed in terms of its `haecceity' or `primitive thisness', as suggested by Adams (1979)? Actually, I am not concerned with these points in this paper. Otherwise, a much longer discussion should be made. The point that I want to emphasize has to do with the possible relationship between individuality and the labelling process. Dalla Chiara and Toraldo di Francia (1993), for example, refer to quantum physics as `the land of anonymity', in the sense that particles cannot be uniquely labelled. Many other authors have a similar opinion. But I want to advocate a different position on this.
Quasi-set theory is a set theory without identity that allows the existence of collections of objects that are indistinguishable in the sense of the properties of the equivalence relation $\equiv$. In some cases, this indistinguishability collapses to the usual identity. That means that, in some cases, when we say that $x\equiv y$, then we are talking about just {\em one\/} object, named $x$ and $y$. It is usual to consider that such objects are individuals due to their uniqueness. Nevertheless, quasi-set theory allows also the existence of some objects (some micro-atoms) such that $x\equiv y$ does not ensure that we are really talking about one single (or unique) object. Any object $x$ that is indistinguishable from $y$ in this sense cannot be an individual, since it lacks the notion of uniqueness. Nevertheless, physicists usually consider that elementary particles are not individuals in the sense that they cannot be labelled and we cannot keep track of them in spacetime.
But with our algorithm we have proved that even objects that are considered as non-individuals right at the start can be labelled, at least in a weak sense. By weak sense we mean that two ordered pairs of $w$ can always be distinguished by means of the second element of the pair; but, on the other hand, the first coordinates of these two pairs are still indistinguishable in the quasi-set-theoretical sense. So, we think that we have proved (at least in the context of quasi-set theory) that individuality has nothing to do with any labelling process. The fact that we can label some object does not guarantee that this object is an individual in a precise sense.
Obviously, there is a limitation in our algorithm. Since this is a step-by-step algorithm, it works only for finite or denumerable weak singletons $[ x ]$. By denumerable weak singleton we mean a qset $[ x ]$ whose quasi-cardinality is the cardinality of the set of natural numbers. But for any physical interpretation of $[ x ]$, only finite weak singletons make sense. And my concern here is with the notion of individuality (or non-individuality) in physics, with special emphasis on quantum physics.
The ability of labelling non-individuals makes perfect sense from the physical point of view. See, e.g., the Einstein-Podolsky-Rosen Gedanken experiment (Sakurai, 1994), where two distant electrons seem to be strongly connected by some sort of non-local `interaction'. In this experiment the two electrons are formally considered as indistinguishable, due to their entangled quantum state. Nevertheless, someone could say that these electrons can be labelled by their coordinates in the three dimensional space. But the fact that these electrons can be labelled by their space coordinates does not affect the fact that they are truly indistinguishable, i.e., that they are non-individuals. A similar situation happens with electrons in an atom. If an atom has 11 electrons, all of them are considered as non-individuals, although they can be labelled by their respective quantum states, obeying Pauli's Principle of Exclusion.
\section{Open Problems}
Here we present two lists of open problems that we consider worthy of investigation. Our main goal, in the present section, is to propose some ideas for future papers related to quasi-set theory and the problems of non-individuality in quantum mechanics.
The first list has to do with our labelling algorithm.
\begin{enumerate}
\item Is it possible to create some kind of quasi-quasi-set theory where the algorithm introduced in section \ref{algoritmo} does not work? The existence of strong singletons is a crucial aspect in the algorithm. Another point is that the membership relation $\in$ in quasi-set theory is like the usual one. What should we change in quasi-set theory in order to avoid the labelling process of our algorithm? And if this new theory is possible, then it makes sense to talk about ``levels of individuality''. One first level would be in correspondence with the standard view of identity in first order and higher order theories; a second level would be in correspondence to the notion of indistinguishability in quasi-set theory as presented here; and a third level of individuality would be a very weak form where there would exist some kind of indistinguishability such that our algorithm does not apply.
\item If it is possible to create some sort of quasi-quasi-set theory where our algorithm does not work, another question is: can we use this quasi-quasi-set theory in order to ground the foundations of quantum mechanics? Can we derive, e.g., the quantum statistics in this new framework? That would be a way for a better understanding of the meaning of non-individuality among quantum particles.
\end{enumerate}
Our second list concerns related problems about quasi-set theory.
\begin{enumerate}
\item Maxwell-Boltzmann statistics can be derived even in a collection of indiscernible particles (Sant'Anna and Santos, 2000). This means that Maxwell-Boltzmann statistics is not necessarily committed to individuality. What are the physical (or metaphysical) implications of this kind of result?
\item How to derive the spin-statistics theorem (Ryder, 1996) within the scope of quasi-set theory?
\item Some authors have considered that there is a close relationship between indistinguishability and non-locality in quantum theory. One of them is Mandel (1991), who achieved some interesting results in the context of the two-slit experiment. On the other hand Einstein-Podolsky-Rosen (EPR) {\em Gedanken\/} experiment is also one of those outstanding results that have allowed many interesting discussions about the concept of non-locality. We speculate if it is possible to extend the notion of metric space in order to replace the notion of identity by the concept of indistinguishability in the axioms of distance, since nonlocal phenomena occur only between indistinguishable particles (or indistinguishable paths). In metric spaces it is considered that if $p=q$ then the distance between $p$ and $q$ is zero. What if we consider that the distance is zero if and only if $p\equiv q$? In this case, nonlocal phenomena would be more reasonable, since the distance (in this new metric space) between indistinguishable particles would be always null. The philosophical issue here is to consider that space and spacetime are concepts that are closely related to physical systems in the sense that space and spacetime are derived concepts and not primitive ones (da Costa and Sant'Anna, 2001; da Costa and Sant'Anna, 2002).
\end{enumerate}
\end{document} |
\begin{document}
\draft \wideabs{ \title{Classical Analog of Electromagnetically Induced Transparency} \author{C. L. Garrido Alzar$^{1}$, M. A. G. Martinez$^{2}$, and P. Nussenzveig$^{1}$}
\vskip 0.3cm
\address{$^{1}$ Instituto de F\'\i sica, Universidade de S\~ao Paulo, Caixa Postal 66318, 05315-970 S\~ao Paulo, SP, Brazil \\ [0.2cm] $^{2}$ P\'os-gradua\c c\~ao em Engenharia El\'etrica, Universidade Presbiteriana Mackenzie, Rua da Consola\c c\~ao 896, \\ 01302-000, S\~ao Paulo, SP, Brazil} \date{April 2001} \maketitle
\begin{abstract} We present a classical analog for Electromagnetically Induced Transparency (EIT). In a system of just two coupled harmonic oscillators subject to a harmonic driving force we can reproduce the phenomenology observed in EIT. We describe a simple experiment performed with two linearly coupled $RLC$ circuits which can be taught in an undergraduate laboratory class. \end{abstract} \pacs{ } }
\section{Introduction} \label{sec:intro} \indent
Imagine a medium which strongly absorbs a light beam of a certain frequency. Add a second light source, with a frequency that would also be absorbed by the medium, and the medium becomes transparent to the first light beam. This curious phenomenon is called Electromagnetically Induced Transparency, or simply EIT~\cite{harris}. It usually takes place in vapors of three-level atoms. The light sources are lasers which drive two different transitions with one common level. Most authors attribute the effect to quantum interference in the atomic medium, involving two indistinguishable paths leading to the common state. The dispersive properties of the medium are also significantly modified as has been recently demonstrated with the impressive reduction of the group velocity of a light pulse to only 17~m/s~\cite{hau,kash,budker} and the ``freezing'' of a light pulse in an atomic medium~\cite{hau2,lukin}.
In this paper we develop a totally classical analog for Electromagnetically Induced Transparency and present a simple experiment which can be carried out in an undergraduate physics laboratory. The stimulated resonance Raman effect has already been modeled classically in a system of three coupled pendula by Hemmer and Prentiss~\cite{hemmprent88}. Even though many aspects of EIT are already present in the stimulated Raman effect, as can be seen in~\cite{hemmprent88}, at that time EIT had not yet been observed~\cite{harris} and the dispersive features were not considered. Our model involves only two oscillators with linear coupling. The experiment is performed with $RLC$ circuits. The interest of such an experiment and purpose of this paper is to enable undergraduate students to develop physical intuition on coherent phenomena which occur in atomic systems.
\section{Theoretical Model} \label{sec:theory} \indent
We will focus our attention on the simulation of EIT in material media composed of three-level atoms in the so-called $\Lambda$ configuration interacting with two laser fields as shown in Fig.~1.
The quantum states $|1\rangle$ and $|2\rangle$ represent the two ground
states of the atom, and the state $|0\rangle$ is the excited atomic level.
\begin{figure}
\caption{Energy diagram of a three-level $\Lambda$-type atom interacting with two laser beams, coupling the two ground states to a common excited atomic state.}
\label{fig1}
\end{figure}
The laser field coupled to the atomic transition between the states $|1\rangle$
and $|0\rangle$ will be called ``pumping (or pump) laser'', and the laser
coupled to the transition between the states $|2\rangle$ and $|0\rangle$ will be the ``probe laser''. A typical experiment consists of scanning the frequency of the probe laser and measuring its transmitted intensity. In the absence of the pump laser, one observes a standard absorption resonance profile. Under certain conditions, the addition of the pump laser prevents absorption in a narrow portion of the resonance profile, and the transmitted intensity as a function of the probe frequency presents a narrow peak of induced transparency.
The effect depends strongly on the pump beam intensity. Typically the pump laser has to be intense so that the Rabi frequency $\Omega_1$ associated to
the transition from state $|1\rangle$ to $|0\rangle$ is larger than all damping rates present (associated to spontaneous emission from the excited state and other relaxation processes). One of the effects of this pump beam is to induce an AC-Stark splitting of the excited
atomic state. The probe beam will therefore couple state $|2\rangle$ to two states instead of one. If the splitting (which varies linearly with the Rabi frequency $\Omega_1$) is smaller than the excited state width, the two levels are indistinguishable and one expects quantum interference in the probe absorption spectrum. As the Rabi frequency $\Omega_1$ increases, the splitting becomes more pronounced and indistinguishability is lost. The absorption spectrum becomes a doublet called the Autler-Townes doublet. We will keep this image in mind to present a classical system with the same features.
\subsection{EIT-like phenomena with masses and springs} \label{subsec:eitmec} \indent
We will model our atom as a simple harmonic oscillator, consisting of a particle 1, of mass $m_1$, attached to two springs with spring constants $k_1$ and $K$ (see Fig.~2). The spring with constant $k_1$ is attached to a wall, while the other spring is attached to a second particle of mass $m_2$ and initially kept immobile at a fixed position. Particle 1 is also subject to a harmonic force ${\cal F}_s=F e^{-i\ (\omega_{s} t + \phi_s)}$. If we analyze the power transferred from the harmonic source to particle 1, as a function of frequency $\omega_s$, in this situation we will observe the standard resonance absorption profile discussed above (peaked at frequency $\omega_{1}^{2}=(k_{1}+K)/m_{1}$). If we now allow particle 2 to move, subject only to the forces from the spring of constant $K$ and a third spring of constant $k_2$, attached to a wall (see Fig.~2), this profile is modified. As we shall see, depending on the spring constant $K$, we can observe a profile presenting features similar to EIT evolving to an Autler-Townes-like doublet (as a matter of fact, this doublet is simply a normal-mode splitting). For simplicity, we will consider the situation in which $k_1=k_2=k$ and $m_1=m_2=m$.
The physical analogy between our model and the three-level atom coupled to two light fields is simple. As mentioned, the atom is modeled as oscillator 1 (particle 1), with its resonance frequency $\omega_1$. Since we chose $k_1=k_2=k$ and $m_1=m_2=m$, we have the analog of a degenerate $\Lambda$ system. The pump field is simulated by the coupling of oscillator 1 to a second oscillator via the spring of constant $K$ (reminding us of the quantized description of the field, in terms of harmonic oscillators). The probe field is then modeled by the harmonic force acting on particle 1.
\begin{figure}
\caption{Mechanical model used to simulate EIT.}
\label{fig2}
\end{figure}
In order to provide a quantitative description of the system we write the equations of motion of particles 1 and 2 in terms of the displacements $x_1$ and $x_2$ from their respective equilibrium positions:
\begin{eqnarray}
\ddot{x_{1}}(t)+\gamma_{1}\dot{x_{1}}(t)+\omega^{2}x_{1}(t)-
\Omega_{r}^{2}x_{2}(t) = \frac{F}{m} e^{-i\ \omega_{s} t} \nonumber \\ [0.2cm]
\ddot{x_{2}}(t)+\gamma_{2}\dot{x_{2}}(t)+\omega^{2}x_{2}(t)-
\Omega_{r}^{2}x_{1}(t) = 0 \;, \label{eq15} \end{eqnarray}
\noindent where we set $\phi_{s}=0$ for the probe force without loss of generality. We also defined $\Omega_{r}^{2}=K/m$, the frequency associated with the coherent coupling between the pumping oscillator and the oscillator modeling the atom, $\gamma_{1}$ the friction constant associated to the energy dissipation rate acting on particle 1 (which simulates the spontaneous emission from the atomic excited state), and $\gamma_{2}$, the energy dissipation rate of the pumping transition.
Since we are interested in the power absorbed by particle 1 from the probe force, we will seek a solution for $x_{1}(t)$. Let us suppose that $x_{1}(t)$ is of the form
\begin{equation}
x_{1}(t) = N e^{-i\ \omega_{s} t} \;, \label{eq16} \end{equation}
\noindent where $N$ is a constant. After taking a similar expression for $x_{2}(t)$ and substituting in eq.~(\ref{eq15}), we find
\begin{equation}
x_{1}(t)=\frac{(\omega^{2}-\omega_{s}^{2}-
i \gamma_{2} \omega_{s}) F e^{-i\ \omega_{s} t}}
{m[(\omega^{2}-\omega_{s}^{2}-i \gamma_{1} \omega_{s})
(\omega^{2}-\omega_{s}^{2}-i \gamma_{2} \omega_{s})-\Omega_{r}^{4}]} \;. \label{eq17} \end{equation}
Now, computing the mechanical power $P(t)$ absorbed by particle 1 from the probe force ${\cal F}_s$,
\begin{equation}
P(t)=F e^{-i\ \omega_{s} t} \dot{x_{1}}(t) \;, \label{eq18} \end{equation}
\noindent we find for the power absorbed during one period of oscillation of the probe force
\begin{equation}
P_{s}(\omega_{s})=-\frac{2 \pi i F^{2} \omega_{s}
(\omega^{2}-\omega_{s}^{2}-i \gamma_{2} \omega_{s})}
{m[(\omega^{2}-\omega_{s}^{2}-i \gamma_{1} \omega_{s})
(\omega^{2}-\omega_{s}^{2}-i \gamma_{2} \omega_{s})-\Omega_{r}^{4}]} \;. \label{eq19} \end{equation}
In Fig.~3 we show the real part of $P_{s}(\omega_{s})$, for six different values of the coupling frequency $\Omega_{r}$ expressed in frequency units. These curves were obtained using the values $\gamma_{1}=0.1\times 10^{-6}$, $\gamma_{2}=0.4\times 10^{-1}$, and $\omega_0 =\sqrt{k/m}=2.0$, all expressed in the same frequency units. The amplitude $F/m$ was taken equal to $0.1$ force per mass units.
For $\Omega_{r}=0$ we have a typical absorption profile, with a maximum probe power absorption for $\delta=0$, where $\delta$ is the detuning between the probe and the oscillator frequencies ($\delta=\omega_{s}-\omega$). Incrementing the value of $\Omega_{r}$ to 0.1, we observe the appearance of a narrow dip in the absorption profile of the probe power. This zero absorption at the center frequency of the profile is evidence of destructive interference between the normal modes of oscillation of the system~\cite{foot}. A further increment of the coupling frequency leads to the appearance of two peaks in the probe power absorption profile, which are clearly separated for $\Omega_{r}=0.5$. This effect in atomic systems is called the AC-Stark splitting or Autler-Townes doublet.
\begin{figure}
\caption{Frequency dependence of the absorption of the probe energy by particle 1. The values of $\Omega_r$ in each case are (a) 0.0 , (b) 0.1 , (c) 0.2 , (d) 0.3 , (e) 0.4 , and (f) 0.5 , all expressed in frequency units.}
\label{fig3}
\end{figure}
An important fact to be pointed out is that the dissipation rate $\gamma_{2}$, associated to the energy loss of the pumping oscillator, must be much smaller than $\gamma_{1}$ in order to achieve a regime of coherent driving of particle 1's oscillation. In other words, there should be no significant increase of dissipation in the system by adding the pumping oscillator. In the case of EIT~\cite{li}, the condition analogous to $\gamma_2 \ll \gamma_1$ is that the transfer rate of population
from the atomic ground state $|1\rangle$ to $|2\rangle$ should be negligible (see Fig.~1). In both situations, the violation of this condition obstructs the observation of the induced transparency.
Another important result reproduced with this mechanical model is the dispersive behavior of the mass oscillator used to simulate the atom. The dispersive response is contained in the real part of the frequency dependence of the amplitude of oscillation $x_{1}(t)$ of particle 1, given by eq.~(\ref{eq17}). In Fig.~4 we plot this quantity for $\Omega_{r}=0.1$. This value of $\Omega_{r}$ corresponds to the situation when the induced transparency is more pronounced and, as we can see from Fig.~4, the dispersion observed, in the frequency interval where we have the induced transparency, is normal and with a very steep slope.
\begin{figure}
\caption{Dispersive (a) and absorptive (b) responses of the probe power transferred to particle 1, for $\Omega_r=0.1$ frequency units.}
\label{fig4}
\end{figure}
\noindent This result coincides with that reported before in an experimental realization of electromagnetically induced transparency~\cite{xiao}. It is also important to point out that this very steep normal dispersion is responsible for the recently observed slow propagation of light (slow light)~\cite{hau,kash,budker}, and the ``freezing'' of a light pulse in an atomic medium~\cite{hau2,lukin}. It should therefore be possible to observe such propagation effects considering absorption in a medium consisting of a series of mechanical `atom-analogs'.
This theoretical model is not the only classical one to simulate EIT-like phenomena. As mentioned, in this model the pump field is replaced by a harmonic oscillator, simulating a quantum-mechanical description. In most theoretical descriptions of EIT in atomic media, pump and probe fields are classical. The mechanical analog to this description would then involve only one oscillator (one particle of mass $m$). In this analysis, it becomes apparent that EIT arises directly from a destructive interference between the oscillatory forces driving the particle's movement. In order to keep the text simple, we have chosen to not present this description here. Furthermore, it is not related to the experimental results presented below.
\section{EIT-like phenomena in coupled $RLC$ circuits: a simple undergraduate laboratory experiment} \label{sec:eitrlc_exp} \indent
An experiment to observe the predictions of the previous section, although possible, would not be straightforward. Instead, we decided to use the well-known analogy between mechanical oscillators and electric circuits to perform a simple experiment. The electric analog to the system of Fig.~2, is the circuit shown in Fig.~5, where the circuit mesh composed by the inductor $L_{1}$ and the capacitors $C_{1}$ and $C$ simulates the pumping oscillator, and the resistor $R_{1}$ determines the losses associated with that oscillator. The atom is modeled using the resonant circuit formed by the inductor $L_{2}$, the capacitors $C_{2}$ and $C$, and the resistor $R_{2}$ represents the spontaneous radiative decay from the excited level. The capacitor $C$, belonging to both circuit meshes, models the coupling between the atom and the pumping field, and determines the Rabi frequency associated to the pumping transition. In this case, the probe field is simulated by the frequency-tunable voltage source $V_{S}$.
\begin{figure}
\caption{Electric circuit employed to investigate the induced transparency.}
\label{fig5}
\end{figure}
The circuit mesh used to model the atom has only one resonance frequency representing the energy of the atomic excited level. That is to say, the probability of excitation of this circuit will be maximal when the applied harmonic force is on resonance. Since in this case we have two possible paths to accomplish this excitation, we are dealing with the analog of a three-level atom in the $\Lambda$ configuration. Namely, the oscillator corresponding to the `atom-analog' can be excited directly by the applied voltage $V_s$ or by the coupling to the pumping oscillator.
Here again the induced transparency is investigated analyzing the frequency dependence of the power $P_{2}(\omega_{s})$ transferred from the voltage source $V_{S}$ to the resonant circuit $R_{2}L_{2}C_{e2}$,
\begin{equation}
P_{2}(\omega_{s})=\Re\{{\cal V}_{S}{\cal I}_{2}^{*}\} \;, \label{eq20} \end{equation}
\noindent where ${\cal V}_{S}$ and ${\cal I}_{2}$ are respectively the complex representations of $V_{S}$ and $i_{2}(t)$, and the equivalent capacitor $C_{e2}$ is the series combination of $C$ and $C_{2}$:
\begin{equation}
C_{e2}=\frac{C C_{2}}{C+C_{2}} \;. \label{eq21} \end{equation}
Setting $L_{1}=L_{2}=L$ ($m_1=m_2=m$ in the mechanical model) and writing the equations for the currents $i_{1}(t)=\dot{q_{1}}(t)$ and $i_{2}(t)=\dot{q_{2}}(t)$ shown in Fig.~5, we find the following system of coupled differential equations for the charges $q_{1}(t)$ and $q_{2}(t)$:
\begin{eqnarray}
\ddot{q_{1}}(t)+\gamma_{1}\dot{q_{1}}(t)+\omega_{1}^{2}q_{1}(t)-
\Omega_{r}^{2}q_{2}(t) = 0 \nonumber \\
\ddot{q_{2}}(t)+\gamma_{2}\dot{q_{2}}(t)+\omega_{2}^{2}q_{2}(t)-
\Omega_{r}^{2}q_{1}(t) = \frac{V_{S}(t)}{L_{2}} \;. \label{eq22} \end{eqnarray}
\noindent Here $\gamma_i = R_i/L_i$ ($i=1,2$), $\omega_i^2 = 1/(L_i C_{ei})$, and $\Omega_r^2 = 1/(L_2 C)$. These equations coincide with the eqs.~(\ref{eq15}) using the correspondences shown in Table~1 and $\omega_1=\omega_2$. Therefore, both models describe the same physics.
\begin{table}[htb] \begin{center}
\begin{tabular}{|c|c|} \hline Mechanical model & Electrical model \\ \hline \hline $\gamma_{1}$ ($\gamma_{2}$) & $R_{1}/L_1$ ($R_{2}/L_2$) \\ $m_1$ ($m_{2}$) & $L_{1}$ ($L_{2}$) \\ $k_{1}$ ($k_{2}$) & $1/C_{1}$ ($1/C_{2}$) \\ $K$ & $1/C$ \\ $x_{1}$ ($x_{2}$) & $q_{1}$ ($q_{2}$) \\ $(F/m) \, e^{-i\omega_{s}t}$ & ${\cal V}_{S}(t)/L_{2}$ \\ \hline \end{tabular} \end{center} \caption{Correspondences between the mechanical and electrical parameters.} \label{tab1} \end{table}
Once the current $i_{2}(t)$ (or ${\cal I}_{2}$) is known, the expression that determines the power $P_{2}(\omega_{s})$ as a function of the frequency $\omega_{s}$ of the applied voltage, when the switch $SW$ in Fig. 5 is closed, is given by
\begin{equation}
P_{2}(\omega_{s})=\frac{p1(\omega_{s})}
{p1(\omega_{s})^{2}+p2(\omega_{s})^{2}}|A_{S}|^{2} \;, \label{eq23} \end{equation}
\noindent where
\begin{equation}
p1(\omega_{s})=R_{2}+\frac{R_{1}/(\omega_{s} C)^{2}}
{R_{1}^{2}+(\omega_{s}L_{1}-1/(\omega_{s}C_{e1}))^{2}} \label{eq231} \end{equation}
\begin{eqnarray}
p2(\omega_{s}) &=& (\omega_{s} L_{2}-1/(\omega_{s}C_{e2}))- \nonumber \\ [0.15cm]
& & \frac{(1/(\omega_{s} C)^{2})(\omega_{s}L_{1}-1/(\omega_{s}C_{e1}))}
{R_{1}^{2}+(\omega_{s}L_{1}-1/(\omega_{s}C_{e1}))^{2}} \;, \label{eq232}
\end{eqnarray} where $C_{e1}$ is the equivalent capacitor given by the series combination of $C$ and $C_{1}$, and $A_{S}$ represents the amplitude of the applied voltage. On the other hand, when the switch is open we have \begin{equation}
P_{2}(\omega_{s})=\frac{R_{2}}{R_{2}^{2}+
(\omega_{s} L_{2}-1/(\omega_{s}C_{e2}))^{2}}|A_{S}|^{2} \;. \label{eq24} \end{equation}
There are many ways of measuring the power $P_{2}(\omega_{s})$. We have chosen to measure the current flowing through the inductor $L_{2}$, which has the same frequency dependence as $P_{2}(\omega_{s})$. We actually measure the voltage drop across the inductor $L_{2}$ and integrate it to find a voltage proportional to the current flowing through the inductor. This voltage is an oscillatory signal at the frequency $\omega_s$. We are interested in the amplitude of this signal, which we read off an oscilloscope.
In Fig.~6 are presented the experimental amplitudes measured, corresponding to four different values of the coupling capacitor $C$. For each value of the coupling capacitor a measurement was made in two situations: with the switch open (open square) and with the switch closed (open circle). In Table~2 we present the specifications of the electronic components used in the experiment.
\begin{table}[tbh] \begin{center}
\begin{tabular}{|c|c|} \hline Electronic component & Value \\ \hline \hline $R_{1}$ & $0$ Ohms \\ $R_{2}$ & $51.7$ Ohms \\ $L_{1}$ & $1000$ $\mu$H \\ $L_{2}$ & $1000$ $\mu$H \\ $C_{1}$ & $0.1$ $\mu$F \\ $C_{2}$ & $0.1$ $\mu$F \\ \hline \end{tabular} \end{center} \caption{Specifications of the electronic components used in the experiment.} \label{tab2} \end{table}
\begin{figure}\label{fig6}
\end{figure}
As we can see from Fig.~6, in the open switch configuration (no pumping) we have a maximum coupling of electrical power from the voltage source $V_{S}$ to the resonant circuit $R_{2}L_{2}C_{e2}$ at the resonant frequency (zero detuning). When the switch is closed, that is to say, the pumping circuit (pumping force) is acting on the resonant circuit $R_{2}L_{2}C_{e2}$, we have a depression of the absorption of the electrical power of the voltage source at zero detuning. This fact corresponds to the transparent condition, and is more pronounced when the value of the coupling capacitor is reduced, corresponding to an increment of the Rabi frequency of the pumping field in electromagnetically induced transparency studied in atomic systems.
Here, as in the experiments with atoms and light, the observed transparency can be interpreted as a destructive interference. In this case, the interference is between the power given by the voltage source to the resonant circuit $R_{2}L_{2}C_{e2}$, and the power transferred to the same circuit from the other oscillator representing the pumping force. As explained in \S\ref{subsec:eitmec}, it can also be viewed as an interference between the two possible excitation paths corresponding to the normal modes of the coupled oscillators. For the minimum value of $C$ used in our experiment (see Fig.~6(d)), we observe two absorption peaks, as the classical analog of the Autler-Townes effect or AC-Stark splitting, which correspond to the splitting of these normal modes. We should also point out that the smallest coupling value we used (Fig.~6(a)) did not lead to an infinitely narrow transparency peak as would be expected from the value $R_1=0$. This is probably due to internal ``residual'' series resistances of the components we used.
The solid lines shown in Fig.~6 represent the theoretical results obtained using eqs.~(\ref{eq23}) or (\ref{eq24}). These curves are not intended to fit exactly the experimental data since our measurements are affected by the frequency response of the integrator used to derive the voltage proportional to the current in the inductor $L_{2}$. It is not our purpose to analyze in detail the deviations of our system from the ideal model proposed.
It is also possible to measure the dispersive response of our `atom-analog'. One has to analyze the relative phase between the oscillating current flowing through inductor $L_2$ and the phase of the applied voltage. This measurement is best if performed with a lock-in amplifier. Since this equipment is not usually available in undergraduate laboratories, we prefer to describe a simple procedure which does not lead to a real measurement to be compared with the theoretical prediction (Fig.~4). We observe the oscillatory signal on the oscilloscope corresponding to the current flowing through the inductor, with the trigger signal from the voltage source $V_S(t)$. The phase of the sinusoidal signal is observed to ``jump'' as we scan $\omega_s$ across the absorption resonance and transparency region. If we scan $\omega_s$ increasing its value, we observe three abrupt phase variations, with the intermediate one being opposite to the other two. This is exactly what we would expect from Fig.~4.
\section{Conclusion} \label{sec:conclu} \indent
We have shown that EIT can be modeled in a totally classical system. Our results extend the results of Hemmer and Prentiss~\cite{hemmprent88} to EIT-like phenomena, with the use of only two coupled oscillators instead of three. We also deal with the dispersive response of the classical oscillator used to model the three-level atom. We performed an experiment with coupled $RLC$ circuits and observed EIT-like signals and the classical analog of the Autler-Townes doublet as a function of the coupling between the two $RLC$ circuits. The experiment can be performed in undergraduate physics laboratories and should help form physical intuition on coherent phenomena which take place in atomic vapors.
\section*{Acknowledgments} \indent
We acknowledge financial support from the Brazilian agencies FAPESP, CAPES, and CNPq.
\end{document} |
\begin{document}
\title[Schwarz Lemmata]{General Schwarz Lemmata and their applications}
\author{Lei Ni} \address{Lei Ni. Department of Mathematics, University of California, San Diego, La Jolla, CA 92093, USA} \email{lni@math.ucsd.edu}
\begin{abstract} We prove estimates interpolating the Schwarz Lemmata of Royden-Yau and the ones recently established by the author. These more flexible estimates provide additional information on (algebraic) geometric aspects of compact K\"ahler manifolds with nonnegative holomorphic sectional curvature, nonnegative $\operatorname{Ric}_\ell$ or positive $S_\ell$.
{\it Dedicated to Professor Luen-Fai Tam on the occasion of his 70th birthday.}
\end{abstract}
\maketitle
\section{Introduction}
There are many generalizations of the classical Schwarz Lemma on holomorphic maps between unit balls via the work of Ahlfors, Chen-Cheng-Look, Lu, Mok-Yau, Royden, Yau, etc (see \cite{Kobayashi-H} and \cite{Roy, Yau-sch} and references therein). The one obtained by Royden \cite{Roy} states: \begin{theorem}\label{thm-sch-roy}
Let $f: M^m\to N^n$ be a holomorphic map. Assume that the holomorphic sectional curvature of $N$, $H(Y)\le -\kappa |Y|^4, \, \forall Y\in T'N$ and the Ricci curvature of $M$, $\operatorname{Ric}^M(X, \overline{X})\ge -K |X|^2, \, \forall X\in T'M$ with $\kappa, K>0$. Let $d=\dim(f(M))$. Then \begin{equation}\label{eq:sch-roy1}
\|\partial f\|^2(x) \le \frac{2d}{d+1}\frac{K}{\kappa}. \end{equation} \end{theorem}
In \cite{Ni-1807} the author proved a new version which only involves the holomorphic sectional curvature of domain and target manifolds. Recall that for the tangent map $\partial f: T_x'M \to T'_{f(x)}N$ we define its maximum norm square to be \begin{equation}\label{eq:1}
\|\partial f\|^2_0(x)\doteqdot \sup_{v\ne 0}\frac{|\partial f(v)|^2}{|v|^2}. \end{equation}
\begin{theorem}\label{thm:sch1} Let $(M, g)$ be a complete K\"ahler manifold such that the holomorphic sectional curvature $H^M(X)/|X|^4 \ge -K$ for some $K\ge0$. Let $(N^n, h)$ be a K\"ahler manifold such that $H^N(Y)<-\kappa |Y|^4$ for some $\kappa>0$. Let $f:M\to N$ be a holomorphic map. Then \begin{equation}\label{eq:sch-ni}
\|\partial f\|^2_0(x) \le \frac{K}{\kappa}, \forall x\in M, \end{equation} provided that the bisectional curvature of $M$ is bounded from below if $M$ is not compact. In particular, if $K=0$, any holomorphic map $f: M\to N$ must be a constant map. \end{theorem}
The assumption on the bisectional curvature lower bound can be replaced with the existence of an exhaustion function $\rho(x)$ which satisfies that \begin{equation}\label{eq:2}
\limsup_{\rho\to \infty} \left(\frac{|\partial \rho|+[\sqrt{-1}\partial \bar{\partial} \rho]_{+}}{\rho}\right)=0. \end{equation}
The proof uses a viscosity consideration from PDE theory. It is also reminiscent of Pogorelov's Lemma \cite{Pogo} (cf. Lemma 4.1.1 of \cite{Gu}) for Monge-Amp\`ere equation, since the maximum eigenvalue of $\nabla^2 u$ is the $\|\cdot\|_0$ for the normal map $\nabla u$ for any smooth $u$. A consequence of Theorem \ref{thm:sch1} asserts that {\it the equivalence of the negative amplitude of the holomorphic sectional curvature implies the equivalence of the metrics}. Namely if $M^m$ admits two K\"ahler metrics $g_1$ and $g_2$ satisfying that $$
-L_1|X|_{g_1}^4\le H_{g_1}(X)\le -U_1|X|_{g_1}^4, \quad -L_2|X|_{g_2}^4 \le H_{g_2}(X)\le -U_2|X|_{g_2}^4 $$ then for any $v\in T_x'M$ we have the estimates: $$
|v|^2_{g_2}\le \frac{L_1}{U_2}|v|^2_{g_1};\quad |v|^2_{g_1}\le \frac{L_2}{U_1}|v|^2_{g_2}. $$ This result can be viewed as a stability statement of the classical result asserting that a complete K\"ahler manifold with negative constant holomorphic sectional curvature must be a quotient of the complex hyperbolic space form. Motivated by Rauch's work, which inspired much work towards the $1/4$-pinching theorem, and the above stability of K\"ahler metrics, it is natural to ask {\it whether or not a K\"ahler manifold $M$ with its holomorphic sectional curvature being close to $-1$ is biholomorphic to a quotient of the complex hyperbolic space.} Besides the Liouville type theorem for holomorphic maps into manifolds with negative holomorphic sectional curvature, we shall show in Section 5 further implications of this estimate towards the structure of the fundamental groups of manifolds with nonnegative holomorphic sectional curvature.
Before we state another recent result of the author we first recall some basic notions from Grassmann algebra \cite{Fede, Whit}. Let $\mathbb{C}^m$ be a complex Hermitian space (later we will identify the holomorphic tangent spaces $T_x'M$ and $T'_{f(x)}N$ with $\mathbb{C}^m$ and $\mathbb{C}^n$). Let $\wedge^\ell \mathbb{C}^m$ be the space of $\ell$-multi-vectors $\{v_1\wedge\cdots \wedge v_\ell\}$ with $v_i\in \mathbb{C}^m$. For ${\bf{a}}=v_1\wedge\cdots \wedge v_\ell, {\bf{b}}=w_1\wedge \cdots \wedge w_\ell$, the inner product can be defined as $\langle {\bf{a}}, \overline{{\bf{b}}}\rangle =\det(\langle v_i, \bar{w}_j\rangle)$. This endows $\wedge^\ell \mathbb{C}^m$ with a Hermitian structure, hence a norm $|\cdot|$. There are also other norms, such as the {\it mass} and the {\it comass}, which shall be denoted as $|\cdot|_0$ as in \cite{Whit}, and could be useful for some problems. We refer to \cite{Whit}, Sections 13, 14, for detailed discussions. Assume that $f: (M^m, g)\to (N^n, h)$ is a holomorphic map between two K\"ahler manifolds. Let $\partial f: T'M\to T'N$ be the tangent map. Let $\Lambda^\ell \partial f:\wedge^\ell T_x'M \to \wedge^\ell T_{f(x)}'N$ be the associated map defined as
$\Lambda^\ell \partial f( v_1\wedge \cdots \wedge v_\ell)=\partial f(v_1)\wedge\cdots\wedge \partial f(v_\ell)$. Define $\|\cdot\|_0$ as $$\|\Lambda^\ell \partial f\|_0(x) \doteqdot\sup_{{\bf a}=v_1\wedge\cdots\wedge v_\ell\ne 0, {\bf a}\in \wedge^\ell T_x'M} \frac{|\Lambda^\ell \partial f({\bf a})|}{|{\bf a}|}.$$
The notation $\|\cdot\|_0$ is adopted to be consistent with the comass notion in \cite{Whit}.
By the singular value decomposition, we may choose normal coordinates centered at $x_0$ and $f(x_0)$ such that at $x_0$, $df\left(\frac{\partial\, }{\partial z^{\alpha}}\right)=\lambda_\alpha \delta_{i\alpha} \frac{\partial\, }{\partial w^i}$. If we order $\{\lambda_\alpha\}$ such that $|\lambda_1|\ge |\lambda_2|\ge \cdots \ge |\lambda_m|$, $\|\Lambda^\ell \partial f\|_0(x_0)=|\lambda_1\cdots\lambda_\ell|$. It is also easy to see that $\|\partial f \|^2 \doteqdot g^{\alpha\bar{\beta}}h_{i\bar{j}}\frac{\partial f^i}{\partial z^\alpha} \overline{ \frac{\partial f^j}{\partial z^\beta}}=\sum_{\alpha=1}^m |\lambda_\alpha|^2$. The following was proved in Corollary 3.4 of \cite{Ni-1807}.
\begin{theorem}\label{thm-schni2} Let $f:M^m\to N^n$ ($m\le n$) be a holomorphic map with $M$ being a complete manifold. Assume that $\operatorname{Ric}^M $ is bounded from below and the scalar curvature $S^M(x)\ge -K$. Assume further that $\operatorname{Ric}^N_m(x)\le -\kappa<0$. Then we have the estimate $$
\|\Lambda^m \partial f\|^2_0(x)\le \left(\frac{K}{m\kappa}\right)^m. $$ \end{theorem}
Here recall that in \cite{Ni-1807} $\operatorname{Ric}(x, \Sigma)$ is defined as the Ricci curvature of the curvature tensor restricted to the $k$-dimensional subspace $\Sigma\subset T_x'M$. Precisely for any $v\in \Sigma$, $\operatorname{Ric}(x, \Sigma)(v,\bar{v})\doteqdot \sum_{i=1}^k R(E_i,\overline{E}_i, v,\bar{v})$ with $\{E_i\}$ being a unitary basis of $\Sigma$. We say that $\operatorname{Ric}_k(x)<0$ if $\operatorname{Ric}(x, \Sigma)<0$ for every k-dimensional subspace $\Sigma$. Clearly $\operatorname{Ric}_k(x)<0$ implies that $S_k(x)<0$, and it coincides with $H$ when $k=1$, with the Ricci curvature $\operatorname{Ric}$ when $k=\dim(N)$. Here $S_k(x, \Sigma)$ is defined to be the scalar curvature of the curvature operator restricted to $\Sigma\subset T'_x N$. One can refer to \cite{Ni-1807, Ni, Ni-Zheng2} for the definitions and related results on the geometric significance of $\operatorname{Ric}_\ell$ and $S_\ell$.
Note that Theorem \ref{thm-schni2} has at least two limitations in studying holomorphic maps. The first is that it applies only to the case that $\dim(N)$, the dimension of the target manifold, is at least as big as the dimension of the domain. The second limitation is that it can only be applied to detect whether or not the map is full-dimensional, namely whether $\dim(f(M))=\dim(M)$. The first goal of this paper is to prove a family of estimates for holomorphic maps between K\"ahler manifolds containing the above three results as special cases. The result below removes the above-mentioned constraints of Theorem \ref{thm-schni2}.
\begin{theorem}\label{thm:main1} Let $f:M^m\to N^n$ be a holomorphic map with $M$ being a complete manifold. When $M$ is noncompact assume either the bisectional curvature is bounded from below or (\ref{eq:2}) holds for some exhaustion function $\rho$. Let $\ell\le \dim(M)$ be a positive integer.
(i) Assume that the holomorphic sectional curvature of $N$, $H^N(Y)\le -\kappa |Y|^4$ and $M$ has, $\operatorname{Ric}_\ell^M\ge -K $, for some $K\ge 0, \kappa >0$. Then $$ \sigma_\ell(x)\le \frac{2\ell'}{\ell'+1}\frac{K}{\kappa} $$
where $\sigma_\ell(x)=\sum_{\alpha=1}^\ell |\lambda_\alpha|^2(x)$, and $\ell'=\min\{\ell, \dim(f(M))\}$. In particular, if $K=0$, the map $f$ must be a constant.
(ii) Assume that $S^M_\ell(x)\ge -K$ and that $\operatorname{Ric}^N_\ell(x)\le -\kappa$ for some $K\ge 0, \kappa >0$. Then $$
\|\Lambda^\ell \partial f\|^2_0(x)\le \left(\frac{K}{\ell\kappa}\right)^\ell. $$ In particular, if $K=0$, the map $f$ has rank smaller than $\ell$. \end{theorem} Note that part (i) above recovers Theorem \ref{thm-sch-roy} for $\ell=\dim(M)$, and recovers Theorem \ref{thm:sch1} for $\ell=1$. Hence it provides a family of estimates interpolating between Theorem \ref{thm-sch-roy} and \ref{thm:sch1}. Similarly part (ii) recovers Theorem \ref{thm-schni2} when $\ell=\dim(M)$, and recovers Theorem \ref{thm:sch1} for $\ell=1$, noting that in the case $\ell=\dim(M)$, the assumption on the lower bound of bisectional curvature can be weakened to a lower bound of the Ricci curvature (from the proof this is obvious). Hence part (ii) provides a family of estimates interpolating between Theorem \ref{thm:sch1} and \ref{thm-schni2}. Part (ii) also implies that any K\"ahler manifold with $\operatorname{Ric}_\ell\le -\kappa<0$ must be $\ell$-hyperbolic, a result proved in \cite{Ni-1807}. Moreover it can also be applied to $M$ with $\dim(M)>\ell$ or even $\dim(M)>\dim(N)$ concluding more detailed degeneracy information of the map, re-enforcing the relationship between the $\ell$ dimensional ``holomorphic" area of $N$ and the $\operatorname{Ric}^N_\ell$.
The proof of the result (in Section 4) is built upon extensions of $\partial\bar{\partial}$-Bochner formulae of \cite{Ni-1807}, which are proved in Section 3 after some preliminaries in Section 2. In Section 5 we show that the estimates can be used to rule out the existence of certain holomorphic mappings under some curvature conditions (cf. Theorem \ref{thm:51}). In particular Theorem \ref{thm:sch1} (cf. Corollary 5.4 of \cite{Ni-1807}) implies that {\it if a compact K\"ahler manifold $(M, g)$ has $H\ge 0$, then there is no onto homomorphism from its fundamental group to the fundamental group of any oriented Riemann surface (complex curve) of genus greater than one.} The more flexible Theorem \ref{thm:main1} extends this statement to include all K\"ahler manifolds with $\operatorname{Ric}_\ell \ge0$ (for some $\ell\in \{1, \cdots, m\}$). Note that a similar statement was proved for Riemannian manifold with positive isotropic curvature in \cite{FW}. In \cite{Tsu, Ni} it was proved that if the holomorphic sectional curvature $H>0$ or more generally $\operatorname{Ric}_\ell>0$ then $\pi_1(M)=\{0\}$. The result here provides some information for the nonnegative case. Note that the examples in \cite{Hitchin} indicate that the class of K\"ahler manifolds with $H>0$ (most of them are not Fano) seems to be much larger than that with $\operatorname{Ric}>0$. There has been very little known for manifold $M$ with $H\ge 0$ (or $\operatorname{Ric}_\ell\ge 0$ for $\ell<\dim(M)$) comparing with the situation for compact manifolds with $\operatorname{Ric}\ge 0$. In fact when $M$ is a compact K\"ahler manifold with nonnegative bisectional curvature, Mok's classification result \cite{Mok} implies that the fundamental group $\pi_1(M)$ must be a Bieberbach one. In Corollary 5.1 of \cite{Ni-Tam} a paper by Tam and the author, this was extended (as a result of F. Zheng) to the case when $M$ is a non-compact complete K\"ahler manifold, but under the nonnegativity of sectional curvature. 
For compact Riemannian manifolds with nonnegative Ricci curvature Cheeger-Gromoll \cite{CG} proved that $\pi_1(M)$ must be a finite extension of a Bieberbach group. {\it Could this be proven for a compact K\"ahler manifold with $\operatorname{Ric}_\ell \ge 0$ with $\ell<\dim(M)$}? Note that such a statement can not be possibly true for K\"ahler manifold with $B^\perp\ge 0$ (hence nor with $\operatorname{Ric}^\perp\ge 0$). In a recent preprint \cite{Mu}, the question has been answered positively for $H\ge 0$, assuming additionally that $M$ is a projective variety. Given that there are many non-algebraic K\"ahler manifolds with $H\ge 0$, our result for general K\"ahler manifolds is not contained in \cite{Mu}.
In \cite{ABCKT}, two invariants were defined for a K\"ahler manifold $M$. One is the so-called Albanese dimension $a(M)\doteqdot \dim_{\mathbb{C}}(Alb(M))$ (we use the complex dimension instead), the dimension of the image of the Albanese map $Alb: M\to \mathbb{C}^{\dim(H^{1,0}(M))}/H_1(M, \mathbb{Z})$. The other invariant is the genus of $M$, $g(M)$ which is defined as the maximal $\dim(U)$ with $U$ being an isotropic subspace of $H^1(M, \mathbb{C})$. The above consequence of Theorem \ref{thm:51} can be rephrased as that for $M$ with $H^M(X)\ge 0$, or more generally $\operatorname{Ric}_\ell\ge 0$, we must have $g(M)\le 1$. The same conclusion is obtained in Section 6 for K\"ahler manifold $M$ with the Picard number $\rho(M)=1$ and $S_2>0$, or $h^{1,1}(M)=1$. A corollary of Theorem \ref{thm:51} concludes that if $S^M_\ell>0$, then $a(M)\le \ell-1$. ( This is also a consequence of the vanishing theorem proved in \cite{Ni-Zheng2}.) These results endow the curvature $\operatorname{Ric}_\ell$ and $S_\ell$ some algebraic geometric/topological implications.
In Section 5 we also illustrate that the $C^2$-estimate for the complex Monge-Amp\`ere equation is a special case of our computation in Section 3. In Section 6 we derive some estimates on the minimal ``energy" needed for a non-constant holomorphic map between certain K\"ahler manifolds extending earlier results in \cite{Ni-1807}.
\section{Preliminaries} We collect some needed algebraic results. For holomorphic map $f: (M^m, g)\to (N^n, h)$, let $\partial f(\frac{\partial\ }{\partial z^{\alpha}})=\sum_{i=1}^n f^i_{\alpha} \frac{\partial\ }{\partial w^i}$ with respect to local coordinates $(z^1, \cdots, z^m)$ and $(w^1, \cdots, w^n)$. The Hermitian form $A_{\alpha\bar{\beta}}dz^\alpha \wedge dz^{\bar{\beta}}$ with
$A_{\alpha\bar{\beta}}=f^i_{\alpha} \overline{f^j_\beta} h_{i\bar{j}}$ is the pull-back of K\"ahler form $\omega_h$ via $f$. By the singular value decomposition for $x_0\in M$ and $f(x_0)\in N$ we may choose normal coordinates centered at $x_0$ and $f(x_0)$ such that $\partial f(\frac{\partial\ }{\partial z^\alpha})=\lambda_\alpha \delta^i_{\alpha} \frac{\partial\ }{\partial w^i}$. Then $|\lambda_\alpha|$ are the singular values of $\partial f: (T'_{x_0}M, g) \to (T_{f(x_0)}'N, h)$. It is easy to see that $|\lambda_1|^2 \ge \cdots \ge |\lambda_m|^2$ are the eigenvalues of $A$ (with respect to $g$).
\begin{proposition} \label{prop:21} For any $1\le \ell\le m$ the following holds $$
\sigma_\ell\doteqdot \sum_{\alpha=1}^\ell |\lambda_\alpha|^2 \ge \sum_{1\le \alpha, \beta\le \ell} g^{\alpha \bar{\beta}}A_{\alpha\bar{\beta}}\doteqdot U_\ell. $$ \end{proposition} \begin{proof} Arguing invariantly we choose unitary basis of $T'_{x_0}M$ with respect to $g$. Then the left hand side is the partial sum of the eigenvalues of $A$ in descending order, and the right hand side is the trace of the first $\ell\times \ell$ block of $(A_{\alpha\bar{\beta}})$. Hence the result is well-known (cf. \cite{Horn-Johnson}, Corollary 4.3.34). \end{proof}
For a linear map $L: \mathbb{C}^m\to \mathbb{C}^n$ between two Hermitian linear spaces, $\Lambda^\ell L: \wedge^\ell \mathbb{C}^m \to \wedge^\ell \mathbb{C}^n$ is defined as the linear extension of the action on simple vectors: $\Lambda^\ell L({\bf{a}})\doteqdot L(v_1)\wedge\cdots\wedge L(v_\ell)$ with ${\bf{a}}=v_1\wedge\cdots \wedge v_\ell$. The metric on $\wedge^\ell \mathbb{C}^m$ is defined as $\langle {\bf{a}}, \overline{{\bf{b}}}\rangle =\det(\langle v_i, \bar{w}_j\rangle)$. If $\{e_\alpha\}$ is a unitary frame of $\mathbb{C}^m$, the $\{e_{\lambda}\}$, with $\lambda=(\alpha_1, \cdots, \alpha_\ell)$, $\alpha_1\le \cdots \le \alpha_\ell$, being the multi-index, and $e_{\lambda}=e_{\alpha_1}\wedge \cdots\wedge e_{\alpha_\ell}$, is a unitary frame for $\wedge^\ell \mathbb{C}^m$. The Binet-Cauchy formula implies that this is consistent with the Hermitian product $\langle {\bf{a}}, \overline{{\bf{b}}}\rangle$ defined in the previous section. The norm $\|\Lambda^\ell L\|_0$ is the operator norm with respect to the Hermitian structures of $\wedge^\ell \mathbb{C}^m$ and $\wedge^\ell \mathbb{C}^n$ defined above, which equals the Jacobian of a Lipschitz map $f$, when $\ell=m$ or $n$, applied to $L=\partial f$ (cf. Section 3.1 of \cite{Fede}).
For the local Hermitian matrices $A=(A_{\alpha\bar{\beta}})$ and $G=(g_{\alpha\bar{\beta}})$ we denote by $A_\ell$ and $G_{\ell}$ the upper-left $\ell\times \ell$ blocks of them.
\begin{proposition}\label{prop:22} For any $1\le \ell\le m$ the following holds: \begin{eqnarray}
\|\Lambda^\ell \partial f\|_0^2=\Pi_{\alpha=1}^\ell |\lambda_\alpha|^2&\ge& \frac{\det(A_\ell)}{\det(G_\ell)}\doteqdot W_\ell; \label{eq:21} \end{eqnarray} \end{proposition} \begin{proof} For the inequality in (\ref{eq:21}), as in the above proposition we may choose a unitary frame of $T'_{x_0}M$ such that $G=\operatorname{id}$. Then the claimed result is also a well-known statement about the partial products of the descending eigenvalues. The result can be seen by applying 4.1.6 of \cite{MM} to $(A+\epsilon G)^{-1}$ and let $\epsilon \to 0$ (see also Problem 4.3.P15 of \cite{Horn-Johnson}).
For the equality (\ref{eq:21}), first observe that $$
\|\Lambda^\ell \partial f\|^2_0(x) \ge \frac{| \partial f\left(v_1\right)\wedge \cdots \wedge \partial f\left(v_\ell\right)|^2}{|v_1\wedge\cdots \wedge v_\ell|^2}=\Pi_{\alpha=1}^\ell |\lambda_\alpha|^2 $$
if $\{v_\alpha \}$ are the eigenvectors of $A$ with eigenvalues $\{|\lambda_\alpha|^2\}$. On the other hand for general orthonormal vectors $\{v_\alpha\}$, the above paragraph implies $ \frac{| \partial f\left(v_1\right)\wedge \cdots \wedge \partial f\left(v_\ell\right)|^2}{|v_1\wedge\cdots \wedge v_\ell|^2}\le \Pi_{\alpha=1}^\ell |\lambda_\alpha|^2 $. Combining them we have the equality in (\ref{eq:21}). \end{proof}
\section{$\partial\bar{\partial}$-Bochner formulae}
Here we generalize the $\partial\bar{\partial}$-Bochner formula derived in \cite{Ni-1807} on $\|\partial f\|^2$ and $\|\Lambda^m \partial f\|_0^2$ to $\sigma_\ell$ and $\|\Lambda^\ell \partial f\|_0^2$. Since both $\sigma_\ell(x)$ and $\|\Lambda^\ell \partial f\|_0^2(x)$ are only continuous in general, we first derive formulae on their barriers supplied by Propositions \ref{prop:21}, \ref{prop:22}.
\begin{proposition}\label{prop:31} Under the normal coordinates near $x_0$ and $f(x_0)$ such that $\partial f(\frac{\partial\ }{\partial z^\alpha})=\lambda_\alpha \delta^i_{\alpha} \frac{\partial\ }{\partial w^i}$ with $|\lambda_1|\ge \cdots\ge |\lambda_\alpha|\ge \cdots \ge |\lambda_m|$ being the singular values of $\partial f: (T'_{x_0}M, g) \to (T_{f(x_0)}'N, h)$, let $U_\ell(x)$ and $W_\ell(x)$ be the functions defined in the last section in a small neighborhood of $x_0$. Then at $x_0$, for $v\in T'_{x_0}M$, and nonzero $U_\ell$ and $W_\ell$, \begin{eqnarray}
\langle \sqrt{-1}\partial \bar{\partial} \log U_\ell, \frac{1}{\sqrt{-1}}v\wedge \bar{v}\rangle &=&\frac{U_\ell \sum_{1\le i\le n, 1\le \alpha \le \ell} |f^i_{\alpha v}|^2-|\sum_{\alpha =1}^\ell \overline{\lambda_\alpha}f^\alpha_{\alpha v}|^2}{U_\ell^2}\label{eq:31}\\
&\quad&+\sum_{\alpha=1}^\ell \frac{|\lambda_\alpha|^2}{U_\ell}(-R^N(\alpha, \bar{\alpha}, \partial f(v), \overline{\partial f(v)})+R^M(\alpha, \bar{\alpha}, v, \bar{v}));\nonumber \\
\langle \sqrt{-1}\partial \bar{\partial} \log W_\ell, \frac{1}{\sqrt{-1}}v\wedge \bar{v}\rangle &=& \sum_{\alpha=1}^\ell \sum_{ \ell+1\le i \le n} \frac{|f^i_{\alpha v}|^2}{|\lambda_\alpha|^2}\label{eq:32} \\ &\quad& + \sum_{\alpha=1}^\ell (- R^N(\alpha, \bar{\alpha}, \partial f(v), \overline{\partial f(v)})+R^M(\alpha,\bar{\alpha}, v, \bar{v})). \nonumber \end{eqnarray} \end{proposition} \begin{proof} The calculation is similar to that of \cite{Ni-1807}. Here we include the details of the first. Choose holomorphic normal coordinates $(z_1, z_2, \cdots, z_m)$ near a point $p$ on the domain manifold $M$, correspondingly $(w_1, w_2, \cdots, w_n)$ near $f(p)$ in the target. Let $\omega_g=\sqrt{-1}g_{\alpha\bar{\beta}}dz^\alpha\wedge d\bar{z}^{\beta}$ and $\omega_h=\sqrt{-1}h_{i\bar{j}}dw^i\wedge d\bar{w}^{j}$ be the K\"ahler forms of $M$ and $N$ respectively. Correspondingly, the Christoffel symbols are given by $$ ^M\Gamma_{\alpha \gamma}^\beta =\frac{\partial g_{\alpha \bar{\delta}}}{\partial z^{\gamma}}g^{\bar{\delta}\beta}=\Gamma_{\gamma \alpha }^\beta; \quad \quad ^N\Gamma_{i k}^j = \frac{\partial h_{i \bar{l}}}{\partial w^{k}}h^{\bar{l}j}=\Gamma_{k i }^j. $$ We always use Einstein's convention when there is a repeated index. The symmetry in the Christoffel symbols is due to K\"ahlerity. If the appearance of the indices can distinguish the manifolds we omit the superscripts $^M$ and $^N$. Correspondingly the curvatures are given by $$ ^MR^\beta_{\alpha \bar{\delta} \gamma}=-\frac{\partial}{\partial \bar{z}^{\delta}} \Gamma_{\alpha \gamma}^\beta; \quad \quad \quad \,^NR^j_{i \bar{l} k}=-\frac{\partial}{\partial \bar{w}^{l}} \Gamma_{i k}^j. $$ At the points $x_0$ and $f(x_0)$, where the normal coordinates are centered, we have that $$ R_{\bar{\beta}\alpha \bar{\delta} \gamma}=-\frac{\partial^2 g_{\bar{\beta}\alpha}}{\partial z^{\gamma}\partial \bar{z}^{\delta}}; \quad \quad R_{\bar{j}i \bar{l} k}=-\frac{\partial^2 h_{\bar{j}i}}{\partial w^{k}\partial \bar{w}^{l}}. 
$$ Direct calculation shows that at the point $x_0$ (here repeated indices $\alpha, \beta $ are summed from $1$ to $\ell$, while $i, j, k, l$ are summed from $1$ to $n$) \begin{eqnarray*} (\log U_\ell)_\gamma &=&\frac{g^{\alpha \bar{\beta}}_{\quad, \gamma} A_{\alpha\bar{\beta}}+g^{\alpha \bar{\beta}}f^i_{\alpha \gamma}h_{i\bar{j}}\overline{f^j_\beta}+g^{\alpha \bar{\beta}}f^i_{\alpha }\overline{f^j_\beta}f^k_\gamma h_{i\bar{j}, k} }{U_\ell}=\frac{f^i_{\alpha\gamma}\overline{f^i_\alpha}}{U_\ell};\\ (\log U_\ell)_{\bar{\gamma}} &=&\frac{g^{\alpha \bar{\beta}}_{\quad, \bar{\gamma}} A_{\alpha\bar{\beta}}+g^{\alpha \bar{\beta}}f^i_{\alpha}h_{i\bar{j}}\overline{f^j_{\beta\gamma}}+g^{\alpha \bar{\beta}}f^i_{\alpha }\overline{f^j_\beta}\overline{f^k_\gamma} h_{i\bar{j}, \bar{k}} }{U_\ell}=\frac{\overline{f^i_{\alpha\gamma}}f^i_\alpha}{U_\ell};\\
\left(\log U_\ell\right)_{\gamma \bar{\gamma}}&=& \frac{R^M_{\alpha\bar{\beta}\gamma\bar{\gamma}}f^i_\alpha \overline{f^i_\beta}+|f^i_{\alpha \gamma}|^2-R^N_{i\bar{j}k\bar{l}}f^i_\alpha \overline{f^j_\beta}f^k_\gamma \overline{f^l_\gamma}}{U_\ell}-\frac{|\sum_{1\le \alpha\le \ell; 1\le i\le n} f^i_{\alpha \gamma}\overline{f^i_\alpha}|^2}{U_\ell^2}. \end{eqnarray*} The claimed equation then follows. \end{proof}
\begin{corollary} Let $f: M\to N$ be a holomorphic map between two K\"ahler manifolds.
(i) If the bisectional curvature of $N$ is non-positive and the bisectional curvature of $M$ is nonnegative, then $\log \sigma_\ell(x)$ is a plurisubharmonic function.
(ii) Assume that $\operatorname{Ric}^N_\ell\le 0$ and $\operatorname{Ric}^M_\ell\ge0$. If $\|\Lambda^\ell \partial f\|^2_0$ not identically zero, then for every $x$, there exists a $\Sigma \subset T_x'M$ with $\dim(\Sigma)\ge \ell$ such that $\log \|\Lambda^\ell \partial f\|^2_0(x)$ is plurisubharmonic on $\Sigma$. \end{corollary}
\section{Proof of Theorem \ref{thm:main1}}
Since in general $\sigma_\ell$ and $\|\Lambda^\ell \partial f\|_0$ are not smooth we adopt the viscosity consideration as in Section 5 of \cite{Ni-1807} to prove the result. We also need to modify the algebraic argument in the Appendix of \cite{Ni-1807} for some point-wise estimates needed. Another difference of the argument is that we shall apply the maximum principle to a degenerate operator. First we need a Royden type lemma.
\begin{lemma}\label{lem:41} If the holomorphic sectional curvature $R^N$ has an upper bound $-\kappa$, with respect to the normal coordinates as in Proposition \ref{prop:21} at $x_0$ (and $f(x_0)$), $$ \sum_{1\le \alpha,\beta, \gamma, \delta\le \ell} g^{\alpha \bar{\beta}}g^{\gamma \bar{\delta}}R^N_{i\bar{j}k\bar{l}}f^i_{\alpha} \overline{f^{j}_\beta} f^k_\gamma \overline{f^l_\delta} \le -\frac{\ell'+1}{2\ell'} \kappa U^2_\ell, \mbox{ when }\kappa>0; \quad \le -\kappa U^2_\ell \mbox{ when } \kappa\le 0. $$ Here $\ell'=\min\{ \ell, \dim(f(M))\}$. \end{lemma}
\begin{proof} We follow the argument in Appendix of \cite{Ni-1807}, which is due to F. Zheng. The left hand side can be written as $ \sum_{1\le \alpha, \beta\le \ell'} R^N_{\alpha\bar{\alpha}\beta\bar{\beta}}|\lambda_\alpha|^2|\lambda_\beta|^2$. In the space $$\Sigma\doteqdot \operatorname{span} \{ \partial f\left( \frac{\partial\ }{\partial z^1}\right), \cdots, \partial f\left( \frac{\partial\ }{\partial z^{\ell'}}\right)\}, $$ consider the vector $Y=\sum_{1\le i \le \ell'} w^i\lambda_i \frac{\partial \ }{\partial w^i}$ with $(w^1, \cdots, w^{\ell'})\in \mathbb{S}^{2\ell'-1}\subset \Sigma$. Then direct calculations show that \begin{eqnarray*}
\sum_{1\le \alpha, \beta\le \ell'} R^N_{\alpha\bar{\alpha}\beta\bar{\beta}}|\lambda_\alpha|^2|\lambda_\beta|^2&=&\frac{\ell' (\ell'+1)}{2}\cdot \frac{1}{Vol (\mathbb{S}^{2\ell'-1})}\int_{\mathbb{S}^{2\ell'-1}} R^N(Y, \overline{Y}, Y,\overline{Y})\\
&\le & -\kappa \frac{\ell' (\ell'+1)}{2}\cdot \frac{1}{Vol (\mathbb{S}^{2\ell'-1})}\int_{\mathbb{S}^{2\ell'-1}} |Y|^4\\
&=& \frac{-\kappa }{2}\left(U_\ell^2+\sum_{1\le \alpha \le \ell'} |\lambda_\alpha|^4\right). \end{eqnarray*}
The result follows from elementary inequalities $\sum_{1\le \alpha \le \ell'} |\lambda_\alpha|^4\le U_\ell^2 \le\ell'\, \sum_{1\le \alpha \le \ell'} |\lambda_\alpha|^4$. \end{proof}
To prove part (i), let $\eta(t):[0, +\infty)\to [0, 1]$ be a function supported in $[0, 1]$ with $\eta'=0$ on $[0, \frac{1}{2}]$, $\eta' \le 0$, $\frac{|\eta'|^2}{\eta}+(-\eta'')\le C_1$. The construction of such $\eta$ is elementary. Let $\varphi_R(x)=\eta(\frac{r(x)}{R})$. When the meaning is clear we omit subscript $R$ in $\varphi_R$. Clearly $\sigma_\ell \cdot \varphi$ attains a maximum somewhere at $x_0$ in $B_p(R)$. With respect to the normal coordinates near $x_0$ and $f(x_0)$, $(U_\ell \varphi)(x_0)=(\sigma \varphi)(x_0)$, and $(U_\ell\varphi)(x)\le (\sigma_\ell \varphi)(x)\le (\sigma\varphi)(x_0)\le (U_\ell \varphi)(x_0)$ for $x$ in the small normal neighborhood. The maximum principle then implies that at $x_0$ $$ \nabla (U_\ell \varphi)=0; \sum_{1\le \alpha \le \ell} \frac{1}{2}(\nabla_{\alpha}\nabla_{\bar{\alpha}} +\nabla_{\bar{\alpha}}\nabla_{\alpha}) \log (U_\ell \varphi) \le 0. $$ Now applying the $\partial \bar{\partial}$ formula (\ref{eq:31}), the above Lemma and the complex Hessian comparison theorem of Li-Wang \cite{LW}, together with the argument in \cite{Ni-1807}, imply the result. It is clear from the proof that if $\ell=m=\dim(M)$, only the Laplacian comparison theorem is needed. Hence one only needs to assume that the Ricci curvature of $M$ is bounded from below.
The proof of part (ii) is similar. For the sake of the completeness we include the argument under the assumption (\ref{eq:2}). In this case we let $\varphi=\eta\left(\frac{\rho}{R}\right)$. Now $\varphi$ has support in $D(2R)\doteqdot \{\rho\le 2R\}$. Hence $W_\ell \cdot \varphi$ attains its maximum somewhere, say at $x_0 \in D(2R)$. Now at $x_0$ we have \begin{eqnarray*}
0&\ge& \sum_{\gamma=1}^\ell \frac{\partial^2}{\partial z^\gamma \partial z^{\bar{\gamma}}}\, \left(\log (W_\ell \, \varphi)\right) \ge \sum_{\alpha,\gamma=1}^\ell R^M_{\alpha\bar{\alpha}\gamma \bar{\gamma}}-R^N_{\alpha\bar{\alpha }\gamma\bar{\gamma}}|\lambda_\gamma|^2 + \sum_{\gamma=1}^\ell \frac{\partial^2 \log \varphi}{\partial z^\gamma \partial z^{\bar{\gamma}}} \\
&\ge& -K +\ell \cdot \kappa \cdot W_\ell^{1/\ell}+\frac{ \eta''}{R^2\varphi} |\nabla \rho|^2+\frac{\ell \eta'}{R \varphi}\left([\sqrt{-1}\partial \bar{\partial} \rho]_{+}\right)-\frac{|\eta'|^2}{\varphi^2 R^2}\cdot |\nabla \rho|^2\\
&\ge& -K +\ell \cdot \kappa \cdot W_\ell^{1/\ell} -\frac{C_1}{\varphi R^2}|\nabla \rho|^2 -\frac{C_1}{\varphi R} \cdot C(m)\left( [\sqrt{-1}\partial \bar{\partial} \rho]_{+}\right). \end{eqnarray*} Multiplying $\varphi$ on both sides of the above we have that $$
\sup_{D(R)}\|\Lambda^\ell \partial f\|_0^2(x)\le \left(\frac{K+\frac{C_1}{\varphi R^2}|\nabla \rho|^2 +\frac{C_1}{\varphi R} \cdot C(m)\left( [\sqrt{-1}\partial \bar{\partial} \rho]_{+}\right)}{\ell \kappa}\right)^\ell. $$
The result follows by observing that $\frac{|\nabla \rho|^2}{R^2}\le \frac{4|\nabla \rho|^2}{\rho^2} \to 0$ and $\frac{ [\sqrt{-1}\partial \bar{\partial} \rho]_{+}}{R}\le 2 \frac{ [\sqrt{-1}\partial \bar{\partial} \rho]_{+}}{\rho}\to 0$ as $R\to \infty$.
\section{Applications} First we show that the Pogorelov type estimate of \cite{Ni-1807} can be adapted to derive the $C^2$-estimate for the Monge-Amp\`ere equation related to the existence of K\"ahler-Einstein metrics and prescribing the Ricci curvature problem. Recall that the geometric problems reduce to a complex Monge-Amp\`ere equation $$ \frac{\det(g_{\alpha\bar{\beta}}+\varphi_{\alpha\bar{\beta}})}{\det(g_{\alpha\bar{\beta}})}=e^{t\varphi +f} $$
with $t\in [-1, 1]$, $f$ being a fixed function with prescribed complex Hessian. $g'_{\alpha\bar{\beta}}=g_{\alpha\bar{\beta}}+\varphi_{\alpha\bar{\beta}}$ is another K\"ahler metric with $[\omega_{g'}]=[\omega_g]$. We apply our previous setting to the map $\operatorname{id}:(M, g)\to (M, g')$. The computation in \cite{Aub, Yau} (See also the exposition in \cite{Siu}) is on $\mathcal{L} \|\partial f\|^2$. By the computation from Section 3 and 4, at the point where $\|\partial \operatorname{id}\|^2_0$ is attained we have that $$ 0\ge \frac{\partial^2}{\partial z^\gamma \partial \bar{z}^\gamma} \log (1+\varphi_{1\bar{1}})\ge R_{1\bar{1}\gamma\bar{\gamma}}-R'_{1\bar{1}\gamma \bar{\gamma}}\left(1+\varphi_{\gamma\bar{\gamma}}\right). $$
Here $R'$ is the curvature of $g'$ and $|\lambda_\gamma|^2=1+\varphi_{\gamma\bar{\gamma}}$. Since we do not have information on $R'$ in general, but only $\operatorname{Ric}^{g'}(\frac{\partial}{\partial z^1}, \frac{\partial}{\partial \bar{z}^1})=\operatorname{Ric}^g_{1\bar{1}}-t\varphi_{1\bar{1}}-f_{1\bar{1}}$, we multiply $\frac{1}{1+\varphi_{\gamma\bar{\gamma}}}$ on the both sides of the above inequality and then sum $\gamma$ from $1$ to $m$ arriving at \begin{eqnarray*} 0&\ge& \sum_{\gamma=1}^m\frac{1}{1+\varphi_{\gamma\bar{\gamma}}}R^g_{1\bar{1}\gamma\bar{\gamma}}- \frac{\operatorname{Ric}^g_{1\bar{1}}}{1+\varphi_{1\bar{1}}} +t\frac{\varphi_{1\bar{1}}}{1+\varphi_{1\bar{1}}}+\frac{f_{1\bar{1}}}{1+\varphi_{1\bar{1}}}\\ &\ge& -C(M, g, f)\sum_{\gamma=1}^m\frac{1}{1+\varphi_{\gamma\bar{\gamma}}}-1. \end{eqnarray*}
Now we apply/repeat the same consideration/calculation to $Q\doteqdot \log \sigma_1 -(C(M, g, f)+1)\varphi$. Then at the point $x_0$, where $Q$ attains its maximum, we have that $$ 0\ge -C(M, g, f)\sum_{\gamma=1}^m\frac{1}{1+\varphi_{\gamma\bar{\gamma}}}-(C(M, g, f)+2) +(C(M, g, f)+1)\sum_{\gamma=1}^m\frac{1}{1+\varphi_{\gamma\bar{\gamma}}}, $$ which then implies that $$ \sum_{\gamma=1}^m\frac{1}{1+\varphi_{\gamma\bar{\gamma}}}\le C(M, g, f)+2. $$ This implies that at the maximum point of $\sigma_1 e^{-(C(M, g, f)+1)\varphi}$, \begin{eqnarray*} \sigma_1 e^{-(C(M, g, f)+1)\varphi}&=&\sigma_1\frac{\omega^m_g}{\omega^m_{g'}}e^{t\varphi +f}e^{-(C(M, g, f)+1)\varphi}\\ &\le& \left(\frac{1}{m-1} \sum_{\gamma=2}^m \frac{1}{1+\varphi_{\gamma\bar{\gamma}}}\right)^{m-1} e^{t\varphi +f}e^{-(C(M, g, f)+1)\varphi}\\ &\le& \left(\frac{C(M, g, f)+2}{m-1} \right)^{m-1} e^{t\varphi +f}e^{-(C(M, g, f)+1)\varphi}. \end{eqnarray*} If we write $K=\left(\frac{C(M, g, f)+2}{m-1} \right)^{m-1}$, $\kappa=C(M, g, f)+2$, the above implies \begin{equation} 1+\varphi_{\gamma\bar{\gamma}}(x)\le \sigma_1(x)\le Ke^{\kappa (\varphi(x)-\varphi(x_0))} e^{t\varphi(x_0) +f(x_0)}, \quad \forall \gamma\in \{1, \cdots, m\}. \end{equation}
As mentioned in the introduction, Theorem \ref{thm:main1} removes the constraint that $\dim(M)\le \dim(N)$ in the previous results proved in \cite{Ni-1807}. As in \cite{NZ} we denote by $B^\perp$ the orthogonal bisectional curvature. We say $B^\perp\le \kappa$ if for any $X, Y\in T'N$ with $\langle X, \overline{Y}\rangle=0$, $R(X, \bar{X}, Y, \bar{Y})\le \kappa |X|^2|Y|^2$. The following is a corollary of the proof of Theorem \ref{thm:main1}.
\begin{theorem}\label{thm:51} Let $f: (M, g)\to (N, h)$ be a holomorphic map.
(i) Assume that $M$ is compact. Under the assumptions either $\operatorname{Ric}^M_\ell >0$, and the holomorphic sectional curvature $H^N\le 0$, or $\operatorname{Ric}^M_\ell\ge 0$ and $H^N < 0$, $f$ must be constant. The same result also holds if $(B^M)^\perp>0$ and $(B^N)^\perp\le 0$ or $(B^M)^\perp\ge0$ and $(B^N)^\perp< 0$.
(ii) If $M$ is compact with $S^M_\ell\ge 0$ and $\operatorname{Ric}^N_\ell <0$, or $S^M_\ell >0$, $\operatorname{Ric}^N_\ell \le 0$ then $\dim(f(M))< \ell$. The same result holds if $\operatorname{Ric}^M_\ell\ge 0$ and $S^N_\ell <0$, or $\operatorname{Ric}^M_\ell> 0$ and $S^N_\ell \le 0$. \end{theorem} \begin{proof} Since $M$ is compact $\sigma_\ell$ attains a maximum somewhere, say at $x_0$. If $f$ is not constant, $\sigma_\ell(x_0)>0$. Applying (\ref{eq:31}), using the normal coordinates around $x_0$ and $f(x_0)$ specified as in the last two sections we have that $$
0\ge \sum_{\gamma=1}^\ell \frac{\partial^2\ }{\partial z^\gamma \partial \bar{z}^\gamma}\left(\log U_\ell\right)\ge \sum_{1\le \alpha, \gamma \le \ell} \frac{-R^N_{\alpha\bar{\alpha}\gamma\bar{\gamma}}|\lambda_\alpha|^2|\lambda_\gamma|^2}{U_\ell}+ \sum_{\alpha =1}^\ell \frac{\operatorname{Ric}^M(x_0, \Sigma)(\alpha, \bar{\alpha})|\lambda_\alpha|^2}{U_\ell}. $$ Here $\Sigma=\operatorname{span}\{ \frac{\partial\ }{\partial z^1}, \cdots, \frac{\partial\ }{\partial z^\ell} \}$. By Lemma \ref{lem:41}, if $H^N<0$, the first term is positive, and the second one is nonnegative since $\operatorname{Ric}^M_\ell\ge 0$. Hence a contradiction. From the proof, the same holds if $H^N\le0$ and $\operatorname{Ric}^M_\ell>0$. For the case concerning $B^\perp$ the proof is similar.
For (ii), if $\operatorname{rank}(f)\ge \ell$, $\|\Lambda^\ell\partial f\|_0$ has a nonzero maximum somewhere, say at $x_0$. Then applying (\ref{eq:32}), using the normal coordinates around $x_0$ and $f(x_0)$ specified as in the last two sections we have that $$
0\ge \sum_{\gamma=1}^\ell \frac{\partial^2\ }{\partial z^\gamma \partial \bar{z}^\gamma}\left(\log W_\ell\right)\ge \sum_{1\le \gamma \le \ell} (-\operatorname{Ric}^N_\ell(x_0, \Sigma) |\lambda_\gamma|^2)+\operatorname{Scal}^M(x_0, \Sigma). $$ This leads to a contradiction under the assumptions either $S^M_\ell\ge 0$ and $\operatorname{Ric}^N_\ell <0$, or $S^M_\ell >0$, $\operatorname{Ric}^N_\ell \le 0$. For the second part, we introduce the operator: $$
\mathcal{L}_\ell=\sum_{\gamma=1}^\ell\frac{1}{2|\lambda_{\gamma}|^2}\left(\nabla_{\gamma}\nabla_{\bar{\gamma}} +\nabla_{\bar{\gamma}}\nabla_{\gamma}\right). $$ Since at $x_0$ $\operatorname{W}_\ell\ne 0$, the above operator is well defined in a small neighborhood of $x_0$. As before applying $\mathcal{L}$ at $x_0$ implies that $$
0\ge \mathcal{L}_\ell \left(\log W_\ell\right)\ge -\operatorname{Scal}^N(x_0, \partial f(\Sigma))+\sum_{1\le \gamma \le \ell} \frac{\operatorname{Ric}^M(x_0, \Sigma)(\gamma, \bar{\gamma})}{|\lambda_\gamma|^2}. $$ The above also induces a contradiction under either $\operatorname{Ric}^M_\ell\ge 0$ and $S^N_\ell <0$, or $\operatorname{Ric}^M_\ell> 0$ and $S^N_\ell \le 0$. \end{proof}
This can be combined with the following result of Siu-Beauville (cf. Theorem 1.5 of \cite{ABCKT}) to infer information regarding the fundamental group of the manifolds with $\operatorname{Ric}_\ell\ge0$.
\begin{theorem}[Siu-Beauville] Let $M$ be a compact K\"ahler manifold and let $C_g$ be a compact Riemann surface of genus greater than one. There exist a compact Riemann surface $C'$ with $g(C')\ge g(C_g)$ and a surjective holomorphic map $f: M \to C'$ with connected fibers if and only if there exists a surjective homomorphism $h: \pi_1(M)\to \pi_1(C_g)$. \end{theorem}
\begin{corollary} (i) Let $(M, g)$ be a compact K\"ahler manifold with $\operatorname{Ric}_\ell\ge 0$ for some $1\le \ell\le m$. Then there exists no surjective homomorphism $h: \pi_1(M)\to \pi_1(C_g)$. Furthermore, there is no subspace $V\subset H^1(M, \mathbb{C})$ with $\wedge^2 V=0$ in $H^2(M, \mathbb{C})$ and $\dim(V)\ge 2$. Namely $g(M)\le 1$. Similarly, if $\operatorname{Ric}_\ell\ge 0$, $\pi_1(M)$ can not be of the type of an amalgamated product $\Gamma_1*_{\Delta}\Gamma_2$ with the index of $\Delta$ in $\Gamma_1$ greater than one and index of $\Delta$ in $\Gamma_2$ greater than two.
(ii) Let $(M, g)$ be a compact K\"ahler manifold with $S^M_\ell> 0$ for some $1\le \ell\le m$. Then $a(M)\le\ell-1$.
(iii) If $S^M_n\ge 0$, then any harmonic map $f: M\to N$ with $N$ being a locally Hermitian symmetric space, can not have $\operatorname{rank}(f)=\dim(N)$.
\end{corollary} \begin{proof} The first part of (i) follows from part (i) of Theorem \ref{thm:51}. Namely apply it to $N=C_g$ and combine it with the above Siu-Beauville's result. The second part follows by combining Theorem \ref{thm:51} with Theorem 1.4 of \cite{ABCKT} due to Catanese (cf. Theorem 1.10 of \cite{Cat}). For the second part involving the amalgamated product, apply Theorem 6.27 of \cite{ABCKT}, namely a result of Gromov-Schoen below instead, to conclude that there exists an equivariant holomorphic map from $\widetilde{M} $ into the Poincar\'e disk. This induces a contradiction with part (i) of Theorem \ref{thm:51} since the maximum principle argument still applies (see also \cite{NR}). The statement of (ii) is an easy consequence of part (ii) of Theorem \ref{thm:51}.
For part (iii), by Siu's result on the holomorphicity of the harmonic maps between K\"ahler manifolds, namely Theorem 6.13 of \cite{ABCKT}, any such a harmonic map must be holomorphic. Then part (ii) of Theorem \ref{thm:51} induces a contradiction noting that the canonical metric on $N$ is K\"ahler-Einstein with negative Einstein constant. \end{proof}
\begin{theorem}[Gromov-Schoen]
Let $M$ be a compact K\"ahler manifold with fundamental group $\Gamma=\Gamma_1*_{\Delta}\Gamma_2$ with the index of $\Delta$ in $\Gamma_1$ greater than one and index of $\Delta$ in $\Gamma_2$ greater than two. Then there exists a representation $\rho: \pi_1(M)\to \operatorname{Aut}(\mathbb{D})$, where $\mathbb{D}=\{z\, |\, |z|<1\}$, with discrete cocompact image, and a holomorphic equivariant map from the universal cover $\widetilde{M}\to \mathbb{D}$, which also descends to a surjective map $M\to \mathbb{D}/\rho(\Gamma)$. \end{theorem}
In fact the vanishing theorem of \cite{Ni-Zheng2} implies that for K\"ahler manifolds with $S_\ell>0$, there does not exist a $k$-wedge subspace in $H^{1, 0}$ (in the sense of \cite{Cat}) for any $k\ge \ell$. Moreover, such manifolds have to be Albanese primitive for $k\ge \ell$.
For noncompact manifolds, Theorem \ref{eq:sch-ni} and Theorem \ref{thm:main1} can also be applied, together with Theorem 4.14 and 4.28 of \cite{ABCKT}, to infer some restriction on K\"ahler manifolds with nonnegative holomorphic sectional curvature or with $\operatorname{Ric}_\ell\ge 0$.
\begin{corollary} Assume that $M$ is a complete K\"ahler manifold with bounded geometry with $\operatorname{Ric}^M_\ell\ge 0$. Then (i) $H^1(M, \mathbb{C})=\{0\}$ implies that $\mathcal{H}^1_{L^2}(M)=\{0\}$;
(ii) And $\dim (\mathcal{H}^1_{L^2, ex}(M))\le 1$. \end{corollary}
Here $\mathcal{H}_{L^2}(M)$ is the space of the harmonic $L^2$-forms and $\mathcal{H}^1_{L^2, ex}(M)$ is the space of the $L^2$ harmonic exact forms. The statements are trivial when $M$ is compact.
\section{Mappings from positively curved manifolds}
In \cite{NZ}, the orthogonal $\operatorname{Ric}^\perp$ was studied. Recall that $\operatorname{Ric}^\perp (X, \overline{X})=\operatorname{Ric}(X, \overline{X})-H(X)/|X|^2$. We say $\operatorname{Ric}^\perp\ge K$ if $\operatorname{Ric}^\perp (X, \overline{X})\ge K|X|^2$. It is easy to see that $B^\perp\ge \kappa$ implies that $\operatorname{Ric}^\perp \ge (m-1)\kappa$. A similar upper estimate also holds if $B^\perp$ is bounded from above. It was also shown in \cite{NZ} via explicit examples that $B^\perp$ is independent of the holomorphic sectional curvature $H$, as well as the Ricci curvature. Similarly $\operatorname{Ric}^\perp$ is independent of $\operatorname{Ric}$, as well as $H$. It was proved in \cite{NZ} that for a manifold whose $\operatorname{Ric}^\perp$ has a positive lower bound, the manifold is compact with an effective diameter upper bound. (See \cite{Tsu} for the corresponding result for holomorphic sectional curvature.) It is not hard to see that for K\"ahler manifolds with $\operatorname{Ric}_\ell\ge K>0$, they must be compact with an upper diameter estimate.
Applying $\partial\bar{\partial}$-Bochner formulae we have the following estimates in the spirit of \cite{Ni-1807}.
\begin{theorem}\label{thm:hoop}
(i) Assume that $\operatorname{Ric}^M_\ell (X, \overline{X})\ge K|X|^2$, and $H^N(Y)\le \kappa |Y|^4$, with $K, \kappa>0$. Then for any nonconstant $f: M\to N$
$$
\max_{x\in M} \sigma_\ell(x)\ge \frac{K}{\kappa}.
$$
(ii) Assume that $(B^M)^{\perp}\ge K$, and $(B^N)^\perp\le \kappa$, with $K, \kappa>0$. Then for any nonconstant $f: M\to N$, $\dim(f(M))=m$. Moreover for any $\ell<\dim(M)$
$$
\max_{x\in M} \sigma_\ell(x)\ge \ell \frac{K}{\kappa}.
$$
(iii) Assume that $\operatorname{Ric}^M_\ell\ge K$, and that $\operatorname{Ric}^N_\ell \le \kappa $, with $K,\kappa>0$. Then for any holomorphic map $f:M\to N$ with $\dim(f(M))\ge \ell$
$$
\max_{x}\|\Lambda^\ell \partial f\|_0^2(x) \ge \left(\frac{K}{\kappa}\right)^\ell.
$$
(iv) Assume that $(\operatorname{Ric}^M)^\perp \ge K$, and that $(B^N)^\perp \le \kappa $, with $K,\kappa>0$. Then for any holomorphic map $f:M\to N$ with $\dim(f(M))\ge m-1$, $\dim(f(M))=m$. Moreover
$$
\max_{x}\|\Lambda^m \partial f\|_0^2(x) \ge \left(\frac{K}{(m-1)\kappa}\right)^{m}.
$$
In the case $\dim(M)=\dim(N)$, only $(\operatorname{Ric}^N)^{\perp}\le (m-1)\kappa$ is needed. In general $(B^N)^\perp \le \kappa $ can be weakened to $(\operatorname{Ric}^N_m)^{\perp}\le (m-1)\kappa$. Here $(\operatorname{Ric}^N_\ell)^{\perp}$ is the orthogonal Ricci curvature of the curvature tensor $R^N$ restricted to $\ell$-dimensional subspaces. \end{theorem} \begin{proof} First observe that under any assumption of the above theorem $M$ is compact. From Lemma \ref{lem:41} and (\ref{eq:31}), part (i) follows. For part (ii), at the point $x_0$ where $\sigma_\ell(x)$ attains its maximum, applying (\ref{eq:31}) to $v=\frac{\partial\ }{\partial z^m}$, we have that $$
0\ge -\kappa |\lambda_m|^2+K $$
which implies that $|\lambda_m|^2\ge \frac{K}{\kappa}$. The claimed estimate then follows from $\sigma_\ell\ge \ell |\lambda_m|^2$.
For part (iii), we apply (\ref{eq:32}) at the point $x_0$, where $\|\Lambda^\ell \partial f\|_0^2(x)$ attains its maximum. In particular we apply it to $v=\frac{\partial\ }{\partial z^\ell}$ and let $\Sigma=\operatorname{span}\{ \frac{\partial\ }{\partial z^1}, \cdots, \frac{\partial\ }{\partial z^\ell}\}$. Hence at $x_0$
$$
0\ge -\operatorname{Ric}^N(x_0, f(\Sigma)) |\lambda_\ell|^2+\operatorname{Ric}^M (x_0, \Sigma).
$$
Hence we derive that $|\lambda_\ell|^2\ge \frac{K}{\kappa}$. The claimed result then follows.
The part (iv) can be proved similarly. \end{proof}
The part (ii) of the theorem is not as strong as it appears, since $B^\perp>0$ implies that $h^{1,1}(M)=1$. On the other hand we have the following observation.
\begin{proposition}
Let $M$ be a K\"ahler manifold with $h^{1,1}(M)=1$. Then any holomorphic map $f: M\to N$, with $\dim(f(M))<\dim(M)$ must be a constant map. Hence $g(M)\le 1$, if $\dim(M)\ge 2$. In particular, if the Picard number $\rho(M)=1$ and $S_2^M>0$, any holomorphic map $f: M\to N$, with $\dim(f(M))<\dim(M)$ must be a constant map. \end{proposition} \begin{proof} In fact $f^*\omega_h$, with $\omega_h$ being the K\"ahler form of $N$, is a $d$-closed positive $(1,1)$-form. By the assumption $[f^*\omega_h]$ proportional to $[\omega_g]$. Hence it must be either zero or a positive multiple of $[\omega_g]$. Since the second case implies that $\dim(f(M))=m$, only the first case can occur, which implies that $f$ is a constant map.
Note that this implies that for any K\"ahler manifold $M$ with $\dim(M)\ge 2$ and $h^{1,1}(M)=1$, the genus $g(M)\le 1$, in view of the result of Catanese (cf. Theorem 1.10 of \cite{Cat}) since otherwise there exists a nonconstant holomorphic map $f: M\to C_g$ with $C_g$ being a Riemann surface of genus $g(M)$. Since the first Chern class map $c_1: H^{1}(M, \mathcal{O}^*)\to \mathcal{H}^{1,1}(M)\cap H^2(M, \mathbb{Z})$ is onto, and $S^M_2>0$ implies that $H^2(M, \mathbb{C})=\mathcal{H}^{1,1}(M)$, the assumption then implies $h^{1,1}(M)=1$. The last result then follows from the first. \end{proof} Taking $\kappa\to 0$, the part (ii) of Theorem \ref{thm:hoop} also implies that any holomorphic map from a compact manifold with $B^\perp>0$ into one with $B^\perp\le 0$ must be a constant map (cf. Theorem \ref{thm:51}). Given that $B^\perp$ is independent of $H$ and $\operatorname{Ric}$, this does not follow from Yau-Royden's estimate Theorem \ref{thm-sch-roy}, nor from Theorem \ref{thm:sch1}. The part (iv) provides an additional information on compact K\"ahler manifolds with $\operatorname{Ric}^\perp>0$.
\section*{Acknowledgments} {We would like to thank James McKernan (particularly for bringing our attention to the work \cite{Laz}) and Fangyang Zheng for conversations regarding holomorphic maps from $\mathbb{P}^m$. We are also grateful to Yanyan Niu for informing us of \cite{Mu}.}
\end{document} |
\begin{document}
\title {Locally upper bounded poset-valued maps and stratifiable spaces}
\thanks{}
\author{Ying-Ying Jin} \address{(Y.Y. Jin) School of Mathematics and Computational Science, Wuyi University, Jiangmen 529020, P.R. China} \email{yingyjin@163.com}
\author{Li-Hong Xie} \address{(L.H. Xie) School of Mathematics and Computational Science, Wuyi University, Jiangmen 529020, P.R. China} \email{yunli198282@126.com}
\author{Han-Biao Yang$^*$} \address{(H.B. Yang) School of Mathematics and Computational Science, Wuyi University, Jiangmen 529020, P.R. China} \email{596283897@qq.com}
\thanks{* The corresponding author}
\thanks{$^1$Supported by NSFC (Nos. 11526158, 11601393).} \subjclass[2000]{54D20; 54D40; 54D45; 54E18; 54E35; 54H11}
\keywords{Locally upper bounded poset-valued maps; Stratifiable spaces; Semi-stratifiable spaces; MCP; MCM; Lower semi-continuous (l.s.c.); Upper semi-continuous (u.s.c.).}
\begin{abstract} In this paper, we characterize stratifiable (or semi-stratifiable) spaces, and monotonically countably paracompact (or monotonically countably metacompact) spaces by expansions of locally upper bounded semi-continuous poset-valued maps. These extend earlier results for locally bounded real-valued functions. \end{abstract}
\maketitle
\section{Introduction} Throughout this paper, let $\mathbb{R}$ be the set of all real numbers, and $\mathbb{N}$ the set of all natural numbers. All topological spaces are assumed to be $T_1$-spaces.
J. Mack characterized \cite{JM} countably paracompact spaces with locally bounded real-valued functions as follows:
\begin{theorem}(\cite{JM})
A space $X$ is countably paracompact if and only if for each locally bounded function $h:X \rightarrow\mathbb{R} $ there exists a locally bounded l.s.c. function $g:X \rightarrow \mathbb{R}$ such that $|h|\leq g$. \end{theorem}
C.R. Borges \cite{Bo} introduced the notions of stratifiable spaces and semi-stratifiable spaces.
\begin{definition}\cite{Bo} A space $X$ is said to be stratifiable if, to each open set $U$, one can assign an increasing sequence $(U_n)_{n\in\mathbb{N}}$, called a stratification of $X$, of open subsets of $X$ such that \begin{enumerate}
\item $ \overline{U_n}\subseteq U$ for each $n\in\mathbb{N}$;
\item $\bigcup_{n\in\mathbb{N}}U_n=U$;
\item if $ U\subseteq V$, then $ U_n\subseteq V_n$ for each $n\in\mathbb{N}$. \end{enumerate} $X$ is said to be semi-stratifiable, if to each open set $U$, one can assign a sequence of closed subsets $(U_n)_{n\in\mathbb{N}}$ such that (2) and (3) above hold. \end{definition}
Recall that a space $X$ is said to be {\it perfect} \cite{En} if to each open set $U$ of $X$, one can assign an increasing sequence of closed subsets $(U_n)_{n\in\mathbb{N}}$ such that (2) above holds. A perfect space $X$ is said to be {\it perfectly normal} if $X$ is normal.
It is well known that a space is stratifiable if and only if it is monotonically normal and semi-stratifiable.
C. Good, R. Knight and I. Stares \cite{GK} and C. Pan \cite{Pa} introduced a monotone version of countably paracompact spaces, called monotonically countably paracompact spaces (MCP) and monotonically cp-spaces, respectively, and it was proved in \cite[Proposition 14]{GK} that both these notions are equivalent.
\begin{definition}\cite{GK}\label{def2.4} A space $X$ is said to be monotonically countably metacompact (MCM) if there is an operator $U$ assigning to each decreasing sequence $(D_j)_{j\in\mathbb{N}}$ of closed sets with empty intersection, a sequence of open sets $U((D_j))=(U(n,(D_j)))_{n\in\mathbb{N}}$ such that
\begin{enumerate}
\item $D_n \subseteq U(n,(D_j))$ for each $n\in\mathbb{N}$;
\item $\bigcap_{n\in\mathbb{N} }U(n,(D_j))=\emptyset$;
\item given two decreasing sequences of closed sets $(F_j)_{j\in \mathbb{N}}$ and $(E_j)_{j\in \mathbb{N}}$ such that $F_n \subseteq E_n$ for each $n\in\mathbb{N}$, then $U(n,(F_j))\subseteq U(n,(E_j))$ for each $n\in \mathbb{N}$. \end{enumerate} $X$ is said to be monotonically countably paracompact (MCP) if, in addition,
$(2') \bigcap_{n\in \mathbb{N}}\overline{U(n,(D_j))}=\emptyset$.
\end{definition}
Many insertion results present some classic characterizations of topological spaces, such as stratifiable spaces, monotonically countably paracompact spaces and others. T. Kubiak \cite{TK} investigated monotonically normal spaces by the monotonization of insertion properties. P. Nyikos and C. Pan \cite{Pa} and C. Good and I. Stares \cite{GS} respectively gave a characterization of stratifiable spaces by the monotonizations of insertion properties. Also, C. Good, R. Knight and I. Stares \cite{GK} characterized monotonically countably paracompact spaces by the insertions of semi-continuous functions.
By extending the insertion properties of real-valued maps, K. Yamazaki \cite{KY} introduced the notion of local boundedness for set-valued mappings and described MCP spaces by expansions of locally bounded set-valued mappings. L.H. Xie, P.F. Yan\cite{XH} gave some characterizations of stratifiable, semi-stratifiable by expansions of set-valued mappings. K. Yamazaki \cite{Ya}, Y.Y. Jin, L.H. Xie, H.W. Yue \cite{JX} considered the locally upper bounded maps with values in the ordered topological vector spaces and provided new monotone insertion theorems.
The following theorems were proved in \cite[Theorem 2.4]{KY}, \cite[Theorem 3.1 and Theorem 3.2]{Ya} and \cite[Theorem 3.1 and Theorem 3.2]{XH}.
\begin{theorem}(\cite{KY}) For a space $X$, the following statements are equivalent:
\begin{enumerate} \item $X$ is MCP (resp. MCM);
\item for every metric space $Y$, there exists a preserved order operator $\Phi$ assigning to each locally bounded set-valued mapping $\varphi: X \rightarrow \mathcal {B}(Y)$, a locally bounded l.s.c. (resp. a l.s.c.) set-valued mapping $\Phi(\varphi): X \rightarrow \mathcal {B}(Y)$ such that $\varphi\subseteq \Phi(\varphi)$;
\end{enumerate} where $\mathcal {B}(Y)$ is the set of all nonempty closed bounded sets of $Y$. \end{theorem}
\begin{theorem}(\cite{Ya}) Let $X$ be a topological space and $Y$ an ordered topological vector space with a positive interior point. Then, the following conditions are equivalent: \begin{enumerate} \item $X$ is MCP (resp. MCM). \item There exists an operator $\Phi$ assigning to each locally upper bounded map $f:X\rightarrow Y$, a locally upper bounded lower semi-continuous (resp. a lower semi-continuous) map $\Phi(f):X\rightarrow Y$ with $f\leq\Phi(f)$ such that $\Phi(f)\leq\Phi(f')$ whenever $f\leq f'$.
\end{enumerate} \end{theorem}
\begin{theorem}(\cite{XH}) For a space $X$, the following statements are equivalent:
\begin{enumerate} \item $X$ is perfectly normal (resp. stratifiable);
\item for every space $Y$ having a strictly increasing closed cover $\{B_n\}$, there exists an operator $\Phi$ (resp. a preserved order operator $\Phi$) assigning to each set-valued mapping $\varphi: X \rightarrow \mathcal {F}(Y)$, a l.s.c. set-valued mapping $\Phi(\varphi): X \rightarrow \mathcal {F}(Y)$ such that $\Phi(\varphi)$ is locally bounded at each $x\in U_\varphi$ and that $\varphi\subseteq \Phi(\varphi)$; \end{enumerate} \end{theorem}
\begin{theorem}(\cite{XH}) For a space $X$, the following statements are equivalent:
\begin{enumerate} \item $X$ is perfect (resp. semi-stratifiable);
\item for every space $Y$ having a strictly increasing closed cover $\{B_n\}$, there exists an operator $\Phi$ (resp. a preserved order operator $\Phi$) assigning to each set-valued mapping $\varphi: X \rightarrow \mathcal {F}(Y)$, a l.s.c. set-valued mapping $\Phi(\varphi): X \rightarrow \mathcal {F}(Y)$ such that $\Phi(\varphi)(x)$ is bounded at each $x\in U_\varphi$ and that $\varphi\subseteq \Phi(\varphi)$; \end{enumerate} \end{theorem}
The purpose of this paper is to generalize real-valued locally bounded functions to locally upper bounded maps with values into some bi-bounded complete and bicontinuous posets, which are not necessarily vector spaces or spaces with strictly increasing closed covers, by using the way-below relation $\ll$ and the way-above relation $\ll_d$. This provides some advantages over the real-valued and set-valued cases. Indeed, the range $\mathbb{R}$ with the total order can be extended to spaces $P$ with the partial order. Inspired by Theorem 1.2, Theorem 1.3, Theorem 1.4 and Theorem 1.5, another purpose of this paper is to characterize stratifiable (or semi-stratifiable) spaces, and monotonically countably paracompact (or monotonically countably metacompact) spaces by expansions of locally upper bounded poset-valued maps along the same lines. In the last part, we also consider monotone poset-valued insertions on monotonically normal and monotonically countably paracompact spaces.
Throughout this paper, all the undefined topological concepts can be found in \cite{En}.
\section{Basic facts and definitions} In this section, some definitions are restated and some basic facts are listed. Also, some notions are introduced which seem to be convenient though they may be found in the references.
\begin{lemma}\label{lem2.2} For a space $X$, the following statements are equivalent: \begin{enumerate} \item $X$ is semi-stratifiable (resp. stratifiable);
\item there is an operator $F$ assigning to each increasing sequence of open sets $(U_{j})_{j\in \mathbb{N}}$, an increasing sequence of closed sets $(F(n,(U_{j})))_{n\in\mathbb{N}}$ such that \begin{enumerate} \item [(i)] $U_{n}\supseteq F(n,(U_{j}))$ for each $n\in\mathbb{N}$;
\item [(ii)] $\bigcup_{n\in \mathbb{N}}F(n,(U_{j}))=\bigcup_{n\in\mathbb{N}}U_{n}$ (resp. (ii)' $\bigcup_{n\in \mathbb{N}}Int F(n,(U_{j}))=\bigcup_{n\in\mathbb{N}}U_{n}$);
\item [(iii)] given two increasing sequences of open sets $(U_{j})_{j\in\mathbb{N}}$ and $(G_{j})_{j\in\mathbb{N}}$ such that $U_{n}\subseteq G_{n}$ for each $n\in\mathbb{N}$, then $F(n,(U_{j}))\subseteq F(n,(G_{j}))$ for each $n\in\mathbb{N}$. \end{enumerate} \end{enumerate} \end{lemma}
\begin{proof} From De Morgan's laws it follows easily that condition (2) in Lemma \ref{lem2.2} and condition (2) in \cite[Theorems 3.6 and 3.7]{XY} are equivalent. \end{proof}
It is well known that semi-stratifiable (stratifiable) spaces are naturally monotone versions of perfect (perfectly normal) spaces. We can easily obtain the following result without proof.
\begin{lemma}\label{the2.4} For a space $X$, the following statements are equivalent: \begin{enumerate} \item $X$ is perfect (resp. perfectly normal);
\item there is an operator $F$ assigning to each increasing sequence of open sets $(U_{j})_{j\in \mathbb{N}}$, an increasing sequence of closed sets $(F(n,(U_{j})))_{n\in\mathbb{N}}$ such that \begin{enumerate} \item [(i)] $U_{n}\supseteq F(n,(U_{j}))$ for each $n\in\mathbb{N}$;
\item [(ii)] $\bigcup_{n\in \mathbb{N}}F(n,(U_{j}))=\bigcup_{n\in\mathbb{N}}U_{n}$ (resp. $\bigcup_{n\in \mathbb{N}}Int F(n,(U_{j}))=\bigcup_{n\in\mathbb{N}}U_{n}$); \end{enumerate} \end{enumerate} \end{lemma}
\begin{lemma}\label{lem2.5} A space $X$ is said to be monotonically countably metacompact (MCM) (resp. monotonically countably paracompact (MCP)) if and only if there is an operator $F$ assigning to each increasing sequence $(U_j)_{j\in\mathbb{N}}$ of open sets of $X$ satisfying $\bigcup_{i\in \mathbb{N}}U_i=X$, a sequence of closed sets $F((U_j))=(F(n,(U_j)))_{n\in\mathbb{N}}$ such that
\begin{enumerate}
\item $U_n \supseteq F(n,(U_j))$ for each $n\in\mathbb{N}$;
\item $\bigcup_{n\in\mathbb{N} }F(n,(U_j))=X$ (resp. (2)' $\bigcup_{n\in \mathbb{N}}Int F(n,(U_j))=X$);
\item given two increasing sequences of open sets $(U_j)_{j\in \mathbb{N}}$ and $(G_j)_{j\in \mathbb{N}}$ such that $U_n \subseteq G_n$ for each $n\in\mathbb{N}$, then $F(n,(U_j))\subseteq F(n,(G_j))$ for each $n\in \mathbb{N}$. \end{enumerate} \end{lemma}
\begin{proof} From De Morgan's laws it follows easily that conditions (1), (2) and (3) are equivalent to those in Definition \ref{def2.4}. \end{proof}
In the rest of this section, let us recall some definitions and terminology from \cite{Gi, Ke}.
Let $P=(P,\leq)$ be a poset. For $a, b \in P$, the symbol $[a, b]$ stands for $\{y\in P:a \leq y\leq b\}$. A subset $A$ of $P$ is said to be {\it directed} (resp. {\it filtered}) if $A$ is nonempty and for every $x, y\in A$ there exists $z\in A$ such that $x\leq z$ and $y\leq z$ (resp. $z\leq x$ and $z\leq y$). For a subset $A$ of $P$, $\bigvee A$(resp. $\bigwedge A$) stands for the sup (resp. inf) of $A$, if exists. For $x, y\in P$, $x$ is {\it way below} $y$, in symbol $x \ll y$, if for all directed subset $D$ of $P$ with $\bigvee D$, the relation $y\leq\bigvee D$ always implies the existence of $d\in D$ with $x \leq d$. For $x, y\in P$, $x$ is {\it way above} $y$, in symbol $y\ll_d x$, if for all filtered subset $F$ of $P$ with $\bigwedge F$, the relation $\bigwedge F\leq y$ always implies the existence of $f\in F$ with $f\leq x$. In a lattice $L$, $x \ll y$ (resp. $x \ll_d y$) if and only if for every subset $A$ of $L$ with $\bigvee A$ (resp. $\bigwedge A$) the relation $y\leq \bigvee A$ (resp. $\bigwedge A \leq y$) always implies the existence of a finite subset $B$ of $A$ such that $x \leq \bigvee B$ ($\bigwedge B\leq x$). Note that the way-above relation $\ll_d$ is precisely the dual relation of the way below relation of $P^{op}$, i.e. $x \ll_d y\Leftrightarrow y\ll^{op}x$. An element $x$ of $P$ is {\it isolated from below} (resp. {\it isolated from above}) if $x \ll x$ (resp. $x \ll_dx$). Clearly, an element $x$ of $P$ is isolated from above iff $x$ is isolated from below in $P^{op}$. On $\mathbb{R}$, it is clear that $x \ll y$ if and only if $x <y$ if and only if $x \ll_dy$. On the unit interval $[0, 1]$ of $\mathbb{R}$, we have that $x \ll y$ if and only if $x <y$ or $x =y=0$, and that $x \ll_dy$ if and only if $x <y$ or $x =y=1$. A poset $P$ is {\it continuous} if $\{u \in P:u \ll x\}$ is directed and $x =\bigvee\{u \in P:u\ll x\}$ for all $x \in P$. 
A poset $P$ is {\it dually continuous} if $\{u\in P:x \ll_du\}$ is filtered and $x =\bigwedge\{u \in P:x \ll_du\}$ for all $x \in P$, in other words, if $P^{op}$ is continuous. A poset $P$ is called bicontinuous if $P$ is continuous and dually continuous. As is pointed out in \cite{Ke}, the way-below relation in a bicontinuous poset need not be the opposite of the way-above relation in the sense that $x \ll y$ does not imply $x \ll_dy$. A {\it bicontinuous lattice} $P$ is a lattice which is bicontinuous as a poset. It should be noted that we do not require the completeness of $P$ in our definition of $P$ being bicontinuous, where a {\it complete} lattice $P$ is a poset in which every subset has the sup and the inf.
We call a poset $P$ {\it lower-bounded complete} (resp. {\it upper-bounded complete}) if every non-empty subset $A$ of $P$ with a lower bound (resp. an upper bound) has the inf (resp. sup). When $P$ is lower-bounded complete and upper-bounded complete, we call $P$ {\it bi-bounded complete}. Note that every bounded complete domain in the sense of [5] is bi-bounded complete. For maps $f, g:X\rightarrow P$ into a poset $P$, the symbol $f\ll g$ (resp. $f\ll_d g$, $f\leq g$) stands for $f(x)\ll g(x)$ (resp. $f(x)\ll_d g(x)$, $f(x) \leq g(x)$) for each $x\in X$. For a point $z$ and a pair of points $\langle y,y'\rangle$ of a poset $P$, $z$ is an {\it interpolated point} of $\langle y,y'\rangle$ if $y\ll_d z\ll y'$. A pair $\langle f,g\rangle$ of maps $f, g:X\rightarrow P$ has {\it interpolated points pointwise} if $\langle f(x), g(x)\rangle$ has an interpolated point for each $x\in X$.
For a subset $B$ of a poset $P$ and $y, y'\in P$, the pair of points $\langle y,y'\rangle$ has interpolated points on $B$ if there exists $z\in B$ such that $z$ is an interpolated point of $\langle y,y'\rangle$. A pair $\langle f,g\rangle$ of maps $f, g:X\rightarrow P$ has interpolated points pointwise on B if $\langle f(x), g(x)\rangle$ has interpolated points on $B$ for each $x\in X$.
For a non-empty subset $A$ of $X$, a pair $\langle f,y\rangle$ (resp. $\langle y,f\rangle$) of a map $f: X\rightarrow P$ and a point $y\in P$ has {\it interpolated points of $A$} if $\langle f(x),y\rangle$ (resp. $\langle y,f(x)\rangle$) has interpolated points for each $x\in A$.
For a non-empty subset $A$ of $X$, a pair $\langle f,g\rangle$ of maps $f,g:X\rightarrow P$ has {\it interpolated points of $A$} if $\langle f(x),g(x)\rangle$ has interpolated points for each $x\in A$. We define $G_{f,g}=\{x\in X: \langle f(x),g(x)\rangle \text{~has an interpolated point~}\}.$
See \cite{En} and \cite{Gi} for undefined terminology.
\begin{lemma}\label{lem2.6}\cite{Gi} For a poset $P$, the following statements hold.
\begin{enumerate}
\item $x \ll_dy$$\Rightarrow$$x \leq y$;
\item $u \leq x \ll_dy\leq v\Rightarrow u \ll_dv$;
\item $z\ll_dx$ and $z\ll_dy$$\Rightarrow$$z\ll_dx \wedge y$, whenever $x \wedge y$ exists. \end{enumerate} If $P$ is a dually continuous poset, (4) below also holds.
(4) $x\ll_dy$ $\Rightarrow$ $\exists$ $z\in P$ s.t. $x \ll_dz\ll_dy$. \end{lemma}
For a subset $A$ of a topological space $X$ and $x\in X$, $\overline{A}$ stands for the closure of $A$ and $\mathcal{N}_x$ is the set of all neighborhoods of $x$.
Let $f:X\rightarrow P$ be a map from a topological space $X$ to a poset $P$ (which is not assumed to be a topological space), and $x \in X$. Set $$\mathcal{N}_{x*}(f) = \{N \in \mathcal{N}_x : f(N) \text{~has the inf}\}, \text{~and~} \mathcal{N}^*_x (f) = \{N \in \mathcal{N}_x : f(N) \text{~has the sup}\}.$$ We say that $f$ {\it admits} $f_*(x)$ if $\mathcal{N}_{x*}(f)\neq\emptyset$ and $\{\bigwedge f(N):N\in\mathcal{N}_{x*}(f)\}$ has the sup, and then we define $f_*(x)=\bigvee\{\bigwedge f(N):N\in\mathcal{N}_{x*}(f)\}$. Also, $f$ {\it admits} $f^*(x)$ if the set $\mathcal{N}^*_x(f)\neq\emptyset$ and $\{\bigvee f(N) :N\in \mathcal{N}_x^*(f)\}$ has the inf, and then we define $f^*(x) =\bigwedge\{\bigvee f(N):N\in \mathcal{N}_{x}^*(f)\}$. A map $f:X\rightarrow P$ is {\it lower semi-continuous} (resp. {\it upper semi-continuous}) at $x$ if $f$ admits $f_*(x)$ (resp. $f^*(x)$) and $f(x) =f_*(x)$ (resp. $f(x) =f^*(x)$). A map $f:X\rightarrow P$ is {\it lower semi-continuous} (resp. {\it upper semi-continuous}) if $f$ is lower (resp. upper) semi-continuous at every $x\in X$. Since $P$ is not assumed to be complete, for $A \subset P$, $\bigwedge A$ or $\bigvee A$ does not necessarily exist. If there is no confusion, we simply express $f_*(x) =\bigvee_{N\in\mathcal{N}_x}\bigwedge f(N)$ and $f^*(x) =\bigwedge _{N\in \mathcal{N}_x}\bigvee f(N)$ for each $x \in X$ (if exists).
It is defined in \cite{En} that a real-valued function $f:X\rightarrow \mathbb{R} $ is lower semi-continuous if $\{x:f(x)>r\}$ is open for each $r\in \mathbb{R}$ (namely, for each $x\in X$ and each $\varepsilon>0$ there exists a neighborhood $O_x$ of $x$ such that $f(x')>f(x)-\varepsilon$ for each $x'\in O_x$). A real-valued function $f:X\rightarrow\mathbb{R}$ is upper semi-continuous if $-f$ is lower semi-continuous. Note that this definition coincides with the above definition of semi-continuous maps with values into ordered topological vector spaces $Y$ for $Y=\mathbb{R}$.
\begin{proposition}\cite{Yama}\label{prop2.7} Let $P$ be a poset, $x\in X$ and $f:X\rightarrow P$ a map. Consider the following conditions: \begin{enumerate} \item [(1)] $\{\bigwedge f(N):N\in\mathcal{N}_{x*}(f)\}$ (resp.) $\{\bigvee f(N):N\in\mathcal{N}_{x}^*(f)\}$ has the sup (resp. inf); \item [(2)] $\mathcal{N}_{x*}(f)\neq\emptyset$ (resp. $\mathcal{N}_{x}^*(f)\neq\emptyset$ ); \item [(3)] $\mathcal{N}_{x*}(f)$ (resp. $\mathcal{N}_{x}^*(f)$) is a neighborhood base of $x$. \end{enumerate} Then, the following statements (a), (b), (c) and (d) hold.
\begin{enumerate} \item [(a)] If $P$ is lower-bounded (resp. upper-bounded) complete, (1)$\Rightarrow$(2) and (2)$\Rightarrow$(3) hold. \item [(b)] If $P$ is a cdcpo (resp. cfcpo), (3)$\Rightarrow$(1) holds. \item [(c)] If $P$ is bi-bounded complete, the conditions (1), (2) and (3) are equivalent, that is, $f$ admits $f_*(x)$ (resp. $f^*(x)$) whenever either one of (1), (2) and (3) holds. \item [(d)] If $P$ is bi-bounded complete and $f(X)$ has a lower (resp. an upper) bound, (2) holds, thus, $f$ admits $f_*$ (resp. $f^*$). \end{enumerate} \end{proposition}
\begin{proposition}\cite{Yama}\label{prop2.8} Let $X$ be a topological space, $P$ a poset, and assume that $f:X\rightarrow P$ admits $f_*$ and $\mathcal{N}_{x*}(f)$ is a neighborhood base of $x$ for each $x\in X$. Consider the following conditions: \begin{enumerate} \item [(1)] $f$ is lower semi-continuous; \item [(2)] $\{x\in X: a\ll f(x)\}$ is open for each $a\in P$; \item [(3)] $\{x\in X: f(x)\leq a\}$ is closed for each $a\in P$. \end{enumerate} Then, $(1)\Rightarrow(3)$ always holds. If $P$ is continuous, $(1)\Leftrightarrow(2)$ holds. \end{proposition}
\begin{proposition}\cite{Yama}\label{prop2.9} For a topological space $X$ and a bi-bounded complete, continuous (resp. dually continuous) poset $P$, if $f:X\rightarrow P$ is lower (resp. upper) semi-continuous, then $\{x\in X: a \ll f(x)\}$ (resp. $\{x\in X: f(x) \ll_d a\}$) is open for each $a\in P$. The converse holds if, in addition, $\mathcal{N}_{x*}(f)\neq\emptyset$ (resp. $\mathcal{N}_{x}^*(f)\neq\emptyset$). \end{proposition}
A point $y_0$ of a poset $P$ is called a $\ll_d$-{\it increasing} $\ll$-{\it limit point} \cite{Yama} if there exists a sequence $\{y_i:i\in\mathbb{N}\}\subset P$ such that $y_i\ll_d y_{i+1}$, $y_i\ll y_0$ $(i\in \mathbb{N})$ and $y_0=\bigvee_{i\in\mathbb{N}}y_i$. Also, a point $y_0$ of $P$ is a $\ll$-{\it decreasing} $\ll_d$-{\it limit point} if there exist $y_i\in P$ $(i\in\mathbb{N})$ such that $y_{i+1}\ll y_i$, $y_0\ll_d y_i$ $(i\in\mathbb{N})$ and $y_0=\bigwedge_{i\in\mathbb{N}}y_i$.
We introduce the following:
\begin{definition}\cite{Yama} For a poset $(P,\leq)$ and $y\in P$, $y$ is a lower bound (resp. upper bound) of a map $f:X\rightarrow P$ if $y$ is a lower (resp. upper) bound of $f(X)$. \end{definition}
\begin{definition} For a topological space $X$ and a bi-bounded complete, dually continuous poset $P$ with a $\ll_{d}$-increasing $\ll$-limit point $y_0$, a map $f:X\rightarrow P$ is called locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ if for every $x\in X$ there exist $n\in\mathbb{N}$ and a neighborhood $O_x$ of $x$ such that $f(x')\leq y_n$ for each $x'\in O_x$. \end{definition}
For a mapping $g: X\rightarrow P$, define $$U_g=\{x\in X: g \text{~is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at~} x \}.$$ Clearly, $U_g$ is an open set in $X$. Define also $$F_g=\{x\in X: \exists n\in\mathbb{N} \text{~such that~} g(x)\leq y_n\}.$$
\section{main results}
\begin{theorem}\label{the3.1} Let $P$ be a bi-bounded complete, continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is stratifiable if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ with $F_g\neq\emptyset$, a l.s.c. map $\Phi(g):X\rightarrow P$ such that $g\leq \Phi(g)$, $\Phi(g)$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at each $x\in U_g$ and that $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$. \end{theorem}
\begin{proof} Assume that $X$ is stratifiable. There exists an operator $F$ satisfying (i), (ii)' and (iii) in Lemma \ref{lem2.2}. Let $P$ be a bi-bounded complete, continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. For each map $g:X\rightarrow P$ with $F_g\neq\emptyset$ and each $n\in \mathbb{N}$, define $$ U_n(g)=Int\{x\in X: g(x)\leq y_n\} \quad\quad \quad\quad(\ref{the3.1}.1)$$ Then we have $U_g=\bigcup_{n\in \mathbb{N}}U_n(g).$ In fact, for each $x\in U_g$, there exists an open neighborhood $O$ of $x$ such that $g(x')\leq y_i $ for some $i\in \mathbb{N}$ and each $x'\in O$, which implies that $ x\in U_i(g)$. This implies that $U_g\subseteq\bigcup_{n\in \mathbb{N}}U_n(g).$ On the other hand, take any $x\in \bigcup_{n\in \mathbb{N}}U_n(g)$. Then there is $U_j(g)$ such that $x\in U_j(g)$, and therefore, there exists an open neighborhood $O$ of $x$ such that $ g(x')\leq y_j$ for each $x'\in O$. This implies that $x\in O\subseteq U_g$.
Hence, $F((U_j(g)))=(F(n,(U_j(g))))_{n\in\mathbb{N}}$ is a sequence of closed subsets of $X$ such that $$F(n,(U_j(g)))\subset U_n(g) \text{~for each~} n\in\mathbb{N};\quad\quad \quad\quad(\ref{the3.1}.2)$$ $$\bigcup_{n\in\mathbb{N}}Int F(n, (U_j(g)))=\bigcup_{n\in \mathbb{N}}U_n(g);\quad\quad \quad\quad(\ref{the3.1}.3)$$ $$F(n,(U_j(g)))\subset F(n+1,(U_j(g))), n\in\mathbb{N};\quad\quad \quad\quad(\ref{the3.1}.4)$$ Thus, we can define $\Phi(g):X\rightarrow P$ as follows:
$$\Phi(g)(x)=\left\{ \begin{array}{rcl} y_1 & & {x\in F(1,(U_j(g)))}\\ y_{n+1} & & x\in F(n+1,(U_j(g)))\setminus F(n,(U_j(g)))\\ y_0 & &x\in X\setminus \bigcup_{n\in\mathbb{N}}F(n, (U_j(g)))\\ \end{array} \right. \quad\quad \quad\quad(\ref{the3.1}.5)$$ It is obvious that $\Phi(g)(x)\leq y_0$ and $\Phi(g)$ has a lower bound $y_1$.
We now show that $g\leq\Phi(g)$. For each $x\in X\setminus U_g$, $g(x)\leq y_0=\Phi(g)(x)$ is obvious. Let $x\in U_g$. Then, $\Phi(g)(x)=y_{i}$ for some $i\in \mathbb{N}$, and therefore, $x\in F(i,(U_j(g)))\setminus F(i-1,(U_j(g)))$. Then $g(x)\leq y_{i}=\Phi(g)(x)$ by $x\in F(i,(U_j(g)))\subset U_{i}(g)=Int\{x\in X: g(x)\leq y_{i}\}$. If $x\in F(1,(U_j(g)))$, it is obvious that $g(x)\leq y_{1}=\Phi(g)(x)$. Thus, we have $g\leq\Phi(g)$.
To show that $\Phi(g):X\rightarrow P$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at each $x\in U_g$. Take any $x\in U_g$, by (\ref{the3.1}.3), there exists $n\in\mathbb{N}$ such that $x\in Int F(n+1, (U_j(g)))\setminus Int F(n, (U_j(g)))$. Consider the neighborhood $O_x=Int F(n+1, (U_j(g)))$ of $x$. For each $x'\in O_x$, it follows from the definition of $\Phi(g)$ that $\Phi(g)(x')\leq y_{n+1}$. This completes the proof that $\Phi(g)$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at each $x\in U_g$.
Next we show $\Phi(g)$ is l.s.c.. For each $x\in \bigcup_{n\in\mathbb{N}} F(n, (U_j(g)))$, there exists some $m\in\mathbb{N}$ such that $x\in F(m+1, (U_j(g)))\setminus F(m, (U_j(g)))$. We consider the neighborhood $O_x=X\setminus F(m, (U_j(g)))$ of $x$. For each $x'\in O_x$, we can get $\Phi(g)(x')\geq y_{m+1}=\Phi(g)(x)$. Therefore, $\bigwedge\Phi(g)(O_x)=y_{m+1}$ and $O_x\in \mathcal{N}_{x*}(\Phi(g))$. This provides that $\Phi(g)$ admits $\Phi(g)_*(x)$ because of (c) of Proposition \ref{prop2.7}. We have $$\Phi(g)_*(x)=\bigvee_{N\in\mathcal{N}_{x*}(\Phi(g))}\bigwedge\Phi(g)(N)\geq\bigwedge_{x'\in O_x}\Phi(g)(x')=y_{m+1}=\Phi(g)(x).$$ Hence, $\Phi(g)_*(x)=\Phi(g)(x)$, that is, $\Phi(g)$ is l.s.c. at $x$. For each $x\in X\setminus\bigcup_{n\in\mathbb{N}} F(n, (U_j(g)))$, we consider the neighborhood $V=X\setminus\bigcup_{n\in\mathbb{N}} F(n, (U_j(g)))$ of $x$. For each $x'\in V$, we can get $\Phi(g)(x')= y_{0}=\Phi(g)(x)$. Therefore, $\bigwedge\Phi(g)(V)=y_{0}$ and $V\in \mathcal{N}_{x*}(\Phi(g))$. This provides that $\Phi(g)$ admits $\Phi(g)_*(x)$ because of (c) of Proposition \ref{prop2.7}. We have $$\Phi(g)_*(x)=\bigvee_{N\in\mathcal{N}_{x*}(\Phi(g))}\bigwedge\Phi(g)(N)\geq\bigwedge_{x'\in V}\Phi(g)(x')=y_{0}=\Phi(g)(x).$$ Hence, $\Phi(g)_*(x)=\Phi(g)(x)$ for each $x\in X$, that is, $\Phi(g)$ is l.s.c.
Finally, let $g':X\rightarrow P$ be a map with $g\leq g'$. Then $$\{x\in X: g'(x)\leq y_n\}\subseteq \{x\in X: g(x) \leq y_n\}$$ and hence, $U_n(g)\supseteq U_n(g')$ for each $n\in\mathbb{N}$. Therefore we have $F(n,(U_j(g)))\supseteq F(n,(U_j(g')))$ for each $n\in\mathbb{N}$, and
$$\bigcup_{n\in\mathbb{N}}Int F(n, (U_j(g)))=\bigcup_{n\in \mathbb{N}}U_n(g)$$ $$\bigcup_{n\in\mathbb{N}}Int F(n, (U_j(g')))=\bigcup_{n\in \mathbb{N}}U_n(g')$$
For each $x\in \bigcup_{n\in \mathbb{N}}U_n(g')$, there exists $n\in \mathbb{N}$ such that $x\in F(n+1, (U_j(g')))\setminus F(n, (U_j(g')))$. That is $x\in F(n+1, (U_j(g')))\subseteq F(n+1, (U_j(g)))$. By (\ref{the3.1}.5), we can get $\Phi(g)(x)\leq y_{n+1}=\Phi(g')(x)$. $\Phi(g)(x)\leq y_0= \Phi(g')(x)$ is obvious whenever $x\in X\setminus\bigcup_{n\in \mathbb{N}}U_n(g')$, which proves the necessity.
Conversely, let $(U_j)_{j\in\mathbb{N}}$ be a sequence of increasing open subsets of $X$. Define a map $g_{((U_j))}:X\rightarrow P$ by: $$g_{((U_j))}(x)=\left\{ \begin{array}{rcl} y_1 & & {x\in U_{1}}\\ y_{n+1} & & {x\in U_{n+1}\setminus U_{n}}\\ y_0 & & {x\in X\setminus\bigcup_{n\in\mathbb{N}}U_n}\\ \end{array} \right. \quad\quad \quad\quad(\ref{the3.1}.6)$$ Then, we have $U_{g_{((U_j))}}=\bigcup_{n\in\mathbb{N}}U_n$, where $$U_{g_{((U_j))}}=\{x\in X: g_{((U_j))} \text{~is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at~}x \}.$$ By the assumption, there exists an operator $\Phi$ assigning to each $g_{((U_j))}$ with an upper bound, a l.s.c. map $\Phi(g_{((U_j))}):X\rightarrow P$ such that $g_{((U_j))}\leq \Phi(g_{((U_j))})$, $\Phi(g_{((U_j))})$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at each $x\in U_{g_{((U_j))}}$, and $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$. For each sequence $(U_j)_{j\in\mathbb{N}}$ of increasing open subsets of $X$, define
$$F(n,(U_j))=\{x\in X: \Phi(g_{((U_j))})(x)\leq y_n\}\quad\quad \quad\quad(\ref{the3.1}.7)$$ We can get that $F(n,(U_j))$ is closed, by (a) of Proposition \ref{prop2.7} and Proposition \ref{prop2.8}. It suffices to show the operator $F$ satisfies (i), (ii)' and (iii) of Lemma \ref{lem2.2}.
To see that $U_n \supseteq F(n,(U_j))$ for each $n\in\mathbb{N}$, let $x\in F(n,(U_j))$. Then $g_{((U_j))}(x)\leq\Phi(g_{((U_j))})(x)\leq y_n$, and thus $x\in U_{m}\setminus U_{m-1}$ for some $m\leq n$ by (\ref{the3.1}.6). So we have $x\in U_m\subset U_n$. Hence $U_n \supseteq F(n,(U_j))$ holds. In addition, $\Phi(g_{((U_j))})$ is l.s.c., so $F(n,(U_j))$ is a closed set of $X$ for each $n\in \mathbb{N}$. This shows that the condition (i) is satisfied.
To show (ii)', note that $\Phi(g_{((U_j))})$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at each $x\in U_{g_{((U_j))}}$. Then, for each $x\in U_{g_{((U_j))}}$, there exists an open neighborhood $O$ of $x$ such that $\Phi(g_{((U_j))})(x')\leq y_{n_0}$ for some $n_0\in \mathbb{N}$ and each $x'\in O$. This implies that $x\in Int F(n_0,(U_j))$. Hence, $\bigcup_{n\in\mathbb{N}}Int F(n, (U_j))=U_{g_{((U_j))}}=\bigcup_{n\in \mathbb{N}}U_n$.
To show (iii), let $((G_j))$ be an increasing sequence of open subsets of $X$ such that $(U_j)\preceq (G_j)$. Since $U_n\subseteq G_n$ for each $n\in \mathbb{N}$, it follows from (\ref{the3.1}.6) that $g_{((G_j))}(x)\leq g_{((U_j))}(x)$. Hence, we have $\Phi(g_{((G_j))})\leq\Phi(g_{((U_j))})$. Furthermore,
$F(n,(U_j))=\{x\in X: \Phi(g_{((U_j))})(x)\leq y_n\}\subseteq \{x\in X: \Phi(g_{((G_j))})(x)\leq y_n\}=F(n,(G_j))$ for each $n\in\mathbb{N}$, which implies that $F((U_j))\preceq F((G_j))$. Thus, $X$ is a stratifiable space. \end{proof}
\begin{theorem}\label{the3.2} Let $P$ be a bi-bounded complete, continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is semi-stratifiable if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ with an upper bound $y_0$, a l.s.c. map $\Phi(g):X\rightarrow P$ such that $g\leq \Phi(g)$, $\Phi(g)$ is upper bounded at each $x\in U_g$ and that $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$. \end{theorem}
\begin{proof} Assume that $X$ is semi-stratifiable. There exists an operator $F$ satisfying (i), (ii) and (iii) in Lemma \ref{lem2.2}. Let $P$ be a bi-bounded complete, continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. For each map $g:X\rightarrow P$ and each $n\in \mathbb{N}$, define $U_n(g)$ as (\ref{the3.1}.1) in Theorem \ref{the3.1}.
Then we have $U_g=\bigcup_{n\in \mathbb{N}}U_n(g).$ Define $\Phi(g):X \rightarrow P$ as (\ref{the3.1}.5) in Theorem \ref{the3.1}.
We only show that $\Phi(g)(x)$ is upper bounded at each $x\in U_g$, since the other properties of $\Phi(g)$ are proved in Theorem \ref{the3.1}.
Take any $x\in U_g$, by (ii) of Lemma \ref{lem2.2}, we have $x\in \bigcup_{n\in \mathbb{N}}U_n(g)=\bigcup_{n\in\mathbb{N}}F(n, (U_j(g)))$, and therefore, there exists $k\in \mathbb{N}$ such that $x\in F(k, (U_j(g)))\setminus F(k-1, (U_j(g)))$. This implies that $\Phi(g)(x)=y_k$. This completes the proof that $\Phi(g)(x)$ is upper bounded at each $x\in U_g$.
Conversely, let $(U_j)_{j\in\mathbb{N}}$ be a sequence of increasing open subsets of $X$. Define a map $g_{((U_j))}:X\rightarrow P$ as (\ref{the3.1}.6) in Theorem \ref{the3.1}.
Then, we have $U_{g_{((U_j))}}=\bigcup_{n\in\mathbb{N}}U_n$, where $$U_{g_{((U_j))}}=\{x\in X: g_{((U_j))} \text{~is locally upper bounded at~}x \}.$$ By the assumption, there exists an operator $\Phi$ assigning to each $g_{((U_j))}$ with an upper bound, a l.s.c. map $\Phi(g_{((U_j))}):X\rightarrow P$ such that $\Phi(g_{((U_j))})$ is upper bounded at each $x\in U_{g_{((U_j))}}$, $g_{((U_j))}\leq \Phi(g_{((U_j))})$, and that $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$. For each sequence $(U_j)_{j\in\mathbb{N}}$ of increasing open subsets of $X$ and each $n\in \mathbb{N}$, define the operator $F$ as (\ref{the3.1}.7) in Theorem \ref{the3.1}.
We can get that $F(n,(U_j))$ is closed, by (a) of Proposition \ref{prop2.7} and Proposition \ref{prop2.8}. It suffices to show that the operator $F$ satisfies (i), (ii) and (iii) of Lemma \ref{lem2.2}. The proof that $F$ satisfies (i) and (iii) of Lemma \ref{lem2.2} is the same as in Theorem \ref{the3.1}, so we only show that $F$ satisfies (ii) of Lemma \ref{lem2.2}.
To show (ii), note that $\Phi(g_{((U_j))})$ is upper bounded at each $x\in U_{g_{((U_j))}}$. Then, for each $x\in U_{g_{((U_j))}}$, there exists $n_0\in \mathbb{N}$ such that $\Phi(g_{((U_j))})(x)\leq y_{n_0}$. This implies that $x\in F(n_0,(U_j))$. Hence, $\bigcup_{n\in\mathbb{N}} F(n, (U_j))=U_{g_{((U_j))}}=\bigcup_{n\in \mathbb{N}}U_n$. Thus, $X$ is a semi-stratifiable space. \end{proof}
\begin{theorem}\label{the3.3} Let $P$ be a bi-bounded complete, dually continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is MCP if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ with a lower bound, a l.s.c. map $\Phi(g):X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ such that $g\leq \Phi(g)$, and $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$. \end{theorem}
\begin{proof} Suppose that $X$ is MCP and $F$ is any operator that satisfies conditions (1), (2)' and (3) of Lemma \ref{lem2.5}. Let $g:X\rightarrow P$ be a map which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$. For each $n\in \mathbb{N}$, we define $$ U_n(g)=Int\{x\in X: g(x)\leq y_n\} \quad\quad \quad\quad(\ref{the3.3}.1)$$ Then, $\{U_n(g):n\in\mathbb{N}\}$ is an increasing sequence of open subsets of $X$ because of Proposition \ref{prop2.9}. It is clear that $\bigcup_{n\in \mathbb{N}}U_n(g)=X$.
Hence, $F((U_j(g)))=(F(n,(U_j(g))))_{n\in\mathbb{N}}$ is a sequence of closed subsets of $X$ such that $$F(n,(U_j(g)))\subset U_n(g) \text{~for each~} n\in\mathbb{N};\quad\quad \quad\quad(\ref{the3.3}.2)$$ $$\bigcup_{n\in\mathbb{N}}Int F(n, (U_j(g)))=X;\quad\quad \quad\quad(\ref{the3.3}.3)$$ $$F(n,(U_j(g)))\subset F(n+1,(U_j(g))), n\in\mathbb{N};\quad\quad \quad\quad(\ref{the3.3}.4)$$ Thus, we can define $\Phi(g):X\rightarrow P$ as follows:
$$\Phi(g)(x)=\left\{ \begin{array}{rcl} y_1 & & {x\in F(1,(U_j(g)))}\\ y_{n+1} & & x\in F(n+1,(U_j(g)))\setminus F(n,(U_j(g)))\\ \end{array} \right. \quad\quad \quad\quad(\ref{the3.3}.5)$$ It is obvious that $\Phi(g)(x)\leq y_0$ and $\Phi(g)$ has a lower bound $y_1$.
Let us show that $g(x)\leq\Phi(g)(x)$ for each $x\in \bigcup_{n\in\mathbb{N}}F(n, (U_j(g)))$. Then there exists $n\in\mathbb{N} $ such that $x\in F(n+1,(U_j(g)))\setminus F(n,(U_j(g)))$. Then $g(x)\leq y_{n+1}=\Phi(g)(x)$ by $x\in F(n+1,(U_j(g)))\subset U_{n+1}(g)=Int\{x\in X: g(x)\leq y_{n+1}\}$. If $x\in F(1,(U_j(g)))$, it is obvious that $g(x)\leq y_{1}=\Phi(g)(x)$. Thus, we have $g\leq\Phi(g)$.
To show that $\Phi(g):X\rightarrow P$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$, let $x\in X$. By (\ref{the3.3}.3), there exists $n\in\mathbb{N}$ such that $x\in Int F(n+1, (U_j(g)))\setminus Int F(n, (U_j(g)))$. Consider the neighborhood $O_x=Int F(n+1, (U_j(g)))$ of $x$. For each $x'\in O_x$, it follows from the definition of $\Phi(g)$ that $\Phi(g)(x')\leq y_{n+1}$. This completes the proof that $\Phi(g)$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$.
Next we show $\Phi(g)$ is l.s.c.. For each $x\in \bigcup_{n\in\mathbb{N}} F(n, (U_j(g)))$, there exists some $m\in\mathbb{N}$ such that $x\in F(m+1, (U_j(g)))\setminus F(m, (U_j(g)))$. We consider the neighborhood $O_x=X\setminus F(m, (U_j(g)))$ of $x$. For each $x'\in O_x$, we can get $\Phi(g)(x')\geq y_{m+1}=\Phi(g)(x)$. Therefore, $\bigwedge\Phi(g)(O_x)=y_{m+1}$ and $O_x\in \mathcal{N}_{x*}(\Phi(g))$. This provides that $\Phi(g)(x)$ admits $\Phi(g)_*(x)$ because of (c) of Proposition \ref{prop2.7}. We have $$\Phi(g)_*(x)=\bigvee_{N\in\mathcal{N}_{x*}(\Phi(g))}\bigwedge\Phi(g)(N)\geq\bigwedge_{x'\in O_x}\Phi(g)(x')=y_{m+1}=\Phi(g)(x).$$ Hence, $\Phi(g)_*(x)=\Phi(g)(x)$ for each $x\in X$, that is, $\Phi(g)(x)$ is l.s.c..
Finally, let $g':X\rightarrow P$ be a map with $g\leq g'$. Then $$\{x\in X: g'(x)\leq y_n\}\subseteq \{x\in X: g(x) \leq y_n\}$$ and hence, $U_n(g)\supseteq U_n(g')$ for each $n\in\mathbb{N}$. Therefore we have $F(n,(U_j(g)))\supseteq F(n,(U_j(g')))$ for each $n\in\mathbb{N}$, and
$$\bigcup_{n\in\mathbb{N}}Int F(n, (U_j(g)))=\bigcup_{n\in \mathbb{N}}U_n(g)$$ $$\bigcup_{n\in\mathbb{N}}Int F(n, (U_j(g')))=\bigcup_{n\in \mathbb{N}}U_n(g')$$
For each $x\in \bigcup_{n\in \mathbb{N}}U_n(g')$, there exists $n\in \mathbb{N}$ such that $x\in F(n+1, (U_j(g')))\setminus F(n, (U_j(g')))$. That is $x\in F(n+1, (U_j(g')))\subseteq F(n+1, (U_j(g)))$. By (\ref{the3.3}.5), we can get $\Phi(g)(x)\leq y_{n+1}=\Phi(g')(x)$, which proves the necessity.
Conversely, let $(U_j)_{j\in\mathbb{N}}$ be an increasing open cover of $X$. Define a map $g_{((U_j))}:X\rightarrow P$ by:
$$g_{((U_j))}(x)=\left\{ \begin{array}{rcl} y_1 & & {x\in U_{1}}\\ y_{n+1} & & {x\in U_{n+1}\setminus U_{n}}\\ \end{array} \right. \quad\quad \quad\quad(\ref{the3.3}.1)$$ Then, $g_{((U_j))}$ is locally upper bounded. In fact, for each $x\in X$ there exists $n\in \mathbb{N}$ such that $x\in U_{n+1}\setminus U_n$. Consider the neighborhood $O_x=U_{n+1}$ of $x$. For each $x'\in O_x$, it follows from the definition of $g_{((U_j))}$ that $g_{((U_j))}(x')\leq y_{n+1}$. This shows that $g_{((U_j))}$ is locally upper bounded.
By the assumption, there exists an operator $\Phi$ assigning to each locally upper bounded map $g:X\rightarrow P$ with a lower bound, a l.s.c. map $\Phi(g):X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ such that $g\leq \Phi(g)$, and $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$.
For each sequence $(U_j)_{j\in\mathbb{N}}$ of increasing open cover of $X$, define
$$F(n,(U_j))=\{x\in X: \Phi(g_{((U_j))})(x)\leq y_n\}\quad\quad \quad\quad(\ref{the3.3}.2)$$ We can get that $F(n,(U_j))$ is closed, by (a) of Proposition \ref{prop2.7} and Proposition \ref{prop2.8}. It suffices to show the operator $F$ satisfies (1), (2)' and (3) of Lemma \ref{lem2.5}.
To show (1) and (2)', let $(U_j)_{j\in\mathbb{N}}$ be an increasing open cover of $X$. To see $U_n \supseteq F(n,(U_j))$ for each $n\in\mathbb{N}$, let $x\in F(n,(U_j))$. Then, $g_{((U_j))}(x)\leq\Phi(g_{((U_j))})(x)\leq y_n$, so $g_{((U_j))}(x)\leq y_n$, and thus $x\in U_{m}\setminus U_{m-1}$ with $m\leq n$ by (\ref{the3.3}.1). So we have $x\in U_m\subset U_n$. Hence $U_n \supseteq F(n,(U_j))$ holds. To show (2)', let $x\in X$, and take a neighborhood $O_x$ of $x$ and $n\in\mathbb{N}$ such that $\Phi(g_{((U_j))})(x')\leq y_n$ for each $x'\in O_x$; then $x\in Int F(n,(U_j))$, which shows that $\bigcup_{n\in\mathbb{N} }IntF(n,(U_j))=X$.
To show (3), let $((G_j))$ be an increasing sequence of open subsets of $X$ such that $(U_j)\preceq (G_j)$. Since $U_n\subseteq G_n$ for each $n\in \mathbb{N}$, it follows from (\ref{the3.3}.1) that $g_{((G_j))}(x)\leq g_{((U_j))}(x)$. Hence, we have $\Phi(g_{((G_j))})\leq\Phi(g_{((U_j))})$. Furthermore,
$F(n,(U_j))=\{x\in X: \Phi(g_{((U_j))})(x)\leq y_n\}\subseteq \{x\in X: \Phi(g_{((G_j))})(x)\leq y_n\}=F(n,(G_j))$ for each $n\in\mathbb{N}$, which implies that $F((U_j))\preceq F((G_j))$. This shows that $F$ satisfies (3) of Lemma \ref{lem2.5}. So $X$ is MCP. \end{proof}
\begin{theorem}\label{the3.4} Let $P$ be a bi-bounded complete, dually continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is MCM if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ with a lower bound, a l.s.c. map $\Phi(g):X\rightarrow P$ such that $g\leq \Phi(g)\ll y_0$ and $\Phi(g)\leq \Phi(g')$ whenever $g\leq g'$. \end{theorem}
\begin{proof} The proof is obtained by a modification of that of Theorem \ref{the3.3}. So, we only show the outline of the proof.
Suppose that $X$ is MCM and $F$ is any operator that satisfies conditions (1), (2) and (3) of Lemma \ref{lem2.5}. Let $g:X\rightarrow P$ be locally upper bounded about $(y_i)_{i\in\mathbb{N}}$. For each $n\in \mathbb{N}$, we define $$ U_n(g)=Int\{x\in X: g(x)\leq y_n\} \quad\quad \quad\quad(\ref{the3.4}.1)$$ Then, $\{U_n(g):n\in\mathbb{N}\}$ is an increasing sequence of open subsets of $X$ because of Proposition \ref{prop2.9}. It is clear that $\bigcup_{n\in \mathbb{N}}U_n(g)=X$.
Thus, we can define $\Phi(g):X\rightarrow P$ as follows:
$$\Phi(g)(x)=\left\{ \begin{array}{rcl} y_1 & & {x\in F(1,(U_j(g)))}\\ y_{n+1} & & x\in F(n+1,(U_j(g)))\setminus F(n,(U_j(g)))\\ \end{array} \right. \quad\quad \quad\quad(\ref{the3.4}.2)$$ It is obvious that $\Phi(g)(x)\ll y_0$ and $\Phi(g)$ has a lower bound $y_1$.
Then, $\Phi(g):X\rightarrow P$ is l.s.c. such that $g\leq\Phi(g)$. For a locally upper bounded map $g':X\rightarrow P$ with $g\leq g'$, we have $\Phi(g)\leq \Phi(g')$, which proves the necessity.
Conversely, let $(U_j)_{j\in\mathbb{N}}$ be an increasing open cover of $X$. Define a map $g_{((U_j))}:X\rightarrow P$ by:
$$g_{((U_j))}(x)=\left\{ \begin{array}{rcl} y_1 & & {x\in U_{1}}\\ y_{n+1} & & {x\in U_{n+1}\setminus U_{n}}\\ \end{array} \right. \quad\quad \quad\quad(\ref{the3.4}.3)$$ Then, $g_{((U_j))}$ is locally upper bounded.
Set $F(n,(U_j))=\{x\in X: \Phi(g_{((U_j))})(x)\leq y_n\}$ for each $n\in\mathbb{N}$. Then, $U_n \supseteq F(n,(U_j))$ for each $n\in\mathbb{N}$. Now, let us show that $\bigcup_{n\in\mathbb{N} }F(n,(U_j))=X$. Let $x\in X$. Since $\Phi(g_{((U_j))})(x)\ll y_0=\bigvee\{y_i:i\in\mathbb{N}\}$, and $\{y_i:i\in\mathbb{N}\}$ is directed, there exists $i\in\mathbb{N}$ such that $\Phi(g_{((U_j))})(x)\leq y_i$. That is, $x\in F(i,(U_j))$, which shows that $\bigcup_{n\in\mathbb{N} }F(n,(U_j))=X$.
For an increasing sequence $((G_j))$ of open subsets of $X$ such that $(U_j)\preceq (G_j)$, we have $F((U_j))\preceq F((G_j))$. So $X$ is MCM. \end{proof}
\section{Other results} By analogy with Theorems 3.1 through 3.4, we can prove the following theorems, which extend some earlier results.
\begin{theorem} Let $P$ be a bi-bounded complete, dually continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is countably paracompact if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ with a lower bound, a l.s.c. map $\Phi(g):X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ such that $g\leq \Phi(g)$. \end{theorem}
\begin{theorem} Let $P$ be a bi-bounded complete, dually continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is countably metacompact if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ which is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ with a lower bound, a l.s.c. map $\Phi(g):X\rightarrow P$ such that $g\leq \Phi(g)\ll y_0$. \end{theorem}
Recall that the stratifiable (semi-stratifiable) spaces are the monotone versions of the perfectly normal (perfect) spaces. We get similar results for perfectly normal (perfect) spaces as follows.
\begin{theorem} Let $P$ be a bi-bounded complete, continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is perfectly normal if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ with $F_g\neq\emptyset$, a l.s.c. map $\Phi(g):X\rightarrow P$ such that $g\leq \Phi(g)$ and that $\Phi(g)$ is locally upper bounded about $(y_i)_{i\in\mathbb{N}}$ at each $x\in U_g$. \end{theorem}
\begin{theorem} Let $P$ be a bi-bounded complete, continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$. Then $X$ is perfect if and only if there exists an operator $\Phi$ assigning to each map $g:X\rightarrow P$ with an upper bound $y_0$, a l.s.c. map $\Phi(g):X\rightarrow P$ such that $g\leq \Phi(g)$ and that $\Phi(g)$ is upper bounded at each $x\in U_g$. \end{theorem}
The proofs of Theorem 4.1, 4.2, 4.3 and 4.4 follow in the same way as Theorem 3.1, 3.2, 3.3 and 3.4.
T. Kubiak \cite[Theorem 2.5]{TK} and E. Lane and C. Pan (see \cite{LN} ) gave the characterizations of monotonically normal spaces by monotone insertions of real-valued functions. From viewpoints of Theorem \ref{the3.3}, it is natural to ask about monotone poset-valued insertions on monotonically normal and monotonically countably paracompact spaces.
\begin{definition}\cite{Yama} A poset $P$ endowed with a topology is called sup-continuous if $\bigvee: J_2\rightarrow P$ is continuous, where $J_2=\{\langle x,y\rangle\in P\times P:\exists x\vee y\in P\}$ is endowed with the subspace topology of the product space $P \times P$. Dually, a poset $P$ endowed with a topology is called inf-continuous if $\bigwedge:M_2\rightarrow P$ is continuous, where $M_2=\{\langle x,y\rangle\in P\times P:\exists x\wedge y\in P\}$ is endowed with the subspace topology of the product space $P \times P$. \end{definition} A topological poset is a sup-and inf-continuous poset. Now, define $J_1=M_1=P$ and $J_n=\{\langle x_1,x_2,\cdots, x_n\rangle\in P^n:\exists \bigvee_{i=1}^nx_i\in P\}$ and $M_n=\{\langle x_1,x_2,\cdots,x_n \rangle\in P^n:\exists \bigwedge_{i=1}^nx_i\in P\}$, endowed with the subspace topology of $P^n$, for each $n\in\mathbb{N}$ with $n\geq3$.
\begin{proposition}\label{prop4.6}\cite{Yama} If $P$ is an upper-bounded (resp. lower-bounded) complete and sup-continuous (resp. inf-continuous) poset, then $\bigvee:J_n\rightarrow P$ (resp. $\bigwedge:M_n\rightarrow P$) is continuous for each $n\in\mathbb{N}$. \end{proposition}
\begin{theorem}\label{the4.7} Let $X$ be monotonically normal and monotonically countably paracompact and $P$ be a bi-bounded complete, dually continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$ such that $y_i\ll_d y_{i+1}$, $y_i\ll y_0$ $(i\in \mathbb{N})$ and $y_0=\bigvee_{i\in \mathbb{N}}y_i$. Let $g:X\rightarrow P$ be a u.s.c. map with a lower bound $\perp g$. Assume that $g\leq y_0$, and $\langle g, y_0\rangle$ has interpolated points on $\{y_n; n\in \mathbb{N}\}$ such that $\langle g(x),y_0\rangle$ has interpolated points $y_{j}(x)$ and $y_{k}(x)$ on $\{y_n; n\in \mathbb{N}\}$ with $y_{j}(x)\leq y_{k}(x)$ and a monotone increasing path $\varphi_x:[0,1]\rightarrow [y_j(x),y_0]$ from some lower point of $y_k(x)$ to $y_0$ for each $x\in X$. Then, there exists an operator $\Phi$ assigning to each u.s.c. map $g:X\rightarrow P$, a continuous map $\Phi(g):X\rightarrow P$ such that $g(x)\ll_{d}\Phi(g)(x)\ll y_0$ for each $x\in X$ and that $\Phi(g)\geq \Phi(g')$ whenever $g\leq g'$. \end{theorem}
\begin{proof} There exists an operator $F$ satisfying (1), (2)' and (3) in Lemma \ref{lem2.5}. Let $g:X\rightarrow P$ be a u.s.c. map where $g\leq y_0$, and $\langle g, y_0\rangle$ has interpolated points on $\{y_n: n\in \mathbb{N}\}$. For each $n\in \mathbb{N}$, we define $$ U_n=\{x\in X: g(x)\ll_dy_n\} \quad\quad \quad\quad(\ref{the4.7}.1)$$ Then, $\{U_n:n\in\mathbb{N}\}$ is a increasing sequence of open subsets of $X$ because of Proposition \ref{prop2.9}. It is clear that $\bigcup_{n\in \mathbb{N}}U_n=X$. In fact, for each $x\in X$, there exists $n\in\mathbb{N}$ such that $g(x)\ll_d y_n\ll y_0$. It provides that $x\in U_n$.
Hence, $F((U_j))=(F_n)_{n\in\mathbb{N}}$ is a sequence of closed subsets of $X$ such that $$F_n\subset U_n \text{~for each~} n\in\mathbb{N};$$ $$\bigcup_{n\in\mathbb{N}}Int F_n=X;$$ $$F_n\subset F_{n+1}, n\in\mathbb{N};$$ Similarly, $F((Int F_{n}))=(E_n)_{n\in\mathbb{N}}$ is a sequence of closed subsets of $X$ such that $$E_n\subset Int F_{n} \text{~for each~} n\in\mathbb{N};$$ $$\bigcup_{n\in \mathbb{N}}IntE_n=\bigcup_{n\in\mathbb{N}}Int F(n, (U_j))=X;$$ $$E_n\subset E_{n+1}, n\in\mathbb{N};$$ And $F((Int E_{n}))=(L_n)_{n\in\mathbb{N}}$ is a sequence of closed subsets of $X$ such that $$L_n\subset Int E_{n} \text{~for each~} n\in\mathbb{N};$$ $$\bigcup_{n\in \mathbb{N}}IntL_n=\bigcup_{n\in\mathbb{N}}Int E(n, (U_j))=X;$$ $$L_n\subset L_{n+1}, n\in\mathbb{N};$$ Let $H_n=U_n\setminus L_{n-1}$, $G_n=IntF_n\setminus E_{n-1}$ for each $n\in \mathbb{N}$ and $H_1=U_1$, $G_1=IntF_1$. It is obvious that $\{H_n:n\in\mathbb{N}\}$ and $\{G_n:n\in\mathbb{N}\}$ are locally finite open covers of $X$ such that $\overline{G_n}\subset H_n\subset U_n$ for each $n\in\mathbb{N}$.
Since $X$ is monotonically normal, we can take a continuous function $j_n:X\rightarrow [0,1]$ by $$j_n(x)=\left\{ \begin{array}{rcl} 0 & & {x\in \overline{G_n}}\\ 1 & & {x\in X\setminus H_n}\\ \end{array} \right. \text{~for each~} n\in\mathbb{N}.\quad\quad \quad\quad(\ref{the4.7}.2)$$
For each $n\in\mathbb{N}$, there exists $t_n\in P$ and a continuous monotone increasing map $\varphi_n:[0,1]\rightarrow [y_n,y_0]\subset P$ such that $$ \varphi_n(0)=t_n, \text{~and~} \varphi_n(1)=y_0, y_n\leq t_n\leq y_{n+1}.\quad\quad \quad\quad(\ref{the4.7}.3)$$ Define a continuous map $k_n:X\rightarrow[y_n,y_0]\subset P$ by $$k_n=\varphi_n\circ j_n \text{~for each~} n\in\mathbb{N}.\quad\quad \quad\quad(\ref{the4.7}.4)$$ Thus, we can define a continuous map $\Phi(g):X\rightarrow P$ as follows: $$\Phi(g)(x)=\bigwedge_{n\in\mathbb{N}}k_n(x)\quad\quad \quad\quad(\ref{the4.7}.5)$$ for each $x\in X$. Now, to show that $\Phi(g)$ is defined, we set $\delta_x=\{n\in \mathbb{N}:x\in H_n\}$ for each $x\in X$. For each $n\notin \delta_x$, $k_n(x)=\varphi_n\circ j_n(x)=\varphi_n(1)=y_0$. For each $n\in \delta_x$, $k_n(x)=\varphi_n\circ j_n(x)\geq y_n$ because of the range of $\varphi_n$ is $[y_n,y_0]$. It follows from $H_n\subset U_n$ that $$g(x)\ll_dy_n\leq k_n(x) \text{~for each~} n\in \delta_x.$$ This shows that $\{k_n(x):n\in \delta_x\}$ and $\{y_n:n\in \delta_x\}$ have an lower bound $g(x)$, thus $\bigwedge_{n\in\delta_x}k_n(x)$ and $\bigwedge_{n\in\delta_x}y_n$ exist. Hence, by (1) of \cite[Proposition I-1.2 (3)]{Gi}, we obtain that $$g(x)\ll_d\bigwedge_{n\in\delta_x}y_n\leq\bigwedge_{n\in\delta_x}k_n(x)=\bigwedge_{n\in\delta_x}k_n(x)\wedge y_0=\Phi(g)(x).$$ Therefore, $\Phi(g)$ is defined, and we also have $g\ll_d \Phi(g)$.
Next, to show $\Phi(g)\ll y_0$, let $x\in X$. Since $\{G_n:n\in\mathbb{N}\}$ is a cover of $X$, take $n'\in\mathbb{N}$ so as to satisfy $x\in G_{n'}$. Then, since $x\in \overline{G_{n'}}$, it follows from (\ref{the4.7}.2), (\ref{the4.7}.3) and (\ref{the4.7}.4) that $$\Phi(g)(x)\leq k_{n'}(x)=\varphi_{n'}\circ j_{n'}(x)=\varphi_{n'}(0)=t_{n'}\leq y_{n'+1}\ll y_0.$$ Thus, $\Phi(g)\ll y_0$ holds, because of (2) of Lemma \ref{lem2.6}.
Next, we show that $\Phi(g)$ is continuous. Let $x\in X$, and take a neighborhood $O_x$ of $x$ and a finite subset $\delta'_x$ of $\mathbb{N}$ such that $O_x\bigcap H_n= \emptyset$ for each $n\notin \delta'_x$; this is possible since $\{H_n:n\in\mathbb{N}\}$ is locally finite. Then, $\delta_y\subset \delta'_x$ for each $y\in O_x$. Since $\Phi(g)(y)$ can be re-expressed as $\Phi(g)(y)=\bigwedge_{n\in\delta'_x}k_n(y)\wedge y_0=\bigwedge_{n\in\delta'_x}k_n(y)$ for each $y\in O_x$, we have that $\langle k_n(y)\rangle_{n\in\delta'_x}\in M_{|\delta'_x|}$ for each $y\in O_x$. This means $(\Delta_{n\in\delta'_x}k_n)(O_x)\subset M_{|\delta'_x|}$, where $\Delta_{n\in\delta'_x}k_n$ is the diagonal of mappings $\{k_n:n\in\delta'_x\}$. Hence, on $O_x$, $\Phi(g)$ is the composition $\bigwedge\circ(\Delta_{n\in\delta'_x}k_n)$ of $\Delta_{n\in\delta'_x}k_n$ and $\bigwedge:M_{|\delta'_x|}\rightarrow P$. By Proposition \ref{prop4.6}, $\Phi(g)$ is continuous at $x$.
Finally, let $g':X\rightarrow P$ be a map with $g\leq g'$. Then $$\{x\in X: g'(x)\ll_dy_n\}\subseteq \{x\in X: g(x)\ll_dy_n\}$$ and hence, $U_n\supseteq U'_n$ for each $n\in\mathbb{N}$. Therefore we have $F_n\supseteq F'_n$, $E_n\supseteq E'_n$ and $L_n\supseteq L'_n$ for each $n\in\mathbb{N}$, and $$\bigcup_{n\in \mathbb{N}}U'_n=\bigcup_{n\in\mathbb{N}}Int F'_n=\bigcup_{n\in\mathbb{N}}Int E'_n=\bigcup_{n\in\mathbb{N}}Int L'_n$$
For each $x\in X$, there exists $n\in \mathbb{N}$ such that $x\in G'_n=IntF'_n\setminus E'_{n-1}$. That is $x\in IntF'_n\subseteq IntF_n$. By (\ref{the4.7}.2), (\ref{the4.7}.3), (\ref{the4.7}.4) and the monotonicity of $\varphi_n(x)$ we can get $k_n(x)\geq t_n=\varphi'_n(0)=\varphi'_n\circ j'_n(x)=k'_n(x)$. This implies that $\Phi(g)\geq \Phi(g')$ whenever $g\leq g'$. This completes the proof. \end{proof}
For a map $f:$ $X\rightarrow P$ and a point $y\in P$, we define $$U_{f,y}=\{x\in X: \langle f(x),y\rangle \text{~has an interpolated point~}\}$$ and $U_{y,f}=\{x\in X: \langle y,f(x)\rangle\text{~has an interpolated point~}\}$. We have known that monotonically normal and monotonically countably paracompact spaces are stratifiable, then we have the following question:
\begin{question} Let $X$ be stratifiable and $P$ be a bi-bounded complete, dually continuous poset with a $\ll_{d}$-increasing $\ll$-limit point $y_0$ such that $y_i\ll_d y_{i+1}$, $y_i\ll y_0$ $(i\in \mathbb{N})$ and $y_0=\bigvee_{i\in \mathbb{N}}y_i$. Do the following conditions exist?
Let $g:X\rightarrow P$ be an u.s.c. map with a lower bound $\perp g$. Assume that $g\leq y_0$, and $\langle g, y_0\rangle$ has interpolated points on $\{y_n; n\in \mathbb{N}\}$ such that $\langle g(x),y_0\rangle$ has interpolated points $y_{j}(x)$ and $y_{k}(x)$ on $\{y_n; n\in \mathbb{N}\}$ with $y_{j}(x)\leq y_{k}(x)$ and a monotone increasing path $\varphi_x:[0,1]\rightarrow [y_j(x),y_0]$ from some lower point of $y_k(x)$ for each $x\in X$ to $y_0$. Then, there exist an operator $\Phi$ assigning to each u.s.c. map $g:X\rightarrow P$, a continuous map $\Phi(g):X\rightarrow P$ such that $g(x)\leq\Phi(g)(x)\leq y_0$, $g(x)\ll_{d}\Phi(g)(x)\ll y_0$ for each $x\in U_{g,y_0}$ and that $\Phi(g)\geq \Phi(g')$ whenever $g\leq g'$. \end{question}
\vskip0.9cm
\end{document} |
\begin{document}
\begin{abstract} Let $G$ be a finite group with cyclic Sylow $p$-subgroup, and let $k$ be a field of characteristic $p$. Then $H^*(BG;k)$ and $H_*(\Omega BG{}^{^\wedge}_p;k)$ are $A_\infty$ algebras whose structure we determine up to quasi-isomorphism. \end{abstract}
\title{Massey products in the homology of the loopspace of a
$p$-completed classifying space:
finite groups with cyclic Sylow $p$-subgroups}
\section{Introduction} The general context is that we have a finite group $G$, and a field $k$ of characteristic $p$. We are interested in the differential graded cochain algebra $C^*(BG;k)$ and the differential graded algebra $C_*(\Omega (BG{}^{^\wedge}_p);k)$ of chains on the loop space: these two are Koszul dual to each other, and the Eilenberg-Moore and Rothenberg-Steenrod spectral sequences relate the cohomology ring $H^*(BG;k)$ to the homology ring $H_*(\Omega (BG{}^{^\wedge}_p);k)$. Of course if $G$ is a $p$-group, $BG$ is $p$-complete so $\Omega (BG{}^{^\wedge}_p)\simeq G$, but in general $H_*(\Omega (BG{}^{^\wedge}_p); k)$ is infinite dimensional. Henceforth we will omit the brackets from $\Omega (BG{}^{^\wedge}_p)$. \\[1ex]
We consider a simple case where the two rings are not formal, but we can identify the $A_{\infty}$ structures precisely (see Section \ref{sec:Ainfty} for a brief summary on $A_{\infty}$-algebras). From now on we suppose specifically that $G$ is a finite group with cyclic Sylow $p$-subgroup $P$, and let $BG$ be its classifying space. Then the inclusion of the Sylow $p$-normaliser $N_G(P) \to G$ and the quotient map $N_G(P) \to N_G(P)/O_{p'}N_G(P)$ induce mod $p$ cohomology equivalences \[ B(N_G(P)/O_{p'}N_G(P)) \leftarrow BN_G(P) \to BG, \] and hence homotopy equivalences after $p$-completion \[ B(N_G(P)/O_{p'}N_G(P)){}^{^\wedge}_p \xleftarrow{\ \sim\ } BN_G(P){}^{^\wedge}_p
\xrightarrow{\ \sim \ } BG{}^{^\wedge}_p. \] Here, $O_{p'}N_G(P)$ denotes the largest normal $p'$-subgroup of $N_G(P)$. Thus $N_G(P)/O_{p'}N_G(P)$ is a semidirect product $\mathbb Z/p^n\rtimes \mathbb Z/q$, where $q$ is a divisor of $p-1$, and
$\mathbb Z/q$ acts faithfully as a group of automorphisms of $\mathbb Z/p^n$. In particular, the isomorphism type of $N_G(P)/O_{p'}N_G(P)$ only depends on $|P|=p^n$ and the inertial index $q=|N_G(P):C_G(P)|$, and therefore so does the homotopy type of $BG{}^{^\wedge}_p$. Our main theorem determines the multiplication maps $m_i$ in the $A_\infty$ structure on $H^*(BG;k)$ and $H_*(\Omega (BG{}^{^\wedge}_p);k)$ arising from $C^*(BG;k)$ and $C_*(\Omega (BG{}^{^\wedge}_p); k)$ respectively. We will suppose from now on that $p^n>2, q>1$ since the case of a $p$-group is well understood.
The starting point is the cohomology ring $$H^*(BG; k)=H^*(B\mathbb Z/p^n; k)^{\mathbb Z/q}=k[x]\otimes \Lambda(t)
\mbox{ with }|x|=-2q, |t|=-2q+1. $$ There is a preferred generator $t\in H^1(B\mathbb Z/p^n;k)=\mathrm{Hom}(\mathbb Z/p^n,k)$ and we take $x$ to be the $n$th Bockstein of $t$.
Before stating our result we should be clear about grading and signs.
\begin{remark} \label{rem:deg} We will be discussing both homology and cohomology, so we should be explicit that everything is graded homologically, so that differentials always lower degree. Explicitly, the degree of an element of $H^i(G;k)$ is $-i$. \end{remark}
\begin{remark} \label{rem:signs} Sign conventions for Massey products and $A_{\infty}$ algebras mean that a specific sign will enter repeatedly in our statements, so for brevity we write $$\epsilon (s)= \begin{cases} +1 & s\equiv 0, 1 \mod 4\\ -1 &s\equiv 2, 3 \mod 4\\ \end{cases}. $$ \end{remark}
\begin{theorem} Let $G$ be a finite group with cyclic Sylow $p$-subgroup $P$ of order $p^n$ and inertial index $q$ so that $$H^*(BG;k)
=k[x]\otimes \Lambda(t) \mbox{ with } |x|=-2q, |t|=-2q+1 \mbox{ and } \beta_nt=x$$ Up to quasi-isomorphism, the $A_\infty$ structure on $H^*(BG;k)$ is determined by \[ m_{p^n}(t,\dots,t)=\epsilon (p^n) x^{h} \] where $h=p^n-(p^n-1)/q$. This implies \[ m_{p^n}(x^{j_1}t,\dots,x^{j_{p^n}}t)=\epsilon (p^n) x^{h+j_1+\dots+j_{p^n}}\] for all $j_1, \ldots, j_{p^n}\geq 0$.
All $m_i$ for $i>2$ on all other $i$-tuples of monomials give zero.
If $q>1$ and $p^n\ne 3$ then \[ H_*(\Omega BG{}^{^\wedge}_p;k) = k[\tau] \otimes \Lambda(\xi) \mbox{ where }
|\tau|=2q-2, |\xi|=2q-1. \]
Up to quasi-isomorphism, the $A_\infty$ structure is determined by \[ m_h(\xi,\dots,\xi )=\epsilon (h) \tau^{p^n}. \] This implies \[ m_h(\tau^{j_1}\xi,\dots,\tau^{j_h}\xi)=\epsilon (h) \tau^{p^n+j_1+\dots+j_h} \] for all $j_1, \ldots, j_{h}\geq 0$. All $m_i$ for $i>2$ on all other $i$-tuples of monomials give zero.
If $q>1$ and $p^n=3$ then $q=2$ and \[ H_*(\Omega BG{}^{^\wedge}_p;k) = k[\tau,\xi]/(\xi^2+\tau^3), \] and all $m_i$ are zero for $i>2$. \end{theorem}
\section{The group algebra and its cohomology}
We assume from now on, without loss of generality, that $G$ has a normal cyclic Sylow $p$-subgroup
$P=C_G(P)$, with inertial index $q=|G:P|$. We shall assume that $q>1$, which then forces $p$ to be odd. For notation, let \[ G=\langle g,s\mid g^{p^n}=1,\ s^q=1,\ sgs^{-1}=g^\gamma\rangle\cong \mathbb Z/p^n\rtimes\mathbb Z/q, \] where $\gamma$ is a primitive $q$th root of unity modulo $p^n$. Let $P=\langle g\rangle$ and $H=\langle s\rangle$ as subgroups of $G$.
Let $k$ be a field of characteristic $p$. The action of $H$ on
$kP$ by conjugation preserves the radical series, and since $|H|$ is not divisible by $p$, there are invariant complements. Thus we may choose an element $U\in J(kP)$ such that $U$ spans an $H$-invariant complement of $J^2(kP)$ in $J(kP)$. It can be checked that \[ U = \sum_{\substack{1\leqslant j \leqslant p^n-1, \\(p,j)=1}} g^j/j \] is such an element, and that $sUs^{-1}=\gamma U$. This gives us the following presentation for $kG$: \[ kG = k\langle s,U \mid U^{p^n}=0,\ s^q=1,\ sU = \gamma Us
\rangle. \]
We shall regard $kG$ as a $\mathbb Z[\frac{1}{q}]$-graded
algebra with $|s|=0$ and $|U|=1/q$. Then the bar resolution is doubly graded, and taking homomorphisms into $k$, the cochains $C^*(BG;k)$ inherit a double grading. The differential decreases the homological grading and preserves the internal grading. Thus the cohomology $H^*(G,k)=H^*(BG;k)$ is doubly graded: \[ H^*(BG;k) = k[x] \otimes \Lambda(t) \]
where $|x|=(-2q,p^n)$, $|t|=(-2q+1,h)$, and $h=p^n-(p^n-1)/q$. Here, the first degree is homological, the second internal. The Massey product $\langle t,t,\dots,t\rangle$ ($p^n$ repetitions) is equal to $-x^h$. This may easily be determined by restriction to $P$, where it is well known that the $p^n$-fold Massey product of the degree one exterior generator is a non-zero degree two element. The usual convention is to make the constant $-1$, because this Massey product is minus the $n$th Bockstein of $t$ \cite[Theorem 14]{Kraines:1966a}.
\section{$A_\infty$-algebras} \label{sec:Ainfty}
An $A_{\infty}$-algebra over a field is a $\mathbb Z$-graded vector space $A$ with graded maps $m_n: A^{\otimes n}\rightarrow A$ of degree $n-2$ for $n\geq 1$ satisfying $$\sum_{r+s+t=n}(-1)^{r+st}m_{r+1+t}(id^{\otimes r}\otimes m_s \otimes id^{\otimes t})=0 $$ for $n\geq 1$. The map $m_1$ is therefore a differential, and the map $m_2$ induces a product on $H_*(A)$.
A theorem of Kadeishvili~\cite{Kadeishvili:1982a} (see also Keller~\cite{Keller:2001a,Keller:2002a} or Merkulov~\cite{Merkulov:1999a}) may be stated as follows. Suppose that we are given a differential graded algebra $A$, over a field $k$. Let $Z^*(A)$ be the cocycles, $B^*(A)$ be the coboundaries, and $H^*(A)=Z^*(A)/B^*(A)$. Choose a vector space splitting $f_1\colon H^*(A) \to Z^*(A) \subseteq A$ of the quotient. Then this gives by an inductive procedure an $A_\infty$ structure on $H^*(A)$ so that the map $f_1$ is the degree one part of a quasi-isomorphism of $A_\infty$-algebras.
If $A$ happens to carry auxiliary gradings respected by the product structure and preserved by the differential, then it is easy to check from the inductive procedure that the maps in the construction may be chosen so that they also respect these gradings. It then follows that the structure maps $m_i$ of the $A_\infty$ structure on $H^*(A)$ also respect these gradings.
Let us apply this to $H^*(BG;k)$. We examine the elements $m_i(t,\dots,t)$. By definition, we have $m_1(t)=0$ and $m_2(t,t)=0$. The degree of $m_i(t,\dots,t)$ is $i$ times the degree of $t$, increased in the homological direction by $i-2$. This gives
\[ |m_i(t,\dots,t)| = i(-2q+1,h)+(i-2,0) =(-2iq+2i-2,ih). \] The homological degree is even, so if $m_i(t, \cdots , t)$ is non-zero then it is a multiple of a power of $x$. Comparing degrees, if $m_i(t,\dots,t)$ is a non-zero multiple of $x^\alpha$ then we have \[ 2iq-2i+2=2\alpha q,\qquad ih = \alpha p^n. \] Eliminating $\alpha$, we obtain $(iq-i+1)p^n=ihq$. Substituting $h=p^n-(p^n-1)/q$, this gives $i=p^n$. Finally, since the Massey product of $p^n$ copies of $t$ is equal to $-x^h$, it follows that $m_{p^n}(t,\dots,t)=\epsilon (p^n) x^h$, where the sign is as defined in Remark \ref{rem:signs} \cite[Theorem 3.1]{LuPalmieriWuZhang:2009}. Thus we have \[ m_i(t,\dots,t) = \begin{cases} \epsilon (p^n) x^h & i=p^n \\ 0 & \text{otherwise.} \end{cases} \] We shall elaborate on this argument in a more general context in the next section, where we shall see that the rest of the $A_\infty$ structure is also determined in a similar way.
\section{$A_\infty$ structures on a polynomial tensor exterior algebra}
In this section, we shall examine the following general situation. Our goal is to establish that there are only two possible $A_\infty$ structures satisfying Hypothesis \ref{hyp:grading} below, and that the Koszul dual also satisfies the same hypothesis with the roles of $a$ and $b$, and of $h$ and $\ell$ reversed.
\begin{hypothesis} \label{hyp:grading} $A$ is a $\mathbb Z\times\mathbb Z$-graded $A_\infty$-algebra over a field $k$, where the operators $m_i$ have degree $(i-2,0)$, satisfying \begin{enumerate} \item $m_1=0$, so that $m_2$ is strictly associative, \item ignoring the $m_i$ with $i>2$, the algebra $A$ is
$k[x] \otimes \Lambda(t)$ where $|x|=(-2a,\ell)$ and $|t|=(-2b-1,h)$, and \item $ha-\ell b = 1$. \end{enumerate} \end{hypothesis}
\begin{remarks} (i) The $A_\infty$-algebra $H^*(BG;k)$ of the last section satisfies this hypothesis, with $a=q$, $b=q-1$, $h=p^n-(p^n-1)/q$, $\ell=p^n$.
(ii) By comparing degrees, if we have $m_\ell(t,\dots,t)=\epsilon (p^n) x^h$ then $(2b+1)\ell + 2-\ell = 2ah$ and so $ha-\ell b = 1$. This explains the role of part (3) of the hypothesis. The consequence is, of course, that $a$ and $b$ are coprime, and so are $h$ and $\ell$. \end{remarks}
\begin{lemma} If $m_i(t,\dots,t)$ is non-zero, then $i=\ell > 2$ and $m_\ell(t,\dots,t)$ is a multiple of $x^h$. \end{lemma} \begin{proof} The argument is the same as in the last section. The degree of
$m_i(t,\dots,t)$ is $i|t| +(i- 2,0) = (-2ib - 2, ih)$. Since the homological degree is even, if $m_i(t,\dots,t)$ is non-zero then it is a multiple of some power of $x$, say $x^\alpha$. Then we have \[ 2ib+2 = 2\alpha a,\qquad ih = \alpha\ell. \] Eliminating $\alpha$ gives $(ib+1)\ell=iha$, and so using $ha-\ell b =1$ we have $i=\ell$. Substituting back gives $\alpha = h$. \end{proof}
Elaborating on this argument gives the entire $A_\infty$ structure. If $m_\ell(t,\dots,t)$ is non-zero, then by rescaling the variables $t$ and $x$ if necessary we can assume that $m_\ell(t,\dots,t)= x^h$ (note that we can even do this without extending the field, since $\ell$ and $h$ are coprime).
\begin{proposition} If $m_\ell(t,\dots,t)=0$ then all $m_i$ are zero for $i>2$. If $m_\ell(t,\dots,t)= x^h$ then $m_\ell(x^{j_1}t,\dots,x^{j_\ell}t)= x^{h+j_1+\dots+j_\ell}$, and all $m_i$ for $i>2$ on all other $i$-tuples of monomials give zero. \end{proposition} \begin{proof} All monomials live in different degrees, so we do not need to consider linear combinations of monomials. Suppose that $m_i(x^{j_1}t^{\varepsilon_1},\dots,x^{j_i}t^{\varepsilon_i})$ is some constant multiple of $x^jt^\varepsilon$, where each of $\varepsilon_1,\dots,\varepsilon_i,\varepsilon$ is either zero or one. Then comparing degrees, we have
\[ (j_1+\dots+j_i)|x| + (\varepsilon_1+\dots+\varepsilon_i)|t| + (i-2,0) = j|x| + \varepsilon|t|. \] Setting \[ \alpha=j_1+\dots+j_i-j,\qquad \beta = \varepsilon_1+\dots+\varepsilon_i-\varepsilon \] we have $\beta \leqslant i$, and \[ \alpha(-2a,\ell) + \beta(-2b-1,h) + (i-2,0) = 0. \] Thus \[ 2\alpha a + 2\beta b + \beta + 2 - i = 0, \qquad \alpha \ell +
\beta h = 0. \] Eliminating $\alpha$, we obtain \[ -2\beta ha + 2\beta \ell b + \beta \ell + 2 \ell - i \ell = 0. \] Since $ha-\ell b=1$, this gives $\beta = \ell (i-2) /(\ell -2)$. Combining this with $\beta \leqslant i$ gives $i\leqslant \ell$. If $i<\ell$ then $\beta$ is not divisible by $\ell$, and so $\alpha \ell + \beta h = 0$ cannot hold. So we have $\beta=i=\ell$, $\varepsilon_1=\dots=\varepsilon_\ell=1$, $\varepsilon=0$, $\alpha=-h$, and $j=h+j_1+\dots+j_\ell$. Finally, the identities satisfied by the $m_i$ for an $A_\infty$ structure show that all the constant multiples have to be the same, hence all equal to zero or, after rescaling, all equal to one. \end{proof}
Combining the lemma with the proposition, we obtain the following.
\begin{theorem}\label{th:Ainfty} Under the hypothesis above, if $\ell > 2$ then there are two possible $A_\infty$ structures on $A$. There is the formal one, where $m_i$ equal to zero for $i>2$, and the non-formal one, where after replacing $x$ and $t$ by suitable multiples, the only non-zero $m_i$ with $i>2$ is $m_\ell$, and the only non-zero values on monomials are given by \begin{equation*} m_\ell(x^{j_1}t,\dots,x^{j_\ell}t)=x^{h+j_1+\dots+j_\ell}. \end{equation*} \end{theorem}
\begin{theorem} Let $G=\mathbb Z/p^n \rtimes \mathbb Z/q$ as above, and $k$ a field of characteristic $p$. Then the $A_\infty$ structure on $H^*(G,k)$ given by Kadeishvili's theorem may be taken to be the non-formal possibility named in the above theorem, with $a=q$, $b=q-1$, $h=p^n-(p^n-1)/q$, $\ell=p^n$. \end{theorem} \begin{proof} Since we have $m_{p^n}(t,\dots,t)=\epsilon (p^n) x^h$, the formal possibility does not hold. \end{proof}
\begin{remark} Dag Madsen's thesis~\cite{Madsen:2002a} has an appendix in which the $A_\infty$ structure is computed for the cohomology of a truncated polynomial ring, reaching similar conclusions by more direct methods. \end{remark}
\section{Loops on $BG{}^{^\wedge}_p$}
In general for a finite group $G$ we have $H^*(BG{}^{^\wedge}_p;k)\cong H^*(BG;k)=H^*(G,k)$ and $\pi_1(BG{}^{^\wedge}_p)=G/O^p(G)$, the largest $p$-quotient of $G$. In our case, $G=\mathbb Z/p^n\rtimes\mathbb Z/q$ with $q>1$, we have $G=O^p(G)$ and so $BG{}^{^\wedge}_p$ is simply connected. So the Eilenberg--Moore spectral sequence converges to the homology of its loop space: \[ \mathsf{Tor}_{*,*}^{H^*(G,k)}(k,k) \Rightarrow H_*(\Omega BG{}^{^\wedge}_p;k). \] The internal grading on $C^*(BG;k)$ gives this spectral sequence a third grading that is preserved by the differentials, and $H_*(\Omega BG{}^{^\wedge}_p;k)$ is again doubly graded. Since $H^*(G,k)=k[x] \otimes
\Lambda(t)$ with $|x|=(-2q,p^n)$ and $|t|=(-2q+1,h)$, it follows that the $E^2$ page of this spectral sequence is $k[\tau] \otimes \Lambda(\xi)$
where $|\xi|=(-1,2q,p^n)$ and $|\tau|=(-1,2q-1,h)$ (recall $h=p^n-(p^n-1)/q$). Provided that we are not in the case $h=2$, which only happens if $p^n=3$, ungrading $E^\infty$ gives \[ H_*(\Omega BG{}^{^\wedge}_p;k) = k[\tau] \otimes \Lambda(\xi) \]
with $|\tau|=(2q-2,h)$ and $|\xi|=(2q-1,p^n)$.
In the exceptional case $h=2$, $p^n=3$, we have $q=2$, and the group $G$ is the symmetric group $\Sigma_3$ of degree three. An explicit computation (for example by squeezed resolutions \cite{Benson:2009b}) gives \[ H_*(\Omega (B\Sigma_3){}^{^\wedge}_3;k) = k[\tau,\xi]/(\xi^2+\tau^3) \]
with $|\tau|=(2,2)$ and $|\xi|=(3,3)$, and the two gradings collapse to a single grading.
Applying Theorem~\ref{th:Ainfty}, and using the fact that either formal case is Koszul dual to the other, we have the following.
\begin{theorem} Suppose that $p^n\ne 3$. Then the $A_\infty$ structure on $H_*(\Omega BG{}^{^\wedge}_p;k)=k[\tau]\otimes\Lambda(\xi)$ is given by \[ m_h(\tau^{j_1}\xi,\dots,\tau^{j_h}\xi)=\epsilon (h) \tau^{p^n+j_1+\dots+j_h}, \] and for $i>2$, all $m_i$ on all other $i$-tuples of monomials give zero.\qed \end{theorem}
Using \cite{LuPalmieriWuZhang:2009} again, we may reinterpret this in terms of Massey products.
\begin{corollary} In $H_*(\Omega BG{}^{^\wedge}_p)$, the Massey products $\langle \xi,\dots,\xi \rangle$ ($i$ times) vanish for $0<i<h$, and give $-\tau^{p^n}$ for $i=h$.\qed \end{corollary}
Note that the exceptional case $p^n=3$ also fits the corollary, if we interpret a $2$-fold Massey product as an ordinary product.
\newcommand{\noopsort}[1]{} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2}
\end{document}
\begin{document}
\title{\LARGE \bf A Pressure Associated with a Weak Solution to the Navier--Stokes Equations with Navier's Boundary Conditions}
\author{Ji\v{r}\'{\i} Neustupa, \ \v{S}\'arka Ne\v{c}asov\'a, \ Petr Ku\v{c}era \footnote{Authors' address: Czech Academy of Sciences, Institute of Mathematics, \v{Z}itn\'a 25, 115 67 Praha 1, Czech Re\-pub\-lic, e--mails: neustupa@math.cas.cz, matus@math.cas.cz, petr.kucera@cvut.cz}}
\date{}
\maketitle
\begin{abstract} We show that if $\mathbf{u}$ is a weak solution to the Navier--Stokes initial--boundary value problem with Navier's slip boundary conditions in $Q_T:=\Omega\times(0,T)$, where $\Omega$ is a domain in ${\mathbb R}^3$, then an associated pressure $p$ exists as a distribution with a certain structure. Furthermore, we also show that if $\Omega$ is a ``smooth'' domain in ${\mathbb R}^3$ then the pressure is represented by a function in $Q_T$ with a certain rate of integrability. Finally, we study the regularity of the pressure in sub-domains of $Q_T$, where $\mathbf{u}$ satisfies Serrin's integrability conditions. \end{abstract}
\noindent {\it AMS math.~classification (2010):} \
\noindent {\it Keywords:} \ Navier--Stokes equations, Navier's slip boundary conditions, weak solutions, associated pressure, regularity.
\section{Introduction} \label{S1}
{\bf 1.1. The Navier--Stokes initial--boundary value problem with Navier's boundary conditions.} \ Let $T>0$ and $\Omega$ be a locally Lipschitz domain in ${\mathbb R}^3$, satisfying the condition
\begin{list}{} {\setlength{\topsep 2pt} \setlength{\itemsep 1pt} \setlength{\leftmargin 18pt} \setlength{\rightmargin 0pt} \setlength{\labelwidth 10pt}}
\item[(i)] {\it there exists a sequence of bounded Lipschitz domains $\Omega_1\subseteq\Omega_2\subseteq\dots$ such that $\Omega=\bigcup_{n=1}^{\infty}\Omega_n$ and
$(\partial\Omega_n\cap\Omega)\subset\{\mathbf{x}\in{\mathbb R}^3;\ |\mathbf{x}|\geq n\}$ for all $n\in{\mathbb N}$.}
\end{list}
\noindent Note that condition (i) is automatically satisfied e.g.~if $\Omega={\mathbb R}^3$ or $\Omega$ is a half-space in ${\mathbb R}^3$ or $\Omega$ is a bounded or exterior Lipschitz domain in ${\mathbb R}^3$. Put $Q_T:=\Omega\times(0,T)$ and $\Gamma_T:= \partial\Omega\times(0,T)$. We deal with the Navier--Stokes system \begin{align} \partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}+\nabla p\ &=\ \nu\Delta\mathbf{u}+\mathbf{f} && \mbox{in}\ Q_T, \label{1.1} \\
\mathrm{div}\,\mathbf{u}\ &=\ 0 && \mbox{in}\ Q_T \label{1.2} \end{align} with the slip boundary conditions \begin{equation} \mbox{a)} \quad \mathbf{u}\cdot\mathbf{n}=0, \qquad \mbox{b)} \quad [\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}]_{\tau}+\gamma\mathbf{u}=\mathbf{0} \qquad \mbox{on}\ \Gamma_T \label{1.3} \end{equation} and the initial condition \begin{equation}
\mathbf{u}\, \bigl|_{t=0} \bigr.\ =\ \mathbf{u}_0. \label{1.4} \end{equation} Equations (\ref{1.1}), (\ref{1.2}) describe the motion of a viscous incompressible fluid in domain $\Omega$ in the time interval $(0,T)$. The unknowns are $\mathbf{u}$ (the velocity) and $p$ (the pressure). Factor $\nu$ in equation (\ref{1.1}) denotes the kinematic coefficient of viscosity (it is supposed to be a positive constant) and $\mathbf{f}$ denotes an external body force. The outer normal vector field on $\Omega$ is denoted by $\mathbf{n}$, $\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})$ denotes the dynamic stress tensor, $-\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}$ is the force with which the fluid acts on the boundary of $\Omega$ (we put the minus sign in front of $\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}$ because $\mathbf{n}$ is the outer normal vector and we express the force acting on $\partial\Omega$ from the interior of $\Omega$), subscript $\tau$ denotes the tangential component and $\gamma$ (which is supposed to be a nonnegative constant) is the coefficient of friction between the fluid and the boundary of $\Omega$. The density of the fluid is supposed to be constant and equal to one. In an incompressible Newtonian fluid, the dynamic stress tensor satisfies $\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})=2\nu\hbox to 0.7pt{}{\mathbb D}(\mathbf{u})$, where the rate of deformation tensor ${\mathbb D}(\mathbf{u})$ equals $(\nabla\mathbf{u})_s$ (the symmetric part of $\nabla\mathbf{u}$).
Equations (\ref{1.1}), (\ref{1.2}) are mostly studied together with the no--slip boundary condition \begin{equation} \mathbf{u}\ =\ \mathbf{0} \label{1.5} \end{equation} on $\Gamma_T$. However, increasing attention in recent years has also been given to boundary conditions (\ref{1.3}), which have a good physical sense. While condition (\ref{1.3}a) expresses the impermeability of $\partial\Omega$, condition (\ref{1.3}b) expresses the requirement that the tangential component of the force with which the fluid acts on the boundary be proportional to the tangential velocity. Conditions (\ref{1.3}) are mostly called Navier's boundary conditions, because they were proposed by H.~Navier in the first half of the 19th century.
\noindent {\bf 1.2. Briefly on the qualitative theory of the problem (\ref{1.1})--(\ref{1.4}).} \ As to the qualitative theory for the problem (\ref{1.1})--(\ref{1.4}), it is necessary to note that it is not at the moment so elaborated as in the case of the no-slip boundary condition (\ref{1.5}). Nevertheless, the readers can find the definition of a weak solution to the problem (\ref{1.1})--(\ref{1.4}) and the proof of the global in time existence of a weak solution e.g.~in the papers \cite{ChQi} (with $\mathbf{f}=\mathbf{0}$), \cite{NePe2} (in a time-varying domain $\Omega$) and \cite{Saal} (in a half-space). We repeat the definition in section \ref{S3}. Theorems on the local in time existence of a strong solution are proven e.g.~in \cite{ChQi} (for $\mathbf{f}=\mathbf{0}$) and \cite{KuNe} (in a smooth bounded domain $\Omega$). Steady problems are studied in \cite{AmRe1} and \cite{AmRe2}.
\noindent {\bf 1.3. On the contents and results of this paper.} \ We shall see in section \ref{S3} that the definition of a weak solution to the problem (\ref{1.1})--(\ref{1.4}) does not explicitly contain the pressure. (This situation is well known from the theory of the Navier--Stokes equations with the no--slip boundary condition (\ref{1.5}).) This is also why we usually understand, under a ``weak solution'', only the velocity $\mathbf{u}$ and not the pair $(\mathbf{u},p)$. There arises a question whether one can naturally assign some pressure $p$ to a weak solution $\mathbf{u}$. It is known from the theory of the Navier--Stokes equations with the no--slip boundary condition (\ref{1.5}) that the pressure, associated with a weak solution, generally exists only as a distribution in $Q_T$. (See \cite{Li}, \cite{Te}, \cite{Si}, \cite{Ga2}, \cite{So}, \cite{Wo} and \cite{Ne2}.) The distribution is regular (i.e.~it can be identified with a function with some rate of integrability in $Q_T$) if domain $\Omega$ is ``smooth'', see \cite{SoWa}, \cite{GiSo} and \cite{Ne2}. In section \ref{S4} of this paper, we show that one can naturally assign a pressure, as a distribution, to a weak solution to the Navier--Stokes equations with Navier's boundary conditions (\ref{1.3}), too. Moreover, we show in section \ref{S4} that the associated pressure is not just a distribution, satisfying together with the weak solution $\mathbf{u}$ equations (\ref{1.1}), (\ref{1.2}) in the sense of distributions in $Q_T$ (where the distributions are applied to test functions from $\mathbf{C}^{\infty}_0(Q_T)$), but that it is a distribution with a certain structure, which can be applied to functions from $\mathbf{C}^{\infty}(\overline{Q_T})$ with a compact support in $\overline{\Omega}\times(0,T)$ and with the normal component equal to zero on $\Gamma_T$. 
In section \ref{S5}, we show that if domain $\Omega$ is smooth and bounded then the associated pressure is a function with a certain rate of integrability in $Q_T$. Finally, in section \ref{S6}, we study the regularity of the associated pressure in a sub-domain $\Omega'\times(t_1,t_2)$ of $Q_T$, where $\mathbf{u}$ satisfies Serrin's integrability conditions. We shall see that the regularity depends on boundary conditions, satisfied by the velocity on $\Gamma_T$.
\section{Notation and auxiliary results} \label{S2}
{\bf 2.1. Notation.} \ We use this notation of functions, function spaces, dual spaces, etc.:
\begin{list}{$\circ$} {\setlength{\topsep 2pt} \setlength{\itemsep 1pt} \setlength{\leftmargin 14pt} \setlength{\rightmargin 0pt} \setlength{\labelwidth 6pt}}
\item $\Omega_0\subset\subset\Omega$ means that $\Omega_0$ is a bounded domain in ${\mathbb R}^3$ such that $\overline{\Omega_0}\subset\Omega$.
\item Vector functions and spaces of vector functions are denoted by boldface letters.
\item $\bfC^{\infty}_{0,\sigma}(\Omega)$ denotes the linear space of infinitely differentiable divergence-free vector functions in $\Omega$, with a compact support in $\Omega$.
\item Let $1<q<\infty$. We denote by $\bfL_{\tau,\sigma}^q(\Omega)$ the closure of $\bfC^{\infty}_{0,\sigma}(\Omega)$ in $\mathbf{L}^q(\Omega)$. The subscript $\tau$ means that functions from $\bfL_{\tau,\sigma}^q(\Omega)$ have the normal component on $\partial\Omega$ equal to zero in a certain weak sense of traces and they are therefore tangential on $\partial\Omega$. The subscript $\sigma$ expresses the fact that functions from $\bfL_{\tau,\sigma}^q(\Omega)$ are divergence--free in $\Omega$ in the sense of distributions. (See e.g.~\cite{Ga1} for more information.)
\item Put $\mathbf{G}_{q}(\Omega):=\{\nabla\psi\in\mathbf{L}^{q} (\Omega);\ \psi\in W^{1,q}_{\rm loc}(\Omega)\}$. $\mathbf{G}_{q}(\Omega)$ is a closed subspace of $\mathbf{L}^{q}(\Omega)$, see \cite[Exercise III.1.2] {Ga1}.
\item $\bfW_{\tau}^{1,q}(\Omega):=\{\mathbf{v}\in\mathbf{W}^{1,q}(\Omega);\, \mathbf{v}\cdot\mathbf{n}=0\ \mbox{a.e.~on}\ \partial\Omega\}$, \\ [3pt]
$\bfW_{\tau,\br c}^{1,q}(\Omega):=\bigl\{ \mbox{\boldmath $\varphi$}\in\bfW_{\tau}^{1,q}(\Omega)$, $\mathrm{supp}\,\mbox{\boldmath $\varphi$}$ is a compact set in ${\mathbb R}^3\bigr\}$, \\ [3pt]
$\bfW_{\tau,\br\sigma}^{1,q}(\Omega):=\mathbf{W}^{1,q}(\Omega)\cap\bfL_{\tau,\sigma}^q(\Omega)\equiv \bfW_{\tau}^{1,q}(\Omega)\cap\bfL_{\tau,\sigma}^q(\Omega)$, \\ [3pt]
$\bfW_{\tau,\br\sigma,\br c}^{1,q}(\Omega):=\bfW_{\tau,\br\sigma}^{1,q}(\Omega)\cap\bfW_{\tau,\br c}^{1,q}(\Omega)$.
\item The norms in $L^q(\Omega)$ and in $\mathbf{L}^q(\Omega)$ are denoted by
$\|\, .\, \|_q$. The norms in $W^{k,q}(\Omega)$ and in
$\mathbf{W}^{k,q}(\Omega)$ (for $k\in{\mathbb N}$) are denoted by $\|\, .\,
\|_{k,q}$. If the considered domain differs from $\Omega$ then we use e.g.~the notation $\|\, .\, \|_{q;\, \Omega'}$ or $\|\, .\,
\|_{k,q;\, \Omega'}$, etc. The scalar products in $L^2(\Omega)$ and in $\mathbf{L}^2(\Omega)$ are denoted by $(\, .\, ,\, .\, )_2$ and the scalar products in $W^{1,2}(\Omega)$ and in $\mathbf{W}^{1,2}(\Omega)$ are denoted by $(\, .\, ,\, .\, )_{1,2}$.
\item The conjugate exponent is denoted by prime, so that e.g.~$q'=q/(q-1)$. $\bfW_{\tau}^{-1,q'}(\Omega)$ denotes the dual space to
$\bfW_{\tau}^{1,q}(\Omega)$ and $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ denotes the dual space to $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. The norm in $\bfW_{\tau}^{-1,q'}(\Omega)$, respectively $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$, is denoted by $\|\, .\,
\|_{-1,q'}$, respectively by $\|\, .\, \|_{-1,q';\, \sigma}$.
\item The duality between elements of $\bfW_{\tau}^{-1,q'}(\Omega)$ and $\bfW_{\tau}^{1,q}(\Omega)$ is denoted by $\langle\, .\, ,\, .\, \rangle_{\tau}$ and the duality between elements of $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ and $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$ is denoted by $\langle\, .\, ,\, .\, \rangle_{\tau,\sigma}$.
\item $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)^{\perp}$ denotes the space of annihilators of $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$ in $\bfW_{\tau}^{-1,q'}(\Omega)$, i.e.~the space $\bigl\{\mathbf{g}\in\bfW_{\tau}^{-1,q'}(\Omega)$; $\forall\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega): \langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau}=0\bigr\}$.
\end{list}
\noindent {\bf 2.2. $\mathbf{L}^{q'}(\Omega)$ and $\bfL_{\tau,\sigma}^{q'}(\Omega)$ as subspaces of $\bfW_{\tau}^{-1,q'}(\Omega)$ and $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$, respectively.} \ The Lebesgue space $\mathbf{L}^{q'}(\Omega)$ can be identified with a subspace of $\bfW_{\tau}^{-1,q'}(\Omega)$ so that if $\mathbf{g}\in\mathbf{L}^{q'}(\Omega)$ then \begin{equation} \langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau}\ :=\ \int_{\Omega}\mathbf{g}\cdot\mbox{\boldmath $\varphi$}\; \mathrm{d}\mathbf{x} \label{2.1} \end{equation} for all $\mbox{\boldmath $\varphi$}\in \bfW_{\tau}^{1,q}(\Omega)$. Similarly, $\bfL_{\tau,\sigma}^{q'}(\Omega)$ can be identified with a subspace of $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ so that if $\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$ then \begin{equation} \langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}\ :=\ \int_{\Omega}\mathbf{g}\cdot\mbox{\boldmath $\varphi$}\; \mathrm{d}\mathbf{x} \label{2.2} \end{equation} for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. Thus, if $\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$ and $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$ then the dualities $\langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau}$ and $\langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}$ coincide.
Note that if $\, \mathbf{g}\in\mathbf{L}^{q'}(\Omega)$ then the integral on the right hand side of (\ref{2.1}) also defines a bounded linear functional on $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. This, however, does not mean that $\mathbf{L}^{q'}(\Omega)$ can be identified with a subspace of $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$. The reason is, for instance, that the spaces $\mathbf{L}^{q'}(\Omega)$ and $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ do not have the same zero element. (If $\psi$ is a non-constant function in $C^{\infty}_0(\Omega)$ then $\nabla\psi$ is a non-zero element of $\mathbf{L}^{q'}(\Omega)$, but it induces the zero element of $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$.)
\noindent {\bf 2.3. Definition and some properties of operator $\cPs{q'}$.} \ $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$ is a closed subspace of $\bfW_{\tau}^{1,q}(\Omega)$. If $\mathbf{g}\in\bfW_{\tau}^{-1,q'}(\Omega)$ (i.e.~$\mathbf{g}$ is a bounded linear functional on $\bfW_{\tau}^{1,q}(\Omega)$) then we denote by $\cPs{q'}\mathbf{g}$ the element of $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$, defined by the equation \begin{displaymath} \langle \cPs{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}\ :=\ \langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau} \qquad \mbox{for all}\ \mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega). \end{displaymath} Obviously, $\cPs{q'}$ is a linear operator from $\bfW_{\tau}^{-1,q'}(\Omega)$ to $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$, whose domain is the whole space $\bfW_{\tau}^{-1,q'}(\Omega)$.
\begin{lemma} \label{L2.1} The operator $\cPs{q'}$ is bounded, its range is $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ and $\cPs{q'}$ is not one-to-one. \end{lemma}
\begin{proof} \rm The boundedness of operator $\cPs{q'}$ directly follows from the definition of the norms in the spaces $\bfW_{\tau}^{-1,q'}(\Omega)$, $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ and the definition of $\cPs{q'}$.
Let $\mathbf{g}\in\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$. There exists (by the Hahn-Banach theorem) an extension of $\mathbf{g}$ from $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$ to $\bfW_{\tau}^{1,q}(\Omega)$, which we denote by $\widetilde{\mathbf{g}}$. The extension is an element of $\bfW_{\tau}^{-1,q'}(\Omega)$, satisfying
$\|\widetilde{\mathbf{g}}\|_{-1,q'}=\|\mathbf{g}\|_{-1,q';\, \sigma}$ and \begin{displaymath} \langle \widetilde{\mathbf{g}},\mbox{\boldmath $\varphi$}\rangle_{\tau}\ =\ \langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma} \end{displaymath} for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. This shows that $\mathbf{g}=\cPs{q'}\widetilde{\mathbf{g}}$. Consequently, the range of $\cPs{q'}$ is the whole space $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$.
Finally, considering $\mathbf{g}=\nabla\psi$ for $\psi\in C^{\infty}_0(\Omega)$, we get \begin{displaymath} \langle \cPs{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}\ =\ \langle \mathbf{g},\mbox{\boldmath $\varphi$} \rangle_{\tau}\ =\ \int_{\Omega}\nabla \psi\cdot\mbox{\boldmath $\varphi$}\; \mathrm{d}\mathbf{x}\ =\ 0 \end{displaymath} for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. This shows that the operator $\cPs{q'}$ is not one-to-one. \end{proof}
\noindent {\bf 2.4. The relation between operator $\cPs{q'}$ and the Helmholtz projection.} \ If each function $\mathbf{g}\in\mathbf{L}^{q'}(\Omega)$ can be uniquely expressed in the form $\mathbf{g}=\mathbf{v}+\nabla\psi$ for some $\mathbf{v}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$ and $\nabla\psi\in\mathbf{G}_{q'}(\Omega)$, which is equivalent to the validity of the decomposition \begin{equation} \mathbf{L}^{q'}(\Omega)\ =\ \bfL_{\tau,\sigma}^{q'}(\Omega)\oplus\mathbf{G}_{q'}(\Omega), \label{2.1*} \end{equation} then we write $\mathbf{v}=\Ps{q'}\mathbf{g}$. Decomposition (\ref{2.1*}) is called the {\it Helmholtz decomposition} and the operator $\Ps{q'}$ is called the {\it Helmholtz projection.} The existence of the Helmholtz decomposition depends on exponent $q'$ and the shape of domain $\Omega$. If $q'=2$ then the Helmholtz decomposition exists on an arbitrary domain $\Omega$ and $\Ps{2}$, respectively $I-\Ps{2}$, is an orthogonal projection of $\mathbf{L}^2(\Omega)$ onto $\bfL_{\tau,\sigma}^2(\Omega)$, respectively onto $\mathbf{G}_2(\Omega)$. (See e.g.~\cite{Ga1}.) If $q'\not=2$ then various sufficient conditions for the existence of the Helmholtz decomposition can be found e.g.~in \cite{FaKoSo}, \cite{FuMo}, \cite{Ga1}, \cite{GeShe}, \cite{KoYa} and \cite{SiSo}.
Further on in this paragraph, we assume that the Helmholtz decomposition of $\mathbf{L}^{q'}(\Omega)$ exists. Let $\mathbf{g}\in\mathbf{L}^{q'}(\Omega)$. Treating $\mathbf{g}$ as an element of $\bfW_{\tau}^{-1,q'}(\Omega)$ in the sense of paragraph 2.2, we have $\langle\cPs{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}= \langle\mathbf{g}, \mbox{\boldmath $\varphi$}\rangle_{\tau}$ for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. Writing $\mathbf{g}=\Ps{q'}\mathbf{g}+(I-\Ps{q'})\mathbf{g}$, we also have \begin{displaymath} \langle\mathbf{g}, \mbox{\boldmath $\varphi$}\rangle_{\tau}\ =\ \bigl\langle\Ps{q'}\mathbf{g}+(I-\Ps{q'})\mathbf{g},\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau}\ =\ \bigl\langle\Ps{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau} \end{displaymath} for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$, because $(I-\Ps{q'})\mathbf{g}\in\mathbf{G}_{q'}(\Omega)$. Furthermore, \begin{displaymath} \bigl\langle\Ps{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau}\ =\ \bigl\langle\Ps{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau,\sigma}, \end{displaymath} because $\Ps{q'}\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$, $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$ and the formulas (\ref{2.1}) and (\ref{2.2}) show that the dualities $\langle\Ps{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau}$ and $\langle\Ps{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}$ are expressed by the same integrals. Hence $\langle\cPs{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}$ coincides with $\langle\Ps{q'}\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau,\sigma}$ for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$. Consequently, $\cPs{q'}\mathbf{g}$ and $\Ps{q'}\mathbf{g}$ represent the same element of $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$. 
As $\Ps{q'}\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$, $\cPs{q'}\mathbf{g}$ can also be considered to be an element of $\bfL_{\tau,\sigma}^{q'}(\Omega)$, which induces a functional in $\bfW_{\tau,\br\sigma}^{-1,q'}(\Omega)$ in the sense of paragraph 2.2. Thus, {\it the Helmholtz projection $\Ps{q'}$ coincides with the restriction of $\cPs{q'}$ to $\mathbf{L}^{q'}(\Omega)$.}
\noindent {\bf 2.5. More on the space $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)^{\perp}$.} \ Identifying $\mathbf{G}_{q'}(\Omega)$ with a subspace of $\bfW_{\tau}^{-1,q'}(\Omega)$ in the sense of paragraph 2.2, {\it we denote by ${}^{\perp}\mathbf{G}_{q'}(\Omega)$ the linear space $\bigl\{\mbox{\boldmath $\varphi$}\in\bfW_{\tau}^{1,q}(\Omega)$; $\forall\hbox to 0.7pt{}\mathbf{g}\in\mathbf{G}_{q'}(\Omega): \langle\mathbf{g},\mbox{\boldmath $\varphi$}\rangle_{\tau}=0\bigr\}$.} Using \cite[Lemma III.2.1]{Ga1}, we deduce that $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)={}^{\perp}\mathbf{G}_{q'}(\Omega)$. Hence $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)^{\perp}=(^{\perp}\mathbf{G}_{q'}(\Omega))^{\perp}$ and applying Theorem 4.7 in \cite{Ru}, we observe that $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)^{\perp}$ is a closure of $\mathbf{G}_{q'}(\Omega)$ in the weak-$*$ topology of $\bfW_{\tau}^{-1,q'}(\Omega)$. The next lemma tells us more on elements of $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)^{\perp}$.
\begin{lemma} \label{L2.2} Let $\mathbf{F}\in\bfW_{\tau,\br\sigma}^{1,q}(\Omega)^{\perp}$ and $\Omega_0\subset\subset\Omega$ be a nonempty sub-domain of $\Omega$. Then there exists a unique $p\in L^{q'}_{loc}(\Omega)$ such that $p\in L^{q'}(\Omega_R)$ for all $R>0$, \ $\int_{\Omega_0}p\; \mathrm{d}\mathbf{x}=0$ \ and \begin{alignat}{5}
& \|p\|_{q';\, \Omega_R}\ &&\leq\ c(R)\, \|\mathbf{F}\|_{-1,q'} \quad && \mbox{for all}\ R>0, \label{2.9*} \\
& \bigl\langle\mathbf{F},\mbox{\boldmath $\psi$}\bigr\rangle_{\tau}\ &&=\ -\int_{\Omega}p\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x} \quad && \mbox{for all}\ \mbox{\boldmath $\psi$}\in\bfW_{\tau,\br c}^{1,q}(\Omega). \label{2.7*} \end{alignat} \end{lemma}
\begin{proof} Let $\{\Omega_n\}$ be the sequence of domains from condition (i). We can assume without the loss of generality that $\Omega_0\subseteq\Omega_1$. Let $n\in{\mathbb N}$. Denote by $L^q_{\rm mv= 0}(\Omega_n)$ the space of all functions from $L^q(\Omega_n)$, whose mean value in $\Omega_n$ is zero. There exists a bounded linear operator $\mathfrak{B}: L^q_{\rm mv=0} (\Omega_n)\to\mathbf{W}^{1,q}_0(\Omega_n)$, such that \begin{displaymath} \mathrm{div}\,\mathfrak{B}(g)\ =\ g \end{displaymath} for all $g\in L^q_{\rm mv=0}(\Omega_n)$. Operator $\mathfrak{B}$ is often called the {\it Bogovskij} or {\it Bogovskij--Pileckas} operator. More information on operator $\mathfrak{B}$, including its construction, can be found e.g.~in \cite[Sec.~III.3]{Ga1} or in \cite{BoSo}.
Denote by $\bfW_{\tau}^{1,q}(\Omega)_n$, respectively $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)_n$, the space of all functions from $\bfW_{\tau}^{1,q}(\Omega)$, respectively from $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$, that have a support in $\overline{\Omega_n}$. Let $\mbox{\boldmath $\psi$}\in\bfW_{\tau}^{1,q}(\Omega)_n$. Then the restriction of $\mathrm{div}\,\mbox{\boldmath $\psi$}$ to $\Omega_n$ (which we again denote by $\mathrm{div}\,\mbox{\boldmath $\psi$}$ in order to keep a simple notation) belongs to $L^q_{\rm mv=0}(\Omega_n)$ and $\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\in\mathbf{W}^{1,q}_0(\Omega_n)$. Identifying $\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})$ with a function from $\mathbf{W}^{1,q}_0(\Omega)$ that equals zero in $\Omega\smallsetminus\Omega_n$, we have \begin{displaymath} \mbox{\boldmath $\psi$}\ =\ \mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})+\mathbf{w}, \end{displaymath} where $\mathbf{w}$ is an element of $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$, satisfying $\mathbf{w}=\mbox{\boldmath $\psi$}=\mathbf{0}$ in $\Omega\smallsetminus\Omega_n$. Hence \begin{equation} \bigl\langle\mathbf{F},\mbox{\boldmath $\psi$}\bigr\rangle_{\tau}\ =\ \bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\bigr\rangle_{\tau}. \label{2.2*} \end{equation} As $\mathbf{F}$ is a bounded linear functional on $\bfW_{\tau}^{1,q}(\Omega)$, vanishing on the subspace $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)$, its restriction to $\bfW_{\tau}^{1,q}(\Omega)_n$ is an element of $\bfW_{\tau}^{-1,q'}(\Omega)_n$, vanishing on $\bfW_{\tau,\br\sigma}^{1,q}(\Omega)_n$. Furthermore, identifying functions from $\bfW_{\tau}^{1,q}(\Omega)_n$ with their restrictions to $\Omega_n$, we can also consider $\mathbf{F}$ to be an element of $\mathbf{W}^{-1,q'}_0(\Omega_n)$, vanishing on $\mathbf{W}^{1,q}_{0,\sigma}(\Omega_n)$.
Thus, due to Lemma 1.4 in \cite{Ne2}, there exists $c(n)>0$ and a unique function $p_n\in L^{q'}(\Omega_n)$ such that $\int_{\Omega_0}p_n\; \mathrm{d}\mathbf{x}=0$ and \begin{align}
\|p_n\|_{q';\, \Omega_n}\ &\leq\ c(n)\, \|\mathbf{F}\|_{-1,q';\,
\Omega_n}\ \leq\ c(n)\, \|\mathbf{F}\|_{-1,q'}, \label{2.8*} \\
\bigl\langle\mathbf{F},\mbox{\boldmath $\zeta$}\bigr\rangle_{\Omega_n}\ &=\ -\int_{\Omega_n}p_n\ \mathrm{div}\,\mbox{\boldmath $\zeta$}\; \mathrm{d}\mathbf{x} \label{2.3*} \end{align} for all $\mbox{\boldmath $\zeta$}\in\mathbf{W}^{1,q}_0(\Omega_n)$. Using identity (\ref{2.3*}) with $\mbox{\boldmath $\zeta$}=\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})$, we obtain \begin{displaymath} \bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\bigr\rangle_{\tau}\ \equiv\ \bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\bigr\rangle_{\Omega_n}\ =\ -\int_{\Omega_n}\! p_n\ \mathrm{div}\,\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\; \mathrm{d}\mathbf{x}\ =\ -\int_{\Omega_n}\! p_n\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x}. \end{displaymath} As the same identities also hold for $n+1$ instead of $n$, we deduce that $p_{n+1}=p_n$ in $\Omega_n$. Hence we may define function $p$ in $\Omega$ by the formula $p:=p_n$ in $\Omega_n$ and we have \begin{equation} \bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\bigr\rangle_{\tau}\ =\ -\int_{\Omega} p\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x}. \label{2.4*} \end{equation} If $\mbox{\boldmath $\psi$}\in\bfW_{\tau,\br c}^{1,q}(\Omega)$ then $\mbox{\boldmath $\psi$}\in\bfW_{\tau}^{1,q}(\Omega)_n$ for sufficiently large $n$ and (\ref{2.4*}) holds as well. Inequality (\ref{2.9*}) now follows from (\ref{2.8*}). Identities (\ref{2.2*}) and (\ref{2.4*}) imply (\ref{2.7*}). \end{proof}
Note that if $\Omega$ is a bounded Lipschitz domain then the choice $\Omega_0=\Omega$ is also possible in Lemma \ref{L2.2}.
\section{Three equivalent weak formulations of the Navier--Stokes initial-boundary value problem (\ref{1.1})--(\ref{1.4})} \label{S3}
Recall that $\Omega$ is supposed to be a locally Lipschitz domain in ${\mathbb R}^3$.
\noindent {\bf 3.1. The 1st weak formulation of the Navier--Stokes IBVP (\ref{1.1})--(\ref{1.4}).} \ {\it Given $\mathbf{u}_0\in\bfL_{\tau,\sigma}^2(\Omega)$ and $\mathbf{f}\in L^2(0,T$; $\bfW_{\tau}^{-1,2}(\Omega))$. A function $\, \mathbf{u}\in L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega)) \cap L^2(0,T;\ \bfW_{\tau,\br\sigma}^{1,2}(\Omega))$ is said to be a weak solution to the problem (\ref{1.1})--(\ref{1.4}) if the trace of $\mathbf{u}$ on $\Gamma_T$ is in $L^2(0,T$; $\mathbf{L}^2(\partial\Omega))$ and $\mathbf{u}$ satisfies \begin{align} \int_0^T & \int_{\Omega}\bigl[-\partial_t\mbox{\boldmath $\phi$}\cdot\mathbf{u}+ \mathbf{u}\cdot\nabla\mathbf{u}\cdot\mbox{\boldmath $\phi$}+2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s: (\nabla\mbox{\boldmath $\phi$})_s\bigr]\, \mathrm{d}\mathbf{x}\, \mathrm{d} t \nonumber \\
& +\int_0^T\int_{\partial\Omega}\gamma\hbox to 0.7pt{}\mathbf{u}\cdot\mbox{\boldmath $\phi$}\; \mathrm{d} S\, \mathrm{d} t\, =\, \int_0^T\bigl\langle\mathbf{f},\mbox{\boldmath $\phi$}\bigr\rangle_{\tau}\; \mathrm{d} t+\int_{\Omega}\mathbf{u}_0\cdot\mbox{\boldmath $\phi$}(.\, ,0)\, \mathrm{d}\mathbf{x} \label{3.1} \end{align} for all vector--functions $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl([0,T);\; \bfW_{\tau,\br\sigma,\br c}^{1,2}(\Omega)\bigr)$.}
Equation (\ref{3.1}) follows from (\ref{1.1}), (\ref{1.2}) if one formally multiplies equation (\ref{1.1}) by the test function $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl([0,T);\; \bfW_{\tau,\br\sigma,\br c}^{1,2} (\Omega)\bigr)$, applies the integration by parts and uses the boundary conditions (\ref{1.3}) and the initial condition (\ref{1.4}). As the integral of $\nabla p\cdot\mbox{\boldmath $\phi$}$ vanishes, the pressure $p$ does not explicitly appear in (\ref{3.1}).
On the other hand, if $\mathbf{f}\in\mathbf{L}^2(Q_T)$ and $\mathbf{u}$ is a weak solution with the additional properties $\partial_t\mathbf{u}\in\mathbf{L}^2(Q_T)$ and $\mathbf{u}\in L^2(0,T;\, \mathbf{W}^{2,2}(\Omega))$ then, considering the test functions $\mbox{\boldmath $\phi$}$ in (\ref{3.1}) of the form $\mbox{\boldmath $\phi$}(\mathbf{x},t)=\mbox{\boldmath $\varphi$}(\mathbf{x})\, \vartheta(t)$ where $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma,\br c}^{1,2}(\Omega)$ and $\vartheta\in C^{\infty}_0((0,T))$, and applying the backward integration by parts, one obtains the equation \begin{displaymath} \int_{\Omega} \bigl( \partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}- \nu\Delta\mathbf{u}-\mathbf{f} \bigr)\cdot\mbox{\boldmath $\varphi$}\; \mathrm{d}\mathbf{x}\ =\ 0 \end{displaymath} for a.a.~$t\in(0,T)$. As $\bfW_{\tau,\br\sigma,\br c}^{1,2}(\Omega)$ is dense in $\bfL_{\tau,\sigma}^2(\Omega)$, this equation shows that $\Ps{2}[\partial_t\mathbf{u} +\mathbf{u}\cdot\nabla\mathbf{u}-\nu\Delta\mathbf{u}- \mathbf{f}]=\mathbf{0}$ at a.a.~time instants $t\in(0,T)$. Consequently, to a.a.~$t\in(0,T)$, there exists $p\in W^{1,2}_{\rm loc}(\Omega)$ such that $\nabla p=(I-\Ps{2})[\partial_t\mathbf{u}+ \mathbf{u}\cdot\nabla\mathbf{u}-\nu\Delta\mathbf{u}- \mathbf{f}]$ and the functions $\mathbf{u}$ and $p$ satisfy equation (\ref{1.1}) (as an equation in $\mathbf{L}^2(\Omega)$) at a.a.~time instants $t\in(0,T)$. It follows from the boundedness of projection $\Ps{2}$ in $\mathbf{L}^2(\Omega)$ and the assumed properties of functions $\mathbf{u}$ and $\mathbf{f}$ that $\nabla p\in \mathbf{L}^2(Q_T)$. 
Considering afterwards the test functions $\mbox{\boldmath $\phi$}$ as in (\ref{3.1}), and integrating by parts in (\ref{3.1}), we get \begin{displaymath} \int_0^T\int_{\Omega}\bigl( \partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}- \nu\Delta\mathbf{u}-\mathbf{f} \bigr)\cdot\mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}+ \int_0^T\int_{\partial\Omega} \bigl( [\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}]+ \gamma\mathbf{u} \bigr)\cdot\mbox{\boldmath $\phi$}\; \mathrm{d} S\, \mathrm{d} t\ =\ 0 \end{displaymath} The first integral is equal to zero, because the expression in the parentheses equals $-\nabla p$ a.e.~in $Q_T$ and the integral $\nabla p\cdot\mbox{\boldmath $\phi$}$ in $\Omega$ equals zero for a.a.~$t\in(0,T)$. In the second integral, since both $\mathbf{u}(\, .\, ,t)$ and $\mbox{\boldmath $\phi$}(\, .\, ,t)$ are tangent on $\partial\Omega$, we can replace $[\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}]+\gamma\mathbf{u}$ by $[\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}]_{\tau}+\gamma\mathbf{u}$ and we thus obtain \begin{displaymath} \int_0^T\int_{\partial\Omega} \bigl( [\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}]_{\tau}+\gamma\mathbf{u} \bigr)\cdot\mbox{\boldmath $\phi$}\; \mathrm{d} S\, \mathrm{d} t\ =\ 0. \end{displaymath} As this equation holds for all test functions $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl([0,T);\; \bfW_{\tau,\br\sigma,\br c}^{1,2}(\Omega)\bigr)$, we deduce that $\mathbf{u}$ satisfies the boundary condition (\ref{1.3}b). Recall that this procedure works only under additional assumptions on smoothness of the weak solution $\mathbf{u}$ and function $\mathbf{f}$. On a general level, however, it is not known whether the existing weak solution is smooth. Nevertheless, we show in subsection 4.4 that there exists a certain pressure, which can be naturally associated with the weak solution to (\ref{1.1})--(\ref{1.4}). 
The pressure generally exists only as a distribution, see Theorem \ref{T4.2}.
\noindent {\bf 3.2. The 2nd weak formulation of the Navier-Stokes IBVP (\ref{1.1})--(\ref{1.4}).} \ We define the operators ${\cal A}:\bfW_{\tau}^{1,2}(\Omega)\to\bfW_{\tau}^{-1,2}(\Omega)$ and ${\cal B}:\bigl[\bfW_{\tau}^{1,2}(\Omega)\bigr]^2\to \bfW_{\tau}^{-1,2}(\Omega)$ by the equations \begin{align*} & \bigl\langle{\cal A}\mathbf{v},\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau}\ :=\ \int_{\Omega} 2\nu\hbox to 0.7pt{}(\nabla\mathbf{v})_s:(\nabla\mbox{\boldmath $\varphi$})_s\; \mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma\mathbf{v}\cdot\mbox{\boldmath $\varphi$}\; \mathrm{d} S && \mbox{for}\ \mathbf{v},\mbox{\boldmath $\varphi$}\in\bfW_{\tau}^{1,2}(\Omega), \\
& \bigl\langle{\cal B}(\mathbf{v},\mathbf{w}),\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau}\ :=\ \int_{\Omega} \mathbf{v}\cdot\nabla\mathbf{w}\cdot\mbox{\boldmath $\varphi$}\; \mathrm{d}\mathbf{x} && \mbox{for}\ \mathbf{v},\mathbf{w},\mbox{\boldmath $\varphi$}\in\bfW_{\tau}^{1,2}(\Omega). \end{align*} By Korn's inequality (see e.g.~\cite[Lemma 4]{SoSc}) and inequality \cite[(II.4.5), p.~63]{Ga1}, we have $\cn01$ \begin{equation} \bigl\langle{\cal A}\mathbf{v},\mathbf{v}\bigr\rangle_{\tau}\ =\ \int_{\Omega}2\nu\,
|(\nabla\mathbf{v})_s|^2\; \mathrm{d}\mathbf{x}+ \int_{\partial\Omega}\gamma\,
|\mathbf{v}|^2\; \mathrm{d} S\ \geq\ \cc01\hbox to 0.7pt{} \nu\, \|\nabla\mathbf{v}\|_2^2. \label{3.2} \end{equation} Furthermore, using the boundedness of the operator of traces from $\bfW_{\tau}^{1,2}(\Omega)$ to $\mathbf{L}^2(\partial\Omega)$, we can also deduce that there exists $\cn02\cc02>0$ such that \begin{equation}
\|{\cal A}\mathbf{v}\|_{-1,2}\ \leq\ \cc02\, \|\nabla\mathbf{v}\|_2 \label{3.3} \end{equation} for all $\mathbf{v}\in\bfW_{\tau}^{1,2}(\Omega)$. Thus, ${\cal A}$ is a bounded one--to--one operator, mapping $\bfW_{\tau}^{1,2}(\Omega)$ into $\bfW_{\tau}^{-1,2}(\Omega)$. If $k>0$ then the range of ${\cal A}+kI$ is the whole space $\bfW_{\tau}^{-1,2}(\Omega)$ (by the Lax--Milgram theorem) and $({\cal A}+kI)^{-1}$ is a bounded operator from $\bfW_{\tau}^{-1,2}(\Omega)$ onto $\bfW_{\tau}^{1,2}(\Omega)$. If $\Omega$ is bounded then the same statements also hold for $k=0$. The bilinear operator ${\cal B}$ satisfies \begin{align}
& \|{\cal B}(\mathbf{v},\mathbf{w})\|_{-1,2}\ =\ \sup_{\boldsymbol{\varphi} \in\bfW_{\tau}^{1,2}(\Omega),\ \boldsymbol{\varphi}\not=\mathbf{0}}
\frac{|\hbox to 0.7pt{}\langle {\cal B}(\mathbf{v},\mathbf{w}),\mbox{\boldmath $\varphi$}\rangle_{\tau}\hbox to 0.7pt{}|}
{\|\mbox{\boldmath $\varphi$}\|_{1,2}} \nonumber \\
& \hspace{6pt} =\ \sup_{\boldsymbol{\varphi}\in \bfW_{\tau}^{1,2}(\Omega),\ \boldsymbol{\varphi}\not=\mathbf{0}}
\frac{|(\mathbf{v}\cdot\nabla\mathbf{w},\,\mbox{\boldmath $\varphi$})_2|}
{\|\mbox{\boldmath $\varphi$}\|_{1,2}}\ \leq \sup_{\boldsymbol{\varphi}\in \bfW_{\tau}^{1,2}(\Omega),\ \boldsymbol{\varphi}\not=\mathbf{0}}
\frac{\|\mathbf{v}\|_2^{1/2}\, \|\mathbf{v}\|_6^{1/2}\, \|\nabla\mathbf{w}\|_2\,
\|\mbox{\boldmath $\varphi$}\|_6}{\|\mbox{\boldmath $\varphi$}\|_{1,2}} \nonumber \\ \noalign{\vskip 4pt}
& \hspace{6pt} \leq\ c\, \|\mathbf{v}\|_2^{1/2}\,
\|\nabla\mathbf{v}\|_2^{1/2}\, \|\nabla\mathbf{w}\|_2. \label{3.4} \end{align}
(We have used the imbedding inequality $\|\mathbf{v}\|_6\leq c\,
\|\mathbf{v}\|_{1,2}$. Here and further on, $c$ denotes the generic constant.)
Let $\mathbf{u}$ be a weak solution of the IBVP (\ref{1.1})--(\ref{1.4}) in the sense of paragraph 3.1. It follows from the estimates (\ref{3.3}) and (\ref{3.4}) that \begin{equation} {\cal A}\mathbf{u}\in L^2(0,T;\, \bfW_{\tau}^{-1,2}(\Omega)) \quad \mbox{and} \quad {\cal B}(\mathbf{u},\mathbf{u})\in L^{4/3}(0,T;\, \bfW_{\tau}^{-1,2}(\Omega)). \label{3.5} \end{equation} Considering $\mbox{\boldmath $\phi$}$ in (\ref{3.1}) in the form $\mbox{\boldmath $\phi$}(\mathbf{x},t)=\mbox{\boldmath $\varphi$}(\mathbf{x})\, \vartheta(t)$, where $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma,\br c}^{1,2}(\Omega)$ and $\vartheta\in C^{\infty}_0((0,T))$, we deduce that $\mathbf{u}$ satisfies the equation \begin{equation} \frac{\mathrm{d}}{\mathrm{d}\hbox to 0.7pt{} t}\, (\mathbf{u},\mbox{\boldmath $\varphi$})_2+\bigl\langle {\cal A}\mathbf{u},\mbox{\boldmath $\varphi$} \bigr\rangle_{\tau}+\bigl\langle {\cal B}(\mathbf{u},\mathbf{u}), \mbox{\boldmath $\varphi$}\bigr\rangle_{\tau}\ =\ \langle \mathbf{f},\mbox{\boldmath $\varphi$} \rangle_{\tau} \label{3.6} \end{equation} a.e.~in\ $(0,T)$, where the derivative of $(\mathbf{u},\mbox{\boldmath $\varphi$})_2$ means the derivative in the sense of distributions. As the space $\bfW_{\tau,\br\sigma,\br c}^{1,2}(\Omega)$ is dense in $\bfW_{\tau,\br\sigma}^{1,2}(\Omega)$, (\ref{3.6}) holds for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,2}(\Omega)$. It follows from (\ref{3.5}) that $\langle{\cal A}\mathbf{u},\mbox{\boldmath $\varphi$} \rangle_{\tau} \in L^2(0,T)$ and $\langle{\cal B}(\mathbf{u},\mathbf{u}),\mbox{\boldmath $\varphi$} \rangle_{\tau} \in L^{4/3}(0,T)$. Since $\langle\mathbf{f},\mbox{\boldmath $\varphi$}\rangle_{\tau}\in L^2(0,T)$, we obtain from (\ref{3.6}) that the distributional derivative of $(\mathbf{u},\mbox{\boldmath $\varphi$})_2$ with respect to $t$ is in $L^{4/3}(0,T)$. 
Hence $(\mathbf{u},\mbox{\boldmath $\varphi$})_2$ is a.e.~in $[0,T)$ equal to a continuous function and the weak solution $\mathbf{u}$ is (after a possible redefinition on a set of measure zero) a weakly continuous function from $[0,T)$ to $\bfL_{\tau,\sigma}^2(\Omega)$. Now, one can easily deduce from (\ref{3.1}) that $\mathbf{u}$ satisfies the initial condition (\ref{1.4}) in the sense that \begin{equation}
(\mathbf{u},\mbox{\boldmath $\varphi$})_2\hbox to 0.7pt{}\bigl|_{t=0}\ =\ (\mathbf{u}_0,\mbox{\boldmath $\varphi$})_2 \label{3.7} \end{equation} for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,2}(\Omega)$. Thus, we come to the 2nd weak formulation of the IBVP (\ref{1.1})--(\ref{1.4}):
{\it Given $\mathbf{u}_0\in\bfL_{\tau,\sigma}^2(\Omega)$ and $\mathbf{f}\in L^2(0,T;\ \bfW_{\tau}^{-1,2}(\Omega))$. Find $\mathbf{u}\in L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega))\cap L^2(0,T$; $\bfW_{\tau,\br\sigma}^{1,2}(\Omega))$ (called the weak solution) such that $\mathbf{u}$ satisfies equation (\ref{3.6}) a.e.~in $(0,T)$ and the initial condition (\ref{3.7}) for all $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br\sigma}^{1,2}(\Omega)$.}
We have shown that if $\mathbf{u}$ is a weak solution of the IBVP (\ref{1.1})--(\ref{1.4}) in the sense of the 1st definition (see paragraph 3.1) then it also satisfies the 2nd definition. Applying standard arguments, one can also show the opposite, i.e.~if $\mathbf{u}$ satisfies the 2nd definition then it also satisfies the 1st definition.
\noindent {\bf 3.3. The 3rd weak formulation of the Navier-Stokes IBVP (\ref{1.1})--(\ref{1.4}).} \ Equation (\ref{3.6}) can also be written in the equivalent form \begin{equation} \frac{\mathrm{d}}{\mathrm{d}\hbox to 0.7pt{} t}\, (\mathbf{u},\mbox{\boldmath $\varphi$})_2+\bigl\langle \cPs{2}\hbox to 0.7pt{}{\cal A}\mathbf{u},\mbox{\boldmath $\varphi$} \bigr\rangle_{\tau,\sigma}+ \bigl\langle\cPs{2}\hbox to 0.7pt{}{\cal B}(\mathbf{u},\mathbf{u}),\mbox{\boldmath $\varphi$}\bigr\rangle_{\tau,\sigma}\ =\ \bigl\langle \cPs{2}\hbox to 0.7pt{}\mathbf{f}, \mbox{\boldmath $\varphi$}\bigr\rangle_{\tau,\sigma}. \label{3.8} \end{equation} Let us denote by $(\mathbf{u}')_{\sigma}$ the distributional derivative with respect to $t$ of $\mathbf{u}$, as a function from $(0,T)$ to $\bfW_{\tau,\br\sigma}^{-1,2}(\Omega)$. (We explain later why we use the notation $(\mathbf{u}')_{\sigma}$ and not just $\mathbf{u}'$.) Equation (\ref{3.8}) can also be written in the form \begin{equation} (\mathbf{u}')_{\sigma}+\cPs{2}\hbox to 0.7pt{}{\cal A}\mathbf{u}+\cPs{2}\hbox to 0.7pt{}{\cal B}(\mathbf{u},\mathbf{u})\ =\ \cPs{2}\hbox to 0.7pt{}\mathbf{f}, \label{3.9} \end{equation} which is an equation in $\bfW_{\tau,\br\sigma}^{-1,2}(\Omega)$, satisfied a.e.~in the time interval $(0,T)$. (This can be deduced by means of Lemma III.1.1 in \cite{Te}.) Due to (\ref{3.5}) and (\ref{3.6}), $(\mathbf{u}')_{\sigma}\in L^{4/3}(0,T;\, \bfW_{\tau,\br\sigma}^{-1,2}(\Omega))$. Hence $\mathbf{u}$ coincides a.e.~in $(0,T)$ with a continuous function from $[0,T)$ to $\bfW_{\tau,\br\sigma}^{-1,2}(\Omega)$ and it is therefore meaningful to prescribe an initial condition for $\mathbf{u}$ at time $t=0$. Thus, we obtain the 3rd equivalent definition of a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}):
{\it Given $\mathbf{u}_0\in\bfL_{\tau,\sigma}^2(\Omega)$ and $\mathbf{f}\in L^2(0,T$; $\bfW_{\tau}^{-1,2}(\Omega))$. Function $\mathbf{u}\in L^{\infty}(0,T$; $\bfL_{\tau,\sigma}^2(\Omega))\cap L^2(0,T$; $\bfW_{\tau,\br\sigma}^{1,2}(\Omega))$ is called a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}) if $\mathbf{u}$ satisfies equation (\ref{3.9}) a.e.~in the interval $(0,T)$ and the initial condition (\ref{1.4}).}
We have explained that if $\mathbf{u}$ is a weak solution in the sense of the 2nd definition then it satisfies the 3rd definition. The validity of the opposite implication can be again verified by means of Lemma III.1.1 in \cite{Te}.
\noindent {\bf 3.4. Remark.} \ \rm Recall that $(\mathbf{u}')_{\sigma}$ is the distributional derivative with respect to $t$ of $\mathbf{u}$, as a function from $(0,T)$ to $\bfW_{\tau,\br\sigma}^{-1,2}(\Omega)$. It is not the same as the distributional derivative with respect to $t$ of $\mathbf{u}$, as a function from $(0,T)$ to $\bfW_{\tau}^{-1,2}(\Omega)$, which can be naturally denoted by $\mathbf{u}'$. As it is important to distinguish between these two derivatives, we use the different notation. We can formally write $(\mathbf{u}')_{\sigma}=\cPs{2}\mathbf{u}'$.
Since $(\mathbf{u}')_{\sigma}\in L^{4/3}(0,T;\, \bfW_{\tau,\br\sigma}^{-1,2}(\Omega))$, $\mathbf{u}$ coincides a.e.~in $(0,T)$ with a continuous function from $[0,T)$ to $\bfW_{\tau,\br\sigma}^{-1,2}(\Omega)$. According to what is said in the first part of this remark, this, however, does not imply that $\mathbf{u}$ coincides a.e.~in $(0,T)$ with a continuous function from $[0,T)$ to $\bfW_{\tau}^{-1,2}(\Omega)$.
\section{An associated pressure, its uniqueness and existence} \label{S4}
{\bf 4.1. An associated pressure.} \ {\it Let $\mathbf{u}$ be a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}). A distribution $p$ in $Q_T$ is called an associated pressure if the pair $(\mathbf{u},p)$ satisfies the equations (\ref{1.1}), (\ref{1.2}) in the sense of distributions in $Q_T$.}
\noindent {\bf 4.2. On uniqueness of the associated pressure.} \ Let $\mathbf{u}$ be a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}) and $p$ be an associated pressure.
If $G$ is a distribution in $(0,T)$ and $\psi\in C_0^{\infty}(Q_T)$ then we define a distribution $g$ in $Q_T$ by the formula \begin{equation} \blangle\hspace{-2.5pt}\blangle g,\psi\brangle\hspace{-2.5pt}\brangle_{Q_T}\ :=\ \Bigl\langle G,\ \int_{\Omega}\psi\; \mathrm{d}\mathbf{x} \Bigr\rangle_{(0,T)}, \label{4.11} \end{equation} where $\langle\hspace{-1.9pt}\langle\, .\, ,\, .\, \rangle\hspace{-1.9pt}\rangle_{Q_T}$, respectively $\langle\, .\, ,\, .\, \rangle_{(0,T)}$, denotes the action of a distribution in $Q_T$ on a function from $C^{\infty}_0(Q_T)$ or $\mathbf{C}^{\infty}_0(Q_T)$, respectively the action of a distribution in $(0,T)$ on a function from $C_0^{\infty}((0,T))$. Obviously, if $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl((0,T);\, \bfW_{\tau,\br c}^{1,2}(\Omega)\bigr)$ then \begin{equation} \blangle\hspace{-2.5pt}\blangle\nabla g,\mbox{\boldmath $\phi$}\brangle\hspace{-2.5pt}\brangle_{Q_T}\ =\ -\blangle\hspace{-2.5pt}\blangle g,\mathrm{div}\,\mbox{\boldmath $\phi$}\brangle\hspace{-2.5pt}\brangle_{Q_T}\ =\ -\Bigl\langle G,\ \int_{\Omega}\mathrm{div}\,\mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\Bigr\rangle_{(0,T)}\ =\ 0, \label{4.12} \end{equation} because $\int_{\Omega}\mathrm{div}\,\mbox{\boldmath $\phi$}(\, .\, ,t)\; \mathrm{d}\mathbf{x}=0$ for all $t\in(0,T)$. Thus, $p+g$ is a pressure, associated with the weak solution $\mathbf{u}$ to the IBVP (\ref{1.1})--(\ref{1.4}), too.
Conversely, suppose that $p+g$ is another pressure associated with the weak solution $\mathbf{u}$, so that the distribution $g$ satisfies $\nabla g=\mathbf{0}$ in the sense of distributions in $Q_T$. For $h\in C_0^{\infty}((0,T))$, define \begin{equation} \bigl\langle G,h \bigr\rangle_{(0,T)}\ :=\ \blangle\hspace{-2.5pt}\blangle g,\psi\brangle\hspace{-2.5pt}\brangle_{Q_T}, \label{4.13} \end{equation} where $\psi\in C_0^{\infty}(Q_T)$ is chosen so that $h(t)=\int_{\Omega}\psi(\mathbf{x},t)\; \mathrm{d}\mathbf{x}$ for all $t\in(0,T)$. The definition of the distribution $G$ is independent of the concrete choice of function $\psi$ due to these reasons: let $\psi_1$ and $\psi_2$ be two functions from $C_0^{\infty}(Q_T)$ such that $h(t)=\int_{\Omega}\psi_1(\mathbf{x},t)\; \mathrm{d}\mathbf{x}=\int_{\Omega}\psi_2(\mathbf{x},t)\; \mathrm{d}\mathbf{x}$ for $t\in(0,T)$. Denote by $G_1$, respectively $G_2$, the distribution, defined by formula (\ref{4.13}) with $\psi=\psi_1$, respectively $\psi=\psi_2$. Since $\mathrm{supp}\,(\psi_1-\psi_2)$ is a compact subset of $Q_T$ and $\int_{\Omega}[\psi_1(\, .\, ,t)-\psi_2(\, .\, ,t)]\; \mathrm{d}\mathbf{x}=0$ for all $t\in(0,T)$, there exists a function $\mbox{\boldmath $\phi$}\in\mathbf{C}_0^{\infty}(Q_T)$ such that $\mathrm{div}\,\mbox{\boldmath $\phi$}= \psi_1-\psi_2$ in $Q_T$. (See e.g.~\cite[Sec.~III.3]{Ga1} or \cite{BoSo} for the construction of function $\mbox{\boldmath $\phi$}$.) Then \begin{displaymath} \bigl\langle G_1-G_2,h \bigr\rangle_{(0,T)}\ :=\ \blangle\hspace{-2.5pt}\blangle g,\psi_1-\psi_2\brangle\hspace{-2.5pt}\brangle_{Q_T}\ =\ \blangle\hspace{-2.5pt}\blangle g,\mathrm{div}\,\mbox{\boldmath $\phi$}\brangle\hspace{-2.5pt}\brangle_{Q_T}, \end{displaymath} which is equal to zero due to (\ref{4.12}). Formula (\ref{4.13}) and the identity $h(t)=\int_{\Omega}\psi(\mathbf{x},t)\; \mathrm{d}\mathbf{x}$ show that the distribution $g$ has the form (\ref{4.11}).
We have proven the theorem:
\begin{theorem} \label{T4.1} The pressure, associated with a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}), is unique up to an additive distribution of the form (\ref{4.11}). \end{theorem}
\noindent {\bf 4.3. Projections $E^{1,2}_{\tau}$ and $E^{-1,2}_{\tau}$.} \ In this subsection, we introduce orthogonal projections $E^{1,2}_{\tau}$ and $E^{-1,2}_{\tau}$ in $\bfW_{\tau}^{1,2}(\Omega)$ and $\bfW_{\tau}^{-1,2}(\Omega)$, respectively, which further play an important role in the proof of the existence of an associated pressure.
$\bfW_{\tau}^{1,2}(\Omega)$ is a Hilbert space with the scalar product $(\, .\, ,\, .\, )_{1,2}=\bigl\langle({\cal A}_0+I)\, .\, ,\, .\ \bigr\rangle_{\tau}$, where ${\cal A}_0$ is the operator ${\cal A}$ from paragraph 3.2, corresponding to $\nu=1$ and $\gamma=0$. Similarly, $\bfW_{\tau}^{-1,2}(\Omega)$ is a Hilbert space with the scalar product \begin{equation} (\mathbf{g},\mathbf{h})_{-1,2}\ :=\ \bigl\langle\mathbf{g},({\cal A}_0+ I)^{-1}\mathbf{h}\bigr\rangle_{\tau}\ =\ \bigl(({\cal A}_0+ I)^{-1}\mathbf{g},({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2}. \label{4.1} \end{equation} Denote by $E^{1,2}_{\tau}$ the orthogonal projection in $\bfW_{\tau}^{1,2}(\Omega)$ that vanishes just on $\bfW_{\tau,\br\sigma}^{1,2}(\Omega)$, which means that \begin{equation} \ker E^{1,2}_{\tau}\ =\ \bfW_{\tau,\br\sigma}^{1,2}(\Omega). \label{4.2} \end{equation} Denote by $E^{-1,2}_{\tau}$ the adjoint projection in $\bfW_{\tau}^{-1,2}(\Omega)$. Applying (\ref{4.2}), one can verify that the range of $E^{-1,2}_{\tau}$ is $\bfW_{\tau,\br\sigma}^{1,2}(\Omega)^{\perp}$.
Let $\mathbf{g}\in\bfW_{\tau}^{-1,2}(\Omega)$ and $\mbox{\boldmath $\psi$}\in\bfW_{\tau}^{1,2}(\Omega)$. Then, due to (\ref{4.1}) and the orthogonality of $E^{1,2}_{\tau}$, we have \begin{displaymath} \bigl\langle\mathbf{g},E^{1,2}_{\tau}\mbox{\boldmath $\psi$}\bigr\rangle_{\tau}\ =\ \bigl( ({\cal A}_0+I)^{-1}\mathbf{g},E^{1,2}_{\tau}\mbox{\boldmath $\psi$}\bigr)_{1,2}\ =\ \bigl( E^{1,2}_{\tau}({\cal A}_0+I)^{-1}\mathbf{g},\mbox{\boldmath $\psi$}\bigr)_{1,2}. \end{displaymath} However, the duality on the left hand side can also be expressed in another way: \ using again (\ref{4.1}) and the fact that $E^{-1,2}_{\tau}$ is adjoint to $E^{1,2}_{\tau}$, we get \begin{displaymath} \bigl\langle\mathbf{g},E^{1,2}_{\tau}\mbox{\boldmath $\psi$}\bigr\rangle_{\tau}\ =\ \bigl\langle E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},\mbox{\boldmath $\psi$}\bigr\rangle_{\tau}\ =\ \bigl( ({\cal A}_0+I)^{-1} E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},\mbox{\boldmath $\psi$}\bigr)_{1,2}. \end{displaymath} Thus, we obtain the important identity \begin{equation} E^{1,2}_{\tau}\hbox to 0.7pt{}({\cal A}_0+I)^{-1}\ =\ ({\cal A}_0+I)^{-1} E^{-1,2}_{\tau}. \label{4.3} \end{equation} Applying (\ref{4.3}), we can now show that the projection $E^{-1,2}_{\tau}$ is orthogonal in $\bfW_{\tau}^{-1,2}(\Omega)$. Indeed, if $\mathbf{g},\hbox to 0.7pt{}\mathbf{h}\in\bfW_{\tau}^{-1,2}(\Omega)$ then \begin{align*} & \bigl(E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},\mathbf{h}\bigr)_{-1,2}\ =\ \bigl(({\cal A}_0+I)^{-1}E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g}, ({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2} \\
& \hspace{20pt} =\ \bigl(E^{1,2}_{\tau}\hbox to 0.7pt{} ({\cal A}_0+I)^{-1}\hbox to 0.7pt{} \mathbf{g},({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2}\ =\ \bigl(({\cal A}_0+I)^{-1} \mathbf{g},E^{1,2}_{\tau}({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2} \\
& \hspace{20pt} =\ \bigl(({\cal A}_0+I)^{-1}\mathbf{g},({\cal A}_0+I)^{-1}E^{-1,2}_{\tau} \mathbf{h}\bigr)_{1,2}\ =\ \bigl(\mathbf{g},E^{-1,2}_{\tau}\mathbf{h}\bigr)_{-1,2}. \end{align*} This verifies the orthogonality of projection $E^{-1,2}_{\tau}$.
Finally, we will show that if $\phi\in C^{\infty}_0(\Omega)$ then \begin{equation} E^{1,2}_{\tau}\nabla\phi\ =\ \nabla\phi \qquad \mbox{for all $\phi\in C^{\infty}_0(\Omega)$}. \label{4.4} \end{equation} Thus, let $\phi\in C^{\infty}_0(\Omega)$. Then $\nabla\phi\in\bfW_{\tau}^{1,2}(\Omega)$ and $\, ({\cal A}_0+I)\nabla\phi\equiv\nabla(-\Delta+I)\phi\in\bfW_{\tau,\br\sigma}^{1,2} (\Omega)^{\perp}$. Hence \begin{displaymath} E^{-1,2}_{\tau}({\cal A}_0+I)\nabla\phi\ =\ ({\cal A}_0+I)\nabla\phi. \end{displaymath} Applying (\ref{4.3}), we also get \begin{displaymath} E^{-1,2}_{\tau}({\cal A}_0+I)\nabla\phi\ =\ ({\cal A}_0+ I)E^{1,2}_{\tau}\nabla\phi. \end{displaymath} Since ${\cal A}_0+I$ is a one-to-one operator from $\bfW_{\tau}^{1,2}(\Omega)$ to $\bfW_{\tau}^{-1,2}(\Omega)$, the last two identities show that (\ref{4.4}) holds.
\noindent {\bf 4.4. Existence of an associated pressure.} \ In this paragraph, we show that to every weak solution of the IBVP (\ref{1.1})--(\ref{1.4}), an associated pressure exists and has a certain structure.
Let $\mathbf{u}$ be a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}). Due to \cite[Lemma III.1.1]{Te}, equation (\ref{3.9}) is equivalent to \begin{displaymath} \mathbf{u}(t)-\mathbf{u}(0)+\int_0^t\cPs{2}\bigl[{\cal A}\mathbf{u}+ {\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau\ =\ \mathbf{0} \end{displaymath} for a.a.~$t\in(0,T)$. (As usually, we identify $\mathbf{u}(\, .\, ,t)$ and $\mathbf{u}(t)$.) Since $\mathbf{u}(t)$ and $\mathbf{u}(0)$ are in $\bfL_{\tau,\sigma}^2(\Omega)$, they coincide with $\cPs{2}\mathbf{u}(t)$ and $\cPs{2}\mathbf{u}(0)$, respectively. (See paragraph 2.4.) Hence \begin{displaymath} \cPs{2}\biggl(\mathbf{u}(t)-\mathbf{u}(0)+\int_0^t\bigl[{\cal A}\mathbf{u}+ {\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau\biggr)\ =\ \mathbf{0}. \end{displaymath} Define $\mathbf{F}(t)\in\bfW_{\tau}^{-1,2}(\Omega)$ by the formula \begin{equation} \mathbf{F}(t)\ :=\ \mathbf{u}(t)-\mathbf{u}(0)+\int_0^t \bigl[{\cal A}\mathbf{u}+{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau. \label{4.6} \end{equation} Since $\langle\mathbf{F}(t),\mbox{\boldmath $\psi$}\rangle_{\tau}=\langle\cPs{2} \mathbf{F}(t),\mbox{\boldmath $\psi$}\rangle_{\tau,\sigma}=0$ for all $\mbox{\boldmath $\psi$}\in \bfW_{\tau,\br\sigma}^{1,2}(\Omega)$, $\mathbf{F}(t)$ belongs to $\bfW_{\tau,\br\sigma}^{1,2}(\Omega)^{\perp}$. Hence $E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{F}(t)=\mathbf{F}(t)$ and $(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{F}(t)=\mathbf{0}$. Thus, \begin{align*} (I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{u}(t) & -(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{u}(0) \\
& +\int_0^t(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\bigl[{\cal A}\mathbf{u}+ {\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau\ =\ \mathbf{0} \end{align*} holds as an equation in $\bfW_{\tau}^{-1,2}(\Omega)$. Applying Lemma III.1.1 from \cite{Te}, we deduce that \begin{displaymath} \bigl[(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{u}\bigr]'+(I-E^{-1,2}_{\tau})\hbox to 0.7pt{} \bigl[{\cal A}\mathbf{u}+{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\ =\ \mathbf{0}. \end{displaymath} This yields \begin{align} \mathbf{u}'+{\cal A}\mathbf{u} & +{\cal B}(\mathbf{u},\mathbf{u})\ =\ \mathbf{f} \nonumber \\ \noalign{\vskip 2pt}
& +E^{-1,2}_{\tau}[\mathbf{u}'+{\cal A}\mathbf{u}+{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}]. \label{4.7} \end{align} (Here, $[(I-E^{-1,2}_{\tau})\mathbf{u}]'$ and $\mathbf{u}'$ are the distributional derivatives with respect to $t$ of $(I-E^{-1,2}_{\tau})\mathbf{u}$ and $\mathbf{u}$, respectively, as functions from $(0,T)$ to $\bfW_{\tau}^{-1,2}(\Omega)$.) Let $\Omega_0\subset\subset\Omega$ be a non-empty domain. By Lemma \ref{L2.2}, there exist unique $p_1(t)$, $p_{21}(t)$, $p_{22}(t)$, $p_{23}(t)$ in $L^2_{loc}(\Omega)$ such that \begin{equation} \begin{array}{rl} \bigl\langle -E^{-1,2}_{\tau}\mathbf{u}(t),\mbox{\boldmath $\psi$}\bigr\rangle_{\tau}\ &=\ {\displaystyle -\int_{\Omega} p_1(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x},} \\ [10pt]
\bigl\langle -E^{-1,2}_{\tau}{\cal A}\mathbf{u}(t),\mbox{\boldmath $\psi$} \bigr\rangle_{\tau}\ &=\ {\displaystyle -\int_{\Omega} p_{21}(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x},} \\ [10pt]
\bigl\langle -E^{-1,2}_{\tau}{\cal B}(\mathbf{u}(t),\mathbf{u}(t)),\mbox{\boldmath $\psi$} \bigr\rangle_{\tau}\ &=\ {\displaystyle -\int_{\Omega} p_{22}(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x},} \\ [10pt]
\bigl\langle -E^{-1,2}_{\tau}\mathbf{f}(t),\mbox{\boldmath $\psi$} \bigr\rangle_{\tau}\ &=\ {\displaystyle -\int_{\Omega} p_{23}(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x}} \end{array} \label{4.8} \end{equation} for a.a.~$t\in(0,T)$ and all $\mbox{\boldmath $\psi$}\in\bfW_{\tau,\br c}^{1,2}(\Omega)$ and the inequalities \begin{equation} \begin{array}{lll}
\|p_1(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}\mathbf{u}(t)\|_{-1,2}\ & \leq\ c(R)\, \|\mathbf{u}(t)\|_{-1,2}, \\ [5pt]
\|p_{21}(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}{\cal A}\mathbf{u}(t)\|_{-1,2}\ & \leq\ c(R)\,
\|{\cal A}\mathbf{u}(t)\|_{-1,2}, \\ [5pt]
\|p_{22}(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}{\cal B}(\mathbf{u}(t),\mathbf{u}(t))\|_{-1,2}\ & \leq\ c(R)\,
\|{\cal B}(\mathbf{u}(t),\mathbf{u}(t))\|_{-1,2}, \\ [5pt]
\|p_{23}(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}\mathbf{f}(t)\|_{-1,2}\ & \leq\ c(R)\,
\|\mathbf{f}(t)\|_{-1,2} \end{array} \label{4.9} \end{equation} hold for all $R>0$ and a.a.~$t\in(0,T)$. Moreover, $\int_{\Omega_0}p_1(t)\; \mathrm{d}\mathbf{x}=\int_{\Omega_0}p_{2i}(t)\; \mathrm{d}\mathbf{x}=0$ ($i=1,2,3$) for a.a.~$t\in(0,T)$. Using the inequality
$\|\mathbf{u}(t)\|_{-1,2}\leq \|\mathbf{u}(t)\|_2$ and estimates (\ref{3.5}), we get \begin{equation} \begin{array}{ll} p_1\hspace{4.1pt}\in L^{\infty}(0,T;\, L^2(\Omega_R)), & p_{21}\in L^2(0,T;\, L^2(\Omega_R)), \\ [5pt] p_{22}\in L^{4/3} (0,T;\, L^2(\Omega_R)), \hbox to 10pt{} & p_{23}\in L^2(0,T;\, L^2(\Omega_R)) \end{array} \label{4.10} \end{equation} for all $R>0$.
For a.a.~$t\in(0,T)$, the functions $p_1(t)$ and $p_{21}(t)$ are harmonic in $\Omega$. This follows from the identities \begin{align*} \int_{\Omega} p_1(t)\, \Delta\phi\; \mathrm{d}\mathbf{x}\ &=\ -\bigl\langle \nabla p_1(t),\nabla\phi\bigr\rangle_{\tau}\ =\ \bigl\langle E^{-1,2}_{\tau}\mathbf{u}(t),\nabla\phi\bigr\rangle_{\tau}\ =\ \bigl\langle \mathbf{u}(t),E^{1,2}_{\tau}\hbox to 0.7pt{}\nabla\phi\bigr\rangle_{\tau} \\
&=\ \bigl\langle\mathbf{u}(t),\nabla\phi\bigr\rangle_{\tau}\ =\ \int_{\Omega} \mathbf{u}(t)\cdot\nabla\phi\; \mathrm{d}\mathbf{x}\ =\ 0 \quad \mbox{(for all $\phi\in C_0^{\infty}(\Omega)$).} \end{align*} (We have used (\ref{4.4}).) Hence, by Weyl's lemma, $p_1(t)$ is a harmonic function in $\Omega$. The fact that $p_{21}(t)$ is harmonic can be proved similarly.
Equation (\ref{4.7}) is an equation in $\bfW_{\tau}^{-1,2}(\Omega)$. Applying successively each term in (\ref{4.7}) to the function of the type $\mbox{\boldmath $\varphi$}(\mathbf{x})\, \eta(t)$, where $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br c}^{1,2}(\Omega)$ and $\eta\in C^{\infty}_0((0,T))$, using formulas (\ref{4.8}), and denoting $p_2:=p_{21}+p_{22}+p_{23}$, we obtain \begin{gather*} \int_0^T \int_{\Omega} \bigl[ -\mathbf{u}\cdot\mbox{\boldmath $\varphi$}\, \eta'(t)+ \nu\hbox to 0.7pt{}\nabla\mathbf{u}:\nabla\mbox{\boldmath $\varphi$}\, \eta(t)+\mathbf{u}\cdot\nabla \mathbf{u}\cdot \mbox{\boldmath $\varphi$}\, \eta(t) \bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_0^T\int_{\partial\Omega} \gamma\, \mathbf{u}\cdot\mbox{\boldmath $\varphi$}\, \eta(t)\; \mathrm{d} S\, \mathrm{d} t \\
=\ \int_0^T \langle\mathbf{f},\mbox{\boldmath $\varphi$}\rangle_{\tau}\, \eta(t)\; \mathrm{d} t-\int_0^T\int_{\Omega} p_1\ \mathrm{div}\,\mbox{\boldmath $\varphi$}\ \eta'(t)\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_0^T\int_{\Omega} p_2\ \mathrm{div}\, \mbox{\boldmath $\varphi$}\ \eta(t)\; \mathrm{d}\mathbf{x}\, \mathrm{d} t \end{gather*} for all functions $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br c}^{1,2}(\Omega)$ and $\eta\in C^{\infty}_0((0,T))$. Since the set of all finite linear combinations of functions of the type $\mbox{\boldmath $\varphi$}(\mathbf{x})\, \eta(t)$, where $\mbox{\boldmath $\varphi$}\in\bfW_{\tau,\br c}^{1,2}(\Omega)$ and $\eta\in C^{\infty}_0((0,T))$, is dense in $C^{\infty}_0\bigl((0,T);\, \bfW_{\tau,\br c}^{1,2}(\Omega)\bigr)$ in the norm of $W^{1,2}_0(0,T;\, \bfW_{\tau}^{1,2}(\Omega))$, we also obtain the equation \begin{gather} \int_0^T \int_{\Omega} \bigl[ -\mathbf{u}\cdot\partial_t\mbox{\boldmath $\phi$}+ \nu\hbox to 0.7pt{}\nabla\mathbf{u}:\nabla\mbox{\boldmath $\phi$}+\mathbf{u}\cdot\nabla\mathbf{u}\cdot\mbox{\boldmath $\phi$} \bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_0^T\int_{\partial\Omega} \gamma\, \mathbf{u}\cdot\mbox{\boldmath $\phi$}\; \mathrm{d} S\, \mathrm{d} t \nonumber \\
=\ \int_0^T \langle\mathbf{f},\mbox{\boldmath $\phi$}\rangle_{\tau}\; \mathrm{d} t-\int_0^T\int_{\Omega} p_1\ \mathrm{div}\,\partial_t\mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_0^T\int_{\Omega} p_2\ \mathrm{div}\, \mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\, \mathrm{d} t \label{4.5} \end{gather} for all $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl((0,T);\, \bfW_{\tau,\br c}^{1,2}(\Omega)\bigr)$. Choosing particularly $\mbox{\boldmath $\phi$}\in\mathbf{C}^{\infty}_0(Q_T)$ and putting \begin{equation} p\ :=\ \partial_tp_1+p_2\ \equiv\ \partial_tp_1+p_{21}+p_{22}+ p_{23} \label{4.14} \end{equation} (where $\partial_tp_1$ is the derivative in the sense of distributions), we observe that $(\mathbf{u},p)$ is a distributional solution of the system (\ref{1.1}), (\ref{1.2}) in $Q_T$.
The next theorem summarizes the results of this subsection:
\begin{theorem} \label{T4.2} Let $T>0$ and $\Omega$ be a locally Lipschitz domain in ${\mathbb R}^3$, satisfying condition (i) from subsection 1.1. Let $\mathbf{u}$ be a weak solution to the Navier-Stokes IBVP (\ref{1.1})--(\ref{1.4}). Then there exists an associated pressure $p$ in the form (\ref{4.14}), where $p_1$, $p_{21}$, $p_{22}$, $p_{23}$ satisfy (\ref{4.8})--(\ref{4.10}). Moreover,
\begin{list}{} {\setlength{\topsep 2pt} \setlength{\itemsep 0pt} \setlength{\leftmargin 20pt} \setlength{\rightmargin 0pt} \setlength{\labelwidth 16pt}}
\item[1) ] if $\, \Omega_0\subset\subset\Omega$ then the functions $p_1(t),\, p_{21}(t),\, p_{22}(t),\, p_{23}(t)$ can be chosen so that they satisfy the additional conditions \begin{displaymath} \int_{\Omega_0}p_1(t)\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega_0}p_{21}(t)\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega_0}p_{22}(t)\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega_0}p_{23}(t)\; \mathrm{d}\mathbf{x}\ =\ 0, \end{displaymath}
\item[2) ] the functions $p_1(t)$ and $p_{21}(t)$ are harmonic in $\Omega$ for a.a.~$t\in(0,T)$,
\item[3) ] the functions $\mathbf{u}$, $p_1$ and $p_2\equiv p_{21}+p_{22}+p_{23}$ satisfy the integral equation (\ref{4.5}) for all test functions $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl((0,T);\, \bfW_{\tau,\br c}^{1,2}(\Omega)\bigr)$.
\end{list} \end{theorem}
\noindent Note that if $\Omega$ is a bounded Lipschitz domain then the choice $\Omega_0=\Omega$ is also permitted in statement 1) of Theorem \ref{T4.2}.
\section{The case of a smooth bounded domain $\Omega$} \label{S5}
{\bf 5.1. Some results from paper \cite{AmEsGh}.} \ In this section, we assume that $\Omega$ is a bounded domain in ${\mathbb R}^3$ with the boundary of the class $C^2$. We denote by $A_q$ (for $1<q<\infty$) the linear operator in $\bfL_{\tau,\sigma}^q(\Omega)$ with the domain defined by the equation \begin{displaymath} A_q\mathbf{v}\ :=\ -\nu\, P_q\hbox to 0.7pt{}\Delta\mathbf{v} \end{displaymath} for $\mathbf{v}\in D(A_q)$, where \begin{displaymath} D(A_q)\ :=\ \bigl\{\mathbf{v}\in\mathbf{W}^{2,q}(\Omega)\cap\bfW_{\tau,\br\sigma}^{1,q}(\Omega);\ [\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{v})\cdot\mathbf{n}]_{\tau}+\gamma\hbox to 0.7pt{}\mathbf{v}_{\tau}=\mathbf{0}\ \mbox{on}\ \partial\Omega \bigr\} \end{displaymath} is the domain of operator $A_q$. Recall that $\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{v})\equiv 2\nu\hbox to 0.7pt{}{\mathbb D}(\mathbf{v})$ is the dynamic stress tensor, induced by the vector field $\mathbf{v}$, and $P_q$ is the Helmholtz projection in $\mathbf{L}^q(\Omega)$. Operator $A_q$ is usually called the {\it Stokes operator} in $\bfL_{\tau,\sigma}^q(\Omega)$. Particularly, if $q=2$ then $A_2$ coincides with the restriction of operator ${\cal A}$, defined in subsection 3.2, to $D(A_2)$. It is shown in the paper \cite{AmEsGh} by Ch.~Amrouche, M.~Escobedo and A.~Ghosh that $(-A_q)$ generates a bounded analytic semigroup $\mathrm{e}^{-A_q t}$ in $\bfL_{\tau,\sigma}^q(\Omega)$. The next lemma also comes from \cite{AmEsGh}, see \cite[Theorem 1.3]{AmEsGh}. It concerns the solution of the inhomogeneous non--steady Stokes problem, given by the equations \begin{equation} \partial_t\mathbf{u}+\nabla \pi\ =\ \nu\Delta\mathbf{u}+\mathbf{g} \label{5.1} \end{equation} and (\ref{1.2}) (in $Q_T$), by the boundary conditions (\ref{1.3}) and by the initial condition (\ref{1.4}). 
The initial velocity $\mathbf{u}_0$ is supposed to be from the space $\mathbf{E}_r^q(\Omega)$, which is defined to be the real interpolation space $[D(A_q),\, \bfL_{\tau,\sigma}^q(\Omega)]_{1/r,r}$. The problem (\ref{5.1}), (\ref{1.2})--(\ref{1.3}) can also be equivalently written in the form \begin{equation} \frac{\mathrm{d}\mathbf{u}}{\mathrm{d} t}+A_q\mathbf{u}\ =\ \mathbf{g}, \qquad \mathbf{u}(0)=\mathbf{u}_0, \label{5.1a} \end{equation} which is the initial--value problem in $\bfL_{\tau,\sigma}^q(\Omega)$. (Although the pressure $\pi$ does not explicitly appear in (\ref{5.1a}), it can always be reconstructed in the way described in section \ref{S4}.) The lemma says:
\begin{lemma} \label{L5.1} Let $r,q\in(1,\infty)$, $T>0$, $\mathbf{g}\in L^r(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))$ and $\mathbf{u}_0\in\mathbf{E}_r^q(\Omega)$. Then the Stokes problem (\ref{5.1}), (\ref{1.2}), (\ref{1.3}), (\ref{1.4}) has a unique solution $(\mathbf{u},\pi)$ in $\bigl[ W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\, \mathbf{W}^{2,q}(\Omega)) \bigr] \times L^r(0,T;\, W^{1,q}(\Omega)/{\mathbb R})$. The solution satisfies the estimate \begin{equation}
\int_0^T \|\partial_t\mathbf{u}\|_q^r\; \mathrm{d} t+\int_0^T\|\mathbf{u}\|_{2,q}^r\; \mathrm{d} t+\int_0^T\|\pi\|_{1,q}^r\; \mathrm{d} t\ \leq\ C\, \biggl( \int_0^T\|\mathbf{g}\|_q^r\; \mathrm{d} t+
\|\mathbf{u}_0\|_{\mathbf{E}_r^q(\Omega)}^r \biggr). \label{5.2} \end{equation} \end{lemma}
The proof is based on a more general theorem from the paper \cite{GiSo} by Y.~Giga and H.~Sohr.
\noindent {\bf 5.2. Application of Lemma \ref{L5.1}.} \ If $\mathbf{u}$ is a weak solution to the problem (\ref{1.1})--(\ref{1.4}) then, since $\mathbf{u}\in L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega)) \cap L^2(0,T;\ \bfW_{\tau,\br\sigma}^{1,2}(\Omega))$, one can verify that $\mathbf{u}\cdot\nabla\mathbf{u}\in L^r(0,T;\, \mathbf{L}^q(\Omega))$ for all $1\leq r\leq 2$, $1\leq q\leq\frac{3}{2}$, satisfying $2/r+3/q=4$. In order to be consistent with the assumptions of Lemma \ref{L5.1} regarding $q$ and $r$, assume that $1<q<\frac{3}{2}$, $1<r<2$ and $2/r+3/q=4$. Furthermore, assume that $\mathbf{u}_0\in\mathbf{E}^q_r(\Omega)\cap\bfL_{\tau,\sigma}^2(\Omega)$ and function $\mathbf{f}$ on the right hand side of equation (\ref{1.1}) is in $L^r(0,T;\, \mathbf{L}^q(\Omega)) \cap L^2(0,T;\, \bfW_{\tau}^{-1,2} (\Omega))$. Put $\mathbf{g}:=P_q\mathbf{f}-P_q(\mathbf{u}\cdot\nabla\mathbf{u})$. Then, due to the boundedness of projection $P_q$ in $\mathbf{L}^q(\Omega)$, $\mathbf{g}\in L^r(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))$. Assume, moreover, that $\mathbf{u}_0\in\mathbf{E}_r^q(\Omega)$. Now, we are in a position that we can apply Lemma \ref{L5.1} and deduce that the linear Stokes problem (\ref{5.1}), (\ref{1.2})--(\ref{1.4}) has a unique solution $(\mathbf{U},\pi)\in \bigl[ W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\, \mathbf{W}^{2,q}(\Omega)) \bigr] \times L^r(0,T;\, W^{1,q}(\Omega)/{\mathbb R})$, satisfying estimate (\ref{5.2}) with $\mathbf{U}$ instead of $\mathbf{u}$. In order to show that the weak solution $\mathbf{u}$ of the nonlinear Navier--Stokes problem (\ref{1.1})--(\ref{1.4}) satisfies the same estimate, too, we need to identify $\mathbf{u}$ with $\mathbf{U}$.
\noindent {\bf 5.3. The identification of $\mathbf{U}$ and $\mathbf{u}$.} \ It is not obvious at the first sight that $\mathbf{U}=\mathbf{u}$, because while $\mathbf{U}$ is a unique solution of the problem (\ref{5.1}), (\ref{1.2})--(\ref{1.4}) in the class $W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\, \mathbf{W}^{2,q}(\Omega))$, $\mathbf{u}$ is only known to be in $L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega)) \cap L^2(0,T;\ \bfW_{\tau,\br\sigma}^{1,2}(\Omega))$. Nevertheless, applying the so called Yosida approximation of the identity operator in $\bfL_{\tau,\sigma}^q(\Omega)$, defined by the formula $J^{(k)}_q:=(I+k^{-1}A_q)^{-1}$ (for $k\in{\mathbb N}$), in the same spirit as in \cite{GiSo} or \cite{SoWa}, the equality $\mathbf{U}=\mathbf{u}$ can be established. We explain the main steps of the procedure in greater detail in the rest of this subsection.
At first, one can deduce from \cite[Section 3]{AmEsGh} that the spectrum of $A_q$ is a subset of the interval $(0,\infty)$ on the real axis, which implies that $J^{(k)}_q$ is a bounded operator on $\bfL_{\tau,\sigma}^q(\Omega)$ with values in $D(A_q)$. Obviously, $J^{(k)}_q$ commutes with $A_q$ and with $J^{(m)}_q$ (for $k,m\in{\mathbb N}$, $k\not=m$) and $J^{(k)}_q=J^{(k)}_s$ on $\bfL_{\tau,\sigma}^q(\Omega)\cap \bfL_{\tau,\sigma}^s(\Omega)$ (for $1<s<\infty$). If $q=2$ then $A_2$ is a positive selfadjoint operator in $\bfL_{\tau,\sigma}^2$, see \cite{BdV}. Consequently, $J^{(k)}_2$ is a selfadjoint operator in $\bfL_{\tau,\sigma}^2(\Omega)$, too. Finally, it is proven in \cite[p.~246]{Yo} that $J^{(k)}_q\mathbf{v}\to\mathbf{v}$ strongly in $\bfL_{\tau,\sigma}^q(\Omega)$ for all $\mathbf{v}\in\bfL_{\tau,\sigma}^q(\Omega)$ and $k\to\infty$.
Consider (\ref{3.1}) with $\mbox{\boldmath $\phi$}(\mathbf{x},t)=[J^{(k)}_q\mathbf{w}](\mathbf{x})\, \vartheta(t)$, where $k\in{\mathbb N}$, $\mathbf{w}\in\bfC^{\infty}_{0,\sigma}(\Omega)$ and $\vartheta\in C^{\infty}_0\bigl([0,T)\bigr)$. In this case, (\ref{3.1}) yields \begin{align} \int_0^T \int_{\Omega}\bigl[-\mathbf{u} & \cdot J^{(k)}_q\mathbf{w}\, \vartheta'+ (\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}\, \vartheta+ 2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:(\nabla J^{(k)}_q\mathbf{w})_s\, \vartheta\bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t \nonumber \\ \noalign{\vskip-4pt}
& \hspace{14pt} +\int_0^T\int_{\partial\Omega}\gamma\hbox to 0.7pt{}\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\, \vartheta\; \mathrm{d} S\, \mathrm{d} t \nonumber \\ \noalign{\vskip 2pt}
&=\ \int_0^T\int_{\Omega}\mathbf{f}\cdot J^{(k)}_q\mathbf{w}\, \vartheta\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+ \int_{\Omega}\mathbf{u}_0\cdot J^{(k)}_q\mathbf{w}\, \vartheta(0)\, \mathrm{d}\mathbf{x}. \label{5.3} \end{align} The integral of $(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}$ in $\Omega$ can be rewritten as follows: \begin{align*} \int_{\Omega}(\mathbf{u} & \cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x} = \int_{\Omega}P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x} = \int_{\Omega} P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_2\mathbf{w}\; \mathrm{d}\mathbf{x} \\
&= \lim_{m\to\infty}\ \int_{\Omega}J^{(m)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_2\mathbf{w}\; \mathrm{d}\mathbf{x} = \lim_{m\to\infty}\ \int_{\Omega}J^{(k)}_2J^{(m)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\; \mathrm{d}\mathbf{x} \\
&= \lim_{m\to\infty}\ \int_{\Omega}J^{(k)}_qJ^{(m)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\; \mathrm{d}\mathbf{x} = \lim_{m\to\infty}\ \int_{\Omega}J^{(m)}_qJ^{(k)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\; \mathrm{d}\mathbf{x} \\
&= \int_{\Omega} J^{(k)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\; \mathrm{d}\mathbf{x}. \end{align*} This shows, except others, that the integrals of $\mathbf{v}_1\cdot J^{(k)}_q\mathbf{v}_2$ and $J^{(k)}_q\mathbf{v}_1\cdot\mathbf{v}_2$ in $\Omega$ are equal for $\mathbf{v}_1,\, \mathbf{v}_2\in\bfL_{\tau,\sigma}^q(\Omega)$. The integrals of $2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:(\nabla J^{(k)}_q\mathbf{w})_s$ and $\gamma\hbox to 0.7pt{}\mathbf{u}\cdot J^{(k)}_q\mathbf{w}$ over $\Omega$ and $\partial\Omega$, respectively, can be modified by means of the identities: \begin{align*} \int_{\Omega} & 2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:(\nabla J^{(k)}_q\mathbf{w})_s\; \mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d} S \\
&=\ \int_{\Omega}2\nu\hbox to 0.7pt{}\nabla\mathbf{u} : (\nabla J^{(k)}_q\mathbf{w})_s\; \mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d} S \\
&=\ \int_{\partial\Omega}2\nu\hbox to 0.7pt{}\mathbf{u}\cdot[(\nabla J^{(k)}_q\mathbf{w})_s\cdot\mathbf{n}]\; \mathrm{d} S-\int_{\Omega}\nu\hbox to 0.7pt{}\mathbf{u}\cdot \Delta J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma \mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d} S \\
&=\ -\int_{\Omega}\nu\hbox to 0.7pt{}\mathbf{u}\cdot \Delta J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega}\mathbf{u}\cdot A_q J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega}A_q\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x} \\
&=\ \int_{\Omega}J^{(k)}_q A_q\mathbf{u}\cdot\mathbf{w}\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega}A_q J^{(k)}_q\mathbf{u}\cdot\mathbf{w}\; \mathrm{d}\mathbf{x}. \end{align*} Thus, we obtain from (\ref{5.3}): \begin{align*} \int_0^T \int_{\Omega}\bigl[-J^{(k)}_q\mathbf{u} & \cdot\mathbf{w}\, \vartheta'+J^{(k)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot\mathbf{w}\, \vartheta+A_q J^{(k)}_q\mathbf{u}\cdot\mathbf{w}\, \vartheta \bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t \nonumber \\ \noalign{\vskip 0pt}
&=\ \int_0^T\int_{\Omega} J^{(k)}_q\mathbf{f}\cdot\mathbf{w}\, \vartheta\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_{\Omega} J^{(k)}_q\mathbf{u}_0\cdot\mathbf{w}\, \vartheta(0)\, \mathrm{d}\mathbf{x}. \end{align*} As $\mathbf{w}$ and $\vartheta$ are arbitrary functions from $\bfC^{\infty}_{0,\sigma}(\Omega)$ and $C^{\infty}_0\bigl([0,T)\bigr)$, respectively, this shows that $J^{(k)}_q\mathbf{u}$ is a solution of the initial--value problem \begin{equation} (J^{(k)}_q\mathbf{u})'+A_q J^{(k)}_q\mathbf{u}\ =\ J^{(k)}_q\mathbf{g}, \qquad J^{(k)}_q\mathbf{u}(\, .\, ,0)=J^{(k)}_q\mathbf{u}_0 \label{5.4} \end{equation} (which is a problem in $\bfL_{\tau,\sigma}^q(\Omega)$) in the class $W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\, \mathbf{W}^{2,q}(\Omega))$. Since $J^{(k)}_q\mathbf{U}$ solves the same problem and belongs to the same class, we obtain the identity $J^{(k)}_q\mathbf{U}(t)=J^{(k)}_q\mathbf{u}(t)$ for a.a.~$t\in(0,T)$. Consequently, $\mathbf{U}(t)=\mathbf{u}(t)$ for a.a.~$t\in(0,T)$.
\noindent {\bf 5.4. The estimate of $\mathbf{u}$ and an associated pressure $p$.} \ Since $\mathbf{g}=P_q\mathbf{f}-P_q(\mathbf{u}\cdot\nabla\mathbf{u})$, we can also write equation (\ref{5.1}) in the form \begin{align*} \partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}\ &=\ -\nabla\pi+\nu\Delta\mathbf{u}+ \mathbf{f}+(I-P_q)(-\mathbf{f}+\mathbf{u}\cdot\nabla\mathbf{u}) \\
&=\ -\nabla(\pi+\zeta)+\nu\Delta\mathbf{u}+\mathbf{f}, \end{align*} where $\nabla\zeta=(I-P_q)(\mathbf{u}\cdot\nabla\mathbf{u}-\mathbf{f})$. (The fact that $(I-P_q)(\mathbf{u}\cdot\nabla\mathbf{u}-\mathbf{f})$ can be expressed in the form $\nabla\zeta$ follows e.g.~from \cite[section III.1]{Ga1}.) We observe that $p:=\pi+\zeta$ is a pressure, associated with the weak solution $\mathbf{u}$. Since the pair $(\mathbf{U},\pi)$ satisfies (\ref{5.2}), $\mathbf{u}$ and $p$ satisfy the analogous estimate \begin{align}
\int_0^T & \|\partial_t\mathbf{u}\|_q^r\; \mathrm{d} t+\int_0^T
\|\mathbf{u}\|_{2,q}^r\; \mathrm{d} t+\int_0^T\|p\|_{1,q}^r\; \mathrm{d} t \nonumber \\
&\leq\ C\int_0^T\bigl( \|\mathbf{f}\|_q^r+\| P_q(\mathbf{u}\cdot
\nabla\mathbf{u})\|_q^r \bigr)\; \mathrm{d} t+C\,
\|\mathbf{u}_0\|_{\mathbf{E}_r^q(\Omega)}^r. \label{5.5} \end{align} We have proven the theorem:
\begin{theorem} \label{T5.1} Let $\Omega$ be a bounded domain in ${\mathbb R}^3$ with the boundary of the class $C^2$ and $T>0$. Let $1<q<\frac{3}{2}$, $1<r<2$, $2/r+3/q=4$, $\mathbf{u}_0\in\mathbf{E}_r^q(\Omega)\cap \bfL_{\tau,\sigma}^2(\Omega)$ and $\mathbf{f}\in L^r(0,T;\, \mathbf{L}^q(\Omega))\cap L^2(0,T;\, \mathbf{L}^2(\Omega))$. Let $\mathbf{u}$ be a weak solution to the Navier-Stokes IBVP (\ref{1.1})--(\ref{1.4}) and $p$ be an associated pressure. Then $\mathbf{u}\in L^r(0,T;\ \mathbf{W}^{2,q}(\Omega)) \cap W^{1,r}(0,T;\, \mathbf{L}^q(\Omega))$ and $p$ can be identified with a function from $L^r(0,T;\, L^{3q/(3-q)}(\Omega))$. The functions $\mathbf{u}$, $p$ satisfy equations (\ref{1.1}), (\ref{1.2}) a.e.~in $Q_T$ and the boundary conditions (\ref{1.3}) a.e.~in $\Gamma_T$. Moreover, they also satisfy estimate (\ref{5.5}). \end{theorem}
\section{An interior regularity of the associated pressure} \label{S6}
{\bf 6.1. On previous results on the interior regularity of velocity and pressure.} \ The next lemma recalls the well-known result of Serrin on the interior regularity of weak solutions to the system (\ref{1.1}), (\ref{1.2}). (See e.g.~\cite{Oh}, \cite{Se} or \cite{Ga2}.) It concerns weak solutions in $\Omega_1\times(t_1,t_2)$, where $\Omega_1$ is a sub-domain of $\Omega$, independently of boundary conditions on $\Gamma_T$.
\begin{lemma} \label{L6.1} Let $\Omega_1$ be a sub-domain of $\Omega$, $0\leq t_1<t_2\leq T$ and let $\mathbf{u}$ be a weak solution to the system (\ref{1.1}), (\ref{1.2}) with $\mathbf{f}=\mathbf{0}$ in $\Omega_1\times(t_1,t_2)$. Let $\mathbf{u}\in L^r(t_1,t_2;\, \mathbf{L}^s(\Omega_1))$, where $r\in[2,\infty)$, $s\in(3,\infty]$ and $2/r+3/s=1$. Then, if $\Omega_2\subset\subset\Omega_1$ and $0<2\epsilon<t_2-t_1$, solution $\mathbf{u}$ has all spatial derivatives (of all orders) bounded in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$. \end{lemma}
Note that Lemma \ref{L6.1} uses no assumptions on boundary conditions, satisfied by $\mathbf{u}$ on $\partial\Omega\times(0,T)$. The assumption that $\mathbf{u}$ is a weak solution to the system (\ref{1.1}), (\ref{1.2}) in $\Omega_1\times(t_1,t_2)$ means that $\mathbf{u}\in L^{\infty}(t_1,t_2;\, \mathbf{L}^{2}(\Omega_1))\cap L^2(t_1,t_2;\, \mathbf{W}^{1,2}(\Omega_1))$, $\mathrm{div}\,\mathbf{u}=0$ holds in the sense of distributions in $\Omega_1\times(t_1,t_2)$ and $\mathbf{u}$ satisfies (\ref{3.1}) for all infinitely differentiable divergence--free test functions $\mbox{\boldmath $\phi$}$ that have a compact support in $\Omega_1\times(t_1,t_2)$. (Then the last integral on the left hand side and both integrals on the right hand side are equal to zero.) Also note that applying the results of \cite{Sereg}, one can add to the conclusions of Lemma \ref{L6.1} that $\mathbf{u}$ is H\"older--continuous in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$. Lemma \ref{L6.1} provides no information on the associated pressure $p$ or the time derivative $\partial_t\mathbf{u}$ in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$. The known results on the regularity of $p$ and $\partial_t\mathbf{u}$ in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$, under the assumptions that $\mathbf{u}$ is a weak solution of (\ref{1.1}), (\ref{1.2}) in $\Omega\times(t_1,t_2)$ satisfying the conditions formulated in Lemma \ref{L6.1} in $\Omega_1\times(t_1,t_2)$, say:
\begin{list}{} {\setlength{\topsep}{4pt} \setlength{\itemsep}{2pt} \setlength{\leftmargin}{17pt} \setlength{\rightmargin}{0pt} \setlength{\labelwidth}{8pt}}
\item[a)] If $\Omega={\mathbb R}^3$ then $p$, $\partial_t\mathbf{u}$ and all their spatial derivatives (of all orders) are in $L^{\infty}(\Omega_2 \times(t_1+\epsilon,t_2-\epsilon))$, see \cite{Ne1}, \cite{Ne2} or \cite{SkaKu}.
\item[b)] If $\Omega$ is a bounded or exterior domain in ${\mathbb R}^3$ with the boundary of the class $C^{2+(h)}$ for some $h>0$ and $\mathbf{u}$ satisfies the no--slip boundary condition $\mathbf{u}=\mathbf{0}$ on $\partial\Omega\times(0,T)$ then $p$ and $\partial_t\mathbf{u}$ have all spatial derivatives (of all orders) in $L^q(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_2))$ for any $q\in(1,2)$, see \cite{NePe1}, \cite{Ne1}, \cite{Ne2} or \cite{SkaKu}.
\item[c)] If $\Omega$ is a bounded domain in ${\mathbb R}^3$ with the boundary of the class $C^{2+(h)}$ for some $h>0$ and $\mathbf{u}$ satisfies the Navier--type boundary conditions \begin{displaymath} \mathbf{u}\cdot\mathbf{n}=0, \qquad \mathbf{curl}\,\mathbf{u}\times\mathbf{n}=\mathbf{0} \qquad \mbox{on}\ \partial\Omega\times(t_1,t_2) \end{displaymath} then $p$ and $\partial_t\mathbf{u}$ have the same regularity in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$ as stated in item a), see \cite{NeAlB}.
\end{list}
\noindent In the proofs, it is always sufficient to show that the aforementioned statements hold for $p$. The same statements on $\partial_t\mathbf{u}$ follow from the fact that $\nabla p$ and $\partial_t\mathbf{u}$ are interconnected through the Navier--Stokes equation (\ref{1.1}).
\noindent {\bf 6.2. An interior regularity of $p$ in case of Navier's boundary conditions.} \ We further assume that $\Omega$ and $T$ are as in Theorem \ref{T5.1} and $\mathbf{f}=\mathbf{0}$. The main result of this section says:
\begin{theorem} \label{T6.1} Let $\Omega$ and $T$ be as in Theorem \ref{T5.1} and $\mathbf{f}=\mathbf{0}$. Let $\mathbf{u}$ be a weak solution to the problem (\ref{1.1})--(\ref{1.4}) and let $p$ be an associated pressure. Let $\Omega_1$ be a sub-domain of $\Omega$, $0<t_1<t_2\leq T$ and let $\mathbf{u}\in L^r(t_1,t_2;\, \mathbf{L}^s(\Omega_1))$, where $r\in[2,\infty)$, $s\in(3,\infty]$ and $2/r+3/s=1$. Finally, let $\Omega_3\subset\subset\Omega_1$ and $0<\epsilon<t_2-t_1$. Then $p$ can be chosen so that all its spatial derivatives (of all orders) are in $L^4(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$. Similarly, $\partial_t\mathbf{u}$ and all its spatial derivatives (of all orders) are in $L^4(t_1+\epsilon,t_2-\epsilon;\, \mathbf{L}^{\infty}(\Omega_3))$. \end{theorem}
\begin{proof} There exists $t_*\in(0,t_1)$ such that $\mathbf{u}(\, .\, ,t_*)\in\bfW_{\tau,\br\sigma}^{1,2}(\Omega)\subset\mathbf{E}_r^q(\Omega)$ for all $r$ and $q$, considered in Theorem \ref{T5.1}. Hence $\mathbf{u}\in L^r(t_*,T;\ \mathbf{W}^{2,q}(\Omega))\cap W^{1,r}(t_*,T;\, \mathbf{L}^q(\Omega))$ and $p$ can be chosen so that $p\in L^r(t_*,T;\, L^{3q/(3-q)}(\Omega))$. Let $\epsilon$ and $\Omega_2$ be the number and domain, respectively, given by Lemma \ref{L6.1}. We may assume that $\Omega_2$ and $\Omega_3$ are chosen so that $\emptyset\not=\Omega_3\subset\subset\Omega_2\subset\subset\Omega$.
Applying the operator of divergence to equation (\ref{1.1}), we obtain the equation \begin{equation} \Delta p\ =\ -\nabla\mathbf{u}:(\nabla\mathbf{u})^T, \label{6.1} \end{equation} which holds in the sense of distributions in $Q_T$. Taking into account that $p$ is at least locally integrable in $\Omega_1\times(t_1,t_2)$, we obtain from (\ref{6.1}) that \begin{displaymath} \int_{t_1}^{t_2}\theta(t)\int_{\Omega_1} \bigl[ p\, \Delta\varphi(\mathbf{x})+\nabla\mathbf{u}:(\nabla\mathbf{u})^T\, \varphi(\mathbf{x})\bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t\ =\ 0 \end{displaymath} for all $\theta\in C^{\infty}_0((t_1,t_2))$ and $\varphi\in C^{\infty}_0(\Omega_1)$. From this, we deduce that equation (\ref{6.1}) holds in $\Omega_1$ in the sense of distributions at a.a.~fixed time instants $t\in(t_1+\epsilon,t_2-\epsilon)$. Let further $t$ be one of these time instants and let $t$ be also chosen so that $\mathbf{u}(\, .\, ,t)\in\mathbf{W}^{2,q}(\Omega)$, $\partial_t\mathbf{u}(\, .\, ,t)\in\mathbf{L}^q(\Omega)$ and $p(\, .\, ,t)\in L^{3q/(3-q)}(\Omega)$. As $p(\, .\, ,t)\in L^1_{loc}(\Omega_1)$ and the right hand side of (\ref{6.1}) (at the fixed time $t$) is infinitely differentiable in the spatial variable in $\Omega_2$, the function $p(\, .\, ,t)$ is also infinitely differentiable in $\Omega_2$, see e.g.~\cite{FraFio}.
Let $\mathbf{x}_0\in\Omega_3$ and $0<\rho_1<\rho_2$ be so small that $B_{\rho_2}(\mathbf{x}_0)\subset\Omega_2$. Define an infinitely differentiable non-increasing cut--off function $\eta$ in $[0,\infty)$ by the formula \begin{displaymath} \eta(\sigma)\ \left\{ \begin{array}{ll} =1 & \mbox{for}\ 0\leq\sigma\leq\rho_1, \\ [1pt] \in(0,1) & \mbox{for}\ \rho_1<\sigma<\rho_2, \\ [1pt] =0 & \mbox{for}\ \rho_2\leq\sigma. \end{array} \right. \end{displaymath} Let $\mathbf{x}\in B_{\rho_1}(\mathbf{x}_0)$ and $\mathbf{e}$ be a constant unit vector in ${\mathbb R}^3$. Then \begin{align*} \nabla_{\!\mathbf{x}}\hbox to 0.7pt{} p(\mathbf{x},t)\cdot\mathbf{e}\ &=\
\eta\bigl(|\mathbf{x}-\mathbf{x}_0|\bigr)\, \nabla_{\!\mathbf{x}}\hbox to 0.7pt{} p(\mathbf{x},t)\cdot\mathbf{e} \\
&=\ -\frac{1}{4\pi}\int_{{\mathbb R}^3} \frac{1}{|\mathbf{y}-\mathbf{x}|}\
\Delta_{\mathbf{y}}\bigl[ \eta\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)\, \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}. \end{align*} Particularly, this also holds for $\mathbf{x}=\mathbf{x}_0$: \begin{align} \nabla_{\!\mathbf{x}}\hbox to 0.7pt{} p(\mathbf{x},t)\cdot\mathbf{e}\,
\bigl|_{\mathbf{x}=\mathbf{x}_0}\bigr.\ &=\ -\frac{1}{4\pi}\int_{{\mathbb R}^3}
\frac{1}{|\mathbf{y}-\mathbf{x}_0|}\ \Delta_{\mathbf{y}}\bigl[
\eta\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)\, \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ -\frac{1}{4\pi}\int_{{\mathbb R}^3} \frac{1}{|\mathbf{y}|}\
\Delta_{\mathbf{y}}\bigl[ \eta\bigl(|\mathbf{y}|\bigr)\, \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ -\frac{1}{4\pi}\, \bigl[ P^{(1)}(\mathbf{x}_0)+2P^{(2)}(\mathbf{x}_0)+P^{(3)}(\mathbf{x}_0) \bigr], \label{6.3} \end{align} where \begin{align*}
P^{(1)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{0})} \frac{1}{|\mathbf{y}|}\
\Delta_{\mathbf{y}}\eta\bigl(|\mathbf{y}|\bigr)\, \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}, \\
P^{(2)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{0})} \frac{1}{|\mathbf{y}|}\
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\eta\bigl(|\mathbf{y}|\bigr)\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}, \\
P^{(3)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{0})}
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\ \Delta_{\mathbf{y}}\hbox to 0.7pt{}\bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}. \end{align*}
\noindent {\it The estimate of $P^{(3)}(\mathbf{x}_0)$.} \ The estimate of the last term is easy: \begin{align}
\bigl| P^{(3)}(\mathbf{x}_0) \bigr|\ &=\ \biggl| \int_{B_{\rho_2}(\mathbf{0})} \Bigl( \nabla_{\!\mathbf{y}}\,
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\cdot\mathbf{e}
\Bigr)\, \Delta_{\mathbf{y}} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2}(\mathbf{0})} \Bigl( \nabla_{\!\mathbf{y}}\,
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\cdot\mathbf{e} \Bigr)\, \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+\mathbf{y},t):\bigl( \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+
\mathbf{y},t)\bigr)^T\bigr]\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&\leq\ c\, \int_{B_{\rho_2}(\mathbf{0})} \Bigl| \nabla_{\!\mathbf{y}}\,
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\cdot\mathbf{e} \Bigr|\; \mathrm{d}\mathbf{y}\ \leq\ c. \label{6.4} \end{align}
\noindent {\it The estimate of $P^{(2)}(\mathbf{x}_0)$.} \ We can write \begin{displaymath}
\frac{1}{|\mathbf{y}|}\ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\eta\bigl(|\mathbf{y}|\bigr)\ =\
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr), \end{displaymath} where ${\cal F}(s):=-\int_s^{\infty} \eta'(\sigma)/\sigma\; \mathrm{d}\sigma$ for $s\geq 0$. We observe that ${\cal F}$ is constant on $[0,\rho_1]$, equal to zero on $[\rho_2,\infty)$ and ${\cal F}'(s)=\eta'(s)/s$ for $s>0$. Thus, we have \begin{align}
\bigl| P^{(2)}(\mathbf{x}_0) \bigr|\ &=\ \biggl| \int_{B_{\rho_2}(\mathbf{0})} \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
{\cal F}\bigl(|\mathbf{y}|\bigr)\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\;
\mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2}(\mathbf{0})}
\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr|. \label{6.5} \end{align}
The vector function $\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}$ can be written in the form \begin{equation}
\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}\ =\ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\varphi(\mathbf{y})+\mathbf{w}(\mathbf{y}), \label{6.6} \end{equation} where
\begin{displaymath}
\varphi(\mathbf{y})= \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr) \cdot\mathbf{e},
\qquad \mathbf{w}(\mathbf{y})=\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}-
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr) \cdot\mathbf{e}\bigr]. \end{displaymath} The functions $\varphi$ and $\mathbf{w}$ are infinitely differentiable in ${\mathbb R}^3$ and $\varphi=0$, $\mathbf{w}=\mathbf{0}$ in ${\mathbb R}^3\smallsetminus B_{\rho_2}(\mathbf{0})$. Since \begin{displaymath} \mathrm{div}\,\mathbf{w}\ =\ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\Delta_{\mathbf{y}}
{\cal F}\bigl(|\mathbf{y}|\bigr)\cdot\mathbf{e}-\Delta_{\mathbf{y}}\hbox to 0.7pt{}\bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr) \cdot\mathbf{e}\bigr]\ =\ 0, \end{displaymath} (\ref{6.6}) in fact represents the Helmholtz decomposition of
$\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}$ in $B_{\rho_2}(\mathbf{0})$. Substituting from (\ref{6.6}) to (\ref{6.5}), we obtain \begin{align}
\bigl| P^{(2)}(\mathbf{x}_0) \bigr|\ &=\ \biggl| \int_{B_{\rho_2} (\mathbf{0})} \bigl[\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\varphi(\mathbf{y})+\mathbf{w}(\mathbf{y})\bigr]
\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2} (\mathbf{0})}
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\varphi(\mathbf{y}) \cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2} (\mathbf{0})} \varphi(\mathbf{y})\,
\Delta_{\mathbf{y}}p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2} (\mathbf{0})} \varphi(\mathbf{y})\, \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+\mathbf{y},t): \bigl(\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+\mathbf{y},t)\bigr)^T \bigr]\;
\mathrm{d}\mathbf{y} \biggr| \nonumber \\
&\leq\ \int_{B_{\rho_2} (\mathbf{0})} |\varphi(\mathbf{y})|\; \mathrm{d}\mathbf{y} \ \leq\ c. \label{6.11} \end{align}
\noindent {\it The estimate of $P^{(1)}(\mathbf{x}_0)$.} \ Finally, we have \begin{align}
P^{(1)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{0})} \frac{1}{|\mathbf{y}|}\,
\nabla_{\!\mathbf{y}}\eta\bigl(|\mathbf{y}|\bigr)\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} \bigl[\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e}\bigr]\; \mathrm{d}\mathbf{y} \nonumber \\
& \hspace{21pt} -\int_{B_{\rho_2}(\mathbf{0})}
\Bigl[\frac{\mathbf{y}}{|\mathbf{y}|^3}\cdot\nabla_{\!\mathbf{y}}\eta
\bigl(|\mathbf{y}|\bigr) \Bigr]\, \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e}\bigr]\; \mathrm{d}\mathbf{y}. \label{6.12} \end{align} The first integral coincides with the integral in the formula for $P^{(2)}(\mathbf{x}_0)$ and it can be therefore treated in the same way. The second integral on the right hand side of (\ref{6.12}) - let us denote it by $P^{(1)}_2(\mathbf{x}_0)$ - represents the main obstacle, which finally causes that $p$ and all its spatial derivatives are only in $L^4(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$ and not in $L^{\infty}(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$, as in the cases from items a) and c) in subsection 6.1. The integral can be written in the form \begin{align} P^{(1)}_2(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{0})}
\frac{\eta'\bigl(|\mathbf{y}|\bigr)} {|\mathbf{y}|^2}\, \mathbf{e}\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ \int_{\Omega} \frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)}
{|\mathbf{y}-\mathbf{x}_0|^2}\, \mathbf{e}\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\; \mathrm{d}\mathbf{y}. \label{6.13} \end{align} Now, we use the Helmholtz decomposition \begin{equation}
\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)} {|\mathbf{y}-\mathbf{x}_0|^2}\, \mathbf{e}\ =\ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})+\mathbf{z}(\mathbf{y}), \label{6.14} \end{equation} in the whole domain $\Omega$, where \begin{align*} \Delta_{\mathbf{y}}\psi(\mathbf{y}) &=
\mathrm{div}\,\Bigl(\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)}
{|\mathbf{y}-\mathbf{x}_0|^2}\, \mathbf{e}\Bigr) = \Bigl(
\frac{\eta''\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)} {|\mathbf{y}-\mathbf{x}_0|^3}-
\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr) }{|\mathbf{y}-\mathbf{x}_0|^4} \Bigr)\, (\mathbf{y}-\mathbf{x}_0)\cdot\mathbf{e} && \mbox{for}\ \mathbf{y}\in\Omega, \\
\frac{\partial\psi}{\partial\mathbf{n}}(\mathbf{y})\ &=\ 0 && \mbox{for}\ \mathbf{y}\in \partial\Omega. \end{align*} As $\mathbf{z}$ is divergence--free and its normal component on $\partial\Omega$ is zero, and the integral of $\nabla\psi\cdot\partial_t\mathbf{u}$ is zero, we get \begin{align} P^{(1)}_2(\mathbf{x}_0)\ &=\ \int_{\Omega} \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})+\mathbf{z}(\mathbf{y}) \bigr]\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\; \mathrm{d}\mathbf{y}\ =\ \int_{\Omega} \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ \int_{\Omega} \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})\cdot\bigl[\partial_t\mathbf{u}+\mathbf{u}\cdot \nabla\mathbf{u}-\nu\Delta\mathbf{u}\bigr](\mathbf{y},t)\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ \int_{\Omega} \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})\cdot\bigl[\mathbf{u}\cdot \nabla\mathbf{u}-\nu\Delta\mathbf{u}\bigr](\mathbf{y},t)\; \mathrm{d}\mathbf{y}. \label{6.15} \end{align} We have \begin{align}
\biggl| \int_{\Omega} & \nabla_{\!\mathbf{y}}\psi\cdot(\mathbf{u}\cdot
\nabla\mathbf{u})\; \mathrm{d}\mathbf{y} \biggr|\ =\ \biggl|\int_{\Omega}
\nabla_{\!\mathbf{y}}^2\psi : (\mathbf{u}\otimes\mathbf{u})\; \mathrm{d}\mathbf{y}\biggr|\ \leq\
c\int_{\Omega}|\mathbf{u}|^2\; \mathrm{d}\mathbf{y}\ \leq\ c, \label{6.16} \\
\biggl| \int_{\Omega} & \nabla_{\!\mathbf{y}}\psi\cdot \nu\Delta\mathbf{u}\;
\mathrm{d}\mathbf{y} \biggr|\ =\ \biggl|\int_{\Omega}\nabla_{\!\mathbf{y}}\psi\cdot
\mathrm{div}\,\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\; \mathrm{d}\mathbf{y}\biggr| \nonumber \\
&=\ \biggl|\int_{\partial\Omega}\nabla_{\!\mathbf{y}}\psi\cdot [\bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\cdot\mathbf{n}]\; \mathrm{d} S- \int_{\Omega}\nabla_{\!\mathbf{y}}^2\psi : \bbT_{\hspace{-0.5pt} \rm d}(\mathbf{u})\; \mathrm{d}\mathbf{y}
\biggr| \nonumber \\
&=\ \biggl|-\int_{\partial\Omega}\nabla_{\!\mathbf{y}}\psi\cdot \gamma\mathbf{u}\; \mathrm{d} S-\int_{\Omega}\nabla_{\!\mathbf{y}}^2\psi :
\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&\leq\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\biggl| \int_{\Omega}\nabla_{\!\mathbf{y}}^2\psi : \nu\hbox to 0.7pt{}\nabla\mathbf{u}\; \mathrm{d}\mathbf{y}
\biggr|\ =\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\biggl| \int_{\Omega}(\partial_i\partial_j\psi)\, \nu\,
(\partial_ju_i)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\biggl| \int_{\partial\Omega}(\partial_j\psi)\, n_i\, \nu\,
(\partial_ju_i)\; \mathrm{d} S \biggr| \nonumber \\
&=\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\nu\,
\biggl|\int_{\partial\Omega}(\partial_j\psi)\,
[\partial_j(n_iu_i)-(\partial_jn_i)\, u_i]\; \mathrm{d} S \biggr| \nonumber \\
&=\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\nu\,
\biggl|\int_{\partial\Omega}(\partial_j\psi)\, (\partial_jn_i)\, u_i\; \mathrm{d} S \biggr| \nonumber \\
&\leq\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S\ \leq\ c\, \biggl(
\int_{\partial\Omega}|\mathbf{u}|^2\; \mathrm{d} S\biggr)^{\! 1/2}\ \leq\ c\,
\bigl( \|\mathbf{u}\|_2+\|\mathbf{u}\|_2^{1/2}\, \|\mathbf{u}\|_{1,2}^{1/2} \bigr) \nonumber \\
&\leq\ c+c\, \|\mathbf{u}\|_{1,2}^{1/2}. \label{6.17} \end{align} The right hand side is in $L^4(t_1+\epsilon,t_2-\epsilon)$. We have used the estimate \begin{displaymath}
\bigl| \nabla\psi \bigr|_{1+(h)}\ \leq\ c\, \Bigl| \Bigl(
\frac{\eta''\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)} {|\mathbf{y}-\mathbf{x}_0|^3}-
\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr) }{|\mathbf{y}-\mathbf{x}_0|^4} \Bigr)\,
(\mathbf{y}-\mathbf{x}_0)\cdot\mathbf{e} \Bigr|_{0+(h)}\ \leq\ c, \end{displaymath}
where $|\, .\, |_{1+(h)}$ and $|\, .\, |_{0+(h)}$ are the norms in the H\"older spaces $\mathbf{C}^{1+(h)}(\overline{\Omega})$ and
$C^{0+(h)}(\overline{\Omega})$, respectively, see \cite{Na}. The integral of $|\mathbf{u}|^2$ on $\partial\Omega$ has been estimated by means of \cite[Theorem II.4.1]{Ga1}.
We have shown that the norm of $\,
\nabla_{\!\mathbf{x}}p(\mathbf{x},t)|_{\mathbf{x}=\mathbf{x}_0}\cdot\mathbf{e}\, $ in $L^4(t_1+\epsilon,t_2-\epsilon)$ is finite and inde\-pen\-dent of vector $\mathbf{e}$ and a concrete position of point $\mathbf{x}_0$ in domain $\Omega_3$. Hence $\nabla p\in L^4(0,T;\, \mathbf{L}^{\infty}(\Omega_3))$. From this, one can deduce that $p$ can be chosen so that $p\in L^4(0,T;\, L^{\infty}(\Omega_3))$. Similarly, dealing with $D^{\alpha}_{\mathbf{x}} p(\mathbf{x},t)$, where $\alpha\equiv(\alpha_1,\alpha_2,\alpha_3)$ is an arbitrary multi-index, instead of $p(\mathbf{x},t)$, we show that $D^{\alpha} p\in L^4(0,T;\, L^{\infty}(\Omega_3))$, too. The proof is completed. \end{proof}
\noindent {\bf Acknowledgement.} \ The authors have been supported by the Academy of Sciences of the Czech Republic (RVO 67985840) and by the Grant Agency of the Czech Republic, grant No.~17-01747S.
\end{document} |
\begin{document}
\title{\bf On a generalization of Kelly's combinatorial lemma} \author{Aymen BEN AMIRA $^1$, Jamel DAMMAK $^1$, Hamza SI KADDOUR $^{2, {\ast}}$\\ \centerline{$^{1}$ {\small Department of Mathematics, Faculty of Sciences of Sfax, B.P. 802, 3018 Sfax, Tunisia}}\\ \centerline{$^{2}$ {\small ICJ, Department of Mathematics, University of Lyon, University Claude-Bernard Lyon1,}}\\ \centerline{\small 43 Bd du 11 Novembre 1918, 69622 Villeurbanne Cedex, France}}
\maketitle
\footnotetext{$^{\ast}$ Correspondence: sikaddour@univ-lyon1.fr} \footnotetext{{2000 Mathematical Subject Classification:} 05C50, 05C60.}
\noindent {\bf Abstract:} Kelly's combinatorial lemma is a basic tool in the study of Ulam's reconstruction conjecture. A generalization in terms of a family of $t$-elements subsets of a $v$-element set was given by Pouzet. We consider a version of this generalization modulo a prime $p$. We give illustrations to graphs and tournaments.\\
\noindent {\bf Key words:} {Set, matrix, graph, tournament, isomorphism}
\section{Introduction} \label{section def} Kelly's combinatorial lemma is the assertion that the number $s(F,G)$ of induced subgraphs of a given graph $G$, isomorphic to $F$, is determined by the deck of $G$, provided that $\vert V(F)\vert < \vert V(G)\vert$, namely $s(F,G) = \frac{1}{\vert V(G)\vert - \vert V(F)\vert} \sum_{x\in V(G)} s(F,G_{-x})$ (where $G_{-x}$ is the graph induced by $G$ on $ V(G)\setminus \{x\}$).\\ In terms of a family $\mathcal F$ of $t$-element subsets of a $v$-element set $E$, it simply says that $\vert \mathcal F \vert = \frac{1}{v-t} \sum_{x\in E} \vert \mathcal F_{-x} \vert$ where $ \mathcal F_{-x}:= \mathcal F \cap [E\setminus \{x\}]^t$. \\ Pouzet \cite{Pm,Pm2} gave the following extension of this result. \begin{lemma} (M. Pouzet \cite{Pm}) \label{lem po} Let $t$ and $r$ be integers, $V$ be a set of size $v\geq t+r$ elements, $U$ and $U'$ be sets of subsets $T$ of $t$ elements of $V$. If for every subset $K$ of $k=t+r$ elements of $V$, the number of elements of $U$ which are contained in $K$ is equal to the number of elements of $U'$ which are contained in $K$, then for all finite subsets $T'$ and $K'$ of $V$, such that $T'$ is contained in $K'$ and $K'\setminus T'$ has at least $t+r$ elements, the number of elements of $U$ which contain $T'$ and are contained in $K'$ is equal to the number of elements of $U'$ which contain $T'$ and are contained in $K'$. \end{lemma}
In particular if $\vert V \vert \geq 2t+r=t+k$, we have this particular version of the combinatorial lemma of Pouzet :
\begin{lemma} (M. Pouzet \cite{Pm}) \label{particular mp} Let $v,t$ and $k$ be integers, $V$ be a set of $v$ elements with $t\leq \min(k,v-k)$, $U$ and $U'$ be sets of subsets $T$ of $t$ elements of $V$. If for every subset $K$ of $k$ elements of $V$, the number of elements of $U$ which are contained in $K$ is equal to the number of elements of $U'$ which are contained in $K$, then $U=U'$. \end{lemma}
We denote by $n(U,K)$ the number of elements of $U$ which are contained in $K$, thus Lemma \ref{particular mp} says that if $n(U,K)=n(U',K)$ for every subset $K$ of $k$ elements of $V$ then $U=U'$. Here we consider the case where $n(U,K)\equiv n(U',K)$ modulo a prime $p$ for every subset $K$ of $k$ elements of $V$; our main result, Theorem \ref{thm js}, is then a version, modulo a prime $p$, of the particular version of the combinatorial lemma of Pouzet.\\
Kelly's combinatorial lemma is a basic tool in the study of Ulam's reconstruction conjecture. Pouzet's combinatorial lemma has been used several times in reconstruction problems (see for example \cite{ ABB, B, BD, BL, D1, D2}). Pouzet gave a proof of his lemma via a counting argument \cite{Pm2} and later by using linear algebra (related to incidence matrices) \cite{Pm} (the paper was published earlier).
Let $n,p$ be positive integers. The decomposition $n=\sum_{i=0}^{n(p)} n_i p^i$ of $n$ in base $p$ is also denoted $[n_0,n_1,\dots ,n_{n(p)}]_p$, where $n_{n(p)}\neq 0$ if and only if $n\neq 0$.
\begin{theorem} \label{thm js} Let $p$ be a prime number. Let $v,t$ and $k$ be non-negative integers, $k=[k_0,k_1,\dots , k_{k(p)}]_p$, $t=[t_0,t_1,\dots , t_{t(p)}]_p$. Let $V$ be a set of $v$ elements with $t\leq \min(k,v-k)$, $U$ and $U'$ be sets of subsets $T$ of $t$ elements of $V$. We assume that for every subset $K$ of $k$ elements of $V$, the number of elements of $U$ which are contained in $K$ is equal (mod $p$) to the number of elements of $U'$ which are contained in $K$.\\ 1) If $k_i=t_i$ for all $i<t(p)$ and $k_{t(p)}\geq t_{t(p)}$, then $U=U'$.\\ 2) If $t=t_{t(p)}p^{t(p)}$ and $k=\sum_{i={t(p)}+1}^{k(p)} k_{i}p^i$, we have $U=U'$, or one of the sets $U,U'$ is the set of all $t$-element subsets of $V$ and the other is empty, or (whenever $p=2$) for all $t$-element subsets $T$ of $V$, $T\in U$ if and only if $T\not\in U'$. \end{theorem}
Our proof of Theorem \ref{thm js} is an application of properties of incidence matrices due to D.H. Gottlieb \cite{Go}, W. Kantor \cite{KA} and R.M. Wilson \cite{W}, we use Wilson's Theorem (Theorem \ref{thm Wilson}). \\
In a reconstruction problem of graphs up to complementation \cite{dlps1}, Wilson's Theorem yielded the following result: \begin{theorem} (\cite{dlps1})\label{k=0[4],p=2} Let $k$ be an integer, $2\leq k\leq v-2$, $k\equiv 0$ (mod $4$). Let $G$ and $G'$ be two graphs on the same set $V$ of $v$ vertices (possibly infinite). We assume that $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$. Then $G'=G$ or $G'=\overline {G}$. \end{theorem}
Here we look for similar results whenever $e(G_{\restriction K}) \equiv e(G'_{\restriction K})$ modulo a prime $p$. As an illustration of Theorem \ref{thm js}, we obtain the following result.
\begin{theorem}\label{k=2[4]} Let $p$ be a prime number and $k$ be an integer, $2\leq k\leq v-2$. Let $G$ and $G'$ be two graphs on the same set $V$ of $v$ vertices (possibly infinite). We assume that for all k-element subsets $K$ of $V$, $e(G_{\restriction K}) \equiv e(G'_{\restriction K})$ (mod $p$).\\ 1) If $p\geq3$, $k\not\equiv 0,1 \ (mod \ p)$, then $G'=G$.\\ 2) If $p\geq3$, $k\equiv 0$ (mod $p$), then $G'=G$, or one of the graphs $G,G'$ is the complete graph and the other is the empty graph.\\ 3) If $p=2$, $k\equiv 2$ (mod $4$), then $G'=G$.
\end{theorem}
We give further illustrations of Theorem \ref{thm js}: to graphs in Section \ref{section graphs}, and to tournaments
in Section \ref{section tournaments}.
\section{Incidence matrices}
We consider the matrix $W_{t\;k}$ defined as follows : Let $V$ be a finite set, with $v$ elements. Given non-negative integers $t,k$, let $W_{t\;k}$ be the ${v \choose t}$ by ${v \choose k}$ matrix of $0$'s and $1$'s, the rows of which are indexed by the $t$-element subsets
$T$ of $V$, the columns are indexed by the $k$-element subsets $K$ of $V$, and where the entry $W_{t\;k}(T,K)$ is $1$ if
$T\subseteq K$ and is $0$ otherwise. The matrix transpose of $W_{t\;k}$ is denoted $^tW_{t\;k}$.\\ We say that a matrix $D$ is a {\it {diagonal form}} for a matrix $M$ when $D$ is diagonal and there exist unimodular matrices (square integral matrices which have integral inverses) $E$ and $F$ such that $D=EMF$. We do not require that $M$ and $D$ are square; here ``diagonal'' just means that the $(i,j)$ entry of $D$ is $0$ if $i\neq j$. A fundamental result, due to R.M. Wilson \cite{W}, is the following. \begin{theorem} (R.M. Wilson \cite{W}) \label{thm Wilson+} For $t\leq \min(k,v-k)$, $W_{t\; k}$ has as a diagonal form the ${v \choose t}\times {v \choose k}$ diagonal matrix with diagonal entries $$ {k-i \choose t-i}\ \mbox{with multiplicity}\ {v \choose i} - {v \choose i-1}, \ \ \ i=0,1,\dots ,t.$$ \end{theorem}
Clearly from Theorem \ref{thm Wilson+}, $rank\ W_{t\; k}$ over the field $\mathbb{Q}$ is ${v \choose t}$, that is Theorem \ref{gottlieb-kantor} due to Gottlieb \cite{Go}. On the other hand, from Theorem \ref{thm Wilson+}, follows $rank\ W_{t\; k}$ over the field $\mathbb{Z}/p\mathbb{Z}$, as given by Theorem \ref{thm Wilson}.
\begin{theorem} (R.M. Wilson \cite{W}) \label{thm Wilson} For $t\leq \min(k,v-k)$, the rank of $W_{t\; k}$ modulo a prime $p$ is $$ \sum {v \choose i}- {v\choose i - 1} $$ where the sum is extended over those indices $i$, $0\leq i\leq t$, such that $p$ does not divide the binomial coefficient ${k-i \choose t-i}$. \end{theorem}
In the statement of the theorem, ${v \choose -1}$ should be interpreted as zero.\\
A fundamental result, due to D.H. Gottlieb \cite{Go}, and independently W. Kantor \cite {KA}, is this: \begin{theorem} (D.H. Gottlieb \cite{Go}, W. Kantor \cite {KA}) \label{gottlieb-kantor} For $t\leq \min(k,v-k)$, $W_{t\; k}$ has full row rank over the field $\mathbb{Q}$ of rational numbers.
\end{theorem}
It is clear that $t\leq \min(k,v-k)$ implies ${v \choose t}\leq {v \choose k}$; then, from Theorem \ref{gottlieb-kantor}, we have the following result:
\begin{corollary}\label{rk-gottlieb-kantor} For $t\leq \min(k,v-k)$, the rank of $W_{t\; k}$ over the field $\mathbb{Q}$ of rational numbers is ${v \choose t}$ and thus $Ker(^tW_{t\; k})=\{0\}$.
\end{corollary}
If $k:=v-t$ then, up to a relabelling, $W_{t\; k}$ is the adjacency matrix $A_{t,v}$
of the {\it Kneser graph} $KG(t,v)$ \cite{GoRo}, graph whose vertices are the $t$-element subsets of $V$, two subsets forming an edge if they are disjoint. The eigenvalues of Kneser graphs are computed in \cite{GoRo} (Theorem 9.4.3), and thus an equivalent form of Theorem \ref{gottlieb-kantor} is: \begin{theorem} \label{Ka} $A_{t, v}$ is non-singular for $t\leq \frac{v}{2}$. \end{theorem}
We characterize the values of $t$ and $k$ for which $\dim Ker(^tW_{t\; k})\in \{0,1\}$
and exhibit a basis of $Ker(^tW_{t\; k})$, as stated in the following result.
\begin{theorem} \label{thm js2} Let $p$ be a prime number. Let $v,t$ and $k$ be non-negative integers, $k=[k_0,k_1,\dots , k_{k(p)}]_p$, $t=[t_0,t_1,\dots , t_{t(p)}]_p$, $t\leq \min(k,v-k)$. We have:\\ 1) $k_j=t_j$ for all $j<t(p)$ and $k_{t(p)}\geq t_{t(p)}$ if and only if $Ker(^tW_{t\; k})=\{0\}$ \mbox{(mod $p$)}.\\ 2) $t=t_{t(p)}p^{t(p)}$ and $k=\sum_{i={t(p)}+1}^{k(p)}k_ip^i$ if and only if $\dim Ker (^tW_{t\; k})= 1$\ \mbox{(mod $p$)} and $\{(1,1,\cdots ,1)\}$ is a basis of $Ker (^tW_{t\; k})$. \end{theorem}
The proof of Theorem \ref{thm js2} uses Lucas's Theorem. The notation $a\mid b$ (resp. $a\nmid b$) means that $a$ divides $b$ (resp. $a$ does not divide $b$).
\begin{theorem} (Lucas's Theorem \cite{Lucas}) \label{lucas} Let $p$ be a prime number, $t,k$ be positive integers, $t\leq k$, $t=[t_0,t_1,\dots ,t_{t(p)}]_p$ and $k=[k_0,k_1,\dots ,k_{k(p)}]_p$. Then $${k \choose t} = \prod_{i=0}^{t(p)} {k_i \choose t_i} \ (mod \ p),\ \mbox{where} \ {k_i \choose t_i} =0\ \mbox{if} \ t_i>k_i.$$ \end{theorem}
As a consequence of Theorem \ref{lucas}, we have the following result which is very useful in this paper.
\begin{corollary}\label{cor-lucas} Let $p$ be a prime number, $t,k$ be positive integers, $t\leq k$, $t=[t_0,t_1,\dots ,t_{t(p)}]_p$ and $k=[k_0,k_1,\dots ,k_{k(p)}]_p$. Then \begin{center}
$p \vert {k \choose t}$ if and only if there is $i\in \{0,1,\dots , t(p)\}$ such that $t_i>k_i$. \end{center} \end{corollary}
\noindent{\bf{Proof of Theorem \ref{thm js2}.}} 1) We begin by the direct implication. We will prove $p \nmid {{k-i} \choose {t-i}}$ for all $i=[i_0,i_1,\dots , i_{t(p)}]\in \{0,\dots ,t\}$ with $i_{t(p)}\leq t_{t(p)}$. Since $k_j=t_j$ for all $j< t(p)$, then $(t-i)_j=(k-i)_j$ for all $j < t(p)$. As $k_{t(p)} \geq t_{t(p)}\geq i_{t(p)}$ then $(k-i)_{t(p)} \geq (t-i)_{t(p)}$, thus, by Corollary \ref{cor-lucas}, $p \nmid {{k-i} \choose {t-i}}$ for all $i\in \{0,1, \dots , t\}$. Now from Theorem \ref{thm Wilson}, $rank \ W_{tk} = \sum_{i=0}^{t} {v \choose i} - {v \choose {i-1}}= {v \choose t}$. Then the kernel of $^tW_{t\; k}\ \mbox{(mod $p$)} \ \mbox{is}\ \{0\}$.\\ Now we prove the converse implication. From Theorem \ref{thm Wilson+}, $Ker(^tW_{t\; k})=\{0\}$ implies $p \nmid {k-i \choose t-i}$ for all $i\in \{0,1, \dots , t\}$, in particular $p \nmid {k \choose t}$. Then by Corollary \ref{cor-lucas},
$k_j\geq t_j$ for all $j \leq t(p)$. We will prove that $k_j = t_j$ for all $j \leq t(p)-1$. By contradiction, let $s$ be the least integer in $\{0,1, \dots , t(p)-1\}$, such that $k_s>t_s$. We have $(t-(t_s+1)p^s)_s = p-1$, $(k-(t_s+1)p^s)_s = k_s-t_s-1$ and $p-1>k_s-t_s-1$. From Corollary \ref{cor-lucas},
$p \mid {{k-(t_s+1)p^s} \choose {t-(t_s+1)p^s}}$, that is impossible.\\ 2) Set $n:=t(p)$. We begin by the direct implication. Since $0=k_n<t_n$ then, by Corollary \ref{cor-lucas}, $p \vert {{k} \choose {t}}$. We will prove $p \nmid {{k-i} \choose {t-i}}$ for all $i=[i_0,i_1,\dots , i_{n}]\in \{1,2,\dots ,t\}$. \\ Since $k_j=t_j=0$ for all $j<n$, then $(t-i)_j=(k-i)_j$ for all $j < n$. From $t_n\geq i_n$, we have $(t-i)_n\in \{t_n- i_n,t_n- i_n-1\}$. Note that $(k-i)_n\in\{p- i_n-1,p- i_n\}$ and $p-i_n-1\geq t_n-i_n$; thus $(k-i)_n \geq (t-i)_n$. So for all $j\leq n$, $(k-i)_j \geq (t-i)_j$. Then, by Corollary \ref{cor-lucas}, $p \nmid {{k-i} \choose {t-i}}$ for all $i\in \{1,2,\dots ,t\}$. Now from Theorem \ref{thm Wilson}, $rank \ W_{tk} = \sum_{i=1}^{t} {v \choose i} - {v \choose {i-1}}= {v \choose t}-1$, and thus $\dim Ker (^tW_{t\; k})= 1$. Now
$(1,1,\cdots ,1)W_{t\; k}=({k \choose t},{k \choose t},\cdots ,{k \choose t})$.\\ Since $p \mid {k \choose t}$, then
$(1,1,\cdots ,1)W_{t\; k}\equiv0$ (mod $p$). Then $\{(1,1,\cdots ,1)\}$ is a basis of the kernel of $^tW_{t\; k}$ (mod $p$).\\ Now we prove the converse implication. Since $\{(1,1,\cdots ,1)\}$ is a basis of the kernel of $^tW_{t\; k}$ (mod $p$) and $(1,1,\cdots ,1)W_{t\; k}=({k \choose t},{k \choose t},\cdots ,{k \choose t})$, then $p \mid {k \choose t}$. Since $dim \ Ker(^tW_{t\; k})=1$, then from Theorem \ref{thm Wilson}, $p \nmid {k-i \choose t-i}$ for all $i\in \{1,2,\dots ,t\}$.\\ First, let us prove that $t=t_np^n$. Note that $t_n\neq 0$ since $t\neq 0$. Since $p \vert {k \choose t}$ then, from Corollary \ref{cor-lucas}, there is an integer $j\in \{0,1,\dots ,n\}$ such that $t_j > k_j$. Let $A:=\{ j<n\ : \ t_j\neq 0\}$. By contradiction, assume $A\neq \emptyset$. \\ Case 1. There is $j\in A$ such that $t_j > k_j$. We have $(t-p^n)_j = t_j$, $ (k-p^n)_j=k_j$. Then from Corollary \ref{cor-lucas}, we have $p \mid {{k-p^n} \choose {t-p^n}}$, that is impossible.\\ Case 2. For all $j\in A$, $t_j \leq k_j$. Then $t_n > k_n$. We have $(t-p^j)_n = t_n$, $ (k-p^j)_n=k_n$. Then, from Corollary \ref{cor-lucas}, we have $p \mid {{k-p^j} \choose {t-p^j}}$, that is impossible.\\ From the above two cases, we deduce $t=t_np^n$.\\ Secondly, since $p \vert {{k} \choose {t}}$, then by Corollary \ref{cor-lucas}, $t_n>k_n$. Let us show that $k_n=0$. By contradiction, if $k_n\neq 0$ then $(t-p^n)_n=t_{n}-1> k_n-1=(k-p^n)_n$. From Corollary \ref{cor-lucas}, $p \mid {{k-p^n} \choose {t-p^n}}$, that is impossible. Let $s\in \{0,1,\dots ,n-1\}$, let us show that $k_s=0$. By contradiction, if $k_s\neq 0$ then, $(t-p^s)_s =p-1$, $(k-p^s)_s = k_s-1$, thus $(t-p^s)_s > (k-p^s)_s$ so, from Corollary \ref{cor-lucas},
$p \mid {{k-p^s} \choose {t-p^s}}$, that is impossible.\endproof
\section{Proof of Theorem \ref{thm js}.}
Let $T_1,T_2, \cdots ,T_{{v \choose t}}$ be an enumeration of the $t$-element subsets of $V$, let $K_1,K_2, \cdots ,K_{{v \choose k}}$ be an enumeration of the $k$-element subsets of $V$ and $W_{t\; k}$ be the matrix of the $t$-element subsets versus the $k$-element subsets.
Let $w_U$ be the row matrix $(u_1,u_2, \cdots , u_{v \choose t})$ where $u_i=1$ if $T_i\in U$, $0$ otherwise. We have
$$w_UW_{t\; k}=(\vert \{T_i\in U : T_i \subseteq K_1\}\vert , \cdots ,\vert \{ T_i\in U : T_i \subseteq K_{{v \choose k}} \}\vert).$$ $$w_{U'}W_{t\; k}=(\vert \{T_i\in U' : T_i \subseteq K_1\}\vert , \cdots ,\vert \{ T_i\in U' : T_i \subseteq K_{{v \choose k}} \}\vert).$$
Since for all $j\in \{1,\dots ,{v \choose k}\}$, the number of elements of $U$ which are contained in $K_j$ is equal (mod $p$) to the number of elements of $U'$ which are contained in $K_j$, then $(w_U-w_{U'})W_{t\; k}=0$ \ \mbox{(mod $p$)}, so $w_U-w_{U'}\in Ker (^tW_{t\; k})$.\\ 1) Assume $k_i=t_i$ for all $i<t(p)$ and $k_{t(p)}\geq t_{t(p)}$. From 1) of Theorem \ref{thm js2}, $w_U-w_{U'}=0$, that gives $U=U'$.\\ 2) Assume $t=t_{t(p)}p^{t(p)}$ and $k=\sum_{i={t(p)}+1}^{k(p)}k_ip^i$. From 2) of Theorem \ref{thm js2}, there is an integer $\lambda \in [0,p-1]$ such that $w_U-w_{U'}=\lambda (1,1,\cdots ,1)$. It is clear that $\lambda \in \{0,1,-1\}$.
If $\lambda =0$ then $U=U'$. If $\lambda =1$ and $p\geq 3$ then $U=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$, $U'=\emptyset$. If $\lambda =1$ and $p= 2$ then $U=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$, $U'=\emptyset$, or $T\in U$ if and only if $T\not\in U'$.
If $\lambda =-1$ and $p\geq 3$ then $U=\emptyset$, $U'=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$.
If $\lambda =-1$ and $p= 2$ then $U'=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$, $U=\emptyset$, or $T\in U$ if and only if $T\not\in U'$. \endproof
\section{Illustrations to graphs} \label{section graphs} Our notations and terminology follow \cite {Bo}. A \textit{digraph} $G = (V, E)$ or $G=(V(G),E(G))$, is formed
by a finite set $V$ of vertices and a set $E$ of pairs of distinct vertices, called {\it arcs} of $G$. The {\it order} (or {\it cardinal}) of $G$ is the number of its vertices. If $K$ is a subset of $V$, the {\it restriction} of $G$ to $K$, also called the {\it induced subdigraph} of $G$ on $K$ is the digraph $G_{\restriction K}:= (K, K^2\cap E)$. If $K=V\setminus \{x\}$, we denote this digraph by $G_{-x}$. Let $G = (V, E)$ and $G' = (V', E')$ be two digraphs. A one-to-one correspondence $f$ from $V$ onto $V'$ is an {\it isomorphism from} $G$ {\it onto} $G'$ provided that for $x, y \in V$, $(x, y) \in E$ if and only if $(f(x), f(y)) \in E'$. The digraphs $G$ and $G'$ are then said to be {\it isomorphic}, which is denoted by $G \simeq G^{\prime}$. A subset $I$ of $V$ is an \textit{interval} \cite{FR,ST} (or a {\it clan} \cite{ER}, or an {\it homogenous subset} \cite{TG}) of $G$ provided that for all $a, b\in I$ and $x \in V\setminus I$, $(a, x) \in E(G)$ if and only if $(b, x)\in E(G)$, and the same for $(x,a)$ and $(x,b)$. For example $\emptyset$, $\{x\}$ where $x \in V$, and $V$ are intervals of $G$, called {\it trivial intervals}. A digraph is then said to be {\it indecomposable} \cite{ST} (or {\it primitive }\cite{ER}) if all its intervals are trivial, otherwise it is said to be {\it decomposable}.\\ We say that $G$ is a {\it graph} (resp. {\it tournament}) when for every distinct vertices $x,y$ of $V$, $(x, y) \in E$ if and only if $(y, x)\in E$ (resp $(x, y) \in E$ if and only if $(y, x)\not\in E$); we say that $\{x,y\}$ is an {\it edge} of the graph $G$ if $(x,y)\in E$, thus $E$ is identified with a subset of $[V]^2$, the set of pairs $\{x,y\}$ of distinct elements of $V$.\\ Let $G= (V, E)$ be a graph, the {\it complement} of $G$ is the graph $\overline G:= (V, [V]^2\setminus E)$. We denote by $e(G):=\vert E(G)\vert $ the number of edges of $G$.
The {\it degree} of a vertex $x$ of $G$, denoted $d_G(x)$, is the number of edges which contain $x$. A $3$-element subset $T$ of $V$ such that all pairs belong to $E(G)$ is a {\it triangle} of $G$. Let $T(G)$ be the set of {\it triangles} of $G$ and let $t(G):=\mid T(G)\mid$. A $3$-element subset of $V$ which is a triangle of $G$ or of $\overline G$ is a $3$-{\it homogeneous} subset of $G$. We set $H^{(3)}(G):=T(G)\cup T(\overline G)$, the set of $3$-homogeneous subsets of $G$, and $h^{(3)}(G):=\mid H^{(3)}(G)\mid$.\\
\noindent{\bf Another proof of Theorem \ref{k=0[4],p=2} using Theorem \ref{thm js}.} Here $p=2$, $t=2=[0,1]_p$ and $k=[0,0,k_2,\dots]_p$. From 2) of Theorem \ref{thm js}, $U=U'$, or one of the sets $U,U'$ is the set of all $2$ element-subsets of $V$ and the other is empty, or for all $2$-element subsets $T$ of $V$, $T\in U$ if and only if $T\not\in U'$. Thus $G'=G$ or $G'=\overline{G}$.\endproof \\
\noindent{\bf Proof of Theorem \ref{k=2[4]}.} We set $U:=E(G)$, $U':=E(G')$. For all $K\subseteq V$ with $\vert K\vert = k$, we have: $\{\{x,y\}\subseteq K : \{x,y\} \in U\}= E(G_{\restriction K})$ and $\{\{x,y\}\subseteq K : \{x,y\} \in U'\}= E(G'_{\restriction K})$. Since $e(G_{\restriction K}) \equiv e(G'_{\restriction K})$ (mod $p$), then $\vert\{\{x,y\}\subseteq K : \{x,y\} \in U\}\vert \equiv \vert\{\{x,y\}\subseteq K : \{x,y\} \in U'\}\vert$ (mod $p$).\\ 1) $p\geq3$, $t=2=[2]_p$ and $k_0\geq 2$. From 1) of Theorem \ref{thm js}, $U=U'$, thus $G=G'$.\\ 2) $p\geq3$, $t=2=[2]_p$ and $k_0=0$. From 2) of Theorem \ref{thm js}, we have $U=U'$ or one of $U,U'$ is the set of all $2$-elements subsets of $V$ and the other is empty. Then $G=G'$ or one of the graphs $G,G'$ is the complete graph and the other is the empty graph.\\ 3) $p=2$, $t=2=[0,1]_p$ and $k=[0,1,k_2,\dots]_p$. From 1) of Theorem \ref{thm js}, we have $U=U'$, thus $G=G'$.
\endproof \\
The following result concerns graphs $G$ and $G'$ such that $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ modulo a prime $p$, for all $k$-element subsets $K$ of $V$.
\begin{theorem} \label{Ka+lem+TR} Let $G$ and $G'$ be two graphs on the same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $3\leq k\leq v-3$.\\ 1) If $h^{(3)}(G_{\restriction K})=h^{(3)}(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $G$ and $G'$ have the same $3$-element homogeneous sets.\\ 2) Assume $p\geq 5$. If $k\not\equiv 1,2 \ (mod \ p)$ and $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$, then $G$ and $G'$ have the same $3$-element homogeneous sets.\\ 3) If ($p=2$ and $k\equiv 3 \ (mod \ 4)$) or ($p=3$ and $3\mid k$), and $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$, then $G$ and $G'$ have the same $3$-element homogeneous sets. \end{theorem}
{\parindent0pt {\bf Proof.\ }} $H^{(3)}(G)=\{\{a,b,c\}: G_{\restriction \{a,b,c\}} \ \mbox{is a $3$-element homogeneous set}\}$. We set $U:=H^{(3)}(G)$ and $U':=H^{(3)}(G')$. For all $K\subseteq V$ with $\vert K\vert = k$, we have: $\{T\subseteq K : T \in U\}=H^{(3)}_{G_{\restriction K}}$ and $\{T\subseteq K : T \in U'\}=H^{(3)}_{G'_{\restriction K}}$. Set $t:=\mid T\mid =3$.\\ 1) Since $h^{(3)}(G_{\restriction K})=h^{(3)}(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $\vert\{T\subseteq K : T \in U\}\vert = \vert\{T\subseteq K : T \in U'\}\vert$. From Lemma \ref{particular mp} it follows that $U=U'$, then
$G$ and $G'$ have the same $3$-element homogeneous sets. \\ 2) Since $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$ then $\vert\{T\subseteq K : T \in U\}\vert \equiv \vert\{T\subseteq K : T \in U'\}\vert$ (mod $p$). \\ Case 1. $p\geq5$, $t=3=[3]_p$, $k=[k_0,\dots]_p$ and $t_0=3\leq k_0$. From 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\ Case 2. $p\geq5$, $t=3=[3]_p$, $k=[0,k_1,\dots]_p$. By Ramsey's Theorem \cite {Ra}, every graph with at least $6$ vertices contains a $3$-element homogeneous set. Then $U$ and $U'$ are nonempty, so from 2) of Theorem \ref{thm js}, $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\ 3) Since $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$ then $\vert\{T\subseteq K : T \in U\}\vert \equiv \vert\{T\subseteq K : T \in U'\}\vert$ (mod $p$). \\ Case 1. $p=2$, $t=3=[1,1]_p$ and $k\equiv 3 \ (mod \ 4)$. In this case, $k=[1,1,k_2, \dots]_p$, then
from 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\ Case 2. $p=3$, $t=3=[0,1]_p$ and $k=[0,k_1,\dots,k_{k(p)}]_p$.\\ Case 2.1. $k_1\in\{1,2\}$, then from 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\ Case 2.2. $k_1=0$. By Ramsey's Theorem \cite {Ra}, every graph with at least $6$ vertices contains a $3$-element homogeneous set. Then $U$ and $U'$ are nonempty, so from 2) of Theorem \ref{thm js}, $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets. \endproof \\
Let $G=(V,E)$ be a graph.
From \cite{ST}, every indecomposable graph of size $4$ is isomorphic to $P_4=\left(\{0,1,2,3\},\{\{0,1\},\{1,2\},\{2,3\}\}\right)$. Let ${\mathcal P}^{(4)}(G)$ be the set of indecomposable induced subgraphs of $G$ of size $4$, we set $p^{(4)}(G):=\vert{\mathcal P}^{(4)}(G)\vert$. The following result concerns graphs $G$ and $G'$ such that $p^{(4)}(G_{\restriction K})\equiv p^{(4)}(G'_{\restriction K})$ modulo a prime $p$, for all $k$-element subsets $K$ of $V$.
\begin{theorem} \label{Ka+lem+P4} Let $G$ and $G'$ be two graphs on the same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $4\leq k\leq v-4$.\\ 1) If $p^{(4)}(G_{\restriction K})=p^{(4)}(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $G$ and $G'$ have the same indecomposable sets of size $4$.\\ 2) Assume $p^{(4)}(G_{\restriction K})\equiv p^{(4)}(G'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$.\\ a) If $p\geq 5$ and $k\not\equiv 1,2,3 \ (mod \ p)$, then $G$ and $G'$ have the same indecomposable sets of size $4$.\\ b) If ($p=2$, $4\mid k$ and $8\nmid k$) or ($p=3$, $3\mid k-1$ and $9\nmid k-1$), then $G$ and $G'$ have the same indecomposable sets of size $4$.\\ c) If $p=2$ and $8\mid k$, then $G$ and $G'$ have the same indecomposable sets of size $4$, or for all $4$-element subsets $T$ of $V$, $G_{\restriction T}$ is indecomposable if and only if $G'_{\restriction T}$ is decomposable. \end{theorem}
{\parindent0pt {\bf Proof.\ }} Let $U:=\{T\subseteq V : \vert T\vert = 4, \ G_{\restriction T}\simeq P_4 \}={\mathcal P}^{(4)}(G) $, $U':=\{T\subseteq V : \vert T\vert = 4, \ G'_{\restriction T}\simeq P_4 \}={\mathcal P}^{(4)}(G') $. For all $K\subseteq V$, we have $\{T\subseteq K : T\in U\}= {\mathcal P}_4( G_{\restriction K})$ and $\{T\subseteq K : T\in U'\}= {\mathcal P}_4( G'_{\restriction K})$. Set $t:= \vert T \vert =4$.\\ 1) Since $p^{(4)}(G_{\restriction K})=p^{(4)}(G'_{\restriction K})$ then $\vert\{T\subseteq K : T\in U\}\vert=\vert\{T\subseteq K : T\in U'\}\vert$. From Lemma \ref{particular mp}, $U=U'$, then $G$ and $G'$ have the same indecomposable sets of size $4$.\\ 2) We have $p^{(4)}(G_{\restriction K})\equiv p^{(4)}(G'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$, then $\vert\{T\subseteq K : T\in U\}\vert \equiv \vert\{T\subseteq K : T\in U'\}\vert$ (mod $p$).\\ a) Case 1. $p\geq5$, $t=4=[4]_p$, $k=[k_0,\dots]_p$ and $t_0=4\leq k_0$. From 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\ Case 2. $p\geq5$, $t=4=[4]_p$, $k=[0,k_1,\dots]_p$. Since in every graph of order $5$, there is a restriction of size $4$ not isomorphic to $P_4$ then, from 2) of Theorem \ref{thm js}, $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\ b) Case 1. $p=2$, $t=4=[0,0,1]_p$ and $k=[0,0,1,k_3,\dots,k_{k(p)}]_p$. From 1) of Theorem \ref{thm js}, we have $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\ Case 2. $p=3$, $t=4=[1,1]_p$, $k=[1,k_1,\dots,k_{k(p)}]_p$ and $t_1=1\leq k_1$. From 1) of Theorem \ref{thm js}, $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\ c) We have $p=2$, $t=4=[0,0,1]_p$, $k=[0,0,0,k_3,\dots,k_{k(p)}]_p$. 
Since in every graph of order $5$, there is a restriction of size $4$ not isomorphic to $P_4$, then from 2) of Theorem \ref{thm js}, $U=U'$, or for all $4$-element subsets $T$ of $V$, $T\in U$ if and only if $T\not\in U'$. Thus $G$ and $G'$ have the same indecomposable sets of size $4$, or for all $4$-element subsets $T$ of $V$, $G_{\restriction T}$ is indecomposable if and only if $G'_{\restriction T}$ is decomposable. \endproof \\
In a reconstruction problem of graphs up to complementation \cite{dlps1}, Wilson's Theorem yielded the following result:
\begin{theorem} (\cite{dlps1})\label{k=1[4]} Let $G$ and $G'$ be two graphs on the same set $V$ of $v$ vertices (possibly infinite). Let $k$ be an integer, $5\leq k\leq v-2$, $k\equiv 1$ (mod $4$). Then the following properties are equivalent:\\ (i) $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$; and $G_{\restriction K}$, $G'_{\restriction K}$ have the same $3$-homogeneous subsets;\\ (ii) $G'= G$ or $G'= \overline G$. \end{theorem}
Here, we just want to point out that we can obtain a similar result for $k\equiv 3$ (mod $4$), namely Theorem \ref{k=3[4]}, using the same proof as that of Theorem \ref{k=1[4]}.
The {\it boolean sum} $G\dot{+} G'$ of two graphs $G=(V,E)$ and $G'=(V,E')$ is the graph $U$ on $V$ whose edges are pairs $e$ of vertices such that $e\in E$ if and only if $e\notin E'$.
\begin{theorem}\label{k=3[4]} Let $G$ and $G'$ be two graphs on the same set $V$ of $v$ vertices (possibly infinite). Let $k$ be an integer, $3\leq k\leq v-2$, $k\equiv 3$ (mod $4$). Then the following properties are equivalent:\\ (i) $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$; and $G_{\restriction K}$, $G'_{\restriction K}$ have the same $3$-homogeneous subsets;\\ (ii) $G'= G$. \end{theorem}
{\parindent0pt {\bf Proof.\ }} It is exactly the same as that of Theorem \ref{k=1[4]} (see \cite{dlps1}).
The implication $(ii)\Rightarrow (i)$ is trivial. We prove $(i)\Rightarrow (ii)$. We suppose $V$ finite, we set $U:= G\dot{+} G'$, let $T_1,T_2, \cdots , T_{{v \choose 2}}$ be an enumeration of the $2$-element subsets of $V$, let $K_1,K_2,\cdots ,K_{{v \choose k}}$ be an enumeration of the $k$-element subsets of $V$. Let $w_U$ be the row matrix $(u_1,u_2,\cdots , u_{v \choose 2})$ where $u_i=1$ if $T_i$ is an edge of $U$, $0$ otherwise.\\ We have
$w_UW_{2\; k}=(e(U_{\restriction K_1}),e(U_{\restriction K_2}),\cdots , e(U_{\restriction K_{v \choose k}}))$.
From the facts that $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ and $e(U_{\restriction K})=e(G_{\restriction K})+e(G'_{\restriction K})-2e(G_{\restriction K}\cap G'_{\restriction K})$ for all $k$-element subsets $K$, $w_U$ belongs to the kernel of $^tW_{2\; k}$ over the $2$-element field. According to Theorem \ref{thm Wilson}, the rank of $W_{2k}$ (mod $2$) is ${v \choose 2} -v+1$. Hence $\dim Ker(^tW_{2\; k})=v-1$.
We give a similar claim as Claim 2.8 of \cite{dlps1}, the proof is identical. \begin{claim} \label{bipartite}Let $k$ be an integer such that $3\leq k\leq v-2$, $k\equiv 3$ (mod $4$), then the kernel of $^t W_{2\; k}$ consists of complete bipartite graphs (including the empty graph). \end{claim} {\parindent0pt {\bf Proof.\ }} Let us recall that a {\it star-graph} of $v$ vertices consists of a vertex linked to all other vertices, those $v-1$ vertices forming an independent set.
First we prove that each star-graph $S$ belongs to $\mathbb{K}$, the kernel of $^t W_{2\; k}$. Let $w_S$ be the row matrix $(s_1,s_2,\cdots , s_{v \choose 2})$ where $s_i=1$ if $T_i$ is an edge of $S$, $0$ otherwise. We have
$w_SW_{2\; k}=(e(S_{\restriction K_1}),e(S_{\restriction K_2}),\cdots , e(S_{\restriction K_{v \choose k}}))$. For all $i\in \{1,\dots ,{v \choose k}\}$, $e(S_{\restriction K_i})=k-1$ if $1\in K_i$, $0$ otherwise. Since $k$ is odd, each star-graph $S$ belongs to $\mathbb{K}$.
The vector space (over the $2$-element field) generated by the star-graphs on $V$ consists of all complete bipartite graphs; since $v\geq3$, these are distinct from the complete graph (but include the empty graph). Moreover, its dimension is $v-1$ (a basis being made of star-graphs). Since $\dim Ker(^tW_{2\; k})=v-1$, then $\mathbb{K}$ consists of complete bipartite graphs as claimed.\endproof
A {\it claw} is a star-graph on four vertices, that is a graph made of a vertex joined to three other vertices, with no edges between these three vertices. A graph is {\it claw-free} if no induced subgraph is a claw.
\begin{claim} \label{clawfree} (\cite{dlps1}) Let $G$ and $G'$ be two graphs on the same set and having the same $3$-homogeneous subsets, then the boolean sum $U: =G\dot {+} G'$ is claw-free. \end{claim}
From Claim \ref{bipartite}, $U$ is a complete bipartite graph and,
from Claim \ref{clawfree}, $U$ is claw-free. Since $v\geq 5$, it follows that $U$ is the empty graph. Hence $G'=G$ as claimed. \endproof
\section{Illustrations to tournaments} \label{section tournaments} Let $T=(V,E)$ be a tournament. For two distinct vertices $x$ and $y$ of $T$, $x\longrightarrow_Ty$ (or simply $x\longrightarrow y$) means that $(x, y)\in E$ and $(y, x)\not\in E$. For $A\subseteq V$ and $y\in V$, $A\longrightarrow y$ means $x\longrightarrow y$ for all $x\in A$. The {\it degree} of a vertex $x$ of $T$ is $d_T(x):=\vert\{ y\in V:x\longrightarrow y\}\vert$. We denote by $T^*$ {\it the dual} of $T$, that is $T^*=(V,E^*)$ with $(x,y)\in E^*$ if and only if $(y,x)\in E$. A {\it transitive} tournament or a {\it total order} or {\it$k$-chain} (denoted $O_k$) is a tournament of cardinality $k$, such that for $x, y, z \in V$, if $x\longrightarrow y$ and $y\longrightarrow z$, then $x\longrightarrow z$. If $x$ and $y$ are two distinct vertices of a total order, the notation $x < y$ means that $x\longrightarrow y$. The tournament $C_3 :=(\{0,1,2\}, \{(0,1),(1,2),(2,0)\})$ (resp. $C_4:=(\{0,1,2,3\},\{(0,3), (0,1), (3,1), (1,2), (2,0), (2,3)\})$) is a $3$-{\it cycle} (resp. $4$-{\it cycle}).
A {\it diamond} is a tournament on $4$ vertices admitting only one interval of cardinality $3$ which is a $3$-cycle. Up to isomorphism, there are exactly two diamonds $\delta^{+}$ and $\delta^{-}=(\delta^{+})^{*}$, where $\delta^+$ is the tournament defined on $\{0, 1, 2, 3\}$ by $\delta^+_{\restriction\{0, 1, 2\}} = C_3$ and $\{0, 1, 2\}\rightarrow 3 $. A tournament isomorphic to $\delta^+$ (resp. isomorphic to $\delta^-$) is said to be a {\it positive diamond} (resp. {\it negative diamond}).
The {\it boolean sum} $U:=T\dot{+} T'$ of two tournaments $T=(V,E)$ and $T'=(V,E')$, is the graph $U$ on $V$ whose edges are pairs $\{x,y\}$ of vertices such that $(x,y)\in E$ if and only if $(x,y)\notin E'$.\\
\begin{figure}
\caption{Cycle $C_3$, Cycle $C_4$, Positive Diamond, Negative Diamond.}
\label{diamond}
\end{figure}
\begin{theorem} \label{tournois}Let $T=(V,E)$ and $T'=(V,E')$ be two tournaments. Let $p$ be a prime number and $k$ be an integer, $2\leq k\leq v-2$. Let $G:=T \dot{+} T'$. We assume that for all $k$-element subsets $K$ of $V$, $e(G_{\restriction K})\equiv 0$ (mod $p$).\\ 1) If $p\geq3$, $k \not\equiv 0,1$ (mod $p$), then $T'=T$.\\ 2) If $p\geq 3$, $k\equiv 0$ (mod $p$), then $T'=T$ or $T'=T^*$.\\ 3) If $p=2$, $k\equiv 2$ (mod $4$), then $T'=T$.\\ 4) If $p=2$, $k\equiv 0$ (mod $4$), then $T'=T$ or $T'=T^*$. \end{theorem}
{\parindent0pt {\bf Proof.\ }} We set $G'$ to be the empty graph. Then $e(G_{\restriction K})\equiv e(G'_{\restriction K})$ (mod $p$).\\ 1) From 1) of Theorem \ref{k=2[4]}, $G$ is the empty graph, then $T'=T$.\\ 2) From 2) of Theorem \ref{k=2[4]}, $G$ is the empty graph or the complete graph, then $T'=T$ or $T'=T^*$.\\ 3) From 3) of Theorem \ref{k=2[4]}, $G$ is the empty graph, then $T'=T$.\\ 4) From Theorem \ref{k=0[4],p=2}, $G$ is the empty graph or the complete graph, then $T'=T$ or $T'=T^*$. \endproof \\
Let $T$ be a tournament; we set $C^{(3)}(T):=\{\{a,b,c\} : T_{\restriction \{a,b,c\}} \ \mbox{is a $3$-cycle} \}$, and $c^{(3)}(T):=\mid C^{(3)}(T)\mid$. Let $T=(V,E)$ and $T'=(V,E')$ be two tournaments and let $k$ be a non-negative integer. We say that $T$ and $T'$ are $k$-{\it hypomorphic} \cite{Bou-Lop,Lr} (resp. $k$-{\it hypomorphic} up to duality)
if for every $k$-element subset $K$ of $V$, the induced subtournaments $T'_{\restriction K}$ and $T_{\restriction K}$ are isomorphic (resp. $T'_{\restriction K}$ is isomorphic to $T_{\restriction K}$ or to $T^*_{\restriction K}$). We say that $T$ and $T'$ are ($\leq k$)-{\it hypomorphic}
if $T$ and $T'$ are $h$-hypomorphic for every $h\leq k$. Similarly, we say that $T$ and $T'$ are $(\leq k)$-{\it hypomorphic up to duality} if $T$ and $T'$ are $h$-hypomorphic up to duality for every $h\leq k$.
\begin{theorem} Let $T$ and $T'$ be two tournaments on the same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $3\leq k\leq v-3$.\\ 1) If $c^{(3)}(T_{\restriction K})=c^{(3)}(T'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $T$ and $T'$ are $(\leq 3)$-hypomorphic.\\ 2) Assume $p\geq5$. If $k\not\equiv 1,2$ (mod $p$), and $c^{(3)}(T_{\restriction K})\equiv c^{(3)}(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$, then $T$ and $T'$ are $(\leq 3)$-hypomorphic.\\ 3) If ($p=2$ and $k\equiv3$ (mod $4$)) or ($p=3$ and $3\mid k$), and $c^{(3)}(T_{\restriction K})\equiv c^{(3)}(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$, then $T$ and $T'$ are $(\leq 3)$-hypomorphic. \end{theorem}
{\parindent0pt {\bf Proof.\ }} Since every tournament of cardinality $\geq 4$ has at least one restriction of cardinality $3$ which is not a $3$-cycle, the proof is similar to that of Theorem \ref{Ka+lem+TR}.\endproof
Let $T$ be a tournament, we set $D^+_4(T):=\{\{a,b,c,d\} : T_{\restriction \{a,b,c,d\}} \simeq \delta^+ \}$, $D^-_4(T):=\{\{a,b,c,d\} : T_{\restriction \{a,b,c,d\}} \simeq \delta^- \}$, $d^+_4(T):=\mid D^+_4(T)\mid$ and $d^-_4(T):=\mid D^-_4(T)\mid$.
It is well-known that every subtournament of order $4$ of a tournament is either a diamond, a $4$-chain, or a $4$-cycle subtournament. We have $c^{(3)}(O_4)=0$, $c^{(3)}(\delta^+)=c^{(3)}(\delta^-)=1$, $c^{(3)}(C_4)=2$ and $C_4\simeq C_4^*$. \begin{theorem}\label{tournament} Let $T$ and $T'$ be two $(\leq 3)$-hypomorphic tournaments on the same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $4\leq k\leq v-4$.\\ 1) If $d^+_4(T_{\restriction K})=d^+_4(T'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $T'$ and $T$ are $(\leq 5)$-hypomorphic.\\ 2) Assume $d^+_4(T_{\restriction K})\equiv d^+_4(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$.\\
a)
If $p\geq5$ and $k \not\equiv 1,2,3$ (mod $p$), then $T'$ and $T$ are $(\leq 5)$-hypomorphic.\\ b) If ($p=3$, $3\mid k-1$ and $9\nmid k-1$) or ($p=2$, $4\mid k$ and $8\nmid k$), then $T'$ and $T$ are $(\leq 5)$-hypomorphic.\\ c) If $p=2$ and $8\mid k$, then $T'$ and $T$ are $(\leq 5)$-hypomorphic or for all $4$-element subsets $S$ of $V$, $T_{\restriction S}$ is isomorphic to $\delta^+$ if and only if $T'_{\restriction S}$ is isomorphic to $\delta^-$. \end{theorem}
{\parindent0pt {\bf Proof.\ }} To prove that $T'$ and $T$ are ($\leq 5$)-hypomorphic, the following lemma shows that it is sufficient to prove that $T'$ and $T$ are ($\leq 4$)-hypomorphic. \begin{lemma} \label{hypomorphe} \cite{B} Let $T$ and $T'$ be two $(\leq 4)$-hypomorphic tournaments on at least $5$ vertices. Then, $T$ and $T'$ are $(\leq 5)$-hypomorphic. \end{lemma}
Now, let $U^+:=\{S\subseteq V, \ T_{\restriction S} \simeq \delta^+ \}=D^+_4(T) $, $U'^+:=D^+_4(T') $, $U^-:=D^-_4(T)$ and $U'^-:=D^-_4(T') $. \begin{claim}\label{3hyp4hyp} If $T$ and $T'$ are $(\leq 3)$-hypomorphic and $U^+=U'^+$, then $U^-=U'^-$; $T$ and $T'$ are $(\leq 5)$-hypomorphic. \end{claim} {\parindent0pt {\bf Proof.\ }} Let $S\in U^-$, $T_{\restriction S}\simeq \delta^-$. Since $T$ and $T'$ are ($\leq 3$)-hypomorphic, then $T'_{\restriction S}\simeq \delta^+$ or $T'_{\restriction S}\simeq \delta^-$. We have $\{S\subseteq V, \ T'_{\restriction S} \simeq \delta^+ \}=\{S\subseteq V, \ T_{\restriction S} \simeq \delta^+ \}$, then $T'_{\restriction S}\simeq \delta^-$, $S\in U'^-$ and $U^-=U'^-$. So, for $X\subset V$, if $T_{\restriction X}$ is a diamond then $T'_{\restriction X} \simeq T_{\restriction X}$.\\
Now we prove that $T$ and $T'$ are $4$-hypomorphic. Let $X\subset V$ such that $|X|=4$. If $ T_{\restriction X} \simeq C_4$, then $c^{(3)}(T_{\restriction X})=2$. Since $T$ and $T'$ are ($\leq 3$)-hypomorphic then $c^{(3)}(T'_{\restriction X})=2$, thus $T'_{\restriction X} \simeq T_{\restriction X} \simeq C_4$. The same, if $ T_{\restriction X} \simeq O_4$ then $T'_{\restriction X} \simeq T_{\restriction X} \simeq O_4$. So, $T'$ and $T$ are ($\leq 4$)-hypomorphic. Then, From Lemma \ref{hypomorphe}, $T'$ and $T$ are ($\leq 5$)-hypomorphic. \endproof
From Claim \ref{3hyp4hyp}, it is sufficient to prove that $U^+=U'^+$.\\ For all $K\subseteq V$ with $\vert K\vert = k$, we have $\{S\subseteq K : S\in U^+\}= D^+_4( T_{\restriction K})$ and $\{S\subseteq K : S\in U'^+\}= D^+_4( T'_{\restriction K})$. \\ 1) Since $d^+_4(T_{\restriction K})=d^+_4(T'_{\restriction K})$ then $\vert\{S\subseteq K : S\in U^+\}\vert=\vert\{S\subseteq K : S\in U'^+\}\vert$. From Lemma \ref{particular mp}, we have $U^+=U'^+$.\\ 2) We have $d^+_4(T_{\restriction K})\equiv d^+_4(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$, then $\vert\{S\subseteq K : S\in U^+\}\vert \equiv \vert\{S\subseteq K : S\in U'^+\}\vert$ (mod $p$).\\ a) Case 1. $p\geq5$, $t=4=[4]_p$, $k=[k_0,\dots]_p$ and $t_0=4\leq k_0$. From 1) of Theorem \ref{thm js} we have $U^+=U'^+$.\\ Case 2. $p\geq5$, $t=4=[4]_p$, $k=[0,k_1,\dots]_p$. Since every tournament of cardinality $\geq 5$ has at least a restriction of cardinality $4$ which is not a diamond, then from 2) of Theorem \ref{thm js}, $U^+=U'^+$.\\ b) Case 1. $p=3$, $t=4=[1,1]_p$, $k=[1,k_1,\dots,k_{k(p)}]_p$ and $t_1=1\leq k_1$. From 1) of Theorem \ref{thm js} we have $U^+=U'^+$.\\ Case 2. $p=2$, $t=4=[0,0,1]_p$ and $k=[0,0,1,k_3,\dots,k_{k(p)}]_p$.\\ From 1) of Theorem \ref{thm js} we have $U^+=U'^+$.\\ c) We have $p=2$, $t=4=[0,0,1]_p$, $k=[0,0,0,k_3,\dots,k_{k(p)}]_p$. Since every tournament of cardinality $\geq 5$ has at least a restriction of cardinality $4$ which is not a diamond, and the fact that $T$ and $T'$ are $3$-hypomorphic, then from 2) of Theorem \ref{thm js}, $U^+=U'^+$, thus $T'$ and $T$ are ($\leq 5$)-hypomorphic, or for all $4$-element subsets $S$ of V, $T_{\restriction S}$ is isomorphic to $\delta^+$ if and only if $T'_{\restriction S}$ is isomorphic to $\delta^-$. \endproof \\
Given a digraph $S=(\{0,1,\dots ,m-1\},A)$, where $m\geq 1$
is an integer, for $i\in \{0,1,\dots ,m-1\}$ we associate a digraph $G_{i}=(V_{i},A_{i})$, with $|V_i|\geq 1$, such that the $V_{i}$'s are mutually disjoint. The \textit{lexicographic sum} of $S$ by the digraphs $G_i$ or simply the S-\textit{sum} of the $G_{i}$'s, is the digraph denoted by $S(G_{0},G_{1},\dots ,G_{m-1})$ and defined on the union of the $V_{i}$'s as follows: given $x\in V_{i}$ and $y\in V_{j}$, where $i,j\in \{0,1,\dots ,m-1\}$, $(x,y)$ is an arc of $S(G_{0},G_{1},\dots ,G_{m-1})$ if either $i=j$ and $(x,y)\in A_{i}$ or $i\neq j$ and $(i,j)\in A$: this digraph replaces each vertex $i$ of $S$ by $G_{i}$. We say that the vertex $i$ of $S$ is \textit{dilated by} $G_{i}$.\\
Let $h$ be a non-negative integer. The integers below are considered modulo $2h+1$. The {\it circular tournament} $T_{2h+1}$ (see Figure $2$) is defined on $\{0,1, \dots , 2h\}$ as follows: ${T_{2h+1}}_{\restriction \{0,1, \dots , h\}}$ is the usual total order on $\{0,1, \dots , h\}$, ${T_{2h+1}}_{\restriction \{h + 1,\dots, 2h\}}$ is also the usual order on $\{h + 1,h + 2, \dots , 2h\}$, however $\{i + 1,i + 2, \dots , h\}\longrightarrow_{T_{2h+1}} i+h+1 \longrightarrow_{T_{2h+1}}\{0,1, \dots , i\}$ for every $i\in \{0,1, \dots , h - 1\}$. A tournament $T$ is said to be an element of $D(T_{2h+1})$ if $T$ is obtained by dilating each vertex of $T_{2h+1}$ by a finite chain $p_{i}$, then $T = T_{2h+1}(p_{0},p_{1},\dots ,p_{2h})$. We recall that $T_{2h+1}$ is indecomposable and $D(T_{2h+1})$ is the class of finite tournaments without diamond \cite{Lr}.\\
We define the tournament $\beta^+_6:= T_{3}(p_{0},p_{1},p_{2})$ with $p_0=(0<1<2)$, $p_1=(3<4)$ and $|p_2|=1$ (see Figure $3$). We set $\beta^-_6:=(\beta^+_6)^*$. For a tournament $T=(V,E)$, we set $B^+_6(T):=\{S\subseteq V : T_{\restriction S} \simeq \beta^+_6 \}$,
$B^-_6(T):=\{S\subseteq V : T_{\restriction S} \simeq \beta^-_6 \}$, $b^+_6(T):=\mid B^+_6(T)\mid$ and $b^-_6(T):=\mid B^-_6(T)\mid$.\\
Two tournaments $T$ and $T'$ on the same vertex set $V$ are \textit{hereditarily isomorphic} if for all $X\subseteq V$, $T_{\restriction X}$ and $T'_{\restriction X}$ are isomorphic \cite{BBN}.
\begin{figure}
\caption{Circular tournament $T_{2h+1}$}
\end{figure}
\begin{figure}
\caption{$\beta_6^+$.}
\end{figure}
Let $G=(V,E)$ and $G'=(V,E')$ be two $(\leq 2)$-hypomorphic digraphs. Denote $D_{G,G'}$ the binary relation on $V$ such that: for $x\in V$, $x D_{G,G'}x$; and for $x \neq y\in V$, $x D_{G,G'}y$ if there exists a sequence $x_{0} =x, . . . , x_{n} =y$ of elements of $V$ satisfying $(x_{i}, x_{i+1})\in E$ if and only if $(x_{i}, x_{i+1})\notin E'$, for all $i$, $0 \leq i \leq n - 1$. The relation $D_{G,G'}$ is an equivalence relation called {\it the difference relation}, its classes are called {\it difference classes}.
Using difference classes, G. Lopez \cite{L1,ls} showed that if $T$ and $T'$ are ($\leq 6$)-hypomorphic then $T$ and $T'$ are isomorphic. One may deduce the next corollary.
\begin{corollary}\label{l1} (\cite{L1,ls}) Let $T$ and $T'$ be two tournaments. We have the following properties:\\ 1) If $T$ and $T'$ are $(\leq 6)$-hypomorphic then $T$ and $T'$ are hereditarily
isomorphic.\\ 2) If for each equivalence class $C$ of $D_{T,T'}$, $C$ is an interval of $T$ and $T'$, and $T'_{\restriction C}$, $T_{\restriction C}$ are $(\leq 6)$-hypomorphic, then $T$ and $T'$ are hereditarily isomorphic. \end{corollary}
\begin{lemma} \label{41} \cite{L2} Given two $(\leq 4)$-hypomorphic tournaments $T$ and $T'$, and $C$ an equivalence class of $ D_{T,T'}$, then:\\ 1) $C$ is an interval of $T'$ and $T$. \\ 2) Every $3$-cycle in $T_{\restriction C}$ is reversed in $T'_{\restriction C}$.\\ 3) There exists an integer $h\geq 0$ such that $T_{\restriction C}=T_{2h+1}(p_0,p_1,\dots,p_{2h})$ and $T'_{\restriction C}=T^*_{2h+1}(p'_0,p'_1,\dots,p'_{2h})$ with $p_i$, $p'_i$ are chains on the same basis, for all $i\in \{0,1,\dots ,2h\}$. \end{lemma}
\begin{theorem} Let $T$ and $T'$ be two $(\leq 4)$-hypomorphic tournaments on the same set $V$ of $v$ vertices. Let $p$ be a prime number and $k=[k_0,k_1,\dots ,k_{k(p)}]_p$ be an integer, $6\leq k\leq v-6$.\\ 1) If $b^+_6(T_{\restriction K})=b^+_6(T'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $T'$ and $T$ are hereditarily isomorphic.\\ 2) Assume $b^+_6(T_{\restriction K})\equiv b^+_6(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$.\\ a) If $p\geq7$, and $k_0\geq 6$ or $k_0=0$, then $T'$ and $T$ are hereditarily
isomorphic.\\ b) If ($p=5$, $k_0=1$ and $k_1\neq 0$) or ($p=3$, $k_0=0$ and $k_1=2$) or ($p=3$ and $k_0=k_1=0$) or ($p=2$, $k_0=0$ and $k_1=k_2=1$), then $T'$ and $T$ are hereditarily
isomorphic. \end{theorem}
{\parindent0pt {\bf Proof.\ }} Let $U^+:=\{S\subseteq V, \ T_{\restriction S} \simeq \beta^+_6 \}=B^+_6(T) $, $U'^+:=B^+_6(T') $, $U^-:=\{S\subseteq V, \ T_{\restriction S} \simeq \beta^-_6 \}=B^-_6(T) $, $U'^-:= B^-_6(T') $.\\ Every tournament of cardinality $\geq 7$ has at least one restriction of cardinality $6$ which is isomorphic to neither $\beta^+_6$ nor $\beta^-_6$.
Then for all cases, similarly to the proof of Theorem \ref{tournament}, we have $U^+=U'^+$.\\
Let $C$ be an equivalence class of $D_{T,T'}$, $S\in U^-$, $T_{\restriction S}\simeq \beta_6^-$. Since $T$ and $T'$ are ($\leq 3$)-hypomorphic, then $T'_{\restriction S}\simeq \beta^+_6$ or $T'_{\restriction S}\simeq \beta^-_6$. We have $\{S\subseteq V, \ T'_{\restriction S} \simeq \beta^+_6 \}=\{S\subseteq V, \ T_{\restriction S} \simeq \beta^+_6 \}$, then $T'_{\restriction S}\simeq \beta^-_6$, $S\in U'^-$ and $U^-=U'^-$. Let $X\subseteq C$ such that $|X|=6$; if $T_{\restriction X}\simeq\beta^+_6$ then, from 2) of Lemma \ref{41}, $T'_{\restriction X}\simeq\beta^-_6$, which is impossible; so $T_{\restriction C}$ and $T'_{\restriction C}$ have no restriction of cardinality $6$ isomorphic to $\beta^+_6$ or $\beta^-_6$. \\ Now we will prove that $T_{\restriction C}$ and $T'_{\restriction C}$ are $(\leq 6)$-hypomorphic.\\ From 3) of Lemma \ref{41}, there exists an integer $h\geq 0$ such that $T_{\restriction C}=T_{2h+1}(p_0,p_1,\dots,p_{2h})$, where $p_i$ is a chain and $a_i\in p_i$ for all $i\in \{0,1,\dots ,2h\}$. Since $T_{\restriction C}$ has no restriction isomorphic to $\beta_6^+$, then $h\leq3$. Indeed, if $h\geq4$, then $T_{\restriction\{a_0,a_1,a_2,a_3,a_4,a_{3+h}\}}\simeq \beta_6^+$, and $\{a_0,a_1,a_2\}$, $\{a_3,a_4\}$ are two intervals of $T_{\restriction\{a_0,a_1,a_2,a_3,a_4,a_{3+h}\}}$, which is impossible.\\ a) If $h=3$, then $T_{\restriction C}=T_7$. Indeed, if $a_0,b_0\in V(p_0)$ then $T_{\restriction\{a_0,b_0,a_1,a_2,a_3,a_5\}}\simeq \beta_6^+$, and $\{a_0,b_0,a_1\}$, $\{a_2,a_3\}$ are two intervals of $T_{\restriction\{a_0,b_0,a_1,a_2,a_3,a_5\}}$, which is impossible.\\ b) If $h=2$, then $T_{\restriction C}=T_5$, or $T_{\restriction C}$ is obtained by dilating one vertex of $T_5$ by a chain of cardinality $2$. Indeed:
Case 1. $a_0,b_0,c_0\in V(p_0)$, then $T_{\restriction\{a_0,b_0,c_0,a_1,a_2,a_3\}}\simeq \beta_6^+$ and $\{a_0,b_0,c_0\}$, $\{a_1,a_2\}$ are two intervals of $T_{\restriction\{a_0,b_0,c_0,a_1,a_2,a_3\}}$, that is impossible.
Case 2. If $a_i,b_i\in V(p_i)$ for all $i\in\{0,1\}$, then $T_{\restriction\{a_0,b_0,a_1,b_1,a_3,a_4\}}\simeq \beta_6^+$ and $\{a_0,b_0,a_4\}$, $\{a_1,b_1\}$ are two intervals of $T_{\restriction\{a_0,b_0,a_1,b_1,a_3,a_4\}}$, that is impossible.
Case 3. If $a_i,b_i\in V(p_i)$ for all $i\in\{0,2\}$, then $T_{\restriction\{a_0,b_0,a_1,a_2,b_2,a_4\}}\simeq \beta_6^+$ and $\{a_0,b_0,a_1\}$, $\{a_2,b_2\}$ are two intervals of $T_{\restriction\{a_0,b_0,a_1,a_2,b_2,a_4\}}$, that is impossible.\\ c) If $h=1$, then $T_{\restriction C}$ is obtained by dilating one vertex of $C_3$ by a chain or by dilating two or three vertices of $C_3$ by a chain of cardinality $2$.\\ d) If $h=0$, then $T_{\restriction C}$ is a chain.\\ In all cases, $T_{\restriction C}$ and $T'_{\restriction C}$ are ($\leq 6$)-hypomorphic. From 1) of Lemma \ref{41}, $C$ is an interval of $T'$ and $T$. Then, from 2) of Corollary \ref{l1}, $T$ and $T'$ are hereditarily isomorphic. \endproof
\end{document} |
\begin{document}
\author{Kees Kok} \address{\parbox{0.9\textwidth}{KdV Institute for Mathematics, University of Amsterdam, Netherlands}} \email{k.kok@uva.nl} \begin{abstract} In this paper we show the failure of the integral Hodge/Tate conjecture for the product of an Enriques surface with a smooth odd-dimensional projective hypersurface. To do this, we use a specialization argument of Colliot-Thélène (\cite{CT}) applied to Schreieder's refined unramified cohomology (\cite{Sch1}). The results obtained in this way give an interpretation of Shen's result (\cite{S}) in terms of refined unramified cohomology. Moreover, using this interpretation, we avoid the need to work over the complex numbers so that we may conclude that Shen's result also holds over general algebraically closed fields of characteristic not 2.
\end{abstract} \title{On the failure of the integral Hodge/Tate conjecture for products with projective hypersurfaces}
\section{Introduction} Let $X$ be a smooth projective complex variety and write $\cl^i_\mathbb{Q}\colon \CH^i(X)_\mathbb{Q}\to\mathrm{H}^{2i}(X,\mathbb{Q}(i))$ for the rational cycle class map from the Chow group to the Betti cohomology of $X$. It is known that its image is contained in the Hodge classes $\mathrm{Hdg}^{2i}(X,\mathbb{Q}):=\mathrm{H}^{2i}(X,\mathbb{Q})\cap\mathrm{H}^{i,i}(X)$, where $\mathrm{H}^k(X,\mathbb{Q})\otimes\mathbb{C}\cong\bigoplus_{p+q=k}\mathrm{H}^{p,q}(X)$ is the Hodge decomposition of $X$. The famous \emph{Hodge conjecture}, first introduced by W.V.D. Hodge in \cite{original-HC} and later modified by A. Grothendieck \cite{adjusted-HC}, predicts that all the Hodge classes are algebraic. The modern formulation is as follows. \begin{conj}[Hodge Conjecture]\label{HC} Let $X$ be a smooth projective complex variety, then $\im(\cl^i_\mathbb{Q})=\mathrm{Hdg}^{2i}(X,\mathbb{Q})$. \end{conj} The Hodge conjecture thus says that topological properties of algebraic varieties can be studied algebraically. We note that it also has a motivic nature as it implies all standard conjectures in characteristic zero \cite[Chapitre 5]{Andre}. A lot of cases are known, but in general the conjecture is wide open. The interested reader can look at \cite{known-cases} for a nice overview.
In view of Hodge's original statement, one could ask what happens if $\mathbb{Q}$ is replaced by $\mathbb{Z}$. This is known as the \emph{integral} Hodge conjecture. \begin{conj}[Integral Hodge Conjecture]\label{IHC} Let $X$ be a smooth projective complex variety, then all integral Hodge classes are integral linear combinations of algebraic cycles, that is $\im(\cl^i)=\mathrm{Hdg}^{2i}(X,\mathbb{Z}):=\mathrm{H}^{2i}(X,\mathbb{Z})\cap\mathrm{H}^{i,i}(X)$. \end{conj} \Cref{IHC} clearly implies \Cref{HC} and trivially holds for $i=0,\dim(X)$. The so called `Lefschetz-(1,1)-theorem' due to Solomon Lefschetz \cite{Lefschetz} gives the $i=1$ case. It turns out however that \Cref{IHC} fails to be true in general.
This failure was first established by Atiyah and Hirzebruch in \cite{AH}. They showed that a certain \emph{torsion} cohomology class, which is automatically Hodge, cannot be algebraic. Kollár was the first to find a Hodge class of infinite order that is not algebraic, whereas a multiple of this class is, \cite{Kol}. Throughout the years, many more counterexamples to the integral Hodge conjecture were found, \cite{SV}, \cite{OS}, \cite{BO}, \cite{CT}, \cite{CT-V}, \cite{S}, \cite{Diaz1}, \cite{Diaz2} to name a few relatively recent ones.
Let us explain how this paper relates to some of the aforementioned ones. In \cite{BO} Benoist and Ottem showed that certain products of a projective curve with an Enriques surface violated \Cref{IHC}. This violation was given an interpretation in terms of \emph{unramified cohomology} by Colliot-Thélène in \cite{CT}. This interpretation was based on the relation between the failure of the integral Hodge conjecture in codimension 2 and the non-vanishing of certain unramified cohomology groups, observed by Colliot-Thélène and Voisin, \cite[Théorème 3.7]{CT-V}.
Later, Shen gave in \cite{S} an interpretation of \cite{BO} in terms of a topological obstruction to algebraicity. A careful analysis of this obstruction allowed Shen to show the existence of non-algebraic Hodge classes on a product of a very general odd-dimensional projective hypersurface of degree $\geq 3$ with an Enriques surface. Shen ended his introduction by asking whether his result also has an interpretation in terms of unramified cohomology, \cite[Remark 1.6]{S}. This paper aims to give this interpretation. In the process, we replace Shen's topological obstruction arguments by purely algebraic ones and doing so, we avoid the need to work over the complex numbers. Instead, in this paper we shall work over an algebraically closed field $k$ of any characteristic (and we shall later specify to $\mathrm{char}(k)\neq 2$). Consequently, we get results involving the \emph{Integral Tate Conjecture}, which we shall describe now.
Let $X$ be a smooth projective variety over $k$ and let $\ell\neq\mathrm{char}(k)$ be a prime. Let $k_0\subseteq k$ be any subfield over which $X$ is defined so that $\bar k_0=k$, that is, there exists a variety $X_0$ over $k_0$ so that $X_0\times_{k_0}k=X$. There is a cycle class map $\cl^i_{\mathbb{Z}_\ell}\colon\CH^i(X)_{\mathbb{Z}_\ell}\to\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))$ and it is known that its image is contained in $\varinjlim_{k_0\subseteq k'}\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))^{G_{k'}}$, where the limit runs over all finite extensions $k'$ of $k_0$ and $G_{k'}$ is the absolute Galois group of $k'$, \cite[ Chapter VI \textsection 9]{Milne} and \cite[Cycle \textsection 2]{SGA42}.
\begin{conj}[Integral Tate Conjecture as in \cite{Schoen}]\label{ITC} Assume $k_0$ is finite over its prime field, then \[ \im(\cl^i_{\mathbb{Z}_\ell})=\varinjlim_{k_0\subseteq k'}\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))^{G_{k'}}. \] \end{conj}
Our main result is the following. \begin{theorem}[cf. \Cref{failure ITC}]\label{intro thm} Let $\mathcal X\to\mathbb{P}^1$ be a Lefschetz pencil of odd-dimensional projective hypersurfaces of degree $d\geq 3$ over a field $k=\bar k$ with $\mathrm{char}(k)\neq 2$ and let $S$ be an Enriques surface over the same field $k$. Then the integral Tate conjecture fails for $X_{\bar\eta}\times S$, where $X_{\bar\eta}$ is the geometric generic fibre of $\mathcal X\to\mathbb{P}^1$. \end{theorem}
As mentioned before, this result is due to an interpretation of \cite{S} in terms of unramified cohomology. More precisely, our approach relies on the theory of \emph{refined unramified cohomology} developed by Schreieder in \cite{Sch1}, which generalizes the obstruction given in \cite{CT-V}. To give a more accurate statement of our main result for the moment, we quickly recall that the refined unramified cohomology group of $X$ with coefficients in $A$ is denoted by $\Hrnr{j}^i(X,A)$ (see \Cref{ref unram cohom} for precise definitions and statements), then Schreieder showed the following. \begin{theorem}[{\cite[Theorem 7.7]{Sch1}}] There is an isomorphism \[ \frac{\Hrnr{i-2}^{2i-1}(X,\mu_{\ell^r}^{\otimes i})}{\Hrnr{i-2}^{2i-1}(X,\mathbb{Z}_\ell(i))}\cong\coker(\cl^i_{\mathbb{Z}_\ell}\colon \CH^i(X)_{\mathbb{Z}_\ell}\to\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i)))[\ell^r]. \] \end{theorem}
Then \Cref{intro thm} becomes \begin{theorem}[More precise form of \Cref{intro thm}] If the dimension of the hypersurfaces appearing in the Lefschetz pencil $\mathcal X$ is $2n-1$, then the natural map \[ \frac{\mathrm{H}_{\acute{e}t}^{2n+1}(X_{\bar\eta}\times S,\mu_2^{\otimes n+1})}{\mathrm{H}_{\acute{e}t}^{2n+1}(X_{\bar\eta}\times S,\mathbb{Z}_2(n+1))}\to\frac{\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mu_2^{\otimes n+1})}{\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mathbb{Z}_2(n+1))} \] is non-zero. That is, there exists a 2-torsion class in $\mathrm{H}_{\acute{e}t}^{2n+2}(X_{\bar\eta}\times S,\mathbb{Z}_2(n+1))$ which is not in the image of $\cl_{\mathbb{Z}_2}^{n+1}$. \end{theorem}
Having established this, we also obtain as direct consequences similar results involving $X_{\bar\eta}\times S^i$ (cf. \Cref{gen failure ITC}) and certain variety of lines (cf. \Cref{var of lines failure}).
\subsection*{Structure of the paper} In \Cref{preliminaries} we recall some general results concerning étale cohomology and cover the construction of the refined unramified cohomology groups and the specialization map from \cite{Sch2}. In \Cref{comparing sp and cosp} we introduce the cospecialization map from \cite[Chapter I\textsection 8]{FK} and show how this map compares with the aforementioned specialization map. Then in \Cref{main section} we apply everything to certain Lefschetz pencils to deduce our application to the integral Hodge/Tate type conjectures, similar to \cite{CT}. We end with some general observations in \Cref{closing remarks}.
\section{Preliminaries}\label{preliminaries} Here we recall the theory of étale cohomology and give proofs of standard properties for reference purposes. Moreover we recall the theory of \cite{Sch1} on refined unramified cohomology and some of its properties we will use. \subsection{Étale Cohomology} \subsubsection{Verdier Duality}\label{verdier duality} Let $f\colon X\to S$ be a compactifiable morphism over $k$. The following is \cite[Chapitre XVIII, Théorème 3.1.4]{SGA4}. \begin{theorem}\label{VD} Let $\mathrm{char}(k)\nmid n$, then the functor $Rf_!\colon D(X,\mathbb{Z}/n\mathbb{Z})\to D(S,\mathbb{Z}/n\mathbb{Z})$ has a partial right adjoint $Rf^!\colon D^+(S,\mathbb{Z}/n\mathbb{Z})\to D^+(X,\mathbb{Z}/n\mathbb{Z})$.
If we moreover assume $f$ to be smooth, then we can define $Rf^!$ as in \cite[\textsection 4.4]{Verdier}. \end{theorem}
Consider the following condition on $f$ from \cite[Definition 1.4]{FK}: \emph{All non-empty geometric fibres of $f$ are smooth of dimension $d$} or more generally condition $(\ast)_d$ from \cite[Chapitre XVIII, Théorème 2.9]{SGA4}: \emph{There exists an open $U\subseteq X$ such that $f|_U$ is flat with fibres of dimension $\leq d$ and the fibres over $X\setminus U$ have dimension $<d$.}
If $f$ satisfies $(\ast)_d$, then there is a trace map \[ \int_{X/S}\colon R^{2d}f_!\Lambda(d)\to\Lambda, \] where the $\Lambda$ are constant $n$-torsion sheaves on $X$ and $S$ respectively \cite[Theorem II.1.6]{FK} or \cite[Chapitre XVIII, Théorème 2.9]{SGA4}. Compatibility of the trace map with base change implies that the stalk at a point $s\in S$ of $\int_{X/S}$ equals the trace map $\int_{X_{\bar s}}\colon\mathrm{H}_c^{2d}(X_{\bar s},\Lambda(d))\to\Lambda$.
There is a morphism of functors $t_f\colon f^\ast(d)[2d]\to Rf^!$ so that the diagram \[ \begin{tikzcd} Rf_! f^\ast(d)[2d]\ar[d,"\int_{X/S}"]\ar[r,"t_f"]&Rf_! Rf^!\ar[ld,"Rf_!\vdash Rf^!"]\\ \mathrm{id}&~ \end{tikzcd} \] commutes, \cite[Chapitre XVIII, Lemme 3.2.3]{SGA4}. By \cite[Chapitre XVIII, Théorème 3.2.5]{SGA4} this $t_f$ is an isomorphism if $f$ is smooth, in particular, if we take $f\colon X\to S=\mathrm{Spec}(\bar k)$ smooth, then the unit $Rf_!Rf^!\Lambda\to\Lambda$ is the trace morphism, which is (in degree 0) $\int_X\colon\mathrm{H}_c^{2d_X}(X,\Lambda)\;\tilde{\rightarrow}\;\Lambda$.
\begin{prop}[Poincaré Duality] Let $X$ be smooth over $k=\bar k$ and let $\mathrm{char}(k)\nmid n$. For $\mathcal F$ a locally constant constructible sheaf of $\mathbb{Z}/n\mathbb{Z}$-modules, there is a natural isomorphism $\mathrm{H}_{\acute{e}t}^{2d_X-i}(X,\mathcal F^\vee)\cong\mathrm{H}_c^i(X,\mathcal F)^\vee=\Hom_{\mathbb{Z}/n\mathbb{Z}}(\mathrm{H}_c^i(X,\mathcal F),\mathbb{Z}/n\mathbb{Z})$. \end{prop} \begin{proof} This follows either from the Verdier Duality \cite[Chapitre XVIII, (3.2.6.2)]{SGA4} or can be shown directly: \cite[Theorem II.1.13]{FK}, \cite[Chapter VI Theorem 11.1]{Milne}. \end{proof}
\begin{definition}\label{Gysin map} Let $f\colon X\to Y$ be a proper map between smooth varieties and set $c=d_X-d_Y$. We define the Gysin map $f_\ast\colon\mathrm{H}_{\acute{e}t}^i(X,\Lambda)\to\mathrm{H}_{\acute{e}t}^{i-2c}(Y,\Lambda)$ to be the composition \[ \mathrm{H}_{\acute{e}t}^i(X,\Lambda)\cong\mathrm{H}_c^{2d_X-i}(X,\Lambda)^\vee\overset{(f^\ast)^\vee}{\to}\mathrm{H}_c^{2d_X-i}(Y,\Lambda)^\vee\cong\mathrm{H}^{i-2c}(Y,\Lambda). \] \end{definition}
\subsubsection{Cup Product} Let $\mathcal F$ be a sheaf on a scheme $S$ and $U\to S$ étale. If $\alpha\in\Gamma(U,\mathcal F)$ is a section of a sheaf and $\bar s\to U$ a geometric point, then we will write $\alpha_{\bar s}\in\mathcal F_{\bar s}$ for its image in the stalk. In particular, if $f\colon X\to S$ is compactifiable and $\alpha\in\Gamma(U,R^if_!\Lambda)$, then the base-change theorem gives $\alpha_{\bar s}\in \mathrm{H}^i_c(X_{\bar s},\Lambda)$.
Given an étale neighborhood $U\to S$ of $s\in S$, then we would like to have a cup product $\Gamma(U,R^if_!\Lambda)\times\mathrm{H}_{\acute{e}t}^j(X_U,\Lambda)\overset{-\cup-}{\to}\Gamma(U,R^{i+j}f_!\Lambda)$ fitting in a commutative diagram \begin{equation}\label{cup product} \begin{tikzcd} \Gamma(U,R^if_!\Lambda)\times\mathrm{H}_{\acute{e}t}^j(X_U,\Lambda)\ar[d]\ar[r,"-\cup-"]&\Gamma(U,R^{i+j}f_!\Lambda)\ar[d]\\ \mathrm{H}_c^i(X_{\bar s},\Lambda)\times\mathrm{H}_{\acute{e}t}^j(X_{\bar s},\Lambda)\ar[r,"-\cup-"]&\mathrm{H}_c^{i+j}(X_{\bar s},\Lambda) \end{tikzcd} \end{equation} where the vertical maps are the natural stalk and restriction maps and the bottom map is the ordinary cup product, defined as follows (\cite[page 303]{FK}): let $\varphi\in\Hom_{D(X,\Lambda)}(\Lambda,\Lambda[j])=\mathrm{H}_{\acute{e}t}^j(X,\Lambda)$, which induces $R(p_X)_!(\varphi)\colon R(p_X)_!(\Lambda)\to R(p_X)_!(\Lambda[j])$, where we write $p_X\colon X\to\mathrm{Spec}(k)$ for the structure map, and taking $i$-th cohomology gives $\mathrm{H}_c^i(X,\Lambda)\to\mathrm{H}_c^{i+j}(X,\Lambda)$.
Write $f^U:=f|_{X_U}\colon X_U\to U$ for the restriction to the étale neighborhood $U\to S$. Then any $\varphi\in\mathrm{H}_{\acute{e}t}^j(X_U,\Lambda)=\Hom_{D(X_U,\Lambda)}(\Lambda,\Lambda[j])$ induces a map $Rf^U_!\varphi\colon Rf^U_!\Lambda\to Rf^U_!\Lambda[j]$. Taking $i$-th cohomology and sections gives us a morphism \[ \Gamma(U,R^if_!\Lambda)\times\mathrm{H}_{\acute{e}t}^j(X_U,\Lambda)\overset{\cup}{\to}\Gamma(U,R^{i+j}f_!\Lambda). \] To show that the desired diagram (\ref{cup product}) commutes, write $i\colon X_{\bar s}\to X_U$ for the geometric fibre, then the composition \[ \Hom_{D(X_U,\Lambda)}(\Lambda,\Lambda[j])\to\Hom_{D(X_U,\Lambda)}(\Lambda,Ri_\ast i^\ast\Lambda[j])\cong\Hom_{D(X_{\bar s},\Lambda)}(\Lambda,\Lambda[j]) \] is given by $i^\ast$ and induces the restriction map $\mathrm{H}_{\acute{e}t}^j(X_U,\Lambda)\to\mathrm{H}_{\acute{e}t}^j(X_{\bar s},\Lambda)$ on cohomology. This shows that $Rf^U_!\varphi(\tilde\alpha)_{\bar s}=\tilde\alpha_{\bar s}\cup i^\ast\varphi$, for any $\tilde\alpha\in\Gamma(U,R^if_!\Lambda)$ and $\varphi\in\mathrm{H}_{\acute{e}t}^j(X_U,\Lambda)$, as wished.
\subsubsection{Norm Map}
We recall here the construction of the Norm map from \cite[0BD2]{Stacks}. Let $f\colon X\to Y$ be a finite flat map, then $f_\ast{\mathcal{O}}_X$ is a finite locally free ${\mathcal{O}}_Y$-module \cite[02KB]{Stacks}, so there exists a Zariski open cover $\mathcal U$ of $Y$ so that $(f_\ast{\mathcal{O}}_X)|_U\cong{\mathcal{O}}_U^{\oplus d}$ for every $U\in\mathcal U$. Then we can define the norm map $(f_\ast{\mathcal{O}}_X^\ast)|_U\to{\mathcal{O}}_U^\ast$ by the determinant of the corresponding multiplication matrix. This also respects localization, so it glues to a map $f_\ast{\mathcal{O}}_X^\ast\to{\mathcal{O}}_Y^\ast$. Note that this also extends over étale sheaves, thus we obtain the following. \begin{definition} Denote the norm map on étale sheaves $N\colon f_\ast\mathbb{G}_{m,X}\to\mathbb{G}_{m,Y}$. \end{definition}
Let $f\colon B'\to B$ be a finite map between smooth curves. Note that such a map is automatically flat. For $\mathcal X\to B$ a smooth family, write $f\colon\mathcal X':=\mathcal X\times_BB'\to\mathcal X$ for the base change as well, which is thus also finite flat. For each Zariski open $U\subseteq B$ we have $U':=f^{-1}(U)\to U$ is still finite flat. By \cite[page 136]{FK} we have a commutative diagram involving the norm and trace map \[ \begin{tikzcd} 0\ar[r]&f_\ast\mu_{\ell^r}\ar[r]\ar[d,"\int_{\mathcal X'_{U'}/\mathcal X_U}"]&f_\ast{\mathcal{O}}_{\mathcal X'_{U'}}^\ast\ar[r,"\ell^r"]\ar[d,"N"]&f_\ast{\mathcal{O}}_{\mathcal X'_{U'}}^\ast\ar[r]\ar[d,"N"]&0\\ 0\ar[r]&\mu_{\ell^r}\ar[r]&{\mathcal{O}}_{\mathcal X_U}^\ast\ar[r,"\ell^r"]&{\mathcal{O}}_{\mathcal X_U}^\ast\ar[r]&0 \end{tikzcd}. \] The corresponding long exact sequence then gives a commutative diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^0(\mathcal X_U,f_\ast{\mathcal{O}}_{\mathcal X'_{U'}}^\ast)\ar[r]\ar[d,"N"]&\mathrm{H}_{\acute{e}t}^1(\mathcal X_U,f_\ast\mu_{\ell^r})\ar[d,"\int_{\mathcal X'_{U'}/\mathcal X_U}"]\\ \mathrm{H}_{\acute{e}t}^0(\mathcal X_U,{\mathcal{O}}_{\mathcal X_U}^\ast)\ar[r]&\mathrm{H}_{\acute{e}t}^1(\mathcal X_U,\mu_{\ell^r}) \end{tikzcd}, \] taking the direct limit over all Zariski opens $U\subseteq B$, we obtain a commuting diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^0(X_{\eta'},{\mathcal{O}}_{X_{\eta'}}^\ast)\ar[r]\ar[d,"N"]&\mathrm{H}_{\acute{e}t}^1(X_{\eta'},\mu_{\ell^r})\ar[d,"\varinjlim_{U\subseteq B} f_\ast"]\\ \mathrm{H}_{\acute{e}t}^0(X_\eta,{\mathcal{O}}^\ast_{X_\eta})\ar[r]&\mathrm{H}_{\acute{e}t}^1(X_\eta,\mu_{\ell^r}) \end{tikzcd}. \] We obtain the following compatibility. 
\begin{lemma}\label{Compatibility Norm and Gysin} The following diagram commutes \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^0(\eta',{\mathcal{O}}^\ast_{\eta'})\ar[r, two heads]\ar[d,"N"]&\mathrm{H}_{\acute{e}t}^1(\eta',\mu_{\ell^r})\ar[r]\ar[d,"\varinjlim f_\ast"]&\mathrm{H}_{\acute{e}t}^1(X_{\eta'},\mu_{\ell^r})\ar[d,"\varinjlim f_\ast"]\\ \mathrm{H}_{\acute{e}t}^0(\eta,{\mathcal{O}}^\ast_{\eta})\ar[r, two heads]&\mathrm{H}_{\acute{e}t}^1(\eta,\mu_{\ell^r})\ar[r]&\mathrm{H}_{\acute{e}t}^1(X_{\eta},\mu_{\ell^r}) \end{tikzcd}. \] \end{lemma} \begin{proof} The left square commutes by the discussion above. As the first horizontal maps are surjective by Hilbert-90, to show that the right square commutes, it suffices to show that the `outer' square commutes. Note that this square decomposes as \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^0(\eta',{\mathcal{O}}^\ast_{\eta'})\ar[r]\ar[d,"N"]&\mathrm{H}_{\acute{e}t}^0(X_{\eta'},\mathcal O_{X_{\eta'}}^\ast)\ar[r]\ar[d,"N"]&\mathrm{H}_{\acute{e}t}^1(X_{\eta'},\mu_{\ell^r})\ar[d,"\lim f_\ast"]\\ \mathrm{H}_{\acute{e}t}^0(\eta,{\mathcal{O}}^\ast_{\eta})\ar[r]&\mathrm{H}_{\acute{e}t}^0(X_\eta,\mathcal O_{X_\eta}^\ast)\ar[r]&\mathrm{H}_{\acute{e}t}^1(X_{\eta},\mu_{\ell^r}) \end{tikzcd}. \] Now the right square commutes by the discussion above and the left square commutes by a direct computation. \end{proof}
\subsubsection{Residue Map} Suppose $X$ is smooth and $i\colon Z\to X$ is a smooth closed subvariety of codimension $c$ with open complement $j\colon U\hookrightarrow X$. Then there exists a long exact sequence \[ \cdots\to\mathrm{H}_{\acute{e}t}^{i-2c}(Z,\Lambda)\overset{i_\ast}{\to} \mathrm{H}_{\acute{e}t}^i(X,\Lambda)\overset{j^\ast}{\to} \mathrm{H}_{\acute{e}t}^i(U,\Lambda)\to \mathrm{H}_{\acute{e}t}^{i+1-2c}(Z,\Lambda)\overset{i_\ast}{\to}\cdots, \] See for example \cite[Remark VI.5.4]{Milne}. From now on, we shall refer to the connecting morphism as the \emph{residue map}, $Res\colon\mathrm{H}_{\acute{e}t}^i(U,\Lambda)\to \mathrm{H}_{\acute{e}t}^{i+1-2c}(Z,\Lambda)$.
\begin{remark}\label{residue compatible restriction} It is known that the residue map is compatible with the natural restriction to opens \cite[Lemma 2.3]{Sch2}. \end{remark}
The following is \cite[Lemma 2.4]{SchNotes}, but we state it here again for reference purposes, together with a direct corollary. \begin{prop}\label{cup-global-class}
Let $(X,Z)$ be a smooth pair and let $\alpha\in\mathrm{H}_{\acute{e}t}^i(X\setminus Z,\Lambda)$ and $\beta\in\mathrm{H}_{\acute{e}t}^i(X,\Lambda)$. Then $Res(\alpha\cup\beta|_{X\setminus Z})=Res(\alpha)\cup\beta|_Z$. \end{prop}
\begin{cor}\label{product-residue} Let $(X,Z)$ be a smooth pair with $Z$ closed of codimension $c$ and let $Y$ be any smooth variety. Let $\alpha\in\mathrm{H}_{\acute{e}t}^i(X\setminus Z,\Lambda)$ and $\beta\in\mathrm{H}_{\acute{e}t}^j(Y,\Lambda)$, then $Res(\alpha\times\beta)=Res(\alpha)\times\beta\in\mathrm{H}_{\acute{e}t}^{i+j+1-2c}(Z\times Y,\Lambda)$. \end{cor}
\subsection{Refined Unramified Cohomology}\label{ref unram cohom} Here we set some notation and gather results from \cite{Sch1} which we will use throughout. We let $X$ be a smooth variety over a field $k$ so that we are in the situation of \cite[Lemma 6.5]{Sch1}. We let $\Lambda$ be $\mu_{\ell^r}$ or $\mathbb{Z}_\ell$ with $\ell$ prime to $\mathrm{char}(k)$.
\begin{definition}[\cite{Sch1}] Let $j$ be an integer, we set \begin{itemize} \item $F_jX:=\{x\in X\mid \mathrm{codim}_X(\overline{\{x\}})\leq j\}$; \item $\mathrm{H}^i(F_jX,\Lambda):=\varinjlim_{F_jX\subseteq U\subseteq X}\mathrm{H}_{\acute{e}t}^i(U,\Lambda)$, where the direct limit is over all non-empty Zariski open $U$ so that $X\setminus U$ is of codimension $>j$. \end{itemize} \end{definition} Note that for $j'\geq j$ we have natural maps $\mathrm{H}^i(F_{j'}X,\Lambda)\to\mathrm{H}^i(F_jX,\Lambda)$. We will usually abuse notation and call this map $F_j$, assuming that the domain is understood from the context. Now the refined unramified cohomology is defined as \[ \Hrnr{j}^i(X,\Lambda):=\im(\mathrm{H}^i(F_{j+1}X,\Lambda)\to\mathrm{H}^i(F_jX,\Lambda)). \]
\begin{lemma}[{\cite[Lemma 5.8]{Sch1}}]\label{les Sch} There is a long exact sequence \[ \cdots\to\bigoplus_{x\in X^{(j)}}\mathrm{H}_{\acute{e}t}^{i-2j}(\kappa(x))\to\mathrm{H}^i(F_jX)\to\mathrm{H}^i(F_{j-1}X)\to\bigoplus_{x\in X^{(j)}}\mathrm{H}_{\acute{e}t}^{i-2j+1}(\kappa(x))\to\mathrm{H}^{i+1}(F_jX)\to\cdots, \] where all cohomology groups have coefficients $\Lambda$. \end{lemma}
\begin{cor}[{\cite[Corollary 5.10]{Sch1}}] The natural map $\mathrm{H}_{\acute{e}t}^i(X,\Lambda)\to\mathrm{H}^i(F_jX,\Lambda)$ is an isomorphism if $j\geq\lceil\frac{i}{2}\rceil$. \end{cor}
We will use the following interpretation of the failure of the integral Hodge/Tate type conjecture in terms of refined unramified cohomology.
\begin{theorem}[{\cite[Theorem 7.7]{Sch1}}] Write $Z^i(X):=\coker(\cl^i\colon \CH^i(X)_{\mathbb{Z}_\ell}\to\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i)))$, then \[ Z^i(X)[\ell^r]\cong\frac{\Hrnr{i-2}^{2i-1}(X,\mu_{\ell^r})}{\Hrnr{i-2}^{2i-1}(X,\mathbb{Z}_\ell)}. \] \end{theorem}
\begin{remark}\label{Tate classes} Note that the non-vanishing of $Z^i(X)[\ell^r]$ indeed gives the failure of the integral Tate \Cref{ITC}. After all, suppose $0\neq\alpha\in Z^i(X)[\ell^r]$, then $\alpha$ is non-algebraic but $\ell^r\alpha=\cl^i(\Gamma)$ is. This implies that $\ell^r\alpha\in\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))^{G_{k_0}}$ for some $k_0\subseteq k$ over which $\Gamma$ is defined. We would now like to show that there exists a finite field extension $k_0\subseteq k'$ so that $\alpha\in\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))^{G_{k'}}$. It holds that $g\alpha-\alpha\in\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))[\ell^r]$ for every $g\in G_{k_0}$. So $G_{k_0}$ acts on the finite set $\alpha+\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))[\ell^r]$. As a profinite set is Hausdorff, $G_{k_0}$ now acts on a discrete space, so it has open stabilizers. This means there exists a finite field extension $k_0\subseteq k'$ so that $G_{k'}$ stabilizes $\alpha$, that is $\alpha\in\varinjlim_{k_0\subseteq k'}\mathrm{H}_{\acute{e}t}^{2i}(X,\mathbb{Z}_\ell(i))^{G_{k'}}$ and is non-algebraic. \end{remark}
We will also use the following properties. They essentially follow from the known properties on ordinary cohomology (i.e.\ for $j$ large), \cite[09YQ and 01YZ]{Stacks}. \begin{lemma}\label{refined-geometric-point} Let $X$ be defined over a field $\eta$ (i.e.\ the generic fibre of a family) and $\Lambda=\mu_{\ell^r}$ be finite. Then there is a natural isomorphism $\varinjlim_{\eta\subseteq\eta'}\mathrm{H}^i(F_jX_{\eta'},\Lambda)\to\mathrm{H}^i(F_jX_{\bar\eta},\Lambda)$ compatible with the $F_j$ filtration, where $\bar\eta$ is the algebraic (resp. separable) closure of $\eta$ and the direct limit runs over all finite (resp. finite separable) extensions $\eta'$ of $\eta$. \end{lemma} \begin{proof} All cohomology groups below have coefficients in $\Lambda$.
Let $\eta'$ be a finite extension of $\eta$; we start by defining the map $\mathrm{H}^i(F_jX_{\eta'})\to\mathrm{H}^i(F_jX_{\bar\eta})$. Let $\alpha\in\mathrm{H}^i(F_jX_{\eta'})$, then $\alpha$ is represented by some $\alpha_{U}\in\mathrm{H}^i(U)$ with $Z:=X_{\eta'}\setminus U$ of codimension $>j$. As the dimensions do not change under algebraic field extensions \cite[Proposition 3.2.7]{Liu}, we have that $Z_{\bar\eta}=X_{\bar\eta}\setminus U_{\bar\eta}$ is still of codimension $>j$. We let the image of $\alpha$ under the map $\mathrm{H}^i(F_jX_{\eta'})\to\mathrm{H}^i(F_jX_{\bar\eta})$ be the image of $\alpha_U$ under the composition $\mathrm{H}_{\acute{e}t}^i(U)\to\mathrm{H}_{\acute{e}t}^i(U_{\bar\eta})\to\mathrm{H}^i(F_jX_{\bar\eta})$. This is well-defined because restricting to opens commutes with base extension. It is also natural with respect to compositions of base-extensions by construction. Thus this defines a map on the direct limits $\varinjlim_{(\eta'\to\eta)}\mathrm{H}^i(F_jX_{\eta'})\to\mathrm{H}^i(F_jX_{\bar\eta})$.
We start by showing it is surjective. Let $\alpha\in\mathrm{H}^i(F_jX_{\bar\eta})$, then $\alpha=\alpha_U\in\mathrm{H}_{\acute{e}t}^i(U)$ for some $F_jX_{\bar\eta}\subseteq U\subseteq X_{\bar\eta}$, say with complement $Z$ of codimension $>j$. Then by \cite[Lemma 3.2.6]{Liu} there exists a finite extension $\eta'\to\eta$ and $Z'\subseteq X_{\eta'}$ such that $Z'_{\bar\eta}=Z$. So if we put $U':=X_{\eta'}\setminus Z'$, then $U'_{\bar\eta}=U$. We know that $\varinjlim_{\eta''\to\eta'}\mathrm{H}_{\acute{e}t}^i(U'_{\eta''})\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^i(U'_{\bar\eta})=\mathrm{H}_{\acute{e}t}^i(U)$, so $\alpha_U=\alpha_{U'_{\eta''}}$ for some $\eta''$ (\cite[09YQ and 01YZ]{Stacks}). Using again \cite[Proposition 3.2.7]{Liu} we see that $\dim(Z'_{\eta''})=\dim(Z')=\dim(Z)$, so $F_jX_{\eta''}\subseteq U'_{\eta''}\subseteq X_{\eta''}$. Then by construction, the image of $\alpha_{U'_{\eta''}}$ in $\mathrm{H}^i(F_jX_{\eta''})$ maps to $\alpha\in\mathrm{H}^i(F_jX_{\bar\eta})$.
For injectivity, let $\alpha\in\mathrm{H}^i(F_jX_{\eta'})$ and suppose it maps to $0$. We will show that $\alpha=0\in\varinjlim_{\eta''\to\eta'}\mathrm{H}^i(F_jX_{\eta''})$ and for the sake of notation, we write $\eta=\eta'$. By definition, this means that if $\alpha$ is represented by $\alpha_U$, then there exists an open $F_jX_{\bar\eta}\subseteq V\subseteq U_{\bar\eta}\subseteq X_{\bar\eta}$ such that $\alpha_{U_{\bar\eta}}|_V=0$. For a similar reason as above, using again \cite[Lemma 3.2.6]{Liu} there exists a finite extension $\eta'\to\eta$ and $V'\subseteq X_{\eta'}$ such that $V'_{\bar\eta}=V$ and $V'\subseteq U_{\eta'}=:U'$. So $0=\alpha_{U_{\bar\eta}}|_V=(\alpha_{U'}|_{V'})_{\bar\eta}$ and thus there exists a finite extension $\eta''\to\eta'$ such that $\alpha_{U_{\eta''}}|_{V'_{\eta''}}=(\alpha_{U'}|_{V'})_{\eta''}=0$, but for the same reason as above, $F_jX_{\eta''}\subseteq V'_{\eta''}\subseteq U_{\eta''}\subseteq X_{\eta''}$, implying that $F_j(\alpha_{U_{\eta''}})=0\in \mathrm{H}^i(F_jX_{\eta''})$ as desired. \end{proof}
\begin{lemma}\label{refined-generic-point} Let $\mathcal X\to Y$ be a family over a smooth irreducible variety $Y$ with generic point $\eta$. There is a natural isomorphism $\varinjlim_{U\subseteq Y}\mathrm{H}^i(F_j\mathcal X_U,\Lambda)\to\mathrm{H}^i(F_j X_\eta,\Lambda)$, where the direct limit runs over all Zariski opens $U\subseteq Y$. \end{lemma} \begin{proof}
Let $\alpha\in\varinjlim_{U\subseteq Y}\mathrm{H}^i(F_j\mathcal X_U,\Lambda)$ be represented by some $\alpha_U\in\mathrm{H}^i(F_j\mathcal X_U,\Lambda)$. Say this $\alpha_U$ is represented by some $\alpha_{U,V}\in \mathrm{H}_{\acute{e}t}^i(V,\Lambda)$ for some $F_j\mathcal X_U\subseteq V\subseteq\mathcal X_U$ and let $Z:=\mathcal X_U\setminus V$. By generic flatness, there exists a $U'\subseteq U$ so that the induced map $Z\cap\mathcal X_{U'}\to U'$ is flat. This implies that $\dim(Z_\eta)+\dim(Y)=\dim(Z\cap\mathcal X_{U'})\leq\dim(Z)$. So $\dim(X_\eta)-\dim(Z_\eta)=\dim(\mathcal X)-\dim(Y)-\dim(Z_\eta)\geq\dim(\mathcal X)-\dim(Z)>j$. This means that $V_\eta:=V\cap X_\eta$ has the property that $F_jX_\eta\subseteq V_\eta$, so we may define the image of $\alpha$ to be $[\alpha_{U,V}|_{V_\eta}]\in\mathrm{H}^i(F_jX_\eta,\Lambda)$. As this is independent of chosen $U$ and $V$, this defines a morphism $\varinjlim_{U\subseteq Y}\mathrm{H}^i(F_j\mathcal X_U)\to\mathrm{H}^i(F_j X_\eta,\Lambda)$.
To show it is an isomorphism, we construct its inverse. Let $\alpha\in\mathrm{H}^i(F_jX_\eta,\Lambda)$ and say it is represented by some $\alpha_V\in\mathrm{H}_{\acute{e}t}^i(V,\Lambda)$ with $F_jX_\eta\subseteq V\subseteq X_\eta$ with complement $Z:=X_\eta\setminus V$. Let $\mathcal Z$ be the closure of $Z$ in $\mathcal X$. So we have an induced map $\mathcal Z\to Y$ and by generic flatness again, there exists an open $U\subseteq Y$ so that $\mathcal Z_U\to U$ is flat. Write $\mathcal V_U:=\mathcal X_U\setminus\mathcal Z_U$, then note that $(\mathcal V_U)_\eta=V$. As $\alpha_V\in\mathrm{H}_{\acute{e}t}^i(V,\Lambda)=\varinjlim_{U'\subseteq U}\mathrm{H}_{\acute{e}t}^i(\mathcal V_{U'},\Lambda)$ (\cite[09YQ and 01YZ]{Stacks}), there exists an open $U'\subseteq U$ so that $\alpha_V$ comes from some $\alpha_{V,U'}\in\mathrm{H}_{\acute{e}t}^i(\mathcal V_{U'},\Lambda)$. Note that $\mathcal Z_{U'}\to U'$ is still flat, so we have $\dim(\mathcal X_{U'})-\dim(\mathcal Z_{U'})=\dim(\mathcal X_{U'})-\dim(U')-\dim(Z)=\dim(X_\eta)-\dim(Z)>j$. So if we define the image of $\alpha$ to be the image of $\alpha_{V,U'}$ under the map $\mathrm{H}_{\acute{e}t}^i(\mathcal V_{U'},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(F_j\mathcal X_{U'},\Lambda)\to\varinjlim_{U\subseteq Y}\mathrm{H}^i(F_j\mathcal X_U)$, then one can check that this defines the inverse of the map above. \end{proof}
We will also need the following, which is an easier version of \cite[Lemma 7.8]{Sch1}. \begin{lemma}\label{liftable lemma} Let $r\in\mathbb{Z}_{>0}$ and $A=\mu_{\ell^s}$ with $s\geq r$. Then the natural map \[ \frac{\mathrm{H}_{\acute{e}t}^{2i}(X,\mu_{\ell^r})}{\mathrm{H}_{\acute{e}t}^{2i}(X,A)}=\frac{\mathrm{H}_{\acute{e}t}^{2i}(F_iX,\mu_{\ell^r})}{\mathrm{H}_{\acute{e}t}^{2i}(F_iX,A)}\overset{F_{i-1}}{\to}\frac{\Hrnr{i-1}^{2i}(X,\mu_{\ell^r})}{\Hrnr{i-1}^{2i}(X,A)} \] is an isomorphism. \end{lemma} \begin{proof} This follows directly from a chase in the commuting diagram with exact rows \[ \begin{tikzcd} \bigoplus_{x\in X^{(i)}}x\cdot A\ar[r,"\cl^i"]\ar[d, two heads,"\mod \ell^r"]&\mathrm{H}^{2i}(X,A)\ar[r,"F_{i-1}"]\ar[d]&\Hrnr{i-1}^{2i}(X,A)\ar[d]\ar[r]&0\\ \bigoplus_{x\in X^{(i)}}x\cdot\mu_{\ell^r}\ar[r,"\cl^i"]&\mathrm{H}^{2i}(X,\mu_{\ell^r})\ar[r,"F_{i-1}"]&\Hrnr{i-1}^{2i}(X,\mu_{\ell^r})\ar[r]&0 \end{tikzcd}. \] Let $\alpha\in\mathrm{H}^{2i}(X,\mu_{\ell^r})$ and suppose $F_{i-1}(\alpha)=F_{i-1}(\beta)\mod\ell^r=F_{i-1}(\beta\mod\ell^r)$ for some $\beta\in\mathrm{H}^{2i}(X,A)$, then $\alpha-(\beta\mod\ell^r)$ is algebraic and thus equal to $\cl^i(\xi\mod\ell^r)$ for some $\xi\in\bigoplus_{x\in X^{(i)}}x\cdot A$ by the surjectivity of the left vertical map. Then we compute that indeed $(\cl^i(\xi)+\beta)\mod\ell^r=\alpha$. \end{proof}
\subsection{Specialization Map}\label{def spec map} In this section we recall the construction of the specialization map from \cite[Section 4]{Sch2} and recollect some of its properties.
\subsubsection{The Construction} Let $\mathcal X\to B$ be a smooth family over a smooth curve $B$. Let $\eta\in B$ be the generic point and $b\in B$ a closed point. Note that we have a residue map $\mathrm{H}_{\acute{e}t}^i(X_\eta,\Lambda)=\varinjlim_{U\subseteq B}\mathrm{H}_{\acute{e}t}^i(\mathcal X_U,\Lambda)\overset{Res}{\to}\bigoplus_{b\in B}\mathrm{H}_{\acute{e}t}^{i-1}(X_b,\Lambda)$, where the direct limit is over the Zariski open subsets of $B$. Define $Res_b\colon\mathrm{H}_{\acute{e}t}^i(X_\eta,\Lambda)\to\mathrm{H}_{\acute{e}t}^{i-1}(X_b,\Lambda)$ to be the above residue map composed with the projection to the $b$-th component.
For the construction of the specialization map, note that ${\mathcal{O}}_{B,b}$ is a discrete valuation ring and let $\pi\in \mathrm{Frac}({\mathcal{O}}_{B,b})=\eta$ be a uniformizer. We can view this as an element $\pi\in\eta^\ast/\eta^{\ast\ell^r}=\mathrm{H}_{\acute{e}t}^1(\eta,\mu_{\ell^r})$ and shall write $(\pi):=f^\ast\pi\in\mathrm{H}_{\acute{e}t}^1(X_\eta,\mu_{\ell^r})$. \begin{definition} The specialization map $sp_b\colon\mathrm{H}_{\acute{e}t}^i(X_\eta,\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^i(X_b,\mu_{\ell^r})$ is defined as \[ sp_b(\alpha):=-Res_b((\pi)\cup\alpha). \] \end{definition} This is independent of chosen uniformizer, for if $\pi'$ is another uniformizer, then the difference $\pi-\pi'$ of their classes in $\mathrm{H}_{\acute{e}t}^1(\eta,\mu_{\ell^r})$ is represented by the unit $\pi/\pi'$, which has valuation zero. As the residue map $\mathrm{H}_{\acute{e}t}^1(\eta,\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^0(b,\mu_{\ell^r})\cong\mathbb{Z}/\ell^r\mathbb{Z}$ is given by taking the valuation \cite[Proposition 1.3]{CT-O}, we see that this implies that $\pi-\pi'$ has residue $0\in\mathrm{H}_{\acute{e}t}^0(b,\mu_{\ell^r})$. So we are done by the following lemma.
\begin{lemma}\label{Specialization Along Liftable} Let $\pi\in\mathrm{H}_{\acute{e}t}^1(\eta,\mu_{\ell^r})$ such that $Res_b(\pi)=0$ in $\mathrm{H}_{\acute{e}t}^{0}(b,\mu_{\ell^r})$, then $Res_b((\pi)\cup\alpha)=0$ in $\mathrm{H}_{\acute{e}t}^i(X_b,\mu_{\ell^r})$ for every $\alpha\in\mathrm{H}_{\acute{e}t}^i(X_\eta,\mu_{\ell^r})$.
Moreover, if $\pi\in\mathrm{H}_{\acute{e}t}^j(\eta,\mu_{\ell^r})$ for $j>1$, then $Res_b((\pi)\cup\alpha)=0$. \end{lemma} \begin{proof}
As $\mathrm{H}_{\acute{e}t}^{j-1}(b,\mu_{\ell^r})=0$ for $j>1$, both assumptions imply that $\pi$ lifts to some $\mu\in\mathrm{H}_{\acute{e}t}^j({\mathcal{O}}_{B,b},\mu_{\ell^r})$, thus by \Cref{cup-global-class} we have $Res_b((\mu)|_{X_\eta}\cup\alpha)=(-1)^j(\mu)|_{X_b}\cup Res_b(\alpha)$. But $(\mu)|_{X_b}=(\mu_b)=0$ as $\mathrm{H}_{\acute{e}t}^j(b)=0$ for $j>0$. \end{proof}
We also note the following property, which is a consequence of \Cref{cup-global-class}. \begin{prop}[{\cite[Lemma 4.4]{Sch2}}]\label{global sp}
If $\alpha\in \mathrm{H}_{\acute{e}t}^i(X_\eta,\mu_{\ell^r})$ is extendable to some $\alpha_U\in \mathrm{H}_{\acute{e}t}^i(\mathcal X_U,\mu_{\ell^r})$ with $b\in U$, then $sp_b(\alpha)=\alpha_U|_{X_b}$. \end{prop}
At some point, we will need the following property of the specialization map. \begin{prop} Let $\pi$ be a uniformizer at $b$, then $\im(\mathrm{H}_{\acute{e}t}^i(X_\eta,\mu_{\ell^r})\overset{(\pi)\cup -}{\to}\mathrm{H}_{\acute{e}t}^{i+1}(X_\eta,\mu_{\ell^r}))\subseteq\ker(sp_b)$. \end{prop} \begin{proof} Since $\pi\cup\pi\in\mathrm{H}_{\acute{e}t}^2(\eta,\mu_{\ell^r})$, \Cref{Specialization Along Liftable} implies directly that $sp_b((\pi)\cup\alpha)=-Res_b((\pi\cup\pi)\cup\alpha)=0$.
\end{proof}
\begin{remark}\label{deRham1} Let us note the similarity with the residue map on logarithmic de Rham cohomology as in \cite[\textsection 4.1]{PS}. If we let $t$ be a local coordinate around $b$, then we can set $sp(\omega):=-res(\frac{\mathrm{d}t}{t}\wedge\omega)$, where now $\frac{\mathrm{d}t}{t}$ serves as the uniformizer. So we clearly see that $sp(\frac{\mathrm{d}t}{t}\wedge\omega)=0$. \end{remark}
\begin{prop}\label{product-sp} Let $\mathcal X\to B$ be a smooth family over a curve and $Y$ any smooth variety. Let $\alpha\in\mathrm{H}_{\acute{e}t}^i(X_\eta,\mu_{\ell^r})$ and $\beta\in\mathrm{H}_{\acute{e}t}^j(Y,\mu_{\ell^r})$, then $sp_b(\alpha\times\beta)=sp_b(\alpha)\times\beta\in\mathrm{H}_{\acute{e}t}^{i+j}(X_b\times Y,\mu_{\ell^r})$. \end{prop} \begin{proof} This follows directly from \Cref{product-residue}.
\end{proof}
\begin{remark}\label{coeff sp} We note that the specialization map is also functorial in its coefficients with respect to maps $\mu_{\ell^r}\twoheadrightarrow\mu_{\ell^s}$ with $r\geq s$. \end{remark}
The following lemma implies that the specialization map can be defined on the $F_j$ filtrations, which is also \cite[Lemma 4.6]{Sch2}. \begin{lemma}\label{Residue Refined} Let $X$ be a smooth variety and $D\subseteq X$ a smooth divisor. There exists a refined residue morphism $\mathrm{H}^i(F_n(X\setminus D),\Lambda)\overset{Res}{\to}\mathrm{H}^{i-1}(F_n D,\Lambda)$ compatible with the filtrations $F_\ast$. \end{lemma} \begin{proof} Let $F_n(X\setminus D)\subseteq U\subseteq X\setminus D$ be a Zariski open, so $Z:=(X\setminus D)\setminus U$ is of codimension $>n$ in $X\setminus D$. Let $\bar Z$ be the closure of $Z$ in $X$. Then we compute that $U=X\setminus (D\cup\bar Z)=U'\setminus D'$, where $U':=X\setminus \bar Z$ and $D'=D\cap U'=D\setminus D\cap \bar Z$. Note that $D'\neq\emptyset$, else $D\subseteq\bar Z$, but this implies that either $\bar Z=X$, so $Z=X\setminus D$ which is not possible, or there exists an irreducible component $Z_0\subseteq Z$ such that $\bar Z_0=D$, but $Z_0\cap D\subseteq Z\cap D=\emptyset$. So using \cite[0A21]{Stacks}, we conclude that $D'$ is a smooth divisor of the smooth $U'$ with open complement $U$. So we have the residue map $\mathrm{H}_{\acute{e}t}^i(U,\Lambda)\to\mathrm{H}_{\acute{e}t}^{i-1}(D',\Lambda)$.
We will show that $D\setminus D'$ has codimension $>n$ in $D$ so that we have $\mathrm{H}_{\acute{e}t}^{i-1}(D',\Lambda)\to \mathrm{H}^{i-1}(F_nD,\Lambda)$. As before, no irreducible component of $\bar Z$ is contained in $D$, so $\bar Z$ intersects $D$ transversally because $D$ is a divisor (\cite[begin \textsection 13.1.1]{EisHar} or \cite[above Lemma 9.9]{VoisII}) or $\bar Z\cap D=\emptyset$ in which case $D=D'$. This means that $\dim(\bar Z)-1=\dim(D\cap\bar Z)=\dim(D\setminus D')$ and thus $\dim(D)-\dim(D\setminus D')=\dim(D)-\dim(\bar Z)+1=\dim(X)-\dim(\bar Z)$. But $X\setminus D\subseteq X$ and $Z=\bar Z\cap (X\setminus D)\subseteq \bar Z$ are opens, so again by \cite[0A21]{Stacks} we have $\dim(X)-\dim(\bar Z)=\dim(X\setminus D)-\dim(Z)>n$ as wished.
As the residue map is compatible with restriction to opens, the above gives a well-defined map $\mathrm{H}^i(F_n(X\setminus D),\Lambda)\to\mathrm{H}^{i-1}(F_nD,\Lambda)$.
\end{proof}
Using the above Lemma and \Cref{refined-generic-point}, we can now consider the residue map $\mathrm{H}^i(F_n X_\eta,\Lambda)\cong\varinjlim_{U\subseteq B}\mathrm{H}^i(F_n\mathcal X_U,\Lambda)\to\bigoplus_{b\in B}\mathrm{H}^{i-1}(F_nX_b,\Lambda)$. Write again $Res_b\colon\mathrm{H}^i(F_nX_\eta,\Lambda)\to\mathrm{H}^{i-1}(F_nX_b,\Lambda)$ for the composition of this map with the projection to the $b$-th component. Using this, we can define the specialization map on $\mathrm{H}_{\acute{e}t}^i(F_nX_\eta,\mu_{\ell^r})$ for every $n$.
\begin{cor}[{\cite[Lemma 4.6]{Sch2}}]\label{Specialization Refined} For each $n$ there exists a map $sp_b\colon\mathrm{H}_{\acute{e}t}^i(F_nX_\eta,\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^i(F_nX_b,\mu_{\ell^r})$ compatible with the filtration $F_\ast$. \end{cor} \begin{proof}
Let $\alpha\in\mathrm{H}_{\acute{e}t}^i(F_nX_\eta,\mu_{\ell^r})$, then $\alpha=[\alpha_U]$ for some $\alpha_U\in\mathrm{H}_{\acute{e}t}^i(U,\mu_{\ell^r})$ with $X_\eta\setminus U$ of codimension $>n$. We write $(\pi)\cup\alpha:=[(\pi)|_U\cup\alpha_U]\in\mathrm{H}_{\acute{e}t}^{i+1}(F_nX_\eta,\mu_{\ell^r})$ and set \[ sp_b(\alpha):=-Res_b((\pi)\cup\alpha)\in \mathrm{H}_{\acute{e}t}^i(F_nX_b,\mu_{\ell^r}), \] which is precisely \cite[Lemma 4.6]{Sch2}. As before, one can check that it is still independent of the chosen uniformizer. \end{proof}
\subsubsection{Compatibility with Finite Extensions} Let $\mathcal X\to B$ be a smooth family over a smooth curve $B$ and $f\colon B'\to B$ be a smooth finite morphism. Write also $f\colon\mathcal X'\to\mathcal X$ for the base change. Let $b'\in B'$ and $b=f(b')\in B$. Because we work over an algebraically closed field, $f$ induces an isomorphism $f_{b'}\colon X'_{b'}\;\tilde{\rightarrow}\; X_b$.
\begin{lemma}\label{Gysin Respects Uniformizer} Suppose $f^{-1}(b)=\{b',b_1',\dots,b_n'\}$, then there exists a uniformizer $\pi'\in{\mathcal{O}}_{B',b'}$ that does not vanish at any of the other $b_i'$. Moreover, for such $\pi'$ we have that $f_\ast\pi'\in\mathrm{H}_{\acute{e}t}^1(\eta,\mu_{\ell^r})$ represents a uniformizer of $b$. \end{lemma} \begin{proof} Embed $B'\subseteq\mathbb{P}^N$ in some projective space. Let $H_0,H_1$ be hyperplanes of $\mathbb{P}^N$, intersecting $B'$ transversally and so that $H_0\cap f^{-1}(b)=\emptyset$ and $H_1\cap f^{-1}(b)=\{b'\}$. Then inside $\mathbb{P}^N\setminus H_0\cong\mathbb{A}^N$, the hyperplane $H_1$ defines a function that vanishes at $b'$ but not at any of the other $b_i'$.
Suppose this $\pi'$ is defined on some $U'\subseteq B'$. Let $U:=B\setminus f(B'\setminus U')$ and shrink $U'$ to be $f^{-1}(U)$. Note that now $b\notin U$ as $b'\notin U'$ and by \cite[Lemma 2.3]{Sch2} we have a commutative diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^1(U')\ar[r,"Res"]\ar[d,"f_\ast"]&\bigoplus_{b'\in B'\setminus U'}\mathrm{H}_{\acute{e}t}^0(b')\ar[d,"f_\ast"]\\ \mathrm{H}_{\acute{e}t}^1(U)\ar[r,"Res"]&\bigoplus_{b\in B\setminus U}\mathrm{H}_{\acute{e}t}^0(b) \end{tikzcd}. \] So we see that $Res_b(f_\ast\pi')=\sum_{b'\in f^{-1}(b)}f_\ast Res_{b'}(\pi')=f_\ast Res_{b'}(\pi')$ by construction of $\pi'$. Because $\pi'$ is a uniformizer at $b'$, we conclude that $f_\ast\pi'$ represents a uniformizer at $b$. \end{proof}
\begin{prop}\label{compatibility-sp-fin-ext} The diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^i(X_{\eta'},\mu_\ell)\ar[r,"sp_{b'}"]&\mathrm{H}_{\acute{e}t}^i(X'_{b'},\mu_\ell)\ar[d,"(f_{b'})_\ast"]\\ \mathrm{H}_{\acute{e}t}^i(X_{\eta},\mu_\ell)\ar[u,"f^\ast"]\ar[r,"sp_b"]&\mathrm{H}_{\acute{e}t}^i(X_b,\mu_\ell) \end{tikzcd} \] commutes, that is, $(f_{b'})_\ast sp_{b'}(f^\ast\alpha)=sp_b(\alpha)$. \end{prop} \begin{proof} Using \cite[Lemma 2.3]{Sch2} we have a commutative diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{i+1}(\mathcal X')\ar[r]\ar[d,"f_\ast"]&\mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'})\ar[r,"Res"]\ar[d,"f_\ast"]&\bigoplus_{b'\in B'}\mathrm{H}_{\acute{e}t}^i(X'_{b'})\ar[d,"(\sum_{b'\in f^{-1}(b)}(f_{b'})_\ast)_b"]\\ \mathrm{H}_{\acute{e}t}^{i+1}(\mathcal X)\ar[r]&\mathrm{H}_{\acute{e}t}^{i+1}(X_\eta)\ar[r,"Res"]&\bigoplus_{b\in B}\mathrm{H}_{\acute{e}t}^i(X_{b}) \end{tikzcd}, \] where the middle vertical map must be viewed as the direct limit of push forwards.
Let $\pi'$ be a uniformizer at $b'$ with the property from \Cref{Gysin Respects Uniformizer}, so that $f_\ast\pi'=:\pi$ is a uniformizer at $b$. Then we compute \[ -Res_b(f_\ast((\pi')\cup f^\ast\alpha))=-Res_b((\pi)\cup\alpha)=sp_b(\alpha), \] using the projection formula and the second part of \Cref{Gysin Respects Uniformizer}. And on the other hand \[ -Res_b(f_\ast((\pi')\cup f^\ast\alpha))=-\sum_{b'\in f^{-1}(b)}(f_{b'})_\ast Res_{b'}((\pi')\cup f^\ast\alpha)=(f_{b'})_\ast sp_{b'}(f^\ast\alpha), \] where in the last step we used the property of $\pi'$ and \Cref{Specialization Along Liftable} again. \end{proof}
\begin{remark} Note that \Cref{compatibility-sp-fin-ext} also implies \cite[Lemma 4.5]{Sch2} as if $\alpha,\tilde\alpha\in\mathrm{H}_{\acute{e}t}^i(X_\eta,\mu_{\ell^r})$ so that $\alpha=\tilde\alpha\in\mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\mu_{\ell^r})=\varinjlim_{\eta'\to\eta}\mathrm{H}_{\acute{e}t}^i(X_{\eta'},\mu_{\ell^r})$, then there exists a finite extension $\eta'\to\eta$ so that $\alpha=\tilde\alpha\in\mathrm{H}_{\acute{e}t}^i(X_{\eta'},\mu_{\ell^r})$. Let $b'\mapsto b$, then we compute $sp_b(\alpha)=(f_{b'})_\ast sp_{b'}(\alpha)=(f_{b'})_\ast sp_{b'}(\tilde\alpha)=sp_b(\tilde\alpha)$. \end{remark}
\section{Comparing the Specialization and Cospecialization Map}\label{comparing sp and cosp} The goal of this section is to give a relation between the cospecialization map as defined in \cite{FK} and the specialization map from \Cref{def spec map}. We start by recalling the cospecialization map.
\subsection{The Cospecialization map}\label{cospecialization map} The following definition is a special case of the construction given on \cite[page 95]{FK}. \begin{definition} Let $\mathcal F$ be a sheaf on an irreducible variety $X$ with generic point $\eta$. For every $x\in X$, there is a cospecialization map \[ cosp\colon\mathcal F_{\bar x}\to\mathcal F_{\bar\eta}, \] defined by the inclusion of directed sets $\{(U,u)\overset{\acute{e} t}{\to}(X,x)\}\to\{U\overset{\acute{e} t}{\to}X\}$. \end{definition}
If $f\colon X\to S$ is a compactifiable morphism and taking $\mathcal F=R^i f_!\Lambda$, then the base change isomorphism gives a homomorphism \[ cosp\colon\mathrm{H}_c^i(X_{\bar s},\Lambda)\to\mathrm{H}_c^i(X_{\bar\eta},\Lambda). \] If $f$ is proper, we obtain \[ cosp\colon\mathrm{H}_{\acute{e}t}^i(X_{\bar s},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\Lambda). \]
The following property of the cospecialization map follows directly from the definition. We state it for reference purposes. \begin{prop}\label{functorial-cosp} Given a morphism of sheaves $\varphi\colon\mathcal F\to\mathcal G$ on $X$, then for every $x\in X$ the following diagram commutes \[ \begin{tikzcd} \mathcal F_{\bar x}\ar[r,"cosp"]\ar[d,"\varphi_{\bar x}"]&\mathcal F_{\bar\eta}\ar[d,"\varphi_{\bar\eta}"]\\ \mathcal G_{\bar x}\ar[r,"cosp"]&\mathcal G_{\bar\eta} \end{tikzcd}. \] \end{prop}
\begin{cor}\label{constant trace} Let $f\colon X\to S$ be a morphism satisfying $(\ast)_d$ (cf. \Cref{verdier duality}) and let $U\to S$ be an étale neighborhood where $U$ is irreducible with generic point $\eta$. Then for $\tilde\alpha\in\Gamma(U,R^{2d}f_!\Lambda)$ and $s\in U$, we have $\int_{X_{\bar\eta}}\tilde\alpha_{\bar\eta}=cosp\int_{X_{\bar s}}\tilde\alpha_{\bar s}$. \end{cor} \begin{proof} This follows because the trace map is compatible with base change and by definition we see that $cosp(\tilde\alpha_{\bar s})=\tilde\alpha_{\bar\eta}$, so it follows from \Cref{functorial-cosp}. \end{proof}
\begin{cor}\label{relation cosp} Let $j\colon U\hookrightarrow X$ be an open and $f\colon X\to S$ a compactifiable morphism. Then the diagram \[ \begin{tikzcd} \mathrm{H}_c^i(U_{\bar s},\Lambda)\ar[r,"cosp"]\ar[d]&\mathrm{H}_c^i(U_{\bar\eta},\Lambda)\ar[d]\\ \mathrm{H}_c^i(X_{\bar s},\Lambda)\ar[r,"cosp"]&\mathrm{H}_c^i(X_{\bar\eta},\Lambda) \end{tikzcd} \] commutes. \end{cor} \begin{proof}
The vertical morphisms are defined by taking the stalks of the cohomology of the composition $R^i(f|_U)_!\Lambda=R^if_!j_!j^\ast\Lambda\overset{j_!j^\ast\to\mathrm{id}}{\to}R^if_!\Lambda$. The result now follows from \Cref{functorial-cosp}. \end{proof}
\begin{prop}\label{cosp-base-point-indep} Let $f\colon\mathcal X\to B$ be a smooth family over a curve and $\pi\colon B'\to B$ a finite map. Let $b'\in B'$ be a closed point lying over $b\in B$. Then the following diagram commutes \[ \begin{tikzcd} \mathrm{H}_c^i(X_b,\Lambda)\ar[r,"cosp"]\ar[d,"\pi_{b'}^\ast"]&\mathrm{H}_c^i(X_{\bar\eta},\Lambda)\ar[d]\\ \mathrm{H}_c^i(X'_{b'},\Lambda)\ar[r,"cosp"]&\mathrm{H}_c^i(X_{\bar\eta'},\Lambda) \end{tikzcd}. \] \end{prop} \begin{proof} Consider the diagram \[ \begin{tikzcd} \mathrm{H}_c^i(X_b,\Lambda)=(R^if_!\Lambda)_b\ar[r,"\cong"]\ar[d,"cosp"]&(\pi^\ast R^if_!\Lambda)_{b'}\ar[r,"\cong"]\ar[d,"cosp"]&(R^if'_!\Lambda)_{b'}\ar[d,"cosp"]=\mathrm{H}_c^i(X'_{b'},\Lambda)\\ \mathrm{H}_c^i(X_{\bar\eta},\Lambda)=(R^if_!\Lambda)_{\bar\eta}\ar[r,"\cong"]&(\pi^\ast R^if_!\Lambda)_{\bar\eta'}\ar[r,"\cong"]&(R^if'_!\Lambda)_{\bar\eta'}=\mathrm{H}_c^i(X_{\bar\eta'},\Lambda) \end{tikzcd}, \] where the right horizontal maps are the base-change isomorphisms. So the right square commutes by \Cref{functorial-cosp}. One can check directly that the left square commutes.
So we are left to check that the horizontal maps are the pullback maps on cohomology. This follows from formal compatibilities between the base-change isomorphisms.
\end{proof}
Let us from now on assume that $\Lambda$ is finite of order prime to the characteristic of the base field $k$. \begin{definition}\label{def dual cosp} Let $f\colon\mathcal X\to B$ be a smooth family over a curve. We define \[ cosp^\vee\colon\mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda), \] to be the composition of $(\mathrm{H}_c^{2d-i}(X_b,\Lambda)\overset{cosp}{\to}\mathrm{H}_c^{2d-i}(X_{\bar\eta},\Lambda))^\vee$ and the Poincaré Duality isomorphisms, where we use the identification \[ \begin{tikzcd} \mathrm{H}^{2d}_c(X_b,\Lambda)\ar[r,"cosp"]\ar[d,"\int_{X_b}"]&\mathrm{H}^{2d}_c(X_{\bar\eta},\Lambda)\ar[d,"\int_{X_{\bar\eta}}"]\\ \Lambda_b\ar[r,"cosp"]&\Lambda_{\bar\eta} \end{tikzcd}. \] So $cosp^\vee(\alpha)$ is characterized by $cosp\int_{X_b}\beta\cup cosp^\vee(\alpha)=\int_{X_{\bar\eta}}cosp(\beta)\cup\alpha$ for every $\beta\in \mathrm{H}^{2d-i}_c(X_b,\Lambda)$. \end{definition}
The following is also a direct consequence of \Cref{cosp-base-point-indep}. \begin{cor}\label{dualcosp-base-point-indep} Assume the same setup as in \Cref{cosp-base-point-indep}. Then the following diagram commutes \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^i(X_{\bar\eta'},\Lambda)\ar[r,"cosp^\vee"]\ar[d]&\mathrm{H}_{\acute{e}t}^i(X_{b'},\Lambda)\ar[d,"(\pi_{b'})_\ast"]\\ \mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\Lambda)\ar[r,"cosp^\vee"]&\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda) \end{tikzcd}. \] \end{cor}
\subsection{General Comparison} In this subsection we study under which conditions the specialization map from \Cref{def spec map} and the dual of the cospecialization map \Cref{def dual cosp} compare.
We again let $\mathcal X\to B$ be a smooth family over a smooth curve $B$ with generic point $\eta$. We also let $B'\to B$ be a finite map from a smooth curve $B'$ with generic point $\eta'$. Write $\mathcal X':=\mathcal X\times_BB'$ for the base change. We start with a technical lemma. \begin{lemma}\label{Cohomology Decomposition}
Let $b\in B$ be a closed point and suppose $\mathrm{H}_{\acute{e}t}^i(\mathcal X,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda)$ is surjective. Let $b'\in B'$ be a point lying over $b$; then any $\beta'\in\mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'},\Lambda)$ can be written as $\beta'=\tilde\beta'|_{X_{\eta'}}-(\pi_{b'})\cup\beta_0'$, for some $\tilde\beta'\in\mathrm{H}_{\acute{e}t}^{i+1}(\mathcal X'_{U'},\Lambda)$ with $b'\in U'\subseteq B'$, $\beta_0'\in\mathrm{H}_{\acute{e}t}^i(X_{\eta'},\Lambda)$ and $\pi_{b'}$ a uniformizer at $b'$. \end{lemma} \begin{remark}\label{deRham2} To elaborate on the relation with logarithmic deRham cohomology of \Cref{deRham1}, in \cite[\textsection 4.1]{PS} to define the residue map one uses the fact that we can write $\omega$ as $\omega=\frac{\mathrm{d}t}{t}\wedge \eta+\eta'$, where $\eta'$ (and $\eta$) is global. The above lemma is analogous to this. \end{remark} \begin{proof} First of all, note that the commutativity of the diagram \[ \begin{tikzcd} X'_{b'}\ar[r,"\cong"]\ar[d]&X_b\ar[d]\\ \mathcal X'\ar[r]&\mathcal X \end{tikzcd} \] implies that $\mathrm{H}_{\acute{e}t}^i(\mathcal X',\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X'_{b'},\Lambda)$ is also surjective. So without loss of generality, assume $B'=B$.
Let $\beta\in\mathrm{H}_{\acute{e}t}^{i+1}(X_\eta,\Lambda)$, then by assumption, $Res_b\beta\in\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda)$ can be extended to $\widetilde{Res_b\beta}\in\mathrm{H}_{\acute{e}t}^i(\mathcal X,\Lambda)$. Let $\beta_0:=\widetilde{Res_b\beta}|_{X_\eta}$, then using \Cref{global sp} we compute $Res_b(\beta+(\pi_b)\cup\beta_0)=Res_b\beta-sp_b(\beta_0)=Res_b\beta-\widetilde{Res_b\beta}|_{X_b}=0$, so there exists $\tilde\beta\in\mathrm{H}_{\acute{e}t}^{i+1}(\mathcal X_U,\Lambda)$ with $b\in U\subseteq B$ such that $\tilde\beta|_{X_\eta}=\beta+(\pi_b)\cup\beta_0$, as wished. \end{proof}
\begin{lemma}\label{completion of curve} Let $f\colon(U,u)\to (B,b)$ be an irreducible étale neighborhood of $b\in B$ a smooth complete curve. Then there exists a smooth curve $B'$ with $U\subseteq B'$ and a finite morphism $B'\to B$ extending $U\to B$. \end{lemma} \begin{proof} Let $B'$ be the normalized completion of $U$. Then we have a rational map $B'\to B$. Using the valuative criterion of properness and the fact that $B$ is complete, we can extend over every $b'\in B'\setminus U$ giving a map $B'\to B$ (\cite[0BXZ]{Stacks}). As $B'$ is smooth, complete and irreducible, \cite[Proposition II.6.8]{Hart} implies that $B'\to B$ is finite. \end{proof}
\begin{prop}\label{general-comparison-cosp} Same assumptions as in \Cref{Cohomology Decomposition}, but now assume that $\eta\subseteq\eta'$ is a finite separable extension (that is, $B'\to B$ is locally étale). Then the following diagram commutes \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{i+1}(X_{\bar\eta},\Lambda)\ar[r,"cosp^\vee"]&\mathrm{H}_{\acute{e}t}^{i+1}(X_b,\Lambda)\\ \mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'},\Lambda)\ar[u]\ar[r,"sp_{b'}"]&\mathrm{H}_{\acute{e}t}^{i+1}(X'_{b'},\Lambda)\ar[u,"\cong"] \end{tikzcd}. \] \end{prop} \begin{proof} By \Cref{dualcosp-base-point-indep} it suffices to show the commutativity of \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{i+1}(X_{\bar{\eta}},\Lambda)\ar[r,"cosp^\vee"]&\mathrm{H}_{\acute{e}t}^{i+1}(X'_{b'},\Lambda)\\ \mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'},\Lambda)\ar[u]\ar[ru,"sp_{b'}"]& \end{tikzcd}. \] For the sake of notation, write $\eta'=\eta$ and $b'=b$. Then we need the equality \[ \int_{X_{\bar\eta}}cosp(\alpha)\cup\beta_{\bar\eta}=cosp\int_{X_b}\alpha\cup sp_b(\beta) \] to hold for every $\alpha\in\mathrm{H}_c^{2d-i-1}(X_b)$ and every $\beta\in\mathrm{H}_{\acute{e}t}^{i+1}(X_\eta)$. As $\alpha\in (R^{2d-i-1}f_!\Lambda)_b$, there exists an étale neighborhood $(U,u)\to (B,b)$ so that $\alpha$ can be represented by some $\tilde\alpha\in\Gamma(U,R^{2d-i-1}f_!\Lambda)$ with $\tilde\alpha_u=\alpha$ and $\tilde\alpha_{\bar\eta}=cosp(\alpha)$.
We may assume that $U$ is irreducible with generic point $\eta'$, so we view $U\to B$ as an open of some finite $B'\to B$ by \Cref{completion of curve}. Pull back $\beta$ to $\beta'\in\mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'},\Lambda)$, then by \Cref{Cohomology Decomposition} we can write $\beta'=\tilde\beta'|_{X_{\eta'}}-(\pi_u)\cup\beta_0'$. Now we compute that \[
\int_{X_{\bar\eta}}cosp(\alpha)\cup\beta'_{\bar\eta}=\int_{X_{\bar\eta'}}\tilde\alpha_{\bar\eta'}\cup\tilde\beta'|_{X_{\bar\eta}}=\int_{X_{\bar\eta}}(\tilde\alpha\cup\tilde\beta')_{\bar\eta} \] and \[
cosp\int_{X_u}\alpha\cup sp_u(\beta')=cosp\int_{X_u}\tilde\alpha_u\cup\tilde\beta'|_{X_u}=cosp\int_{X_u}(\tilde\alpha\cup\tilde\beta')_u, \] which are equal by \Cref{constant trace}. This equality implies that the diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{i+1}(X_{\bar{\eta}},\Lambda)\ar[r,"cosp^\vee"]&\mathrm{H}_{\acute{e}t}^{i+1}(X_u,\Lambda)\\ \mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'},\Lambda)\ar[u]\ar[ru,"sp_u"]& \end{tikzcd} \] commutes. By \Cref{compatibility-sp-fin-ext} we know that \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{i+1}(X_{\eta'},\Lambda)\ar[r,"sp_u"]&\mathrm{H}_{\acute{e}t}^{i+1}(X_u,\Lambda)\ar[d,"\cong"]\\ \mathrm{H}_{\acute{e}t}^{i+1}(X_\eta,\Lambda)\ar[u]\ar[r,"sp_b"]&\mathrm{H}_{\acute{e}t}^{i+1}(X_b,\Lambda) \end{tikzcd} \] commutes. So again by \Cref{dualcosp-base-point-indep} we conclude the result. \end{proof}
\subsection{Comparison for Lefschetz Pencils}\label{L pencil} Here we are going to apply the previous section on a Lefschetz pencil $f\colon\mathcal X\to\mathbb{P}^1$ with fibres $2n-1$ dimensional projective hypersurfaces of degree $\geq 3$, \cite[III\textsection 1]{FK}. We let $0\in\mathbb{P}^1$ be a critical value and $X_0$ its corresponding singular fibre with ordinary double point $x_0\in X_0$. Note that now (locally around $0$) ${\mathcal X\setminus\{x_0\}\to\mathbb{P}^1}$ is a smooth family. We will then consider the maps $cosp^\vee\colon\mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_0\setminus\{x_0\},\Lambda)$ and $sp\colon \mathrm{H}_{\acute{e}t}^i(X_\eta,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_0\setminus \{x_0\},\Lambda)$ and show they are compatible in the sense of the previous section.
\begin{prop}\label{extendable singular point} Let $X\subseteq\mathbb{P}^{n+1}$ be a hypersurface of degree $\geq3$ with ordinary double point $x\in X$. Then the restriction map $\mathrm{H}_{\acute{e}t}^{2r}(X,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2r}(X\setminus \{x\},\Lambda)$ is surjective for every $2r<n$. \end{prop} \begin{proof} Let $\pi\colon\widetilde X:=\mathrm{Bl}_{\{x\}}(X)\to X$ be the blow-up with exceptional divisor $i\colon D\to\widetilde X$, which is a smooth quadric. Write $j\colon U=X\setminus\{x\}\cong\widetilde X\setminus D\to\widetilde X$ for the inclusion of the complement. The localization exact sequence gives us a commutative diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{2r-2}(D,\Lambda)\ar[r,"i_\ast"]&\mathrm{H}_{\acute{e}t}^{2r}(\widetilde X,\Lambda)\ar[r, two heads,"j^\ast"]&\mathrm{H}_{\acute{e}t}^{2r}(U,\Lambda)\ar[r]&\mathrm{H}_{\acute{e}t}^{2r-1}(D,\Lambda)=0\\ &\mathrm{H}_{\acute{e}t}^{2r}(X,\Lambda)\ar[u,"\pi^\ast"]\ar[r]&\mathrm{H}_{\acute{e}t}^{2r}(X\setminus\{x\},\Lambda)\ar[u,"\cong"]& \end{tikzcd}. \]
To show that $j^\ast\circ\pi^\ast$ is surjective, it suffices to show that $\pi^\ast+i_\ast$ is surjective. For this, we can view $\widetilde X\subseteq\mathrm{Bl}_{\{x\}}(\mathbb{P}^{n+1}):=\widetilde{\mathbb{P}}$ as an embedded blow-up inside $\tau\colon\widetilde{\mathbb{P}}\to\mathbb{P}^{n+1}$, with exceptional divisor $l\colon\mathbb{P}^n\cong E\to\widetilde{\mathbb{P}}$. By \cite[Lemma 3.10]{Lindner} we know that $\widetilde X\subseteq\widetilde{\mathbb{P}}$ is an ample divisor, so that $\mathrm{H}_{\acute{e}t}^i(\widetilde{\mathbb{P}},\Lambda)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^i(\widetilde X,\Lambda)$ is an isomorphism for $i<n$.
We claim that the following diagram commutes \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^{2r}(\mathbb{P}^{n+1},\Lambda)\oplus\mathrm{H}_{\acute{e}t}^{2r-2}(E,\Lambda)\ar[r,"\tau^\ast+l_\ast"]\ar[r,swap,"\cong"]\ar[d]&\mathrm{H}_{\acute{e}t}^{2r}(\widetilde{\mathbb{P}},\Lambda)\ar[d,"\cong"]\\ \mathrm{H}_{\acute{e}t}^{2r}(X,\Lambda)\oplus\mathrm{H}_{\acute{e}t}^{2r-2}(D,\Lambda)\ar[r,"\pi^\ast+i_\ast"]&\mathrm{H}_{\acute{e}t}^{2r}(\widetilde X,\Lambda) \end{tikzcd}. \] For this, write $k\colon\widetilde X\to\widetilde{\mathbb{P}}$, then as the cohomology of $E$ is generated by $j^\ast[E]^{r-1}$ we compute that $k^\ast j_\ast j^\ast[E]^{r-1}=k^\ast [E]^r=[D]^r$ and $i_\ast k^\ast j^\ast[E]^{r-1}=i_\ast i^\ast[D]^{r-1}=[D]^r$.
Now the top horizontal map is an isomorphism by the blow-up formula and the right vertical map is an isomorphism for $2r<n$ by the above. So we conclude that the bottom horizontal map is surjective, as wished. \end{proof}
\begin{theorem}\label{Lefschetz Extendable} Let $\mathcal X\to\mathbb{P}^1$ be a Lefschetz pencil as in the beginning of this section. Let $B'\to\mathbb{P}^1$ be a finite map and $\mathcal X':=\mathcal X\times_{\mathbb{P}^1}B'$ be the base change. Then $\mathrm{H}_{\acute{e}t}^{2r}(\mathcal X,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2r}(X_0\setminus\{x_0\},\Lambda)$ is surjective for every $2r<\dim X_0$. \end{theorem} \begin{proof} Suppose $\mathcal X$ is the Lefschetz pencil of hyperplane sections of some smooth projective variety $Y$ (of the same dimension). Then $\mathcal X$ can be identified with the blow-up of $Y$ in the base locus, giving $\mathbb{P}^1\leftarrow\mathcal X\to Y$. By the Lefschetz hyperplane theorem, we have that $\mathrm{H}_{\acute{e}t}^i(Y,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_0,\Lambda)$ is an isomorphism for $i<\dim X_0$. If the pencil consists of projective hypersurfaces, we conclude from \Cref{extendable singular point} that $\mathrm{H}_{\acute{e}t}^{2r}(X_0,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2r}(X_0\setminus\{x_0\},\Lambda)$ is surjective for $2r<\dim X_0$. Now note that \[ \begin{tikzcd} X_0\setminus\{x_0\}\ar[r]\ar[d]&X_0\ar[r]&Y\\ \mathcal X\ar[rru]&& \end{tikzcd} \] commutes, and since pull-back along the top horizontal composition is surjective, pull-back along the vertical map is surjective as well.
\end{proof}
\begin{cor}\label{comparison-Lefschetz} Let $\mathcal X\to\mathbb{P}^1$ be a Lefschetz pencil with odd dimensional fibres and $B'\to\mathbb{P}^1$ be a finite morphism from a smooth $B'$ with generic point $\eta'$, separable over $\eta$. Let $0'\in B'$ lying over $0\in \mathbb{P}^1$, then the diagram \[ \begin{tikzcd} \mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\Lambda)\ar[r,"cosp^\vee"]&\mathrm{H}_{\acute{e}t}^i(X_0\setminus\{x_0\},\Lambda)\\ \mathrm{H}_{\acute{e}t}^i(X_{\eta'},\Lambda)\ar[u]\ar[r,"sp_{0'}"]&\mathrm{H}_{\acute{e}t}^i(X'_{0'}\setminus\{x'_{0'}\},\Lambda)\ar[u,"(f_{0'})_\ast"]\ar[u,swap,"\cong"] \end{tikzcd} \] commutes for every $i\leq\dim X_0$. \end{cor} \begin{proof} Write $\mathcal X_{sm}$ for $\mathcal X$ with all the singular points of the singular fibres removed. The result follows if we can apply \Cref{general-comparison-cosp} to the smooth family $\mathcal X_{sm}\to\mathbb{P}^1$. This is possible if we show that $\mathrm{H}_{\acute{e}t}^{i-1}(\mathcal X_{sm},\Lambda)\to\mathrm{H}_{\acute{e}t}^{i-1}(X_0\setminus\{x_0\},\Lambda)$ is surjective for every $i\leq\dim(X_0)$.
When $i-1$ is even, this follows from \Cref{Lefschetz Extendable} as the surjection $\mathrm{H}_{\acute{e}t}^{2r}(\mathcal X,\Lambda)\twoheadrightarrow\mathrm{H}_{\acute{e}t}^{2r}(X_0\setminus\{x_0\},\Lambda)$ decomposes as $\mathrm{H}_{\acute{e}t}^{2r}(\mathcal X,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2r}(\mathcal X_{sm},\Lambda)\to\mathrm{H}_{\acute{e}t}^{2r}(X_0\setminus\{x_0\},\Lambda)$.
When $i-1$ is odd, we compute that $\mathrm{H}_{\acute{e}t}^{i-1}(X_0\setminus\{x_0\},\Lambda)\cong\mathrm{H}_c^{2d-i+1}(X_0\setminus\{x_0\},\Lambda)^\vee\cong\mathrm{H}_{\acute{e}t}^{2d-i+1}(X_0,\Lambda)^\vee=0$ by \cite[Theorem III.4.3(0)]{FK}, so the required surjectivity holds trivially and \Cref{general-comparison-cosp} applies. \end{proof}
\begin{remark} \Cref{comparison-Lefschetz} can be viewed more generally as follows, let $f\colon\mathcal X\to B$ be a proper degeneration over a smooth curve $B$. Then by \cite[0A3S]{Stacks} and the proof of \cite[095T]{Stacks} we get that $\mathrm{H}_{\acute{e}t}^i(\mathcal X,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda)$ is surjective for every closed point $b$.
Let $b$ be the critical value and set $\mathcal X_{sm}:=\mathcal X\setminus Sing(X_b)$ so that $(X_b)_{sm}:=X_b\cap\mathcal X_{sm}$ and $\mathcal X_{sm}\to B$ is (around $b$) a smooth family. Then with the above, if $\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda)\to\mathrm{H}_{\acute{e}t}^i((X_b)_{sm},\Lambda)$ is surjective, we have $\mathrm{H}_{\acute{e}t}^i(\mathcal X_{sm},\Lambda)\to\mathrm{H}_{\acute{e}t}^i((X_b)_{sm},\Lambda)$ is surjective. So as in \Cref{general-comparison-cosp}, the surjectivity of $\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda)\to\mathrm{H}_{\acute{e}t}^i((X_b)_{sm},\Lambda)$ implies a comparison between $cosp^\vee$ and $sp$. \end{remark}
\section{Application to the Integral Hodge/Tate Conjecture}\label{main section} In this section we present our main result. We start with some computations of the cohomology groups of varieties related to an ordinary double point. After this we give the full computation of the cohomology of an Enriques surface. This is probably well known to the experts, but no precise reference could be found. After this, we obtain our main results directly.
\subsection{Residue of an Ordinary Double Point}
Let $X_0\subseteq\mathbb{P}^{2n}$ be a projective hypersurface of dimension $2n-1$ over an algebraically closed field $k$ with ordinary double point $x_0\in X_0$ and of degree $\geq 3$. Let $\widetilde{X_0}:=\mathrm{Bl}_{\{x_0\}}(X_0)$ be the blow-up of $X_0$ in this singular point. Then $\widetilde{X_0}$ is smooth and the exceptional divisor is a smooth quadric $D\subseteq\widetilde{X_0}$ of dimension $2n-2$. We view $\widetilde{X_0}\subseteq\widetilde{\mathbb{P}^{2n}}$, where $\widetilde{\mathbb{P}^{2n}}:=\mathrm{Bl}_{\{x_0\}}(\mathbb{P}^{2n})$ with exceptional divisor $\mathbb{P}^{2n-1}\subseteq\widetilde{\mathbb{P}^{2n}}$ and $D\subseteq\mathbb{P}^{2n-1}$.
Let $\Lambda=\underline{\Lambda}$ be a constant sheaf with finite stalks of order prime to $\mathrm{char}(k)$. We note that $\widetilde{X_0}\setminus D\;\tilde{\rightarrow}\; X_0\setminus\{x_0\}$ is an isomorphism so that we have a residue map \[ Res\colon\mathrm{H}_{\acute{e}t}^i(X_0\setminus\{x_0\},\Lambda)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^i(\widetilde X_0\setminus D,\Lambda)\to\mathrm{H}_{\acute{e}t}^{i-1}(D,\Lambda). \]
\begin{lemma}\label{rank 1} We have $\mathrm{H}_{\acute{e}t}^{2n}(\widetilde{X_0}\setminus D,\Lambda)\cong\begin{cases}\Lambda&\text{ if }n>1;\\ 0&\text{ if }n=1.\end{cases}$ \end{lemma} \begin{proof} Suppose $n=1$, then the localization sequence gives $0\to\mathrm{H}_c^0(X_0\setminus\{x_0\},\Lambda)\to\mathrm{H}_{\acute{e}t}^0(X_0,\Lambda)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^0(x_0,\Lambda)$. So $\mathrm{H}_{\acute{e}t}^2(\tilde X_0\setminus D,\Lambda)\cong\mathrm{H}_{\acute{e}t}^2(X_0\setminus\{x_0\},\Lambda)\cong\mathrm{H}_c^0(X_0\setminus\{x_0\},\Lambda)^\vee=0$.
Now suppose $n>1$, then the Lefschetz Hyperplane Theorem \cite[Corollary I.9.4]{FK} gives that $\Lambda\cong\mathrm{H}_{\acute{e}t}^{2n-2}(\mathbb{P}^{2n},\Lambda)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^{2n-2}(X_0,\Lambda)$. Using the localization exact sequence again now gives that $\mathrm{H}^{2n-2}_c(X_0\setminus\{x_0\},\Lambda)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^{2n-2}(X_0,\Lambda)$. So by Poincaré Duality we conclude that $\mathrm{H}_{\acute{e}t}^{2n}(\widetilde X_0\setminus D,\Lambda)\cong \mathrm{H}_{\acute{e}t}^{2n}(X_0\setminus\{x_0\},\Lambda)\cong \Hom_\Lambda(\mathrm{H}^{2n-2}_c(X_0\setminus\{x_0\},\Lambda),\Lambda)\cong\Lambda$. \end{proof}
\begin{lemma}\label{rank 2} We have $\mathrm{H}_{\acute{e}t}^{2n}(\widetilde{X_0},\Lambda)\cong\begin{cases}\Lambda^{\oplus 2}&\text{ if }n>1;\\ \Lambda&\text{ if }n=1.\end{cases}$ \end{lemma} \begin{proof} When $n=1$ we see that $\mathrm{H}_{\acute{e}t}^2(\widetilde{X_0},\Lambda)\cong\mathrm{H}_{\acute{e}t}^0(\widetilde{X_0},\Lambda)^\vee\cong\Lambda$.
Suppose $n>1$, using Poincaré Duality again we have $\mathrm{H}_{\acute{e}t}^{2n}(\widetilde{X_0},\Lambda)\cong\Hom_{\Lambda}(\mathrm{H}_{\acute{e}t}^{2n-2}(\widetilde X_0,\Lambda),\Lambda)$, so it suffices to show that $\mathrm{H}_{\acute{e}t}^{2n-2}(\widetilde{X_0},\Lambda)\cong\Lambda^{\oplus 2}$. As the degree of $X_0$ is at least 3, then by \cite[Lemma 3.10]{Lindner} $\widetilde{X_0}$ is an ample divisor of $\widetilde{\mathbb{P}^{2n}}$, so that $\mathrm{H}_{\acute{e}t}^{2n-2}(\widetilde{\mathbb{P}^{2n}},\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n-2}(\widetilde{X_0},\Lambda)$ is an isomorphism. So the result follows from the blow-up formula. \end{proof}
\begin{prop}\label{non-zero residue} The residue map $\mathrm{H}_{\acute{e}t}^{2n-1}(\widetilde{X_0}\setminus D,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n-2}(D,\Lambda)$ is non-zero. \end{prop} \begin{proof} Consider the long exact sequence \[ \mathrm{H}_{\acute{e}t}^{2n-1}(\widetilde{X_0}\setminus D,\Lambda)\overset{Res}{\to}\mathrm{H}_{\acute{e}t}^{2n-2}(D,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n}(\widetilde{X_0},\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n}(\widetilde{X_0}\setminus D,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n-1}(D,\Lambda). \] Plugging in the computations from \Cref{rank 1} and \Cref{rank 2} and the cohomology of a smooth quadric (\cite[Proposition III.4.4]{FK}) gives \begin{align*} \mathrm{H}_{\acute{e}t}^{2n-1}(\widetilde{X_0}\setminus D,\Lambda)\overset{Res}{\to}\Lambda^{\oplus 2}\to\Lambda\to0\to 0&\quad\text{ if } n=1;\\ \mathrm{H}_{\acute{e}t}^{2n-1}(\widetilde{X_0}\setminus D,\Lambda)\overset{Res}{\to}\Lambda^{\oplus 2}\to\Lambda^{\oplus 2}\to\Lambda\to0&\quad\text{ if }n>1. \end{align*} So in the case $n=1$, the residue map clearly cannot be zero. If the residue map were zero in the second case, then the second map would be an isomorphism, implying that the last surjection is zero, a contradiction. \qedhere
\end{proof}
\begin{prop}\label{Existence Non-zero ResidueSp} Let $\mathcal X\to B$ be a Lefschetz pencil with singular fibre $X_0$. There exists a finite extension $B'\to B$ and $\beta'\in\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\eta'},\Lambda)$ so that $Res(sp_{0'}(\beta'))\neq 0$ in $\mathrm{H}_{\acute{e}t}^{2n-2}(D,\Lambda)$ for some $0'\in B'$ mapping to $0\in B$. \end{prop} \begin{proof} By \cite[Theorem III.4.3(1)]{FK} we have that $cosp\colon\mathrm{H}_{\acute{e}t}^{2n-1}(X_0,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\Lambda)$ is injective (with cokernel generated by the vanishing sphere). Using \Cref{relation cosp} this implies that $cosp\colon\mathrm{H}_c^{2n-1}(X_0\setminus \{x_0\},\Lambda)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^{2n-1}(X_0,\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\Lambda)$ is injective. As $\Lambda$ is an injective $\Lambda$-module, the dual $cosp^\vee\colon\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\Lambda)\to\mathrm{H}_{\acute{e}t}^{2n-1}(X_0\setminus \{x_0\},\Lambda)$ is surjective (with kernel generated by the vanishing sphere). Thus by \Cref{non-zero residue}, there exists a $\beta\in\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\Lambda)$ such that $cosp^\vee(\beta)$ has non-zero residue.
Let $\beta'\in\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\eta'},\Lambda)$ representing $\beta$ for some finite (separable) extension $\eta'\to\eta$. Then by \Cref{comparison-Lefschetz} we conclude that $sp_{0'}(\beta')=cosp^\vee(\beta)$ has non-zero residue. \end{proof}
\subsection{Non-Vanishing Refined Unramified Cohomology}
We start with the computation of the cohomology of an Enriques surface. \begin{prop}\label{cohom-Enriques} Let $S$ be an Enriques surface over an algebraically closed field $k$ and let $\ell\neq \mathrm{char}(k)$ be a prime. For $\ell\neq 2$ we have \begin{align*} \mathrm{H}_{\acute{e}t}^0(S,\mu_{\ell^r})&\cong\mathbb{Z}/\ell^r\mathbb{Z}\cong\mathrm{H}_{\acute{e}t}^4(S,\mu_{\ell^r}) & \mathrm{H}_{\acute{e}t}^0(S,\mathbb{Z}_\ell)&\cong\mathbb{Z}_\ell\cong\mathrm{H}_{\acute{e}t}^4(S,\mathbb{Z}_\ell);\\ \mathrm{H}_{\acute{e}t}^1(S,\mu_{\ell^r})&=0=\mathrm{H}_{\acute{e}t}^3(S,\mu_{\ell^r}) & \mathrm{H}_{\acute{e}t}^1(S,\mathbb{Z}_\ell)&=0=\mathrm{H}_{\acute{e}t}^3(S,\mathbb{Z}_\ell);\\ \mathrm{H}_{\acute{e}t}^2(S,\mu_{\ell^r})&\cong(\mathbb{Z}/\ell^r\mathbb{Z})^{\oplus 10} & \mathrm{H}_{\acute{e}t}^2(S,\mathbb{Z}_\ell)&\cong\mathbb{Z}_\ell^{\oplus 10}. \end{align*} If $\ell=2$, we have \begin{align*} \mathrm{H}_{\acute{e}t}^0(S,\mu_{2^r})&\cong\mathbb{Z}/2^r\mathbb{Z}\cong\mathrm{H}_{\acute{e}t}^4(S,\mu_{2^r}) & \mathrm{H}_{\acute{e}t}^0(S,\mathbb{Z}_2)&\cong\mathbb{Z}_2\cong\mathrm{H}_{\acute{e}t}^4(S,\mathbb{Z}_2);\\ \mathrm{H}_{\acute{e}t}^1(S,\mu_{2^r})&=\mathbb{Z}/2\mathbb{Z} & \mathrm{H}_{\acute{e}t}^1(S,\mathbb{Z}_2)&=0;\\ \mathrm{H}_{\acute{e}t}^2(S,\mu_{2^r})&\cong(\mathbb{Z}/2^r\mathbb{Z})^{\oplus 10}\oplus(\mathbb{Z}/2\mathbb{Z})^{\oplus 2} & \mathrm{H}_{\acute{e}t}^2(S,\mathbb{Z}_2)&\cong\mathbb{Z}_2^{\oplus 10}\oplus\mathbb{Z}/2\mathbb{Z};\\ \mathrm{H}_{\acute{e}t}^3(S,\mu_{2^r})&=\mathbb{Z}/2\mathbb{Z} & \mathrm{H}_{\acute{e}t}^3(S,\mathbb{Z}_2)&=\mathbb{Z}/2\mathbb{Z}. \end{align*} \end{prop} \begin{proof} By the assumptions on $\ell$ and $k$, we see that $\mathrm{H}_{\acute{e}t}^0(S,\mu_{\ell^r})=\Gamma(S,\mu_{\ell^r})\cong\mathbb{Z}/\ell^r\mathbb{Z}$ and $\int_S\colon\mathrm{H}_{\acute{e}t}^4(S,\mu_{\ell^r})\;\tilde{\rightarrow}\;\mathbb{Z}/\ell^r\mathbb{Z}$. From this, the $\mathbb{Z}_\ell$ coefficients also follow.
We will use the Kummer sequence $0\to\mu_{\ell^r}\to\mathbb{G}_m\overset{(-)^{\ell^r}}{\to}\mathbb{G}_m\to 0$ to compute the remaining cohomological degrees. By assumption on $k$, the short exact Kummer sequence induces the exact sequences \[ 0\to \Pic(S)/\ell^r \Pic(S)\to\mathrm{H}_{\acute{e}t}^2(S,\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^2(S,\mathbb{G}_m)[\ell^r]\to0 \] and \[ 0\to\mathrm{H}_{\acute{e}t}^1(S,\mu_{\ell^r})\to\Pic(S)[\ell^r]\to0. \]
First we claim that $\Pic(S)\cong\mathbb{Z}^{\oplus 10}\oplus\mathbb{Z}/2\mathbb{Z}$, which gives the torsion cohomology in degree 1, and in turn degree 3 via Poincaré Duality. For the claim, use \cite[Theorem 1.2.7]{Enr-Surf} giving that $rk(NS(S))=\rho(S)=10$, where $NS(S)=\Pic(S)/\Pic^0(S)$. And Theorem 1.2.1 of loc. cit. gives $\Pic^0(S)=0$, hence we conclude that $\Pic(S)$ is of rank 10. Moreover, in the proof of Theorem 1.2.1 it is given that $Tors(\Pic(S))=\mathbb{Z}/2\mathbb{Z}$, generated by the canonical divisor, proving the claim.
By \cite[Theorem 1.2.17]{Enr-Surf}, we have that $Br(S)\cong\mathbb{Z}/2\mathbb{Z}$, implying that $\mathrm{H}_{\acute{e}t}^2(S,\mathbb{G}_m)[\ell^r]=\begin{cases}0 &\text{ if }\ell\neq 2\\ \mathbb{Z}/2\mathbb{Z}&\text{ if } \ell=2\end{cases}$. So if $\ell\neq 2$, we have a full description of the torsion cohomology. Also, combining the commutative diagram \[ \begin{tikzcd} 0\ar[r]&\mu_{\ell^r}\ar[r]\ar[d,"(-)^\ell"]&\mathbb{G}_m\ar[r,"(-)^{\ell^r}"]\ar[d,"(-)^\ell"]&\mathbb{G}_m\ar[r]\ar[d,"\mathrm{id}"]&0\\ 0\ar[r]&\mu_{\ell^{r-1}}\ar[r]&\mathbb{G}_m\ar[r,"(-)^{\ell^{r-1}}"]&\mathbb{G}_m\ar[r]&0\\ \end{tikzcd} \] with the long exact sequences gives the transition morphisms in this case, computing the $\ell$-adic cohomology.
For $\ell=2$ we obtain the short exact sequence \[ 0\to(\mathbb{Z}/2^r\mathbb{Z})^{\oplus 10}\oplus\mathbb{Z}/2\mathbb{Z}\to\mathrm{H}_{\acute{e}t}^2(S,\mu_{2^r})\to\mathbb{Z}/2\mathbb{Z}\to 0, \] and we claim that this sequence splits for every $r$. Clearly it does for $r=1$, so we assume $r\geq 2$. As $\mathrm{Ext}^1_{\mathbb{Z}/2^r\mathbb{Z}}((\mathbb{Z}/2^r\mathbb{Z})^{\oplus 10}\oplus \mathbb{Z}/2\mathbb{Z},\mathbb{Z}/2\mathbb{Z})\cong\mathbb{Z}/2\mathbb{Z}$, there are only two options, either the sequence splits, or the middle term equals $(\mathbb{Z}/2^r\mathbb{Z})^{\oplus 10}\oplus\mathbb{Z}/4\mathbb{Z}$. Suppose for some $r$ the term is $\mathbb{Z}/4\mathbb{Z}$, then using the above commutative diagram, we would then have a commutative diagram \[ \begin{tikzcd} 0\ar[r]&\mathbb{Z}/2\mathbb{Z}\ar[r]\ar[d,"\mathrm{id}"]&\mathbb{Z}/4\mathbb{Z}\ar[r]\ar[d,"f"]&\mathbb{Z}/2\mathbb{Z}\ar[r]\ar[d,"0"]&0\\ 0\ar[r]&\mathbb{Z}/2\mathbb{Z}\ar[r]&A\ar[r]&\mathbb{Z}/2\mathbb{Z}\ar[r]&0\\ \end{tikzcd}, \] where $A\in\{(\mathbb{Z}/2\mathbb{Z})^{\oplus2},\mathbb{Z}/4\mathbb{Z}\}$. But one can check that for both options for $A$, such an $f$ making the diagram commute does not exist. So we conclude that the sequence must split for every $r$ and thus $\mathrm{H}_{\acute{e}t}^2(S,\mu_{2^r})\cong(\mathbb{Z}/2^r\mathbb{Z})^{\oplus 10}\oplus(\mathbb{Z}/2\mathbb{Z})^{\oplus 2}$. And in turn $\mathrm{H}_{\acute{e}t}^2(S,\mathbb{Z}_2)\cong\mathbb{Z}_2^{\oplus 10}\oplus\mathbb{Z}/2\mathbb{Z}$.
The first paragraph implies that $\mathrm{H}_{\acute{e}t}^1(S,\mathbb{Z}_2)=0$. For $\mathrm{H}_{\acute{e}t}^3(S,\mathbb{Z}_2)$ we use the long exact sequence associated to the Bockstein short exact sequence $0\to\mu_{2}\to\mu_{2^r}\overset{(-)^2}{\to}\mu_{2^{r-1}}\to0$ to conclude that $\mathrm{H}_{\acute{e}t}^3(S,\mu_{2^r})\cong\mathbb{Z}/2\mathbb{Z}\to\mathbb{Z}/2\mathbb{Z}\cong\mathrm{H}_{\acute{e}t}^3(S,\mu_{2^{r-1}})$ is the identity. \end{proof}
\begin{cor}\label{cohom-class-Enriques} Let $S$ be an Enriques surface over an algebraically closed field $k$ with $\mathrm{char}(k)\neq 2$. Then there exists an element $\alpha\in\mathrm{H}_{\acute{e}t}^2(S,\mu_2)$ that does not lift to $\mu_4$ coefficients. In particular $k(\alpha)\neq0$ in $\mathrm{H}_{\acute{e}t}^2(k(S),\mu_2)$. \end{cor} \begin{proof} From the proof of \Cref{cohom-Enriques}, we see that we can pick $\alpha$ to be the non-zero class coming from $\mathrm{H}_{\acute{e}t}^2(S,\mathbb{G}_m)[2]$ as the map $\mathrm{H}_{\acute{e}t}^2(S,\mathbb{G}_m)[4]\to\mathrm{H}_{\acute{e}t}^2(S,\mathbb{G}_m)[2]$ is zero.
If $k(\alpha)$ were zero, then $\alpha$ would be algebraic by \Cref{les Sch}, implying it would lift to any $\mathrm{H}_{\acute{e}t}^2(S,\mu_{\ell^r})$. \end{proof}
\begin{lemma}\label{Product Algebraic and Liftable} Let $X$ and $Y$ be smooth projective varieties. \begin{enumerate} \item Suppose the cohomology of $X$ is algebraic. Then for any $0\neq\alpha\in\mathrm{H}_{\acute{e}t}^{2n}(X,\mu_\ell)$ and $\beta\in\mathrm{H}_{\acute{e}t}^{2m}(Y,\mu_\ell)$ we have $\alpha\times\beta$ is algebraic if and only if $\beta$ is algebraic.
\item Suppose that $\mathrm{H}_{\acute{e}t}^\ast(X,\mu_{\ell^r})\twoheadrightarrow\mathrm{H}_{\acute{e}t}^\ast(X,\mu_\ell)$ is surjective and let $\alpha\in\mathrm{H}_{\acute{e}t}^i(X,\mu_\ell)$ and $\beta\in\mathrm{H}_{\acute{e}t}^j(Y,\mu_\ell)$. Then $\alpha\times\beta$ lifts to $\mu_{\ell^r}$ if and only if $\beta$ does. \end{enumerate} \end{lemma} \begin{proof} \emph{1.} As $\alpha$ is algebraic by assumption, the `if'-part is clear.
Conversely, write $\alpha\times\beta=[\Gamma]$ for some algebraic cycle $\Gamma$. By assumption, we know $\alpha=[Z]$ is algebraic as well. Let $\alpha'\in\mathrm{H}_{\acute{e}t}^{2(d_X-n)}(X,\mu_\ell)$ so that $\int_X\alpha\cup\alpha'=\lambda$ is non-zero, which exists because $\mathrm{H}_{\acute{e}t}^{2n}(X,\mu_\ell)\cong\mathrm{H}_{\acute{e}t}^{2(d_X-n)}(X,\mu_\ell)^\vee$ where $\alpha\mapsto\alpha^\vee(-):=\int_X\alpha\cup-$, which is non-zero by assumption. By the assumption on $X$, there exists a cycle $Z'\in \CH^{d_X-n}(X)$ so that $\alpha'=[Z']$. Then we compute that $\lambda\beta=(\alpha\times\beta)^\ast\alpha'=([\Gamma])^\ast([Z'])$ is algebraic, hence $\beta$ is algebraic because $\lambda$ is invertible.
\emph{2.} As $\alpha$ lifts to $\mu_{\ell^r}$ by assumption, the `if'-part is clear.
Conversely, if we let $\alpha'$ be as before, then by assumption, it lifts to $\mu_{\ell^r}$ so we again conclude that $\lambda\beta=(\alpha\times\beta)^\ast\alpha'$ lifts to $\mu_{\ell^r}$ hence $\beta$ does as the lift of $\lambda$ in $\mu_{\ell^r}$ remains invertible. \end{proof}
\begin{theorem}\label{failure ITC} Let $\alpha\in\mathrm{H}_{\acute{e}t}^2(S,\mu_2)$ be the one from \Cref{cohom-class-Enriques} and let $\mathcal X\to\mathbb{P}^1$ be a Lefschetz pencil as in \Cref{L pencil}. There exists a $\beta\in\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\mu_2)$ such that $F_{n-1}(\beta\times\alpha)\in\frac{\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mu_2)}{\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mathbb{Z}_2)}\cong Z^{n+1}(X_{\bar\eta}\times S)[2]$ is non-zero. \end{theorem} \begin{proof} Let $\beta\in\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\mu_2)$ be the image of the $\beta'$ from \Cref{Existence Non-zero ResidueSp} in $\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\bar\eta},\mu_2)$. And suppose $F_{n-1}(\beta\times\alpha)=0$ in $\mathrm{H}_{\acute{e}t}^{2n+1}(F_{n-1}(X_{\bar\eta}\times S),\mu_2)$. By \Cref{refined-geometric-point} there now exists a finite extension $\eta''\to\eta'$ and $\beta'':=(\beta')_{\eta''}\in\mathrm{H}_{\acute{e}t}^{2n-1}(X_{\eta''},\mu_2)$ so that $F_{n-1}(\beta''\times\alpha)=0\in\mathrm{H}_{\acute{e}t}^{2n+1}(F_{n-1}(X_{\eta''}\times S),\mu_2)$. Suppose $B''\to B'$ is the finite map of smooth curves extending $\eta''\to\eta'$ with some $0''\mapsto 0'$. Then using \Cref{Specialization Refined} and \Cref{product-sp} we see that $F_{n-1}(sp_{0''}(\beta'')\times\alpha)=sp_{0''}F_{n-1}(\beta''\times\alpha)=0$. Taking the residue, we conclude by \Cref{Residue Refined} and \Cref{product-residue} that $F_{n-1}(\gamma\times\alpha)=0$ in $\mathrm{H}_{\acute{e}t}^{2n}(F_{n-1}(D\times S),\mu_2)$, where $\gamma:=Res(sp_{0''}(\beta''))$. By \Cref{les Sch} this happens if and only if $\gamma\times\alpha$ is algebraic. Note that \Cref{compatibility-sp-fin-ext} gives $(f_{0''})_\ast sp_{0''}(\beta'')=sp_{0'}(\beta')$, so by definition of $\beta'$, we conclude that $\gamma\neq 0$. Now \Cref{Product Algebraic and Liftable} gives that $\alpha$ is algebraic, but this would mean that $k(\alpha)=0$, a contradiction.
Now suppose $F_{n-1}(\beta\times\alpha)\in\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mu_2)$ lifts to $\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mathbb{Z}_2)$, then in particular it lifts to $\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mu_4)$. That is, there exists a $\delta\in \mathrm{H}^{2n+1}(F_n(X_{\bar\eta}\times S),\mu_4)$ so that $F_{n-1}(\delta)\mod 2=F_{n-1}(\beta\times\alpha)\in\Hrnr{n-1}^{2n+1}(X_{\bar\eta}\times S,\mu_2)$. This implies that there is a finite extension $\eta''\to\eta'$ and $\delta''\in \mathrm{H}^{2n+1}(F_n(X_{\eta''}\times S),\mu_4)$ with the property that $F_{n-1}(\delta'')\mod 2=F_{n-1}(\beta''\times\alpha)\in\mathrm{H}^{2n+1}(F_{n-1}(X_{\eta''}\times S),\mu_2)$. As before, now also using \Cref{coeff sp}, this implies that $F_{n-1}(\gamma\times\alpha)=0\in\frac{\Hrnr{n-1}^{2n}(D\times S,\mu_2)}{\Hrnr{n-1}^{2n}(D\times S,\mu_4)}$. Using \Cref{liftable lemma} this implies that $\gamma\times\alpha$ lifts to $\mu_4$, hence $\alpha$ would lift to $\mu_4$ by \Cref{Product Algebraic and Liftable}, a contradiction again by our choice of $\alpha$ from \Cref{cohom-class-Enriques}. \end{proof}
\begin{remark}\label{explicite class} Now that we know that $X_{\bar\eta}\times S$ contains non-algebraic torsion cohomology classes, it is interesting to see which class it exactly is. From the proof of \cite[Theorem 7.7]{Sch1} we see that the non-algebraic class is given by $\delta(\beta\times\alpha)\in\mathrm{H}_{\acute{e}t}^{2n+2}(X_{\bar\eta}\times S,\mathbb{Z}_2(n+1))[2]$, where $\delta$ is the Bockstein homomorphism. As the cohomology of $X_{\bar\eta}$ is torsion free, we see that $\delta(\beta\times\alpha)=\beta'\times\delta(\alpha)$, where $\beta'\in\mathrm{H}_{\acute{e}t}^{2n-1}(X,\mathbb{Z}_2)$ is so that $\beta'\mod 2=\beta$, in particular, $\beta'$ is not divisible by $2$. Moreover, $\delta(\alpha)\in\mathrm{H}_{\acute{e}t}^3(S,\mathbb{Z}_2)\cong\mathbb{Z}/2\mathbb{Z}$ is the unique non-zero class. So when $k=\mathbb{C}$, we indeed recover the same non-algebraic class found by Shen in \cite{S}. \end{remark}
\subsection{General Statement}\label{general statement} Let $\mathcal X\to\mathbb{P}^1$ be a Lefschetz pencil as before and write $X:=X_{\bar\eta}$ for its geometric generic fibre. Using the exact same arguments as in the proof of \Cref{failure ITC} we obtain the following.
\begin{theorem}\label{cor-gen-IHC} Let $Y$ be a smooth variety, then the following hold \begin{enumerate}[(i)] \item if $\Hrnr{i-1}^{2i}(Y,\mu_\ell)\neq 0$, then $\Hrnr{n+i-2}^{2n+2i-1}(X\times Y,\mu_\ell)\neq0$;\\ \item if $\frac{\mathrm{H}_{\acute{e}t}^{2i}(Y,\mu_\ell)}{\mathrm{H}_{\acute{e}t}^{2i}(Y,\mu_{\ell^r})}\overset{\ref{liftable lemma}}{\cong}\frac{\Hrnr{i-1}^{2i}(Y,\mu_\ell)}{\Hrnr{i-1}^{2i}(Y,\mu_{\ell^r})}\neq0$ for some $r$, then $Z^{n+i}(X\times Y)[\ell]\neq 0$. \end{enumerate} \end{theorem}
\begin{cor}\label{gen failure ITC} Let $S$ be an Enriques surface. Then $Z^{n+i}(X\times S^i)[2]\neq 0$ for every $i\in\mathbb{Z}_{\geq 1}$. \end{cor} \begin{proof} Let $i\in\mathbb{Z}_{\geq 1}$ and write $Y=S^i$. By \Cref{cor-gen-IHC}, it suffices to show there exists an $\alpha\in\mathrm{H}^{2i}(Y,\mu_\ell)$ that does not lift to $\mu_{\ell^r}$ for some $r$.
\Cref{cohom-class-Enriques} gives the result for $i=1$. Suppose inductively that $\alpha\in\mathrm{H}_{\acute{e}t}^{2i-2}(S^{i-1},\mu_2)$ is a non-extendable class, say the extendability fails for $\mu_{2^r}$. Let $\beta\in\mathrm{H}_{\acute{e}t}^2(S,\mu_{2^r})\cong (\mathbb{Z}/2^r\mathbb{Z})^{10}\oplus(\mathbb{Z}/2\mathbb{Z})^2$ be not a multiple of 2, so that there exists $\beta'\in\mathrm{H}_{\acute{e}t}^2(S,\mu_{2^r})\cong\Hom(\mathrm{H}_{\acute{e}t}^2(S,\mu_{2^r}),\mathbb{Z}/2^r\mathbb{Z})$ with the property $\beta\cup\beta'=1$. As this implies that $\bar\beta\cup\bar\beta'=1$, we have $(\alpha\times\bar\beta)^\ast\bar\beta'=\alpha$, where we write $\overline{(-)}$ for the natural map on cohomology to $\mu_2$ coefficients. So if $\alpha\times\bar\beta$ is extendable, then $\alpha$ is as well. So we conclude that $\alpha\times\bar\beta\in\mathrm{H}_{\acute{e}t}^{2i}(S^i,\mu_2)$ cannot be lifted to $\mu_{2^r}$. (Note that $r=2$ suffices here.) \end{proof}
\subsection{Variety of Lines} Let $X\subseteq\mathbb{P}^{n+1}$ be a smooth cubic hypersurface over $k=\bar k$ with $n$ odd. Let $F=F(X)\subseteq Gr(\mathbb{P}^{n+1},\mathbb{P}^1)$ be the variety of lines, which is known to be smooth of dimension $2n-4$, \cite[Corollary 1.4 + Corollary 1.12]{AK}. We also write $P:=\{(x,[\ell])\in X\times F\mid x\in\ell\}$ for the incidence subscheme of $X\times F$. Note that $P\to F$ is a (smooth) $\mathbb{P}^1$-bundle over $F$ and $P$ is of codimension $n-1$ in $X\times F$. The action by correspondence of $P$ is known as the cylinder homomorphism $\Phi:=[P]^\ast\colon \mathrm{H}_{\acute{e}t}^{3n-6}(F,\mathbb{Z}/\ell^r\mathbb{Z})\to \mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z}/\ell^r\mathbb{Z})$. We can take its inverse limit over $r$ to obtain $\Phi\colon \mathrm{H}_{\acute{e}t}^{3n-6}(F,\mathbb{Z}_\ell)\to\mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z}_\ell)$. The following is a direct generalization of \cite[Lemma 3.2]{Renjie} (which comes from \cite[Lemma 3.17]{Renjiethesis}). \begin{prop}\label{cylinder isom} The cylinder homomorphism $\Phi\colon\mathrm{H}_{\acute{e}t}^{3n-6}(F,\mathbb{Z}_\ell)\to\mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z}_\ell)$ is an isomorphism. \end{prop} \begin{proof} Suppose first that the transcendence degree of $k$ over its prime field is less than the cardinality of $\mathbb{C}$.
Suppose $\mathrm{char}(k)=0$ so we can embed $k\subseteq \mathbb{C}$, then using \cite[Corollary VI.4.3]{Milne} we have natural isomorphisms induced by the pullback $\mathrm{H}_{\acute{e}t}^i(F,\mu_{\ell^r})\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^i(F_\mathbb{C},\mu_{\ell^r})$ and $\mathrm{H}_{\acute{e}t}^i(X,\mu_{\ell^r})\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^i(X_\mathbb{C},\mu_{\ell^r})$ for every $r$. So it suffices to show $\mathrm{H}_{\acute{e}t}^{3n-6}(F_\mathbb{C},\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^n(X_\mathbb{C},\mu_{\ell^r})$ is an isomorphism for every $r$.
As the cohomology of $X$ and $F$ are torsion free, \cite[\textsection 3.1]{S} and \cite[Corollary 4.14]{Renjiethesis} respectively, we conclude by \cite{Shimada} that for $X$ and $F=F(X)$ over $\mathbb{C}$, the cylinder morphism $\mathrm{H}_{\acute{e}t}^{3n-6}(F,\mathbb{Z})\to\mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z})$ is an isomorphism. Moreover, $\mathrm{H}_{\acute{e}t}^{3n-6}(F,\mathbb{Z}/m\mathbb{Z})\to\mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z}/m\mathbb{Z})$ is an isomorphism for every $m$. So the comparison theorem above implies that $\mathrm{H}_{\acute{e}t}^{3n-6}(F_\mathbb{C},\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^n(X_\mathbb{C},\mu_{\ell^r})$ is an isomorphism for every $r$.
Suppose $\mathrm{char}(k)=p$, then let $R=W_p(k)$ be the Witt ring of mixed characteristic. As is done above \cite[Lemma 3.17]{Renjiethesis} we can lift this smooth projective hypersurface $X$ to $\mathcal X\to R$, with generic point $Q=\mathrm{Frac}(R)$ of characteristic 0 and special fibre $X$.
Now we can embed $Q\subseteq\mathbb{C}$. And thus have a geometric point $\bar Q\colon\mathrm{Spec}(\mathbb{C})\to\mathrm{Spec}(Q)\to\mathrm{Spec}(R)$. So by the smooth and proper base change \cite[Corollary 4.2]{Milne}, it suffices to show $\mathrm{H}_{\acute{e}t}^{3n-6}(F_{\bar Q},\mathbb{Z}_\ell)\to\mathrm{H}_{\acute{e}t}^n(X_{\bar Q},\mathbb{Z}_\ell)$ is an isomorphism. Which follows if we can show that $\mathrm{H}_{\acute{e}t}^{3n-6}(F_{\bar Q},\mu_{\ell^r})\to\mathrm{H}_{\acute{e}t}^n(X_{\bar Q},\mu_{\ell^r})$ is an isomorphism for every $r$. This we have seen above.
For a general field $k$ (without cardinality assumption), because we are working with varieties, we can always find a subfield $k_0\subseteq k$ and $X_0/k_0$ so that $X_0\times_{k_0}k=X$ and $\overline{k_0}$ is small as assumed in the beginning of the proof. Now we can do the argument above, working with $\overline{X_{0}}=X_0\times_{k_0}\bar{k_0}$ and using again \cite[Cor VI.4.3]{Milne} we see that $\mathrm{H}_{\acute{e}t}^i(\overline{X_{0}},\mu_{\ell^r})\cong\mathrm{H}_{\acute{e}t}^i(X,\mu_{\ell^r})$, giving the result for general fields $k$. \end{proof}
Now let $X$ be as in \Cref{general statement} and $F=F(X)$. \begin{cor}\label{var of lines failure} For every $i\in\mathbb{Z}_{\geq 1}$ we have $Z^{\frac{3n-5}{2}+i}(F\times S^i)[2]\neq 0$. \end{cor} \begin{proof} We know by \Cref{gen failure ITC} that $X\times S^i$ contains non-algebraic cohomology classes. Similar to \Cref{explicite class} we see that this class is of the form $\beta\times\alpha\in\mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z}_2)\otimes\mathrm{H}_{\acute{e}t}^{2i+1}(S^i,\mathbb{Z}_2)$.
\Cref{cylinder isom} gives an algebraic correspondence isomorphism $\mathrm{H}_{\acute{e}t}^{3n-6}(F,\mathbb{Z}_2)\otimes\mathrm{H}_{\acute{e}t}^{2i+1}(S^i,\mathbb{Z}_2)\overset{\Phi\times\Delta}{\to}\mathrm{H}_{\acute{e}t}^n(X,\mathbb{Z}_2)\otimes\mathrm{H}_{\acute{e}t}^{2i+1}(S^i,\mathbb{Z}_2)$. As its image contains non-algebraic (torsion) classes by the above, so must $\mathrm{H}_{\acute{e}t}^{3n-5+2i}(F\times S^i,\mathbb{Z}_2)$. \end{proof}
\section{Closing Remarks}\label{closing remarks}
\subsection{Very General Fibre} Given a family $\mathcal X\to S$ over a universal domain $\Omega$, then by \cite[Lemma 2.1]{Vial} we know that the geometric generic fibre is isomorphic to a very general fibre.
Now suppose $\mathcal X\to S$ is defined over any algebraically closed field $k$. Let $k\subseteq\Omega$ be any inclusion into a universal domain. Consider the base change $\mathcal X_\Omega\to S_\Omega$ of $X\to S$ to $\Omega$. By the above we know that there exists a very general fibre $s_\omega\in S_\Omega(\Omega)$ such that $X_{\overline{\Omega(S_\Omega)}}\cong X_{s_\omega}$. Say $s_\omega\mapsto s\in S(k)$, so $s_\omega\subseteq s$ can be seen as a field extension of separably closed fields. Then we know that the map $\mathrm{H}_{\acute{e}t}^\ast(X_s,\mathbb{Z}_\ell)\to\mathrm{H}_{\acute{e}t}^\ast(X_{s_\omega},\mathbb{Z}_\ell)$, the pullback along $X_{s_\omega}\to X_s$, is an isomorphism \cite[0DDG]{Stacks} or \cite[Corollary VI.4.3]{Milne}. The pullback map preserves algebraic cycles, so suppose we have found a non-algebraic class in $\mathrm{H}_{\acute{e}t}^\ast(X_{\overline{\Omega(S)}},\mathbb{Z}_\ell)\cong\mathrm{H}_{\acute{e}t}^\ast(X_{s_\omega},\mathbb{Z}_\ell)$ (eg. \Cref{failure ITC}), then there must also be a non-algebraic class in $\mathrm{H}_{\acute{e}t}^\ast(X_s,\mathbb{Z}_\ell)$, on a very general fibre of $\mathcal X\to S$.
\subsection{The Result by Colliot-Thélène} Here we give a direct generalization of the results of \cite{CT} on which the present paper is based. \begin{prop}[{cf. \cite[Proposition A7]{Gabber}}]\label{Gabber Refined}
Let $X\to S$ be a smooth morphism with $X$ a finite dimensional smooth irreducible variety over a field $k$ with qcqs fibres and $S$ a smooth curve. Let $\ell$ be a prime invertible in $k$ and $\alpha\in \mathrm{H}^i(X,\mu_{\ell^r})$. Then the set $\Phi:=\{s\in S\mid F_j(\alpha|_{X_{\bar s}})=0\in\mathrm{H}^i(F_j (X_{\bar s}),\mu_{\ell^r})\}$ is closed under specialization. \end{prop} \begin{proof}
Let $\eta$ be the generic point of $S$. We want to show if $\eta\in\Phi$, then $s\in\Phi$ for every closed point $s\in S$.
So suppose $F_j(\alpha|_{X_{\bar\eta}})=0$, using \Cref{refined-geometric-point} there exists a finite extension $\eta'\to\eta$ such that $F_j(\alpha|_{X_{\eta'}})=0$. So by \cite[0BXX]{Stacks} there exists a finite morphism $f\colon S'\to S$ where $\eta'$ is the generic point of a smooth curve $S'$. We can find $s'\in S'$ such that $f(s')=s$ and $\kappa(s)\to\kappa(s')$ is a finite extension. We let $R:=\mathcal O_{S',s'}$, which is a DVR as $s'\in S'$ is a smooth point.
The map $\mathrm{Spec}(R)\to S'$ hits precisely $\eta'$ and $s'$. Consider the base change $X_R:=X_{S'}\times_{S'}R$, which has fibres $X_{\eta'}$ and $X_{s'}$. Then using \Cref{global sp} and \Cref{Specialization Refined} we see that $F_j(\alpha|_{X_{s'}})=sp(F_j(\alpha|_{X_{\eta'}}))=0$. Using again \Cref{refined-geometric-point} we conclude that $F_j(\alpha|_{X_{\overline{s}}})=0$ and thus $s\in \Phi$ as wished. \end{proof}
\begin{theorem}[{cf. \cite[Théorème 1.1]{CT}}]\label{CT Refined} Let $\ell$ be a prime and suppose that the natural map $\mathrm{H}^i(X,\mu_{\ell})\to\mathrm{H}^i(F_jX,\mu_{\ell})$ is non-zero. Then there exists an elliptic curve $E$ such that the natural map $\mathrm{H}^{i+1}(X\times E,\mu_{\ell})\to\mathrm{H}^{i+1}(F_j(X\times E),\mu_{\ell})$ is non-zero. \end{theorem} \begin{proof}
As in the proof of \cite[Théorème 1.1]{CT} a result of Gabber \cite[Proposition A4]{Gabber} gives a class $\beta\in\mathrm{H}^1(Y,\mu_\ell)$, where $Y=\mathcal E\to U$ is a family with general fibres elliptic curves and special fibre $\mathbb G_m$ over a $P\in U(\mathbb{Q})$, such that $Res(\beta|_P)\neq 0\in \mathrm{H}^0(\{0\},\mu_\ell)$, where we view $\mathbb G_m=\mathbb{A}^1\setminus\{0\}$.
Let $\alpha\in\mathrm{H}^i(X,\mu_\ell)$ such that $F_j(\alpha)\neq0$. Then $\alpha\times\beta\in\mathrm{H}^{i+1}(X\times Y_\mathbb{C},\mu_\ell)$ and $Res(\alpha\times\beta|_P)=\pm\alpha\times Res(\beta|_P)=u\cdot\alpha\in\mathrm{H}^i(X,\mu_\ell)$, where $u\in\mu_\ell$ is non-zero. Thus $F_j(Res(\alpha\times\beta|_P))=u\cdot F_j(\alpha)$ is non-zero in $\mathrm{H}^i(F_jX,\mu_\ell)$ by assumption. By \Cref{Residue Refined}, we have that $Res(F_j(\alpha\times\beta|_P))\neq 0$ and thus in particular that $F_j(\alpha\times\beta|_P)\neq 0$.
By \Cref{Gabber Refined}, the set $\Phi=\{u\in U\mid F_j(\alpha\times\beta|_{\bar u})=0\}$ is closed under specialization. The above implies that $P\not\in\Phi$, so in particular the $\eta_U\not\in\Phi$. Because the geometric generic fibre can be identified with a very general fibre \cite[Lemma 2.1]{Vial}, we conclude that there exists a very general elliptic curve $E$ such that $\mathrm{H}^{i+1}(X\times E,\mu_{\ell^r})\to\mathrm{H}^{i+1}(F_j(X\times E),\mu_{\ell^r})$ is non-zero. \end{proof}
\begin{cor}[cf. \Cref{cor-gen-IHC}] Let $X/\mathbb{C}$ be a smooth variety and $\alpha\in\mathrm{H}^{2n}(X,\mu_\ell)$ such that $F_{n-1}(\alpha)\neq0\in\mathrm{H}^{2n}(F_{n-1}X,\mu_\ell)$ and $\alpha$ does not lift to $\mu_{\ell^r}$ for some $r$. Then there exists an elliptic curve $E$ such that the integral Hodge conjecture for $n+1$ cycles fails on $X\times E$. \end{cor} \begin{proof} We know that $Z^{n+1}(X\times E)[\ell]\cong\frac{\Hrnr{n-1}^{2n+1}(X\times E,\mu_\ell)}{\Hrnr{n-1}^{2n+1}(X\times E,\mathbb{Z})}$ and by \Cref{CT Refined} we have a non-zero class $F_{n-1}(\alpha\times\beta)\in\Hrnr{n-1}^{2n+1}(X\times E,\mu_\ell)$. By \Cref{refined-geometric-point} we know that $F_{n-1}(\alpha\times\beta)$ lifts to $\mathbb{Z}$ if and only if there exists a finite extenstion $\eta'\to\eta$ such that $F_{n-1}(\alpha\times\beta_{\eta'})$ lifts to $\mathbb{Z}$. Thus $F_{n-1}(\alpha\times\beta_{\eta'})$ lifts to $\mu_{\ell^r}$. This implies that $F_{n-1}(Res(sp(\alpha\times\beta_{\eta'})))$ lifts to $\mu_{\ell^r}$. By \Cref{liftable lemma} this implies that $Res(sp(\alpha\times\beta_{\eta'}))$ lifts to $\mu_{\ell^r}$. But $Res(sp(\alpha\times\beta_{\eta'}))=u\times\alpha$ with $u$ a unit hence this implies that $\alpha$ lifts to $\mu_{\ell^r}$, a contradiction.
We conclude that $F_{n-1}(\alpha\times\beta)$ is non-zero in the quotient $\frac{\Hrnr{n-1}^{2n+1}(X\times E,\mu_\ell)}{\Hrnr{n-1}^{2n+1}(X\times E,\mathbb{Z})}\cong Z^{n+1}(X\times E)[\ell]$. \end{proof}
\subsection{Refined Cospecialization}
In this section we show that the dual of the cospecialization map \Cref{def dual cosp} can be defined compatibly on the refinement degrees. With this, the application on the integral Hodge/Tate conjecture (eg. \Cref{failure ITC}) could have been concluded, without making use of the comparison between the specialization and the cospecialization map \Cref{comparison-Lefschetz} and just use the cohomology class $\beta$ found in the proof of \Cref{Existence Non-zero ResidueSp} with respect to $cosp^\vee$.
\begin{prop} Let $\mathcal X\to B$ be a smooth family over a curve and $b\in B$ a closed point. For every $j\geq0$ there is a map $\mathrm{H}_{\acute{e}t}^i(F_jX_{\bar\eta},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(F_j X_b,\Lambda)$ compatible with $F_j\to F_{j-1}$ and equal to $cosp^\vee\colon\mathrm{H}_{\acute{e}t}^i(X_{\bar\eta},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(X_b,\Lambda)$ for $j\gg0$. \end{prop} \begin{proof} As expected, the proof has a similar flavour as \cite[\textsection 4.2]{Sch2}.
Let $\alpha\in\mathrm{H}_{\acute{e}t}^i(F_jX_{\bar\eta},\Lambda)$, so there exists $F_jX_{\bar\eta}\subseteq U\subseteq X_{\bar\eta}$ with complement $Z$ so that $\mathrm{codim}_{X_{\bar\eta}}(Z)>j$ and $\alpha$ is represented by some $\alpha_U\in\mathrm{H}_{\acute{e}t}^i(U,\Lambda)$. There exists a finite extension $\eta'\to\eta$ and $Z'\subseteq X_{\eta'}$ such that $Z'_{\bar\eta}=Z$. Now if we write $U':=X_{\eta'}\setminus Z'$, then also $U'_{\bar\eta}=U$.
Write $Z_\eta$ for the image of $Z'$ under the finite map $X_{\eta'}\to X_\eta$ (give it the reduced structure, as $Z'$ is reduced, we have a map $Z'\to Z_\eta^{red}$ by the universal property). Then we see that $Z'\subseteq(Z_\eta)_{\eta'}$, so $\dim(Z')\leq\dim((Z_\eta)_{\eta'})=\dim(Z_\eta)$. But we also have $\dim(Z')\geq\dim(Z_\eta)$, so we conclude that $\dim(Z')=\dim(Z_\eta)$. We also know that $\dim(Z)=\dim(Z')$.
Let $\mathcal Z$ be the closure of $Z_\eta$ in $\mathcal X$. As we work over a smooth curve, the map $\mathcal Z\to B$ is flat (\cite[Corollary 4.3.10]{Liu}) and thus $\mathrm{codim}_{X_b}(Z_b)>j$ as well. Let $\mathcal U$ be the complement of $\mathcal Z$ in $\mathcal X$, giving a smooth family $\mathcal U\to\mathcal B$. This family then gives the specialization map $\mathrm{H}_{\acute{e}t}^i(\mathcal U_{\bar\eta},\Lambda)\overset{cosp^\vee}{\to}\mathrm{H}_{\acute{e}t}^i(\mathcal U_b,\Lambda)$. As $U_b:=\mathcal U_b$ is the complement of $Z_b$ in $X_b$, there is a natural map $\mathrm{H}_{\acute{e}t}^i(U_b,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(F_jX_b,\Lambda)$. We also compute that $\mathcal U_{\bar\eta}=X_{\bar\eta}\setminus (Z_\eta)_{\bar\eta}\subseteq X_{\bar\eta}\setminus Z=U$, so we define the image of $\alpha$ to be the image of $\alpha_U$ under the composition $\mathrm{H}_{\acute{e}t}^i(U,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(\mathcal U_{\bar\eta},\Lambda)\to\mathrm{H}_{\acute{e}t}^i(U_b,\Lambda)\to\mathrm{H}_{\acute{e}t}^i(F_jX_b,\Lambda)$.
We are left to check that this construction is compatible with restriction to opens. Suppose we have $F_jX_{\bar\eta}\subseteq U\subseteq W\subseteq X_{\bar\eta}$. Using the same construction as above, we now obtain an inclusion of families $\mathcal U\subseteq\mathcal W\to B$ and we have to show that the following diagram commutes \[ \begin{tikzcd} \mathrm{H}_c^\ast(U_b,\Lambda)\ar[r,"cosp"]\ar[d]&\mathrm{H}_c^\ast(\mathcal U_{\bar\eta},\Lambda)\ar[d]\\ \mathrm{H}_c^\ast(W_b,\Lambda)\ar[r,"cosp"]&\mathrm{H}_c^\ast(\mathcal W_{\bar\eta},\Lambda) \end{tikzcd}, \] where the vertical maps are the natural maps given by $j_!j^\ast\to \mathrm{id}$ for $j\colon U\hookrightarrow W$ the inclusion of an open. As the Poincaré dual of this map is the restriction to an open on cohomology, the commuativity indeed shows the claim after applying the restriction morphisms $\mathrm{H}_{\acute{e}t}^i(U,\Lambda)\to \mathrm{H}_{\acute{e}t}^i(\mathcal U_{\bar\eta},\Lambda)$ and $\mathrm{H}_{\acute{e}t}^i(W,\Lambda)\to \mathrm{H}_{\acute{e}t}^i(\mathcal W_{\bar\eta},\Lambda)$.
Let $j\colon\mathcal U\to \mathcal W$ be the inclusion and $f_{\mathcal U}$ and $f_{\mathcal W}$ be the maps to $B$ so that $f_{\mathcal W}\circ j=f_{\mathcal U}$. Then the vertical maps are given by taking the stalks at $b$ and $\bar\eta$ of the natural morphism $R(f_{\mathcal U})_!\Lambda=R(f_{\mathcal W})_!j_!j^\ast\Lambda\to R(f_{\mathcal W})_!\Lambda$. Using the functoriality of the cospecialization map \Cref{functorial-cosp}, we can apply it on this morphism, which precisely gives the commutativity of the diagram above. \end{proof}
\subsection{Relation with the Étale Fundamental Group} Here we note that we could have obtained \Cref{cohom-class-Enriques} without having to compute the full cohomology of an Enriques surfaces. After all, \cite[Table below Theorem 1.4.10]{Enr-Surf} gives that $\pi_1^{\acute{e} t}(S)\cong\mathbb{Z}/2\mathbb{Z}$. This implies that $\mathrm{H}_{\acute{e}t}^1(S,\mu_{2^r})\cong\Hom_{\mathbb{Z}/2^r\mathbb{Z}}(\pi^{\acute{e} t}_1(S),\mathbb{Z}/2^r\mathbb{Z})\cong\mathbb{Z}/2\mathbb{Z}$ and thus that $\mathrm{H}_{\acute{e}t}^3(S,\mu_{2^r})\cong\mathbb{Z}/2\mathbb{Z}$. Now the Bockstein long exact sequence corresponding to $0\to\mu_2\to\mu_4\to\mu_2\to0$ gives us \[ \mathrm{H}_{\acute{e}t}^2(S,\mu_4)\to\mathrm{H}_{\acute{e}t}^2(S,\mu_2)\twoheadrightarrow\mathrm{H}_{\acute{e}t}^3(S,\mu_2)\overset{0}{\to}\mathrm{H}_{\acute{e}t}^3(S,\mu_4)\;\tilde{\rightarrow}\;\mathrm{H}_{\acute{e}t}^3(S,\mu_2)\overset{0}{\to}\mathrm{H}_{\acute{e}t}^4(S,\mu_2)\hookrightarrow\mathrm{H}_{\acute{e}t}^4(S,\mu_4), \] implying that $\mathrm{H}_{\acute{e}t}^2(S,\mu_4)\to\mathrm{H}_{\acute{e}t}^2(S,\mu_2)$ cannot be surjective.
The above argument only uses that $\pi_1^{\acute{e} t}(S)$ has some torsion class. Combining this with \Cref{cor-gen-IHC}, we obtain the following. \begin{cor} Let $S$ be a surface over an algebraically closed field $k$ with $\mathrm{char}(k)\neq\ell$. If $\pi_1^{\acute{e} t}(S)[\ell]\neq 0$, then the integral Tate conjecture fails for $X\times S$, where $X$ is the geometric generic fibre of a Lefschetz pencil as in \Cref{general statement}.\qed \end{cor}
\end{document} |
\begin{document}
\title{Depth of segments and circles through points \\ enclosing many points: a note\thanks{
Partially supported by CAM grant S-0505/DPI/0235-02.
Part of this work was done while the author was visiting the Mathematical Sciences Research Institute.}}
\maketitle
\begin{abstract} Neumann-Lara and Urrutia showed in 1985 that in any set of $n$ points in the plane in general position there is always a pair of points such that any circle through them contains at least $\tfrac{n-2}{60}$ points. In a series of papers, this result was subsequently improved to $\tfrac{n}{4.7}$, which is currently the best known lower bound. In this paper we propose a new approach to the problem that allows us, by using known results about $j$-facets of sets of points in $\mathbb{R}^3$, to give a simple proof of a somewhat stronger result: there is always a pair of points such that any circle through them has, both inside and outside, at least $\tfrac{n}{4.7}$ points. \end{abstract}
\section{Introduction}
The problem that we address in this work was proposed by Neumann-Lara and Urrutia in \cite{nlu}, where the following result is shown: given a set $P$ of $n$ points in the plane in general position -- no three of them are collinear and no four of them are cocircular -- there is always a pair of points $p,q\in P$ such that every circle through $p$ and $q$ contains at least $\left\lceil\tfrac{n-2}{60}\right\rceil$ other points of $P$. In a series of papers \cite{hrw,bssu,h} this bound was slightly improved and, shortly afterwards, Edelsbrunner et al. \cite{ehss}, by using techniques related to the complexity of higher order Voronoi diagrams, showed a bound of $(\tfrac{1}{2}-\tfrac{1}{\sqrt{12}})n+O(1) \approx \tfrac{n}{4.7}$, which is the best currently known lower bound for the problem. Regarding the upper bound, in \cite{hrw} Hayward et al. constructed a set of $4m$ points such that for any two of them there are circles passing through them and containing less than $m$ points. Therefore, this example shows that $\lceil\tfrac{n}{4}\rceil-1$ is an upper bound for the problem. In the same paper, the authors study the problem for sets of points in convex position, and give a bound of $\lceil\tfrac{n}{3}\rceil-1$, which is also shown to be tight. Urrutia \cite{u} has conjectured that $\tfrac{n}{4}$ is, up to perhaps an additive constant, the tight bound for the general problem.
In this note we give an alternative proof of the result by Edelsbrunner et al., transforming the problem from circles in the plane to planes in the space. We introduce the concept of depth of a segment in a set of points $P\subset\mathbb{R}^3$ and, by using known results about the number of $j$-facets, we show that there is always a pair of points such that every circle through them has, both inside and outside, at least $\tfrac{n}{4.7}$ points. Furthermore, we propose a new conjecture about the maximal number of segments with depth $k$ that a set of points in convex position can have, which implies a stronger version of the original conjecture.
\section{Transforming the problem}
We use the well known transformation which maps the point $p=(p_x,p_y)\in\mathbb{R}^2$ to the point $\hat{p}=(p_x,p_y,p_x^2+p_y^2)\in\mathbb{R}^3$ in the paraboloid $z=x^2+y^2$. Among the useful properties of this transformation (see, for instance, \cite{e}) we will use the next one:
\begin{obs} Given three non-collinear points $p,q,r\in\mathbb{R}^2$, a point $s$ is inside the circle through them if and only if point $\hat{s}$ is below the plane defined by $\hat{p},\hat{q},\hat{r}\in\mathbb{R}^3$. \end{obs}
Therefore, the original problem is transformed into this one: given a set of $n$ points in the paraboloid $z=x^2+y^2$, show that there exists a pair of points such that any plane passing through them leaves below at least $\lceil \tfrac{n}{4}\rceil - 1$ points. This motivates the following definition:
\begin{defin} Given a set of points $P\subset\mathbb{R}^3$ and two points $p,q\in P$, the {\em depth} of segment $pq$ is defined as the smallest integer $k$ such that any plane through $p$ and $q$ has on each side at least $k$ points of $P$. \end{defin}
We observe that segments with depth zero are the edges of the convex hull and we are interested in showing that any set of points has segments with ``high depth''.
We recall that, given points $p,q,r\in P$, the (oriented) triangle $pqr$ is a $j$-facet of $P$ if it has exactly $j$ points on the positive side of its affine hull. Therefore, if $pqr$ is a $j$-facet, its edges have depth at most~$j$. A subset $T\subset P$ is a $k$-set if it has $k$ points and the sets $T$ and $P\smallsetminus T$ can be separated by a plane. The number of $j$-facets of a set of points in $\mathbb{R}^d$ is related to the number of $(j\pm d)$-sets and obtaining tight bounds for these quantities, even for $d=2$, is a famous open problem. The number of $(\leq j)$-facets is much better understood. In order to state the result, we need some notation.
Let $e_j(P)$ be the number of $j$-facets of $P$ and let $E_j(P)=\sum_{i=0}^j e_i(P)$ be the number of $(\leq j)$-facets. In \cite{w} Welzl shows the following:
\begin{thm} Let $P\subset\mathbb{R}^3$ be a set of $n$ points in general position. Then, $$ E_j(P)\leq 2\Big[\binom{j+2}{2}\,n - 2\,\binom{j+3}{3} \Big] \qquad \text{if $\,\,0\leq 2j \leq n-4$.} $$ Furthermore, the bound is tight and is achieved if and only if the set $P$ is in convex position. \end{thm}
Because for a set of points in convex position $E_j(P)$ is known, the following result follows immediately:
\begin{cor} \label{cor:ej} Let $P\subset\mathbb{R}^3$ be a set of $n$ points in convex position. Then, $$ e_j(P)=E_j(P)-E_{j-1}(P)=2(j+1)n-2(j+1)(j+2) \qquad \text{if $\,\,0\leq 2j \leq n-4$.} $$ \end{cor}
Next we use this result to bound the number of segments with depth at most $j$ for a set of points in convex position. We denote by $s_j(P)$ the number of segments of $P$ with depth $j$ and by $S_j(P)=\sum_{i=0}^j s_i(P)$ the number of segments with depth at most $j$.
\begin{prop}\label{p:T_j} Let $P\subset\mathbb{R}^3$ be a set of $n$ points in convex position. Then, $$ S_j(P) \leq 3(j+1)n-3(j+1)(j+2) \qquad \text{if $\,\,0\leq 2j \leq n-4$.} $$ \end{prop}
\begin{proof}
Let $j$ be such that $0\leq 2j \leq n-4$. We claim that if $pq$ is a segment with depth at most $j$, then it is contained in at least two $j$-facets of $P$. In order to prove the claim, consider first the case when the depth is smaller than $j$ and let $\pi$ be an oriented plane passing through $p$ and $q$ and having less than $j$ points in the positive side (denoted $\pi^+$ in Figure~\ref{fig1}). Because in the negative side of $\pi$ there are more than $\lceil\tfrac{n}{2}\rceil$ points, if we rotate the plane around $pq$ in a direction we find, before having rotated $180^\circ$, a point $r$ such that the plane $\pi_1$ passing through $p$, $q$ and $r$ leaves on the positive side exactly $j$ points of $P$ and, therefore, $pqr$ (oriented conveniently) is a $j$-facet of $P$. In the same way, if we rotate the plane $\pi$ in the opposite direction, we find another point $s$ and, thus, another $j$-facet containing segment $pq$. Finally, if the depth of $pq$ is $j$, we observe that the first point that we find when the plane rotates must be in the negative side of the plane and thus it defines a $j$-facet.
\begin{figure}
\caption{Illustration for the proof of Proposition~\ref{p:T_j}.}
\label{fig1}
\end{figure}
Because each $j$-facet has 3 edges, it follows that $2S_j(P)\leq 3e_j(P)$ and, from Corollary~\ref{cor:ej} we get $$ S_j(P) \leq \frac{3}{2}\, e_j(P) = 3(j+1)n-3(j+1)(j+2) \qquad \text{for $\,0\leq 2j \leq n-4$.} $$ \end{proof}
We are ready to show the main result of this paper.
\begin{thm} In a set $P\subset\mathbb{R}^3$ of $n$ points in convex position there exist segments with depth at least $$ \Bigl(\frac{1}{2}-\frac{1}{\sqrt{12}}\Bigr)\,n + O(1) \approx \frac{n}{4.7}. $$ \end{thm}
\begin{proof} Because the $n$ points determine $\binom{n}{2}$ segments, as long as $S_j(P)$ is smaller than $\binom{n}{2}$ there must be segments with depth greater than $j$. Therefore, from Proposition~\ref{p:T_j} we get $$ 3(j+1)n-3(j+1)(j+2) = \binom{n}{2}, $$ whose smallest solution is $$ j=\frac{n-3}{2}-\Bigl( \frac{(n-2)^2-1}{12} \Bigr)^{1/2} = \Bigl(\frac{1}{2}-\frac{1}{\sqrt{12}}\Bigr)\,n + O(1). $$
\end{proof}
Finally, if we apply this result to the original problem of circles passing through pairs of points, we obtain immediately the following result:
\begin{cor} Let $P$ be a set of $n$ points in the plane in general position. There always exists a pair of points $p,q\in P$ such that every circle through $p$ and $q$ has, both inside and outside, at least $$ \Bigl(\frac{1}{2}-\frac{1}{\sqrt{12}}\Bigr)\,n + O(1) \approx \frac{n}{4.7} $$ points of $P$. \end{cor}
\section{A new conjecture}
We propose a new conjecture which has arisen during our study of this problem.
\begin{conj}\label{c2} Let $P\subset\mathbb{R}^3$ be a set of $n$ points in convex position and let $s_j(P)$ be the number of segments with depth $j$. Then, $$ s_j(P) \leq 3n-8j-6 \qquad \text{if $\,\,0\leq j \leq \lceil\tfrac{n}{4}\rceil - 1$.} $$ \end{conj}
Of course, the result is obvious (with equality) for $j=0$ and it is easy to give an almost tight bound for $j=1$:
\begin{prop} Let $P\subset\mathbb{R}^3$ be a set of $n$ points in convex position. Then, $$ s_1(P) \leq 3n-12. $$ \end{prop}
\begin{proof} A segment $uv$ has depth one if and only if it is not an edge of the convex hull of $P$, denoted by $\operatorname{conv} (P)$, but there exists a point $p\in P$ such that $uv$ is an edge of $\operatorname{conv}(P\smallsetminus\{p\})$. If we denote by $\delta(p)$ the number of vertices adjacent to $p$ in $\operatorname{conv} (P)$, the number of new edges in $\operatorname{conv} (P\smallsetminus\{p\})$ is exactly $\delta(p)-3$. Therefore, \begin{equation}\label{eq:s1} s_1(P) \leq \sum_{p\in P} (\delta(p)-3) = 3n-12. \end{equation} \end{proof}
\begin{remark} The inequality in (\ref{eq:s1}) is strict if there is a segment $uv$ with depth one and points $p$ and $q$ such that $uv$ is an edge both of $\operatorname{conv} (P\smallsetminus\{p\})$ and $\operatorname{conv} (P\smallsetminus\{q\})$. In this situation, we say that segment $uv$ is generated by two points. It is easy to see that a segment with depth one cannot be generated by more than two points. Therefore, Conjecture~\ref{c2} for $s_1(P)$ is equivalent to show that there are always at least two segments generated by two points.
\end{remark}
In the following we construct a set $P\subset\mathbb{R}^3$ such that $s_j(P) = 3n-8j-6$ for every $j=0,\ldots,\tfrac{n}{4} - 1$, thus showing that the bound in Conjecture~\ref{c2}
would be tight. Consider the arc of circle $C=\{(x,y,z)\in\mathbb{R}^3\,|\,x^2+z^2=1,y=0,x>0.99\}$ and rotate it $45^\circ$ counterclockwise around the $x$ axis. Let $n=4m$, put points $C_p=\{p_1,\ldots,p_m\}$ in $C$ and perturb them slightly to achieve general position. Now construct points $C_q$ and $C_r$ by rotating $C_p$ around the $z$ axis, $120^\circ$ and $240^\circ$, respectively. Finally, consider the arc
$C'=\{(x,y,z)\in\mathbb{R}^3\,|\,x^2+z^2=1,y=0,z>0.99\}$ and put the rest of the points, $C_s=\{s_1,\ldots,s_m\}$, near $C'$ but slightly perturbed to achieve general position. The convex hull of $P=C_p\cup C_q \cup C_r\cup C_s$ is shown in Figure~\ref{fig2}.a (top view) and Figure~\ref{fig2}.b (bottom view).
\begin{figure}
\caption{Construction reaching $s_j(P) = 3n-8j-6$ for $j=0,\ldots,\tfrac{n}{4} - 1$.}
\label{fig2}
\end{figure}
The fact that $s_j(P)=3n-8j-6$ for $j=0,\ldots,\tfrac{n}{4}-1$ can be easily checked taking into account the following simple observations: \begin{itemize} \item[--] A segment $s$ has depth $j$ if it is in the convex hull of $P\smallsetminus T$ for some $j$-set $T$ and it is not in the convex hull of $P\smallsetminus S$ for any $k$-set $S$ with $k<j$.
\item[--] Given $T\subset P$ with $|T|<n/4$, the convex hull of $P'=P\smallsetminus T$ has ``the same structure'' as $\operatorname{conv} (P)$, i.e., consecutive points in each of the chains are adjacent, the first point in $C_s'$ is adjacent to all the points in $C_r'$ and $C_p'$, and so on. \end{itemize}
We conclude the note stating a direct implication of the previous conjecture. Because $$ \sum_{j=0}^{\lfloor\tfrac{n}{4}\rfloor -2} (3n-8j-6) \leq \binom{n}{2}-(n+2), $$ Conjecture~\ref{c2} would imply: \begin{conj}\label{c3} For every set of $n$ points in the plane in general position, there are always $n+2$ pairs of points such that any circle through them has, both inside and outside, at least $\lfloor\tfrac{n}{4}\rfloor - 1$ points. \end{conj}
\end{document}
Anexo:
$r_{j+1}$ con $s_{j+1},\ldots,s_m$ $\rightarrow m-j$.
$s_{j+1}$ con $r_{j+1},\ldots,r_m$ $\rightarrow m-j$ (contamos dos veces $s_{j+1}r_{j+1}$).
$s_{m-j}$ con $q_{j+1},\ldots,q_m$ $\rightarrow m-j$.
$q_{j+1}$ con $s_{j+1},\ldots,s_m$ $\rightarrow m-j$ (contamos dos veces $q_{j+1}s_{j+1}$).
$s_{j+1}$ con $p_1,\ldots,p_m$ $\rightarrow m$.
$r_{j+1}$ con $q_1,\ldots,q_m$ $\rightarrow m$.
$p_{j+1}$ con $r_1,\ldots,r_m$ $\rightarrow m$.
$q_{j+1}$ con $p_1,\ldots,p_m$ $\rightarrow m$.
Además, entre puntos de cada cadena,
$r_ir_{j+2},\ldots,r_{m-j-1}r_m$ $\rightarrow m-j-1$ (cuatro veces)
En total, $$ 4(m-j)-2+4m+4(m-j-1) = 12m-8j-6 = 3n-8j-6. $$
\end{document} |
\begin{document}
\titlerunning{Geometry of Spherical Harmonics}
\title{\textbf{Some Recent Developments on the Geometry of Random Spherical Eigenfunctions}}
\author{Domenico Marinucci}
\date{}
\maketitle
\address{Department of Mathematics, University of Rome Tor Vergata, \email{marinucc@mat.uniroma2.it}}
\begin{abstract}
A lot of efforts have been devoted in the last decade to the investigation of the high-frequency behaviour of geometric functionals for the excursion sets of random spherical harmonics, i.e., Gaussian eigenfunctions for the spherical Laplacian $\Delta_{\mathbf{S}^2}$. In this survey we shall review some of these results, with particular reference to the asymptotic behaviour of variances, phase transitions in the nodal case (the \emph{Berry's Cancellation Phenomenon}), the distribution of the fluctuations around the expected values, and the asymptotic correlation among different functionals. We shall also discuss some connections with the Gaussian Kinematic Formula, with Wiener-Chaos expansions and with recent developments in the derivation of Quantitative Central Limit Theorems (the so-called Stein-Malliavin approach).
\subjclass{60G60, 62M15, 53C65, 42C10, 33C55} \keywords{Random Eigenfunctions, Spherical Harmonics, Lipschitz-Killing Curvatures, Kinematic Formulae, Nodal Lines, Wiener-Ito Expansions} \end{abstract}
\section{Introduction}
Spherical eigenfunctions are defined as the solutions of the Helmholtz equation \begin{equation*} \Delta _{\mathbb{S}^{2}}f_{\ell }+\lambda _{\ell }f_{\ell }=0\text{ },\text{ }f_{\ell }:\mathbb{S}^{2}\rightarrow \mathbb{R}\text{ },\text{ }\ell =1,2,..., \end{equation*} where $\Delta _{\mathbb{S}^{2}}$ is the spherical Laplacian and $\left\{ -\lambda _{\ell }=-\ell (\ell +1)\right\} _{\ell =1,2,...}$ is the set of its eigenvalues. A random structure can be constructed easily by assuming that the eigenfunctions $\left\{ f_{\ell }(\cdot)\right\} $ follow a Gaussian isotropic random process on $\mathbb{S}^{2}.$ More precisely, for each $x\in \mathbb{S}^{2}$, we take $f_{\ell }(x)$ to be a Gaussian random variable defined on a suitable probability space $\left\{ \Omega ,\Im ,\mathbb{P}\right\} $; without loss of generality, we assume $\left\{ f_{\ell }(\cdot)\right\} $ to have mean zero, unit variance, and covariance function given by \begin{equation*} \mathbb{E}\left[ f_{\ell }(x)f_{\ell }(y)\right] =P_{\ell }(\left\langle x,y\right\rangle) \text{ , }x,y\in \mathbb{S}^{2},\text{ }P_{\ell }(t):= \frac{1}{2^{\ell }\ell !}\frac{d^{\ell }}{dt^{\ell }}(t^{2}-1)^{\ell }\text{ , } t \in \left[ -1,1 \right ] \text{ ,} \end{equation*} where $\left\{ P_{\ell }(.)\right\} $ denotes the family of Legendre polynomials: this is the only covariance structure to ensure that the random eigenfunctions are isotropic, that is, invariant in law with respect to the action of the group of rotations $SO(3)$. 
Random spherical eigenfunctions, also known as random spherical harmonics, arise in a huge number of applications, especially in connection with Mathematical Physics: in particular, their role in Quantum Chaos has drawn strong interest in the last two decades, starting from the seminal papers by \cite{Berry 1977}, \cite{Berry 2002}, \cite{Wig}, \cite{nazarov}; also, they represent the Fourier components of isotropic spherical random fields, whose analysis has an extremely important role in Cosmology (see e.g., \cite{MaPeCUP}). Of course, random spherical harmonics are just a special case of a much richer literature on random eigenfunctions on general manifolds; special interest has been drawn for instance by \emph{Arithmetic Random Waves, }i.e., random eigenfunctions on the torus $\mathbb{T}^{d},$ which were introduced by \cite {RW08} and then studied among others by \cite{KKW}, \cite{KW}, \cite {MPRW2015}, \cite{dalmao}, \cite{Rudnick}, \cite{RudnickYesha}, \cite{Cam19} , \cite{Maff}, \cite{Buckley}, see also \cite{CH}, \cite{Sarnak} and the references therein. Although some of the results that we shall discuss have related counterparts on the torus, on the higher-dimensional spheres, on more general compact manifolds and in the Euclidean case, we will stick mainly to $ \mathbb{S}^{2}$ for brevity and simplicity.
A lot of efforts have been spent in the last decade to characterize the geometry of the excursion sets of random spherical harmonics, which are defined as \begin{equation} A_{u}(f_{\ell };\mathbb{S}^{2}):=\left\{ x\in \mathbb{S}^{2}:f_{\ell }(x)\geq u\right\} \text{ , }u\in \mathbb{R}\text{ .} \label{Excursion} \end{equation} A classical tool for the investigation of these sets is given by the so-called Lipschitz-Killing Curvatures (or equivalently, by Minkowski functionals, see \cite{adlertaylor}), which in dimension 2 correspond to the Euler-Poincar\'{e} characteristic, (half of) the boundary length and the excursion area. A general expression for their expected values (covering much more general Gaussian fields than random eigenfunctions) is given by the \emph{Gaussian Kinematic Formula} (see \cite{Taylor},\cite{adlertaylor} ). Over the last decade, more refined characterizations for random spherical harmonics have been obtained, including neat analytic expressions (in the high energy limit $\lambda _{\ell }\rightarrow \infty $) for the fluctuations around their expected values and the correlation among these different functionals; much of the literature has been concerned with the \emph{nodal} case, corresponding to $u=0,$ to which we shall devote special attention. In this survey, we shall review some of these results and present some open issues for future research.
\section{The Gaussian Kinematic Formula for Lipschitz-Killing Curvatures on Excursions Sets}
\subsection{The Kac-Rice Formula and the Expectation Metatheorem}
The first modern attempt to investigate the geometry of random processes and fields can probably be traced back to the groundbreaking work by Kac (1943) and Rice (1945) (\cite{Kac}, \cite{Rice}) on the zeroes of stochastic processes. Their pioneering argument can be introduced as follows: let $ f(.,.):\Omega \times \mathbb{R}\rightarrow \mathbb{R}$ be a continuous stochastic process satisfying regularity conditions; our aim is to derive the expected cardinality of its zero set in some finite interval (say $[0,T]$), i.e. the mean of \begin{equation*} N_{0}([0,T]):=Card\left\{ t\in \mathbb{[}0,T\mathbb{]}:f(t)=0\right\} \text{ .} \end{equation*} Now assume that $\left\{ f(.)\right\} $ is $C^{1}$ with probability one, such that $f(0),f(T)\neq 0$ and \begin{equation*} \left\{ t:f(t)=0,\text{ }f^{\prime }(t)=0\right\} =\varnothing \text{ ;} \end{equation*} then the following result (\emph{Kac's counting Lemma}) can be established easily (see \cite{azaiswschebor}, p.69): \begin{equation*} N_{0}([0,T])=\lim_{\varepsilon \rightarrow 0}\int_{0}^{T}\frac{1}{
2\varepsilon }\mathbb{I}_{(-\varepsilon ,\varepsilon )}(f(t))|f^{\prime
}(t)|dt\text{ ,} \end{equation*} where as usual $\mathbb{I}_{A}$ denotes the indicator function of the set $A$. With further efforts and assuming all exchanges of integrals and limits can be justified, one obtains also \begin{equation} \mathbb{E}\left[ N_{0}([0,T])\right] =\int_{0}^{T}\mathbb{E}\left[ \left.
|f^{\prime }(t)|\right\vert f(t)=0\right] p_{f(t)}(0)dt\text{ ,} \label{KacRice} \end{equation} where $\mathbb{E}\left[ \left. .\right\vert .\right] $ denotes as usual conditional expected value and $p_{f}(.)$ the marginal density of $f(.),$ which is assumed to exist and admit enough regularity conditions (in the overwhelming majority of the literature and in this whole survey, $f(.)$ will indeed be assumed to be Gaussian); (\ref{KacRice}) is the simplest example of the \emph{Kac-Rice Formula.}
The basic idea behind the Kac-Rice approach has proved to be extremely fruitful, leading to an enormous amount of applications and generalizations. In particular, in the research monographs \cite{adlertaylor}, \cite {azaiswschebor}, (slightly different) versions of a general \emph{ Expectation Metatheorem} (in the terminology of \cite{adlertaylor}) are proved. More precisely, let us take $M$ to be a compact, $d-$dimensional oriented $C^{1}$ manifold with a $C^{1}$ Riemannian metric $g.$ Assume $ f:M\rightarrow \mathbb{R}^{d}$ and $h:M\rightarrow \mathbb{R}^{k}$ are vector-valued random fields which satisfy suitable regularity conditions (see \cite{adlertaylor}, \cite{azaiswschebor} for more details and \cite {Stecconi2021} for some very recent developments). Let $B\subset \mathbb{R} ^{k}$ be a subset with boundary dimension smaller or equal than $k-1;$ then define \begin{equation*} N_{u}(f,h,M,B)=\left\{ t\in M:f(t)=u,h(t)\in B\right\} \text{ , }u\in \mathbb{R}^{d}. \end{equation*} The following extension of the Kac-Rice formula holds:
\begin{thm} \label{Metatheorem} (\cite{adlertaylor}, \cite{azaiswschebor}) We have that \begin{equation*} \mathbb{E}\left[ N_{u}(f,h,M,B)\right] =\int_{M}\mathbb{E}\left[ \left. \left\vert \det \left\{ \nabla f(t)\right\} \right\vert \mathbb{I} _{B}(h(t))\right\vert f(t)=u\right] p_{f(t)}(u)\sigma _{g}(dt)\text{ ,} \end{equation*} where as before $\mathbb{I}_{B}(.)$ denotes the indicator function, $\nabla f(.)$ the (covariant) gradient of $f(.)$ and $\sigma _{g}(.)$ the volume form induced by the metric $g$. \end{thm}
\begin{rem} By taking $k=1$, $f:=\nabla h$ the gradient of $h$ (and hence $\nabla f=\nabla ^{2}h$ its Hessian) and $u=(0,...,0),$ Theorem \ref{Metatheorem} yields the expected number of critical points with values in $B$ for the scalar random field $h$. Simple modifications similarly yield the expected values for maxima, minima and saddle points. \end{rem}
The previous results have all been restricted to vector-valued random fields whose image space has co-dimension zero. However, the results can be similarly generalized to strictly positive co-dimensions. Indeed, under the same setting as before assume instead that $f:M\rightarrow \mathbb{R} ^{d^{\prime }}$ is such that $d^{\prime }<d;$ then $\nabla f$ is a $d\times d^{\prime }$ rectangular matrix, and the following generalization of the Expectation Metatheorem holds (see \cite{adlertaylor}, \cite{azaiswschebor})
\begin{thm} \label{Metatheorem2} (\cite{adlertaylor}, \cite{azaiswschebor}) It holds that \begin{equation*} \mathbb{E}\left[ \mathcal{H}_{u}(f,h,M,B)\right] \end{equation*} \[
=\int_{M}\mathbb{E}\left[ \left. \left\vert \det \left\{ (\nabla f(t))^{T}(\nabla f(t))\right\} \right\vert ^{1/2}\mathbb{I}_{B}(h)\right\vert f(t)=u\right] p_{f(t)}(u)\sigma _{g}(dt)\text{ ,} \] where $\mathcal{H}_{u}(f,h,M,B)$ denotes the $d-d^{\prime }$ dimensional Hausdorff measure of the set $\left\{ t\in M:f(t)=u\text{ and }h(t)\in B\right\} .$ \end{thm}
\begin{exa} Let $M=\mathbb{S}^{2}$ be the standard two-dimensional unit sphere in $\mathbb{R} ^{3},$ $f:\mathbb{S}^{2}\times \Omega \rightarrow \mathbb{R}$ a random field, and let \begin{equation*} Len(f):=\mathcal{H}_{0}(f,\mathbb{S}^{2},0)=meas\left\{ t\in \mathbb{S} ^{2}:f(t)=0\right\} \text{ ,} \end{equation*} i.e., the length of the nodal lines of $f(.).$ Then \begin{eqnarray*} \mathbb{E}\left[ Len(f)\right] &=&\int_{\mathbb{S}^{2}}\mathbb{E}\left[ \left. \left\vert \det \left\{ (\nabla f(t))^{T}(\nabla f(t))\right\} \right\vert ^{1/2}\right\vert f(t)=0\right] p_{f(t)}(0)\sigma (dt) \\ &=&\int_{\mathbb{S}^{2}}\mathbb{E}\left[ \left. \left\Vert \nabla f(t)\right\Vert \right\vert f(t)=0\right] p_{f(t)}(0)\sigma (dt)\text{ ,} \end{eqnarray*} where $\left\Vert .\right\Vert $ denotes Euclidean norm and $\sigma (.)$ the standard Lebesgue measure on the unit sphere. In particular, assuming that the law of $f(.)$ is isotropic (that is, invariant with respect to the action of the group of rotations $SO(3))$ we get \begin{equation*} \mathbb{E}\left[ Len(f)\right] =4\pi \times \mathbb{E}\left[ \left. \left\Vert \nabla f(t)\right\Vert \right\vert f(t)=0\right] p_{f(t)}(0)\text{ .} \end{equation*} \end{exa}
\subsection{Intrinsic Volumes and Lipschitz-Killing Curvatures}
In the sequel, as mentioned earlier we will restrict our attention only to Gaussian processes, which have driven the vast majority of research in this area. We need now to introduce the Gaussian Kinematic Formula (see \cite {Taylor} and \cite{adlertaylor}); to this aim, let us first recall the notion of \emph{Lipschitz-Killing Curvatures}. In the simplest setting of convex subsets of the Euclidean space $\mathbb{R}^{d},$ Lipschitz-Killing Curvatures (also known as intrinsic volumes) can be defined implicitly by means of \emph{Steiner's Tube Formula}; to recall the latter, for any convex set $d$-dimensional set $A\subset \mathbb{R}^{d}$ define the Tube of radius $ \rho $ around $A$ as \begin{equation*} Tube(A,\rho ):=\left\{ x\in \mathbb{R}^{d}:d(x,A)\leq \rho \right\} \text{ , }d(x,A)=\inf_{y\in A}d(x,y)\text{ , } \end{equation*} where $d(.,.)$ is the standard Euclidean distance. Then the following expansion holds: \begin{equation*} \mu _{d}\left\{ Tube(A,\rho )\right\} =\sum_{j=0}^{d}\omega _{d-j}\rho ^{d-j} \mathcal{L}_{j}(A)\text{ ,} \end{equation*} where $\mathcal{L}_{j}(A)$ denotes the $j$-th Lipschitz-Killing Curvatures, $ \mu _{d}(.)$ denotes the $d$-dimensional Lebesgue measure and $\omega _{j}:= \frac{\pi ^{j/2}}{\Gamma (\frac{j}{2}+1)}$ is the volume of the $j$ -dimensional unit ball ($\omega _{0}=1,$ $\omega _{1}=2,$ $\omega _{2}=\pi ,$ $\omega _{3}=\frac{4}{3}\pi ).$
Lipschitz-Killing Curvatures can be shown to be additive and to scale with dimensionality, in the sense that \begin{equation*} \mathcal{L}_{j}(\lambda A)=\lambda ^{j}\mathcal{L}_{j}(A)\text{ for all } \lambda >0\text{ ,} \end{equation*} and \begin{equation*} \mathcal{L}_{j}(A_{1}\cup A_{2})=\mathcal{L}_{j}(A_{1})+\mathcal{L} _{j}(A_{2})-\mathcal{L}_{j}(A_{1}\cap A_{2})\text{ .} \end{equation*} For $j=d,$ it is immediately seen that $\mathcal{L}_{d}(A)$ is just the Hausdorff measure of $A,$ whereas for $j=0$ we obtain $\mathcal{L} _{0}(A)=\varphi (A),$ the (integer-valued) Euler-Poincar\'e characteristic of $A.$ A more general definition of $\mathcal{L}_{j}(.)$ can be given for basic complexes (i.e., disjoint union of complex sets), for which the following characterization (due to Hadwiger, see \cite{adlertaylor}) holds: \begin{equation} \mathcal{L}_{j}(A)=\frac{\omega _{d}}{\omega _{d-j}\omega _{j}}\binom{d}{j} \int_{\mathcal{G}_{d}}\varphi (A\cap gE_{d-j})\mu (dg) \label{Hadwiger} \end{equation} where $\mathcal{G}_{d}=\mathbb{R}^{d}\times O(n)$ is the group of rigid motions, $ E_{d-j}$ is any $d-j$ dimensional affine subspace and the volume form $\mu (dg)$ is normalized so that \begin{equation*} \text{for all }x\in \mathbb{R}^{d},\text{ }A\subset \mathbb{R}^{d}\text{ , } \mu \left\{ g:gx\in A\right\} =\mathcal{H}(A)\text{ ,} \end{equation*} where as before $\mathcal{H}(.)$ denotes the Hausdorff measure. For instance, for $A=\mathbb{S}^{2}$ it is well-known and easy to check that ( \ref{Hadwiger}) gives \begin{equation*} \mathcal{L}_{0}(\mathbb{S}^{2})=2\text{ , }\mathcal{L}_{1}(\mathbb{S}^{2})=0 \text{ , }\mathcal{L}_{2}(\mathbb{S}^{2})=4\pi \text{ ,} \end{equation*} which represent, respectively, the Euler-Poincar\'e characteristic, (half) the boundary length and the area of the 2-dimensional unit sphere.
\subsection{The Gaussian Kinematic Formula}
From now on, we shall restrict our attention to Gaussian processes $ f:M\rightarrow \mathbb{R}$, which we shall take to be zero-mean and isotropic, meaning as usual that $\mathbb{E}\left[ f(t)\right] =0$ and $f(gt) \overset{d}{=}f(t)$ for all $t\in M\subset \mathbb{R}^{d}$ and $g\in SO(d);$ more explicitly, the law of the field $f(\cdot)$ will always be taken to be invariant to rotations. In order to present the Gaussian Kinematic Formula, let us first introduce a Riemannian structure governed by the covariance function of the field $\left\{ f(.)\right\} $; more precisely, consider the metric induced on the tangent plan $T_{t}M$ by the following inner product ( \cite{adlertaylor}, p.305): \begin{equation*} g^{f}(X_{t},Y_{t}):=\mathbb{E}\left[ X_{t}f\cdot Y_{t}f\right] \text{ , } X_{t},Y_{t}\in T_{t}M\text{ . } \end{equation*} This metric takes a particular simple form in case the field $f(.)$ is isotropic; in these circumstances, $g^{f}(.,.)$ is simply the standard Euclidean metric, rescaled by a factor that corresponds to the square root of (minus) the derivative of the covariance density at the origin.
\begin{exa} Consider the random spherical eigenfunction satisfying \begin{equation*} \Delta f_{\ell }=-\lambda_{\ell}f_{\ell }\text{ , }f_{\ell }:\mathbb{S} ^{2}\rightarrow \mathbb{R}\text{ , }\ell=0,1,2,..., \end{equation*} with \begin{equation*} \mathbb{E}\left[ f_{\ell }(x)\right] =0\text{ , }\mathbb{E}\left[ f_{\ell }(x_{1})f_{\ell }(x_{2})\right] =P_{\ell }\left( \left\langle x_{1},x_{2}\right\rangle \right) \text{ , }P_{\ell }^{\prime }(1)=\frac{ \ell (\ell +1)}{2}\text{ .} \end{equation*} Then the induced inner product is simply \begin{equation*} g^{f_{\ell }}(X,Y)=\frac{\ell (\ell +1)}{2}\left\langle X,Y\right\rangle _{\mathbb{R}^{3}}\text{ ;} \end{equation*} this change of metric can of course be realized by transforming $\mathbb{S} ^{2}$ into $\mathbb{S}_{\sqrt{\lambda _{\ell }/2}}^{2}:=\sqrt{\lambda _{\ell }/2}\mathbb{S}^{2}.$ \end{exa}
Let us now write $\mathcal{L}_{j}^{f}(A)$ for the $j$-th Lipschitz-Killing Curvatures of the set $A$ under the metric induced by the zero-mean Gaussian field $f;$ for instance, in the case of spherical random eigenfunctions we get immediately \begin{equation*} \mathcal{L}_{0}^{f_{\ell }}(\mathbb{S}^{2})=\mathcal{L}_{0}(\mathbb{S}_{ \sqrt{\lambda _{\ell }/2}}^{2})=2\text{ , }\mathcal{L}_{1}^{f_{\ell }}( \mathbb{S}^{2})=0\text{ , }\mathcal{L}_{2}^{f_{\ell }}(\mathbb{S}^{2})=4\pi \frac{\lambda _{\ell }}{2}\text{ .} \end{equation*} For further notation, as in \cite{adlertaylor} we shall write \begin{eqnarray*} \rho _{j}(u) &:&=\frac{1}{(2\pi )^{1/2+j/2}}\exp (-u^{2}/2)H_{j-1}(u)\text{ , } j\geq 1 \\ \rho _{0}(u) &:&=1-\Phi (u)=\int_{u}^{\infty }\varphi(t)dt\text{ ,} \end{eqnarray*} where as usual $\varphi(t)=(2\pi)^{-1/2}exp(-t^2/2)$ denotes the standard Gaussian density and we introduced the Hermite polynomials \begin{equation} H_{k}(u):=(-1)^{k}\exp (\frac{u^{2}}{2})\frac{d^{k}}{du^{k}}\exp (-\frac{ u^{2}}{2})\text{ , }k=0,1,2,...,u\in \mathbb{R}\text{ ;} \label{Hermite} \end{equation} for instance $H_{0}(u)=1,$ $H_{1}(u)=u,$ $H_{2}(u)=u^{2}-1,...$ Finally, we shall introduce the \emph{flag coefficients} \begin{equation} \left [ \begin{matrix} d \\ k \end{matrix} \right] :=\binom{d}{k}\frac{\omega _{d}}{\omega _{k}\omega _{d-k}} \text{ , }k=0,1,...,d\text{ .} \label{flag} \end{equation}
We are now in the position to state the following:
\begin{thm} (Gaussian Kinematic Formula (\cite{Taylor}, \cite{adlertaylor}, Theorem 13.4.1)) Under Regularity Conditions, for all $j=0,1,...,d$ we have that \begin{equation} \mathbb{E}\left[ \mathcal{L}_{j}^{f}(A_{u}(f;M))\right] =\sum_{k=0}^{d-j} \left [ \begin{matrix} k+j \\ k \end{matrix} \right] \rho _{k}(u)\mathcal{L}_{k+j}^{f}(M)\text{ .} \label{GKF} \end{equation} \end{thm}
Before we proceed with some examples, it is worth discussing formula (\ref{GKF}). We are evaluating the expected value of a complex geometric functional on a complicated excursion set, in very general circumstances (under minimal regularity conditions on the field and on the manifold on which it is defined). It is clear that the expected value should depend on the manifold, on the threshold level, and on the field one considers, and one may expect these three factors to be intertwined in a complicated manner. On the contrary, formula (\ref{GKF}) shows that their role is completely decoupled; more precisely
\begin{itemize} \item the threshold $u$ enters the formula merely through the functions $ \rho _{j}(u)$ which are very simple and fully universal (i.e., they do not depend neither on the field nor on the manifold);
\item on the left-hand side Lipschitz-Killing Curvatures appear, but they are computed on the original manifold, not on the excursion sets, and they are therefore again extremely simple to compute;
\item the role of the field $f$ is confined to the new metric $g^{f}(.,.)$ that it induces and under which the Lipschitz-Killing Curvatures are computed on both sides; under the (standard) assumption of isotropy, this implies only a rescaling of the manifold by means of a factor depending only on the derivative of the covariance function at the origin. \end{itemize}
\begin{exa} Let us consider a zero-mean isotropic Gaussian field $f$ defined on $\mathbb{ S}^{d}$ (the unit sphere in $\mathbb{R}^{d+1});$ its covariance function can be written as \begin{equation*} \mathbb{E}\left[ f(x_{1})f(x_{2})\right] =\sum_{\ell =0}^{\infty }\frac{ n_{\ell ,d}}{s_{d+1}}C_{\ell }G_{\ell;\frac{d}{2}}(\left\langle x_{1},x_{2}\right\rangle )\text{ ,} \end{equation*} where $s_{d+1}=(d+1)\omega_{d+1}$ is the surface measure of $\mathbb{ S}^{d}$, $G_{\ell;\alpha}(.)$ denotes the normalized Gegenbauer polynomials of order $ \alpha$, whereas \begin{equation*} n_{\ell ,d}=\frac{2\ell +d-1}{\ell }\binom{\ell +d-2}{\ell -1}\sim \frac{2}{ (d-1)!}\ell ^{d-1},\text{ as }\ell \rightarrow \infty , \end{equation*} is the dimension of the eigenspace corresponding to the $\ell $-th eigenvalue $\lambda _{\ell ;d}:=\ell (\ell +d-1);$ here $\left\{ C_{\ell }\right\} $ is a sequence of non-negative weights which represent the so-called angular power spectrum of the random field. The derivative of the covariance function at the origin is \begin{equation*} \mu :=\sum_{\ell =0}^{\infty }\frac{n_{\ell ,d}}{s_{d+1}}C_{\ell }\frac{ \lambda _{\ell ;d}}{d}\text{ .} \end{equation*} Recall the Lipschitz-Killing Curvatures of the manifold $\mathbb{S}_{\lambda }^{d}:=\lambda \mathbb{S}^{d}$ are given by (\cite{adlertaylor}, page 179): \begin{equation*} \mathcal{L}_{j}(\lambda \mathbb{S}^{d})=2\binom{d}{j}\frac{s_{d+1}}{s_{d+1-j}} \lambda ^{j}\text{ , } \end{equation*} for $d-j$ even, and $0$ otherwise. Then the Gaussian Kinematic Formula reads \begin{eqnarray*} \mathbb{E}\left[ \mathcal{L}_{j}^{f}(A_{u}(f;\mathbb{S}^{d}))\right] &=&\sum_{k=0}^{d-j}\rho _{k}(u) \left [ \begin{matrix} k+j \\ k \end{matrix} \right] \mathcal{L}_{k+j}(\sqrt{\mu}\mathbb{S}^{d}) \\ &=&\sum_{k=0}^{d-j}\rho _{k}(u) \left [ \begin{matrix} k+j \\ k \end{matrix} \right] \mathcal{L}_{k+j}(\mathbb{S }^{d})\mu^{(k+j)/2}. \end{eqnarray*} \end{exa}
\begin{exa} As a special case of the previous example, assume $f=f_{\ell }$ is actually a unit variance random eigenfunction on $\mathbb{S}^{2}$ corresponding to the eigenvalue $-\ell(\ell+1)$, $\ell=0,1,2,...$. Then the Gaussian Kinematic Formula gives \begin{eqnarray*} \mathbb{E}\left[ \mathcal{L}_{0}^{f_{\ell }}(A_{u}(f_{\ell };\mathbb{S}^{2})) \right] &=&\mathbb{E}\left[ \mathcal{L}_{0}(A_{u}(f_{\ell };\mathbb{S}^{2})) \right] \\ &=&2\left\{ 1-\Phi (u)\right\} +\frac{1}{2\pi }u\phi (u)(4\pi )\frac{\ell (\ell +1)}{2}\text{ ,} \end{eqnarray*} \begin{equation*} \mathbb{E}\left[ \mathcal{L}_{1}^{f_{\ell }}(A_{u}(f_{\ell };\mathbb{S}^{2})) \right] =\rho _{1}(u) \left [ \begin{matrix} 2 \\ 1 \end{matrix} \right] \mathcal{L}_{2}(\mathbb{S}^{2})\left\{ \frac{ \ell (\ell +1)}{2}\right\} \end{equation*} so that \begin{equation*} \mathbb{E}\left[ \mathcal{L}_{1}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right] = \pi \exp (-\frac{u^{2}}{2})\left\{ \frac{\ell (\ell +1)}{2}\right\} ^{1/2}, \end{equation*} and finally \begin{equation*} \mathbb{E}\left[ \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right] =\left\{ 1-\Phi (u)\right\} \mathcal{L}_{2}(\mathbb{S}^{2})=\left\{ 1-\Phi (u)\right\} 4\pi \text{ }. \end{equation*} \end{exa}
\begin{exa} In the special case of the nodal volume $\mathcal{L}_{d-1}(A_{0}(\mathbb{S} ^{d}),f_{\ell })$ of random eigenfunctions, i.e., half the Hausdorff measure of the zero-set of the eigenfunction, the Gaussian Kinematic Formula gives \begin{eqnarray*} \mathbb{E}\left[ \mathcal{L}_{d-1}^{f}(A_{u}(f_{\ell };\mathbb{S}^{d})) \right] &=&(\frac{\lambda _{\ell }}{d})^{(d-1)/2}\mathbb{E}\left[ \mathcal{L} _{d-1}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] \\ &=&\rho _{1}(u)\frac{d\omega _{d}}{\omega _{1}\omega _{d-1}}\mathcal{L}_{d}( \mathbb{S}^{d})(\frac{\lambda _{\ell }}{d})^{d/2} \end{eqnarray*} so that, recalling $\omega _{j}=\frac{\pi ^{j/2}}{\Gamma (\frac{j}{2}+1)}$ and $\mathcal{L}_{d}(\mathbb{S}^{d})=(d+1)\omega _{d+1}$ \begin{eqnarray} \mathbb{E}\left[ \mathcal{L}_{d-1}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] &=&\frac{1}{2\pi }\exp (-\frac{u^{2}}{2})\frac{d\omega _{d}}{\omega _{1}\omega _{d-1}}\mathcal{L}_{d}(\mathbb{S}^{d})(\frac{\lambda _{\ell }}{d} )^{1/2} \notag \\ &=&\exp (-\frac{u^{2}}{2})\frac{\pi ^{d/2}}{\Gamma (\frac{d}{2})}(\frac{ \lambda _{\ell }}{d})^{1/2}. \label{ExpNodal} \end{eqnarray} \end{exa}
For $u=0$ (\ref{ExpNodal}) was derived for instance by \cite{Berard} (see \cite{Wig}) and it is consistent with a celebrated conjecture by \cite{Yau}, which states that for $C^{\infty }$ manifolds the nodal volume of any eigenfunction corresponding to the eigenvalue $E$ should belong to the interval $[c_{1}\sqrt{E},c_{2}\sqrt{E}]$ for some constants $0<c_{1}\leq c_{2}<\infty$. The conjecture was settled for real analytic manifolds by \cite {DonnellyFefferman}; for smooth manifolds the lower bound was established much more recently, see \cite{Logunov}, \cite{Logunov2}, \cite{Logunov3} while the upper bound is addressed in \cite{Logunov4}. As a consequence of the results in the next two Sections below in the case of the sphere in a probabilistic sense the upper and lower constants can be taken nearly coincident, in the limit of diverging eigenvalues.
\section{Wiener-Chaos Expansions, Variances and Correlations}
In view of the results detailed in the previous Section, the question related to the expectation of intrinsic volumes in the case of Gaussian fields can be considered completely settled. The next step of interest is the computation of the corresponding variances, and the asymptotic laws of fluctuations around the expected values, in the high-frequency regime. The first rigorous results in this area can be traced back to a seminal paper by Igor Wigman (\cite{Wig}) where the variance of the nodal length (i.e., $ Len(f_{\ell },\mathbb{S}^{2}):=2\mathcal{L}_{1}(A_{0}(f_{\ell},\mathbb{S}^{2}))$) for random spherical harmonics in dimension 2 is computed and shown to be asymptotic to \begin{equation} Var\left[ Len(f_{\ell },\mathbb{S}^{2})\right] =\frac{\log \ell }{32} +O_{\ell \rightarrow \infty }(1)\text{ }. \label{Wig10} \end{equation} We shall start instead from the derivation of variances and central limit theorems for Lipschitz-Killing Curvatures of excursion sets at $u\neq 0,$ although these results were actually obtained more recently than (\ref{Wig10}).
Let us recall first the notion of Wiener chaos expansions. In the simplest setting, consider $Y=G(Z)$ i.e., the transform of a zero mean, unit variance Gaussian random variable $Z,$ such that $\mathbb{E}\left[ G(Z)^{2}\right] <\infty ;$ it is well-known that the following expansion holds, in the $ L^{2}(\Omega )$ sense: \begin{equation} G(Z)=\sum_{q=0}^{\infty }\frac{J_{q}(G)}{q!}H_{q}(Z)\text{ ,} \label{Chaos_exp} \end{equation} where $\left\{ H_{q}(.)\right\} _{q=0,1,2,...}$ denotes the family of Hermite polynomials that we introduced earlier in (\ref{Hermite}), and $ J_{q}(G)$ are projection coefficients given by $J_{q}(G):=\mathbb{E}\left[ G(Z)H_{q}(Z)\right] $ (see i.e., \cite{Jansson}, \cite{noupebook})$.$ The summands in (\ref{Chaos_exp}) are orthogonal, because when evaluated on pairs of standard Gaussian variables $Z_{1},Z_{2},$ Hermite polynomials enjoy a very simple formula for the computation of covariances: \begin{equation} \mathbb{E}\left[ H_{q_{1}}(Z_{1})H_{q_{2}}(Z_{2})\right] =\delta _{q_{1}}^{q_{2}}q_{1}!\left\{ \mathbb{E}\left[ Z_{1}Z_{2}\right] \right\} ^{q_{1}}, \label{Wick} \end{equation} where $\delta _{q_{1}}^{q_{2}}$ denotes the Kronecker delta. Equation (\ref{Wick}) is just a special case of the celebrated \emph{Diagram (or Wick's) Formula}, see \cite{noupebook} for much more discussion and details. 
We thus have immediately \begin{equation*} Var\left\{ G(Z)\right\} =\sum_{q=0}^{\infty }\frac{J_{q}^{2}(G)}{q!}\text{ .} \end{equation*} More generally, let $\left\{ Z_{1},...,Z_{j},...\right\} $ be any array of independent standard Gaussian variables, and consider elements of the form \begin{equation*} H_{q_{1}}(Z_{1})\cdot ...\cdot H_{q_{p}}(Z_{p})\text{ , }q_{1}+...+q_{p}=q \text{ ;} \end{equation*} the linear span (in the $L^{2}(\Omega )$ sense) of these random variables is usually written $\mathcal{C}_{q}$ (denoted the $q$-th order Wiener chaos, see again \cite{noupebook}) and we have the orthogonal decomposition \begin{equation*} L^{2}(\Omega )=\bigoplus\limits_{q=0}^{\infty }\mathcal{C}_{q}\text{ .} \end{equation*}
\subsection{Wiener-Chaos Expansions for Random Eigenfunctions}
Let us now explain how these techniques can be pivotal for the investigation of fluctuations of geometric functionals. We start from the simplest case, the excursion volume/area for the two-dimensional sphere, which we can write as \begin{equation*} \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2}))=\int_{\mathbb{S}^{2}}\mathbb{I}_{[u,\infty )}(f_{\ell }(x))dx\text{ ,} \end{equation*} $\mathbb{I}_{[u,\infty )}(.)$ denoting the indicator function of the semi-interval $[u,\infty ).$ It is not difficult to show that \begin{eqnarray*} J_{q}(\mathbb{I}_{[u,\infty )}(.)) &=&\mathbb{E}\left[ \mathbb{I}_{[u,\infty )}(Z)H_{q}(Z)\right] \\ &=&\int_{u}^{\infty }H_{q}(z)\phi (z)dz=H_{q-1}(u)\phi (u)\text{ ,} \end{eqnarray*} the last result following by integration by parts, under the convention that \begin{equation*} H_{-1}(u)\phi (u):=1-\Phi (u)\text{ .} \end{equation*} In view of (\ref{Chaos_exp}), we thus have (\cite{DI,MW}) \begin{eqnarray*} \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2})) &=&\int_{\mathbb{S}^{2}}\sum_{q=0}^{\infty }H_{q-1}(u)\phi (u)\frac{H_{q}(f_{\ell }(x))}{q!}dx \\ &=&\sum_{q=0}^{\infty }\frac{1}{q!}H_{q-1}(u)\phi (u)h_{\ell ;q}\text{ , where }h_{\ell ;q}=\int_{\mathbb{S}^{2}}H_{q}(f_{\ell }(x))dx\text{ ;} \end{eqnarray*} as a consequence, we have also \begin{equation} Var\left\{ \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} =\sum_{q=0}^{\infty }\frac{1}{ (q!)^{2}}H_{q-1}^{2}(u)\phi ^{2}(u)Var\left\{ h_{\ell ;q}\right\} \text{ .} \label{VarDefect} \end{equation}
The crucial observation to be drawn at this stage is that the variances of the components $\left\{ h_{\ell ;q}\right\} $ exhibit a form of phase transition with respect to their order $q$, in the high-frequency/high energy limit $\ell \rightarrow \infty $. In particular, a simple application of the Diagram Formula (\ref{Wick}), isotropy and a change of variable yield \begin{eqnarray*} Var\left\{ h_{\ell ;q}\right\} &=&\int_{\mathbb{S}^{2}\times \mathbb{S}^{2}} \mathbb{E}\left\{ H_{q}(f_{\ell }(x))H_{q}(f_{\ell }(y))\right\} dxdy \\ &=&8\pi ^{2}q!\int_{0}^{\pi }\left\{ P_{\ell }(\cos \theta )\right\} ^{q}\sin \theta d\theta \text{ ;} \end{eqnarray*} for instance, for $q=2$ we obtain exactly \begin{equation*} Var\left\{ h_{\ell ;2}\right\} =2\times 8\pi ^{2}\int_{0}^{\pi }P_{\ell }^{2}(\cos \theta )\sin \theta d\theta =16\pi ^{2}\frac{2}{2\ell +1}\text{ .} \end{equation*} Given two sequences of positive numbers $\{a_n\}, \{b_n\}$, we shall write $a_n \approx b_n$ when we have that $a_n/b_n \rightarrow c$ as $n \rightarrow \infty$, $c>0$. By means of the so-called Hilb's asymptotics (\cite{Szego}, \cite{Wig}) it is possible to show that, as $\ell \rightarrow \infty $ (\cite {MW2014}) \begin{eqnarray*} Var\left\{ h_{\ell ;q}\right\} &\approx &\frac{1}{\ell ^{2}}\times \int_{0}^{\ell \pi }\frac{1}{\psi ^{q/2}}\psi d\psi \\ &\approx &\left\{ \begin{array}{c} \ell ^{-1}\text{ for }q=2 \\ \ell ^{-2}\log \ell\text{ for }q=4 \\ \ell ^{-2}\text{ for }q=3,5,... \end{array} \right. . \end{eqnarray*}
Note that $h_{\ell ;1}\equiv 0$ for all $\ell =1,2,...,$ whereas the term for $q=3$ requires an ad-hoc argument given in \cite{M2008, MW}. As a consequence, the dominant terms in the variance expansion correspond to $q=2$ when $H_{1}(u)$ is non-zero, i.e., for $u\neq 0;$ for $u=0$ the even-order chaoses vanish and all the remaining terms contribute by the same order of magnitude with respect to $\ell $. In conclusion, we have that \begin{equation} \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2}))-\mathbb{E}\left[ \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right] =\frac{ 1}{2}H_{1}(u)\phi (u)h_{\ell ;2}+O_{p}(\sqrt{\log \ell /\ell ^{2}})\text{ , } \label{Area} \end{equation} and for $u\neq 0$ \begin{equation*} Var\left\{ \mathcal{L}_{2}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} \sim \left\{ \frac{1}{2} H_{1}(u)\phi (u)\right\} ^{2}Var\left\{ h_{\ell ;2}\right\} \text{ , as } \ell \rightarrow \infty \text{ .} \end{equation*} Because \begin{equation*} h_{\ell ;2}=\int_{\mathbb{S}^{2}}\left\{ f_{\ell }^{2}(x)-1\right\} dx=\left\Vert f_{\ell }\right\Vert _{L^{2}(\mathbb{S}^{2})}^{2}-\mathbb{E} \left[ \left\Vert f_{\ell }\right\Vert _{L^{2}(\mathbb{S}^{2})}^{2}\right] \text{ ,} \end{equation*} equation (\ref{Area}) is basically stating that the fluctuations in the excursion area for $u\neq 0$ are dominated by the fluctuations in the random norm of the eigenfunctions.
Interestingly, the same behaviour characterizes also the other Lipschitz-Killing Curvatures; for the boundary length we have the expansion \begin{equation*} 2\mathcal{L}_{1}(A_{u}(f_{\ell };\mathbb{S}^{2}))=\lim_{\varepsilon \rightarrow 0}\int_{\mathbb{S}^{2}}\left\Vert \nabla f_{\ell }(x)\right\Vert \delta _{\varepsilon }(f_{\ell }(x)-u)dx \end{equation*} which holds both $\omega $-almost surely and in $L^{2}(\Omega )$; here we write $\delta _{\varepsilon }(\cdot)=\frac{1}{2\varepsilon} \mathbb{I}_{[-\varepsilon ,\varepsilon ]}(\cdot)$. Similarly for the Euler-Poincar\'{e} Characteristic we have \begin{equation*} \mathcal{L}_{0}(A_{u}(f_{\ell };\mathbb{S}^{2}))=\lim_{\varepsilon \rightarrow 0}\int_{\mathbb{S}^{2}}\det \left\{ \nabla ^{2}f_{\ell }(x)\right\} \delta _{\varepsilon }(\nabla f_{\ell }(x))\mathbb{I} _{[u,\infty )}(f_{\ell }(x))dx\text{ .} \end{equation*}
Similar arguments can be developed, expanding the integrand function into polynomials evaluated on the random vectors $\left\{ \nabla ^{2}f_{\ell }(.),\nabla f_{\ell }(.),f_{\ell }(.)\right\}$; algebraic simplifications occur and the expansions read as follows:
\begin{thm} \label{2GKF} As $\ell \rightarrow \infty $, for $j=0,1,2$ \begin{eqnarray} &&\mathcal{L}_{j}(A_{u}(f_{\ell },\mathbb{S}^{2}))-\mathbb{E}\left[ \mathcal{ L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right] \label{Leading} \\ &=&-\frac{1}{2} \left [ \begin{matrix} 2 \\ 2-j \end{matrix} \right] u\rho _{2-j}^{\prime }(u)\left( \lambda _{\ell }/2\right) ^{(2-j)/2}\int_{\mathbb{S}^{2}}H_{2}(f_{\ell }(x))dx+R_{\ell ;j}\text{ ,} \notag \end{eqnarray} where \[ \mathbb{E}[R^2_{\ell ;j}]=o_{\ell \rightarrow \infty }(\ell^{3-2j})\text{ ;} \] as a consequence, we have also the following Variance asymptotics \[ Var\left\{ \mathcal{L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} \] \begin{equation} =\frac{1}{4}\left\{\left [ \begin{matrix} 2 \\ 2-j \end{matrix} \right] u\rho _{2-j}^{\prime }(u)\left( \lambda _{\ell }/2\right) ^{(2-j)/2}\right\} ^{2}\times \frac{32\pi ^{2}}{2\ell +1}+o_{\ell \rightarrow \infty }(\lambda _{\ell }^{2-j-1})\text{ .} \label{Variance} \end{equation} \end{thm}
Some features of the previous result are worth discussing:
\begin{itemize} \item The asymptotic behaviour of all the Lipschitz-Killing Curvatures is proportional to a sequence of scalar random variables $\left\{ h_{\ell ;2}\right\} _{\ell \in \mathbb{N}}.$ As a consequence, these geometric functionals are fully correlated in the high-energy limit $\ell \rightarrow \infty ;$
\item For the same reasons, these functionals are also fully correlated, in the high energy limit, when evaluated across different levels $u_{1},u_{2}$: for the boundary length, this correlation phenomenon was first noted by \cite{WigSur};
\item The leading terms all disappear in the ``nodal'' case $u=0$ where the variances are hence an order of magnitude smaller. This is an instance of the so-called Berry's cancellation phenomenon (\cite{Wig}), to which we shall return in the following Section. We noted before that the leading terms are proportional to the centred random norm; it is thus natural that these terms should disappear in the nodal case, which is independent of scaling factors. Note that for $j=0$ the cancellation of the leading term occurs also at $u=1$.
\end{itemize}
\begin{rem} The proof of Theorem \ref{2GKF} was given in \cite{CM2018}, in the case of the $2$-dimensional sphere $\mathbb{S}^{2}.$ However, we conjecture the result to hold as stated for spherical eigenfunctions in arbitrary dimension, see below for more details. Extensions have also been given to cover for instance the two-dimensional torus (see \cite{CMR}), for which a formula completely analogous to (\ref{2GKF}) holds. \end{rem}
Similar results can be shown to hold for other geometric functionals; let us consider for instance critical values, defined by \begin{equation*} \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2})=\#\left\{ x\in \mathbb{S} ^{2}:\nabla f_{\ell }(x)=0\text{ and }f_{\ell }(x)\geq u\right\} \text{ .} \end{equation*} The asymptotic variance of $\left\{ \mathcal{N}_{u}(f_{\ell };\mathbb{S} ^{2})\right\} _{\ell =1,2,...}$ was established in \cite{CMW}, \cite{CW}, and in particular we have \begin{eqnarray*} \mathbb{E}\left[ \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2})\right] &=&\lambda _{\ell }g_{1}(u)\text{ , } \\ g_{1}(u) &=&\frac{1}{\sqrt{2\pi }}\int_{u}^{\infty }(2e^{-t^{2}}+(t^{2}-1)e^{-t^{2}/2})dt \\ &=&u\phi (u)+\sqrt{2}(1-\Phi (\sqrt{2}u))\text{ ,} \end{eqnarray*} \begin{eqnarray*} Var\left[ \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2})\right] &=&\frac{1}{4} \lambda _{\ell }^{2}g_{2}^{2}(u)Var\left\{ \int_{\mathbb{S} ^{2}}H_{2}(f_{\ell }(x))dx\right\} +o_{\ell \rightarrow \infty }(\ell ^{3}) \\ &=&\frac{1}{4}\lambda _{\ell }^{2}g_{2}^{2}(u)\frac{2(4\pi )^{2}}{2\ell +1} +o_{\ell \rightarrow \infty }(\ell ^{3})\text{ ,} \end{eqnarray*} where \begin{equation*} g_{2}(u)=\int_{u}^{\infty }\frac{1}{\sqrt{8\pi }} e^{-3t^{2}/2}(2-6t^{2}-e^{-t^{2}}(1-4t+t^{4}))dt\text{ .} \end{equation*} Later in \cite{CM2020} it was shown that the critical values above the threshold level $u$ satisfy the asymptotic \begin{eqnarray*} &&\mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2})-\mathbb{E}\left[ \mathcal{N} _{u}(f_{\ell };\mathbb{S}^{2})\right] \\ &=&\frac{1}{2}\lambda _{\ell }g_{2}(u)\int_{\mathbb{S}^{2}}H_{2}(f_{\ell }(x))dx+o_{p}(\sqrt{Var\left[ \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2}) \right] )}\text{ ,} \end{eqnarray*} As a consequence, one has also, for all $u\neq 0,1$ the following correlation result \begin{eqnarray*} &&Corr^{2}\left\{ \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2}),\mathcal{L} _{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} \\ &:&=\frac{Cov^{2}\left\{ \mathcal{N}_{u}(f_{\ell 
};\mathbb{S}^{2}),\mathcal{L }_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} }{Var\left\{ \mathcal{N} _{u}(f_{\ell };\mathbb{S}^{2})\right\} Var\left\{ \mathcal{L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} }\rightarrow 1\text{ , as }\ell \rightarrow \infty \text{ ;} \end{eqnarray*} the value $u=1$ has to be excluded only for $j=0$. We also have that \begin{equation*} Corr^{2}\left\{ \mathcal{N}_{u_{1}}(f_{\ell };\mathbb{S}^{2}),\mathcal{N} _{u_{2}}(f_{\ell };\mathbb{S}^{2})\right\} \rightarrow 1\text{ , as }\ell \rightarrow \infty \text{ ,} \end{equation*} that is, asymptotically full correlation between the number of critical values above any two non-zero thresholds $u_{1},u_{2}.$
As for the Lipschitz-Killing Curvatures, a form of Berry's cancellation occurs at $u=0$ and $u \rightarrow \pm \infty ;$ the total number of critical points has then a lower-order variance (see \cite{CW}), as we shall discuss in the next section.
\subsection{Quantitative Central Limit Theorems}
The results reviewed in the previous subsection can be considered as following from a \emph{Reduction Principle }(see \cite{DehTaq}), where the limiting behaviour of $\left\{ \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2}), \mathcal{L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right\} $ is dominated by a deterministic function of the threshold level $u,$ times a sequence of random variables $\left\{ h_{\ell ;2}\right\} $ which do not depend on $u.$ To derive the asymptotic law of these fluctuations, it is hence enough to investigate the convergence in distribution of $\left\{ h_{\ell ;2}\right\} , $ as $\ell \rightarrow \infty $. In fact, it is possible to show a stronger result, namely a \emph{Quantitative Central Limit Theorem}; to this aim, let us recall that the Wasserstein distance between two random variables $X$ and $Y$ is defined by \begin{equation*} d_{W}(X,Y):=\sup_{h\in Lip(1)}\left\vert \mathbb{E}h(X)-\mathbb{E} h(Y)\right\vert \text{ ,} \end{equation*} where $Lip(1)$ denotes the class of Lipschitz functions with constant 1, i.e., $\left\vert h(x)-h(y)\right\vert \leq \left\vert x-y\right\vert $ for all $ x,y\in \mathbb{R}$. $d_{W}(.,.)$ defines a metric on the space of probability distributions (for more details and other examples of probability metrics, see \cite{noupebook}, Appendix C). Taking $Z\sim N(0,1)$ to be a standard Gaussian random variable, a Quantitative Central Limit theorem is defined as a result of the form \begin{equation*} \lim_{n\rightarrow \infty }d_{W}(\frac{X_{n}-\mathbb{E}X_{n}}{\sqrt{ Var(X_{n})}},Z)=0\text{ .} \end{equation*} The field of Quantitative Central Limit Theorems has been very active in the last few decades; more recently, a breakthrough has been provided by the discovery of the so-called \emph{Stein-Malliavin approach} by Nourdin-Peccati (\cite{nuape, NP09, noupebook}). 
These results entail that for sequences of random variables belonging to a Wiener-chaos, say $\mathcal{C}_q$, a quantitative central limit theorem for the Wasserstein distance can be given simply controlling the fourth-moment of $X_{n},$ as follows: \begin{equation} d_{W}(\frac{X_{n}-\mathbb{E}X_{n}}{\sqrt{Var(X_{n})}},Z)\leq \sqrt{\frac{2q-2 }{3\pi q}}\sqrt{\mathbb{E}\left[ (\frac{X_{n}-\mathbb{E}X_{n}}{\sqrt{ Var(X_{n})}})^{4}\right] -3}\text{ .} \label{FMB} \end{equation} Similar results hold for other probability metrics, for instance the Kolmogorov and Total Variation distances, see again \cite{noupebook}.
Quantitative Central Limit Theorems lend themselves to an immediate application for the sequences $\left\{ h_{\ell ;q}\right\} $ that we introduced above. It should be noted indeed that by construction all these random variables belong to the $q$-th order Wiener chaos; it is then possible to exploit (\ref{FMB}) to obtain Quantitative Central Limit Theorems for these polyspectra at arbitrary orders: their fourth moment can be computed by means of the Diagram formula. These results were first given in \cite{MW} and then refined in \cite{MR2015}, yielding the following
\begin{thm} \label{QCLT_h} As $\ell \rightarrow \infty $ \begin{equation*} d_{W} \left (\frac{h_{\ell ;q}-\mathbb{E}\left[ h_{\ell ;q}\right] }{\sqrt{Var(h_{\ell ;q})}}, Z \right )=\left\{ \begin{array}{c} O(\frac{1}{\sqrt{\ell }})\text{ for }q=2,3 \\ O(\frac{1}{\log \ell })\text{ for }q=4 \\ O(\ell ^{-1/4})\text{ for }q=5,6,... \end{array} \right. . \end{equation*} \end{thm}
Now, we have just shown that for nonzero thresholds $u\neq 0$ the Lipschitz-Killing Curvatures and the critical values are indeed proportional to a term belonging to the second-order chaos, plus a remainder that is asymptotically negligible. The following Quantitative Central Limit Theorem then follows immediately (see \cite{MW}, \cite{RossiJTP}, \cite{CM2018}).
\begin{thm} As $\ell \rightarrow \infty ,$ for $u\neq 0$ ($j=1,2)$ and for $u\neq 0,1$ (for $j=0$) we have that \begin{equation*} d_{W}(\frac{\mathcal{L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))-\mathbb{E}\left[ \mathcal{L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))\right] }{\sqrt{Var(\mathcal{ L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2}))}},Z)=O(\ell ^{-1/2})\text{ .} \end{equation*} \end{thm}
\subsection{A Higher-Dimensional Conjecture}
The results we discussed so far have been limited to random-spherical harmonics on the two-dimensional sphere $\mathbb{S}^{2}.$ Research in progress suggests however that further generalizations should hold: to this aim, let us define the set of singular points $P_{j}:=\left\{ u\in \mathbb{R} :u\rho _{j}^{\prime }(u)=0\right\} $ (for instance, $P_{0}=P_{1}=\left\{ 0\right\} ,$ $P_{2}=\left\{ 0,1\right\} ,$ $P_{3}=\left\{ 0,\pm \sqrt{3} \right\} ,...).$ Let us now consider Gaussian random eigenfunctions on the higher-dimensional unit sphere $\mathbb{S}^{d}$, e.g. \begin{equation*} \Delta _{\mathbb{S}^{d}}f_{\ell ;d}=-\lambda _{\ell ;d}f_{\ell ;d}\text{ , } \lambda _{\ell ;d}:=\ell (\ell +d-1)\text{ ;} \end{equation*} these eigenfunctions are normalized so that (see \cite{MR2015},\cite {Rossi2018}) \begin{equation*} \mathbb{E}\left[ f_{\ell ;d}\right] =0\text{ , }\mathbb{E}\left[ f_{\ell ;d}^{2}\right] =1\text{ , }\mathbb{E}\left[ f_{\ell ;d}(x)f_{\ell ;d}(y) \right] =G_{\ell ;d/2}(\left\langle x,y\right\rangle )\text{ ,} \end{equation*} where as before $G_{\ell ;d/2}(.)$ is the standardized $\ell $-th Gegenbauer polynomial of order $\frac{d}{2}$ (normalized with $G_{\ell ;d/2}(1)=1$); it is convenient to recall that \begin{equation*} G_{\ell ;d/2}^{\prime }(1)=\frac{\lambda _{\ell ;d}}{d}\text{ .} \end{equation*} We recall also that the dimension of the corresponding eigenspaces is \begin{equation*} n_{\ell ;d}=\frac{2\ell +d-1}{\ell }\binom{\ell +d-2}{\ell -1}\sim \frac{2}{ (d-1)!}\ell ^{d-1},\text{ as }\ell \rightarrow \infty \text{ .} \end{equation*} By means of Parseval's equality we have also as a consequence \begin{eqnarray*} Var\left[ \int_{\mathbb{S}^{d}}H_{2}(f_{\ell ;d}(x))dx\right] &=&\frac{ 2s_{d}^{2}}{n_{\ell ;d}}=\frac{2(d+1)^{2}\omega _{d+1}^{2}}{n_{\ell ;d}} \\ &\sim &\frac{(d+1)^{2}\omega _{d+1}^{2}(d-1)!}{\ell ^{d-1}}\text{ as }\ell \rightarrow \infty \text{ .} \end{eqnarray*} We then propose the following
\begin{conjecture} \label{H2GKF} As $\ell \rightarrow \infty ,$ for all $k=0,1,...,d$ we have that \begin{equation*} \mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))-\mathbb{E}\left[ \mathcal{L} _{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] \end{equation*} \begin{equation*} =-\frac{1}{2}\left [ \begin{matrix} d \\ k \end{matrix} \right] \rho _{d-k}^{\prime }(u)u\left( \frac{\lambda _{\ell ;d}}{d}\right) ^{(d-k)/2}\int_{\mathbb{S}^{d}}H_{2}(f_{\ell ;d}(x))dx+o_{p}(\sqrt{\ell ^{d-2k+1}})\text{ .} \end{equation*} \end{conjecture}
\begin{rem} An immediate consequence of this conjecture would be \begin{eqnarray*} \frac{\mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))-\mathbb{E}\left[ \mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] }{\sqrt{Var\left[ \mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] }} &=&\frac{h_{\ell ;2}}{\sqrt{Var\left[ h_{\ell ;2}\right] }}+o_{p}(1)\text{ ,} \\ h_{\ell ;2} &=&\int_{\mathbb{S}^{d}}H_{2}(f_{\ell ;d}(x))dx\text{ .} \end{eqnarray*} \end{rem}
\begin{rem} The remainder term in Conjecture (\ref{H2GKF}) is expected to be $O(\sqrt{\ell ^{d-2k}}),$ in the $L^{2}(\Omega )$ sense. \end{rem}
Three further consequences of Conjecture \ref{H2GKF} would be the following:
\begin{itemize} \item \emph{(Variance Asymptotics)} As $\ell \rightarrow \infty ,$ for all $k=0,1,...,d$ and for non-singular points $u \notin P_{d-k}$, \begin{equation*} Var\left\{ \mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right\} \end{equation*} \begin{equation*} =\frac{H_{d-k}^{2}(u)\phi ^{2}(u)u^{2}}{(2\pi d)^{(d-k)}}\frac{d!}{(d-k)!k!} \frac{\omega _{d}^{2}\omega _{d+1}^{2}}{\omega _{k}^{2}\omega _{d-k}^{2}} \frac{(d+1)^{2}\lambda _{\ell ;d}^{d-k}}{2n_{\ell ;d}}+o(\ell ^{d-2k+1}) \text{ .} \end{equation*}
\item \emph{(Central Limit Theorem) }As $\ell \rightarrow \infty ,$ for all $k=0,1,...,d$ and for non-singular points $u \notin P_{d-k}$, \begin{equation*} d_{W}\left( \frac{\mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))-\mathbb{E} \left[ \mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] }{\sqrt{Var \left[ \mathcal{L}_{k}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right] }},Z\right) =o(1)\text{ ,} \end{equation*} where $Z \sim \mathcal{N}(0,1)$. \item \emph{(Correlation Asymptotics)} As $\ell \rightarrow \infty ,$ for all $k_{1},k_{2}=0,1,...,d$ and all $u_{1},u_{2}$ such that $ u_{1}u_{2}H_{d-k_{1}}(u_{1})H_{d-k_{2}}(u_{2})\neq 0$ \begin{equation*} \lim_{\ell \rightarrow \infty }Corr^{2}\left( \mathcal{L}_{k_{1}}(A_{u}(f_{\ell };\mathbb{S}^{d})),\mathcal{L}_{k_{2}}(A_{u}(f_{\ell };\mathbb{S}^{d}))\right) =1\text{ .} \end{equation*} \end{itemize}
The driving rationale behind these conjectures is the \emph{ansatz} that the asymptotic variance of the geometric functionals should be governed by fluctuations in the random $L^{2}(\mathbb{S}^{d})$ norm of the eigenfunctions, for non-singular points $u\notin P_{j}$. In this sense, we believe the result has even greater applicability, for instance to cover combinations of random eigenfunctions defined on more general submanifolds of $\mathbb{R}^{n},$ such as \emph{Berry Random Waves} or "short windows" averages of isotropic random eigenfunctions on general manifolds (see \cite{Berry 1977}, \cite{Berry 2002}, \cite{Zel}, \cite{npr}, \cite{Duerickx}, \cite{DalmaoEstradeLeon}). These issues are the object of currently ongoing research.
\section{Nodal Cases: Berry Cancellation and the Role of The Fourth-Order Chaos}
The previous Section has discussed the behaviour of geometric functionals for non-zero threshold levels $u\neq 0;$ under isotropy, it has been shown that all these functionals are asymptotically proportional, in the $ L^{2}(\Omega )$ sense, to a single random variable representing the (centred) random $L^{2}(\mathbb{S}^{2})$-norm of the eigenfunction. This dominant term has been shown to disappear in the nodal case $u=0$ (and, more generally, for $\rho _{d-k}^{\prime }(u)u=0,$ i.e., for the singular points $ u\in P_{j}$); the asymptotic behaviour must then be derived by a different route in these circumstances.
As mentioned above, the first paper to investigate the variance of the nodal length for random spherical harmonics was the seminal work by Igor Wigman (\cite{Wig}), which made rigorous an \emph{ansatz} by Michael Berry in the Physical literature (\cite{Berry 2002}). In particular, by using a higher-order version of the Expectation Metatheorem (see again \cite {adlertaylor}, \cite{azaiswschebor}) the following representation for the second moment of the nodal length can be given: \begin{equation*} \mathbb{E}\left[ \left\{ Len(f_{\ell };\mathbb{S}^{2})\right\}^2 \right] \end{equation*} \begin{equation*} =\int_{\mathbb{S}^{2}\times \mathbb{S}^{2}}\mathbb{E}\left[ \left. \left\Vert \left\{ \nabla f_{\ell }(t_{1})\right\} \right\Vert \left\Vert \left\{ \nabla f_{\ell }(t_{2})\right\} \right\Vert \right\vert f_{\ell }(t_{1})=0,f_{\ell }(t_{2})=0\right] \end{equation*} \[ \times p_{f_{\ell }(t_{1}),f_{\ell }(t_{2})}(0,0)\sigma _{g}(dt_{1})\sigma _{g}(dt_{2})\text{ }, \] where as before we write $Len(f_{\ell };\mathbb{S}^{2})=2\mathcal{L}_{1}(A_{0}(f_{\ell };\mathbb{S}^{2}))$ for the nodal length. The integrand in the previous formula is denoted the \emph{2-point correlation function of the nodal length }and generalizes the Kac-Rice argument to second-order moments; analogous generalizations are possible for the other geometric functionals we considered and for higher-order moments as well (see \cite{adlertaylor}). By means of a challenging and careful expansion of this correlation function and a deep investigation of its behaviour for $\ell \rightarrow \infty ,$ Wigman was able to investigate the asymptotic for the variance of the nodal length and to show that (\ref{Wig10}) holds.
A natural question which was investigated shortly after this seminal paper was the possibility to derive the asymptotic variances of nodal statistics, and further characterizations such as the law of the asymptotic fluctuations, in terms of the Wiener-Chaos expansions that we discussed in the previous Section. The first efforts were devoted to the analysis of the ``nodal area'' $\mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2})),$ for which it is easily shown that all even-order terms vanish at $u=0;$ from (\ref {VarDefect}) we are then left with (see \cite{MW2014}) \begin{equation*} Var\left\{ \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right\} =\frac{1 }{\ell ^{2}}\sum_{q=1}^{\infty }\frac{c_{2q+1}}{2\pi q!}H_{2q}^{2}(0)+o(\ell ^{-2})\text{ ,} \end{equation*} where \begin{eqnarray*} c_{2q+1} &=&\lim_{\ell \rightarrow \infty }\ell ^{2}\int_{0}^{\pi }P_{\ell }^{2q+1}(\cos \theta )\sin \theta d\theta \\ &=&\int_{0}^{\infty }J_{0}^{2q+1}(\psi )\psi d\psi \text{ , }J_{0}(\psi ):=\sum_{k=0}^{\infty }\frac{(-1)^{k}(\psi /2)^{2k}}{(k!)^{2}}\text{ .} \end{eqnarray*} The computation of the variance and the results in Theorem \ref{QCLT_h} lead easily also to a Central Limit Theorem, which was given first in \cite {MW} and then extended to higher dimensions by \cite{RossiJTP}.
\begin{thm} (\cite{MW}) As $\ell \rightarrow \infty $ \begin{equation*} d_{W}(\frac{\mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))-\mathbb{E}\left[ \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right] }{\sqrt{Var\left\{ \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right\} }},Z)=o(1)\text{ ,} \end{equation*} and hence \begin{equation*} \frac{\mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))-\mathbb{E}\left[ \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right] }{\sqrt{Var\left\{ \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right\} }}\rightarrow _{d}\mathcal{N}(0,1)\text{ .} \end{equation*} \end{thm}
The proof of the previous result is standard; in short, the idea is to write \begin{equation*} \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))-\mathbb{E}\left[ \mathcal{L} _{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right] =\sum_{k=1}^{M}\frac{(-1)^{2k+1} }{(2k+1)!}H_{2k}(u)\phi (u)h_{\ell ;2k+1}+R_{M}\text{ ,} \end{equation*} where the remainder term is such that, as $M\rightarrow \infty ,$ \begin{equation*} R_{M}=\sum_{k=M+1}^{\infty }\frac{(-1)^{2k+1}}{(2k+1)!}H_{2k}(u)\phi (u)h_{\ell ;2k+1}=o_{p}(\sqrt{Var\left\{ \mathcal{L}_{2}(A_{0}(f_{\ell };\mathbb{S}^{2}))\right\} })\text{ .} \end{equation*} It is then enough to show that the Central Limit Theorem holds for $M$ (sufficiently large but) finite; this can be achieved by an application of the multivariate Fourth Moment Theorem to the terms $(h_{\ell ;3},...,h_{\ell ;2M+1})$ (see \cite{noupebook}). It should be noted that in the case of the Defect the limiting behaviour depends on the full sequence $\left\{ h_{\ell ;2k+1}\right\} _{k=1,2,...};$ this is due to the exact disappearance of the two natural candidates to be leading terms, that is, $\left\{ h_{\ell ;2}\right\} $ and $\left\{ h_{\ell ;4}\right\} ,$ both of whose coefficients vanish for $u=0.$
It is thus even more remarkable that for the nodal lines the situation simplifies drastically, to yield the following result.
\begin{thm} (\cite{MRW}) \label{nodal} As $\ell \rightarrow \infty $ \begin{equation} Len(f_{\ell };\mathbb{S}^{2})-\mathbb{E}\left[ Len(f_{\ell };\mathbb{S}^{2}) \right] =-\frac{1}{4}\sqrt{\frac{\lambda _{\ell }}{2}}\frac{1}{4!}h_{\ell ;4}+o_{p}(\sqrt{Var\left\{ h_{\ell ;4}\right\} })\text{ ,} \label{MRW} \end{equation} and hence, in view of (\ref{QCLT_h}) \begin{equation*} d_{W}(\frac{Len(f_{\ell };\mathbb{S}^{2})-\mathbb{E}\left[ Len(f_{\ell };\mathbb{S}^{2})\right] }{\sqrt{Var\left\{ Len(f_{\ell};\mathbb{S}^{2})\right\} }},Z)=o(1)\text{ .} \end{equation*} \end{thm}
The most notable aspect of Theorem \ref{nodal} is that the limiting behaviour of nodal lines is asymptotically fully correlated with the sequence of random variables $\left\{ h_{\ell ;4}\right\}$, so that in principle it would be possible to ``predict'' nodal lengths by simply computing the integral of a fourth-order polynomial of the eigenfunctions over the sphere.
A natural question that arises is the structure of correlation among functionals evaluated at different thresholds and those considered for the nodal case $u=0$. Focussing for instance on the boundary length, it is immediate to understand that the latter, which is dominated by the second order chaos term $\left\{ h_{\ell ;2\text{ }}\right\}$ when $u\neq 0,$ must be independent from the nodal length, which is asymptotically proportional to $\left\{ h_{\ell ;4}\right\} .$ A more refined analysis, however, should take into account the fluctuations of the boundary length when the effects of the random norm $\left\Vert f_{\ell }\right\Vert _{L^{2}(\mathbb{S}^{2})}$ is subtracted, that is, dropping the second-order chaos term from the Wiener expansion. This corresponds to the evaluation of the so-called partial correlation coefficients $Corr^{\ast }$, for which it was shown in \cite {MR21} that \begin{equation*} \lim_{\ell \rightarrow \infty }Corr^{\ast }(Len(f_{\ell };\mathbb{S}^{2}), \mathcal{L}_{1}(A_{u}(f_{\ell };\mathbb{S}^{2})))=1\text{ .} \end{equation*} More explicitly, when compensating the effect of random norm fluctuations the boundary length at any threshold $u\neq 0$ can be fully predicted on the basis of the knowledge of the nodal length, up to a remainder term which is asymptotically negligible in the limit $\ell \rightarrow \infty .$ It is interesting to note that a similar phenomenon occurs also for the total number of critical points, for which (building on earlier computations by \cite{CW}) it was shown in (\cite{CM2021}) that
\begin{equation*} \mathcal{N}_{-\infty }(f_{\ell };\mathbb{S}^{2})-\mathbb{E}\left[ \mathcal{N} _{-\infty }(f_{\ell };\mathbb{S}^{2})\right] =-\frac{\lambda _{\ell }}{ 2^{3}3^{2}\sqrt{3}\pi }h_{\ell ;4}+o_{p}(\ell ^{2}\log \ell )\ ; \end{equation*} as a consequence, the nodal length of random spherical harmonics and the number of their critical points are perfectly correlated in the high-energy limit: \begin{equation*} \lim_{\ell \rightarrow \infty }Corr^{2}(Len(f_{\ell };\mathbb{S}^{2}), \mathcal{N}_{-\infty }(f_{\ell };\mathbb{S}^{2}))=1\text{ .} \end{equation*} Let us now denote by $Len^{\ast }(u)$ the boundary length at level $u$ after the fluctuations induced by the random norm have been subtracted (e.g., after removing its projection on the second-order chaos); moreover, for brevity's sake we write \begin{eqnarray*} \mathcal{L}_{j}(A_{u}(f_{\ell };\mathbb{S}^{2})) &=&\mathcal{L}_{j}(u)\text{ , }j=0,1,2\text{ ,} \\ \mathcal{N}_{u}(f_{\ell };\mathbb{S}^{2}) &=&\mathcal{N}_{u}\text{ , }Len(f_{\ell };\mathbb{S}^{2})=Len(0)\text{ ,} \end{eqnarray*} so that $\mathcal{N}_{-\infty }$ is the total number of critical points and $ \mathcal{L}_{2}(0)$ is the excursion area for $u=0.$ The correlation results that we discussed so far can be summarized in the following table; here, we denote by $u_{1},u_{2}\neq 0,1$ any two non-singular threshold values.
\begin{center} The limiting value of $Corr^{2}(.,.),$ as $\ell \rightarrow \infty $
\begin{tabular}{llllllll} & $\mathcal{L}_{j}(u_{1})$ & $\mathcal{L}_{j}(u_{2})$ & $Len(0)$ & $ Len^{\ast }(u)$ & $\mathcal{L}_{2}(0)$ & $\mathcal{N}_{u}$ & $\mathcal{N} _{-\infty }$ \\ $\mathcal{L}_{j}(u_{1})$ & 1 & 1 & 0 & 0 & 0 & 1 & 0 \\ $\mathcal{L}_{j}(u_{2})$ & 1 & 1 & 0 & 0 & 0 & 1 & 0 \\ $Len(0)$ & 0 & 0 & 1 & 1 & 0 & 0 & 1 \\ $Len^{\ast }(u)$ & 0 & 0 & 1 & 1 & 0 & 0 & 1 \\ $\mathcal{L}_{2}(0)$ & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ $\mathcal{N}_{u}$ & 1 & 1 & 0 & 0 & 0 & 1 & 0 \\ $\mathcal{N}_{-\infty }$ & 0 & 0 & 1 & 1 & 0 & 0 & 1 \end{tabular} $.$ \end{center}
\section{Eigenfunctions on Different Domains}
For brevity and simplicity's sake, this survey has focussed only on the behaviour of random eigenfunctions on the sphere. Of course, as mentioned in the Introduction this is just a special case of a much broader research area, including for instance eigenfunctions on $\mathbb{R}^{d}$ and on the standard flat torus $\mathbb{T}^{d}:=\mathbb{R}^d/\mathbb{Z}^d$. We do not even attempt to do justice to these developments, but it is important to mention some of them which are particularly close to the results we discussed for $\mathbb{S}^{2}$.
\subsection{Eigenfunctions on the Torus: Arithmetic Random Waves}
Eigenfunctions on the torus were first introduced in \cite{RW08} and have then been studied by several other authors, see for instance \cite {Cam19,granville,KKW,Maff,MPRW2015,Rudnick,RudnickYesha} and the references therein. In dimension $2$ these eigenfunctions (\emph{Arithmetic Random Waves }) are defined by the equations \begin{equation*} \Delta _{\mathbb{T}^{2}}f_{n}+E_{n}f_{n}=0\text{ , }E_{n}=4\pi n\text{ , } n=a^{2}+b^{2}\text{ ,} \end{equation*} for $a,b\in \mathbb{Z};$ the dimension of the $n$-th eigenspace is $\mathcal{ N}_{n}:=Card\left\{ a,b\in \mathbb{Z}:a^{2}+b^{2}=n\right\} $, while the expected value of nodal lengths is (\cite{RW08}) \begin{equation*} \mathbb{E}\left[ Len(f_{n};\mathbb{T}^{2})\right] =\frac{\sqrt{E_{n}}}{2 \sqrt{2}}\text{ .} \end{equation*} A major breakthrough was then obtained with the derivation of the variance in \cite{KKW}. In this paper, the authors introduce a probability measure on $\mathbb{S}^{1}$ defined by \begin{equation*} \mu _{n}(.):=\frac{1}{\mathcal{ N}_{n}}\sum_{a,b:a^{2}+b^{2}=n}\delta _{(a,b)/\sqrt{n}}(\cdot) \text{ ,} \end{equation*} $\delta _{(a,b)/\sqrt{n}}(.)$ denoting the Dirac measure; its $k$-th order Fourier coefficients are defined by $\widehat{\mu }_{n}(k):=\int_{\mathbb{S} ^{1}}\exp (ik\theta )\mu _{n}(d\theta )$. In \cite{KKW} it is then shown that the variance of nodal lengths has a non-universal behaviour and is proportional to \begin{equation*} Var\left\{ Len(f_{n};\mathbb{T}^{2})\right\} =\frac{1+\widehat{\mu } _{n}(4)^{2}}{512}\frac{E_{n}}{\mathcal{N}_{n}^{2}}+o\left(\frac{E_{n}}{\mathcal{N}_{n}^{2}}\right)\text{ , as }n\rightarrow \infty \text{ s.t. }\mathcal{ N}_{n} \rightarrow \infty . \end{equation*} It was later shown by \cite{MPRW2015} that the behaviour of $Len(f_{n};\mathbb{T} ^{2})$ is dominated by its fourth-order chaos component, similarly to what we observed above for random spherical harmonics (the result on the torus was actually established earlier than the corresponding case for the sphere). 
More precisely, we have that \begin{equation*} Len(f_{n};\mathbb{T}^{2})-\mathbb{E}[Len(f_{n};\mathbb{T}^{2})]=\sum_{q=2}^{ \infty }\text{Proj}\left[ \left. Len(f_{n};\mathbb{T}^{2})\right\vert 2q \right] \end{equation*} \begin{equation*} =\text{Proj}\left[ \left. Len(f_{n};\mathbb{T}^{2})\right\vert 4\right] +o_{p}(\sqrt{Var\left\{ Len(f_{n};\mathbb{T}^{2})\right\} })\text{ ,} \end{equation*} where Proj$\left[ \left. .\right\vert q\right] $ denotes projection on the $q $-th order chaos. Contrary to what we observed for the case of the sphere, here it is not possible to express the fourth-order chaos as a polynomial functional of the random eigenfunctions $\left\{ f_{n}\right\} $ alone. Moreover, the limiting distribution is non-Gaussian and non-universal, i.e. it depends on the asymptotic behaviour of $ \lim_{j\rightarrow \infty }\widehat{\mu }_{n_{j}}(4)$ which varies along different subsequences $\left\{ n_{j}\right\} _{j=1,2,...}$ (the attainable measures for the weak-convergence of the sequences $\left\{ \mu _{n_{j}}(.)\right\} _{n\in \mathbb{N}}$ have been investigated by \cite {KKW,KW}). Further results in this area include \cite{Cam19}, \cite{Not} for arithmetic random waves in higher dimensions and \cite{WigYesha} for the excursion area on subdomains of $\mathbb{T}^{2};$ as mentioned earlier, an extension of Theorem \ref{2GKF} to the torus has been given in \cite{CMR}. It should be noted that Arithmetic Random Waves can be viewed as an instance of random trigonometric polynomials, whose zeroes have been studied, among others, by \cite{Angst}, \cite{Bally}.
\subsection{The Euclidean Case: Berry's Random Waves}
Spherical harmonics on the sphere $\mathbb{S}^2$ are known to exhibit a scaling limit, i.e. after a change of coordinates they converge locally to a Gaussian random process on $\mathbb{R}^{2}$ which is isotropic, zero mean and has covariance function \begin{equation*} \mathbb{E}\left[ f(x)f(y)\right] =J_{0}(2\pi \left\Vert x-y\right\Vert ), \text{ }x,y\in \mathbb{R}^{2},\text{ }J_{0}(z):=\sum_{k=0}^{\infty }\frac{ (-1)^{k}z^{2k}}{(k!)^{2}2^{2k}}\text{ ;} \end{equation*} here $J_{0}(.)$ corresponds to the standard Bessel functions, for which the following scaling asymptotics holds: \begin{equation*} P_{\ell }(\cos \frac{\psi }{\ell })\rightarrow _{\ell \rightarrow \infty }J_{0}(\psi )\text{ },\text{ }\psi \in \mathbb{R}\text{ }. \end{equation*} The behaviour of nodal lines $\mathcal{L}_{E}(f)=\left\{ x\in \mathbb{R} ^{2}:f(x)=0,\left\Vert x\right\Vert <2\pi \sqrt{E}\right\} $ can then be studied in the asymptotic regime $E\rightarrow \infty ;$ this is indeed the physical setting under which Berry first investigated cancellation phenomena in his pioneering paper \cite{Berry 2002}. The topology of nodal sets for Berry's random waves was studied by \cite{nazarov}, \cite{Sarnak}, \cite{CH} and others. 
Concerning nodal lengths, a (Quantitative) Central Limit Theorem was established in \cite{npr}, where intersections of independent random waves were also investigated; more recently, \cite{Vidotto21} proved a result analogous to Theorem \ref{nodal}, namely that, as $E \rightarrow \infty$ \[ \mathcal{L}_{E}(f)-\mathbb{E}\left[\mathcal{L}_{E}(f)\right] \] \begin{equation} =-\frac{1}{4}\frac{2\pi }{4!}\sqrt{\frac{E}{2}} \int_{\left\Vert x\right\Vert <2\pi \sqrt{E}}H_{4}(f(x))dx+o_{p}(\sqrt{ Var\left\{ \mathcal{L}_{E}(f)\right\} })\text{ .} \label{vidotto} \end{equation} We expect that results analogous to (\ref{MRW}) and (\ref{vidotto}) will hold for more general Riemannian waves on two-dimensional manifolds \cite{Zel}; extensions to random waves in $\mathbb{R}^{3}$ have been studied, among others, by \cite{DalmaoEstradeLeon}, but in these higher-dimensional settings it is no longer the case that nodal volumes are dominated by a single chaotic component.
\subsection{Shrinking Domains}
As a final issue, we recall how some of the previous results can be extended to shrinking subdomains of the torus and of the sphere. In this respect, a surprising result was derived in \cite{BMW} concerning the asymptotic behaviour of the nodal length on a suitably shrinking subdomain $B_{n}\subset \mathbb{T}^{2};$ indeed it was shown that, for density one subsequences in $n$, \begin{equation*} \lim_{n\rightarrow \infty }Corr(Len(\mathbb{T}^{2},f_{n}),Len(\mathbb{T}^{2}\cap B_{n},f_{n}))=1\text{ ,} \end{equation*} entailing that the behaviour of the nodal length on the whole torus is fully determined by its behaviour on any shrinking disk $B_{n},$ provided the radius of this disk is not smaller than $n^{-1/2+\varepsilon }$ for some $\varepsilon >0.$ Of course, the asymptotic variance and distribution of the nodal length in this shrinking domain are then immediately shown to be the same as those for the full torus, up to a normalizing factor. Interestingly, the same phenomenon does not occur on the sphere, where on the contrary it was shown in \cite{Todino20} that \begin{equation*} \lim_{\ell \rightarrow \infty }Corr(Len(\mathbb{S}^{2},f_{\ell }),Len(\mathbb{S}^{2}\cap B_{\ell },f_{\ell }))=0\text{ ,} \end{equation*} so that the nodal length when evaluated on a shrinking subset $B_{\ell }$ of the two-dimensional sphere is actually asymptotically independent from its global value; in the same paper, it is indeed shown that (\ref{MRW}) generalizes to \begin{eqnarray} &&Len(\mathbb{S}^{2}\cap B_{\ell },f_{\ell })-\mathbb{E}\left[ Len(\mathbb{S}^{2}\cap B_{\ell },f_{\ell })\right] \label{todino} \\ &=&-\frac{1}{4}\sqrt{\frac{\lambda _{\ell }}{2}}\frac{1}{4!}h_{\ell ;4}(B_{\ell })+o_{p}(\sqrt{Var\left\{ h_{\ell ;4}(B_{\ell })\right\} })\text{ ,} \label{Todino2020} \end{eqnarray} \begin{equation*} h_{\ell ;4}(B_{\ell })=\int_{B_{\ell }}H_{4}(f_{\ell }(x))dx\text{ ;} \end{equation*} from this characterization, a Central Limit Theorem follows easily along the same lines 
that we discussed in the previous Section, see \cite{Todino20} for more details and discussion.
\begin{acknowledgments} I am grateful to Valentina Cammarota, Maurizia Rossi, Anna Paola Todino and an anonymous referee for a number of comments and suggestions on an earlier draft. This research was partly supported by the MIUR Departments of Excellence Program \emph{Math@Tov}. \end{acknowledgments}
\small
\end{document}
\begin{document} \title{Bell-Type Quantum Field Theories} \begin{abstract}
In \cite{BellBeables} John~S.~Bell proposed how to associate particle trajectories with a lattice quantum field theory, yielding what can be regarded as a $|\Psi|^2$-distributed Markov process on the appropriate configuration space. A similar process can be defined in the continuum, for more or less any regularized quantum field theory; such processes we call Bell-type quantum field theories. We describe methods for explicitly constructing these processes. These concern, in addition to the definition of the Markov processes, the efficient calculation of jump rates, how to obtain the process from the processes corresponding to the free and interaction Hamiltonian alone, and how to obtain the free process from the free Hamiltonian or, alternatively, from the one-particle process by a construction analogous to ``second quantization.'' As an example, we consider the process for a second quantized Dirac field in an external electromagnetic field.
\noindent PACS numbers: 03.65.Ta, 02.50.-r, 03.70.+k \end{abstract}
\tableofcontents
\section{Introduction}
The aim of this paper is to present methods for constructing Bell-type QFTs. The primary variables of Bell-type QFTs are the positions of the particles. Bell suggested a dynamical law, governing the motion of the particles, in which the Hamiltonian $H$ and the state vector $\Psi$ determine certain jump rates \cite{BellBeables}. Since these rates are in a sense the smallest choice possible, we call them the \emph{minimal jump rates}. By construction, they preserve the
$|\Psi|^2$ distribution. We assume a well-defined Hamiltonian as given; to achieve this, it is often necessary to introduce cut-offs. We shall assume this has been done where needed. In cases in which one has to choose between several possible position observables, for example because of issues related to the Newton--Wigner operator \cite{NewtonWigner,Haag}, we shall also assume that a choice has been made.
Bell-type QFTs can also be regarded as extensions of Bohmian mechanics. When one tries to incorporate particle creation and annihilation into Bohmian mechanics, one is naturally led to models like the one we presented in \cite{crea1}. The quantum equilibrium distribution, playing a central role in Bohmian mechanics, then more or less dictates that creation of a particle occurs in a stochastic manner---just as in Bell's model.
Bell-type QFTs have in common a good deal of mathematical structure, which we will elucidate. The paper is organized as follows. In Section 2 we introduce all the main ideas and reasonings; a superficial reading should focus on this section. Some examples of Bell-type QFTs are presented in Section 3. (Simple examples of minimal jump rates can be found in \cite{crea2A}.) In Section 4 we describe the construction of a process for the free Hamiltonian based on ``second quantization.'' In Section 5 we sketch the concept of the ``minimal process'' associated with a Hamiltonian $H$. Section 6 concerns some properties of Bell-type QFTs that derive from the construction methods developed in this paper. In Section 7 we conclude.
\section{Ingredients of Bell-Type Quantum Field Theories} \label{sec:making}
\subsection{Review of Bohmian Mechanics and Equivariance}
Bohmian mechanics \cite{Bohm52,DGZ,Stanford} is a non-relativistic theory about $N$ point particles moving in 3-space, according to which the configuration $Q=({\boldsymbol Q}_1,\ldots,{\boldsymbol Q}_N)$ evolves according to\footnote{ The masses $m_k$ of the particles have been absorbed in the Riemann metric $g_{\mu\nu}$ on configuration space $\mathbb{R}^{3N}$, $g_{ia,jb} = m_i \, \delta_{ij}\, \delta_{ab}$, $i,j=1\ldots N, \: a,b=1,2,3$, and $\nabla$ is the gradient associated with $g_{\mu\nu}$, i.e., $\nabla =(m_1^{-1}\nabla_{{\boldsymbol q}_1}, \dots, m_N^{-1}\nabla_{{\boldsymbol q}_N})$.} \begin{equation}\label{Bohm}
\frac{dQ}{dt} = v(Q)\,,\qquad
v=\hbar \, \mathrm{Im} \, \frac{\Psi^* \nabla\Psi} {\Psi^* \, \Psi}\,. \end{equation} $\Psi=\Psi_t(q)$ is the wave function, which evolves according to the Schr\"odinger equation \begin{equation}\label{Seq}
i\hbar\frac{\partial\Psi}{\partial t} = H \Psi\,, \end{equation} with \begin{equation}\label{Hamil}
H= -\frac{\hbar^2}{2} \Delta + V \end{equation} for spinless particles, with $\Delta = \,\mathrm{div}\,\nabla$. For particles with spin, $\Psi$ takes values in the appropriate spin space $\mathbb{C}^k$, $V$ may be matrix valued, and numerator and denominator of
\eqref{Bohm} have to be understood as involving inner products in spin space. The secret of the success of Bohmian mechanics in yielding the predictions of standard quantum mechanics is the fact that the configuration $Q_t$ is $|\Psi_t|^2$-distributed in configuration space at all times $t$, provided that the initial configuration $Q_0$ (part of the Cauchy data of the theory) is so distributed. This property, called \emph{equivariance} in \cite{DGZ}, suffices for empirical agreement between \emph{any} quantum theory (such as a QFT) and \emph{any} version thereof with additional (often called ``hidden'') variables $Q$, provided the outcomes of all experiments are registered or recorded in these variables. That is why equivariance will be our guide for obtaining the dynamics of the particles.
The equivariance of Bohmian mechanics follows immediately from comparing the continuity equation for a probability distribution $\rho$ associated with (\ref{Bohm}), \begin{equation}\label{master}
\frac{\partial \rho}{\partial t} = -\,\mathrm{div}\,(\rho v)\,, \end{equation}
with the equation satisfied by $|\Psi|^2$ which follows from (\ref{Seq}), \begin{equation}\label{continuity1}
\frac{\partial |\Psi|^2}{\partial t}(q,t) = \frac{2}{\hbar} \, \mathrm{Im}
\, \Big[ \Psi^*(q,t)\, (H\Psi)(q,t) \Big]\,. \end{equation} In fact, it follows from (\ref{Hamil}) that \begin{equation}\label{JJJ}
\frac{2}{\hbar} \, \mathrm{Im} \, \Big[ \Psi^*(q,t)\, (H\Psi)(q,t) \Big]=
-\,\mathrm{div}\,\Big[\hbar \, \mathrm{Im} \, \Psi^*(q,t) \nabla\Psi(q,t) \Big] \end{equation} so, recalling (\ref{Bohm}), one obtains that \begin{equation}\label{continuity2}
\frac{\partial |\Psi|^2}{\partial t} = -\,\mathrm{div}\,(|\Psi|^2 v)\,, \end{equation}
and hence that if $\rho_t=|\Psi_t|^2$ at some time $t$ then
$\rho_t=|\Psi_t|^2$ for \emph{all} times. Equivariance is an expression of the compatibility between the Schr\"odinger evolution for the wave function and the law, such as (\ref{Bohm}), governing the motion of the actual configuration. In \cite{DGZ}, in which we were concerned only with the Bohmian dynamics \eqref{Bohm}, we spoke of the distribution $|\Psi|^2$ as being equivariant. Here we wish to find processes for which we have equivariance, and we shall therefore speak of equivariant processes and motions.
\subsection{Equivariant Markov Processes}
The study of example QFTs like that of \cite{crea1} has led us to the consideration of Markov processes as candidates for the equivariant motion of the configuration $Q$ for Hamiltonians $H$ more general than those of the form \eqref{Hamil}.
Consider a Markov process $Q_t$ on configuration space. The transition probabilities are characterized by the \emph{backward generator} $L_t$, a (time-dependent) linear operator acting on functions $f$ on configuration space: \begin{equation}\label{backgenerator}
L_t f(q) = \frac{d}{ds} \mathbb{E} (f(Q_{t+s})|Q_t = q) \end{equation} where $d/ds$ means the right derivative at $s=0$ and
$\mathbb{E}(\,\cdot\,|\,\cdot\,)$ denotes the conditional expectation. Equivalently, the transition probabilities are characterized by the \emph{forward generator} $\mathscr{L}_t$ (or, as we shall simply say, \emph{generator}), which is also a linear operator but acts on (signed) measures on the configuration space. Its defining property is that for every process $Q_t$ with the given transition probabilities, the distribution $\rho_t$ of $Q_t$ evolves according to \begin{equation}\label{rhoL}
\frac{\partial \rho_t}{\partial t} = \mathscr{L}_t \rho_t\,. \end{equation} $\mathscr{L}_t$ is the dual of $L_t$ in the sense that \begin{equation}\label{generatorduality}
\int f(q) \, \mathscr{L}_t \rho(dq) = \int L_t f(q) \, \rho(dq)\,. \end{equation} We will use both $L_t$ and $\mathscr{L}_t$, whichever is more convenient. We will encounter several examples of generators in the subsequent sections.
We can easily extend the notion of equivariance from deterministic to Markov processes. Given the Markov transition probabilities, we say that
\emph{the $|\Psi|^2$ distribution is equivariant} if and only if for all times $t$ and $t'$ with $t<t'$, a configuration $Q_t$ with distribution
$|\Psi_t|^2$ evolves, according to the transition probabilities, into a configuration $Q_{t'}$ with distribution $|\Psi_{t'}|^2$. In this case, we also simply say that the transition probabilities are
\emph{equivariant}, without explicitly mentioning $|\Psi|^2$. Equivariance is equivalent to \begin{equation}\label{genequivariance}
\mathscr{L}_t |\Psi_t|^2 = \frac{\partial |\Psi_t|^2}{\partial t} \end{equation} for all $t$. When \eqref{genequivariance} holds (for a fixed $t$) we also say that $\mathscr{L}_t$ is an \emph{equivariant generator} (with respect to $\Psi_t$ and $H$). Note that this definition of equivariance agrees with the previous meaning for deterministic processes.
We call a Markov process $Q$ \emph{equivariant} if and only if for every
$t$ the distribution $\rho_t$ of $Q_t$ equals $|\Psi_t|^2$. For this to be the case, equivariant transition probabilities are necessary but not sufficient. (While for a Markov process $Q$ to have equivariant transition probabilities amounts to the property that if $\rho_t =
|\Psi_t|^2$ for one time $t$, where $\rho_t$ denotes the distribution of
$Q_t$, then $\rho_{t'} = |\Psi_{t'}|^2$ for every $t'>t$, according to our definition of an equivariant Markov process, in fact $\rho_t =
|\Psi_t|^2$ for all $t$.) However, for equivariant transition probabilities there exists a unique equivariant Markov process.
The crucial idea for our construction of an equivariant Markov process is to note that \eqref{continuity1} is completely general, and to find a generator $\mathscr{L}_t$ such that the right hand side of (\ref{continuity1}) can be read as the action of $\mathscr{L}$ on $\rho
= |\Psi|^2$, \begin{equation}\label{mainequ}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H\Psi = \mathscr{L} |\Psi|^2\,. \end{equation} We shall implement this idea beginning in Section \ref{sec:mini1}, after a review of jump processes and some general considerations. But first we shall illustrate the idea with the familiar case of Bohmian mechanics.
For $H$ of the form \eqref{Hamil}, we have (\ref{JJJ}) and hence that \begin{equation}\label{mequ}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^*H\Psi = -\,\mathrm{div}\,\left(\hbar \, \mathrm{Im} \,
\Psi^* \nabla\Psi \right) = -\,\mathrm{div}\,\left( |\Psi|^2 \hbar \, \mathrm{Im} \,
\frac{\Psi^* \nabla\Psi} {|\Psi|^2} \right) \,. \end{equation} Since the generator of the (deterministic) Markov process corresponding to the dynamical system $dQ/dt=v(Q)$ given by a velocity vector field $v$ is \begin{equation}\label{dynamical}
\mathscr{L} \rho = -\,\mathrm{div}\,(\rho v)\,, \end{equation} we may recognize the last term of (\ref{mequ}) as $\mathscr{L}
|\Psi|^2$ with $\mathscr{L}$ the generator of the deterministic process defined by \eqref{Bohm}. Thus, as is well known, Bohmian mechanics arises as the natural equivariant process on configuration space associated with $H$ and $\Psi$.
To be sure, Bohmian mechanics is not the only solution of (\ref{mainequ}) for $H$ given by \eqref{Hamil}. Among the alternatives are Nelson's stochastic mechanics \cite{stochmech} and other velocity formulas \cite{Deotto}. However, Bohmian mechanics is the most natural choice, the one most likely to be relevant to physics. It is, in fact, the canonical choice, in the sense of minimal process which we shall explain in Section \ref{sec:mini}.
\subsection{Equivariant Jump Processes}\label{sec:revjump}
Let $\mathcal{Q}$ denote the configuration space of the process, whatever sort of space that may be (vector space, lattice, manifold, etc.); mathematically speaking, we need that $\mathcal{Q}$ be a measurable space. A (pure) jump process is a Markov process on $\mathcal{Q}$ for which the only motion that occurs is via jumps. Given that $Q_t =q$, the probability for a jump to $q'$, i.e., into the infinitesimal volume
$dq'$ about $q'$, by time $t+dt$ is $\sigma_t(dq'|q)\, dt$, where
$\sigma$ is called the \emph{jump rate}. In this notation, $\sigma$ is a finite measure in the first variable; $\sigma(B|q)$ is the rate (the probability per unit time) of jumping to somewhere in the set
$B\subseteq\mathcal{Q}$, given that the present location is $q$. The overall jump rate is $\sigma(\mathcal{Q}|q)$.
It is often the case that $\mathcal{Q}$ is equipped with a distinguished measure, which we shall denote by $dq$ or $dq'$, slightly abusing notation. For example, if $\mathcal{Q} = \mathbb{R}^d$, $dq$ may be the Lebesgue measure, or if $\mathcal{Q}$ is a Riemannian manifold, $dq$ may be the Riemannian volume element. When $\sigma(\,\cdot\,|q)$ is absolutely continuous relative to the distinguished measure, we also write
$\sigma(q'|q)\, dq'$ instead of $\sigma(dq'|q)$. Similarly, we sometimes use the letter $\rho$ for denoting a measure and sometimes the density of a measure, $\rho(dq) = \rho(q)\,dq$.
A jump first occurs when a random waiting time $T$ has elapsed, after the time $t_0$ at which the process was started or at which the most recent previous jump has occurred. For purposes of simulating or constructing the process, the destination $q'$ can be chosen at the time of jumping, $t_0 + T$, with probability distribution
$\sigma_{t_0+T} (\mathcal{Q}|q)^{-1} \, \sigma_{t_0+T} (\,\cdot\,|q)$. In case the overall jump rate is time-independent, $T$ is exponentially distributed with mean $\sigma(\mathcal{Q}|q)^{-1}$. When the rates are time-dependent---as they will typically be in what follows---the waiting time remains such that \[
\int_{t_0}^{t_0+T} \sigma_t(\mathcal{Q}|q) \, dt \] is exponentially distributed with mean 1, i.e., $T$ becomes exponential after a suitable (time-dependent) rescaling of time. For more details about jump processes, see \cite{Breiman}.
The generator of a pure jump process can be expressed in terms of the rates: \begin{equation}\label{continuity3}
\mathscr{L}_\sigma \rho(dq) = \int\limits_{q'\in\mathcal{Q}} \Big(
\sigma(dq|q') \rho(dq') - \sigma(dq'|q) \rho(dq) \Big)\,, \end{equation} a ``balance'' or ``master'' equation expressing $\partial \rho/\partial t$ as the gain due to jumps to $dq$ minus the loss due to jumps away from $q$.
We shall say that jump rates $\sigma$ are \emph{equivariant} if $\mathscr{L}_\sigma$ is an equivariant generator. It is one of our goals in this paper to describe a general scheme for obtaining equivariant jump rates. In Sections \ref{sec:mini1} and \ref{sec:mini2} we will explain how this leads us to formula \eqref{tranrates}.
\subsection{Process Additivity}\label{sec:introadd}
The Hamiltonian of a QFT usually comes as a sum, such as \begin{equation}\label{Hsum}
H = H_0 + H_{I} \end{equation} with $H_0$ the free Hamiltonian and $H_{I}$ the interaction Hamiltonian. If several particle species are involved, $H_0$ is itself a sum containing one free Hamiltonian for each species. The left hand side of (\ref{mainequ}), which should govern our choice of the generator, is then also a sum, \begin{equation}\label{Hsumgen}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H_0 \Psi + \frac{2}{\hbar} \, \mathrm{Im}
\, \Psi^* H_{I} \Psi = \mathscr{L} |\Psi|^2\,. \end{equation} This opens the possibility of finding a generator $\mathscr{L}$ by setting $\mathscr{L} = \mathscr{L}_0 + \mathscr{L}_{I}$, provided we have generators $\mathscr{L}_0$ and $\mathscr{L}_{I}$ corresponding to $H_0$ and $H_{I}$ in the sense that \begin{subequations} \begin{align}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H_0 \Psi
&= \mathscr{L}_0 |\Psi|^2 \\
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H_{I} \Psi
&= \mathscr{L}_{I} |\Psi|^2\,. \end{align} \end{subequations} This feature of (\ref{mainequ}) we call \emph{process additivity}; it is based on the fact that the left hand side of (\ref{mainequ}) is linear in $H$. Note that the backward generator of the process with forward generator $\mathscr{L}_0 + \mathscr{L}_{I}$ is $L_0 + L_{I}$; thus forward and backward generators lead to the same notion of process additivity, and to the same process corresponding to $H_0 + H_{I}$. In many cases, as will be elaborated in Section \ref{sec:free}, $H_0$ is based on an operator known from quantum mechanics (e.g., the Dirac operator), in such a way that $\mathscr{L}_0$ can be obtained from the appropriate Bohmian law of motion. In Section \ref{sec:mini1} we will explain how $\mathscr{L}_{I}$ can usually be taken as the generator of a jump process.
Our proposal is to take seriously the process generated by $\mathscr{L} = \mathscr{L}_0 + \mathscr{L}_{I}$ and regard it as the process naturally associated with $H$. The bottom line is that process additivity provides a \emph{method of constructing} a Bell-type theory.
Obviously, the mathematical observation of process additivity (that sums of generators define an equivariant process associated with sums of Hamiltonians) applies not only to the splitting of $H$ into a free and an interaction contribution, but to every case where $H$ is a sum. And it seems that process additivity provides a physically very reasonable process in every case where $H$ is naturally a sum, in fact the most reasonable process: the one that should be considered \emph{the} Bell-type process, defining \emph{the} Bell-type theory.
\subsection{What Added Processes May Look Like}
To get some feeling for what addition of generators, $\mathscr{L} = \mathscr{L}_1 + \mathscr{L}_2$, means for the corresponding processes, we consider some examples. First consider two deterministic processes (on the same configuration space), having generators of the form $\mathscr{L} \rho = -\,\mathrm{div}\,(\rho v)$. To add the generators obviously means to add the velocity vector fields, $v=v_1 + v_2$, so the resulting velocity is a superposition of two contributions.
Next consider a pure jump process. Since, according to (\ref{continuity3}), the generator $\mathscr{L}$ is linear in $\sigma$, adding generators means adding rates, $\sigma = \sigma_1 + \sigma_2$. This is equivalent to saying there are two kinds of jumps: if the present location is $q\in\mathcal{Q}$, with probability
$\sigma_1(\mathcal{Q}|q)\,dt$ the process performs a jump of the first type within the next $dt$ time units, and with probability
$\sigma_2(\mathcal{Q}|q)\,dt$ a jump of the second type. That does not mean, however, that one can decide from a given realization of the process which jump was of which type.
Next suppose we add the generators of a deterministic and a jump process, \begin{equation}\label{continuity4}
\mathscr{L} \rho(q) = -\,\mathrm{div}\,(\rho v)(q) + \int\limits_{q'\in\mathcal{Q}}
\Big( \sigma(q|q')\, \rho(q') - \sigma(q'|q)\, \rho(q) \Big) dq'\,. \end{equation} This process moves with velocity $v(q)$ until it jumps to $q'$, where it continues moving, with velocity $v(q')$. The jump rate may vary with time in two ways: first because $\sigma$ may be time-dependent, second because $\sigma$ may be position-dependent and $Q_t$ moves with velocity $v$. One can easily understand (\ref{continuity4}) in terms of gain or loss of probability density due to motion and jumps. So this process is piecewise deterministic: although the temporal length of the pieces (the intervals between two subsequent jumps) and the starting points (the jump destinations) are random, given this data the trajectory is determined.
The generator of the Wiener process in $\mathbb{R}^d$ is the Laplacian, and to add to it the generator of a deterministic process means to introduce a drift. Note that this is different from adding, in $\mathbb{R}^d$, a Wiener process to a solution of the deterministic process. In spaces like $\mathbb{R}^d$, where it so happens that one is allowed to add locations, there is a danger of confusing addition of generators with addition of realizations. Whenever we speak of adding processes, it means we add generators.
To add generators of a diffusion and a pure jump process yields what is often called a jump diffusion process, one making jumps with time- and position-dependent rates and following a diffusion path in between. Diffusion processes, however, will play almost no role in this paper.
\subsection{Integral Operators Correspond to Jump Processes} \label{sec:mini1}
We now address the interaction part $H_{I}$ of the Hamiltonian (\ref{Hsum}). In QFTs with cutoffs it is usually the case that $H_{I}$ is an integral operator. For that reason, we shall in this work focus on integral operators for $H_{I}$. We now point out why the naturally associated process is a pure jump process. For short, we will write $H$ rather than $H_{I}$ in this and the subsequent section. For the time being, think of $\mathcal{Q}$ as $\mathbb{R}^d$ and of wave functions as complex valued.
What characterizes jump processes versus continuous processes is that some amount of probability that vanishes at $q\in\mathcal{Q}$ can reappear in an entirely different region of configuration space, say at $q'\in\mathcal{Q}$. This is manifest in the equation for $\partial \rho/\partial t$, (\ref{continuity3}): the first term in the integrand is the probability increase due to arriving jumps, the second the decrease due to departing jumps, and the integration over $q'$ reflects that $q'$ can be anywhere in $\mathcal{Q}$. This suggests that Hamiltonians for which the expression \eqref{continuity1} for
$\partial |\Psi|^2/\partial t$ is naturally an integral over $dq'$ correspond to pure jump processes. So when is the left hand side of
(\ref{mainequ}) an integral over $dq'$? When $H$ is an integral operator, i.e., when $\sp{q}{H|q'}$ is not merely a formal symbol, but represents an integral kernel that exists as a function or a measure and satisfies \begin{equation}
(H\Psi)(q) = \int dq'\,\sp{q}{H|q'}\, \Psi(q')\,. \end{equation}
In this case, we should choose the jump rates in such a way that, when $\rho = |\Psi|^2$, \begin{equation}\label{la1}
\sigma(q|q') \,\rho(q') - \sigma(q'|q) \,\rho(q) = \frac{2}{\hbar}
\, \mathrm{Im} \, \Psi^*(q)\, \sp{q}{H|q'} \, \Psi(q') \,, \end{equation} and this suggests, since jump rates must be nonnegative (and the right hand side of \eqref{la1} is anti-symmetric), that \[
\sigma(q|q') \,\rho(q') = \Big[ \frac{2}{\hbar} \, \mathrm{Im} \,
\Psi^*(q)\, \sp{q}{H|q'} \, \Psi(q') \Big]^+ \] (where $x^+$ denotes the positive part of $x\in\mathbb{R}$, that is, $x^+$ is equal to $x $ for $x>0$ and is zero otherwise), or \begin{equation}\label{mini1}
\sigma(q|q') = \frac{ \big[ (2/\hbar) \, \mathrm{Im} \, \Psi^*(q) \, \sp{q}
{H|q'} \, \Psi(q') \big]^+}{\Psi^*(q')\, \Psi(q')} . \end{equation} These rates are an instance of what we call the \emph{minimal jump rates} associated with $H$ (and $\Psi$). The name comes from the fact that they are actually the minimal possible values given (\ref{la1}), as is expressed by the inequality \eqref{minimality} and will be explained in detail in Section \ref{sec:mini4}. Minimality entails that at any time $t$, one of the transitions $q_1 \to q_2$ or $q_2 \to q_1$ is forbidden. We will call the process defined by the minimal jump rates the \emph{minimal jump process} (associated with $H$).
In contrast to jump processes, continuous motion, as in Bohmian mechanics, corresponds to such Hamiltonians that the formal matrix elements $\sp{q}{H|q'}$ are nonzero only infinitesimally close to the diagonal, and in particular to differential operators like the Schr\"odinger Hamiltonian (\ref{Hamil}), which has matrix elements of the type $\delta''(q-q') + V(q) \,\delta(q-q')$. We can summarize the situation, as a rule of thumb, by the following table:
\begin{center}
\begin{tabular}{|r|l|}
\hline
A contribution to $H$ that is a \ldots & corresponds to \ldots\\\hline
integral operator & jumps\\
differential operator & deterministic continuous motion\\
multiplication operator & no motion ($\mathscr{L} = 0$)\\\hline \end{tabular} \end{center}
The minimal jump rates as given by (\ref{mini1}) have some nice features. The possible jumps for this process correspond to the nonvanishing matrix elements of $H$ (though, depending on the state $\Psi$, even some of the jump rates corresponding to nonvanishing matrix elements of $H$ might happen to vanish). Moreover, in their dependence on the state $\Psi$, the jump rates $\sigma$ depend only ``locally'' upon $\Psi$: the jump rate for a given jump $q'\to q$ depends only on the values $\Psi(q')$ and $\Psi(q)$ corresponding to the configurations linked by that jump. Discretizing $\mathbb{R}^3$ to a lattice $\varepsilon \mathbb{Z}^3$, one can obtain Bohmian mechanics as a limit $\varepsilon\to 0$ of minimal jump processes \cite{Sudbery,Vink}, whereas greater-than-minimal jump rates lead to Nelson's stochastic mechanics \cite{stochmech} and similar diffusions, such as (\ref{diffusion}); see \cite{Vink,Guerra}. If the Schr\"odinger operator \eqref{Hamil} is approximated in other ways by operators corresponding to jump processes, e.g., by $H_\varepsilon = e^{-\varepsilon H} H e^{-\varepsilon H}$, the minimal jump processes presumably also converge to Bohmian mechanics.
We have reason to believe that there are lots of self-adjoint operators which do not correspond to any stochastic process that can be regarded as defined, in any reasonable sense, by \eqref{mini1}.\footnote{Consider, for example, $H = p \cos p$ where $p$ is the one-dimensional momentum operator $-i\hbar
\partial/\partial q$. Its formal kernel $\sp{q}{H|q'}$ is the distribution $-\frac{i}{2} \delta'(q-q'-1) - \frac{i}{2} \delta'(q-q'+1)$, for which \eqref{mini1} would not have a meaning. {}From a sequence of smooth functions converging to this distribution, one can obtain a sequence of jump processes with rates \eqref{mini1}: the jumps occur very frequently, and are by amounts of approximately $\pm 1$. A limiting process, however, does not exist.} But such operators seem never to occur in QFT. (The Klein--Gordon operator $\sqrt{m^2 c^4 - \hbar^2 c^2 \Delta}$ does seem to have a process, but it requires a more detailed discussion which will be provided in a forthcoming work \cite{klein2}.)
\subsection{Minimal Jump Rates} \label{sec:mini2}
The reasoning of the previous section applies to a far more general setting than just considered: to arbitrary configuration spaces $\mathcal{Q}$ and ``generalized observables''---POVMs---defining, for our purposes, what the ``position representation'' is. We now present this more general reasoning, which leads to one of the main formulas of this paper, (\ref{tranrates}).
The process we construct relies on the following ingredients from QFT: \begin{enumerate} \item A Hilbert space $\mathscr{H}$ with scalar product $\sp{\Psi}
{\Phi}$.
\item A unitary one-parameter group $U_t$ in $\mathscr{H}$ with
Hamiltonian $H$,
\[
U_t = e^{-\frac{i}{\hbar}tH}\,,
\]
so that in the Schr\"odinger picture the state $\Psi$ evolves
according to
\begin{equation}
i\hbar\frac{d\Psi_t}{dt} = H\Psi_t\,.
\end{equation}
$U_t$ could be part of a representation of the Poincar\'e group.
\item A positive-operator-valued measure (POVM) ${P}(dq)$ on $\mathcal{Q}$
acting on $\mathscr{H}$, so that the probability that the system in the
state $\Psi$ is localized in $dq$ at time $t$ is
\begin{equation} \label{mis}
\mathbb{P}_t(dq)= \sp{\Psi_t}{{P}(dq)| \Psi_t} \,.
\end{equation} \end{enumerate}
Mathematically, a POVM ${P}$ on $\mathcal{Q}$ is a countably additive set function (``measure''), defined on measurable subsets of $\mathcal{Q}$, with values in the positive (bounded self-adjoint) operators on (a Hilbert space) $\mathscr{H}$, such that ${P}(\mathcal{Q})$ is the identity operator.\footnote{The countable additivity is to be understood as in the sense of the weak operator topology. This in fact implies that countable additivity also holds in the strong topology.} Physically, for our purposes, ${P}(\,\cdot\,)$ represents the (generalized) position observable, with values in $\mathcal{Q}$. The notion of POVM generalizes the more familiar situation of observables given by a set of commuting self-adjoint operators, corresponding, by means of the spectral theorem, to a projection-valued measure (PVM): the case where the positive operators are projection operators. A typical example is the single Dirac particle: the position operators on $L^2(\mathbb{R}^3,\mathbb{C}^4)$ induce there a natural PVM ${P}_0(\,\cdot\,)$:
for any Borel set $B\subseteq \mathbb{R}^3$, ${P}_0(B)$ is the projection to the subspace of functions that vanish outside $B$, or, equivalently, ${P}_0(B)\Psi(q) = \mathbf{1}_B(q) \, \Psi(q)$ with $\mathbf{1}_B$ the indicator function of the set $B$. Thus, $\sp{\Psi} {{P}_0 (dq)|
\Psi} = |\Psi(q)|^2 dq$. When one considers as Hilbert space $\mathscr{H}$ only the subspace of positive energy states, however, the localization probability is given by ${P}(\,\cdot\,) = P_+ {P}_0(\,\cdot\,) I$ with $P_+:L^2(\mathbb{R}^3,\mathbb{C}^4) \to \mathscr{H}$ the projection and $I:\mathscr{H} \to L^2(\mathbb{R}^3,\mathbb{C}^4)$ the inclusion mapping. Since $P_+$ does not commute with most of the operators ${P}_0(B)$, ${P} (\,\cdot\,)$ is no longer a PVM but a genuine POVM\footnote{This situation is indeed more general than it may seem. By a theorem of Naimark \cite[p.~142]{Davies}, every POVM ${P} (\,\cdot\,)$ acting on $\mathscr{H}$ is of the form ${P}(\,\cdot\,) = P_+ {P}_0 (\,\cdot\,) P_+$ where ${P}_0$ is a PVM on a larger Hilbert space, and $P_+$ the projection to
$\mathscr{H}$. \label{ft:Naimark}} and consequently does not correspond to any position operator---although it remains true (for $\Psi$ in the positive energy subspace) that $\sp{\Psi}{{P}(dq)| \Psi} =
|\Psi(q)|^2 dq$. That is why in QFT, the position observable is indeed more often a POVM than a PVM. POVMs are also relevant to photons \cite{ali,kraus}. In one approach, the photon wave function $\Psi: \mathbb{R}^3 \to \mathbb{C}^3$ is subject to the constraint condition $\nabla \cdot \Psi = \partial_1 \Psi_1 + \partial_2 \Psi_2 + \partial_3 \Psi_3 =0$. Thus, the physical Hilbert space $\mathscr{H}$ is the (closure of the) subspace of $L^2(\mathbb{R}^3,\mathbb{C}^3)$ defined by this constraint, and the natural PVM on $L^2(\mathbb{R}^3,\mathbb{C}^3)$ gives rise, by projection, to a POVM on $\mathscr{H}$. So much for POVMs. Let us get back to the construction of a jump process.
The goal is to specify equivariant jump rates $\sigma = \sigma^{\Psi, H, {P}}$, i.e., such rates that \begin{equation}\label{equirates}
\mathscr{L}_\sigma \mathbb{P} = \frac{d\mathbb{P}}{dt} \,. \end{equation} To this end, one may take the following steps:
\begin{enumerate} \item Note that
\begin{equation}\label{dPdt}
\frac{d\mathbb{P}_t(dq)}{dt} = \frac{2}{\hbar} \, \mathrm{Im} \,
\sp{\Psi_t}{{P}(dq) H| \Psi_t}\,.
\end{equation} \item Insert the resolution of the identity $I = \int\limits_{q'\in\mathcal{Q}}
{P}(dq')$ and obtain
\begin{equation}\label{dPdtJ}
\frac{d\mathbb{P}_t(dq)}{dt} =\int\limits_{q'\in\mathcal{Q}}
\mathbb{J}_t(dq,dq') \,,
\end{equation}
where
\begin{equation}\label{Jdef}
\mathbb{J}_t(dq,dq') = \frac{2}{\hbar} \,
\mathrm{Im} \, \sp{\Psi_t}{{P}(dq)H {P}(dq')| \Psi_t} \,.
\end{equation} \item Observe that $\mathbb{J}$ is anti-symmetric, $\mathbb{J}(dq',dq) = -
\mathbb{J}(dq,dq')$. Thus, since $x = x^+ - (-x)^+$,
\[
\mathbb{J}(dq,dq') = \left[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq) H
{P}(dq') |\Psi}\right]^+ - \left[(2/\hbar)\, \mathrm{Im} \, \sp{\Psi}
{{P}(dq') H {P}(dq) |\Psi}\right]^+ .
\] \item Multiply and divide both terms by $\mathbb{P}(\,\cdot\,)$,
obtaining that
\begin{eqnarray*}
\int\limits_{q'\in\mathcal{Q}} \mathbb{J}(dq,dq') = \int\limits_{q'\in\mathcal{Q}}
\bigg( \hspace{-3ex} &&
\frac{[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq) H {P}(dq')| \Psi}]^+}
{\sp{\Psi}{{P}(dq')| \Psi}} \mathbb{P}(dq') -
\\-&&
\frac{[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq') H {P}(dq)| \Psi}
]^+} {\sp{\Psi} {{P}(dq)| \Psi}} \mathbb{P}(dq) \bigg) \,. \end{eqnarray*} \item By comparison with \eqref{continuity3}, recognize the right hand
side of the above equation as $\mathscr{L}_\sigma \mathbb{P}$, with
$\mathscr{L}_\sigma$ the generator of a Markov jump process with jump
rates
\begin{equation} \label{tranrates}
\sigma(dq|q')= \frac{[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq) H
{P}(dq')| \Psi}]^+}{\sp{\Psi}{{P}(dq')| \Psi}}\,, \end{equation}
which we call the \emph{minimal jump rates}. \end{enumerate} Mathematically, the right hand side of this formula as a function of $q'$ must be understood as a density (Radon--Nikod{\'y}m derivative) of one measure relative to another.\footnote{Quite aside from the previous discussion, it is perhaps worth noting that there are not so many expressions in $H,{P}$, and $\Psi$ that would meet the formal criteria for being a candidate for the jump rate. Since the only connection between abstract Hilbert space and configuration space is by ${P}$, which leads to \emph{measures} on $\mathcal{Q}$, the only way to obtain a \emph{function} on $\mathcal{Q}$ is to form a Radon--Nikod{\'y}m quotient of two measures, $\sigma(q') = A(dq')/B(dq')$. Since $\sigma$
must be a measure-valued function, the numerator should be a bi-measure (a measure in each of two variables). The simplest measure one can form from $H,{P}$, and $\Psi$ is $\sp{\Psi}{{P}(dq)|\Psi}$; the simplest bi-measures are $\sp{\Psi}{H^{n_1} {P}(dq) H^{n_2}
{P}(dq') H^{n_3}| \Psi}$. Jump rates must have dimension 1/time, and the only object at hand having this dimension is $H/\hbar$. Thus, $H$ can appear only once in the numerator. The expressions $\sp{\Psi}{H
{P}(dq) {P}(dq')| \Psi}$ and $\sp{\Psi}{{P}(dq) {P}(dq') H| \Psi}$ are no good because for PVMs ${P}$ they are concentrated on the diagonal of $\mathcal{Q} \times \mathcal{Q}$ and hence do not lead to nontrivial jumps. Let us write $\mu$ for the measure-valued function we have arrived at: \[
\mu (dq,q') = \frac{1}{\hbar} \frac{\sp{\Psi}{{P}(dq) H {P}(dq')
| \Psi}} {\sp{\Psi}{{P}(dq')|\Psi}}\,. \]
This provides \emph{complex} measures, whereas $\sigma(\,\cdot\,|q')$ must be a positive real measure. There are not many ways of forming a positive real measure from a complex one, the essential ones being \[
|\mu|, |\mathrm{Re} \, \mu|, |\mathrm{Im} \, \mu|, (\mathrm{Re} \, \mu)^+, (\mathrm{Re} \, \mu)^-,
(\mathrm{Im} \, \mu)^+, (\mathrm{Im} \, \mu)^- \] times a numerical constant $\lambda>0$. One could of course form additional expressions at the price of higher complexity.
This has gotten us already pretty close to the minimal rates \eqref{tranrates}, which correspond to $\sigma = 2(\mathrm{Im} \, \mu)^+$. To proceed further, we might demand the absence of unnecessary jumps; that means that at any time, either the jump $q_1 \to q_2$ or $q_2 \to q_1$ is forbidden; this leaves only $\lambda (\mathrm{Im} \, \mu)^\pm$. Moreover, $2 (\mathrm{Im} \, \mu)^+$ is the only expression in the list that has Bohmian mechanics as a limiting case or implies equivariance. Furthermore it corresponds to the natural guess \eqref{Ltilde} for a backward generator, discussed in Section \ref{sec:mini}.} The plus symbol denotes the positive part of a signed measure; it can also be understood as applying the plus function, $x^+ = \max (x,0)$, to the density, if it exists, of the numerator.
To sum up, we have argued that with $H$ and $\Psi$ is naturally associated a Markov jump process $Q_t$ whose marginal distributions coincide at all times by construction with the quantum probability measure, $\rho_t(\,\cdot\,) = \mathbb{P}_t(\,\cdot\,)$, so that $Q_t$ is an equivariant Markov process.
In Section~4 of \cite{crea2A}, we establish precise conditions on $H,{P}$, and $\Psi$ under which the jump rates \eqref{tranrates} are well-defined and finite $\mathbb{P}$-almost everywhere, and prove that in this case the rates are equivariant, as suggested by steps 1--5 above. It is perhaps worth remarking at this point that any $H$ can be approximated by Hamiltonians $H_n$ (namely Hilbert--Schmidt operators) for which the rates \eqref{tranrates} are always (for all $\Psi$) well-defined and equivariant \cite{crea2A}. Concerning this, see also the end of Section \ref{sec:mini}.
\subsection{Process Associated with the Free Hamiltonian} \label{sec:free}
We now address the free Hamiltonian $H_0$ of a QFT. We describe the process naturally associated with $H_0$, when this is the second quantized Schr\"odinger or Dirac operator. We will treat more general free Hamiltonians in the next section. We shall consider here only Hamiltonians for one type of particle.
We first define the configuration space $\mathcal{Q}$. Let us write $\mathcal{Q}^{(1)}$ (``one-particle configuration space'') for physical space; this is typically, but not necessarily, $\mathbb{R}^3$. The space $\mathcal{Q}$ in which the ``free process'' takes place is the configuration space for a variable number of identical particles; we call it $\Gamma \mathcal{Q}^{(1)}$. It can be defined as the space of all finite subsets-with-multiplicities of $\mathcal{Q}^{(1)}$. A set-with-multiplicities consists of a set and, for each element $x$ of the set, a positive integer, called the multiplicity of $x$. The number of particles in a configuration $q$ is the sum of its multiplicities, $\#q$. Such configurations describe several identical particles, some of which may be located at the same position in space. Equivalently, one could say that $\Gamma \mathcal{Q}^{(1)}$ is the set of all mappings $n:\mathcal{Q}^{(1)} \to \mathbb{N}\cup\{0\}$ (meaning the number of particles at a given location) such that \[
\sum_{{\boldsymbol q} \in \mathcal{Q}^{(1)}} n({\boldsymbol q} ) < \infty\,. \] Another equivalent definition is the set of all finite nonnegative measures $n(\,\cdot\,)$ on $\mathcal{Q}^{(1)}$ that assume only integer values; the meaning of $n(R)$ is the number of particles in the region $R$ of physical space. Finally, one can define \[
\Gamma \mathcal{Q}^{(1)} = \bigcup_{n=0}^\infty \mathcal{Q}^{(n)} \mbox{ where }
\mathcal{Q}^{(n)} = (\mathcal{Q}^{(1)})^n/\mbox{permutations}. \]
A related space, for which we write $\Gamma_{\!\neq} \mathcal{Q}^{(1)}$, is the space of all finite subsets of $\mathcal{Q}^{(1)}$; it is contained in $\Gamma \mathcal{Q}^{(1)}$, after obvious identifications. In fact, $\Gamma_{\!\neq} \mathcal{Q}^{(1)} = \Gamma \mathcal{Q}^{(1)} \setminus \Delta$, where $\Delta$ is the set of coincidence configurations, i.e., those having two or more particles at the same position. $\Gamma_{\!\neq} \mathcal{Q}^{(1)}$ is the union of the spaces ${\mathcal{Q}}^{(n)}_{\neq}$ for $n=0,1,2, \ldots$, where ${\mathcal{Q}}^{(n)}_{\neq}$ is the space of subsets of $\mathcal{Q}^{(1)}$ with $n$ elements.
For $\mathcal{Q}^{(1)} = \mathbb{R}^d$, the $n$-particle sector ${\mathcal{Q}}^{(n)}_{\neq}$ is a manifold of dimension $nd$ (see \cite{identical} for a discussion of Bohmian mechanics on this manifold). If $d\geq 2$, the set $\Delta$ of coincidence configurations has codimension $\geq 2$ and thus can usually be ignored. We can then replace $\Gamma \mathbb{R}^d$ by the somewhat simpler space $\Gamma_{\!\neq} \mathbb{R}^d$.
The position POVM ${P}^{(1)}$ on $\mathcal{Q}^{(1)}$ (acting on the one-particle Hilbert space) naturally leads to a POVM we call $\Gamma {P}^{(1)}$ on $\mathcal{Q} = \Gamma \mathcal{Q}^{(1)}$, acting on Fock space (see Section \ref{sec:GammaPOVM} for the definition).\footnote{The coincidence configurations form a null set, $\Gamma {P}^{(1)}(\Delta) =0$, when $\mathcal{Q}^{(1)}$ is a continuum, or, more precisely, when ${P}^{(1)}$ is nonatomic as a measure.} Since a configuration from $\Gamma(\mathbb{R}^3)$ defines the number of particles and their positions, the name ``position observable'' for ${P} = \Gamma {P}^{(1)}$ stretches the meaning of ``position'' somewhat: it now also encompasses the number of particles.
We now give a description of the free process associated with the second-quantized Schr\"odinger operator; it arises from Bohmian mechanics. Fock space $\mathscr{H} = \mathscr{F}$ is a direct sum \begin{equation}\label{fockspace}
\mathscr{F}= \bigoplus_{n=0}^{\infty} \mathscr{F}^{(n)} , \end{equation} where $\mathscr{F}^{(n)}$ is the $n$-particle Hilbert space. $\mathscr{F}^{(n)}$ is the subspace of symmetric (for bosons) or anti-symmetric (for fermions) functions in $L^2 (\mathbb{R}^{3n}, (\mathbb{C}^{2s+1})^{\otimes n})$ for spin-$s$ particles. Thus, $\Psi \in \mathscr{F}$ can be decomposed into a sequence $\Psi = \left( \Psi^{(0)}, \Psi^{(1)}, \ldots, \Psi^{(n)}, \ldots \right)$, the $n$-th member $\Psi^{(n)}$ being an $n$-particle wave function, the wave function representing the $n$-particle sector of the quantum state vector. The obvious way to obtain a process on $\mathcal{Q} = \Gamma \mathbb{R}^3$ is to let the configuration $Q(t)$, containing $N = \#Q(t)$ particles, move according to the $N$-particle version of Bohm's law (\ref{Bohm}), guided by $\Psi^{(N)}$.\footnote{As defined, configurations are unordered, whereas we have written Bohm's law \eqref{Bohm} for ordered configurations. Thanks to the (anti\nobreakdash-)symmetry of the wave function, however, all orderings will lead to the same particle motion. For more about such considerations, see our forthcoming work \cite{identical}.} This is indeed an equivariant process since $H_0$ has a block diagonal form with respect to the decomposition (\ref{fockspace}), \[
H_0 = \bigoplus_{n=0}^\infty H_0^{(n)}\,, \] and $H_0^{(n)}$ is just a Schr\"odinger operator for $n$ noninteracting particles, for which, as we already know, Bohmian mechanics is equivariant. We used a very similar process in \cite{crea1} (the only difference being that particles were numbered in \cite{crea1}).
Similarly, if $H_0$ is the second quantized Dirac operator, we let a configuration $Q$ with $N$ particles move according to the usual $N$-particle Bohm--Dirac law \cite[p.~274]{BH} \begin{equation}\label{BohmDirac}
\frac{dQ}{dt} = c\frac{\Psi^*(Q) \, \alpha_{N} \, \Psi(Q)}
{\Psi^*(Q) \, \Psi(Q)} \end{equation} where $c$ denotes the speed of light and $\alpha_{N} = ({\boldsymbol \alpha}^{(1)}, \ldots, {\boldsymbol \alpha}^{(N)})$ with ${\boldsymbol \alpha}^{(k)}$ acting on the spin index of the $k$-th particle.
\subsection{Other Approaches to the Free Process} \label{sec:free2}
We will give below a general velocity formula, applicable to a wider class of free Hamiltonians. Alternatively, we can provide a free process for any $H_0$ if we are given an equivariant process for the one-particle Hamiltonian $H^{(1)}$. This is based on the particular mathematical structure of $H_0$, which can be expressed by saying it arises from a one-particle Hamiltonian $H^{(1)}$ by applying a ``second quantization functor $\Gamma$'' \cite{RS}. That is, there is an algorithm (in a bosonic or fermionic version) for forming, from a one-particle Hilbert space $\mathscr{H}^{(1)}$ and a one-particle Hamiltonian $H^{(1)}$, a Fock space $\mathscr{F} = \Gamma\mathscr{H}^{(1)}$ and free Hamiltonian $H_0 = \Gamma H^{(1)}$. And parallel to this ``second quantization'' algorithm, there is an algorithm for the canonical construction, from a given equivariant one-particle Markov process $Q^{(1)}_t$, of a process we call $\Gamma Q^{(1)}_t$ that takes place in $\mathcal{Q} = \Gamma \mathcal{Q}^{(1)}$ and is equivariant with respect to $H_0$. This algorithm may be called the ``second quantization'' of a Markov process.
The algorithm is described in Section \ref{sec:Gamma}. What the algorithm does is essentially to construct an $n$-particle version of $Q^{(1)}_t$ for every $n$, and finally combine these by means of a random particle number $N = N(t) = \# Q(t)$ which is constant under the free process, parallel to the fact that the particle number operator is conserved by $H_0$. We note further that the process $\Gamma Q^{(1)}_t$ is deterministic if $Q^{(1)}_t$ is. If we take the one-particle process to be Bohmian mechanics or the Bohm--Dirac motion, the algorithm reproduces the processes described in the previous section.
The algorithm leaves us with the task of finding a suitable one-particle law, which we do not address in this paper. For some Hamiltonians, such as the Dirac operator, this is immediate, for others it is rather nontrivial, or even unsolved. The Klein--Gordon operator $\sqrt{m^2c^4 - \hbar^2c^2\Delta}$ will be discussed in forthcoming work \cite{klein2}, and for a study of photons see \cite{photon}.
When $H_0$ is made of differential operators of up to second order (which includes of course the Schr\"odinger and Dirac operators), there is another way to characterize the process associated with $H_0$, a way which allows a particularly succinct description of the process and a particularly direct derivation and construction. In fact, we give a formula for its backward generator $L_0$, or alternatively the velocity (or the forward generator $\mathscr{L}_0$), in terms of $H_0,{P}$, and $\Psi$.
We begin by defining, for any $H,{P}$, and $\Psi$, an operator $L$ acting on functions $f:\mathcal{Q} \to \mathbb{R}$, which may or may not be the backward generator of a process, by \begin{equation}\label{LH}
Lf(q) = \mathrm{Re} \frac{\sp{\Psi} {{P}(dq) \hat{L} \hat{f} |\Psi}}
{\sp{\Psi} {{P}(dq)|\Psi}} = \mathrm{Re} \frac{\sp{\Psi} {{P}(dq)
\frac{i}{\hbar} [H,\hat{f}] |\Psi}} {\sp{\Psi} {{P}(dq) |\Psi}}, \end{equation} where $[\;,\,]$ means the commutator, \begin{equation}\label{hatf}
\hat{f} = \int\limits_{q \in \mathcal{Q}} f(q) \, {P}(dq)\,, \end{equation} and $\hat{L}$ is the ``generator'' of the (Heisenberg) time evolution of the operator $\hat{f}$, \begin{equation}\label{hatLdef}
\hat{L}\hat{f} = \frac{d}{d\tau} e^{i H \tau/\hbar} \, \hat{f} \,
e^{-i H \tau/\hbar} \Big|_{\tau =0} = \tfrac{i}{\hbar}
[H,\hat{f}] \,. \end{equation} (If ${P}$ is a PVM, then $\hat{f} = f(\hat{q})$, where $\hat{q}$ is the configuration operator.) \eqref{LH} could be guessed in the following way: since $Lf$ is in a certain sense, see \eqref{backgenerator}, the time derivative of $f$, it might be expected to be related to $\hat{L} \hat{f}$, which is in a certain sense, see \eqref{hatLdef}, the time derivative of $\hat{f}$. As a way of turning the operator $\hat{L} \hat{f}$ into a function $Lf(q)$, the middle term in \eqref{LH} is an obvious possibility. Note that this way of arriving at \eqref{LH} does not make use of equivariance; for another way that does, see Section \ref{sec:freeflow}.
The formula for the forward generator equivalent to \eqref{LH} reads \begin{equation}\label{genH}
\mathscr{L} \rho(dq) = \mathrm{Re} \, \sp{\Psi}{\widehat{\tfrac{d\rho}
{d\mathbb{P}}}\, \tfrac{i}{\hbar} [H, {P} (dq)] |\Psi}, \end{equation} as follows from \eqref{generatorduality}.
Whenever $L$ is indeed a backward generator, we call it the \emph{minimal free (backward) generator} associated with $\Psi, H$, and ${P}$. (The name is based on the concept of minimal process as explained in Section \ref{sec:mini}.) Then the corresponding process is equivariant (see Section \ref{sec:freeflow}). This is the case if (and, there is reason to expect, \emph{only if}) ${P}$ is a PVM and $H$ is a differential operator of up to second order in the position representation, in which ${P}$ is diagonal. In that case, the process is deterministic, and the backward generator has the form $L = v \cdot \nabla$ where $v$ is the velocity vector field; thus, \eqref{LH} directly specifies the velocity, in the form of a first-order differential operator $v \cdot \nabla$. In case $H$ is the $N$-particle Schr\"odinger operator with or without spin, \eqref{LH} yields the Bohmian velocity \eqref{Bohm}, and if $H$ is the Dirac operator, the Bohm--Dirac velocity \eqref{BohmDirac}. To sum up, in some cases definition \eqref{LH} leads to just the right backward generator.
To return to our starting point: if the one-particle generator $\mathscr{L}^{(1)}$ arises from the one-particle Hamiltonian $H^{(1)}$ by \eqref{genH}, then \eqref{genH} also holds between the free generator $\mathscr{L}_0 = \Gamma \mathscr{L}^{(1)}$ and the free Hamiltonian $H_0 = \Gamma H^{(1)}$. (See Section \ref{sec:freeflow} for details.) In other words, \eqref{LH} is compatible with the ``second quantization'' algorithm. Thus, in relevant cases \eqref{LH} allows a direct definition of the free process in terms of $H_0$, just as \eqref{tranrates} directly defines, in terms of $H_{I}$, the jump rates.
A relevant point is that the ``second quantization'' of a differential operator is again a differential operator, in a suitable sense, and has the same order. Note also that \eqref{LH}, when applied to the second quantized Schr\"odinger or Dirac Hamiltonian, defines the same vector field on $\Gamma(\mathbb{R}^3)$ as described in the previous section.
\subsection{Bell-Type QFT}
We briefly summarize what we have obtained. A Bell-type QFT is about particles moving in physical 3-space; their number and positions are represented by a point $Q_t$ in configuration space $\mathcal{Q}$. Provided physical space is $\mathbb{R}^3$, $\mathcal{Q}$ is usually $\Gamma \mathbb{R}^3$ or a Cartesian product of several such spaces, each factor representing a different particle species. $Q_t$ follows a Markov process in $\mathcal{Q}$, which is governed by a state vector $\Psi$ in a suitable Hilbert space $\mathscr{H}$. $\mathscr{H}$ is related to $\mathcal{Q}$ by means of a PVM or POVM ${P}$. $\Psi$ undergoes a unitary evolution with Hamiltonian $H$. The process $Q_t$ usually consists of deterministic continuous trajectories interrupted by stochastic jumps; more generally, it arises by process additivity (i.e., by adding generators) from a free process associated with $H_0$ and a jump process associated with $H_{I}$. The jump rates are given by
\eqref{tranrates} for $H= H_{I}$. The free process arises from Bohmian mechanics, or a suitable analogue, by a construction that can be formalized as the ``second quantization'' of a one-particle Markov process; when appropriate, it is defined directly by \eqref{LH}. The process $Q_t$ is equivariant, i.e., $Q_t$ is $\sp{\Psi_t} {{P}(dq) |\Psi_t}$-distributed at every time $t$.
Examples of Bell-type QFTs can be found in \cite{BellBeables,crea1} and in Section~\ref{sec:example}. It is our contention that, essentially, there is a unique Bell-type version of every regularized QFT. We have to postpone, however, the discussion of operators of the Klein--Gordon type. We also have to assume that the QFT provides us with the POVM ${P}(\,\cdot\,)$; this is related to an ongoing discussion in the literature \cite{NewtonWigner,kraus,Haag} concerning the right position operator.
\subsection{More on Identical Particles}\label{sec:identical}
The $n$-particle sector of the configuration space (without coincidence configurations) of identical particles $\Gamma_{\!\neq}(\mathbb{R}^3)$ is the manifold of $n$-point subsets of $\mathbb{R}^3$; let $\mathcal{Q}$ be this manifold. The most common way of describing the quantum state of $n$ fermions is by an anti-symmetric (square-integrable) wave function $\Psi$ on $\hat\mathcal{Q} := \mathbb{R}^{3n}$; let $\mathscr{H}$ be the space of such functions. Whereas for bosons $\Psi$ could be viewed as a function on $\mathcal{Q}$, for fermions $\Psi$ is not a function on $\mathcal{Q}$.
Nonetheless, the configuration observable still corresponds to a PVM ${P}$ on $\mathcal{Q}$: for $B \subseteq \mathcal{Q}$, we set ${P}(B) \Psi({\boldsymbol q}_1, \ldots, {\boldsymbol q}_n) = \Psi({\boldsymbol q}_1, \ldots, {\boldsymbol q}_n)$ if $\{{\boldsymbol q}_1, \ldots, {\boldsymbol q}_n\} \in B$ and zero otherwise. In other words, ${P}(B)$ is multiplication by the indicator function of $\pi^{-1}(B)$ where $\pi$ is the obvious projection mapping $\hat\mathcal{Q} \setminus \Delta \to \mathcal{Q}$, with $\Delta$ the set of coincidence configurations.
To obtain other useful expressions for this PVM, we introduce the formal kets $|\hat{q} \rangle$ for $\hat{q} \in \hat\mathcal{Q}$ (to be treated like elements of $L^2(\hat\mathcal{Q})$), the anti-symmetrization operator $S$ (i.e., the projection $L^2(\hat\mathcal{Q}) \to \mathscr{H}$), the normalized anti-symmetrizer\footnote{The name means this: since $S$ is a projection, $S \Psi$ is usually not a unit vector when $\Psi$ is. Whenever $\Psi \in L^2(\hat\mathcal{Q})$ is supported by a fundamental domain of the permutation group, i.e., by a set $\Omega \subseteq
\hat\mathcal{Q}$ on which (the restriction of) $\pi$ is a bijection to $\mathcal{Q}$, the norm of $S\Psi$ is $1/\sqrt{n!}$, so that $s\Psi$ is again a unit vector.} $s= \sqrt{n!} \, S$, and the formal kets $|s
\hat{q}\rangle := s|\hat{q} \rangle$ (to be treated like elements of
$\mathscr{H}$). The $|\hat{q} \rangle$ and $|s\hat{q} \rangle$ are normalized in the sense that \[
\sp{\hat{q}} {\hat{q}'} = \delta(\hat{q} - \hat{q}') \text{ and }
\sp{s\hat{q}} {s\hat{q}'} = (-1)^{\varrho(\hat{q},\hat{q}')} \,
\delta(q-q'), \] where $q=\pi(\hat{q})$, $q'=\pi(\hat{q}')$, $\varrho(\hat{q},\hat{q}')$ is the permutation that carries $\hat{q}$ into $\hat{q}'$ given that $q=q'$, and $(-1)^\varrho$ is the sign of the permutation $\varrho$. Now we can write \begin{equation}\label{idenpovm}
{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)} |\hat{q} \rangle
\langle \hat{q}| \, dq = n! \, S |\hat{q} \rangle \langle
\hat{q}| \, dq = |s\hat{q} \rangle \langle s\hat{q}| \, dq, \end{equation} where the sum is over the $n!$ ways of numbering the $n$ points in $q$; the last two terms actually do not depend on the choice of $\hat{q} \in \pi^{-1}(q)$, the numbering of $q$.
The probability distribution arising from this PVM is \begin{equation}\label{idenmeasure}
\mathbb{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)}
|\Psi(\hat{q})|^2 \, dq = n! \, |\Psi(\hat{q})|^2 \, dq =
|\sp{s\hat{q}}{\Psi}|^2 \, dq \end{equation} with arbitrary $\hat{q} \in \pi^{-1}(q)$.
There is a way of viewing fermion wave functions as being defined on $\mathcal{Q}$, rather than $\mathbb{R}^{3n}$, by regarding them as cross-sections of a particular 1-dimensional vector bundle over $\mathcal{Q}$. To this end, define an $n!$-dimensional vector bundle $E$ by \begin{equation}\label{idenEdef}
E_q := \bigoplus_{\hat{q} \in \pi^{-1}(q)} \mathbb{C}\,. \end{equation} Every function $\Psi:\mathbb{R}^{3n} \to \mathbb{C}$ naturally gives rise to a cross-section $\Phi$ of $E$, defined by \begin{equation}
\Phi(q) := \bigoplus_{\hat{q} \in \pi^{-1}(q)} \Psi(\hat{q})\,. \end{equation} The anti-symmetric functions form a 1-dimensional subbundle of $E$ (see also \cite{identical} for a discussion of this bundle).
\section{Application to Simple Models} \label{sec:example}
In this section, we point out how the jump rates of the model in \cite{crea1} are contained in \eqref{tranrates} and present a full-fledged Bell-type QFT for the second-quantized Dirac equation in an external electromagnetic field.
Further cut-off QFTs that may provide interesting examples of Bell-type QFTs, worth a detailed discussion in a future work \cite{crea4}, are the scalar self-interacting field (e.g., $\Phi^4$), QED, and other gauge field theories. We have to postpone the treatment of these theories because they require discussions lying outside the scope of this paper, in particular a discussion of the position representation of photon wave functions in QED, and, concerning $\Phi^4$, of the appropriate probability current for the Klein--Gordon equation.
\subsection{A Simple QFT}\label{sec:crea1}
We presented a simple example of a Bell-type QFT in \cite{crea1}, and we will now briefly point to the aspects of this model that are relevant here. The model is based on one of the simplest possible QFTs \cite[p.~339]{Schweber}.
The relevant configuration space $\mathcal{Q}$ for a QFT (with a single particle species) is the configuration space of a variable number of identical particles in $\mathbb{R}^3$, which is the set $\Gamma(\mathbb{R}^3)$, or, ignoring the coincidence configurations (as they are exceptions), the set $\Gamma_{\!\neq} (\mathbb{R}^3)$ of all finite subsets of $\mathbb{R}^3$. The $n$-particle sector of this is a manifold of dimension $3n$; this configuration space is thus a union of (disjoint) manifolds of different dimensions. The relevant configuration space for a theory with several particle species is the Cartesian product of several copies of $\Gamma_{\!\neq} (\mathbb{R}^3)$. In the model of \cite{crea1}, there are two particle species, a fermion and a boson, and thus the configuration space is \begin{equation}\label{conffermionboson}
\mathcal{Q} = \Gamma_{\!\neq} (\mathbb{R}^3) \times \Gamma_{\!\neq} (\mathbb{R}^3). \end{equation} We will denote configurations by $q=(x,y)$ with $x$ the configuration of the fermions and $y$ the configuration of the bosons.
For simplicity, we replaced in \cite{crea1} the sectors of $\Gamma_{\!\neq} (\mathbb{R}^3) \times \Gamma_{\!\neq} (\mathbb{R}^3)$, which are manifolds, by vector spaces of the same dimension (by artificially numbering the particles), and obtained the union \begin{equation}\label{crea1conf}
\hat{\mathcal{Q}} = \bigcup_{n=0}^\infty (\mathbb{R}^3)^n \times
\bigcup_{m=0}^\infty (\mathbb{R}^3)^m \,, \end{equation} with $n$ the number of fermions and $m$ the number of bosons. Here, however, we will use \eqref{conffermionboson} as the configuration space, since we have already discussed the space $\Gamma_{\!\neq} (\mathbb{R}^3)$. In comparison with \eqref{crea1conf}, this amounts to (merely) ignoring the numbering of the particles.
$\mathscr{H}$ is the tensor product of a fermion Fock space and a boson Fock space, and thus the subspace of wave functions in $L^2(\hat{\mathcal{Q}})$ that are anti-symmetric in the fermion coordinates and symmetric in the boson coordinates. Let $S$ denote the appropriate symmetrization operator, i.e., the projection operator $L^2(\hat{\mathcal{Q}}) \to \mathscr{H}$, and $s$ the normalized symmetrizer \begin{equation}\label{sdef}
s\Psi({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m) = \sqrt{n!\, m!} \,
S\Psi({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m), \end{equation} i.e., $s = \sqrt{N! \, M!} \, S$ with $N$ and $M$ the fermion and boson number operators, which commute with $S$ and with each other. As in Section \ref{sec:identical}, we denote by $\pi$ the projection mapping $\hat{\mathcal{Q}} \setminus \Delta \to \mathcal{Q}$, $\pi({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m) = (\{{\boldsymbol x}_1, \ldots,{\boldsymbol x}_n\}, \{{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m\})$. The configuration PVM ${P}(B)$ on $\mathcal{Q}$ is multiplication by $\mathbf{1}_{\pi^{-1}(B)}$, which can be understood as acting on
$\mathscr{H}$, though it is defined on $L^2(\hat{\mathcal{Q}})$, since it is permutation invariant and thus maps $\mathscr{H}$ to itself. We utilize again the formal kets $|\hat{q}\rangle$ where $\hat{q} \in \hat{\mathcal{Q}} \setminus \Delta$ is a numbered configuration, for which we also write $\hat{q} = (\hat{x},\hat{y}) = ({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots,
{\boldsymbol y}_m)$. We also use the symmetrized and normalized kets $|s\hat{q}
\rangle = s|\hat{q} \rangle$. As in \eqref{idenpovm}, we can write \begin{equation}\label{crea1povm}
{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)} |\hat{q} \rangle
\langle \hat{q}| \, dq = n!\, m! \, S |\hat{q} \rangle \langle
\hat{q}| \, dq = |s\hat{q} \rangle \langle s\hat{q}| \, dq \end{equation} with arbitrary $\hat{q} \in \pi^{-1}(q)$. For the probability distribution, we thus have, as in \eqref{idenmeasure}, \begin{equation}\label{crea1measure}
\mathbb{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)}
|\Psi(\hat{q})|^2 \, dq = n!\, m! \, |\Psi(\hat{q})|^2 \, dq =
|\sp{s\hat{q}}{\Psi}|^2 \, dq \end{equation} with arbitrary $\hat{q} \in \pi^{-1}(q)$.
The free Hamiltonian is the second quantized Schr\"odinger operator (with zero potential), associated with the free process described in Section~\ref{sec:free}. The interaction Hamiltonian is defined by \begin{equation}\label{HIdef}
H_{I} = \int d^3{\boldsymbol x} \, \psi^\dag({\boldsymbol x})\, (a^\dag_\varphi({\boldsymbol x}) +
a_{\varphi}({\boldsymbol x}))\, \psi({\boldsymbol x}) \end{equation} with $\psi^\dag({\boldsymbol x})$ the creation operators (in position representation), acting on the \emph{fermion} Fock space, and $a^\dag_\varphi({\boldsymbol x})$ the creation operators (in position representation), acting on the \emph{boson} Fock space, regularized through convolution with an $L^2$ function $\varphi:\mathbb{R}^3 \to \mathbb{R}$. $H_{I}$ has a kernel; we will now obtain a formula for it, see
\eqref{crea1kernel} below. The $|s\hat{q} \rangle$ are connected to the creation operators according to \begin{equation}\label{shatqpsia}
|s\hat{q}\rangle = \psi^\dag({\boldsymbol x}_n) \cdots
\psi^\dag({\boldsymbol x}_1) a^\dag({\boldsymbol y}_m) \cdots a^\dag({\boldsymbol y}_1) |0\rangle\,, \end{equation}
where $|0\rangle \in \mathscr{H}$ denotes the vacuum state. A relevant fact is that the creation and annihilation operators $\psi^\dag,\psi,a^\dag$ and $a$ possess kernels. Using the canonical (anti\nobreakdash-)commutation relations for $\psi$ and $a$, one obtains from \eqref{shatqpsia} the following formulas for the kernels of $\psi({\boldsymbol r})$ and $a({\boldsymbol r})$, ${\boldsymbol r} \in \mathbb{R}^3$: \begin{align}
\sp{s\hat{q}}{\psi({\boldsymbol r})|s\hat{q}'} &= \delta_{n,n'-1} \,
\delta_{m,m'} \,
\delta^{3n'}(x \cup {\boldsymbol r} -x') \, (-1)^{\varrho((\hat{x},
{\boldsymbol r}),\hat{x}')} \, \delta^{3m}(y-y') \label{psikernel} \\
\sp{s\hat{q}}{a({\boldsymbol r})|s\hat{q}'} &= \delta_{n,n'} \,
\delta_{m,m'-1} \, \delta^{3n}(x-x') \,
(-1)^{\varrho(\hat{x},\hat{x}')} \,
\delta^{3m'}(y \cup {\boldsymbol r} - y') \label{akernel} \end{align} where $(x,y) = q = \pi(\hat{q})$, and $\varrho (\hat{x},\hat{x}')$ denotes the permutation that carries $\hat{x}$ to $\hat{x}'$ given that $x=x'$. The corresponding formulas for $\psi^\dag$ and $a^\dag$ can be obtained by exchanging $\hat{q}$ and $\hat{q}'$ on the right hand sides of \eqref{psikernel} and \eqref{akernel}. For the smeared-out operator $a_\varphi({\boldsymbol r})$, we obtain \begin{equation}\label{aprofilekernel}
\sp{s\hat{q}}{a_\varphi({\boldsymbol r})|s\hat{q}'} = \delta_{n,n'} \,
\delta_{m,m'-1} \, \delta^{3n}(x-x') \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum_{{\boldsymbol y}' \in y'}
\delta^{3m}(y- y'\setminus {\boldsymbol y}') \, \varphi({\boldsymbol y}' - {\boldsymbol r})\,. \end{equation} We make use of the resolution of the identity \begin{equation}\label{resolution}
I = \int\limits_{\mathcal{Q}} dq \, |s\hat{q} \rangle \langle
s\hat{q}|\,. \end{equation} Inserting \eqref{resolution} twice into \eqref{HIdef} and exploiting \eqref{psikernel} and \eqref{aprofilekernel}, we find \begin{equation}\label{crea1kernel} \begin{split}
\sp{s\hat{q}} {H_{I}| s\hat{q}'} &= \delta_{n,n'} \,
\delta_{m-1,m'} \, \delta^{3n}(x-x') \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum_{{\boldsymbol y} \in y} \delta^{3m'}
(y \setminus {\boldsymbol y} - y') \sum_{{\boldsymbol x} \in x} \varphi({\boldsymbol y} - {\boldsymbol x}) \: \\
&+ \delta_{n,n'} \, \delta_{m'-1,m} \,
\delta^{3n}(x-x') \, (-1)^{\varrho(\hat{x},\hat{x}')}
\sum_{{\boldsymbol y}' \in y'} \delta^{3m} (y - y' \setminus {\boldsymbol y}') \sum_{{\boldsymbol x} \in
x} \varphi({\boldsymbol y}' - {\boldsymbol x})\,. \end{split} \end{equation}
By \eqref{crea1povm}, the jump rates \eqref{tranrates} are \begin{equation}
\sigma(q|q') = \frac{\Big[\tfrac{2}{\hbar} \, \mathrm{Im} \,
\sp{\Psi}{s\hat{q}} \sp{s\hat{q}}{H_{I}| s\hat{q}'} \sp{s\hat{q}'}{\Psi}
\Big]^+} {\sp{\Psi}{s\hat{q}'} \sp{s\hat{q}'}{\Psi}} \,. \end{equation} More explicitly, we obtain from \eqref{crea1kernel} the rates \begin{equation}\label{crea1rates} \begin{split}
\sigma(q|q') &= \delta_{nn'} \,\delta_{m-1,m'} \,\delta^{3n}(x-x')
\sum_{{\boldsymbol y} \in y} \delta^{3m'}(y\setminus {\boldsymbol y}-y') \,
\sigma_{\mathrm{crea}}(q'\cup {\boldsymbol y}|q') \: \\
&+\delta_{nn'}\,\delta_{m,m'-1} \, \delta^{3n}(x-x') \sum_{{\boldsymbol y}' \in
y'} \delta^{3m}(y - y'\setminus {\boldsymbol y}') \, \sigma_{\mathrm{ann}}(q'\setminus
{\boldsymbol y}'|q') \end{split} \end{equation} with \begin{subequations} \begin{align}
\sigma_{\mathrm{crea}}(q'\cup {\boldsymbol y}|q')&= \frac{2 \sqrt{m'+1}}{\hbar} \,
\frac{\Big[ \mathrm{Im} \, \Psi^*(\hat{q}) \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum\limits_{{\boldsymbol x}' \in x'}
\varphi({\boldsymbol y}-{\boldsymbol x}') \, \Psi(\hat{q}')\Big]^+}{ \Psi^*(\hat{q}') \,
\Psi(\hat{q}')} \label{crea1crearate} \\
\sigma_{\mathrm{ann}}(q'\setminus {\boldsymbol y}'|q')&= \frac{2} {\hbar \sqrt{m'}}
\,\frac{\Big[\mathrm{Im} \, \Psi^*(\hat{q}) \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum\limits_{{\boldsymbol x}' \in x'}
\varphi({\boldsymbol y}'-{\boldsymbol x}') \, \Psi(\hat{q}') \Big]^+}{ \Psi^*(\hat{q}') \,
\Psi(\hat{q}')} , \label{crea1annrate} \end{align} \end{subequations} for arbitrary $\hat{q}' \in \pi^{-1}(q')$ and $\hat{q} \in \pi^{-1}(q)$ with $q=(x',y'\cup{\boldsymbol y})$ respectively $q=(x',y' \setminus {\boldsymbol y}')$. (Note that a sum sign can be drawn out of the plus function if the terms have disjoint supports.)
Equation \eqref{crea1rates} is worth looking at closely: One can read off that the only possible jumps are $(x',y') \to (x',y' \cup {\boldsymbol y})$, creation of a boson, and $(x',y') \to (x',y' \setminus {\boldsymbol y}')$, annihilation of a boson. In particular, while one particle is created or annihilated, the other particles do not move. The process that we considered in \cite{crea1} consists of pieces of Bohmian trajectories interrupted by jumps with rates \eqref{crea1rates}; the process is thus an example of the jump rate formula \eqref{tranrates}, and an example of combining jumps and Bohmian motion by means of process additivity.
The example shows how, for other QFTs, the jump rates \eqref{tranrates} can be applied to relevant interaction Hamiltonians: If $H_{I}$ is, in the position representation, a polynomial in the creation and annihilation operators, then it possesses a kernel on the relevant configuration space. A cut-off (implemented here by smearing out the creation and annihilation operators) needs to be introduced to make $H_{I}$ a well-defined operator on $L^2$.
If, in some QFT, the particle number operator is not conserved, jumps between the sectors of configuration space are inevitable for an equivariant process. And, indeed, when $H_{I}$ does not commute with the particle number operator (as is usually the case), jumps can occur that change the number of particles. Often, $H_{I}$ contains \emph{only} off-diagonal terms with respect to the particle number; then every jump will change the particle number. This is precisely what happens in the model of \cite{crea1}.
\subsection{Efficient Calculation of Rates in the Previous Example} \label{sec:efficient}
We would like to give another, refined way of calculating the explicit jump rates \eqref{crea1rates} from the definition \eqref{HIdef} of $H_{I}$. The calculation above is rather cumbersome, partly \emph{because} of all the $\delta$'s. It is also striking that only very few transitions $q' \to q$ are actually possible, which suggests that it is unnecessary to write down a formula for the kernel
$\sp{q}{H_{I}|q'}$ valid for all pairs $q,q'$. Rather than writing down all the $\delta$ terms as in \eqref{crea1rates}, it is easier to specify the possible transitions $q' \to q$ and to write down the rates, such as \eqref{crea1crearate} and \eqref{crea1annrate}, only for these transitions. Thus, for a more efficient calculation of the rates, it is advisable to first determine the possible transitions, and then we need to keep track only of the corresponding kernel elements.
\subsubsection{A Diagram Notation}
To formulate this more efficient strategy, it is helpful to regard $\Psi$ as a cross-section of a fiber bundle $E$ over the Riemannian manifold $\mathcal{Q}$, or of a countable union $E = \bigcup_i E^{(i)}$ of bundles $E^{(i)}$ over Riemannian manifolds $\mathcal{Q}^{(i)}$ with $\mathcal{Q} = \bigcup_i \mathcal{Q}^{(i)}$. (In the present example, with $\mathcal{Q}$ given by \eqref{conffermionboson}, we take $i$ to be the pair $(n,m)$ of particle numbers, $\mathcal{Q}^{(n,m)}$ to be the $(n,m)$-particle sector, and $E^{(i)}$ to be defined by \eqref{idenEdef} (with $\pi$ the natural projection from $\hat{\mathcal{Q}} \setminus \Delta$, with $\hat{\mathcal{Q}}$ given by \eqref{crea1conf}, to $\mathcal{Q}$). The $\hat{q} \in \pi^{-1}(q)$ can be viewed as defining an orthonormal basis of $E_q$.)
A key element of the strategy is a special diagram notation for operators. The operators we have in mind are $H_{I}$ and its building blocks, the field operators. The strategy will start with the diagrams for the field operators, and obtain from them a diagram for $H_{I}$. The diagram will specify, for an operator $O$, what the kernel of $O$ is, while leaving out parts of the kernel that are zero.
So let us assume that $O$ has kernel $\sp{q}{O|q'}$, i.e., $(O\Psi)(q) =
\int \sp{q}{O|q'} \, \Psi(q') \, dq'$. The diagram \begin{equation}\label{arrow}
q' \xrightarrow[O]{K(q',\lambda)} F(q',\lambda) \end{equation} means that the operator $O$ has \emph{kernel constructed from $F$ and $K$}, \begin{equation}\label{kernelarrow}
\sp{q}{O|q'} = \int\limits_{\Lambda} d\lambda \, \delta\big(q-
F(q',\lambda)\big) \, K(q',\lambda), \end{equation} where $\lambda$ varies in some parameter space $\Lambda$, $F: \mathcal{Q} \times \Lambda \to \mathcal{Q}$, and $K$ is a function (or distribution) of $q'$ and $\lambda$ such that
$K(q',\lambda) : E_{q'} \to E_{F(q',\lambda)}$
is a $\mathbb{C}$-linear mapping.
The role of $\lambda$ is to parametrize the possible transitions; e.g., for the boson creation \eqref{crea1crearate} in the previous section, $\lambda$ would be the position ${\boldsymbol y}$ of the new boson, and $\Lambda = \mathbb{R}^3$. The notation \eqref{arrow} does not explicitly mention what $\Lambda$ and the measure $d\lambda$ are; this will usually be clear from the context of the diagram. The measure $d\lambda$ will usually be a uniform distribution over the parameter space $\Lambda$, such as Lebesgue measure if $\Lambda = \mathbb{R}^d$ or the counting measure if $\Lambda$ is finite or countably infinite. We may also allow having a different $\Lambda_{q'}$ for every $q'$.
In words, \eqref{arrow} may be read as: ``According to $O$, the possible transitions from $q'$ are to $F(q',\lambda)$, and are associated with the amplitudes $K(q',\lambda)$.'' In fact, when $O = H$, a jump from $q'$ can lead only to those $q$'s for which $q = F(q',\lambda)$ for some value of $\lambda$, and the corresponding jump rate \eqref{tranrates} is \begin{equation}\label{arrowrates}
\sigma\big(F(q',\lambda)\big|q'\big) = \frac{[(2/\hbar) \, \mathrm{Im} \,
\Psi^*(F(q',\lambda)) \, K(q',\lambda) \, \Psi(q')]^+} {\Psi^*(q')
\, \Psi(q')}, \end{equation}
provided that for given $q'$, $F(q', \,\cdot\,)$ is an injective mapping. Here, $\sigma(q|q')$ is the density of the measure
$\sigma(dq|q')$ with respect to the measure on $\mathcal{Q}$ \begin{equation}\label{arrowuniform}
\mu_{q'}(dq) = \int\limits_{\Lambda} d\lambda \, \delta\big(
q-F(q',\lambda) \big) \, dq, \end{equation} where $\delta(q-q_0) \, dq$ denotes the measure on $\mathcal{Q}$ with total weight 1 concentrated at $q_0$. The measure \eqref{arrowuniform}, the image of $d\lambda$ under the map $F(q',\,\cdot\,)$, is concentrated on the set $\{ F(q',\lambda) : \lambda \in \Lambda\}$ of possible destinations and plays the role of the ``uniform distribution'' over this set. In other words, \eqref{arrowrates} is the rate of occurrence, with respect to $d\lambda$, of the transition corresponding to $\lambda$. (For the boson creation rate \eqref{crea1crearate}, $\mu_{q'}(dq)$ turns out to be the Lebesgue measure in ${\boldsymbol y}$ on the subset $\{q' \cup {\boldsymbol y}: {\boldsymbol y} \in \mathbb{R}^3 \setminus q'\} \subseteq \mathcal{Q}$.)
Given $O$, the choice of $\Lambda, F$, and $K$ is not unique. One could always choose $\Lambda = \mathcal{Q}$, $F(q',q) = q$, and $K(q',q) =
\sp{q}{O|q'}$, which of course would mean to miss the point of this notation. The case that $F$ and $K$ do not depend on a parameter $\lambda$ is formally contained in the scheme \eqref{kernelarrow} by taking $\Lambda$ to be a one-point set (and $d\lambda$ the counting measure); in this case \eqref{kernelarrow} means \begin{equation}\label{kernelarrownolambda}
\sp{q}{O|q'} = \delta(q-F(q')) \, K(q')\,. \end{equation} Conversely, whenever $\# \Lambda =1$, the dependence of $F$ and $K$ on the parameter $\lambda$ is irrelevant.
A basic advantage of the notation \eqref{arrow}, compared to writing down a formula for $\sp{q}{O|q'}$, is that many $\delta$ factors become unnecessary. For example, if $O$ is multiplication by $V(q)$, then ($\Lambda$ is a one-point set and) we have the diagram \[
q' \xrightarrow[O]{V(q')} q'. \]
\subsubsection{Operations With Diagrams}
For the product $O_2O_1$ of two operators given by diagrams, we have the diagram \begin{equation}\label{productarrow}
q' \xrightarrow[O_2O_1]{K_2(F_1(q',\lambda_1),\lambda_2) \,
K_1(q',\lambda_1)} F_2(F_1(q',\lambda_1),\lambda_2) \end{equation} with parameter space $\Lambda_1 \times \Lambda_2$, for which we also write \begin{equation}\label{concatarrow}
q' \xrightarrow[O_1]{K_1(q',\lambda_1)} F_1(q',\lambda_1)
\xrightarrow[O_2]{K_2(F_1(q',\lambda_1),\lambda_2)}
F_2(F_1(q',\lambda_1),\lambda_2). \end{equation} We thus define the concatenation of two diagrams by means of the composition of the transition mappings and the product of the amplitudes, i.e., using obvious notation, \begin{equation}\label{shortconcatarrow}
q_1 \xrightarrow{\alpha} q_2 \xrightarrow{\beta} q_3 \quad
\text{means} \quad q_1 \xrightarrow{\alpha\beta} q_3. \end{equation} Thus, multiplication of operators corresponds to concatenation of diagrams.
For the sum $O_1 +O_2$ of two operators given by diagrams with the same parameter space $\Lambda_1 = \Lambda _2 = \Lambda$ and the same transition mapping $F_1(q', \lambda) = F_2(q',\lambda) = F(q',\lambda)$, we have the diagram \begin{equation}\label{sumarrow}
q' \xrightarrow[O_1 + O_2]{K_1(q',\lambda) + K_2(q',\lambda)}
F(q',\lambda). \end{equation}
\subsubsection{Diagrams of Creation and Annihilation Operators}
We now write down diagrams for creation and annihilation operators. In the case that $O = O({\boldsymbol r})$ arises from formally evaluating an operator-valued distribution $O({\boldsymbol x})$ at ${\boldsymbol x} = {\boldsymbol r}$, the dependence of $K(q',\lambda)$ on $\lambda$ is in the sense of distributions rather than functions. More precisely, we have \begin{equation}\label{Kdistribution}
K(q',\lambda) = D(q',\lambda) \, K_0(q',\lambda) \end{equation} where $D$ is a (real-valued) distribution on $\mathcal{Q} \times \Lambda$, and $K_0$ a mapping-valued function such that for every $q'$ and $\lambda$, $K_0(q',\lambda)$ is a linear mapping $E_{q'} \to E_{F(q',\lambda)}$.
For $\psi^\dag({\boldsymbol r})$ and $\psi({\boldsymbol r})$, ${\boldsymbol r} \in \mathbb{R}^3$, we have (recall that $x'$ is a finite subset of $\mathbb{R}^3$) \begin{subequations} \begin{align}
(x',y')& \xrightarrow[\psi^\dag({\boldsymbol r})] {\alpha_{\mathrm{f}}} (x' \cup {\boldsymbol r},y')
\qquad\quad (\#\Lambda =1) \\ (x',y')& \xrightarrow[\psi({\boldsymbol r})]
{\delta({\boldsymbol x}'-{\boldsymbol r}) \, \varepsilon_{\mathrm{f}}} (x' \setminus {\boldsymbol x}',y') \qquad
(\Lambda = x', \lambda={\boldsymbol x}') \end{align} \end{subequations} using linear mappings $\alpha_{\mathrm{f}}: E_{q'} \to E_{ (x' \cup {\boldsymbol r},y')}$ (``append a fermion'') and $\varepsilon_{\mathrm{f}}: E_{q'} \to E_{ (x' \setminus {\boldsymbol x}',y')}$ (``erase a fermion''), which can be regarded as the natural mappings between these fiber spaces. They are defined through the following properties: \begin{subequations} \begin{align}
&\alpha_{\mathrm{f}} \Psi\text{ is appropriately symmetrized} \\
&\big(\alpha_{\mathrm{f}} \Psi\big)((\hat{x}', {\boldsymbol r}), \hat{y}') = \frac{1}{\sqrt{n'
+1}} \, \Psi(\hat{x}',\hat{y}') \\
&\big(\varepsilon_{\mathrm{f}} \Psi\big) (\hat{x},\hat{y}') =
\sqrt{n'} \, \Psi((\hat{x}, {\boldsymbol x}'),\hat{y}') \end{align} \end{subequations} where $\Psi \in E_{q'}$, and $\hat{x}$ is an arbitrary ordering of the set $x=x' \setminus {\boldsymbol x}'$. (Recall that the set $\pi^{-1}(q')$ of the possible orderings of $q'$ forms a basis of $E_{q'}$, so that every ordering $(\hat{x}', \hat{y}') = \hat{q}' \in \pi^{-1}(q')$ corresponds to a particular component of $\Psi$. Thus, $((\hat{x}',{\boldsymbol r}),\hat{y}') \in \pi^{-1}(x' \cup {\boldsymbol r}, y')$ corresponds to a particular component in $E_{(x' \cup {\boldsymbol r}, y')}$.)
For the smeared-out creation and annihilation operators $a_\varphi^\dag({\boldsymbol r})$ and $a_\varphi({\boldsymbol r})$, we have \begin{subequations} \begin{align}
(x',y') &\xrightarrow[a_\varphi^\dag({\boldsymbol r})] {\varphi({\boldsymbol y}-{\boldsymbol r}) \,
\alpha_{\mathrm{b}}} (x',y' \cup {\boldsymbol y}) \qquad (\Lambda = \mathbb{R}^3, \lambda =
{\boldsymbol y}) \\
(x',y') &\xrightarrow[a_\varphi({\boldsymbol r})] {\varphi({\boldsymbol y}'-{\boldsymbol r}) \,
\varepsilon_{\mathrm{b}}} (x', y' \setminus {\boldsymbol y}') \qquad (\Lambda = y',
\lambda = {\boldsymbol y}') \end{align} \end{subequations} where $\alpha_{\mathrm{b}}$ (``append a boson'') and $\varepsilon_{\mathrm{b}}$ (``erase a boson'') are the analogous linear mappings relating different spaces, $\alpha_{\mathrm{b}}: E_{q'} \to E_{(x',y' \cup {\boldsymbol y})}$ and $\varepsilon_{\mathrm{b}}: E_{q'} \to E_{(x',y' \setminus {\boldsymbol y}')}$, defined by the following properties: \begin{subequations} \begin{align}
&\alpha_{\mathrm{b}} \Psi\text{ is appropriately symmetrized} \\
&\big(\alpha_{\mathrm{b}} \Psi\big) (\hat{x}',(\hat{y}', {\boldsymbol y})) =
\frac{1}{\sqrt{m'+1}} \, \Psi(\hat{x}',\hat{y}') \\
&\big(\varepsilon_{\mathrm{b}} \Psi\big) (\hat{x}', \hat{y}) =
\sqrt{m'} \, \Psi(\hat{x}', (\hat{y}, {\boldsymbol y}')), \end{align} \end{subequations} where $\hat{y}$ is an arbitrary ordering of the set $y=y' \setminus {\boldsymbol y}'$, $\hat{x}'$ one of $x'$, $\hat{y}'$ one of $y'$, and $\Psi \in E_{q'}$.
\subsubsection{Application of the Diagram Method}
Now let us apply the strategy to the example \eqref{HIdef} of the previous section. For $\psi^\dag({\boldsymbol r}) \, a^\dag_\varphi({\boldsymbol r}) \, \psi({\boldsymbol r})$, we have the diagram \[
q' \xrightarrow[\psi({\boldsymbol r})]{\delta({\boldsymbol x}'-{\boldsymbol r}) \, \varepsilon_{\mathrm{f}}} (x'
\setminus {\boldsymbol x}', y') \xrightarrow[a^\dag_\varphi({\boldsymbol r})]{\varphi({\boldsymbol y}
-{\boldsymbol r}) \, \alpha_{\mathrm{b}}} (x' \setminus {\boldsymbol x}',y' \cup {\boldsymbol y})
\xrightarrow[\psi^\dag({\boldsymbol r})] {\alpha_{\mathrm{f}}} (x' \setminus {\boldsymbol x}' \cup
{\boldsymbol r},y' \cup {\boldsymbol y}) \] with $\Lambda = x' \times \mathbb{R}^3$. Using the concatenation rule \eqref{shortconcatarrow}, we can write instead \[
q' \xrightarrow[\psi^\dag({\boldsymbol r}) \, a^\dag_\varphi({\boldsymbol r}) \,
\psi({\boldsymbol r})]{\delta({\boldsymbol x}'-{\boldsymbol r}) \,\varphi({\boldsymbol y} -{\boldsymbol r}) \, \alpha_{\mathrm{f}}
\alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}}} (x' \setminus {\boldsymbol x}' \cup {\boldsymbol r},y' \cup {\boldsymbol y}). \] Integrating over $d{\boldsymbol r}$, we obtain, since $x' \setminus {\boldsymbol x}' \cup {\boldsymbol r}$ may be replaced by $x'$, which is independent of ${\boldsymbol x}'$, \begin{equation}\label{creaarrow}
q' \xrightarrow[\int d{\boldsymbol r} \, \psi^\dag({\boldsymbol r}) \, a^\dag_\varphi({\boldsymbol r})
\, \psi({\boldsymbol r})]{\sum\limits_{{\boldsymbol x}' \in x'}\varphi({\boldsymbol y} -{\boldsymbol x}') \,
\alpha_{\mathrm{f}} \alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}}} (x',y' \cup {\boldsymbol y}), \end{equation} with $\Lambda = \mathbb{R}^3$. We have now taken care of one of two terms in \eqref{HIdef}, involving $a^\dag$ rather than $a$. {}From \eqref{creaarrow} we read off, without a big calculation, that this term corresponds to jumps $(x',y') \to (x',y'\cup {\boldsymbol y})$, or creation of a boson. The corresponding jump rate is given by \eqref{arrowrates}, and reads here: \begin{equation}\label{crea1arrowcrearate}
\sigma(x',y' \cup {\boldsymbol y}|q') = \frac{2}{\hbar} \, \frac{\Big[\mathrm{Im} \,
\Psi^*(x',y' \cup {\boldsymbol y}) \sum\limits_{{\boldsymbol x}' \in x'} \varphi({\boldsymbol y} -{\boldsymbol x}')
\, \alpha_{\mathrm{f}} \alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}} \, \Psi(q')\Big]^+}
{\Psi^*(q') \, \Psi(q')}. \end{equation} This result agrees with \eqref{crea1crearate}.\footnote{Here is why: First, $\Psi^*(q') \, \Psi(q') = n'! \, m'! \, \Psi^*(\hat{q}') \, \Psi(\hat{q}')$ because the inner product in $E_{q'}$ involves summation over all $\hat{q}' \in \pi^{-1}(q')$. Similarly, the square bracket in the numerator of \eqref{crea1arrowcrearate} involves the inner product of $E_{(x',y' \cup {\boldsymbol y})}$, consisting of $n'! \, (m'+1)!$ contributions. The numberings $\hat{q}$ and $\hat{q}'$ in \eqref{crea1crearate} can be so chosen that $\hat{x} = \hat{x}'$, ${\boldsymbol x}'$ gets the last place of $\hat{x}'$, and $\hat{y} = \hat{y}' \cup {\boldsymbol y}$; then $\varrho(\hat{x},\hat{x}')$ is trivial, and $\alpha_{\mathrm{f}} \alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}} \Psi(\hat{q}) = (n')^{-1/2} (m'+1)^{-1/2} (n')^{1/2} \Psi(\hat{q}')$. Thus, the square bracket in \eqref{crea1arrowcrearate} is $n'! \, m'!\sqrt{m'+1}$ times the square bracket in \eqref{crea1crearate}.}
We treat the term $\int d{\boldsymbol r} \, \psi^\dag({\boldsymbol r}) \, a_\varphi({\boldsymbol r}) \, \psi({\boldsymbol r})$ in the same way: We begin with the diagram \[
q' \xrightarrow[\psi({\boldsymbol r})]{\delta({\boldsymbol x}'-{\boldsymbol r}) \, \varepsilon_{\mathrm{f}}} (x'
\setminus {\boldsymbol x}', y') \xrightarrow[a_\varphi({\boldsymbol r})]{\varphi({\boldsymbol y}'
-{\boldsymbol r}) \, \varepsilon_{\mathrm{b}}} (x' \setminus {\boldsymbol x}',y' \setminus {\boldsymbol y}')
\xrightarrow[\psi^\dag({\boldsymbol r})] {\alpha_{\mathrm{f}}} (x' \setminus {\boldsymbol x}' \cup
{\boldsymbol r},y' \setminus {\boldsymbol y}') \] with $\Lambda = x' \times y'$. Then we integrate over $d{\boldsymbol r}$ and obtain the associated jump rate \begin{equation}\label{crea1arrowannrate}
\sigma(x',y' \setminus {\boldsymbol y}'|q') = \frac{2}{\hbar} \, \frac{\Big[\mathrm{Im}
\, \Psi^*(x',y' \setminus {\boldsymbol y}') \sum\limits_{{\boldsymbol x}' \in x'}
\varphi({\boldsymbol y}' -{\boldsymbol x}') \, \alpha_{\mathrm{f}} \varepsilon_{\mathrm{b}} \varepsilon_{\mathrm{f}} \,
\Psi(q')\Big]^+} {\Psi^*(q') \, \Psi(q')}, \end{equation} which agrees with \eqref{crea1annrate}. Finally, $H_{I}$ (the sum of both contributions) corresponds according to \eqref{tranrates} to jumps which, since the two contributions have no transitions $q' \to q$ in common (or, in other words, since their kernels have disjoint supports in $\mathcal{Q} \times \mathcal{Q}$), are \emph{either} $q' \to (x',y' \cup {\boldsymbol y})$, with rate \eqref{crea1arrowcrearate}, \emph{or} $q' \to (x',y' \setminus {\boldsymbol y}')$, with rate \eqref{crea1arrowannrate}.
\subsection{Pair Creation in an External Field} \label{sec:positron}
As our second example, we present the Bell-type version of a reasonable and often used QFT of electrons and positrons, in which the electromagnetic field is a background field \cite{Ruijsenaars}. The Bell-type version exhibits pair creation and annihilation (in the literal sense) and employs various notions we have introduced: process additivity, the configuration space $\Gamma_{\!\neq}(\mathbb{R}^3)$ of a variable number of identical particles, the free process, POVMs which are not PVMs, and stochastic jumps.
\subsubsection{Fock Space and Hamiltonian}\label{sec:posiFock}
We consider the second quantized Dirac field in an electromagnetic background field $A_\mu({\boldsymbol x},t)$. In terms of field operators, the Hamiltonian reads \begin{equation}\label{fieldhamil}
H= \int d^3 x :{\Phi^*}({\boldsymbol x})\big[-i c\hbar \boldsymbol{\alpha} \cdot
\nabla +\beta m c^2+ e(\boldsymbol{\alpha}\cdot\boldsymbol{A} +
A_0) \big]\Phi({\boldsymbol x}):\;\;, \end{equation} with colons denoting normal ordering. Note that $H$ is time-dependent due to the time-dependence of $A_\mu({\boldsymbol x},t)$; more precisely, $H_{I}$ is time-dependent while $H_0$ is fixed. As a consequence, the relevant jump rate \eqref{tranrates} is now time-dependent in three ways: through $H_{I}$, through $\Psi$, and through $q' = Q_t$.
We quickly recall what the Hilbert space and the field operators are, and specify what POVM we use. After that, we construct the associated process.
The Hilbert space $L^2(\mathbb{R}^3,\mathbb{C}^4)$ of the Dirac equation is split into the orthogonal sum $\mathscr{H}_+ \oplus \mathscr{H}_-$ of the positive and negative energy subspaces of the \emph{free} Dirac operator, \[
h_0= -i c\hbar {\boldsymbol \alpha} \cdot \nabla + \beta mc^2\,. \] The 1-electron Hilbert space $\mathscr{H}_\mathrm{e}$ and the 1-positron Hilbert space $\mathscr{H}_\mathrm{p}$ are copies of $\mathscr{H}_+$, and the Fock space $\mathscr{F}=\Gamma \mathscr{H}^{(1)}$ arises then from the one-particle Hilbert space $\mathscr{H}^{(1)} = \mathscr{H}_\mathrm{e} \oplus \mathscr{H}_\mathrm{p}$ in the usual manner: with the anti-symmetrization operator ${\mathrm{Anti}\,}$, \begin{equation}\label{elplusposFock}
\mathscr{F}= \bigoplus_{N=0}^\infty {\mathrm{Anti}\,}
(({\mathscr{H}_\mathrm{e}}\oplus{\mathscr{H}_\mathrm{p}})^{\otimes N})\,, \end{equation} which can be naturally identified with \begin{equation}\label{elFockposFock}
\mathscr{H} := \mathscr{F}_\mathrm{e} \otimes \mathscr{F}_\mathrm{p} =
\bigoplus_{n=0}^\infty {\mathrm{Anti}\,} (\mathscr{H}_\mathrm{e}^{\otimes n}) \otimes
\bigoplus_{{\widetilde{n}}=0}^\infty {\mathrm{Anti}\,} (\mathscr{H}_\mathrm{p}^{\otimes {\widetilde{n}}})\,. \end{equation} Since $\mathscr{H}_+ \subseteq L^2(\mathbb{R}^3,\mathbb{C}^4)$, $\mathscr{H}$ can be understood as a subspace of \begin{equation}\label{elHext}
\mathscr{H}_{\mathrm{ext}} := \bigoplus_{n=0}^\infty
{\mathrm{Anti}\,}(L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes n}) \otimes
\bigoplus_{{\widetilde{n}}=0}^\infty {\mathrm{Anti}\,}(L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes {\widetilde{n}}}) . \end{equation}
We choose the POVM and configuration space in the way suggested by the form \eqref{elFockposFock}, rather than \eqref{elplusposFock}: \begin{equation}
\mathcal{Q} = \Gamma_{\!\neq}(\mathbb{R}^3) \times \Gamma_{\!\neq}(\mathbb{R}^3), \end{equation} where the first factor represents electrons and the second positrons. (Recall from Section~\ref{sec:free} that $\Gamma_{\!\neq}(\mathbb{R}^3)$ denotes the space of all finite subsets of $\mathbb{R}^3$. Another interesting possibility, suggested by the representation \eqref{elplusposFock}, is to set $\mathcal{Q} = \Gamma_{\!\neq}(\mathbb{R}^3)$. This would mean that, insofar as the configuration is concerned, electrons and positrons are not distinguished. However, we will not pursue this possibility here.) The natural POVM ${P}$ (see Section~\ref{sec:GammaPOVM} and Section~\ref{sec:identical}) can be expressed as an extension from rectangular sets (the existence of such an extension is proved in Section~4.4 of \cite{crea2A}): \[
{P}(B_\mathrm{e} \times B_\mathrm{p}) = \Gamma{P}^{(1)}(B_\mathrm{e}) \otimes
\Gamma{P}^{(1)}(B_\mathrm{p}) \] with ${P}^{(1)}$ the POVM on $\mathscr{H}_+$ that we considered before, arising by projection from the natural PVM on $L^2(\mathbb{R}^3,\mathbb{C}^4)$. Alternatively, ${P}$ can be viewed as arising, by projection to $\mathscr{H}$, and from $\hat{\mathcal{Q}} = \bigcup_{n=0}^\infty (\mathbb{R}^3)^n \times \bigcup_{{\widetilde{n}}=0}^\infty (\mathbb{R}^3)^{\widetilde{n}}$ to $\mathcal{Q}$, of the natural PVM on $\hat{\mathcal{Q}}$ acting on $\mathscr{H}_{\mathrm{ext}}$. Note that ${P}$
represents the usual $|\Psi|^2$ distribution in the sense that for a configuration $q$ with electrons at ${\boldsymbol x}_1, \ldots, {\boldsymbol x}_n$ and positrons at ${\widetilde{\vx}}_1, \ldots, {\widetilde{\vx}}_{\widetilde{n}}$, we have \[
\mathbb{P}(dq) = \sp{\Psi}{{P}(dq)|\Psi} = n! {\widetilde{n}}! \,
|\Psi^{(n,{\widetilde{n}})}({\boldsymbol x}_1, \ldots, {\widetilde{\vx}}_{\widetilde{n}})|^2 \, d{\boldsymbol x}_1 \cdots d{\widetilde{\vx}}_{\widetilde{n}} \] where $\Psi^{(n,{\widetilde{n}})}$ is just the wave function $(\mathbb{R}^3)^{n+{\widetilde{n}}}\to (\mathbb{C}^4)^{\otimes (n+{\widetilde{n}})}$ we get when we decompose the state vector in the manner suggested by \eqref{elHext}. $\Psi$ is normalized so that \[
\sum_{n,{\widetilde{n}} =0}^\infty \int d{\boldsymbol x}_1 \cdots d{\widetilde{\vx}}_{\widetilde{n}} \,
|\Psi^{(n,{\widetilde{n}})}({\boldsymbol x}_1, \ldots, {\widetilde{\vx}}_{\widetilde{n}})|^2 = 1. \]
The field operator is defined by \begin{equation}\label{Phidef}
\Phi(f) = b(P_+ f) + d^*(CP_- f) \end{equation} where $f$ is a test function from $L^2(\mathbb{R}^3,\mathbb{C}^4)$, $P_\pm$ is the projection to $\mathscr{H}_\pm \subseteq L^2(\mathbb{R}^3,\mathbb{C}^4)$, $C$ is the charge conjugation operator which maps $\mathscr{H}_-$ to $\mathscr{H}_+$ and vice versa, and $b$ is the electron annihilation and $d^*$ the positron creation operator. Letting ${\boldsymbol e}_i$ be the standard orthonormal basis of $\mathbb{C}^4$, $i =1,2,3,4$, $\Phi({\boldsymbol x})$ stands for $\Phi_i({\boldsymbol x}) = \Phi({\boldsymbol e}_i \, \delta(\,\cdot\, -{\boldsymbol x}))$, where $i$ gets contracted with the ${\boldsymbol \alpha}$ matrices. Similarly, we define, as usual, \begin{subequations} \begin{align}
b_i({\boldsymbol x}) &= b\Big(P_+({\boldsymbol e}_i \,
\delta(\,\cdot\, - {\boldsymbol x})) \Big) \\
\text{and } d_i({\boldsymbol x}) &= d \Big(CP_-({\boldsymbol e}_i \,
\delta(\,\cdot\, - {\boldsymbol x})) \Big). \end{align} \end{subequations} We thus have $\Phi_i({\boldsymbol x}) = b_i({\boldsymbol x}) + d_i^*({\boldsymbol x})$.
\subsubsection{The Associated Process}
We now describe the associated Markov process. The free part of (\ref{fieldhamil}), \[
H_0= \int d^3 x :{\Phi^*}({\boldsymbol x})\big[-i c \hbar
{\boldsymbol \alpha}\cdot\nabla +\beta m c^2 \big]\Phi({\boldsymbol x}):\;\;, \] preserves particle numbers (it commutes with the electron and positron number operators), evolving the $(n,{\widetilde{n}})$-particle sector of the Fock space according to the free $(n,{\widetilde{n}})$-particle Hamiltonian \[
H^{(n,{\widetilde{n}})}_0 = \sum_{k=1}^n h^{(k)}_0 + \sum_{{\widetilde{k}}=1}^{\widetilde{n}}
\widetilde{h}^{({\widetilde{k}})}_0\,, \] with \begin{align}
h^{(k)}_0 &= -i c\hbar {\boldsymbol \alpha}^{(k)} \cdot \nabla_k + \beta^{(k)} mc^2
\nonumber\\
\widetilde{h}^{({\widetilde{k}})}_0 &= -i c\hbar \widetilde\valpha^{({\widetilde{k}})} \cdot \widetilde{\nabla}_{\widetilde{k}}
+ \widetilde\beta^{({\widetilde{k}})} mc^2\,,\nonumber \end{align} where ${\boldsymbol \alpha}^{(k)}$ and $\beta^{(k)}$ act on the $k$-th electron index in the tensor product representation \eqref{elFockposFock} and $\widetilde\valpha^{({\widetilde{k}})}$ and $\widetilde{\beta}^{({\widetilde{k}})}$ on the ${\widetilde{k}}$-th positron index. $\widetilde{\nabla}_{\widetilde{k}}$ is the gradient with respect to ${\widetilde{\vx}}_{\widetilde{k}}$.
With $H_0$ is associated a deterministic motion of the configuration in $\mathcal{Q}$, the free process introduced in Section \ref{sec:free}. During this motion, the actual numbers $N, {\widetilde{N}}$ of electrons and positrons remain constant, while the positions $(\boldsymbol X_1, \ldots, \boldsymbol X_N, \widetilde{\boldsymbol X}_1, \ldots, \widetilde{\boldsymbol X}_{\widetilde{N}})=:Q$ move according to Bohm--Dirac velocities (\ref{BohmDirac}), i.e. \begin{subequations}\label{elposmotion} \begin{align}
\dot{\boldsymbol X}_k &= c\frac{\Psi^*(Q) \, {\boldsymbol \alpha}^{(k)} \, \Psi(Q)}
{\Psi^*(Q) \, \Psi(Q)} \\
\dot{\widetilde{\boldsymbol X}}_{\widetilde{k}} &= c\frac{\Psi^*(Q) \, \widetilde\valpha^{({\widetilde{k}})} \, \Psi(Q)}
{\Psi^*(Q) \, \Psi(Q)} \end{align} \end{subequations} where numerators and denominators are scalar products in $(\mathbb{C}^4)^{\otimes (N+{\widetilde{N}})}$.
We turn now to the interaction part. Setting $A ={\boldsymbol \alpha} \cdot e\boldsymbol{A} + e A_0$, we have that \begin{subequations} \begin{align}
H_{I}&= \int d^3 {\boldsymbol x} :{\Phi^*}({\boldsymbol x}) \, A({\boldsymbol x})\,\Phi({\boldsymbol x}):\;\; =\\
&= \sum_{i,j=1}^4 \int d^3 {\boldsymbol x} :(b^*_i({\boldsymbol x}) +
d_i({\boldsymbol x})) \, A^{i,j} ({\boldsymbol x}) \,
(b_j({\boldsymbol x})+d^*_j({\boldsymbol x})):\;\; = \\ \begin{split} \label{HIterms}
&= \sum_{i,j=1}^4 \int d^3 {\boldsymbol x} \, \Big(b^*_i({\boldsymbol x}) \,
A^{i,j}({\boldsymbol x}) \, b_j({\boldsymbol x}) + d_i({\boldsymbol x})
\,A^{i,j}({\boldsymbol x}) \, b_j({\boldsymbol x}) \: + \\
&\quad + \: b^*_i({\boldsymbol x}) \, A^{i,j}({\boldsymbol x}) \,
d^*_j({\boldsymbol x}) - d^*_j({\boldsymbol x}) \,
A^{i,j}({\boldsymbol x}) \, d_i({\boldsymbol x}) \Big). \end{split} \end{align} \end{subequations} Since $H_{I}$ is a polynomial in creation and annihilation operators, it possesses a kernel and corresponds to stochastic jumps. To compute the rates, we apply the strategy developed in Section \ref{sec:efficient}, using diagrams. To this end, we regard fermionic wave functions again as cross-sections of a bundle $E$, defined here by \begin{equation}\label{elposEdef}
E_q = \bigoplus_{\hat{q} \in \pi^{-1}(q)} (\mathbb{C}^4)^{\otimes n}
\otimes (\mathbb{C}^4)^{\otimes {\widetilde{n}}}. \end{equation} Fermionic symmetry of a cross-section $\Psi$ of $E$ means that \begin{equation}\label{Psiantisym}
\Psi\!\!
\begin{array}{l}
{\scriptstyle \varrho(i_1 \ldots i_n),
{\widetilde{\permutation}}({\tilde{\imath}}_1 \ldots {\tilde{\imath}}_{\widetilde{n}})} \\
(\varrho({\boldsymbol x}_1 \ldots {\boldsymbol x}_n), {\widetilde{\permutation}}({\widetilde{\vx}}_1 \ldots
{\widetilde{\vx}}_{\widetilde{n}})) \\ {}
\end{array}
= (-1)^\varrho \, (-1)^{\widetilde{\permutation}} \, \Psi\!\!
\begin{array}{l}
{\scriptstyle i_1 \ldots i_n, {\tilde{\imath}}_1 \ldots
{\tilde{\imath}}_{\widetilde{n}}} \\
({\boldsymbol x}_1 \ldots {\boldsymbol x}_n, {\widetilde{\vx}}_1 \ldots {\widetilde{\vx}}_{\widetilde{n}}) \\ {}
\end{array} \end{equation} for all permutations $\varrho \in S_n$ and ${\widetilde{\permutation}} \in S_{{\widetilde{n}}}$.
The diagrams for $b^*_i({\boldsymbol x}),b_i({\boldsymbol x}),d^*_i({\boldsymbol x})$, and $d_i({\boldsymbol x})$ are \begin{subequations} \begin{align}
(x',{\widetilde{x}}') &\xrightarrow [b^*_i({\boldsymbol x})]
{\sum_j {S_+}^{j}_{i} ({\boldsymbol x}' - {\boldsymbol x}) \,
\alpha_\mathrm{e}({\boldsymbol e}_j)} (x'\cup {\boldsymbol x}',{\widetilde{x}}')\\
(x',{\widetilde{x}}') &\xrightarrow [b_i({\boldsymbol x})]
{\sum_j {S_+}^{j}_{i} ({\boldsymbol x}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{e}({\boldsymbol e}_j)} (x'\setminus {\boldsymbol x}',{\widetilde{x}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d^*_i({\boldsymbol x})]
{\sum_j {S_-}^{j}_{i} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\alpha_\mathrm{p}({\boldsymbol e}_j)} (x',{\widetilde{x}}'\cup {\widetilde{\vx}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d_i({\boldsymbol x})]
{\sum_j {S_-}^{j}_{i} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{p}({\boldsymbol e}_j)} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}') \end{align} \end{subequations} where the matrix function ${S_+}_j^i ({\boldsymbol x})$ is defined as the $j$-component of $P_+ ({\boldsymbol e}_i \, \delta(\,\cdot\,))$, and ${S_-}_j^i ({\boldsymbol x})$ as the $j$-component of $CP_- ({\boldsymbol e}_i \, \delta(\,\cdot\,))$. The linear mappings $\alpha_\mathrm{e}({\boldsymbol e}_j): E_{q'} \to E_{(x' \cup {\boldsymbol x}',{\widetilde{x}}')}$ (``append an electron with spinor ${\boldsymbol e}_j$'') and $\varepsilon_\mathrm{e}({\boldsymbol e}_j): E_{q'} \to E_{(x' \setminus {\boldsymbol x}',{\widetilde{x}}')}$ (``erase an electron, contracting with spinor ${\boldsymbol e}_j$'') are defined through their properties that for $\Psi \in E_{q'}$, \begin{subequations} \begin{align}
&\alpha_\mathrm{e} \Psi \text{ is appropriately symmetrized} \\
&\big(\alpha_\mathrm{e}({\boldsymbol e}_j) \Psi\big) ((\hat{x}', {\boldsymbol x}'),\hat{{\widetilde{x}}'}) =
\frac{1}{\sqrt{n'+1}} \, \Psi(\hat{x}',\hat{{\widetilde{x}}'}) \otimes
{\boldsymbol e}_j \\
&\big(\varepsilon_\mathrm{e}({\boldsymbol e}_j) \Psi\big) (\hat{x}, \hat{{\widetilde{x}}'}) =
\sqrt{n'} \, \Psi_j ((\hat{x}, {\boldsymbol x}'),\hat{{\widetilde{x}}'}), \end{align} \end{subequations} where $\hat{x}$ is an arbitrary ordering of $x=x' \setminus {\boldsymbol x}'$, $\hat{x}'$ one of $x'$, and $\hat{{\widetilde{x}}'}$ one of ${\widetilde{x}}'$. We refer to the last electron slot when writing the tensor product or taking the $j$-component. $\alpha_\mathrm{p} ({\boldsymbol e}_j)$ and $\varepsilon_\mathrm{p}({\boldsymbol e}_j)$ are defined analogously.
For the four terms in \eqref{HIterms}, we thus get the four diagrams (omitting the multiplication by $A^{i,j}({\boldsymbol x})$) \begin{subequations} \begin{align}
(x',{\widetilde{x}}') &\xrightarrow [b_j({\boldsymbol x})] {\sum_k
{S_+}^{k}_{j} ({\boldsymbol x}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{e}({\boldsymbol e}_k)} (x'\setminus {\boldsymbol x}',{\widetilde{x}}') \xrightarrow
[b^*_i({\boldsymbol x})] {\sum_\ell {S_+}^{\ell}_{i}
({\boldsymbol x}'' - {\boldsymbol x}) \, \alpha_\mathrm{e}({\boldsymbol e}_\ell)} (x'\setminus {\boldsymbol x}' \cup
{\boldsymbol x}'',{\widetilde{x}}') \\
(x',{\widetilde{x}}') &\xrightarrow [b_j({\boldsymbol x})] {\sum_k
{S_+}^{k}_{j} ({\boldsymbol x}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{e}({\boldsymbol e}_k)} (x'\setminus {\boldsymbol x}',{\widetilde{x}}') \xrightarrow
[d_i({\boldsymbol x})] {\sum_\ell {S_-}^{\ell}_{i}
({\widetilde{\vx}}' - {\boldsymbol x}) \, \varepsilon_\mathrm{p}({\boldsymbol e}_\ell)} (x'\setminus
{\boldsymbol x}',{\widetilde{x}}'\setminus {\widetilde{\vx}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d^*_j({\boldsymbol x})] {\sum_k
{S_-}^{k}_{j} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\alpha_\mathrm{p}({\boldsymbol e}_k)} (x',{\widetilde{x}}'\cup {\widetilde{\vx}}') \xrightarrow
[b^*_i({\boldsymbol x})] {\sum_\ell {S_+}^{\ell}_{i}
({\boldsymbol x}' - {\boldsymbol x}) \, \alpha_\mathrm{e}({\boldsymbol e}_\ell)} (x'\cup {\boldsymbol x}',{\widetilde{x}}' \cup
{\widetilde{\vx}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d_i({\boldsymbol x})] {\sum_k
{S_-}^{k}_{i} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{p}({\boldsymbol e}_k)} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}') \xrightarrow
[d^*_j({\boldsymbol x})] {\sum_\ell
{S_-}^{\ell}_{j} ({\widetilde{\vx}}'' - {\boldsymbol x}) \,
\alpha_\mathrm{p}({\boldsymbol e}_\ell)} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}' \cup {\widetilde{\vx}}''). \end{align} \end{subequations} We read off that the first term corresponds to the jump of a single electron from ${\boldsymbol x}'$ to ${\boldsymbol x}''$, while all other particles remain where they were, the second to the annihilation of an electron--positron pair at locations ${\boldsymbol x}'$ and ${\widetilde{\vx}}'$, the third to the creation of an electron--positron pair at locations ${\boldsymbol x}'$ and ${\widetilde{\vx}}'$, and the last to the jump of a positron from ${\widetilde{\vx}}'$ to ${\widetilde{\vx}}''$. The corresponding jump rates are \begin{subequations}\label{elposrates} \begin{align}
\sigma_\mathrm{e} (x'\setminus {\boldsymbol x}' \cup {\boldsymbol x}'',{\widetilde{x}}'|q') &= \frac{[(2/\hbar)
\, \mathrm{Im} \, \Psi^*(q) \sum_{k,\ell}
\chi_\mathrm{e}^{k, \ell} ({\boldsymbol x}',{\boldsymbol x}'')
\alpha_\mathrm{e}({\boldsymbol e}_\ell) \varepsilon_\mathrm{e}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')} \\
\sigma_{\mathrm{ann}} (x'\setminus {\boldsymbol x}',{\widetilde{x}}'\setminus {\widetilde{\vx}}'|q') &=
\frac{[(2/\hbar) \, \mathrm{Im} \, \Psi^*(q) \sum_{k,\ell}
\chi_{\mathrm{ann}}^{k, \ell} ({\boldsymbol x}',{\widetilde{\vx}}')
\varepsilon_\mathrm{p}({\boldsymbol e}_\ell) \varepsilon_\mathrm{e}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')} \\
\sigma_{\mathrm{crea}} (x'\cup {\boldsymbol x}',{\widetilde{x}}' \cup {\widetilde{\vx}}'|q') &= \frac{[(2/\hbar) \,
\mathrm{Im} \, \Psi^*(q) \sum_{k,\ell}
\chi_{\mathrm{crea}}^{k, \ell} ({\boldsymbol x}',{\widetilde{\vx}}')
\alpha_\mathrm{e}({\boldsymbol e}_\ell) \alpha_\mathrm{p}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')} \\
\sigma_\mathrm{p} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}' \cup {\widetilde{\vx}}''|q') &=
\frac{[(2/\hbar) \, \mathrm{Im} \, \Psi^*(q) \sum_{k,\ell}
\chi_\mathrm{p}^{k, \ell} ({\widetilde{\vx}}',{\widetilde{\vx}}'')
\alpha_\mathrm{p}({\boldsymbol e}_\ell) \varepsilon_\mathrm{p}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')}, \end{align} \end{subequations} where $q$ denotes the respective destination, and \begin{subequations} \begin{align}
\chi_\mathrm{e}^{k, \ell}({\boldsymbol x}',{\boldsymbol x}'') =\quad &
\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_+}^\ell_i ({\boldsymbol x}''-{\boldsymbol x}) \, A^{i,j}
({\boldsymbol x}) \, {S_+}^k_j ({\boldsymbol x}'-{\boldsymbol x}) \\
\chi_{\mathrm{ann}}^{k, \ell} ({\boldsymbol x}',{\widetilde{\vx}}') =\quad &
\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_-}^\ell_i ({\widetilde{\vx}}'-{\boldsymbol x}) \, A^{i,j}
({\boldsymbol x}) \, {S_+}^k_j ({\boldsymbol x}'-{\boldsymbol x}) \\
\chi_{\mathrm{crea}}^{k, \ell} ({\boldsymbol x}',{\widetilde{\vx}}') =\quad
&\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_+}^\ell_i ({\boldsymbol x}'-{\boldsymbol x}) \, A^{i,j} ({\boldsymbol x})
\, {S_-}^k_j ({\widetilde{\vx}}'-{\boldsymbol x}) \\
\chi_\mathrm{p}^{k, \ell} ({\widetilde{\vx}}',{\widetilde{\vx}}'') =
-&\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_-}^\ell_j ({\widetilde{\vx}}''-{\boldsymbol x}) \, A^{i,j}
({\boldsymbol x}) \, {S_-}^k_i ({\widetilde{\vx}}'-{\boldsymbol x}). \end{align} \end{subequations} The process for $H_0 + H_{I}$ that we obtain through process additivity is the motion \eqref{elposmotion} interrupted by stochastic jumps with rates \eqref{elposrates}.
Note that the jump of a single electron is unlikely to cover a distance much larger than the width of the functions $S_\pm$, which is of the order of the Compton wavelength of the electron. Similarly, the distance $|{\boldsymbol x}-{\widetilde{\vx}}|$ of a newly created pair, or of a pair at the moment of annihilation, is unlikely to be much larger than the width of $S_\pm$. While the jump of a single electron or positron leaves the number $N$ of electrons and the number ${\widetilde{N}}$ of positrons unchanged, pair creation and annihilation can only either decrease or increase both $N$ and ${\widetilde{N}}$ by $1$. As a consequence, the actual net charge ${\widetilde{N}}-N$ is conserved by the process.
\section{Second Quantization of a Markov Process}\label{sec:morefree}
\subsection{Preliminaries Concerning the Conditional Density Matrix}
In the next section, we describe the algorithm for the ``second quantization'' of a process. But before that, we have to introduce, as a preparation, the notion of a conditional density matrix. In \cite{DGZ}, we have defined for Bohmian mechanics the \emph{conditional wave function} of, say, subsystem 1 of a composite system with configuration space $\mathcal{Q} = \mathcal{Q}_1 \times \mathcal{Q}_2$ by $\Psi_\mathrm{cond}(q_1) = \Psi(q_1,Q_2)$. {}From a complex wave function $\Psi : \mathcal{Q} \to \mathbb{C}$, together with the actual configuration $Q_2$ of the environment of the subsystem in the composite, we thus form a wave function $\Psi_\mathrm{cond}: \mathcal{Q}_1 \to \mathbb{C}$; for Bohmian mechanics with spin, in contrast, we would not, in general, obtain a suitable wave function for subsystems in this way, because $\Psi_\mathrm{cond}$ as just defined would have more spin indices than appropriate. We can however still define the \emph{conditional density matrix} for subsystem 1, \begin{equation}\label{WPsi}
W_{\mathrm{cond} \, s_1,s_1'}(q_1,q_1') = \frac{1}{\gamma} \sum_{s_2}
\Psi_{s_1,s_2} (q_1,Q_2) \, \Psi^*_{s_1',s_2} (q_1', Q_2) \end{equation} where the $s$'s are spin indices. In order that $W$, like any density matrix, have trace 1, the normalizing factor $\gamma$ must be chosen as \[
\gamma = \int\limits_{q_1 \in \mathcal{Q}_1} \sum_{s_1,s_2} \Psi^*_{s_1,s_2}
(q_1,Q_2) \, \Psi_{s_1,s_2} (q_1,Q_2) \, dq_1\,. \] This $W$ can play most of the roles of the conditional wave function in spinless Bohmian mechanics. The notion of a conditional density matrix easily generalizes from the situation just described, corresponding to wave functions in $L^2(\mathcal{Q},\mathbb{C}^k)$ and the natural localization PVM, to the situation of any product localization POVM on any tensor product Hilbert space: for $\mathscr{H} = \mathscr{H}_1 \otimes \mathscr{H}_2$ and ${P}(dq_1 \times dq_2) = {P}_1(dq_1) \otimes {P}_2(dq_2)$, set \begin{equation}\label{WPOV}
W_\mathrm{cond} = \frac{\mathrm{tr}_2 \big( |\Psi\rangle\langle\Psi| \, {P}
(\mathcal{Q}_1 \times dq_2) \big)} {\mathrm{tr} \big( |\Psi\rangle\langle\Psi|
\, {P}(\mathcal{Q}_1 \times dq_2) \big)} \Big|_{q_2 = Q_2}\,, \end{equation} where $\mathrm{tr}_2$ is the partial trace over $\mathscr{H}_2$. The quotient is to be understood as a Radon--Nikod{\'y}m derivative in $q_2$. Like conditional wave functions, conditional density matrices cannot be defined in orthodox quantum theory, for lack of the configuration $Q_2$. We stress that conditional density matrices have nothing, absolutely nothing, to do with statistical ensembles of state vectors in $\mathscr{H}_1$. Like any density matrix, they do, however, define a probability distribution on $\mathcal{Q}_1$, \begin{equation}\label{PW}
\mathbb{P}^{W_\mathrm{cond}}_1 (\,\cdot\,) = \mathrm{tr} \big(W_\mathrm{cond} \,
{P}_1(\,\cdot\,) \big)\,, \end{equation} which coincides with the conditional distribution of $Q_1$ given $Q_2$, \[
\mathbb{P}(Q_1 \in \,\cdot\,|Q_2) = \frac{\sp{\Psi}{{P}_1(\,\cdot\,)
\otimes {P}_2(dq_2)| \Psi}} {\sp{\Psi}{\mathbf{1} \otimes {P}_2(dq_2)|
\Psi}} \Big|_{q_2 = Q_2}\,. \]
The evolution of $W_\mathrm{cond}$ is not autonomous; it will typically depend on (and always be determined by) $\Psi_t$ and $Q_{2,t}$. For a given density matrix $W$ of a system that is not regarded as a subsystem, however, one can \emph{define} (as usual) the time evolution by $W_t = e^{-i H t/\hbar} \, W \, e^{i H t/\hbar}$, which gives rise to a time-dependent distribution $\mathbb{P}^{W_t} (\,\cdot\,) = \mathrm{tr} (W_t {P}(\,\cdot\,))$. We call a Markov process that is $\mathbb{P}^{W_t}$-distributed at every time $t$ \emph{equivariant} with respect to $W$ and $H$. Given the right initial distribution, this is equivalent to the following condition on the generator: \begin{equation}\label{Wequi}
\mathscr{L} \mathbb{P}^W (\,\cdot\,) = \frac{2}{\hbar} \, \mathrm{Im} \,
\mathrm{tr}(W \, {P}(\,\cdot\,) \, H)\,. \end{equation} This is the version of (\ref{mainequ}) for density matrices, and defines an \emph{equivariant generator} with respect to $W$ and $H$.
Since conditional density matrices will play a crucial role in the construction of the many-particle process, we require that, as part of the input data of the algorithm, we are given an equivariant generator $\mathscr{L}^{(1)}_W$ for every density matrix from a dense subset of the density matrices in $\mathscr{H}^{(1)*} \otimes \mathscr{H}^{(1)}$. This is not much of a restriction, as all relevant examples of equivariant generators naturally extend to density matrices: Bohmian mechanics with spin space $\mathbb{C}^k$ can be extended \cite{Belldensity} to \begin{equation}\label{vW}
v^W(q) = \hbar \, \mathrm{Im} \, \frac{\nabla_{q} \mathrm{tr}_{\mathbb{C}^k} \,
W(q,q')}{\mathrm{tr}_{\mathbb{C}^k} \, W(q,q')} (q'=q)\,, \end{equation} Bohm--Dirac to \begin{equation}\label{vWDirac}
v^W(q) = \frac{\mathrm{tr}_{\mathbb{C}^4} (W(q,q) {\boldsymbol \alpha})}{\mathrm{tr}_{\mathbb{C}^4}
(W(q,q))} \,, \end{equation} and minimal jump rates to \begin{equation}\label{sigmaW}
\sigma^W (dq|q') = \frac{[(2/\hbar)\, \mathrm{Im} \, \mathrm{tr}(W {P}(dq) H
{P}(dq'))]^+} {\mathrm{tr}(W {P}(dq'))} \,. \end{equation} Note also that (\ref{vW}) would not make any sense if $W$ represented a statistical ensemble \cite{Belldensity}, whereas it makes good sense for conditional density matrices, expressing the true relation between the Bohmian velocity for a subsystem arising from (\ref{Bohm}) and the conditional density matrix (\ref{WPsi}) of that subsystem. Mutatis mutandis, the same is true of (\ref{vWDirac}). Similarly, in case that ${P}$ is a PVM, (\ref{sigmaW}) expresses the jump rates for a decoupled subsystem arising from \eqref{tranrates} for the composite in terms of the conditional density matrix of that subsystem.
\subsection{Algorithm} \label{sec:Gamma}
The input data of this algorithm are the one-particle Hilbert space $\mathscr{H}^{(1)}$, configuration space $\mathcal{Q}^{(1)}$, POVM ${P}^{(1)}$, and a family of generators $\mathscr{L}^{(1)} = \mathscr{L}^{(1)}_W$ labeled by the density matrices $W$ from a dense subset of the density matrices in $\mathscr{H}^{(1)*} \otimes \mathscr{H}^{(1)}$. The output is a family of generators $\Gamma \mathscr{L}^{(1)} = \mathscr{L}_0 = \mathscr{L}_{0,\Psi}$ labeled by the state vectors $\Psi$ in (a dense subspace of) Fock space. If $\mathscr{L}^{(1)}_W$ is equivariant with respect to $W$ and $H^{(1)}$, then $\mathscr{L}_{0,\Psi}$ is equivariant with respect to $\Psi$ and $H_0$.
The algorithm is based on two procedures for suitably combining generators for direct sums or tensor products of Hilbert spaces.
\subsubsection{Direct Sums}\label{sec:directsum}
Given a finite or countable sequence of Hilbert
spaces $\mathscr{H}^{(n)}$ with POVMs ${P}^{(n)}$ on configuration
spaces $\mathcal{Q}^{(n)}$, and for each $n$ a family of generators
$\mathscr{L}^{(n)}$ labeled by the vectors in $\mathscr{H}^{(n)}$, there
is a canonically constructed family of generators $\mathscr{L}^\oplus
= \mathscr{L}^\oplus_\Psi$, labeled by the vectors in the direct sum
$\bigoplus_n \mathscr{H}^{(n)}$. The space $\mathcal{Q}$ in which the
corresponding process takes place is the disjoint union of the
$\mathcal{Q}^{(n)}$. If every $\mathscr{L}^{(n)}_{\Psi_n}$ is equivariant
with respect to $\Psi_n \in \mathscr{H}^{(n)}$ and $H^{(n)}$, then
$\mathscr{L}^\oplus_\Psi$ is equivariant with respect to $\Psi \in
\bigoplus_n \mathscr{H}^{(n)}$ and $\bigoplus_n H^{(n)}$.
Here are the details. The POVM ${P} = \bigoplus_n {P}^{(n)}$ on
$\mathcal{Q}$ that naturally arises from the data is given by ${P}(B) =
\bigoplus_n {P}^{(n)} (B \cap \mathcal{Q}^{(n)})$ for $B \subseteq
\mathcal{Q}$. Let $P_n$ denote the projection $\mathscr{H} \to
\mathscr{H}^{(n)}$. The generator $\mathscr{L}^\oplus$ is given by
\begin{equation}
\big( \mathscr{L}_\Psi^\oplus \, \rho \big) \big|_{\mathcal{Q}^{(n)}} =
\mathscr{L}_{P_n\Psi/\|P_n\Psi\|}^{(n)} \big(
\rho \big|_{\mathcal{Q}^{(n)}} \big)\,.
\end{equation}
It generates a (Markov) process $Q_t^\oplus$ such that when
$Q_0^\oplus \in \mathcal{Q}^{(n)}$, it is generated by the state vector
$P_n \Psi/ \|P_n \Psi \|$, i.e., it is a Markov process $Q_t^{(n)}$
in $\mathcal{Q}^{(n)}$ generated by $\mathscr{L}^{(n)}_{P_n \Psi/
\|P_n\Psi\|}$. The equivariance statement follows directly, since
$\|P_n \Psi_t\|^2 = \mathbb{P}_t (\mathcal{Q}^{(n)})$ is invariant under the
evolution generated by $H_0 = \bigoplus_n H^{(n)}$.
\subsubsection{Tensor Products}\label{sec:tensorproduct}
Given a finite sequence of Hilbert spaces
$\mathscr{H}^{[1]}, \ldots, \mathscr{H}^{[n]}$ with POVMs ${P}^{[i]}$ on
configuration spaces $\mathcal{Q}^{[i]}$, and for each $i$ a family of
generators $\mathscr{L}^{[i]} = \mathscr{L}^{[i]}_{W_i}$ labeled
by the density matrices on $\mathscr{H}^{[i]}$, there is a canonically
constructed family of generators $\mathscr{L}^\otimes =
\mathscr{L}^\otimes_W$, labeled by the density matrices on the tensor
product $\mathscr{H}^{[1]} \otimes \cdots \otimes \mathscr{H}^{[n]}$. The
corresponding process takes place in the Cartesian product $\mathcal{Q} =
\mathcal{Q}^{[1]} \times \cdots \times \mathcal{Q}^{[n]}$. If every
$\mathscr{L}^{[i]}_{W_i}$ is equivariant with respect to the density
matrix $W_i$ on $\mathscr{H}^{[i]}$ and the Hamiltonian $H^{[i]}$, then
$\mathscr{L}^\otimes_W$ is equivariant with respect to $W$ on
$\mathscr{H}^{[1]} \otimes \cdots \otimes \mathscr{H}^{[n]}$ and $H =
\sum\limits_i \mathbf{1} \otimes \cdots \otimes H^{[i]} \otimes \cdots
\otimes \mathbf{1} = \sum\limits_i H_i$.
% \newcommand removed: its name argument had been expanded to \widehat{q}_i,
% which is not a valid control sequence; all occurrences below are already
% written out as \widehat{q}_i, so no macro definition is needed.
Here are the details. The POVM that naturally arises from the data
is\footnote{The existence of the tensor product POVM is a
consequence of Corollary~7 in Section~4.4 of \cite{crea2A}.}
\begin{equation}\label{productpovm}
{P}(d{\boldsymbol q}_1 \times \cdots \times d{\boldsymbol q}_n) = {P}^{[1]}(d{\boldsymbol q}_1)
\otimes \cdots \otimes {P}^{[n]}(d{\boldsymbol q}_n).
\end{equation}
For any $q \in \mathcal{Q}$, let ${\boldsymbol q}_i$ denote its $i$-th component and
let $\widehat{q}_i = ({\boldsymbol q}_1, \ldots, {\boldsymbol q}_{i-1}, {\boldsymbol q}_{i+1}, \ldots,
{\boldsymbol q}_n)$. For every $i$ and $\widehat{q}_i$, define
\[
W_i (\widehat{q}_i) = \frac{\mathrm{tr}_{\neq i} \big( W {P}(d{\boldsymbol q}_1 \times \cdots
\times \mathcal{Q}^{[i]} \times \cdots \times d{\boldsymbol q}_n) \big)} {\mathrm{tr} \big(W
{P}(d{\boldsymbol q}_1 \times \cdots \times \mathcal{Q}^{[i]} \times \cdots \times
d{\boldsymbol q}_n) \big)}\,,
\]
where $\mathrm{tr}_{\neq i}$ is the partial trace over all factors except
$\mathscr{H}^{[i]}$. This $W_i$ is the conditional density matrix,
regarded as a function of the configuration $\widehat{q}_i$ of the other
particles. Now consider the process on $\mathcal{Q}$ according to which
the $i$-th particle moves as prescribed by $\mathscr{L}^{[i]}_{W_i}$
while the other particles remain fixed. The generator of this
process is
\begin{equation}\label{Lidef}
\mathscr{L}_i \, \rho := \Big[ \mathscr{L}^{[i]}_{W_i(\widehat{q}_i)} \,
\rho( \,\cdot\,| \widehat{q}_i) \Big] \, \rho_{\neq i}(d\widehat{q}_i)
\end{equation}
where $\rho_{\neq i}$ is the marginal distribution of
$\widehat{Q}_i$ (i.e., $\rho$ integrated over ${\boldsymbol q}_i$) and
$\rho(\,\cdot\,|\widehat{q}_i)$ is the conditional distribution of ${\boldsymbol Q}_i$
given $\widehat{Q}_i = \widehat{q}_i$; the square bracket is a function of
$\widehat{q}_i$ and a measure in $d{\boldsymbol q}_i$. Now define $\mathscr{L}^\otimes_W
\rho = \sum\limits_i \mathscr{L}_i \rho$.
To see that $\mathscr{L}^\otimes$ is equivariant when the
$\mathscr{L}^{[i]}$ are, we have to check (\ref{Wequi}). Note first
that $\mathbb{P}^W(d{\boldsymbol q}_i|\widehat{q}_i) = \mathrm{tr} \big( W_i(\widehat{q}_i) \,
{P}^{[i]}(d{\boldsymbol q}_i) \big)$. Due to the equivariance of
$\mathscr{L}^{[i]}$, for $\rho = \mathbb{P}^W$ the square bracket in
(\ref{Lidef}) equals $(2/\hbar) \, \mathrm{Im} \, \mathrm{tr} \big( W_i (\widehat{q}_i) \,
{P}^{[i]}(d{\boldsymbol q}_i) \, H^{[i]} \big)$, from which we obtain
(\ref{Wequi}) for $\mathscr{L}_i$ and $H_i$ and hence for
$\mathscr{L}^\otimes$ and $H$.
The definition of $\mathscr{L}^\otimes$ reproduces the many-particle Bohm law (\ref{Bohm}) with or without spin from the one-particle version (or, for distinguishable particles, from several different one-particle versions having different masses and spins). Similarly, it reproduces the many-particle Bohm--Dirac law (\ref{BohmDirac}) from the one-particle version.
\subsubsection{Second Quantization of the POVM}\label{sec:GammaPOVM}
Let $\mathcal{Q}^{(n)}$ denote the space of all subsets-with-multiplicities of $\mathcal{Q}^{(1)}$ having $n$ elements (counting in the multiplicities). ${P}^{(1)}$ naturally defines a POVM ${P}^{(1)\otimes n}$ on $(\mathcal{Q}^{(1)})^n$ acting on $\mathscr{H}^{(1)\otimes n}$ by ${P}^{(1)\otimes n}(d{\boldsymbol q}_1 \times \cdots \times d{\boldsymbol q}_n) = {P}^{(1)}(d{\boldsymbol q}_1) \otimes \cdots \otimes {P}^{(1)}(d{\boldsymbol q}_n)$, and a POVM ${P}^{(n)}$ on $\mathcal{Q}^{(n)}$ acting on $\mathscr{F}^{(n)} = P_\pm \mathscr{H}^{(1)\otimes n}$ (the $n$-particle sector of Fock space, with $P_\pm$ the projection to the subspace of (anti\nobreakdash-)symmetric elements of $\mathscr{H}^{(1)\otimes n}$, depending on whether we deal with fermions or bosons) by \[
{P}^{(n)}(B) = {P}^{(1)\otimes n} \big\{({\boldsymbol q}_1, \ldots, {\boldsymbol q}_n)
\in (\mathcal{Q}^{(1)})^n : \{{\boldsymbol q}_1, \ldots, {\boldsymbol q}_n\} \in B \big\} \] for $B \subseteq \mathcal{Q}^{(n)}$, where $\{{\boldsymbol q}_1, \ldots, {\boldsymbol q}_n\}$ should be understood as a set-with-multiplicities.\footnote{This agrees with the definition given in Section \ref{sec:crea1} for the case of a PVM and the coincidence configurations removed from configuration space.} Since ${P}^{(n)}(B)$ is invariant under permutations, it maps symmetric to symmetric and anti-symmetric to anti-symmetric elements of $\mathscr{H}^{(1)\otimes n}$ and thus acts on $\mathscr{F}^{(n)}$ for bosonic or fermionic Fock space.\footnote{In case that ${P}^{(1)}$ is nonatomic, ${P}^{(n)}$ can equivalently be defined in the following way: For the set $\Delta$ of coincidence configurations we set ${P}^{(n)}(\Delta) =0$, and for volumes $d{\boldsymbol q}_1, \ldots, d{\boldsymbol q}_n$ in $\mathcal{Q}^{(1)}$ that are pairwise disjoint, we have a corresponding volume $dq$ in $\mathcal{Q}^{(n)}$, which can be obtained from $d{\boldsymbol q}_1 \times \cdots \times d{\boldsymbol q}_n \subseteq (\mathcal{Q}^{(1)})^n$ by forgetting the ordering, and we set ${P}^{(n)}(dq) = n! \, P_\pm \, {P}^{(1)}(d{\boldsymbol q}_1) \otimes \cdots \otimes {P}^{(1)}(d{\boldsymbol q}_n) \, P_\pm$.} The corresponding POVM on $\mathcal{Q}$ is then ${P} = \Gamma {P}^{(1)} = \bigoplus_n {P}^{(n)}$; more precisely, for $B \subseteq \mathcal{Q}$, \[
{P}(B) = \bigoplus_{n=0}^\infty {P}^{(n)} (B\cap \mathcal{Q}^{(n)})\,. \]
\subsubsection{Construction of the Free Process}
Equipped with the two procedures for direct sums and tensor products, we complete the construction of the free process.
The ``tensor product'' procedure above provides a process on $(\mathcal{Q}^{(1)})^n$ from $n$ identical copies of $\mathscr{L}^{(1)}$. For a state vector $\Psi^{(n)} \in \mathscr{F}^{(n)} = P_\pm \mathscr{H}^{(1)\otimes n}$ from either the symmetric or the anti-symmetric elements of the $n$-fold tensor product space, let $W$ be the projection to $\Psi^{(n)}$; the generator $\mathscr{L}^\otimes_W$ is permutation invariant because the tensor-product construction of $\mathscr{L}^\otimes _W$ is permutation covariant and a permutation can at most change the state vector by a minus sign, which does not affect the density matrix. Consequently, the ordering of the configuration is irrelevant and may be ignored. We thus obtain a process on $\mathcal{Q}^{(n)}$ whose generator we call $\mathscr{L}^{(n)}$. We now apply the ``direct sum'' procedure to obtain a process on $\mathcal{Q}$.
\section{Towards a Notion of Minimal Process}
In this section, we investigate the common traits of the Markov processes relevant to Bell-type QFT, which can be summarized in the notion of a \emph{minimal process} associated with $\Psi,H$, and ${P}$. We begin with a closer study of the minimal free generator \eqref{LH}, and then explain why we call the minimal jump rates ``minimal.'' Finally, in Section \ref{sec:mini}, we give an outlook on the notion of minimal process.
\subsection{Free Process From Differential Operators} \label{sec:freeflow}
In this section, we discuss some of the details, concerning the two equivalent formulas \eqref{LH} and \eqref{genH} for the backward and forward version of the minimal free generator in terms of $H, {P}$, and $\Psi$, that we omitted in Section \ref{sec:free2}. To begin with, $L$ as defined by \eqref{LH} satisfies some necessary conditions for being a backward generator: $Lf(q)$ is real, and $L\mathbf{1} =0$ where $\mathbf{1}$ is the constant 1 function (this corresponds to $\mathscr{L} \rho (\mathcal{Q}) =0$, or conservation of total probability). In case $L$ is indeed a backward generator, the corresponding process is equivariant because \[
\mathscr{L} \mathbb{P} (dq) \stackrel{\eqref{genH}}{=} \mathrm{Re} \, \sp{\Psi}
{\hat{\mathbf{1}}\,\frac{i}{\hbar} [H,{P}(dq)] |\Psi} = \frac{2}{\hbar} \,
\mathrm{Im} \, \sp{\Psi} {{P}(dq) H|\Psi} \stackrel{\eqref{dPdt}}{=}
\dot{\mathbb{P}}(dq)\,. \]
One way to arrive at formula \eqref{LH} has been described in Section \ref{sec:free2}. A different way, leading to \eqref{genH}, is to start from the ansatz $\mathscr{L} \rho = A\frac{d\rho}{d\mathbb{P}}$ where $A$ denotes a (signed-measure-valued) linear operator acting on functions. Equivariance means $A\mathbf{1} (dq) = \sp{\Psi}{\frac{i}{\hbar}
[H, {P}(dq)] |\Psi}$. This suggests $Af(dq) = \sp{\Psi}{\hat{f}\,
\frac{i}{\hbar} [H, {P}(dq)] |\Psi}$, or $Af(dq) =
\sp{\Psi}{\frac{i}{\hbar} [H, {P}(dq)]\, \hat{f} |\Psi}$, or a convex combination thereof. Since $Af(dq)$ must be real, we are forced to choose the combination with coefficients $\frac{1}{2}$ and $\frac{1}{2}$, or equivalently $Af(dq) = \mathrm{Re}\, \sp{\Psi}{\hat{f}\,
\frac{i}{\hbar} [H, {P}(dq)] |\Psi}$, which is \eqref{genH}.
That $\mathscr{L}$ generates a deterministic process (when it is a generator at all) is suggested by the following consideration---at least when $H$ and ${P}$ are time-reversal invariant: replacing $\Psi$ in \eqref{genH} by $T\Psi$ where $T$ is the anti-linear time reversal operator (see Section \ref{sec:symm}) changes the sign of $\mathscr{L}$. The only generators $\mathscr{L}$ such that $-\mathscr{L}$ is also a generator are, presumably, those corresponding to deterministic motion.
This gives us an opportunity to check for which $H$ \eqref{LH} does define a process: for a deterministic process we must have $L = v\cdot \nabla$ where $v$ is the velocity vector field. It is known that vector fields, understood as first-order differential operators, are those linear operators $L$ on the space of smooth functions that satisfy the Leibniz rule $L(fg) = fLg + gLf$. \eqref{LH} is certainly linear in $f$, so we have to check the Leibniz rule to see whether $L$ is indeed of the form $v\cdot \nabla$ and thus the backward generator of a process.
We can see no reason why $L$ should satisfy a Leibniz rule unless ${P}$ is a PVM, which implies that \begin{equation}\label{fPOV}
\hat{f} \, {P}(dq) = f(q) \, {P}(dq)\,, \end{equation} and $H$ is such that for all (nice) functions $f$ and $g$, \begin{equation}\label{Hdiff}
\big[ [ H,\hat{f}] , \hat{g} \big] = \hat{h} \end{equation} for some function $h$, which holds if $H$ is a differential operator of order $\leq 2$. (If $H=-\Delta$, then $h=- 2 \nabla f \cdot \nabla g$; if $H=-i \, {\boldsymbol \alpha} \cdot \nabla$ for whatever vector of matrices ${\boldsymbol \alpha}$, or if $H$ is a multiplication operator, then $h=0$.) To check that the Leibniz rule is obeyed in this case, note that we then have that $[H, \widehat{fg}] = [H, \hat{f} \hat{g}] = [H,\hat{f}] \hat{g} + \hat{f} [H,\hat{g}] = \hat{f} [H, \hat{g}] + \hat{g} [H, \hat{f}] + \big[ [H, \hat{f}], \hat{g} \big]$. Using this in \eqref{LH}, we find that, due to \eqref{fPOV}, the first two terms give the Leibniz rule, whereas the last term, due to \eqref{Hdiff}, does not contribute to the real part in \eqref{LH}.
When $\mathscr{H}$ is an $L^2$ space over $\mathcal{Q}$ and ${P}$ the natural PVM, i.e., when $\Psi$ is a function, \eqref{LH} can be written in the form \begin{equation}\label{vH}
L f(q)= \frac{1}{\hbar}\, \mathrm{Im} \, \frac{\Psi^*(q) \,
([\hat{f},H]\Psi)(q)} {\Psi^*(q) \, \Psi(q)} \end{equation} where $\hat{f}$ is the multiplication operator corresponding to $f$. From this, one easily reads off the Bohm velocity \eqref{Bohm} for the $N$-particle Schr\"odinger operator \eqref{Hamil} with or without spin. Similarly, we get the Bohm--Dirac theory when $H$ is the Dirac operator in $\mathscr{H} = {\mathrm{Anti}\,} L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes N}$, $\mathcal{Q}$ the manifold of subsets of $\mathbb{R}^3$ with $N$ elements, and ${P}$ the obvious PVM. \eqref{vH} also leads to the Bohm--Dirac motion if $\mathscr{H} = L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes N}$, $\mathcal{Q} = \mathbb{R}^{3N}$, and ${P}$ is the natural PVM, but not if $\mathscr{H}$ is the positive energy subspace because then the appropriate POVM ${P}$ is no longer a PVM.
To see that the ``second quantization'' algorithm maps minimal free generators to minimal free generators, or, in other words, preserves the relation \eqref{genH} between Hamiltonian and generator, observe first that \eqref{genH} naturally extends to density matrices, and the extension, if a generator, is equivariant. Next check that the ``direct sum'' and ``tensor product'' procedures of Section \ref{sec:Gamma} are compatible with \eqref{genH} when ${P}$ is a PVM. Finally, observe that the (anti\nobreakdash-)symmetrization operator commutes with the $n$-particle Hamiltonian, with ${P}(B)$ for every permutation invariant set $B \subseteq (\mathcal{Q}^{(1)})^n$, and with $\hat{f}$ for every permutation invariant function $f:(\mathcal{Q}^{(1)})^n \to \mathbb{R}$.
\subsection{Minimality} \label{sec:mini4}
In this section we explain in what sense the minimal jump rates \eqref{tranrates}---or \eqref{mini1}---are minimal. In so doing, we will also explain the significance of the quantity $\mathbb{J}$ defined in \eqref{Jdef}, and clarify the meaning of the steps taken in Sections \ref{sec:mini1} and \ref{sec:mini2} to arrive at the jump rate formulas.
Given a Markov process $Q_t$ on $\mathcal{Q}$, we define the \emph{net probability current} $j_t$ at time $t$ between sets $B$ and $B'$ by \begin{eqnarray}\label{jdefcont}
j_t(B,B') = \lim_{\Delta t \searrow 0} \frac{1}{\Delta t} \hspace{-3ex}
&&
\Big[ \mathrm{Prob}\big\{Q_{t}\in B',Q_{t+\Delta t}\in B \big\} -
\\\nonumber
&&
- \mathrm{Prob} \big\{ Q_{t}\in B, Q_{t+\Delta t} \in B' \big\} \Big]\,. \end{eqnarray} This is the amount of probability that flows, per unit time, from $B'$ to $B$ minus the amount from $B$ to $B'$. For a pure jump process, we have that \begin{equation}\label{jrate}
j_t(B,B') = \int\limits_{q'\in B'} \sigma_t(B|q')\, \rho_t(dq') -
\int\limits_{q\in B} \sigma_t(B'|q)\, \rho_t(dq)\,, \end{equation} so that \begin{equation} j_t(B,B') = j_{\sigma,\rho}(B \times B') \end{equation} where $j_{\sigma,\rho}$ is the signed measure, on $\mathcal{Q} \times \mathcal{Q}$, given by the integrand of \eqref{continuity3}, \begin{equation}\label{jsigma}
j_{\sigma,\rho} (dq \times dq') = \sigma(dq|q') \, \rho(dq') -
\sigma(dq'|q) \, \rho(dq)\,. \end{equation} For minimal jump rates $\sigma$, defined by \eqref{tranrates} or \eqref{mini1} (and with the probabilities $\rho$ given by \eqref{mis}, $\rho = \mathbb{P}$), this agrees with \eqref{Jdef}, as was noted earlier, \begin{equation}\label{jJ}
j_{\sigma,\rho} = \mathbb{J}_{\Psi,H,{P}} \,, \end{equation} where we have made explicit the fact that $\mathbb{J}$ is defined in terms of the quantum entities $\Psi, H$, and ${P}$. Note that both $\mathbb{J}$ and the net current $j$ are anti-symmetric, $\mathbb{J}^\mathrm{tr} = -\mathbb{J}$ and $j^\mathrm{tr} = -j$, the latter by construction and the former because $H$ is Hermitian. (Here $\mathrm{tr}$ indicates the action on measures of the transposition $(q,q') \mapsto (q',q)$ on $\mathcal{Q} \times \mathcal{Q}$.) The property \eqref{jJ} is stronger than the equivariance of the rates $\sigma$, $\mathscr{L}_\sigma \mathbb{P}_t = d\mathbb{P}_t / dt$: Since, by \eqref{continuity3}, \begin{equation}
(\mathscr{L}_\sigma \rho) (dq) = j_{\sigma,\rho} (dq \times \mathcal{Q}), \end{equation} and, by \eqref{Jdef}, \begin{equation}
\frac{d\mathbb{P}}{dt}(dq) = \mathbb{J}(dq \times \mathcal{Q}), \end{equation} the equivariance of the jump rates $\sigma$ amounts to the condition that the marginals of both sides of \eqref{jJ} agree, \begin{equation}
j_{\sigma,\rho} (dq \times \mathcal{Q}) = \mathbb{J} (dq \times \mathcal{Q})\,. \end{equation} In other words, what is special about processes with rates satisfying \eqref{jJ} is that not only the single-time \emph{distribution} but also the \emph{current} is given by a standard quantum theoretical expression in terms of $H, \Psi$, and ${P}$. That is why we call \eqref{jJ} the \emph{standard-current property}---defining \emph{standard-current rates} and \emph{standard-current processes}.
Though the standard-current property is stronger than equivariance, it alone does not determine the jump rates, as already remarked in \cite{BD,Roy}. This can perhaps be best appreciated as follows: Note that \eqref{jsigma} expresses $j_{\sigma,\rho}$ as twice the anti-symmetric part of the (nonnegative) measure \begin{equation}
C(dq \times dq') = \sigma(dq|q') \, \rho(dq') \end{equation} on $\mathcal{Q} \times \mathcal{Q}$ whose right marginal $C(\mathcal{Q} \times dq')$ is absolutely continuous with respect to $\rho$. Conversely, from any such measure $C$ the jump rates $\sigma$ can be recovered by forming the Radon--Nikod\'ym derivative \begin{equation}
\sigma(dq|q') = \frac{C(dq \times dq')}{\rho(dq')}\,. \end{equation} Thus, given $\rho$, specifying $\sigma$ is equivalent to specifying such a measure $C$.
In terms of $C$, the standard-current property becomes (with $\rho = \mathbb{P}$) \begin{equation}\label{CJ}
2 \, \mathrm{Anti} \, C = \mathbb{J}. \end{equation} Since (recalling that $\mathbb{J} = \mathbb{J}^+ - \mathbb{J}^-$ is anti-symmetric) \begin{equation}
\mathbb{J} = 2 \, \mathrm{Anti} \, \mathbb{J}^+, \end{equation} an obvious solution to \eqref{CJ} is \[
C = \mathbb{J}^+, \] corresponding to the minimal jump rates. However, \eqref{jJ} fixes only the anti-symmetric part of $C$. The general solution to \eqref{CJ} is of the form \begin{equation}
C = \mathbb{J}^+ + S \end{equation} where $S(dq \times dq')$ is symmetric, since any two solutions to \eqref{CJ} have the same anti-symmetric part, and $S \geq 0$, since $S = C \wedge C^\mathrm{tr}$, because $\mathbb{J}^+ \wedge (\mathbb{J}^+)^\mathrm{tr} =0$.
In particular, for any standard-current rates, we have that \begin{equation}\label{minimality}
C \geq \mathbb{J}^+, \quad \text{or} \quad \sigma(dq|q') \geq
\frac{\mathbb{J}^+(dq \times dq')}{\mathbb{P}(dq')}. \end{equation} Thus, among all jump rates consistent with the standard-current property, one choice, distinguished by equality in \eqref{minimality}, has the least frequent jumps, or the smallest amount of stochasticity: the minimal rates \eqref{tranrates}.
\subsection{Minimal Processes} \label{sec:mini}
We have considered in this paper minimal jump processes, i.e., jump processes with rates \eqref{tranrates}, associated with integral operators $H$. There is a more general notion of minimal process, such that there is a minimal process associated with every Hamiltonian from a much wider class than that of integral operators; a class presumably containing all Hamiltonians relevant to QFT. This will be discussed in detail in a forthcoming work \cite{crea3}.
Bohmian mechanics is, in this sense, the minimal process associated with the Schr\"odin\-ger Hamiltonian \eqref{Hamil}. The minimal process associated with an integral operator is the jump process with minimal rates. When the minimal free generator \eqref{LH} exists, i.e., when \eqref{LH} is a generator, it generates the minimal process associated with $H$. The minimal process associated with the Hamiltonian of a QFT is the one we have obtained in this paper by means of process additivity. The concept of minimal process directly provides, perhaps always, the process relevant to a Bell-type QFT.
To begin to convey the notion of the minimal process, we generalize the standard-current property (cf.\ Section \ref{sec:mini4}) from pure jump processes to general Markov processes: the net probability current $j$ of a Markov process defines a bilinear form \begin{equation}
j_t(f,g) = \lim_{\Delta t \searrow 0} \, \frac{1}{\Delta t} \, \mathbb{E}
\big( f(Q_{t+\Delta t}) g(Q_t) - f(Q_t) g(Q_{t + \Delta t}) \big)
= (g,L_t f) - (f, L_t g) \end{equation} where $L_t$ is its backward generator, and $( \;, \, )$ on the right hand side means the scalar product of $L^2(\mathcal{Q}, \rho_t)$. Then the Markov process satisfies the \emph{standard-current property} if $\rho_t = \mathbb{P}_t$ and (for $f$ and $g$ real) $j_t(f,g)$ is equal to \begin{equation}
\mathbb{J}_t(f,g) = \frac{2}{\hbar} \, \mathrm{Im} \, \sp{\Psi_t} {\hat{f} H
\hat{g} |\Psi_t}\,, \end{equation} or, in other words, if twice the anti-symmetric part of its backward generator $L_t$ agrees with the operator corresponding to $\mathbb{J}_t$ as given by $(\mathbb{J}_t f,g) = \mathbb{J}_t(f,g)$, $2 \, \mathrm{Anti} \, L_t = \mathbb{J}_t$. The minimal process is then the standard-current process that has, in a suitable sense, the smallest amount of randomness.
Let us consider some examples. The diffusion process with generator $\mathscr{L}$ given below (and for $\rho = \mathbb{P}$) has the standard-current property (in fact, because its ``current velocity'' \cite{stochmech} is $v$) for the Schr\"odinger Hamiltonian \eqref{Hamil} but is not minimal: \begin{equation}\label{diffusion}
\mathscr{L} \rho= \frac{\lambda}{2} \Delta \rho -\,\mathrm{div}\, (\rho
\tilde{v}),\mbox{ with } \tilde{v}:= v + \frac{\lambda}{2}
\nabla(\log|\Psi|^2) \end{equation} where $\lambda$ is any positive constant (the diffusion constant) and $v$ is the Bohmian velocity (\ref{Bohm}); this process was already considered in \cite{Jaekel,Davidson}. Note that Nelson's stochastic mechanics \cite{stochmech} corresponds to $\lambda=\hbar$. It is obvious without any mathematical analysis that the smallest amount of stochasticity corresponds to absence of diffusion, $\lambda =0$, which yields Bohmian mechanics. Processes like the diffusion (\ref{diffusion}) for $\lambda > 0$ seem less natural for the fundamental evolution law of a physical theory since they involve greater mathematical complexity than is needed for a straightforward association of a process with $H$ and $\Psi$. Examples of processes that do not have the standard-current property, for the Schr\"odinger Hamiltonian \eqref{Hamil}, are provided by the alternative velocity formulas considered by Deotto and Ghirardi \cite{Deotto}; one can say that their current is not the one suggested by $H$ and $\Psi$.
We return to the general discussion of the minimal process. As we have already indicated, when, for a standard-current process, we view $\mathbb{J}$ as well as its backward generator $L$ as operators on $L^2(\mathcal{Q}, \mathbb{P})$, then $\frac12 \mathbb{J}$ is the anti-symmetric (skew-adjoint) part of $L$; thus, only the symmetric (self-adjoint) part of $L$ remains at our disposal. Since one of the properties of a backward generator is $L\mathbf{1} =0$, the first possibility $\tilde{L}$ for $L$ that may satisfy the formal criteria for being a backward generator is $\tilde{L} f = \frac12 \mathbb{J} f - (\frac12 \mathbb{J} \mathbf{1})f$. When ${P}$ is a PVM, this is also the operator we obtain by applying, to an arbitrary quantum Hamiltonian $H$, the formula \eqref{LH} for what we called the minimal free generator, which we repeat here for convenience: \begin{equation}\label{Ltilde}
\tilde{L} f(q) = \mathrm{Re} \, \frac{\sp{\Psi} {{P}(dq) \frac{i}{\hbar}
[H,\hat{f}] |\Psi}} {\sp{\Psi} {{P}(dq)|\Psi}}\,. \end{equation} Whereas this formula merely provided an alternative definition of the free process in Section \ref{sec:free2}, it now plays a different role: a step towards obtaining the minimal process from the Hamiltonian $H$. As we have pointed out in Section \ref{sec:free2}, $\tilde{L}$ is also an obvious naive guess for the backward generator $L$, quite independent of equivariance or the current $\mathbb{J}$, since $\frac{i} {\hbar} [H,\hat{f}]$ is the time derivative of $\hat{f}$. Moreover, it manifestly satisfies $\tilde{L} \mathbf{1} =0$. For the backward generator $L$ of a standard-current process we must have, when ${P}$ is a PVM, that $L = \tilde{L} + S$ where $S$ is a symmetric operator and $S\mathbf{1} =0$. For the minimal process, we have to choose $S$ as small as possible---while keeping $S$ symmetric and $L$ a backward generator.
Suppose ${P}$ is a PVM. Observe then that if $H$ is a differential operator (as $H_0$ often is) of the kind considered in Section \ref{sec:free2}, $\tilde{L}$ is itself a backward generator, so that $S=0$ is a possible, and in fact the smallest, choice. If $H$ is an integral operator, what keeps $\tilde{L}$, an integral operator as well, from being a backward generator is that the off-diagonal part of its $\mathbb{P}$-kernel $(q,\tilde{L} q') = \mathbb{P}(q) \tilde{L}(q,q')
= \frac{1}{\hbar} \, \mathrm{Im} \, \sp{\Psi}{q} \sp{q}{H|q'} \sp{q'}{\Psi}$ may assume negative values whereas the off-diagonal part of the
$\mathbb{P}$-kernel of $L$, $(q,Lq') = \mathbb{P}(q) \sigma(q|q')$, cannot be negative. The smallest possible choice of $S$ has as off-diagonal elements what is needed to compensate the negative values, and this leads to the minimal jump process, as described in Section \ref{sec:mini4}. The diagonal part contains only what is needed to ensure that $S\mathbf{1} =0$. For $H$ of the form $H_0 + H_{I}$, the role of $S$ is again to compensate negative values off the diagonal, and the minimal process has velocities determined by $H_0$ via \eqref{LH} and jump rates determined by $H_{I}$ via \eqref{tranrates}.
In any case, the backward generator of the minimal process is the one closest, in a suitable sense, to \eqref{Ltilde}. This formula may thus be regarded as containing the essential structure of $L$, for the deterministic as well as for the jump part of the process.
Another approach towards a general notion of minimal process may be to approximate $H$ by Hilbert--Schmidt operators $H_n$, with which are associated, according to the results of Sections~4.2.1 and 4.2.4 of \cite{crea2A}, minimal jump processes $Q_n$, and take the limit $n \to \infty$ of the processes $Q_n$. This leads to a number of mathematical questions, such as under what conditions on $H, \Psi, {P}$, and $H_n$ does a limiting process exist, and is it independent of the choice of the approximating sequence $H_n$.
\section{Remarks}\label{sec:remarks}
\subsection{Symmetries}\label{sec:symm}
Process additivity preserves symmetries, in the sense that the process generated by $\sum \mathscr{L}^{(i)}$ shares the symmetries respected by all of the building blocks $\mathscr{L}^{(i)}$. This section elaborates on this statement, and the following ones: The minimal jump rates \eqref{tranrates} and the minimal free generator \eqref{LH} share the symmetries of the Hamiltonians with which they are associated. The ``second quantization'' algorithm preserves the symmetries respected by the one-particle process.
Here are some desirable symmetries that may serve as examples: space translations, rotations and inversion, time translations and reversal, Galilean or Lorentz boosts, global change of phase $\Psi \to e^{i\theta} \Psi$, relabeling of particles,\footnote{This may mean two things: changing the artificial labels given to identical particles, or exchanging two species of particles.} and gauge transformations.
We focus first on symmetries that do not involve time in any way, such as rotation symmetry. In this case, a symmetry group $G$ acts on $\mathcal{Q}$, so that to every $g \in G$ there corresponds a mapping $\varphi^g: \mathcal{Q} \to \mathcal{Q}$. In addition, $G$ acts on $\mathscr{H}$ through a projective unitary (or anti-unitary) representation, so that to every $g \in G$ there corresponds a unitary (or anti-unitary) operator $U_g$. Then the theory is $G$-invariant if both the wave function dynamics and the process on $\mathcal{Q}$ are, i.e., if $H$ is $G$-invariant, \begin{equation}\label{HGinv}
U_g^{-1} H U_g = H\,, \end{equation} and \begin{equation}\label{QGinv}
\varphi^g(Q_t^\Psi) = Q_t^{U_g \Psi} \end{equation} in distribution on path space. A necessary condition for (\ref{QGinv}) is that the ``configuration observable'' transforms like the configuration, in the sense that \begin{equation}\label{povGinv}
U_g^{-1} {P}(\,\cdot\,) U_g = \varphi^g_* {P}(\,\cdot\,)\,, \end{equation} where $\varphi_*$ denotes the action of $\varphi$ on measures. Without (\ref{povGinv}), (\ref{QGinv}) would already fail at time $t=0$, no matter what the generator is. Given (\ref{povGinv}), (\ref{QGinv}) is equivalent to the $G$-invariance of the generator: \begin{equation}\label{LGinv}
\varphi^g_* \mathscr{L}^\Psi \varphi^{g^{-1}}_* =
\mathscr{L}^{U_g \Psi} \,. \end{equation}
Since $\varphi^g_*$ is a linear operator, it follows immediately that the sum of $G$-invariant generators is again $G$-invariant. The minimal jump process, when it exists, is $G$-invariant, as follows from the fact that $\varphi^g_*\sigma^\Psi(dq|\varphi^g(q')) =
\sigma^{U_g \Psi} (dq|q')$, which can be seen by inspecting the jump rate formula (\ref{tranrates}). The minimal free generator \eqref{genH} satisfies \eqref{LGinv} by virtue of \eqref{HGinv} and \eqref{povGinv}. ``Second quantization'' provides $G$-actions on $\Gamma \mathcal{Q}^{(1)}$ and $\mathscr{F} = \Gamma \mathscr{H}^{(1)}$ from given actions on $\mathcal{Q}^{(1)}$ and $\mathscr{H}^{(1)}$; \eqref{HGinv}, \eqref{povGinv} and \eqref{LGinv} are inherited from their 1-particle versions.
Time-translation invariance is particularly simple. Consider generators $\mathscr{L}^{(i)}_\Psi$ which do not depend on time except through their dependence on $\Psi$. Then the same is true of $\sum \mathscr{L}^{(i)}$. The same can be said of the ``second quantized'' generator, and, provided $H$ is time-independent, of the minimal jump rates (\ref{tranrates}) and the minimal free generator \eqref{genH}.
Next we consider time reversal. It is represented on $\mathscr{H}$ by an anti-unitary operator $T$, i.e., an anti-linear operator such that $\sp{T\Phi}{T\Psi}$ is the conjugate of $\sp{\Phi}{\Psi}$. We assume that the Hamiltonian is reversible, $THT^{-1} = H$. Then the reversibility of the theory means that \begin{equation}\label{QTinv}
Q^\Psi_{-t} = Q_t^{T\Psi} \end{equation} in distribution on path space, where the superscript should be understood as indicating the state vector at $t=0$. The necessary condition analogous to (\ref{povGinv}) reads \begin{equation}\label{povTinv}
T^{-1} {P}(\,\cdot\,) T = {P}(\,\cdot\,) \,, \end{equation} and given that, (\ref{QTinv}) is equivalent to the $T$-invariance of the generator: \begin{equation}\label{LTinv}
\overline{\mathscr{L}}_\Psi = \mathscr{L}_{T\Psi}\,, \mbox{ or }
\overline{L}_\Psi = L_{T\Psi}\,, \end{equation} where $\overline{\mathscr{L}}$ and $\overline{L}$ denote the forward and backward generators of the time-reversed process. $\overline{L}$ can be computed from $L$, for an equivariant Markov process, according to\footnote{To make this formula plausible, it may be helpful to note that the second term on the right hand side is just the correction needed to ensure that $L^\dag\mathbf{1} =0$, a necessary condition for being a backward generator. If $\mathbb{P}$ were stationary, the second term on the right hand side would vanish.
Here is a derivation of \eqref{LT}: Let $(f,g) = \int_{q\in \mathcal{Q}}
f(q) \, g(q) \, \mathbb{P}(dq)$ be the scalar product in
$L^2(\mathcal{Q},\mathbb{P})$. It follows from the definition
\eqref{backgenerator} of $L$ that
\[
(g,Lf) = \lim_{t\searrow 0} \frac{1}{t} \, \mathbb{E} \big( g(Q_0)
f(Q_t) - g(Q_0) f(Q_0) \big)\,.
\]
Correspondingly, $\overline{L}$ is characterized (for $f$ and $g$ real) by
\begin{eqnarray*}
(g,\overline{L}f)
&=& \lim_{t\searrow 0} \frac{1}{t} \, \mathbb{E} \big( g(Q_0)
f(Q_{-t}) - g(Q_0) f(Q_0) \big) = \\
&=&\lim_{t\searrow 0} \frac{1}{t} \, \mathbb{E} \big( g(Q_{0}) f(Q_{-t})
- g(Q_{-t}) f(Q_{-t}) \big) \: +\\
&+& \lim_{t\searrow 0} \frac{1}{t}\,\mathbb{E} \big( g(Q_{-t}) f(Q_{-t})
- g(Q_{0}) f(Q_{0}) \big) = \\
&=& (f,Lg) -\int\limits_{q\in\mathcal{Q}} g(q) \, f(q) \,\dot{\mathbb{P}}(dq)
\stackrel{\eqref{generatorduality}}{=} (Lg,f)-(L(gf),\mathbf{1}) =
(g,L^\dag f) - (fg,L^\dag \mathbf{1})\,,
\end{eqnarray*}
which amounts to \eqref{LT}.} \begin{equation}\label{LT}
\overline{L} f = L^\dag f - (L^\dag\mathbf{1}) f \end{equation} where $^\dag$ denotes the adjoint operator on $L^2(\mathcal{Q},\mathbb{P})$, with $\mathbb{P}$ given by \eqref{mis}. Since $\overline{L}$ is linear in $L$, condition (\ref{LTinv}) is preserved when adding (forward or backward) generators; it is also preserved under ``second quantization.'' For a pure jump process, (\ref{LTinv}) boils down to \begin{equation}\label{jumprevers}
\sigma^{\Psi}(dq|q') \, \sp{\Psi}{{P}(dq') |\Psi} = \sigma^{T \Psi}
(dq'|q) \, \sp{\Psi}{{P}(dq) |\Psi}\,, \end{equation} which is satisfied for the minimal jump rates, by inspection of (\ref{tranrates}). The minimal free generator \eqref{LH} changes sign when replacing $\Psi$ by $T\Psi$, which means the velocity changes sign, as it should under time reversal (see Section \ref{sec:freeflow}).
Invariance under Galilean boosts is a more involved story, and as it is not considered as fundamental in physics anyway, we omit it here. Lorentz boosts are even trickier, since for more than just one particle, they even fail to map (simultaneous) configurations into (simultaneous) configurations. As a result, the problem of Lorentz invariance belongs in an altogether different league, which shall not be entered here.
\subsection{On the Notion of Reversibility}
It may appear, and it is in fact a widespread belief, that
stochasticity is incompatible with time reversibility. We naturally
view the past as fixed, and the future, in a stochastic theory, as
free, determined only by innovations. Even Bell expressed such a
belief \cite[p.~177]{Bellbook}. However, from the proper perspective
the conflict disappears, and this perspective is to consider the
path space (of the universe) and the probability measure thereon. If
$t\mapsto Q_t$ is a history of a universe governed by a Bell-type QFT, then
its time reverse, $t\mapsto Q_{-t}$, is again a possible path of this
Bell-type QFT, though corresponding to a different initial state
vector $T\Psi$ instead of $\Psi$, with $T$ the time reversal
operator as discussed in Section \ref{sec:symm}. More than this, the
distribution of the reversed path $t\mapsto Q_{-t}$ coincides with the
probability measure on path space arising from $T\Psi$.\footnote{We
can be more precise about the meaning of the measure on path space:
as in Bohmian mechanics \cite{DGZ}, its role ``is precisely to
permit definition of the word `typical'.'' \cite[p.~129]{Bellbook}
Consequently, the meaning of the reversibility property of the
measures we just mentioned is that the time reverse of a history
that is typical with respect to $\Psi$, is typical with respect to
$T\Psi$.}
It may also be helpful to think of how the situation appears when
viewed from outside space-time: then the path $Q_t$ corresponds to
the decoration of space-time with a pattern of world lines, and this
pattern is random with respect to a probability measure on what
corresponds to path space, namely the space of all possible
decorations of space-time. Then time reversal is a mere reflection,
and for a theory to be time reversible means the same as being
invariant under this reflection: that we could have had as well the
reflected probability measure, provided we had started with $T\Psi$
instead of $\Psi$.
To sum up, we would like to convey that the sense of reversibility
for Markov processes indeed matches the sense of reversibility that
one should expect from a physical theory.
\subsection{Heisenberg Picture}
In (\ref{mis}), we have applied the Schr\"odinger picture, according
to which the state vector evolves while the operators remain
fixed. Eq.~(\ref{mis}) and the reasoning following it can as well be
translated to the Heisenberg picture where the state vector $\Psi$
is regarded as fixed and the operators ${P}_t(\,\cdot\,)$ as
evolving. Thus, we could equivalently write
\[
\mathbb{P}_t(dq) = \sp{\Psi}{{P}_t(dq)| \Psi}
\]
instead of (\ref{mis}). Similarly, $H_0$ and $H_{I}$ become
time-dependent while their sum is constant. We often use an
ambiguous notation like $\sp{\Psi}{{P}(dq)|\Psi}$ and formula
\eqref{tranrates} since the formulas are equally valid in both
pictures (and, for that matter, in the interaction picture).
Like the jump rate formula \eqref{tranrates}, the formula \eqref{LH}
for the minimal free generator is equally valid in the Heisenberg
picture.
We further remark that in the Heisenberg picture, the following nice
equation holds for a pure jump process with minimal rates when
${P}$ is a PVM:
\begin{equation}\label{twotimes}
\mathrm{Prob}\{Q_{t+dt} \in dq, Q_{t} \in dq'\} = \sp{\Psi} {\{{P}_{t+dt}
(dq), {P}_{t}(dq') \} | \Psi}^+
\end{equation}
for $dq \cap dq' = \emptyset$, where $\{ \;,\, \}$ on the right hand
side means the anti-commutator. The similarity to the one-time
distribution formula
\[
\mathrm{Prob}\{Q_t \in dq\} = \sp{\Psi}{{P}_t(dq) |\Psi}
\]
is striking. Specifying the two-time distribution for infinitesimal
time differences is a way of characterizing a Markov process,
equivalent to specifying the (forward or backward) generator and the
one-time distribution. Thus, for a PVM ${P}$ \eqref{twotimes}
provides another formula for the minimal jump rates
\eqref{tranrates}. A similar formula for the process generated by
the minimal free generator \eqref{LH} is $\mathbb{E} \big(g(Q_t)
f(Q_{t+dt}) \big) = \frac12 \sp{\Psi} {\{\hat{g}_t, \hat{f}_{t+dt}
\} | \Psi}$.
\subsection{Examples of Process Additivity} \label{sec:known}
Among different versions of Bohmian mechanics we find numerous examples of process additivity (and, remarkably, no example \emph{violating} it): \begin{itemize} \item The Hamiltonian for $n$ noninteracting particles is the sum of
the Hamiltonians for the individual particles, and it is easy to see
that the vector field defining Bohmian mechanics for the
$n$-particle system is the sum of the vector fields (each regarded
as a vector field on $\mathbb{R}^{3n}$) for the particles. As already
mentioned, sums of generators for deterministic processes amount to
sums of the defining vector fields.
Moreover, the vector field for each particle is essentially the
Bohmian one-particle law. To point out that this is a nontrivial
fact, we mention that this is not so for the alternative velocity
formula (10.2) in \cite{Deotto} considered by Deotto and Ghirardi,
for which the velocity of the $i$-th particle differs from the
one-particle law. So Bohmian mechanics of $n$ particles can be
viewed as built from $n$ copies of the one-particle version, in fact
by the ``second quantization'' algorithm of Section \ref{sec:Gamma}.
\item The vector field of Bohmian mechanics for a single spinless
particle may also be seen as arising in this way. If a Hamiltonian
$H=-X^2$ is the negative square of an (incompressible) vector field
(regarded as a first-order differential operator) $X=a({\boldsymbol x}) \!\cdot\!
\nabla$ on $\mathbb{R}^3$ (with $\nabla \!\cdot\! a=0$ ensuring formal
self-adjointness of the square), then the simplest equivariant
process associated with $H$ is given by the velocity vector field
\[
v= \frac{2}{\hbar} \, \mathrm{Im} \,\frac{a\cdot \nabla \Psi}{\Psi}\, a\, .
\]
The corresponding backward generator is $L = \frac{2}{\hbar} \, \mathrm{Im}
\, (\frac{X\Psi}{\Psi}) X$. Now $-\frac{\hbar^2}{2}\Delta =
-\sum_{\alpha}{X_{\alpha}}^2$ is the sum of 3 negative squares of
vector fields $X_{\alpha} = \frac{\hbar} {\sqrt{2}} \partial /
\partial x^\alpha$ corresponding to the individual degrees of
freedom. The associated Bohm velocity is the sum of the velocities
corresponding to the squares. So Bohmian mechanics in three
dimensions can be viewed as built of 3 copies of the one-dimensional
version. To point out that this is a nontrivial fact, we mention
that this is not true, e.g., of the velocity formulas (10.1) and
(10.2) in \cite{Deotto}, which do not make sense in dimensions other
than 3.
\item If we add an interaction potential $V$ to $-\frac{\hbar^2}{2}
\Delta$, the Bohm velocity is the appropriate sum, since the
operator $V$ is associated with the trivial motion $v=0$.
\item We may also include an external vector potential ${\boldsymbol A}({\boldsymbol x},t)$ in
the Schr\"odinger equation, that is, replace $- \frac{\hbar^2}{2}
\Delta = - \frac{\hbar^2}{2} \nabla^2$ by $- \frac{\hbar^2}{2}
\big( \nabla + i \frac{e}{\hbar} {\boldsymbol A}({\boldsymbol x},t) \big)^2 = -
\frac{\hbar^2}{2} \Delta - \frac{\hbar^2}{2} (i \frac{e}{\hbar}
\nabla\cdot{\boldsymbol A} + i \frac{e}{\hbar} {\boldsymbol A} \cdot \nabla) +
\frac{e^2}{2} {\boldsymbol A}^2$. The sum of the associated velocities, namely
\[
\hbar \, \mathrm{Im} \, \frac{\Psi^* \nabla \Psi}{\Psi^* \, \Psi} + e{\boldsymbol A} +
0
\]
equals the velocity one obtains directly, $\hbar \, \mathrm{Im} \, \Psi^*
(\nabla +i \frac{e}{\hbar} {\boldsymbol A})\Psi/ \Psi^* \, \Psi$.
\item In the Bohm--Dirac theory (\ref{BohmDirac}), however, one can
include an external gauge connection $A_\mu({\boldsymbol x},t)$ in the Dirac
equation without changing the velocity formula. That conforms with
process additivity because the operator $(\gamma^0)^{-1} \gamma^\mu
A_\mu = A_0+\boldsymbol{\alpha}\cdot{\boldsymbol A}$ is associated (termwise)
with $v=0$.
\item In the Dirac Hamiltonian $H = -i c \hbar {\boldsymbol \alpha} \cdot \nabla +
\beta mc^2$, the first term corresponds to the Bohm--Dirac velocity
(\ref{BohmDirac}), whereas the second term corresponds to $v=0$; as
a consequence, the Bohm--Dirac velocity does not depend on the mass.
Moreover, the three components of the Bohm--Dirac velocity are each
equivariant with respect to the corresponding derivative term in
$H$. \end{itemize}
In addition, we point out cases of process additivity in the ``second quantization'' algorithm and minimal jump processes.
The ``second quantized'' generator $\Gamma \mathscr{L}^{(1)}$ as constructed in Section \ref{sec:Gamma} provides an example of process additivity (or may be viewed as an application of process additivity): \[
\mathscr{L}_{H_0, \Psi}= \sum_{n=0}^{\infty}
\mathscr{L}_{H_0^{(n)}, \Psi^{(n)}} \,, \] where the generators in the sum correspond to motions in the respective different sectors of $\mathcal{Q}$.
Suppose we regard the particles as ordered, $Q = ({\boldsymbol Q}_1, \ldots, {\boldsymbol Q}_N)$. Then another case of process additivity becomes visible: \[
H_0^{(N)} = \sum_{i=1}^N h_i \] where $h_i$ is the one-particle Hamiltonian acting on the $i$-th particle. Correspondingly, \[
\mathscr{L}_{H_0^{(N)}} = \sum_{i=1}^N \mathscr{L}_i \] where $\mathscr{L}_i$ is equivariant with respect to $h_i$. This applies not only to Bohmian mechanics (as described earlier in this section), but generally to the ``second quantization'' procedure as described in Section \ref{sec:Gamma}. We also note that the ``second quantization'' algorithm presented in Section \ref{sec:Gamma} preserves process additivity in the sense that $\Gamma(\mathscr{L}_1^{(1)} + \mathscr{L}_2^{(1)}) = \Gamma(\mathscr{L}_1^{(1)}) + \Gamma(\mathscr{L}_2^{(1)})$ while $\Gamma(H_1^{(1)} + H_2^{(1)}) = \Gamma(H_1^{(1)}) + \Gamma(H_2^{(1)})$.
\label{sec:miniadd}
We now turn to process additivity among minimal jump processes.
A jump process generated by a sum need not be a minimal jump process even when its constituents are. But under certain conditions it is. Two such cases are the ``direct sum'' and ``tensor product'' processes constructed in Sections \ref{sec:directsum} and \ref{sec:tensorproduct}: $\mathscr{H} = \bigoplus_n \mathscr{H}^{(n)}$ with $\mathcal{Q} = \bigcup_n \mathcal{Q}^{(n)}$ and $H =\bigoplus_n H^{(n)}$, and $\mathscr{H} = \mathscr{H}^{[1]} \otimes \cdots \otimes \mathscr{H}^{[N]}$ with $\mathcal{Q} = \mathcal{Q}^{[1]} \times \cdots \times \mathcal{Q}^{[N]}$ and $H = \sum_i \mathbf{1} \otimes \cdots \otimes H^{[i]} \otimes \cdots \otimes \mathbf{1}$, with $\mathscr{L} = \sum \mathscr{L}_i$ where $\mathscr{L}_i$ acts nontrivially, in an obvious sense, only on $\mathcal{Q}^{(i)}$ or on
$\mathcal{Q}^{[i]}$. These are special cases of the general fact that minimality is compatible with additivity whenever the addends of the Hamiltonian correspond to \emph{different sorts} of jumps. That can be most easily understood in the case of a PVM corresponding to an orthonormal basis $\{|q\rangle : q \in \mathcal{Q}\}$ of $\mathscr{H}$: suppose
$H=H_1 + H_2$ and for every pair $q,q'$ either $\sp{q}{H_1|q'} =0$ or
$\sp{q}{H_2 |q'} =0$. Then $\sigma = \sigma_1 + \sigma_2$. The corresponding condition in the POVM context is that the kernels of $H_1$ and $H_2$ have disjoint supports. When $H$ is naturally given as a sum this condition would be expected to be satisfied.
Finally, we remark that the minimal free generator $\mathscr{L} = \mathscr{L}^H$ as defined in (\ref{genH}) is additive in $H$.
\subsection{Second Quantization of a Minimal Jump Process}
\label{sec:mini3}
We note that the ``second quantization'' of a minimal jump
process associated with a PVM ${P}^{(1)}$, as described in Section
\ref{sec:Gamma}, is the minimal jump process associated with the
second-quantized Hamiltonian; this is a consequence of the
observation that $\mathscr{L}_i$ generates the minimal jump process
for $H_i$ in this case. This fact is probably physically irrelevant
but it is mathematically nice.
\subsection{Global Existence Question}
The rates $\sigma_t$ and velocities $v_t$, together with $\mathbb{P}_t$, define the process $Q_t$ associated with $H,{P}$, and $\Psi$, which can be constructed along the lines of Section \ref{sec:revjump}. However, the rigorous existence of this process, like the global existence of solutions for an ordinary differential equation, is no trivial matter. See Section~4.3 of \cite{crea2A} for a discussion of what must be controlled in order to establish the global existence of the process, and \cite{crex1} for an example of such a global existence proof.
\subsection{POVM Versus PVM}
As we have already remarked in footnote \ref{ft:Naimark}, every POVM ${P}$ is related to a PVM ${P}_{\mathrm{ext}}$, the Naimark extension, on a larger Hilbert space $\mathscr{H}_{\mathrm{ext}}$ according to ${P}(\,\cdot\,) = P_+ {P}_{\mathrm{ext}}(\,\cdot\,) I$ with $P_+$ the projection $\mathscr{H}_{\mathrm{ext}} \to \mathscr{H}$ and $I$ the inclusion $\mathscr{H} \to \mathscr{H}_{\mathrm{ext}}$. This fact allows a second perspective on ${P}$, and sometimes creates a certain ambiguity as to which process is the suitable one for a Bell-type QFT, as follows. At several places in this paper, we have described considerations leading to and methods for defining Markov processes, in particular minimal jump rates \eqref{tranrates} and the minimal free generator \eqref{LH}; these considerations and methods could be applied using either $\mathscr{H}_{\mathrm{ext}}$ and ${P}_{\mathrm{ext}}$ or $\mathscr{H}$ and ${P}$. One would insist that the state vector $\Psi$ must lie in $\mathscr{H}$, the space of physical states, but even then one might arrive at different processes starting from ${P}$ or ${P}_{\mathrm{ext}}$. To obtain a process from ${P}_{\mathrm{ext}}$ requires, of course, that we have a Hamiltonian on $\mathscr{H}_{\mathrm{ext}}$, while $H$ is defined on $\mathscr{H}$; such a Hamiltonian, however, can easily be constructed from $H$ by setting $H_{\mathrm{ext}} = I H P_+$.
In some cases, the Naimark extension does not lead to an ambiguity. This is the case for the jump rate formula \eqref{tranrates}, since for $\Psi \in \mathscr{H}$, $\sp{\Psi}{{P}_{\mathrm{ext}}(dq)| \Psi} =
\sp{\Psi}{{P}(dq) |\Psi}$ and $\sp{\Psi}{{P}_{\mathrm{ext}}(dq) H_{\mathrm{ext}}
{P}_{\mathrm{ext}}(dq')| \Psi} = \sp{\Psi}{{P}(dq) H {P}(dq') |\Psi}$. This fact suggests that, generally, the minimal process arising from $H_{\mathrm{ext}}$ and ${P}_{\mathrm{ext}}$ is the same as the one arising from $H$ and ${P}$.
The situation is different, however, when $H$ is defined on $\mathscr{H}_{\mathrm{ext}}$ to begin with, and different from $H_{\mathrm{ext}}$. This is the case with the free Dirac operator $h_0$, defined as a differential operator on $L^2(\mathbb{R}^3,\mathbb{C}^4)$, which differs from $P_+h_0P_+$. When we obtained in Section \ref{sec:free2} the Bohm--Dirac motion \eqref{BohmDirac} from the formula \eqref{LH} for the minimal free generator, we used $h_0$ and ${P}_{\mathrm{ext}}$. In contrast, the restriction of $h_0$ to the positive energy subspace, or equivalently $P_+ h_0 P_+$, possesses a kernel; more precisely, it is a convolution operator $S_+ \star (h_0 S_+) \star$ in the notation of Section \ref{sec:positron}, and thus corresponds to jumps. The associated minimal process on $\mathbb{R}^3$ presumably makes infinitely many jumps in every finite time interval, similar to the example of \cite{crea2A}, Section 3.5.
Thus, there are two processes to choose between, the Bohm--Dirac motion and the minimal process for $P_+ h_0 P_+$. Both are equivariant, and thus it is arguably impossible to decide empirically which one is right. In our example theory in Section \ref{sec:positron}, we chose the simpler, deterministic one. But we leave to future work the discussion of which is more likely relevant to physics, and why.
\subsection{The Role of Field Operators}\label{sec:fields}
The Bell-type QFTs with which we have been concerned in this paper are models describing the behaviour of \emph{particles} moving in physical 3-space, not of fields on 3-space. We have been concerned here mainly with a particle ontology, not a field ontology. This focus may be surprising at first: almost by definition, it would seem that QFT deals with fields, and not with particles. Consider only the occurrence (and prominence) of field operators in QFT!
But there is less to this than might be expected. The field operators do not function as observables in QFT. It is far from clear how to actually ``observe'' them, and even if this could somehow, in some sense, be done, it is important to bear in mind that the standard predictions of QFT are grounded in the particle representation, not the field representation: Experiments in high energy physics are scattering experiments, in which what is observed is the asymptotic motion of the outgoing particles. Moreover, for Fermi fields---the matter fields---the field as a whole (at a given time) could not possibly be observable, since Fermi fields anti-commute, rather than commute, at space-like separation. One should be careful here not to be taken in by the attitude widespread in quantum theory of intuitively regarding the operators as ``quantities,'' as if they represented something out there in reality; see \cite{naive} for a critique of this attitude.
So let us focus on the role of the field operators in QFT. This seems to be to relate abstract Hilbert space to space-time: the field operators are attached to space-time points, unlike the quantum states $\Psi$, which are usually regarded not as functions but as abstract vectors. In orthodox quantum field theory the field operators are an effective device for the specification of Hamiltonians having good space-time properties. For our purposes here, what is critical is the connection between field operators and POVMs.
Throughout this paper, the connection between Hilbert space and the particle positions in physical space has been made through the POVM ${P}$, and through it alone. We now wish to emphasize that the field operators are closely related to ${P}$, and indeed that field operators are just what is needed for efficiently defining a POVM ${P}$ on $\Gamma(\mathbb{R}^3)$.
This connection is made through number operators $N(R)$, $R \subseteq \mathbb{R}^3$. These define a \emph{number-operator-valued measure} (NOVM) $N(\,\cdot\,)$ on $\mathbb{R}^3$, an ``unnormalized POVM'' ($N(\mathbb{R}^3)$ is usually not the identity operator and $N(R)$ is usually an unbounded positive operator) for which the values $N(R)$ commute and are number operators: $\mathrm{spectrum}(N(R)) \subseteq \{0,1,2,3,\ldots\}$. (The basic difference, then, between a NOVM and a PVM is that the spectrum of the positive operators is $\{0,1,2,3,\ldots\}$ rather than just $\{0,1\}$.)
There is an obvious one-to-one relation between NOVMs $N(\,\cdot\,)$ on $\mathbb{R}^3$ and PVMs ${P}$ on $\Gamma(\mathbb{R}^3)$, given by \begin{equation}\label{Npov}
N(R) = \int\limits_{q\in\Gamma(\mathbb{R}^3)} n_R(q) \, {P}(dq) \end{equation} where $n_R(q) = \#(q \cap R)$ is the number function on $\Gamma(\mathbb{R}^3)$ for the region $R$. Since \eqref{Npov} is the spectral decomposition of the commuting family $N(R)$, this correspondence is one-to-one. (Note that the joint spectrum of the commuting family $N(R)$ is the set of nonnegative-integer-valued measures $n_R$ on $\mathbb{R}^3$, one of the definitions of $\Gamma(\mathbb{R}^3)$ given in Section \ref{sec:free}.)
The moral is that a NOVM on $\mathbb{R}^3$ is just a different way of speaking about a PVM ${P}$ on $\mathcal{Q} = \Gamma(\mathbb{R}^3)$. All other POVMs arise from PVMs by restriction to a subspace (Naimark's theorem \cite{Davies}). An easy way to obtain a NOVM $N$ starts with setting \begin{equation}\label{Ndef}
N(R) = \int_R \phi^*({\boldsymbol x}) \, \phi({\boldsymbol x}) \, d^3{\boldsymbol x} \end{equation} for suitable operators $\phi({\boldsymbol x})$. An easy way to ensure that the $N(R)$ commute is to require that the operators $\phi({\boldsymbol x})$ commute or anti-commute with each other and the adjoints $\phi^*({\boldsymbol x}')$ for ${\boldsymbol x}'\neq {\boldsymbol x}$. An easy way to ensure that the $N(R)$ have nonnegative integer eigenvalues is to require that \begin{equation}\label{ccr}
[\phi({\boldsymbol x}),\phi^*({\boldsymbol x}')]_\pm = \delta({\boldsymbol x}-{\boldsymbol x}')\,, \end{equation}
where $[ \;,\,]_\pm$ is the (anti\nobreakdash-)commutator, and that there is a cyclic vacuum state $|0\rangle \in \mathscr{H}$ for which
$\phi({\boldsymbol x})|0 \rangle =0$. The relations \eqref{ccr} are of course just the usual canonical (anti\nobreakdash-)commutation relations that field operators are required to satisfy.
Moreover, in gauge theories the connection between matter field $\phi$ and the NOVM is perhaps even more compelling. Consider a gauge theory with internal state space $V$, equipped with the inner product $\scalar{\,\cdot\,}{\,\cdot\,}$. Then, given ${\boldsymbol x} \in \mathbb{R}^3$, the matter field $\phi({\boldsymbol x})$ should formally be regarded as a linear functional $V \to \mathcal{O}(\mathscr{H})$, $\xi \mapsto \phi_\xi({\boldsymbol x})$, from the internal state space to operators on $\mathscr{H}$, with $\phi^*_{\xi^*}({\boldsymbol x}) = (\phi_\xi({\boldsymbol x}))^*$ a linear function $V^* \to \mathcal{O}(\mathscr{H})$ on the dual of $V$. \eqref{ccr} then becomes $[\phi_\xi({\boldsymbol x}), \phi_{\eta^*}^*({\boldsymbol x}')] = \delta({\boldsymbol x}-{\boldsymbol x}') \, \scalar{\eta}{\xi}$. Thus the simplest gauge-invariant object associated with $\phi$ is the NOVM \eqref{Ndef}, with the integrand understood as the contraction of the tensor $V \times V^* \to \mathcal{O}(\mathscr{H})$, $(\xi,\eta) \mapsto \phi_\eta^*({\boldsymbol x}) \, \phi_\xi({\boldsymbol x})$.
Hence, not only does the notion of particle not conflict with the prominence of field operators (see Sections \ref{sec:crea1} and \ref{sec:positron} for explicit examples), but field operators have a natural place in a theory whose ultimate goal it is to govern the motion of particles. One of their important roles is to define the POVM ${P}$ that relates Hilbert space to configuration space. Quantum theory of fields or quantum theory of particles? A theory of particle motion exploiting field operators!
\section{Conclusions}
The essential point of this paper is that there is a direct and natural way of understanding QFT as a theory about moving particles, an idea pioneered, in the realm of nonrelativistic quantum mechanics, by de Broglie and Bohm. We leave open, however, three considerable gaps: the question of the process associated with the Klein--Gordon operator, the problem of removing cut-offs, and the issue of Lorentz invariance.
\noindent \textbf{Acknowledgements. }We thank James Taylor of Rutgers University and Stefan Teufel of Technische Universit\"at M\"unchen for helpful discussions. R.T.\ gratefully acknowledges support by the German National Science Foundation (DFG). N.Z.\ gratefully acknowledges support by INFN and DFG. Finally, we appreciate the hospitality that some of us have enjoyed, on more than one occasion, at the Mathematisches Institut of Ludwig-Maximilians-Universit\"at M\"unchen, at the Dipartimento di Fisica of Universit\`a di Genova, and at the Mathematics Department of Rutgers University.
\end{document} |
\begin{document}
\title{Abstract commensurability and the Gupta--Sidki group}
\begin{abstract}
We study the subgroup structure of the infinite torsion $p$-groups defined by Gupta and Sidki in 1983. In particular, following results of Grigorchuk and Wilson for the first Grigorchuk group, we show that all infinite finitely generated subgroups of the Gupta--Sidki 3-group $G$ are abstractly commensurable with $G$ or $G\times G$.
As a consequence, we show that $G$ is subgroup separable and from this it follows that its membership problem is solvable.
Along the way, we obtain a characterization of finite subgroups of $G$ and establish an analogue for the Grigorchuk group.
\end{abstract}
\section{Introduction}
Groups of automorphisms of regular rooted trees have received considerable attention in the last few decades. Their interest derives from the striking properties of some of the first examples studied, the Grigorchuk group \cite{grig} and the Gupta--Sidki $p$-groups \cite{guptasidki}. These groups are easily understood examples of infinite finitely generated torsion groups, answering the General Burnside Problem. Furthermore, the Grigorchuk group was the first group to be shown to be of intermediate word growth and to be amenable but not elementary amenable (see \cite{GrigIntermediate}). Amenability of the Gupta--Sidki $p$-groups (among many other examples) was proved in \cite{AmenableBoundedAutomata}. These and other striking results prompt one to ask what other unusual properties these groups may have.
In \cite{griwil}, the authors establish another notable result about the Grigorchuk group, namely that all its infinite finitely generated subgroups are (abstractly) commensurable with the group itself. Recall that two groups are \emph{(abstractly) commensurable} if they have isomorphic subgroups of finite index. This notion translates into geometric terms as ``having a common finite-sheeted covering": two spaces which have a common finite-sheeted covering have commensurable fundamental groups. For this reason it is an important concept in geometric group theory. It also appears in the study of lattices in semisimple Lie groups, in profinite groups and other areas of group theory. Having only one commensurability class of infinite finitely generated subgroups is a very strong restriction on subgroup structure and examples where it is known to hold are scarce. Following a similar general strategy to that in \cite{griwil}, we will show that an analogous result holds for the Gupta--Sidki 3-group:
\begin{thm}\label{mainthm} Every infinite finitely generated subgroup of the Gupta--Sidki {\rm 3}-group $G$ is commensurable with $G$ or $G\times G$. \end{thm}
This raises two questions. The first is whether the commensurability classes are actually distinct; by contrast, the Grigorchuk group is commensurable with its direct square. This is the motivation for the work carried out in \cite{MeWil}. In that paper, more general results on the structure of subgroups of branch groups yield that the classes are indeed distinct, for many examples of groups acting on $p$-regular trees where $p$ is an odd prime.
The second question concerns the restriction to $p=3$ in \autoref{mainthm}. It seems likely that this restriction is unnecessary. However, our proof relies heavily on a delicate length reduction argument that is only available for $p=3$.
Our first main theorem allows us to prove another result about the subgroup structure of $G$, also parallel to a result in \cite{griwil}. \begin{thm}\label{thm2} The Gupta--Sidki {\rm 3}-group is subgroup separable and hence has solvable membership problem. \end{thm}
Recall that a group is \emph{subgroup separable} (or LERF) if each of its finitely generated subgroups is an intersection of subgroups of finite index. This condition is strong and is only known to hold in very special cases such as free groups, surface groups, polycyclic groups and some 3-manifold groups (see \cite{HallFreeLERF, ScottSurfaceLERF, MalcevPolycyclicLERF, 3manifoldLERF}). It is related to the membership problem (or generalized word problem). The membership problem for a finitely generated group $H$ is solvable if there is an algorithm which given a finitely generated subgroup $K\leq H$ and an element $h\in H$ determines whether or not $h\in K$. If a group is subgroup separable and all its finite quotients can be effectively determined then it has solvable membership problem. The Gupta--Sidki $p$-groups, the Grigorchuk group and, more generally, groups with finite $L$-presentations are examples of groups all of whose finite quotients can be effectively determined (see, for instance, \cite{Hartung_Cosetenumeration}).
The proofs of our main theorems both rely on an auxiliary result, \autoref{thm3}, on finitely generated subgroups of the Gupta--Sidki 3-group. It is worth mentioning that all our results hold for the general case where $p$ is any odd prime, except for the length reduction argument in this \autoref{thm3}. For this reason, all definitions and preliminary results are stated for the general case in \autoref{Defs} and we only focus on the case $p=3$ for the proof of \autoref{thm3} in \autoref{prelims}. We depart from preliminary results in \autoref{max section}, where we discuss maximal subgroups. In \cite{PervovaGrig, PervovaGupta} it was shown that all maximal proper subgroups of the Grigorchuk and Gupta--Sidki groups have finite index. We will show in \autoref{maxsugps} that this maximal subgroups property passes to all finitely generated subgroups of the Gupta--Sidki 3-group, as a consequence of \autoref{mainthm}. The proofs of \autoref{mainthm} and \autoref{thm2} are presented in the final \autoref{pfthm1}. The arguments generalize easily to the case $p>3$ assuming that the analogue of \autoref{thm3} holds. In this final section we also establish the following characterization of finite subgroups of the 3-group, using the analysis carried out in \autoref{prelims}:
\begin{thm}\label{finitesubgps} Let $H$ be a finitely generated subgroup of $G$. Then $H$ is finite if and only if no vertex section of $H$ is equal to $G$. \end{thm} Minor changes in the proof of this theorem and the detailed analysis carried out in \cite{griwil} yield an identical characterization of the finite subgroups of the Grigorchuk group. \begin{thm}\label{grigfinite} Let $H$ be a finitely generated subgroup of the Grigorchuk group $\Gamma$. Then $H$ is finite if and only if no vertex section of $H$ is equal to $\Gamma$. \end{thm}
\section{Definitions and preliminaries}\label{Defs}
We begin by defining the trees on which our groups act, the automorphism groups of these trees, and some of their subgroups which will be used in our proofs.
For an integer $d\geq 2$, we may think of the vertices of the \emph{$d$-regular rooted tree $T$} as finite words over the alphabet $\{0,\ldots, d-1\}$. We think of the empty word $\varepsilon$ as the root. The words $u, v$ are joined by an edge if $v=uw$ (or $u=vw$) for some $w$ in the alphabet.
We can impose a metric on $T$ by assigning unit length to each edge. Then vertex $v$ will be at distance $n$ from vertex $u$ if the unique path joining them consists of $n$ edges. The distance of a vertex $v$ from the root is the \emph{level of $v$}. The set of vertices of level $n$ is called the \emph{$n$th layer} of $T$ and is denoted by $\mathcal{L}_n$.
For a vertex $v\in \mathcal{L}_n$, the subtree consisting of vertices of level $m\geq n$ separated from the root by $v$ is the \emph{subtree rooted at $v$} and it is denoted by $T_v$. Since $T$ is regular, for every vertex $v$ there is an obvious isomorphism from $T$ to $T_v$ taking $u$ to $vu$. We will identify all $T_v$ with $T$ in the rest of the paper.
An \emph{automorphism} of a rooted tree $T$ is a permutation of the vertices that preserves the adjacency relation. We denote the group of all automorphisms of $T$ by $\Aut T$. For any vertex $v$ of $T$ write $v^g$ for the image of $v$ under $g\in \Aut T$. We write $$\St(v):=\{g\in \Aut T \mid v^g=v\}$$ for the \emph{stabilizer} of $v$. The subgroup $$\St(n):=\bigcap_{v\in\mathcal{L}_n} \St(v)$$
is the $n$th \emph{level stabilizer}. For any subgroup $\Gamma\leq \Aut T$, denote by $\St_{\Gamma}(v)$ and $\St_{\Gamma}(n)$ the intersection of $\Gamma$ with the above subgroups. The level stabilizers $\St_{\Gamma}(n)$ have finite index in $\Gamma$. We say that $\Gamma$ has the \emph{congruence subgroup property} if every finite index subgroup of $\Gamma$ contains some $\St_{\Gamma}(n)$.
For every $x\in \St(v)$ there is a unique automorphism $x_v\in \Aut T$ which is simply the restriction of $x$ to the subtree $T_v(=T)$. Hence for every subgroup $\Gamma\leq \Aut T$ and every vertex $v$ of $T$ this restriction yields a homomorphism $$\varphi_v:\St_{\Gamma}(v)\rightarrow \Aut T, \quad x\mapsto x_v.$$ The image of this homomorphism is denoted by $\Gamma_v$ and called the \emph{vertex section} of $\Gamma$ at $v$ (some authors call it an upper companion group). We may also refer to $x_v=\varphi_v(x)$ as the vertex section of $x$ at $v$.
Observe that if $v=uw$ then $\varphi_v=\varphi_u \circ \varphi_w$.
Notice that, although the image $\varphi_v(\St_{\Gamma}(v))$ is a subgroup of $\Aut T$, it need not be a subgroup of $\Gamma$. We say that a subgroup $\Gamma\leq \Aut T$ is \emph{fractal} if $\varphi_v(\St_{\Gamma}(v))=\Gamma$ for every vertex $v$ of $T$.
For every $n$, define \begin{align*} \psi_n:\St_{\Gamma}(n) &\rightarrow \prod_{v\in\mathcal{L}_n} \varphi_v(\St_{\Gamma}(v)) \leq \prod_{v\in\mathcal{L}_n} \Aut T_v\\
x&\mapsto (x_v)_{v\in\mathcal{L}_n}. \end{align*} This is an embedding, so we may identify elements of $\St_{\Gamma}(n)$ with their image under $\psi_n$. For the first level we usually omit the subscript and just write $\psi$. In fact, we often (following the custom in the literature) omit the $\psi$ altogether for elements of the first level stabilizer.
We can now introduce the family of Gupta--Sidki $p$-groups. Let $p>2$ be a prime and let $T$ be the $p$-regular rooted tree. The \emph{Gupta--Sidki $p$-group} $G=\langle a , b \rangle$ is the subgroup of $\Aut T$ generated by two automorphisms $a$ and $b$. The automorphism $a$ cyclically permutes the first layer as $(0\; 1\; \ldots\; p-1)$. The element $b=(a,a^{-1},1,\ldots,1,b)$ is recursively defined by its action on subtrees rooted at the first layer: it acts on $T_0$ as $a$ acts on $T$, as $a^{-1}$ on $T_1$, as $b$ on $T_{p-1}$ and trivially on the other subtrees. The figure below shows the action of $b$ on $T$ for $p=3$:
\begin{center} \tikzset{
on each segment/.style={
decorate,
decoration={
show path construction,
moveto code={},
lineto code={
\path [#1]
(\tikzinputsegmentfirst) -- (\tikzinputsegmentlast);
},
curveto code={
\path [#1] (\tikzinputsegmentfirst)
.. controls
(\tikzinputsegmentsupporta) and (\tikzinputsegmentsupportb)
..
(\tikzinputsegmentlast);
},
closepath code={
\path [#1]
(\tikzinputsegmentfirst) -- (\tikzinputsegmentlast);
},
},
},
mid arrow/.style={postaction={decorate,decoration={
markings,
mark=at position .5 with {\arrow[#1]{stealth}}
}}}, }
\begin{tikzpicture}
\tikzstyle{every node}=[fill=black,circle,inner sep=0pt, minimum size=3pt]
\tikzstyle{level 1}=[sibling distance=38mm]
\tikzstyle{level 2}=[sibling distance=17mm]
\tikzstyle{level 3}=[sibling distance=7mm]
\node[circle, draw] (root) {}
child {
node[circle,draw, label=left:\textcolor{red}{$a$}] (level0start) {}
child { node[circle, draw, red] (11) {} }
child { node[circle, draw, red, label=below:$\vdots$] (12) {} }
child { node[circle, draw, red] (13) {} }
}
child {
node[circle,draw, label=right:\textcolor{red}{$a^{-1}$}] (level0middle) {}
child { node[circle, draw, red] (21) {} }
child { node[circle, draw, red, label=below:$\vdots$] (22) {} }
child { node[circle, draw, red] (23) {} }
}
child {
node[circle,draw, label=right:\textcolor{red}{$b$}] (level0end) {}
child { node[circle, draw, label=right:\textcolor{red}{{\small$a$}}] {}
child { node[circle, draw, red] (311) {} }
child { node[circle, draw, red, label=below:$\vdots$] (312) {} }
child { node[circle, draw, red] (313) {} }
}
child { node[circle, draw, label=right:\textcolor{red}{{\small$a^{-1}$}}] {}
child { node[circle, draw, red] (321) {} }
child { node[circle, draw, red, label=below:$\vdots$] (322) {} }
child { node[circle, draw, red] (323) {} }
}
child { node[circle, draw, label=right:\textcolor{red}{{\small$b$}}] {}
child { node[circle, draw, label=right:\textcolor{red}{{\footnotesize$a$}}] (331) {} }
child { node[circle, draw, label=right:\textcolor{red}{{\footnotesize$a^{-1}$}},label=below:$\vdots$] (332) {} }
child { node[circle, draw, label=right:\textcolor{red}{{\footnotesize$b$}}] (333) {} }
}
};
\path[draw=red,thick,dashed, postaction={on each segment={mid arrow=red}}]
(11) to[bend right] (12) to[bend right] (13) to[bend right] (11) (23) to[bend left] (22) to[bend left] (21) to[bend left] (23) (311) to[bend right] (312) to[bend right] (313) to[bend right] (311) (323) to[bend left] (322) to[bend left] (321) to[bend left] (323);
\end{tikzpicture}
\end{center}
We establish some preliminary results and properties of the Gupta--Sidki $p$-groups that will be useful later on. \\
\noindent \textbf{Notation.} From now on, $G$ will usually denote the Gupta--Sidki $p$-group unless otherwise stated, and we may omit the subscript in $\St_G(n)$, $\St_G(v)$. The derived subgroup of $G$ will be denoted by $G'$ and the direct product of $i$ copies of $G$ by $G^{(\times i)}$. \\
It is immediate from the definition of $G$ as a subgroup of $\Aut T$ that $G$ is residually finite (as $\bigcap_{n=1}^{\infty}\St_G(n)=1$). It follows from \cite{GS_embeddingpgroups} that $G$ is \emph{just infinite}; that is, $G$ is infinite but all its proper quotients are finite.
For $i=1,\ldots,p-1$, we write $$b_i:=b^{a^i}=(1,\ldots,b,a,a^{-1},\ldots,1)$$ where $b$ is in the $i$th co-ordinate and $b_0:=b$. Let $B$ denote $\langle b^G \rangle$, the normal closure of $b$. Then it is easy to see that $B=\langle b^G \rangle$ is equal to $\langle b,b_1,\ldots,b_{p-1}\rangle=\St_G(1)$. Note also that $b$ has order $p$.
We include the proof of the following to illustrate the use of projection arguments since these will play an important role later on.
\begin{prop}[\cite{guptasidki}]\label{subdirect} For every $n$, the $n$th level stabilizer $\St_G(n)$ is a subdirect product of $p^n$ copies of $G$. Therefore $G$ is infinite and fractal. \end{prop}
\begin{proof} We proceed by induction.
For $n=1$ the map $\psi:\St_G(1)\rightarrow G^{(\times p)}$ is a homomorphism. We claim that $\varphi_i(\St_G(1))=G$ for $i\in\{0,\ldots,p-1\}$. This is easily seen by examining the images of generators of $B$:
\begin{center}
\begin{tabular}{c}
$\psi(b)=(a,a^{-1},1,\ldots,b)$,\\
$\psi(b_1)=(b,a,a^{-1},1,\ldots,1)$, \\
\vdots \\
$\psi(b_{p-1})=(a^{-1},1,\ldots,1,b,a)$.
\end{tabular} \end{center}
Suppose that the result is true for $n\geq1$.
Then a similar argument as for $\St_G(1)$ shows that $\St_G(n+1)$ is a subdirect product of $p$ copies of $\St_G(n)$, each of them a subdirect product of $p^n$ copies of $G$. This immediately shows that $G$ is fractal and infinite. \end{proof}
The following are easy generalizations to $p>3$ of results proved in \cite{sidkisubgroups} for $p=3$.
\begin{prop}\label{facts} We have \begin{enumerate}[label={\rm(\roman*)}] \item \label{abelianise} $G/G'=\langle aG', bG'\rangle \cong C_p\times C_p$; \item\label{B'} $\psi(B')=(G')^{(\times p)}$; \item\label{wreath} $G/B'\cong C_p\wr C_p$. \end{enumerate} \end{prop}
\begin{proof} The first item is a straightforward verification. For the second, we have \begin{align*} &\psi([b_0b_1,b_1^{-1}b_{p-1}]) \\
&=[(a,a^{-1},1,\ldots,b)(b,a,a^{-1},\ldots,1), (b^{-1},a^{-1},a,\ldots,1)(a^{-1},1,\ldots,b,a)]\\
&=([ab,b^{-1}a^{-1}],[1,a^{-1}],[a^{-1},a],\ldots,[1,b],[b,a])\\
&=(1,\ldots,1,[b,a]) \end{align*} and it is easy to see that, for $i=1,\ldots,p-1$, $$\psi([b_0b_1,b_1^{-1}b_{p-1}]^{a^i})=\psi([b_ib_{i+1},b_{i+1}^{-1}b_{p+i-1}]) =(1,\ldots,[b,a],\ldots,1)$$ where $[b,a]$ is in the $i$th coordinate.
From the above we obtain $$G/B'=G/(G')^{(\times p)}=\langle a\rangle B/(G')^{(\times p)}\cong C_p\wr C_p.$$ \end{proof}
We are now able to show a commensurability property, which will be used in the next sections without mention. \begin{prop}\label{comm} For every odd prime $p$, if $i\equiv j \mod p-1$ then $G^{(\times i)}$ is commensurable with $G^{(\times j)}$. \end{prop}
\begin{proof} For fixed $i\in\{0,\ldots,p-1\}$, we show by induction on $n$ that $G^{(\times i)}$ is commensurable with $G^{(\times n(p-1)+i)}$. For the base case, we deduce from parts \ref{B'} and \ref{wreath} of \autoref{facts} that $G^{(\times p-1+i)}=G^{(\times p)}\times G^{(\times i-1)}$ is commensurable with $G\times G^{(\times i-1)}= G^{(\times i)}$. Now suppose the claim holds for $n$. Then $G^{(\times (n+1)(p-1)+i)} = G^{(\times n(p-1) + i)}\times G^{(\times p-1)}$ is commensurable with $G^{(\times i)}\times G^{(\times p-1)}$, which is in turn commensurable with $G^{(\times i-1)}\times G=G^{(\times i)}$. Hence $G^{(\times (n+1)(p-1)+i)}$ is commensurable with $G^{(\times i)}$ and our claim follows by induction. \end{proof}
The following will be very useful.
\begin{prop}\label{2.2.2} An element $g\in G$ is in $B$ if and only if there exist $n_0,\ldots,n_{p-1}\in \{0,\ldots,p-1\}$ and $c_0,\ldots,c_{p-1}\in G'$ such that $$g=(a^{-n_{p-1}+n_0}b^{n_1}c_0, a^{-n_0+n_1}b^{n_2}c_1,\ldots, a^{-n_{p-2}+n_{p-1}}b^{n_0}c_{p-1}).$$ Moreover, when this is the case, the representation is unique. \end{prop}
\begin{proof} The `if' direction is obvious. If $g\in B$ then $g$ is a product of $b_0,\ldots,b_{p-1}$ and
$$B/B'=\{b_0^{r_0}b_1^{r_1}\cdots b_{p-1}^{r_{p-1}}B' \mid r_i\in \{0,\ldots,p-1\}\}.$$ Thus for some $n_i\in\{0,\ldots,p-1\},\, c\in B'$ we have \begin{align*} g &=b_0^{n_0}b_1^{n_1}\cdots b_{p-1}^{n_{p-1}}c \\
&= (a^{n_0}b^{n_1}a^{-n_{p-1}}d_0, a^{-n_0+n_1}b^{n_2}d_1,\ldots,b^{n_0}a^{-n_{p-2}+n_{p-1}}d_{p-1}) \\
&= (a^{n_0}a^{-n_{p-1}}b^{n_1}c_0, a^{-n_0+n_1}b^{n_2}c_1,\ldots,a^{-n_{p-2}+n_{p-1}}b^{n_0}c_{p-1}) \end{align*} where $c_i,d_i\in G'$ (since $B'=(G')^{(\times p)}$).
The $n_i$ are uniquely determined and therefore so are the $c_i$.
\end{proof}
\begin{lem}\label{G'>St2} The following assertions hold:
\begin{enumerate}[label=(\roman*)]
\item $b^{(1)}=(b,b,\ldots,b)\in G';$
\item $G'\geq \St(2)$.
\end{enumerate} \end{lem}
\begin{proof}
For the first item note that $bb_1\cdots b_{p-1}\equiv b^{(1)} \mod B'$ because
$bb_1\cdots b_{p-1}=(aba^{-1},b,\ldots,b)$.
Therefore it suffices to show that $bb_1\cdots b_{p-1} \in G'$.
To see this, observe that for $i=0,\ldots, p-1$ we have $b_i=b[b,a^i]$ so that
$$bb_1\cdots b_{p-1}= b^2[b,a]b^{-2} b^3[b,a^2]b^{-3}\cdots b^{-1}[b,a^{-2}]b[b,a^{-1}] \in G',$$
as required.
For the second item, let $g=(g_0,\ldots,g_{p-1})\in \St(2)$, so $g_i\in \St(1)$ for each $i$.
\autoref{2.2.2} implies that $g=(b^nc_0,\ldots, b^nc_{p-1})\equiv (b^{(1)})^n \mod B'$ for some $n$.
Thus $g\in G'$ by the previous part.
\end{proof}
The proofs of \autoref{mainthm} and \autoref{thm2} use the fact that the Gupta--Sidki 3-group has the congruence subgroup property. This is stated in \cite[Proposition 8.4]{BartCongruence}, but the proof given there is not quite clear. Here is a proof which works for any odd prime $p$. \begin{prop}\label{CSP} The Gupta--Sidki $p$-group $G$ has the congruence subgroup property. \end{prop}
\begin{proof} By \cite[Proposition 3.8]{BartCongruence}, it suffices to show that $G''$ contains some level stabilizer $\St(m)$. The case $p>3$ follows from \cite[Lemma 2]{GrigWilPride-equivalence}, which shows that $G''\geq G'\times\cdots\times G'$, the direct product of $p^2$ copies of $G'$. Thus, by \autoref{G'>St2} we have $G''\geq \St(2) \times\cdots\times \St(2) \geq \St(4)$.
For the case $p=3$ we must prove an analogous version of \cite[Lemma 2]{GrigWilPride-equivalence}. Let $C=[G',G]$ be the third term of the lower central series of $G$. Since $G'$ contains $B'=G'\times G'\times G'$ and the elements $[a,b]^{a^{-1}}=(a, ab, b^{-1}a)$, $b^{(1)}=(b,b,b)$, we have $$([x,a],1,1)\in G'' \text{ and } ([x,b],1,1) \in G''$$ for any $x\in G'$. Therefore $G''\geq C\times 1\times 1$ as $C=\langle \{[x,a], [x,b] \mid x\in G'\}^G\rangle $ and $G''\lhd \St(1)$. Conjugating by suitable powers of $a$ we obtain that $G''\geq C\times C\times C$. Now, $[[b^{-1},a],b_1b_2]=(1,[a,b],1)\in C$ and from this we conclude that $C\geq G'\times G'\times G'$. Thus $G''\geq \St(4)$ as above. \end{proof}
This property will be important for the proofs of \autoref{mainthm} and \autoref{maxsugps} but it is also obviously useful for the study of finite quotients of $G$. In this direction, we point out that in \cite{ggscongruence}
the authors give an explicit formula for the indices $|\Gamma:\St_{\Gamma}(n)|$ not just for the Gupta--Sidki $p$-group, but a more general class of groups $\Gamma$ which act on $p$-regular rooted trees (\emph{GGS groups}). They also prove, using different methods, some of the properties stated in the previous lemmas for arbitrary GGS groups.
The following lemma will be essential in the proof of \autoref{thm3}.
\begin{lem}\label{roadmap} Let $H$ be a subgroup of $G$ which is not contained in $\St(1)$.
Then either all first level vertex sections of $H$ are equal to $G$,
or they are all contained in $\St(1)$ so that $\St_H(1)=\St_H(2)$. \end{lem}
\begin{proof} Denote by $H_0,\dots, H_{p-1}$ the first level vertex sections of $H$. We claim that they are all conjugate in $G$. Since $H$ is not contained in $B$ there exists $as\in H$ with $s=(s_0,\ldots,s_{p-1})\in B$. Thus, for every $h=(h_0,\ldots,h_{p-1})\in \St_H(1)$ we have $h^{as}=(h_{p-1}^{s_0},h_0^{s_1},\ldots,h_{p-2}^{s_{p-1}})\in \St_H(1)$. From this we see that $H_i=H_{i-1}^{s_i}$ for $i=0,\ldots,p-1$. The claim follows repeating this argument with powers of $as$.
Hence we may assume that no $H_i$ is equal to $G$. We examine the image of $H$ modulo $B'$ (equivalently, the images of the $H_i$ modulo $G'$). Since all these vertex sections are conjugate in $G$, they must have the same images modulo $G'$ and the possibilities for these are $\langle ab^k\rangle$ for $k\in\mathbb{F}_p$, or $\langle b\rangle$, or $\langle a\rangle\langle b\rangle$. If $H_iG'=\langle a\rangle\langle b\rangle G'$ then $H_i=G$. Suppose not, then $H_i$ is contained in some maximal subgroup $M<G$, which by \cite{PervovaGupta} has index $p$ in $G$. Thus $M\geq G'$ and $G=\langle a\rangle\langle b\rangle G'=H_iG'\leq MG'=M$, a contradiction.
So either $H_iG'=\langle b\rangle G'$ for all $i$ or there exists $k\in\mathbb{F}_p$ such that $H_iG'=\langle ab^k\rangle G'$ for all $i$. Pick some $h=(h_0,\ldots,h_{p-1})\in\St_H(1)$ and consider its image modulo $B'$. If there exist $i\neq j$ such that $h_iG'$ and $h_jG'$ lie in different cyclic subgroups of $G/G'$ then, conjugating by suitable elements of $H\setminus \St(1)$ we would obtain $H_iG'=H_jG'=\langle a\rangle\langle b\rangle G'$, a contradiction to the above. Thus all $h_iG'$ lie in the same cyclic subgroup of $G/G'$. Our ultimate goal is to show that this cyclic subgroup is $\langle b\rangle G'$, so suppose for a contradiction that there is some $k\in\mathbb{F}_p\setminus\{0\}$ and some $(r_0,\dots,r_{p-1})\in\mathbb{F}_p^p$ such that $$(h_0G',\dots,h_{p-1}G')=((ab^k)^{r_0}G',\dots, (ab^k)^{r_{p-1}}G').$$ Now, by \autoref{2.2.2}, $$(h_0G',\ldots, h_{p-1}G')=(a^{-n_{p-1}+n_0}b^{n_1}G', a^{-n_0+n_1}b^{n_2}G',\ldots, a^{-n_{p-2}+n_{p-1}}b^{n_0}G')$$ for some $n_i\in\mathbb{F}_p$. Comparing these representations of $hB'$, we obtain the following equations describing the exponents of $a$ and $b$:
$$(n_0,\dots,n_{p-1})C=(r_0,\dots,r_{p-1}) \textrm{ and } (n_0,\dots,n_{p-1})P=k(r_0,\dots,r_{p-1})$$ where $$C=\begin{pmatrix}
1 & -1 & 0 & \dots & 0 \\
0 & 1 & -1 & \dots & 0 \\
\vdots & \vdots &\ddots &\ddots & \vdots \\
0 & 0 &\dots & 1 &-1\\
-1 & 0 & 0 &\dots & 1
\end{pmatrix}$$
and $P$ is the permutation matrix associated to $(1\, 2\, \dots\, p)$. These are equivalent to $$(n_0,\dots,n_{p-1})(kCP^{-1}-I)=(0,\dots,0).$$ The matrix $kCP^{-1}-I$ is a circulant matrix over $\mathbb{F}_p$ with non-zero determinant. Hence $(n_0,\dots,n_{p-1})=(0,\dots,0)$ is the only solution and $h\in B'$, which finishes the proof of the lemma. \end{proof}
The proof of \autoref{mainthm} relies on a word length reduction argument.
Instead of the usual word length for finitely generated groups, we use a length function which only takes into account the number of conjugates of $b$.
Let $g\in G$ and let $a^{s_0}b^{r_1}a^{s_1}\cdots b^{r_m}a^{s_m}$ be a shortest (in the usual sense) word in $\{a, b\}$ representing $g$. We can rewrite this word as $$b_{i_1}^{r_1}\cdots b_{i_m}^{r_m}a^v$$ where $b_{i_j}\neq b_{i_{j+1}}$ for each $j$ and $v\in\{0,\ldots, p-1\}.$
Define the \emph{length} $l(g)$ of $g$ to be $m$. Thus $l(g)$ is the number of conjugates of $b$ in a shortest word in $a,b$ representing $g$.
The following easy lemma shows how the length of elements of $G$ is reduced as we project down levels of the tree. Notice that this holds for any odd prime $p$.
\begin{lem}\label{length} Let $g\in \St(1)$ and suppose that $l(g)=m$. Then \begin{enumerate}[label=\rm{(\roman*)}] \item $l(\varphi_k(g))\leq \frac{1}{2}(m+1)$ and \item if $g\in \St(2)$ then $l(\varphi_j(\varphi_k(g)))\leq \frac{1}{4}(m+3)$ \end{enumerate} for $k,j\in \{0,\ldots,p-1\}$. \end{lem}
\begin{proof} Since $g\in\St(1)$, we can write it in the form $g=b_{i_1}^{r_1}\cdots b_{i_m}^{r_m}$ for some $i_j, r_j \in \{0,\ldots,p-1\}$. Now, the image $\varphi_k(g)$ for each $k\in\{0,\ldots,p-1\}$ is, at worst, of one of the following forms: \begin{enumerate} \item $a^{r_1}b^{r_2}\cdots a^{r_m}$, so $l(\varphi_k(g))\leq\frac{m-1}{2}$; \item $a^{r_1}b^{r_2}\cdots a^{r_{m-1}}b^{r_m}$, so $l(\varphi_k(g))\leq\frac{m}{2}$; \item $b^{r_1}a^{r_2}\cdots b^{r_{m-1}}a^{r_m}$, so $l(\varphi_k(g))\leq\frac{m}{2}$; \item $b^{r_1}a^{r_2}\cdots b^{r_m}$, so $l(\varphi_k(g))\leq\frac{m+1}{2}$. \end{enumerate} This proves the first item.
It now follows that for any $g\in \St(2)$ we have $$l(\varphi_j(\varphi_k(g)))\leq \frac{1}{2} \left( \frac{m+1}{2}+1 \right) =\frac{1}{4}(m+3),$$
for every $j,k\in\{0,\ldots,p-1\}$. \end{proof}
\section{Maximal subgroups}\label{max section}
In this section we establish some results about groups whose maximal subgroups all have finite index. Once \autoref{mainthm} is proved, these results will show that, for each finitely generated subgroup of the Gupta--Sidki 3-group, all maximal subgroups have finite index. The results and proofs are analogous to those in \cite{griwil}.
\noindent\textbf{Notation.} We will write $H\leq_{\mathrm{f}} \Gamma$ and $H\leq_{\mathrm{s}} \Gamma_1 \times \cdots \times \Gamma_n$ to mean, respectively, that $H$ is a finite index subgroup of $\Gamma$ and that $H$ is a subdirect product of $\Gamma_1 \times \cdots \times \Gamma_n$.
\begin{lem}\label{Lemma1} Let $\Gamma$ be an infinite finitely generated group and $H$ a subgroup of finite index in $\Gamma$. If $\Gamma$ has a maximal subgroup $M$ of infinite index then $H$ has a maximal subgroup of infinite index containing $H\cap M$. \end{lem}
\begin{proof} We first show that any proper subgroup of $H$ containing $H\cap M$ must be of infinite index in $H$. Suppose for a contradiction that there is a proper finite index subgroup $L$ of $H$ containing $H\cap M$. Then $K$, the normal core of $L$ in $\Gamma$, is of finite index in $\Gamma$ and therefore $KM\leq \Gamma$ must be of finite index too. Now, $M$ is a maximal subgroup of $\Gamma$ contained in $KM$, so either $M=KM$ or $KM=\Gamma$.
If the former holds, then $M$ is of finite index in $\Gamma$, a contradiction. If the latter is true, we obtain $H=K(M\cap H)\leq L$, contradicting the assumption that $L$ is a proper subgroup of $H$.
Since $H$ is finitely generated, every proper subgroup is contained in a maximal subgroup (this can be shown without using Zorn's Lemma, see \cite{Neumann01041937}) and, by the above, the maximal subgroup containing $H\cap M$ must be of infinite index in $H$. \end{proof}
Recall that a \emph{chief factor} of a group $\Gamma$ is a minimal normal subgroup of a quotient group of $\Gamma$.
\begin{lem}[\cite{griwil}, Lemma 3]\label{chief factors} Let $\Gamma_1,\ldots,\Gamma_n$ be groups with the properties that all chief factors are finite and all maximal subgroups have finite index.
If $\Delta\leq_{\mathrm{s}}\Gamma_1\times\cdots\times\Gamma_n$ then all chief factors of $\Delta$ are finite and all maximal subgroups of $\Delta$ have finite index. \end{lem}
\begin{thm}\label{maxsugps} Let $\Gamma:=G^{(\times k)}$ be the direct product of $k$ copies of $G$. If $H$ is a group commensurable with $\Gamma$ then all maximal subgroups of $H$ have finite index in $H$. \end{thm}
\begin{proof}
By definition of commensurability, there exist $K\leq_{\mathrm{f}} H$ and $J\leq_{\mathrm{f}}\Gamma$ with $K$ isomorphic to $J$. For $i=1,\ldots,k$, let $G_i$ denote the $i$th direct factor of $\Gamma$. Then for every $i$ the subgroup $J_i:=J\cap G_i$ has finite index in $G_i$. As $G$ has the congruence subgroup property, for each $i$ there is some $n_i$ with $1\times\cdots\times \St(n_i)\times\cdots\times 1\leq J_i$ and so $S:=\St(n_1)\times\cdots\times\St(n_k)\leq_{\mathrm{f}} J$.
Now, $G$ is residually finite and just infinite and so all of its chief factors are finite. Hence, by the main result in \cite{PervovaGupta}, all of its maximal subgroups have finite index. Furthermore, we saw in \autoref{subdirect} that $\St(n)$ is a subdirect product of $p^n$ copies of $G$. Thus $S$ satisfies the assumptions of \autoref{chief factors} and all of its maximal subgroups are of finite index. \autoref{Lemma1} then implies that all maximal subgroups of $J$ must have finite index in $J$. The result now follows on applying \autoref{Lemma1} to $K\cong J$ and $H$. \end{proof}
\section{The case $p=3$: a key theorem}\label{prelims}
\noindent\textbf{Notation.} From now on we restrict our attention to the case $p=3$, so $G$ will denote the Gupta--Sidki 3-group. Recall that $H_v$ denotes the vertex section of a subgroup $H\leq \Aut T$ at vertex $v$; that is, $H_v=\varphi_v(\St_H(v))=\varphi_{u_n}\circ\cdots\circ\varphi_{u_1}(\St_H(v))$ where $v=u_1\dots u_n$ is considered as a string of letters $u_i\in\{0,\ldots,p-1\}$.
\begin{thm}\label{thm3} Let $\mathcal{X}$ be a family of subgroups of $G$ satisfying \begin{enumerate}[label={\rm(\Roman*)},ref=(\Roman*)] \item\label{prop1} $1\in \mathcal{X}$, $G\in \mathcal{X}$; \item\label{prop2} if $H\in \mathcal{X}$ then $L\in \mathcal{X}$ for all $L\leq G$ such that $H\leq_{\mathrm{f}} L$; \item\label{prop3} if $H$ is a finitely generated subgroup of $\St(1)$ and all first level vertex sections of $H$ are in $\mathcal{X}$ then $H\in \mathcal{X}$. \end{enumerate} Then all finitely generated subgroups of $G$ are in $\mathcal{X}$. \end{thm}
\begin{proof} Note that if $\mathcal{X}$ satisfies properties \ref{prop1}--\ref{prop3} then so does the subfamily $\{H \mid H^g \in \mathcal{X} \text{ for all } g \in G\}$. We may therefore replace $\mathcal{X}$ by this subfamily and assume that if $H\in \mathcal{X}$ then so is every $G$-conjugate of $H$.
Suppose for a contradiction that there are finitely generated subgroups of $G$ which are not in $\mathcal{X}$.
Choose among them some subgroup $H$ generated by a finite set $S$ such that $D=\max \{l(s) \mid s \in S\}$ is as small as possible.
If $H\leq \St(1)$ then by \ref{prop3} at least one of the first level vertex sections of $H$ is not in $\mathcal{X}$ and the generating set of this vertex section has elements of length at most $\frac{1}{2}(D+1)<D$ by \autoref{length}, contradicting the choice of $H$.
Therefore $H$ is not contained in $\St(1)$. We will show that there exists some $v\in\mathcal{L}_2$ such that the vertex section $H_v$ is not in $\mathcal{X}$ and has a generating set consisting of elements of length less than $D$.
Since $\St_H(1)$ is a finitely generated subgroup of $\St(1)$, \ref{prop3} implies that not all first level vertex sections of $\St_H(1)$ are in $\mathcal{X}$.
However, if one of them is in $\mathcal{X}$ then, as all first level vertex sections of $H$ are conjugate in $G$,
they must all be in $\mathcal{X}$. Thus no first level vertex section of $H$ is in $\mathcal{X}$; in particular, none of them is equal to $G$, and \autoref{roadmap} asserts that they are all contained in $\St(1)$.
For each $k\in\{0,1,2\}$, property \ref{prop3} again implies that one of $H_{k0},H_{k1},H_{k2}$ is not in $\mathcal{X}$. We claim that for some $k$ every such vertex section is generated by elements of length less than $D$.
Pick some element $t\in S\setminus \St(1)$.
Then, as $\St_H(1)$ has index 3 in $H$, the set $T:=\{1,t,t^{-1}\}$ is a Schreier transversal to $\St_H(1)$ in $H$. Consequently, $\St_H(1)=\St_H(2)$ is generated by $$X=\{t_1st_2^{-1} \mid t_i\in T, s\in S, t_1st_2^{-1}\in \St_H(1)\}.$$ The elements of this set have length at most $3D$, so the second level vertex sections of $H$ are generated by elements of length at most $(3D+3)/4$, by \autoref{length}. Our claim follows if $D>3$. For $D=3$ and $D=2$ the claim follows from \autoref{D=3} and \autoref{D=2}, respectively, while \autoref{D=1} shows that if $D=1$ then $H\in \mathcal{X}$. \end{proof}
\begin{lem}\label{D=1}
Let $H$ be a subgroup of $G$ with a finite generating set $S$ consisting of elements of length at most 1.
Then $H\in \mathcal{X}$. \end{lem} \begin{proof} Any $s\in S$ must be of the form $a^k, \, b_i^r, \, b_i^ra^k$ where $k,r\in\{1,2\}$ and $i\in\{0,1,2\}$.
If $S$ consists of only one element then $H$ is finite and therefore in $\mathcal{X}$ by \ref{prop1} and \ref{prop2}. Similarly, if all elements of $S$ are of the form $a^k$ then $H\leq \langle a\rangle$ is finite and again $H\in \mathcal{X}$. Thus we may assume that $S$ contains at least two elements, not all of the form $a^k$.
If $S$ contains an element of the form $a^k$ and an element of any other form then $H=G\in\mathcal{X}$. Suppose that all elements of $S$ are of the form $b_i^r$. Then either $H=\langle b_0,b_1,b_2 \rangle =\St(1)$, so $H\in \mathcal{X}$ by \ref{prop1}, \ref{prop3} and \autoref{subdirect}; or $H=\langle b_{i}^{r}, b_{j}^{q}\rangle < \St(1)$ for $i\neq j\in\{0,1,2\}$ and $r,q\in\{1,2\}$, so two of the first level vertex sections of $H$ are $G$ and the other one is $\langle a\rangle$; therefore $H\in \mathcal{X}$ by \ref{prop3}.
Suppose that $S$ contains an element of the form $b_i^ra$ (we may assume that the power of $a$ is 1 as $(b_i^ra)^{-1}=b_{i+1}^{-r}a^2$). If there is some $b_j^q \in S$ such that $j=i$ then $b_j^{p-r}b_i^ra=a\in H$ so $H=G\in\mathcal{X}$. If there is some $b_j^q\in S$ with $j\neq i$, then $b_j^q=(a^q,a^{-q},b^q)^{a^j}$ has $a^{\pm q}$ in the $i$th coordinate; but $(b_i^ra)^3=b_i^rb_{i-1}^rb_{i+1}^r=(b^r,b_1^r,b^r)^{a^i}$ has $b^r$ in the $i$th coordinate, so the vertex section $H_i$ of $H$ at vertex $i$ is $G\in \mathcal{X}$. Since $H\nleq \St(1)$, the first level vertex sections of $H$ are conjugate in $G$, hence all first level vertex sections of $H$ are in $\mathcal{X}$ and $H\in \mathcal{X}$.
Finally, suppose that all elements of $S$ are of the form $b_i^ra$, so $S$ contains elements $b_i^ra$, $b_j^qa$ and $b_i^ra(b_j^qa)^{-1}\in H$. If $i=j$ then $a\in H$ and $H=G\in\mathcal{X}$. If $i\neq j$ then $$b_i^ra(b_j^qa)^{-1}=(a^r,a^{-r},b^r)^{a^i}(a^{-q},a^q,b^{-q})^{a^j}$$ has $b^ra^{\pm q}$ in the $i$th coordinate while $(b_i^ra)^3\in H$ has $b^r$ in the $i$th coordinate. Thus $H_i=G$ and $H\in \mathcal{X}$ by the argument in the previous paragraph. \end{proof}
\begin{lem}\label{D=2} Let $H \nleq \St(1)$ be a subgroup of $G$ with a finite generating set $S$ consisting of elements of length at most $2$ and such that $H_u\leq\St(1)$ for all $u\in \mathcal{L}_1$.
Then $H_v$ is generated by elements of length at most $1$ for all $v\in \mathcal{L}_2$. \end{lem}
\begin{proof} First note that no generator of $H$ is in $\St(1)$ since the only possibilities are elements of the form $b_i^r$ and $b_{i_1}^{r_1}b_{i_2}^{r_2}$ which are not in $\St(2)$.
If some $t\in S \setminus \St(1)$ has length less than 2 then, as in the proof of \autoref{thm3}, the set $T=\{1,t,t^{-1}\}$ is a transversal of $\St_H(1)$ in $H$ and $\St_H(1)$ is generated by the set $X=\{t_1st_2^{-1} \mid t_i\in T, s\in S, t_1st_2^{-1}\in \St_H(1)\}$. The elements of $X$ have length at most 4, so by \autoref{length} all second level vertex sections of $H$ will be generated by elements of length at most 1.
Suppose then that all elements of $S$ have length 2, that is, they are all of the form $b_{i_1}^{r_1}b_{i_2}^{r_2}a$. It suffices to consider only elements of this form as $(b_{i_1}^{r_1}b_{i_2}^{r_2}a)^{-1}=b_{i_2+1}^{-r_2}b_{i_1+1}^{-r_1}a^2$. Pick some $t=b_{i_1}^{r_1}b_{i_2}^{r_2}a\in S$ to form the transversal $T$ so that $\St_H(1)$ is generated by $X$ as above. Then every element of $X$ is of the form $t^{-1}s, \, st^{-1}, \, t^3$ or $tst$ where $s=b_{j_1}^{q_1}b_{j_2}^{q_2}a\in S$. We show that all possible combinations of $i_1, i_2, j_1, j_2, r_1, r_2, q_1, q_2$ give rise to generators of second level vertex sections of $H$ of length at most 1.
The forms $t^{-1}s=b_{i_2+1}^{-r_2}b_{i_1+1}^{-r_1}b_{j_1+1}^{q_1}b_{j_2+1}^{q_2}$ and $st^{-1}=b_{j_1}^{q_1}b_{j_2}^{q_2}b_{i_2}^{-r_2}b_{i_1}^{-r_1}$ yield elements of length at most 4. Thus, by \autoref{length}, their second level vertex sections have length at most 1.
Since $i_1\neq i_2$, an element of the form $t^3=b_{i_1}^{r_1}b_{i_2}^{r_2}b_{i_1-1}^{r_1}b_{i_2-1}^{r_2}b_{i_1+1}^{r_1}b_{i_2+1}^{r_2}$ will have at most two separate instances of each of $b_0, b_1, b_2$. Hence its first level vertex sections will have length at most 2 and so the second level vertex sections have length at most 1.
More care is required for the form $tst=b_{i_1}^{r_1}b_{i_2}^{r_2}b_{j_1-1}^{q_1}b_{j_2-1}^{q_2}b_{i_1+1}^{r_1}b_{i_2+1}^{r_2}$. Easy combinatorial arguments (using that $i_1\neq i_2$ and $j_1\neq j_2$) show that the first level vertex sections of an element of this form cannot have length greater than 3 and that the only way they can have length 3 is if $i_2=i_1-1$ and either $j_1=i_1+1$ or $j_2=i_1+1$. For ease of exposition, assume without loss of generality that $i_1=0$. Then \begin{align*} tst &=b_{0}^{r_1}b_{2}^{r_2}b_{j_1-1}^{q_1}b_{j_2-1}^{q_2}b_{1}^{r_1}b_{0}^{r_2}\\
&=\begin{cases} b_0^{r_1}b_2^{r_2}b_0^{q_1}b_{j_2-1}^{q_2}b_1^{r_1}b_0^{r_2}, & j_1-1=i_1 \\ b_0^{r_1}b_2^{r_2}b_{j_1-1}^{q_1}b_0^{q_2}b_1^{r_1}b_0^{r_2}, & j_2-1=i_1.
\end{cases} \end{align*} The vertex sections at vertices 0 and 1 have length at most 2, while the one at vertex 2 looks like \begin{align*} &\begin{cases} b^{r_1}a^{r_2}b^{q_1}a^{ q_2}a^{-r_1}b^{r_2}, & j_1-1=i_1=j_2; \\ b^{r_1}a^{r_2}b^{q_1}a^{-q_2}a^{-r_1}b^{r_2}, & j_1-1=i_1, \, j_2=i_1-1;\\ b^{r_1}a^{r_2}a^{q_1}b^{q_2}a^{-r_1}b^{r_2}, & j_2-1=i_1=j_1; \\ b^{r_1}a^{r_2}a^{-q_1}b^{q_2}a^{-r_1}b^{r_2}, & j_2-1=i_1, j_1=i_1-1. \end{cases} \end{align*}
\noindent In the first and third cases we have $$\varphi_2(t^{-1}s)=b^{-r_2}a^{r_1}a^{\pm q_1}a^{\mp q_2}=b^{-r_2}$$ (as $H_2\leq\St(1)$ by assumption); while, in the second and fourth cases we have $$\varphi_2(st^{-1})=a^{\mp q_1}a^{\pm q_2}a^{-r_2}b^{-r_1}=b^{-r_1}.$$ Thus, in all cases the vertex section of $H$ at vertex 22 is $G$, which is indeed generated by elements of length at most 1. \end{proof}
\begin{lem}\label{D=3}
Let $H\nleq \St(1)$ be a subgroup of $G$ with a finite generating set $S$ consisting of elements of length at most 3
and such that $H_u\leq \St(1)$ for all $u\in \mathcal{L}_1$.
Then there exists $k\in\mathcal{L}_1$ such that $H_{kj}$ is generated by elements of length less than 3 for all $j\in \{0,1,2\}$. \end{lem} \begin{proof} If there exists $t\in S\setminus\St(1)$ with $l(t)\leq 2$ then, by the same argument as in the proof of \autoref{D=2}, the generating set $X$ of $\St_H(1)$ consists of elements of length at most 7, the second level vertex sections of which have length strictly less than 3.
If all generators in $S\setminus \St(1)$ have length 3,
pick one of them, $t$, to form the transversal $T$. Assume that $t$ has the form $b_{i_1}^{r_1}b_{i_2}^{r_2}b_{i_3}^{r_3}a$
for $i_1,i_2,i_3\in\{0,1,2\}$, $i_2\neq i_1, i_2\neq i_3$ and $r_1,r_2,r_3=\pm 1$. It suffices to consider elements of this form since $t^{-1}=b_{i_3}^{r_3}b_{i_2}^{-r_2}b_{i_1}^{-r_1}a^{-1}$.
We may pick $k\in \{0,1,2\}$ such that neither $\varphi_k(b_{i_3+1})$ nor $\varphi_k(b_{i_1})$ is a power of $b$. Then the elements of $X$ have length no more than 9 and the choice of $k$ ensures that $\varphi_k(x)=a^{\pm 1}\varphi_k(bz)$ for each $x\in X$ where $l(bz)\leq 8$. Hence the vertex sections $H_{kj}$ for $j\in\{0,1,2\}$ are generated by elements of length at most $(8+3)/4 < 3$, as required. \end{proof}
\section{The case $p=3$: proof of main theorems}\label{pfthm1} Using the length reduction arguments and results in the previous section, we easily obtain a characterization of the finite subgroups of $G$.
\begingroup \def\thethm{\ref*{finitesubgps}} \begin{thm} Let $H$ be a finitely generated subgroup of $G$. Then $H$ is finite if and only if no vertex section of $H$ is equal to $G$. \end{thm} \addtocounter{thm}{-1} \endgroup
\begin{proof} For the non-trivial implication, we make the crucial observation that for each $v\in T$, every vertex section of $H_v$ is a vertex section of $H$. Let $H$ be generated by a finite set $S$. We proceed by induction on $D$, the maximum length of elements in $S$. If $D=1$, then by the proof of \autoref{D=1}, either $H$ is finite or $H_u=G$ for every $u\in \mathcal{L}_1$.
Assume that the assertion of the theorem holds whenever $D\leq n$ with $n\geq 1$. For $D=n+1$ we consider the cases $H\leq \St(1)$ and $H\nleq \St(1)$ separately. If $H\leq \St(1)$ then each first level vertex section $H_u$ of $H$ is generated by elements of length at most $(D+1)/2=(n+2)/2<D$, so that $H_u$ is finite by inductive hypothesis. Thus $H$ itself must be finite as the map $\psi: H\rightarrow H_0\times H_1\times H_2$ is an injective homomorphism.
If $H\nleq \St(1)$ then $H_u\leq \St(1)$ for every $u\in \mathcal{L}_1$ by \autoref{roadmap}. In the case $n=1$, \autoref{D=2} shows that each second level vertex section $H_v$ is generated by elements of length at most 1, and is therefore finite. Thus $H\hookrightarrow \prod_{v\in \mathcal{L}_2} H_v$ is finite. If $n=2$, by \autoref{D=3}, there exists $u\in \mathcal{L}_1$ such that $H_{ui}$ is generated by elements of length at most 2 for all $i\in \{0,1,2\}$. Hence $H_u$ is finite. Since $H\nleq \St(1)$, all first level vertex sections of $H$ are conjugate in $G$ and are therefore finite, making $H$ finite. Lastly, if $n>2$, by the same argument as in the proof of \autoref{thm3}, $H_v$ is generated by elements of length at most $(3D+3)/4<D$ whenever $v\in \mathcal{L}_2$. Hence $H$ is finite and the theorem follows by induction. \end{proof}
The same methods as in the above proof and the analysis carried out in \cite[Theorem 3]{griwil} yield an identical characterization of finite subgroups of the Grigorchuk group $\Gamma=\langle a,b,c,d\rangle$. For the reader's convenience, we sketch a proof of the non-trivial implication. The length function here is as in \cite{griwil}, namely, the usual word length for finitely generated groups.
\begingroup \def\thethm{\ref*{grigfinite}} \begin{thm} Let $H\leq \Gamma$ be finitely generated. Then $H$ is finite if and only if no vertex section of $H$ is equal to $\Gamma$. \end{thm} \addtocounter{thm}{-1} \endgroup
\begin{proof} Let $H$ be generated by a finite set $S$ such that $1\in S$ and $S^{-1}=S$ and let $D$ be the maximum length of elements of $S$. We induct on $D$.
If $D=1$ then either $H$ is finite or $H=\Gamma$, a contradiction.
Assume that the theorem holds whenever $D\leq n$ with $n\geq 1$. For $D=n+1$ we consider the cases $H\leq \St(1)$ and $H\nleq \St(1)$ separately. If $H\leq \St_{\Gamma}(1)$ then each first level vertex section $H_u$ of $H$ is generated by elements of length at most $(D+1)/2=(n+2)/2<D$, by \cite[Lemma 7]{griwil}, so that $H_u$ is finite by inductive hypothesis. Thus $H$ itself must be finite as the map $\psi: H\rightarrow H_0\times H_1$ is injective.
If $H\nleq \St_{\Gamma}(1)$ there are three cases to consider depending on the vertex section $H_0$ at vertex 0. If $H_0 \leq \St_{\Gamma}(1)$ then, by Case 3 in the proof of \cite[Theorem 3]{griwil},
$H_{00}$ and $H_{01}$ are generated by elements of length less than $D$.
Thus $H_{00}$ and $H_{01}$ are finite by inductive hypothesis, so $H_0$ is finite. Now, $H_0, H_1$ are conjugate in $\Gamma$ as $H\nleq \St_{\Gamma}(1)$; hence $H$ is finite.
If $H_0\Gamma'=\langle ad\rangle \Gamma'$ or $\langle a, d\rangle \Gamma'$, then $H_{00}\leq \St_{\Gamma}(1)$ by \cite[Lemma 6]{griwil}. Case 2 of the proof of \cite[Theorem 3]{griwil} shows that $H_{000}$ and $H_{001}$ are generated by elements of length less than $D$ so that $H_{00}$ is finite. As $H$ acts transitively on the second layer of the tree, there exists $h=(a(s_0,s_1),h_1)\in H$ with $s_0,s_1\in \Gamma$ swapping the vertices 00 and 01. For any $g=((g_{00},g_{01}),g_1)\in \St_H(00)$ we have $g^h=((g_{01}^{s_1}, g_{00}^{s_0}), g_1^{h_1})$, whence $H_{00}^{s_1}=H_{01}$. Hence $H_{01}$ is finite, making $H_0$ and therefore $H$ finite.
If $H_0\Gamma'=\langle ac\rangle \Gamma'$ or $\langle a,c\rangle \Gamma'$, then $H_{000}\leq \St_{\Gamma}(1)$ by \cite[Lemma 6]{griwil}. Case 1 of the proof of \cite[Theorem 3]{griwil} shows that $H_{0000}$ and $H_{0001}$ are generated by elements of length less than $D$ so that $H_{000}$ is finite. Since $H$ acts transitively on the third layer, there is an element $h\in H$ swapping the vertices 000 and 001. Then $H_{000}$ and $H_{001}$ are conjugate in $\Gamma$, by an argument similar to the one above. Thus $H$ is finite by the arguments in the previous case and the theorem follows by induction. \end{proof}
We now move on to the proof of the two main results. These can be easily generalized to the case $p>3$, provided that \autoref{thm3} holds.
\noindent\textbf{Notation.} For simplicity, $\mathcal{G}$ will denote `$G$ or $G\times G$'.
\begingroup \def\thethm{\ref*{mainthm}} \begin{thm} Every infinite finitely generated subgroup of the Gupta--Sidki $3$-group $G$ is commensurable with $G$ or the direct square $G\times G$. \end{thm} \addtocounter{thm}{-1} \endgroup
To prove this theorem it suffices to show that properties \ref{prop1}--\ref{prop3} in \autoref{thm3} hold for the class $\mathcal{C}$ of subgroups of $G$ which are finite or commensurable with $\mathcal{G}$.
Clearly, \ref{prop1} holds as the trivial subgroup and $G$ are in this class.
It is also easy to see that \ref{prop2} is satisfied by $\mathcal{C}$: if $H\in \mathcal{C}$ is commensurable with $\mathcal{G}$ and
$J\leq _fH$ is a subgroup isomorphic to a finite index subgroup of $\mathcal{G}$ then,
for any $L\leq G$ containing $H$ as a finite index subgroup, $J$ is also contained in $L$ with finite index.
Thus $L\in \mathcal{C}$.
If $H\in\mathcal{C}$ is finite then any $L$ containing $H$ with finite index must also be finite, so $L\in \mathcal{C}$ too. Thus it only remains to show that $\mathcal{C}$ satisfies \begin{enumerate}[label=(\Roman*), start=3]
\item If $H$ is a finitely generated subgroup of $\St(1)$ and all first level vertex sections of $H$ are in $\mathcal{C}$ then $H\in \mathcal{C}$. \end{enumerate} This will follow from the next lemma, which uses similar ideas to those in \cite{griwil}.
\begin{lem} If $H\leq_{\mathrm{s}} H_1\times \dots \times H_n$ where each direct factor $H_i$ is in $\mathcal{C}$ then $H\in \mathcal{C}$. In other words, $\mathcal{C}$ is closed for subdirect products. \end{lem}
\begin{proof} This reduces to proving that if $H/N_1, H/N_2\in \mathcal{C}$ then $H/(N_1\cap N_2)\in \mathcal{C}$; that is, the case $n=2$. Suppose then that $H\leq_{\mathrm{s}} H_1\times H_2$ with $H_i\in \mathcal{C}$. If both factors $H_i$ are finite then so is $H$ and we are done. Assume that $H_1$ is commensurable with $\mathcal{G}$. Then there exists $K_1\leq_{\mathrm{f}} H_1$ isomorphic to some $L_1\leq_{\mathrm{f}} \mathcal{G}$. By \autoref{CSP}, $K_1$ contains some level stabilizer $\St_G(n)$ (or a direct product $\St_G(n)\times \St_G(m)$ if $K_1\leq_{\mathrm{f}} G\times G$), and this is subdirect in some direct power of $G$ by \autoref{subdirect}. Denote by $M$ the preimage in $H$ of $\St_G(n)$ (or $\St_G(n)\times \St_G(m)$). Since $M$ has finite index in $H$, so does its projection to $H_2$. Thus we may replace $H$ by $M$, $H_1$ by $\St_G(n)$ (or $\St_G(n)\times \St_G(m)$) and $H_2$ by the projection of $M$ in $H_2$ and obtain $$H\leq_{\mathrm{s}} G_1\times \dots \times G_r\times H_2$$ for some finite $r$, which we take to be minimal, where each $G_i$ is a copy of $G$.
Write $A:=G_1\times \dots \times G_r$. Then $AH=G_1\times \dots \times G_r\times H_2$ is commensurable with $\mathcal{G}$ and we claim that $H$ has finite index in $AH$. Denote by $K_i$ the kernel of the map from $H$ to all factors of $A$ except $G_i$; that is, $K_i=H\cap (1\times \dots\times G_i\times\dots\times 1)\triangleleft H$. Then $K_i\triangleleft G_i$ and, since $G_i$ is just infinite, $K_i$ has finite index in $G_i$. This way we obtain a finite index normal subgroup $K_1\times \dots \times K_r$ of $A$ contained in $H$.
Hence $|AH : H| = |A : A\cap H|$ is finite and our claim is proved, showing that $H$ is commensurable with $\mathcal{G}$.
For the case $n>2$, proceed as follows: Take $H_1\cong H/\ker (H\twoheadrightarrow H_1)$ and $H_2\cong H/\ker(H\twoheadrightarrow H_2) \in \mathcal{C}$. By the above, $$H/(\ker (H\twoheadrightarrow H_1)\cap \ker (H\twoheadrightarrow H_2))\cong \mathrm{im}(H\rightarrow H_1\times H_2) \in \mathcal{C}.$$ Then, again by the above, $$H/(\ker (H\twoheadrightarrow H_1)\cap \ker (H\twoheadrightarrow H_2))\cap (\ker(H\twoheadrightarrow H_3))\cong \mathrm{im}(H\rightarrow H_1\times H_2\times H_3) \in \mathcal{C}.$$ At the $n$th iteration of this operation, we reach $$H=H/(\bigcap_{i=1}^{n-1} \ker(H\twoheadrightarrow H_i) \cap \ker(H\twoheadrightarrow H_n))\in \mathcal{C}.$$ \end{proof}
\begingroup \def\thethm{\ref*{thm2}} \begin{thm} The Gupta--Sidki $3$-group $G$ is subgroup separable. \end{thm} \addtocounter{thm}{-1} \endgroup
To prove this theorem it suffices to show that the conditions of \autoref{thm3} hold for the class $\mathcal{S}$ of finitely generated subgroups of $G$ all of whose subgroups of finite index are closed with respect to the profinite topology on $G$.
Clearly, $\mathcal{S}$ satisfies \ref{prop1}. To see that it also satisfies \ref{prop2}, let $H\leq_{\mathrm{f}} L$ for some $H$ in $\mathcal{S}$; thus $L$ is finitely generated. For any $K\leq_{\mathrm{f}} L$, we have $K\cap H\leq_{\mathrm{f}} H$ so $K\cap H$ is closed in $G$ by assumption. But $K\cap H$ also has finite index in $K$, hence each of its finitely many cosets is also closed in $G$ and therefore so is $K$, their union.
Before we can show that $\mathcal{S}$ also satisfies \ref{prop3} we need the following lemma.
\begin{lem}\label{lemma12}
\begin{enumerate}[label={\rm(\roman*)}] \item Suppose that $H_0$ is a group all of whose quotients are residually finite and that each of the groups $G_1, \ldots, G_n$ either is finite or is residually finite, just infinite and not virtually abelian. Let $H\leq_{\mathrm{s}} H_0\times G_1\times \cdots \times G_n$. Then every quotient of $H$ is residually finite.
\item If $H$ is abstractly commensurable with $G$ or $G\times G$ then every quotient of $H$ is residually finite. \end{enumerate} \end{lem}
\begin{proof} This is essentially Lemma 12 in \cite{griwil}. The first part is identical and the proof of the second only requires small modifications. Suppose that $K\triangleleft H$; we want to show that $K$ is an intersection of subgroups of finite index in $H$. Since $H$ is commensurable with $G$ or $G\times G$ and they both have the congruence subgroup property, there is some normal subgroup $N$ of finite index in $H$ which is a subdirect product of finitely many copies of $G$. By the same argument as in the proof of \ref{prop2}, it suffices to show that $K\cap N$ is closed in $N$ with respect to the profinite topology on $N$. This follows from the first part of the lemma, as it is equivalent to $N/(K\cap N)$ being residually finite. \end{proof}
We may now proceed to show that $\mathcal{S}$ also satisfies \ref{prop3}. \begin{lem} Let $H$ be a finitely generated subgroup of $\St(1)$ such that its first level vertex sections $H_0,H_1,H_2$ are in $\mathcal{S}$. Then $H$ is in $\mathcal{S}$. \end{lem}
\begin{proof} Let $K\leq_{\mathrm{f}} H$; then its first level vertex sections have finite index in those of $H$ and are therefore closed in $G$. So we only need to show that $H$ is closed in $G$. We will show that $\psi(H)$ is closed in $G\times G \times G$, so that $H$ is closed in $\St(1) \leq_{\mathrm{f}} G$ and hence in $G$. In fact, it suffices to show that $\psi(H)$ is closed in $H_0\times H_1\times H_2$ because in that case $\psi(H)=\bigcap K_i$ for $K_i\leq_{\mathrm{f}} H_0\times H_1\times H_2$ with each $K_i$ closed in $G\times G\times G$ by our assumption on $\mathcal{S}$. The lemma will follow from the more general
\noindent \textbf{Claim.} For every $n\in \mathbb{N}$, if $H\leq_{\mathrm{s}} H_0\times \dots\times H_n$ with $H_i\in \mathcal{S}$ then $H$ is closed in $H_0\times \dots\times H_n$.\\ \noindent\emph{Proof of claim.} We proceed by induction on $n$. When $n=1$, define $L_0\times 1:=H\cap(H_0\times 1)$. Then $L_0$ is normal in $H_0$, which is commensurable with $G$ or $G\times G$ by \autoref{mainthm}. By \autoref{lemma12}, $H_0/L_0$ is residually finite so there is a collection $(N_j)_{j\in J}$ of normal subgroups of finite index in $H_0$ whose intersection is $L_0$. Each subgroup $(N_j\times1)H$ is of finite index in $H_0\times H_1$ so it is enough to show that $H=\bigcap_{j\in J}((N_j\times1)H)$. Let $u$ be an element of the intersection, say $u=(n_j,1)(h_{j,0},h_{j,1})$ for each $j\in J$. Then $h_{j,1}$ is constant so we can write $u=(n_jh_{j,0},h_1)$ for each $j\in J$. Fix $i\in J$ and note that $h:=(h_{i,0},h_1)\in H$. Then, for each $j\in J$ we have
$$uh^{-1}=(n_j,1)(h_{j,0}h_{i,0}^{-1},1)\in (N_j\times 1)(H\cap (H_0\times 1))=(N_j\times 1)(L_0\times 1)=N_j\times 1,$$ where the last equality holds because $L_0=\bigcap_{j\in J}N_j\leq N_j$. Thus $uh^{-1} \in \bigcap_{j\in J} (N_j\times 1) = L_0\times 1 \leq H$ and $u\in H$.
Now assume that $H$ is closed in $H_0\times \dots\times H_{n-1}$ whenever $H\leq_{\mathrm{s}} H_0\times\dots\times H_{n-1}$ and each $H_i$ is in $\mathcal{S}$. Suppose that $H\leq_{\mathrm{s}} H_0\times\dots\times H_n$ with $H_i\in \mathcal{S}$. Define $L_0\times 1\times\dots\times 1:=H\cap(H_0\times 1\times\dots \times 1)$. As in the base step, there exist normal subgroups $(N_j)_{j\in J}$ of finite index in $H_0$ whose intersection is $L_0$. Let $H^j:=(N_j\times 1\times\dots \times 1)H$ for each $j\in J$. We show that $H=\bigcap_{j\in J}H^j$ and that each $H^j$ is closed in $H_0\times\dots\times H_n$. The first statement is proved similarly to the two-factor case: write an element $u$ of the intersection as $u=(n_jh_{j,0},h_1,\dots,h_n)$ and find some $h:=(h_{i,0},h_1,\dots,h_n)\in H$ so that $uh^{-1}=(n_j,1,\dots,1)(h_{j,0}h_{i,0}^{-1},1,\dots,1)\in N_j\times 1\times\dots\times 1$. For the second statement, fix $j\in J$ and let $P$ be the pre-image of $N_j$ in $H^j$ under the canonical projection to $H_0$. Then $P$ has finite index in $H^j$ as $N_j$ has finite index in $H_0$. Thus, for each $i\neq 0$, the projection $P_i$ of $P$ onto the $i$th factor has finite index in $H_i$, so $P_i\in \mathcal{S}$. Furthermore, $N_j\times1\times\dots\times1\leq P\leq_{\mathrm{s}} N_j\times P_1\times\dots\times P_n$, so $$P/(N_j\times1\times\dots\times1) \leq_{\mathrm{s}} P_1\times\dots\times P_n.$$ By the inductive hypothesis, $P/(N_j\times1\times\dots\times1)$ is closed in $P_1\times\dots\times P_n$. Thus $P$ is closed in $N_j\times P_1\times\dots\times P_n\leq_{\mathrm{f}} H_0\times H_1\times\dots\times H_n$, hence also in $H_0\times H_1\times\dots\times H_n$. \end{proof}
\end{document} |
\begin{document}
{\let\thefootnote\relax\footnote{ \copyright 2019 by the authors. Faithful reproduction of this article, in its entirety, by any means is permitted for noncommercial purposes.}}
\subjclass[2010]{35A01, 35A02, 35Q55.} \keywords{Nonlinear Schrödinger equation, local well-posedness, global well-posedness, Gronwall’s inequality, Strichartz estimates.}
\begin{abstract} We study the one dimensional nonlinear Schrödinger equation with power nonlinearity $\abs{u}^{\alpha - 1} u$ for $\alpha \in [1,5]$ and initial data $u_0 \in L^2({\mathbb R}) + H^1({\mathbb T})$. We show via Strichartz estimates that the Cauchy problem is locally well-posed. In the case of the quadratic nonlinearity ($\alpha = 2$) we obtain \emph{global} well-posedness in the space $C({\mathbb R}, L^2({\mathbb R}) + H^1({\mathbb T}))$ via Gronwall’s inequality. \end{abstract}
\title{On the global well-posedness of the quadratic NLS
on $L^2(\R) + H^1(\T)$}
\section{Introduction and main results} We are interested in the Cauchy problem for the nonlinear Schrödinger equation (NLS) with power nonlinearity on the space $L^2({\mathbb R}) + H^1({\mathbb T})$, i.e. \begin{equation} \label{eqn:cauchy_nls} \left\{ \begin{IEEEeqnarraybox}[][c]{rCl} \mathrm{i} u_t (x, t) + \partial_{x}^{2} u (x,t) \pm \abs{u}^{\alpha-1} u & = & 0 \qquad (x, t) \in {\mathbb R} \times {\mathbb R}, \\ u(\cdot, 0) & = & u_0, \end{IEEEeqnarraybox} \right. \end{equation} where $u_0 = v_0 + w_0 \in L^2({\mathbb R}) + H^1({\mathbb T})$ and $\alpha \in [1,5]$. By ${\mathbb T}$ we denote the one-dimensional torus, i.e. ${\mathbb T} = {\mathbb R} / 2\pi{\mathbb Z}$, where we consider functions on ${\mathbb T}$ to be $2\pi$-periodic functions on ${\mathbb R}$. Before we state our main results, let us mention that the NLS \eqref{eqn:cauchy_nls} is globally well-posed in $L^2({\mathbb R})$ via Strichartz estimates and mass conservation (see \cite{tsutsumi1987}) and it is globally well-posed in $L^{2}({\mathbb T})$ via the Fourier restriction norm method and mass conservation (see \cite{bourgain1993a}). Motivation for the investigation of hybrid initial values $u_0 \in L^2({\mathbb R}) + H^1({\mathbb T})$ comes from high--speed optical fiber communications, where in a certain approximation the behavior of pulses in glass--fiber cables is described by a NLS equation. The NLS \eqref{eqn:cauchy_nls} with initial data in $H^s({\mathbb R}) + H^s({\mathbb T})$ was referred to in \cite{pattakos2018} as the tooth problem. A tooth is, for example, $w_0$ restricted to one period. We think of the addition of $v_0$ to $w_0$ as eliminating finitely many of these teeth in the underlying periodic signal. A periodic signal is the simplest type of a non-decaying signal, encoding, for example, an infinite string of ones if there is exactly one tooth per period. However, such a purely periodic signal carries no information. 
One would like to be able to change it, at least locally. This leads necessarily to a hybrid formulation of the NLS where the signal is the sum of a periodic and a localized part, the localized part being able to remove one or more of the teeth in the underlying periodic signal. This way one can model, for example, a signal consisting of two infinite blocks of ones which are separated by a single zero, or even far more complicated patterns. In the optics literature the phenomenon of ghost pulses (see \cite{mamyshev1999} and \cite{zakharov1999}) occurs which in our terminology corresponds to the regrowth of missing teeth of the solution to the NLS \eqref{eqn:cauchy_nls}.
The case of the cubic nonlinearity ($\alpha=3$) and the initial data $u_0 \in H^s({\mathbb R}) + H^s({\mathbb T})$, where $s \geq 0$, was studied by the authors in \cite{pattakos2018}, where the existence of weak solutions in the extended sense was established. Moreover, under some further assumptions, unconditional uniqueness was obtained. In this paper, due to the non-algebraic structure of the nonlinearity in \eqref{eqn:cauchy_nls} (for $\alpha \neq 3,5$) we have to use different methods. For the relation between the solutions of \cite{pattakos2018} and the solutions of Theorem \ref{thm:mainthm1} we refer to Remark \ref{equal_solutions}.
To state the main results of this paper we need some preparation. Let $u = v + w \in C([0, T], L^2({\mathbb R}) + H^1({\mathbb T}))$ where $w$ satisfies the periodic NLS on the torus with initial data $w_0$. The following is known about $w$ (the case $\alpha \geq 2$ has been treated in \cite[Theorem 2.1]{lebowitz1988} while the remaining case $\alpha \in [1, 2)$ is presented in Theorem \ref{thm:gwp_cauchy_quadratic_nls_torus}). \begin{thm} \label{thm:lwp_nls_T} The Cauchy problem for the periodic NLS \begin{equation} \label{eqn:cauchy_nls_torus} \left\{ \begin{IEEEeqnarraybox}[][c]{rCl} \mathrm{i} w_t (x, t) + \partial_{x}^{2} w (x,t) \pm \abs{w}^{\alpha-1} w & = & 0 \qquad (x, t) \in {\mathbb T} \times {\mathbb R}, \\ w(\cdot, 0) & = & w_0. \end{IEEEeqnarraybox} \right. \end{equation} is locally well-posed in $H^1({\mathbb T})$ for $\alpha \geq 1$. That means that for any $w_0 \in H^1({\mathbb T})$ there is a unique $w \in C([0,T], H^1({\mathbb T}))$ satisfying \eqref{eqn:cauchy_nls_torus} in the mild sense. The guaranteed time of existence $T$ depends only on $\norm{w_0}_{H^1({\mathbb T})}$. \end{thm}
A solution $w$ to the periodic NLS at hand dictates that the local part $v$ has to be a solution of the Cauchy problem for the \emph{modified NLS} \begin{equation} \label{eqn:cauchy_modnls} \left\{ \begin{IEEEeqnarraybox}[][c]{rCl} \mathrm{i} v_t (x, t) + \partial_{x}^{2} v (x,t) \pm G_\alpha (w, v) & = & 0 \qquad (x, t) \in {\mathbb R} \times {\mathbb R}, \\ v(\cdot, 0) & = & v_0, \end{IEEEeqnarraybox} \right. \end{equation} where \begin{equation} G_\alpha (w, v) \coloneqq \abs{v + w}^{\alpha-1}(v + w) - \abs{w}^{\alpha - 1} w. \end{equation} The main results of the paper are the following two theorems on local and global wellposedness of NLS \eqref{eqn:cauchy_modnls} and consequently NLS \eqref{eqn:cauchy_nls}.
\begin{thm}[Local well-posedness of the NLS \eqref{eqn:cauchy_nls}] \label{thm:mainthm1} For $\alpha \in [1, 5]$ the Cauchy problem \eqref{eqn:cauchy_modnls} is locally well-posed in $C([0,T], L^2({\mathbb R})) \cap L^{\frac{4(\alpha+1)}{\alpha-1}}([0,T], L^{\alpha + 1}({\mathbb R}))$ for any $v_0 \in L^{2}({\mathbb R})$.
Hence, the original Cauchy problem \eqref{eqn:cauchy_nls} is locally well-posed.
In the case $\alpha \in [1, 5)$, the guaranteed time of existence $T$ depends only on $\norm{v_0}_2$ and $\norm{w_0}_{H^1({\mathbb T})}$, whereas, for $\alpha = 5$, $T$ depends on the profile of $v_0$ and $\norm{w_0}_{H^1({\mathbb T})}$. \end{thm}
\begin{rem} In the case $\alpha \in [1, 2]$, the intersection in Theorem \ref{thm:mainthm1} is not needed, i.e. one has unconditional well-posedness for the perturbation $v$. However, it is not clear whether the Cauchy problem \eqref{eqn:cauchy_nls} is unconditionally well-posed, since the wellposedness we obtain for the periodic part $w$ is only conditional (see the proof of Theorem \ref{thm:lwp_torus}). \end{rem}
\begin{rem} \label{equal_solutions} Notice that the weak solution in the extended sense $\tilde{u}$ constructed in \cite{pattakos2018} and the solution $u$ from Theorem \ref{thm:mainthm1} coincide. This can be seen as follows: $u$ is a weak solution in the extended sense, which follows by the definition, Plancherel’s theorem and the dominated convergence theorem. Moreover, in the aforementioned paper it was observed that $\tilde{u}$ is unique among those solutions, which can be approximated by smooth solutions. This is true for $u$ and hence $\tilde{u} = u$ follows. \end{rem}
For $\alpha = 2$, we need the Cauchy problem for the periodic NLS \eqref{eqn:cauchy_nls_torus} to be globally well-posed in $H^{1}({\mathbb T})$. Although this is claimed to be well-known in the community, we could not find a suitable reference. Several people refer to \cite{bourgain1993a} for this, however in \cite[Proposition
5.73]{bourgain1993a} $\alpha\geq3$ is required (in our notation). Moreover, in part ii) of the remark on page 152 in \cite{bourgain1993a}, Bourgain mentions that one could get existence of a solution for the quadratic nonlinearity using Schauder’s fixed point theorem, but one would lose uniqueness. Hence, we provide a proof in the Appendix (Theorem \ref{thm:gwp_cauchy_quadratic_nls_torus}). This global existence and uniqueness result on the torus, together with a close inspection of the mass $\int \abs{v}^2 \mathrm{d}{x}$, are essential ingredients in our proof of global well-posedness of \eqref{eqn:cauchy_nls} on the “tooth problem space” $L^2({\mathbb R}) + H^1({\mathbb T})$.
\begin{thm}[Global well-posedness of the quadratic NLS] \label{thm:mainthm2} For $\alpha = 2$ and $v_{0}\in L^{2}({\mathbb R})$ the unique solution $v$ of \eqref{eqn:cauchy_modnls} from Theorem \ref{thm:mainthm1} extends globally and obeys the bound \begin{equation} \label{eqn:exp_bound} \norm{v(\cdot, t)}_2 \leq \norm{v_0}_2 \exp \left[\norm{w}_{L^\infty_t L^\infty_x} t \right] \qquad \forall t \in [0, \infty). \end{equation} Hence, the original Cauchy problem \eqref{eqn:cauchy_nls} for $\alpha = 2$ is globally well-posed. \end{thm}
Although the local well-posedness result of Theorem \ref{thm:mainthm1} covers the whole range $\alpha \in [1, 5]$, the methods of the proof of Theorem \ref{thm:mainthm2} only work for $\alpha = 2$. A more precise explanation is given in Remark \ref{rem:failure_global}.
Of course, one can consider hybrid problems for other dispersive equations. Here we confine ourselves to a remark on the KdV.
\begin{rem} Observe that the tooth problem for the KdV reduces to a known setting. More precisely, consider real solutions of \begin{equation} \label{tooth_KdV} \left\{ \begin{IEEEeqnarraybox}[][c]{rCl} u_t (x, t) + u_{xxx} (x,t) + u_x u & = & 0 \qquad (x, t) \in {\mathbb R} \times {\mathbb R}, \\ u(\cdot, 0) & = & u_0 = v_0 + w_0 \in H^{s_1}({\mathbb R}) + H^{s_2}({\mathbb T}). \end{IEEEeqnarraybox} \right. \end{equation} Let $u = v + w \in C([0, T], H^{s_1}({\mathbb R}) + H^{s_2}({\mathbb T}))$, where $s_2 \in {\mathbb N}$ and $w$ is a global solution of the periodic KdV for the initial data $w_0$ (see \cite[Theorem 5]{bourgain1993b}). Then $v$ solves \begin{equation*} v_t + v_{xxx} + v_x v + (w v)_x = 0 \end{equation*} with the initial data $v_0$, which is the KdV with the potential $w$. This problem has been studied in e.g. \cite[Section 3.1]{erdogan2016} using parabolic regularization. There it has been shown that $v$ satisfies an exponential bound similar to \eqref{eqn:exp_bound}. Combining both results we obtain: \begin{quote} For $s_1, s_2 \in {\mathbb N}$ satisfying $s_1 \geq 2$ and $s_2 \geq s_1 + 1$ the KdV tooth problem, i.e., the Cauchy problem \eqref{tooth_KdV}, is globally well-posed in $H^{s_1}({\mathbb R}) + H^{s_2}({\mathbb T})$. \end{quote} \end{rem}
The paper is organized as follows: In Section \ref{section_pre} we state the required prerequisites for the proofs of the main theorems. In Section \ref{sec:lwp} we present the proof of Theorem \ref{thm:mainthm1} and in Section \ref{sec:quadraticglobal} we present the proof of Theorem \ref{thm:mainthm2}. Finally, in the Appendix we justify that the quadratic and subquadratic periodic NLS \eqref{eqn:cauchy_nls_torus} is globally well-posed in $H^{1}({\mathbb T})$.
\section{Prerequisites} \label{section_pre} Let us fix the notation and state some results necessary for the proof of our main theorems. For the purpose of smoothing we will use the heat kernel $(\phi_\varepsilon)_{\varepsilon \geq 0}$. Recall, that $\phi_\varepsilon = \delta_0$, if $\varepsilon = 0$, and \begin{equation*} \phi_\varepsilon(x) = \iv{2 \sqrt{\pi \varepsilon}} \, e^{-\frac{\abs{x}^2}{4 \varepsilon}} \qquad \forall x \in {\mathbb R}, \end{equation*} if $\varepsilon > 0$. We shall denote the convolution (in the space variable $x$) by e.g. $u \ast \phi_\varepsilon$.
For $s \in {\mathbb R}$ and $\Omega \in \set{{\mathbb R}, {\mathbb T}}$ we shall denote by $H^s(\Omega)$ the Sobolev spaces on $\Omega$. Also, we set $H^\infty(\Omega) \coloneqq \cap_{s \in {\mathbb R}} H^s(\Omega)$. By $\mathcal{F}$ we will denote the Fourier transform on ${\mathbb R}$.
We will use the following simple lemma, which can be found e.g. in \cite[Lemma 3.9]{chaichenets2018}. \begin{lem}[Size estimate] \label{lem:size_estimate} Let $\alpha \geq 1$. Then the following \emph{size estimate} \begin{eqnarray} \label{eqn:size_estimate} & & \abs{\abs{v_1 + w}^{(\alpha - 1)}(v_1 + w) - \abs{v_2 + w}^{(\alpha - 1)}(v_2 + w)} \\ & \leq & \alpha \max \set{1, 2^{\alpha - 1}} \left(\abs{v_1}^{\alpha - 1} + \abs{v_2}^{\alpha - 1} + \abs{w}^{\alpha - 1} \right) \abs{v_1 - v_2} \end{eqnarray} holds for any $v_1, v_2, w \in \mathbb C$. \end{lem}
A pair of exponents $(r, q) \in [2, \infty]^2$ is called \emph{admissible} (in one dimension), if \begin{equation} \label{eqn:admissible} \frac{2}{q} + \iv{r} = \frac{1}{2}. \end{equation} Let us denote by $q_{\text{a}}(r)$ the solution of \eqref{eqn:admissible} for any $r \in [2, \infty]$. Another pair of exponents $(\rho, \gamma) \in [1, 2]^2$ shall be called \emph{dually admissible}, if $(\rho', \gamma') \in [2, \infty]^2$ is admissible, i.e. if \begin{equation} \label{eqn:dually_admissible} \frac{2}{\gamma} + \iv{\rho} = \frac{5}{2}. \end{equation} For any $\rho \in [1, 2]$ we denote by $\gamma_{\text{a}}(\rho)$ the solution of \eqref{eqn:dually_admissible}.
\begin{prop}[Strichartz estimates] (Cf. \cite[Theorem 1.2]{keel1998}) \label{prop:strichartz} Let $(r, q_{\text{a}}(r))$ be admissible and $(\rho, \gamma_{\text{a}}(\rho))$ be dually admissible. Then there is a constant $C = C(r, \rho) > 0$ such that for any $T > 0$, any $v_0 \in L^2({\mathbb R})$ and any $F \in L^{\gamma_{\text{a}}(\rho)}([0, T], L^\rho({\mathbb R}))$ the \emph{homogeneous} and \emph{inhomogeneous} Strichartz estimates \begin{eqnarray} \label{eqn:homogeneous_strichartz} \norm{e^{\mathrm{i} t \partial_{x}^{2}} v_0}_{L^{q_{\text{a}}(r)}([0,T], L^r({\mathbb R}))} & \leq & C \norm{v_0}_{L^2({\mathbb R})}, \\ \label{eqn:inhomogeneous_strichartz} \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} F(\cdot, \tau) \mathrm{d}{\tau}}_{L^{q_{\text{a}}(r)}([0,T], L^r({\mathbb R}))} & \leq & C \norm{F}_{L^{\gamma_{\text{a}}(\rho)}([0,T], L^\rho({\mathbb R}))}. \end{eqnarray} hold. \end{prop}
\begin{lem}[Gronwall, integral form] \label{lem:gronwall_integral} (See \cite[Theorem 1.10]{tao2006}.) Let $A, T \geq 0$ and $u, B \in C([0, T], {\mathbb R}^+_0)$ be such that \begin{equation*} u(t) \leq A + \int_0^t B(s) u(s) \mathrm{d}{s} \qquad \forall t \in [0, T]. \end{equation*} Then \begin{equation*} u(t) \leq A \exp\left(\int_0^t B(s) \mathrm{d}{s}\right) \qquad \forall t \in [0, T]. \end{equation*} \end{lem}
\begin{lem}[Gronwall, differential form] \label{lem:gronwall_diff} (See \cite[Theorem 1.12]{tao2006}.) Let $T > 0$, $u: [0, T] \to {\mathbb R}^+_0$ be absolutely continuous and $B \in C([0,T], {\mathbb R}_0^+)$ such that \begin{equation*} u'(t) \leq B(t) u(t) \qquad \text{for almost every } t \in [0, T]. \end{equation*} Then \begin{equation*} u(t) \leq u(0) \exp\left(\int_0^t B(s) \mathrm{d}{s}\right) \qquad \forall t \in [0, T]. \end{equation*} \end{lem}
\begin{lem} \label{lem:fac} (See \cite[Equation (18)]{pattakos2018}). Let $s \geq 0$. Then there is a constant $C = C(s) > 0$ such that for any $v \in H^s({\mathbb R})$ and any $w \in H^{s + 1}({\mathbb T})$ one has $v \cdot w \in H^s({\mathbb R})$ and \begin{equation*} \norm{vw}_{H^s({\mathbb R})} \leq C \norm{v}_{H^s({\mathbb R})} \norm{w}_{H^{s + 1}({\mathbb T})}. \end{equation*} \end{lem} The above estimate is not optimal w.r.t. the assumed regularity of $w$. However, we do not need a stronger version and the proof is straightforward.
\section{Proof of Theorem \ref{thm:mainthm1}} \label{sec:lwp} Consider first the case $\alpha \in [2, 5)$. Let us define the space \begin{equation} \label{eqn:contraction_space} X \coloneqq C([0, T], L^2({\mathbb R})) \cap L^{q_{\text{a}}(\alpha + 1)}([0,T], L^{\alpha + 1}({\mathbb R})) \end{equation} equipped with the norm \begin{equation*} \norm{v}_X \coloneqq \norm{v}_{L^\infty_t L^2_x} + \norm{v}_{L^{q_{\text{a}}(\alpha + 1)}_t L^{\alpha + 1}} \qquad \forall v \in X, \end{equation*} where $T$ will be fixed later in the proof. The integral formulation of \eqref{eqn:cauchy_modnls} reads as \begin{equation} \label{eqn:duhamel_modnls} v = e^{\mathrm{i} t \partial_{x}^{2}} v_0 \pm \mathrm{i} \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_\alpha(w, v) \mathrm{d}{\tau} \eqqcolon \mathcal{T}(v). \end{equation}
By Banach’s fixed-point theorem, it suffices to show that there are $R, T > 0$ such that $\mathcal{T}$ is a contractive self-mapping of \begin{equation*}
M(R, T) \coloneqq \set{v \in X \Big| \, \norm{v}_X \leq R}. \end{equation*} Consider first the self-mapping property. For $r \in \set{2, \alpha + 1}$ we have \begin{equation*} \norm{\mathcal{T} v}_{L^{q_{\text{a}}(r)}_t L^r_x} \leq \norm{e^{\mathrm{i} t \partial_{x}^{2}} v_0}_{L^{q_{\text{a}}(r)}_t L^r_x} + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_\alpha(w, v) \mathrm{d}{\tau}}_{L^{q_{\text{a}}(r)}_t L^r_x}. \end{equation*} By the homogeneous Strichartz estimate \eqref{eqn:homogeneous_strichartz}, we have \begin{equation*} \norm{e^{\mathrm{i} t \partial_{x}^{2}} v_0}_{L^{q_{\text{a}}(r)}_t L^r_x} \lesssim \norm{v_0}_2 \end{equation*} for the first summand. This suggests the choice $R \approx \norm{v_0}_2$. For the second summand, whose norm also needs to be comparable with $R$, we will split the integral term. We proceed with the estimates for the contraction property of $\mathcal{T}$, because the self-mapping property follows from them by setting $v = v_1$ and $v_2 = 0$. To that end, let us define $G_\alpha(w, v_1, v_2) \coloneqq G_\alpha(w, v_1) - G_\alpha(w, v_2)$, set \begin{equation} \label{eqn:char_set} A \coloneqq
\set{x \in {\mathbb R} \, | \, \abs{w} \leq (\abs{v_1} + \abs{v_2})}, \end{equation} and introduce \begin{equation*} G_{\alpha, 1}(w, v_1, v_2) \coloneqq \mathbbmss{1}_{A} \left(\abs{w + v_1}^{\alpha - 1} (w + v_1) - \abs{w + v_2}^{\alpha - 1} (w + v_2) \right) \end{equation*} and \begin{equation*} G_{\alpha, 2}(w, v_1, v_2) \coloneqq \mathbbmss{1}_{A^c} \left(\abs{w + v_1}^{\alpha - 1} (w + v_1) - \abs{w + v_2}^{\alpha - 1} (w + v_2) \right). \end{equation*} By the triangle inequality one obtains for $r \in \set{2, \alpha + 1}$ the estimate \begin{eqnarray} \nonumber & & \norm{ \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_\alpha(w, v_1, v_2) \mathrm{d}{\tau} }_{L^{q_{\text{a}}(r)}_t L^r_x} \\ \label{eqn:split} & \leq & \norm{ \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_{\alpha, 1}(w, v_1, v_2) \mathrm{d}{\tau}}_{L^{q_{\text{a}}(r)}_t L^r_x} \\ \nonumber & & + \norm{ \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_{\alpha, 2}(w, v_1, v_2) \mathrm{d}{\tau}}_{L^{q_{\text{a}}(r)}_t L^r_x}. \end{eqnarray} We use the inhomogeneous Strichartz inequality and the size estimate \eqref{eqn:size_estimate} to bound the first summand of \eqref{eqn:split} by \begin{eqnarray} \label{eqn:strichartz} & & \norm{ \int_0^t e^{\mathrm{i}(t - \tau) \partial_x^2} G_{\alpha, 1}(w, v_1, v_2) \mathrm{d}{\tau}}_{L^{q_{\text{a}}(r)}_t L^r_x} \\ \nonumber & \leq & \norm{G_{\alpha, 1}(w, v_1, v_2) }_{L^{q_{\text{a}}((\alpha + 1)')}_t L^{(\alpha + 1)'}_x} \\ \nonumber & \lesssim & \norm{ \mathbbmss{1}_A \left(\abs{v_1}^{\alpha - 1} + \abs{v_2}^{\alpha - 1} + \abs{w}^{\alpha - 1} \right) \abs{v_1 - v_2} }_{L^{q_{\text{a}}((\alpha + 1)')}_t L^{(\alpha + 1)'}_x}. 
\end{eqnarray} Using the definition of the set $A$ and Hölder’s inequality for the space and time norms we arrive at the upper bound \begin{equation*} \norm{ (\abs{v_1}^{\alpha - 1} + \abs{v_2}^{\alpha - 1}) \abs{v_1 - v_2} }_{L^{q_{\text{a}}((\alpha + 1)')}L^{(\alpha + 1)'}} \lesssim T^{1 - \frac{\alpha - 1}{4}} R^{\alpha - 1} \norm{v_1 - v_2}_X. \end{equation*} For the second summand of \eqref{eqn:split} we obtain by the same methods the bound \begin{eqnarray*} & & \norm{ \int_0^t e^{\mathrm{i} (t - \tau) \partial_x^2} G_{\alpha, 2}(w, v_1, v_2) \mathrm{d}{\tau}}_{L^{q_{\text{a}}(r)}_t L^r_x} \\ & \lesssim & \norm{\abs{w}^{\alpha - 1} \abs{v_1 - v_2}}_{L^1(L^2)} \lesssim T \norm{v_1 - v_2}_X. \end{eqnarray*} Choosing $T$ small enough shows the contraction property of $\mathcal{T}$ and the proof, in the case $\alpha \in [2, 5)$, concludes.
The case $\alpha \in [1, 2]$ is treated in the same way, but instead of setting $\rho = (\alpha + 1)'$ one chooses $\rho = \frac{2}{\alpha}$ for the Strichartz exponent in \eqref{eqn:strichartz}. Applying Hölder’s inequality subsequently leads to the $L^\infty_t L^2_x$-norm and hence no intersection in \eqref{eqn:contraction_space} is required, i.e. we indeed have unconditional uniqueness.
For the remaining critical case $\alpha = 5$, consider the complete metric space \begin{equation*} M(R, T) \coloneqq
\set{v \in X \, \Big| \, \norm{v - e^{\mathrm{i} t \partial_x^2}v_0}_{L^\infty_t L^2_x} + \norm{v}_{L^6_t L^6_x} \leq R}. \end{equation*} We have to show again that $\mathcal{T}$ is a contractive self-mapping of $M(R, T)$ for some $R, T > 0$. Candidates for $R$ and $T$ are determined from the first term of \eqref{eqn:split}, corresponding to the effective power $\abs{v}^5$, exactly as in the treatment of the usual mass-critical NLS (see e.g. \cite[Theorem 5.3]{linares2015}). Subsequently, the remaining terms corresponding to the effective power $\abs{v}^1$ are treated via the Strichartz estimates as in the case $\alpha \in [2, 5)$ enforcing a possibly smaller choice of $T$. We omit the details.
\ensuremath{\square}
\section{Proof of Theorem \ref{thm:mainthm2}} \label{sec:quadraticglobal} The proof of Theorem \ref{thm:mainthm2} will be done by looking at the mass $\iv{2} \norm{v(t)}_2^2$ of the solution. In order to make this rigorous we have to work with solutions which are differentiable in time. We will get time regularity from regularity in space. Hence we replace $G_2$ in \eqref{eqn:cauchy_modnls} by its smooth version $G^\varepsilon$. We obtain \begin{equation} \label{eqn:cauchy_modnls_smooth} \left\{ \begin{IEEEeqnarraybox}[][c]{rCl} \mathrm{i} v_t (x, t) + \partial_{x}^{2} v (x,t) \pm G^\varepsilon (w, v) & = & 0 \qquad (x, t) \in {\mathbb R} \times {\mathbb R}, \\ v(\cdot, 0) & = & v_0, \end{IEEEeqnarraybox} \right. \end{equation} where \begin{equation} G^\varepsilon(w, v) \coloneqq [\abs{v + w} \ast \phi_\varepsilon ] (v + w) - [\abs{w} \ast \phi_\varepsilon ]w. \end{equation}
\begin{thm}[Local well-posedness of the smoothed modified NLS] \label{thm:lwp_modnls} Let $\varepsilon \geq 0$. Then there is a constant $C > 0$ such that for any $v_0 \in L^2$ and any $w \in C({\mathbb R}, L_x^\infty)$ the Cauchy problem \eqref{eqn:cauchy_modnls_smooth} has a unique solution in $C([0,T], L^2({\mathbb R}))$, provided \begin{equation} \label{eqn:lwp_time_smallness} T \leq C \min \set{\norm{v_0}_2^{-\frac{4}{3}}, \norm{w}_{L_t^\infty L_x^\infty}^{-1}}. \end{equation} \end{thm} \begin{proof} Consider the integral formulation of \eqref{eqn:cauchy_modnls_smooth}, i.e. \begin{equation} \label{eqn:duhamel_modnls_smooth} v = e^{\mathrm{i} t \partial_{x}^{2}} v_0 \pm \mathrm{i} \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G^\varepsilon(w, v) \mathrm{d}{\tau} \eqqcolon \mathcal{T}^\varepsilon(v) \end{equation} and notice that \begin{equation*} G^\varepsilon(w, v) = \underbrace{ \left([\abs{v + w} - \abs{w}]\ast \phi_\varepsilon \right)v }_{\eqqcolon G_1^\varepsilon(w, v)} + \underbrace{ \left([\abs{v + w} - \abs{w}] \ast \phi_\varepsilon\right)w + [\abs{w} \ast \phi_\varepsilon] v }_{\eqqcolon G_2^\varepsilon(w, v)}. \end{equation*} By Banach’s fixed-point theorem, it suffices to show that there are $R, T > 0$ such that $\mathcal{T}^{\varepsilon}$ is a contractive self-mapping of \begin{equation*} M(R, T) \coloneqq
\set{v \in C([0, T], L^2({\mathbb R})) \Big| \, \norm{v} \leq R}. \end{equation*} Consider first the self-mapping property. We have \begin{equation*} \norm{\mathcal{T}^\varepsilon v}_{L^\infty_t L^2_x} \leq \norm{e^{\mathrm{i} t \partial_{x}^{2}} v_0}_{L^\infty_t L^2_x} + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G^\varepsilon (w, v) \mathrm{d}{\tau}}_{L^\infty_t L^2_x}. \end{equation*} Since the operator $e^{it\partial_{x}^{2}}$ is an isometry on $L^{2}$ we have \begin{equation*} \norm{e^{\mathrm{i} t \partial_{x}^{2}} v_0}_{L^\infty_t L^2_x} = \norm{v_0}_2 \end{equation*} for the first summand. This suggests the choice $R \approx \norm{v_0}_2$. For the second summand, whose norm also needs to be comparable with $R$, we split the integral term and obtain \begin{eqnarray*} & & \norm{\int_0^T e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G^\varepsilon (w, v) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} \\ & \leq & \norm{\int_0^T e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_1^\varepsilon (w, v) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} + \norm{\int_0^T e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_2^\varepsilon (w, v) \mathrm{d}{\tau}}_{L^\infty_t L^2_x}. \end{eqnarray*} Now, both summands are treated via the inhomogeneous Strichartz estimate as in the proof of Theorem \ref{thm:mainthm1}. More precisely, one has \begin{eqnarray*} \norm{\int_0^T e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_1^\varepsilon (w, v) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} & \lesssim & \norm{([\abs{v + w} - \abs{w}] \ast \phi_\varepsilon) v }_{L^\gamma(L^\rho)} \\ &\leq & \norm{ \norm{[\abs{v + w} - \abs{w}] \ast \phi_\varepsilon }_{L_x^{2 \rho}} \norm{v}_{L_x^{2 \rho}}}_{L^\gamma_t} \\ & \leq & \norm{\norm{v}_{L_x^{2 \rho}}^2}_{L_t^{\gamma}} = \norm{v}_{L^{2 \gamma} (L^{2 \rho})}^2. \end{eqnarray*} Above, we used the Cauchy--Schwarz inequality to arrive at the second line and Young’s inequality (if $\varepsilon \neq 0$) and a size estimate to pass to the last line (all in the space variable).
As we want to arrive at the norm in $C([0, T], L^2({\mathbb R}))$, we put $2 \rho = 2$, i.e. $\rho = 1$. Then, from the admissibility condition \eqref{eqn:admissible} for $(\rho', \gamma')$, one obtains $\gamma = \frac{4}{3}$. As $2 \gamma = \frac{8}{3} < \infty = q_{\text{a}}(2)$, one can raise the time exponent to $\infty$ by Hölder’s inequality for the time variable, i.e. \begin{equation} \label{eqn:G_1_estimate} \norm{v}_{L^{2 \gamma} (L^{2 \rho})}^2 \leq T^{\frac{3}{4}} \norm{v}_{L^\infty(L^2)}^2 \leq T^{\frac{3}{4}} R^2 \overset{!}{\lesssim} R. \end{equation} This inequality holds under the condition \begin{equation*} T \lesssim \norm{v_0}_2^{-\frac{4}{3}}, \end{equation*} which is satisfied by \eqref{eqn:lwp_time_smallness}.
For $G_2^\varepsilon$ we similarly obtain \begin{eqnarray*} & & \norm{\int_0^T e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G_2^\varepsilon(w, v) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} \\ & \lesssim & \norm{([\abs{v + w} - \abs{w}] \ast \phi_\varepsilon)w }_{L^{\tilde{\gamma}}(L^{\tilde{\rho}})} + \norm{ [\abs{w} \ast \phi_\varepsilon] v }_{L^{\tilde{\gamma}}(L^{\tilde{\rho}})} \\ & \leq & \norm{w}_{L^\infty(L^\infty)} \norm{[\abs{v + w} - \abs{w}] \ast \phi_\varepsilon }_{L^{\tilde{\gamma}}(L^{\tilde{\rho}})} + \norm{[\abs{w} \ast \phi_\varepsilon]}_{L^\infty(L^\infty)} \norm{v}_{L^{\tilde{\gamma}}(L^{\tilde{\rho}})} \\ & \lesssim & \norm{w}_{L^\infty(L^\infty)} \norm{v}_{L^{\tilde{\gamma}}(L^{\tilde{\rho}})}, \end{eqnarray*} where we employed Young’s inequality and a size estimate to obtain the last line. In contrast to the $G_1$-case, we choose $\tilde{\rho} = 2$ to arrive at the norm in $C([0, T], L^2({\mathbb R}))$. Then, by the admissibility condition \eqref{eqn:admissible}, $\tilde{\gamma} = 1 < \infty = q_{\text{a}}(2)$. Hence, by applying Hölder’s inequality in the time variable again, we get \begin{eqnarray*} \norm{w}_{L^\infty(L^\infty)} \norm{v}_{L^{\tilde{\gamma}}(L^{\tilde{\rho}})} & = & \norm{w}_{L^\infty(L^\infty)} \norm{v}_{L^1(L^2)} \\ & \leq& \norm{w}_{L^\infty(L^\infty)} T \norm{v}_{L^\infty(L^2)} \\ & \leq & \norm{w}_{L^\infty(L^\infty)} R T \\ & \overset{!}{\lesssim_1} & R. \end{eqnarray*} From this we obtain the additional condition \begin{equation*} T \lesssim \norm{w}_{L^\infty(L^\infty)}^{-1}, \end{equation*} which is also satisfied by \eqref{eqn:lwp_time_smallness}.
For the contraction property, consider the splitting \begin{eqnarray*} G^\varepsilon(w, v_1, v_2) & \coloneqq & G^\varepsilon(w, v_1) - G^\varepsilon(w, v_2) \\ & = & [\abs{v_1 + w} \ast \phi_\varepsilon](v_1 + w) - [\abs{v_2 + w} \ast \phi_\varepsilon](v_2 + w) \\ & = & \underbrace{ ([\abs{v_1 + w} - \abs{w}] \ast \phi_\varepsilon)(v_1 - v_2) + ([\abs{v_1 + w} - \abs{v_2 + w}] \ast \phi_\varepsilon) v_2 }_{\eqqcolon G_1^\varepsilon(w, v_1, v_2)} \\ & & + \underbrace{([\abs{v_1 + w} - \abs{v_2 + w}] \ast \phi_\varepsilon) w + [\abs{w} \ast \phi_\varepsilon] (v_1 - v_2) }_{\eqqcolon G_2^\varepsilon(w, v_1, v_2)}. \end{eqnarray*} Arguments similar to those used in the proof of the self-mapping property shown above yield the contraction property of $\mathcal{T}^\varepsilon$, possibly requiring an even smaller implicit constant in \eqref{eqn:lwp_time_smallness}. \end{proof}
\begin{lem}[Convergence of the solutions for vanishing smoothing] \label{lem:vanishing_smoothing} Fix $v_0 \in L^2$ and $w \in C({\mathbb R}, C({\mathbb T}))$, and for all $\varepsilon \geq 0$ denote by $v^\varepsilon \in C([0, T], L^2({\mathbb R}))$ the unique solution of the Cauchy problem \eqref{eqn:cauchy_modnls_smooth} from Theorem \ref{thm:lwp_modnls}. Then, \begin{equation*} \norm{v^\varepsilon - v^0}_{L^\infty_t L^2_x} \xrightarrow{\varepsilon \to 0+} 0. \end{equation*} \end{lem} \begin{proof} Recall, that by construction $v^\varepsilon$ and $v^0$ are fixed points of $\mathcal{T}^\varepsilon$ and $\mathcal{T}^0$ respectively and hence \begin{eqnarray*} \norm{v^\varepsilon - v^0}_{L^\infty_t L^2_x} & \leq & \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( G^\varepsilon(w, v^\varepsilon) - G^0(w, v^0) \right) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} \\ & \leq & \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( G^\varepsilon(w, v^\varepsilon) - G^\varepsilon(w, v^0) \right) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} \\ & & + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( G^\varepsilon(w, v^0) - G^0(w, v^0) \right) \mathrm{d}{\tau}}_{L^\infty_t L^2_x}. \end{eqnarray*} Due to the fact that $\mathcal{T}^\varepsilon$ is contractive, the first summand is controlled by \begin{equation*} \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( G^\varepsilon(w, v^\varepsilon) - G^\varepsilon(w, v^0) \right) \mathrm{d}{\tau}}_{L^\infty_t L^2_x} \leq C \norm{v^0 - v^\varepsilon}_{L^\infty_t L^2_x}, \end{equation*} where $C < 1$ is the contraction constant. Thus, it suffices to show that the second summand converges to zero. To that end we first gather terms with the same effective powers of $v^0$ and $w$, i.e. 
\begin{eqnarray} \nonumber & & \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( G^\varepsilon(w, v^0) - G^0(w, v^0) \right) \mathrm{d}{\tau}\\ \nonumber & = & \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( \left[\abs{w + v^0} \ast \phi_\varepsilon \right](v^0 + w) - \left[\abs{w} \ast \phi_\varepsilon \right] w \right. \\ \nonumber & & - \left. \abs{w + v^0} (v^0 + w) + \abs{w} w \right) \mathrm{d}{\tau} \\ \label{eqn:first_summand} & = & \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( \left[\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right) \right]v^0 \right) \mathrm{d}{\tau} \\ \label{eqn:second_summand} & & + \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( \left[\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right) \right]w \right.\\ \nonumber & & + \left.\left(\abs{w} \ast \phi_\varepsilon - \abs{w} \right) v^0 \right) \mathrm{d}{\tau}. \end{eqnarray} The first summand corresponding to $\abs{v^0}^2$ is treated in the same way as the $G_1^\varepsilon$-term in the proof of Theorem \ref{thm:lwp_modnls}, i.e. via a Strichartz estimate and Hölder’s inequality. We arrive at \begin{eqnarray*} & & \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left( \left[\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right) \right]v^0 \right) \mathrm{d}{\tau} }_{L^\infty_t L^2_x} \\ & \leq & \norm{\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right) }_{L_t^{\frac{4}{3}}L_x^2} \cdot \norm{v^0}_{L_t^\infty L_x^2}. \end{eqnarray*} It suffices to show that the first factor above tends to zero, as $\varepsilon$ tends to zero. 
For almost every $t \in [0, T]$ we have that $\left(\abs{w + v^0} - \abs{w} \right) \in L^2$, which implies, due to the fact that $(\phi_\varepsilon)_{\varepsilon > 0}$ is an approximation to the identity, that \begin{equation*} \norm{\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right)}_{L_x^2} \xrightarrow{\varepsilon \to 0+} 0. \end{equation*} Furthermore, by Young’s inequality, \begin{equation*} \norm{\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right)}_{L_x^2}^{\frac{4}{3}} \lesssim \norm{v^0}_{L_x^2}^{\frac{4}{3}} \end{equation*} for every $\varepsilon > 0$ and almost every $t \in [0, T]$. Also, \begin{equation*} \int_0^T \norm{v^0(\cdot, \tau)}_{L_x^2}^{\frac{4}{3}} \mathrm{d}{\tau} = \norm{v^0}_{L^{\frac{4}{3}}_t L^2_x}^{\frac{4}{3}} \lesssim_T \norm{v^0}_{L^\infty_t L^2_x}^{\frac{4}{3}} \end{equation*} and hence the claim follows by the dominated convergence theorem.
The second summand (Equation \eqref{eqn:second_summand}), corresponding to $\abs{v^0w}$, is treated like the $G_2^\varepsilon$-term and we arrive at \begin{eqnarray*} & & \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left[ \left(\left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right) \right) w \right] \mathrm{d}{\tau} }_{L^\infty_t L^2_x} \\ & & + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left[ \left(\abs{w} \ast \phi_\varepsilon - \abs{w} \right) v^0 \right] \mathrm{d}{\tau}}_{L^\infty_t L^2_x} \\ & \leq & \norm{ \left(\abs{w + v^0} - \abs{w} \right) \ast \phi_\varepsilon - \left(\abs{w + v^0} - \abs{w} \right)}_{L_t^1 L_x^2} \cdot \norm{w}_{L_t^\infty L_x^\infty} \\ & & + \norm{v^0}_{L_t^\infty L_x^2} \norm{\abs{w} \ast \phi_\varepsilon - \abs{w}}_{L_t^1 L_x^\infty}. \end{eqnarray*} Observe, that $\abs{w}$ is uniformly continuous in the $x$-variable on the whole of ${\mathbb R}$. Hence, as for \eqref{eqn:first_summand}, the fact that $(\phi_\varepsilon)_{\varepsilon > 0}$ is an approximation to the identity implies the convergence to zero of \eqref{eqn:second_summand}. \end{proof}
\begin{lem}[Smooth solutions for smooth initial data] \label{lem:smoothness_modnls} (Cf. \cite[Proposition 3.11]{tao2006}.) Let $\varepsilon > 0$, $w \in C([0, T], H^\infty({\mathbb T}))$ and $v_0 \in \mathcal{S}$ and let $v$ denote the unique solution of \eqref{eqn:cauchy_modnls_smooth}. Then $v \in C^1([0, T], H^\infty({\mathbb R}))$ and for any $s > \iv{2}$ one has \begin{equation} \label{eqn:gronwall_modnls} \norm{v}_{L_t^\infty H_x^s} \leq C \norm{v_0}_{H^s} \exp\left(\norm{v}_{L_t^1 L_x^\infty} + T \norm{w}_{C(H^{s + 1}({\mathbb T}))} \right) \end{equation} for some $C = C(\varepsilon, s) > 0$. \end{lem} \begin{proof} We begin by showing that $v \in C([0, T], H^s({\mathbb R}))$ for any $s \in {\mathbb N}$. It suffices to prove that the operator $\mathcal{T}^\varepsilon$ from Theorem \ref{thm:lwp_modnls} is a self mapping in $M(R, T') \subseteq H^s$, for a possibly smaller $T' \leq T$. To that end, observe that \begin{eqnarray*} \norm{\mathcal{T}^\varepsilon v}_{H^s} & \leq & \norm{e^{\mathrm{i} t \partial_{x}^{2}} v_0}_{H^s} + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} G^\varepsilon(w, v) \mathrm{d}{\tau}}_{H^s} \\ & \leq & \norm{v_0}_{H^s} + \int_0^t \norm{G^\varepsilon(w, v)}_{H^s} \mathrm{d}{\tau}. \end{eqnarray*} The first summand fixes $R \approx \norm{v_0}_{H^s}$. For the integrand in the second summand we have (the variable $\tau$ is omitted in the notation) \begin{eqnarray} \label{eqn:inhomogeneity} & & \norm{G^\varepsilon(w, v)}_{H^s} \\ \nonumber & \leq & \underbrace{\norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right) v}_{H^s}}_{\eqqcolon I} + \underbrace{\norm{\left( \abs{w} \ast \phi_\varepsilon \right) v}_{H^s} }_{\eqqcolon II} + \underbrace{\norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right) w}_{H^s}}_{\eqqcolon III}. 
\end{eqnarray} As $H^s({\mathbb R})$ is an algebra with respect to point-wise multiplication, the first summand is estimated against \begin{equation*} \norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right) v}_{H^s} \lesssim \norm{\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon}_{H^s} \norm{v}_{H^s}. \end{equation*} The first product above is further estimated via the definition of the $H^s$-norm as \begin{equation} \label{eqn:smoothened_sobolev} \norm{\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon}_{H^s} \lesssim \norm{\jb{\cdot}^s \mathcal{F} \phi_\varepsilon}_{L^\infty} \norm{\abs{w + v} - \abs{w}}_2. \end{equation} Further estimating $\norm{v}_2 \leq \norm{v}_{H^s} \leq R$ and recalling the integral concludes the discussion of this term. The second summand (II) is treated via Lemma \ref{lem:fac}: \begin{equation*} \norm{\left( \abs{w} \ast \phi_\varepsilon \right) v}_{H^s} \lesssim_s \norm{\abs{w} \ast \phi_\varepsilon}_{H^{s + 1}({\mathbb T})} \norm{v}_{H^s}. \end{equation*} We again estimate $\norm{v}_{H^s} \leq R$ and observe for the other factor that \begin{eqnarray*} \norm{\abs{w} \ast \phi_\varepsilon}_{H^{s + 1}({\mathbb T})} & \approx & \sum_{\abs{\alpha} \leq \lceil s + 1 \rceil} \norm{\abs{w} \ast \left[D^\alpha \phi_\varepsilon \right]}_{L^2({\mathbb T})} \\ & \leq & \norm{w}_{\infty} \sum_{\abs{\alpha} \leq \lceil s + 1 \rceil} \norm{D^\alpha \phi_\varepsilon}_{L^1({\mathbb R})} \\ & \lesssim_{\varepsilon, s} & \norm{w}_{H^{s + 1}({\mathbb T})}. \end{eqnarray*} The last summand (III) is estimated via \begin{equation*} \norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right) w}_{H^s} \lesssim_{\varepsilon, s} \norm{v}_{H^s} \norm{w}_{H^{s + 1}({\mathbb T})}. \end{equation*} The proof of the above requires no new techniques and is omitted. 
All in all this shows the local well-posedness of \eqref{eqn:cauchy_modnls_smooth} in $C([0, T'], H^s)$, where the guaranteed time of existence is \begin{equation*} T' \approx_{\varepsilon, s} \min \set{\norm{w}_{H^{s + 1}({\mathbb T})}^{-1}, \norm{v_0}_{H^s({\mathbb R})}^{-1}}. \end{equation*} To prove the estimate \eqref{eqn:gronwall_modnls}, we will employ Lemma \ref{lem:gronwall_integral} (Gronwall’s inequality). To that end, now let $T'$ be the maximal time of existence of the solution $v \in C([0, T'), H^s)$. Observe that \begin{equation*} \norm{v(\cdot, t)}_{H^s} = \norm{(\mathcal{T}^\varepsilon v)(\cdot, t)}_{H^s} \leq \norm{v_0}_{H^s} + \int_0^t \norm{G^\varepsilon(w, v)(\cdot, \tau)}_{H^s} \mathrm{d}{\tau} \qquad \forall t \in [0, T'). \end{equation*} The integrand above is estimated as in inequality \eqref{eqn:inhomogeneity}. The first term (I), however, requires a refined treatment, as it is quadratic in $\norm{v}_{H^s}$. The algebra property of $H^s({\mathbb R}) \cap L^\infty({\mathbb R})$ implies \begin{equation*} I \leq \norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right)}_{H^s} \norm{v}_\infty + \norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right)}_\infty \norm{v}_{H^s}. \end{equation*} We estimate the first factor in the first summand by \eqref{eqn:smoothened_sobolev}. For the first factor of the second summand we have \begin{equation*} \norm{\left(\left[\abs{w + v} - \abs{w} \right] \ast \phi_\varepsilon \right)}_\infty \leq \norm{\left[\abs{w + v} - \abs{w} \right]}_\infty \norm{\phi_\varepsilon}_1 \leq \norm{v}_\infty \end{equation*} by Young’s inequality. Reinserting the estimates for the terms $(II)$ and $(III)$ yields \begin{equation*} \norm{v(\cdot, t)}_{H^s} \lesssim_{s, \varepsilon} \norm{v_0}_{H^s} + \int_0^t \left(\norm{v(\cdot, \tau)}_{\infty} + \norm{w(\cdot, \tau)}_{H^{s + 1}({\mathbb T})} \right) \norm{v(\cdot, \tau)}_{H^s} \mathrm{d}{\tau}. 
\end{equation*} Gronwall’s inequality now implies \begin{eqnarray*} \norm{v(\cdot, t)}_{H^s} & \lesssim_{\varepsilon, s} & \norm{v_0}_{H^s} \exp \left(\int_0^t \left(\norm{v(\cdot, \tau)}_{\infty} + \norm{w(\cdot, \tau)}_{H^{s + 1}({\mathbb T})} \right) \mathrm{d}{\tau} \right) \\ & \leq & \norm{v_0}_{H^s} \exp\left(\norm{v}_{L_t^1 L_x^\infty} + T' \norm{w}_{C(H^{s + 1}({\mathbb T}))} \right) \qquad \forall t \in [0, T'). \end{eqnarray*} Thus we see that a blowup cannot occur for any $T' < T$ and so $T' = T$.
This indeed shows that $v \in C([0, T], H^s)$. As $v_0 \in \mathcal{S}$ and $w \in C([0, T], H^\infty({\mathbb T}))$ are smooth, a classical result from semi-group theory (see \cite[Theorem 4.2.4]{pazy1992}) implies that $v \in C^1([0, T], H^s)$. Since $s > \iv{2}$ was arbitrary, the proof is complete. \end{proof}
\begin{prop} \label{prop:norm_estimate} The unique solution $v$ of \eqref{eqn:cauchy_modnls_smooth} from Theorem \ref{thm:lwp_modnls} satisfies \begin{equation} \label{eqn:norm_estimate} \norm{v(\cdot, t)}_2 \leq \norm{v_0}_2 \exp \left[\norm{w}_{L^\infty_t L^\infty_x} t \right] \qquad \forall t \in [0, T]. \end{equation} \end{prop}
\begin{proof} Let $w^{n} \in C([0, T], H^\infty({\mathbb T}))$ be functions with the property \begin{equation*} \norm{w^n - w}_{C([0,T],H^1({\mathbb T}))} \xrightarrow{n \to \infty} 0 \end{equation*} and let $v_n \xrightarrow{n \to \infty} v_0$ in the $L^2$-norm where $v_n \in \mathcal{S}$ for all $n \in {\mathbb N}$. Moreover, let $v^{\varepsilon, n} \in C^1([0, T], L^2)$ be the solution of \eqref{eqn:cauchy_modnls_smooth} with initial data $v_n$ and nonlinearity $G^{\varepsilon}(w^n, v^{\varepsilon, n})$ (the smoothness of $v^{\varepsilon, n}$ follows from Lemma \ref{lem:smoothness_modnls}). We have \begin{eqnarray} \nonumber \iv{2} \frac{\mathrm{d}}{\mathrm{d}{t}} \norm{v^{\varepsilon, n}(\cdot, t)}_2^2 & = & \operatorname{Re} \dup{\dot{v}^{\varepsilon, n}(\cdot, t)} {v^{\varepsilon, n}(\cdot, t)} = \operatorname{Re} \dup{ \mathrm{i} \partial_{x}^{2} v^{\varepsilon, n} \pm \mathrm{i} G^\varepsilon (w^n, v^{\varepsilon, n})} {v^{\varepsilon, n}} \\ \nonumber & = & \underbrace{-\operatorname{Re} \mathrm{i} \dup{\nabla v^{\varepsilon, n}} {\nabla v^{\varepsilon, n}}}_{= 0} \\ \nonumber & & \pm \operatorname{Re} \mathrm{i} \dup{(\abs{v^{\varepsilon, n} + w^n} \ast \phi_\varepsilon) (v^{\varepsilon, n} + w^n) - (\abs{w^n} \ast \phi_\varepsilon) w^n}{v^{\varepsilon, n}} \\ \nonumber & = & \pm \underbrace{\operatorname{Re} \mathrm{i} \dup{(\abs{v^{\varepsilon, n} + w^n} \ast \phi_\varepsilon) v^{\varepsilon, n}}{v^{\varepsilon, n}}}_{= 0} \\ \label{eqn:modsoln_bound} & & \pm \operatorname{Re} \mathrm{i} \dup{ ([\abs{v^{\varepsilon, n} + w^n} - \abs{w^n}] \ast \phi_\varepsilon) w^n} {v^{\varepsilon, n}} \end{eqnarray} and hence \begin{eqnarray} \nonumber \iv{2} \frac{\mathrm{d}}{\mathrm{d}{t}} \norm{v^{\varepsilon, n}(\cdot, t)}_2^2 & \leq & \abs{\dup{([\abs{v^{\varepsilon, n} + w^n} - \abs{w^n}] \ast \phi_\varepsilon) w^n} {v^{\varepsilon, n}}} \\ \nonumber & \leq & \norm{([\abs{v^{\varepsilon, n} + w^n} - \abs{w^n}] \ast \phi_\varepsilon) w^n}_{L^2_x} 
\norm{v^{\varepsilon, n}}_{L^2_x} \\ \label{eqn:diff_ineq} & \leq & \norm{w^n}_{L^\infty_t L^\infty_x} \norm{v^{\varepsilon, n}}_{L^2_x}^2 \end{eqnarray} for all $t \in [0, T]$. Above, we obtained the first estimate by the Cauchy-Schwarz inequa\-lity and the second one by Hölder’s inequality, Young’s inequality and the size estimate. By the differential form of the Gronwall’s inequality from Lemma \ref{lem:gronwall_diff}, we obtain \begin{equation*} \norm{v^{\varepsilon, n}(\cdot, t)}_2 \leq \norm{v_n}_2 \exp \left[\norm{w^n}_{L^\infty_t L^\infty_x} t \right] \qquad \forall t \in [0, T]. \end{equation*} In the limit $n \to \infty$, the right-hand side above converges to the right-hand side of \eqref{eqn:norm_estimate}. It remains to show \begin{equation} \label{eqn:convergence_ivs} \norm{v^{\varepsilon, n} - v^{\varepsilon}}_{L^\infty L^2} \xrightarrow{n \to \infty} 0, \end{equation} because then the left-hand side converges to $\norm{v^\varepsilon}_{L^\infty_t L^2_x}$ in the limit $n \to \infty$. Finally, Lemma \ref{lem:vanishing_smoothing} yields \begin{equation*} \norm{v^\varepsilon}_{L^\infty_t L^2_x} \xrightarrow {\varepsilon \to 0} \norm{v^0}_{L^\infty_t L^2_x}. \end{equation*}
To prove \eqref{eqn:convergence_ivs}, observe that the linear evolution poses no problems and hence it suffices to control the integral term \begin{equation*} \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left[ G^\varepsilon(w, v^{\varepsilon}) - G^\varepsilon(w^n, v^{\varepsilon, n}) \right] \mathrm{d}{\tau}}_{L^\infty L^2}. \end{equation*} To that end, we will split the difference of the nonlinear terms according to their effective power up to one exception. We begin by observing that \begin{eqnarray*} & & G^\varepsilon(w, v^{\varepsilon}) - G^\varepsilon(w^n, v^{\varepsilon, n}) \\ & = & (\abs{w + v^\varepsilon} \ast \phi_\varepsilon) v^\varepsilon - (\abs{w^n + v^{\varepsilon, n}} \ast \phi_\varepsilon) v^{\varepsilon, n} \\ & & + ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) w - ([\abs{w^n + v^{\varepsilon,n}} - \abs{w^n}] \ast \phi_\varepsilon) w^n \end{eqnarray*} and gather the first and the second summand, as well as the third and the last summand. In the first sum we have \begin{eqnarray*} & & (\abs{w + v^{\varepsilon}} \ast \phi_\varepsilon) v^\varepsilon - (\abs{w^n + v^{\varepsilon, n}} \ast \phi_\varepsilon) v^{\varepsilon, n} \\ & = & \underbrace{ (\abs{w + v^\varepsilon} \ast \phi_\varepsilon) v^\varepsilon - (\abs{w + v^\varepsilon} \ast \phi_\varepsilon) v^{\varepsilon, n}}_{\eqqcolon I} \\ & & + \underbrace{ (\abs{w + v^\varepsilon} \ast \phi_\varepsilon) v^{\varepsilon, n} - (\abs{w^n + v^{\varepsilon, n}} \ast \phi_\varepsilon) v^{\varepsilon, n}}_{\eqqcolon II}, \end{eqnarray*} whereas for the second sum \begin{eqnarray*} & & ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon)w - ([\abs{w^n + v^{\varepsilon, n}} - \abs{w^n}] \ast \phi_\varepsilon) w^n \\ & = & \underbrace{ ([\abs{w^n + v^\varepsilon} - \abs{w^n}] \ast \phi_\varepsilon) w^n - ([\abs{w^n + v^{\varepsilon, n}} - \abs{w^n}] \ast \phi_\varepsilon) w^n }_{\eqqcolon III} \\ & & + \underbrace{ ([\abs{w + v^\varepsilon} - \abs{w}] \ast 
\phi_\varepsilon) w - ([\abs{w^n + v^\varepsilon} - \abs{w^n}] \ast \phi_\varepsilon) w^n}_{\eqqcolon IV} \end{eqnarray*} holds. We now complete the splitting of $G^\varepsilon(w, v^{\varepsilon}) - G^\varepsilon(w^n, v^{\varepsilon, n})$ into terms of the same effective powers. We have \begin{eqnarray*} I & = & (\abs{w + v^\varepsilon} \ast \phi_\varepsilon) (v^\varepsilon - v^{\varepsilon, n}) \\ & = & ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) (v^\varepsilon - v^{\varepsilon, n}) + (\abs{w} \ast \phi_\varepsilon)(v^\varepsilon - v^{\varepsilon, n}), \\ II & = & ([\abs{w + v^\varepsilon} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon, n} \\ & = & ([\abs{w + v^\varepsilon} - \abs{w + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon,n} + ([\abs{w + v^{\varepsilon, n}} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon, n}, \\ III & = & ([\abs{w^n + v^\varepsilon} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon) w^n \text{ and} \\ IV & = & ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) w - ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) w^n \\ & & - ([\abs{w^n + v^\varepsilon} - \abs{w^n}] \ast \phi_\varepsilon) w^n + ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) w^n \\ & = & ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon)(w - w^n) \\ & & + ([\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}] \ast \phi_\varepsilon) w^n, \end{eqnarray*} from which the effective powers are obvious, and put \begin{eqnarray*} \tilde{G}_1^\varepsilon(w, w^n, v^\varepsilon, v^{\varepsilon, n}) & \coloneqq & ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) (v^\varepsilon - v^{\varepsilon, n}) \\ & & + ([\abs{w + v^\varepsilon} - \abs{w + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon, n}, \\ \tilde{G}_2^\varepsilon(w, w^n, v^\varepsilon, v^{\varepsilon, n}) & \coloneqq & (\abs{w} \ast 
\phi_\varepsilon)(v^\varepsilon - v^{\varepsilon, n}) + ([\abs{w^n + v^\varepsilon} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon) w^n \\ & & + ([\abs{w + v^{\varepsilon, n}} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon, n} \\ & & + ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon)(w - w^n) \\ & & + ([\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}] \ast \phi_\varepsilon) w^n. \end{eqnarray*} Now, by the triangle inequality and the inhomogeneous Strichartz estimate, one has \begin{eqnarray*} & & \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left[ G^\varepsilon(w, v^{\varepsilon}) - G^\varepsilon(w^n, v^{\varepsilon, n}) \right] \mathrm{d}{\tau}}_{L^\infty L^2} \\ & \leq & \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \tilde{G}_1^\varepsilon(w, w^n, v^\varepsilon, v^{\varepsilon, n}) \mathrm{d}{\tau}}_{L^\infty L^2} \\ & & + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \tilde{G}_2^\varepsilon(w, w^n, v^\varepsilon, v^{\varepsilon, n}) \mathrm{d}{\tau}}_{L^\infty L^2} \\ & \lesssim & \norm{ \tilde{G}_1^\varepsilon(w, w^n, v^\varepsilon, v^{\varepsilon, n}) }_{L^\frac{4}{3}_t L^1_x} + \norm{ \tilde{G}_2^\varepsilon(w, w^n, v^\varepsilon, v^{\varepsilon, n}) }_{L^1_t L^2_x}. \end{eqnarray*} We begin by estimating the first summand above. In fact, we have \begin{eqnarray*} & & \norm{ ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) (v^\varepsilon - v^{\varepsilon, n}) }_{L^\frac{4}{3}_t L^1_x} \\ & \leq & \norm{t \mapsto \norm{ [\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon}_{L^2_x} \norm{v^\varepsilon - v^{\varepsilon, n}}_{L^2_x} }_{\frac{4}{3}} \\ & \leq & \norm{t \mapsto \norm{v^\varepsilon}_{L^2_x} \norm{v^\varepsilon - v^{\varepsilon, n}}_{L^2_x} }_{\frac{4}{3}} \\ & \leq & T^{\frac{3}{4}} \norm{v^\varepsilon}_{L^\infty_t L^2_x} \norm{v^\varepsilon - v^{\varepsilon, n}}_{L^\infty_t L^2_x} 
\end{eqnarray*} by the Cauchy-Schwarz, Young’s and the inverse triangle inequalities for the space variable and Hölder’s inequality for the time variable. Choosing $T$ sufficiently small shows that \begin{equation*} \norm{ ([\abs{w + v^\varepsilon} - \abs{w}] \ast \phi_\varepsilon) (v^\varepsilon - v^{\varepsilon, n}) }_{L^\frac{4}{3}_t L^1_x} \leq \iv{5} \norm{v^\varepsilon - v^{\varepsilon, n}}_{L^\infty_t L^2_x}. \end{equation*} For the second term in the definition of $\tilde{G}_1^\varepsilon$ the same techniques are applied which yield the bound \begin{equation*} \norm{ ([\abs{w + v^\varepsilon} - \abs{w + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon,n}}_{L^\frac{4}{3}_t L^1_x} \leq T^{\frac{3}{4}} \norm{v^{\varepsilon, n}}_{L^\infty_t L^2_x} \norm{v^\varepsilon - v^{\varepsilon, n}}_{L^\infty_t L^2_x}. \end{equation*} By the proof of Theorem \ref{thm:lwp_modnls}, one has \begin{equation} \label{eqn:Linfty_L2_uniform_bound} \norm{v^{\varepsilon, n}}_{L^\infty_t L^2_x} \lesssim \norm{v_n}_2 \approx \norm{v_0}_2 \end{equation} and thus choosing $T$ sufficiently small again implies \begin{equation*} \norm{ ([\abs{w + v^\varepsilon} - \abs{w + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon,n}}_{L^\frac{4}{3}_t L^1_x} \leq \iv{5} \norm{v^\varepsilon - v^{\varepsilon, n}}_{L^\infty_t L^2_x}. \end{equation*} The first term in the definition of $\tilde{G}_2^\varepsilon$ is treated similarly to the above. The same is true for the second term, where we additionally observe that \begin{equation} \label{eqn:Linfty_H1_uniform_bound} \sup_{n \in {\mathbb N}} \norm{w^n}_{C([0, T], H^1({\mathbb T}))} < \infty. 
\end{equation} For the third term, we have \begin{eqnarray*} & & \norm{([\abs{w + v^{\varepsilon, n}} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon) v^{\varepsilon, n}}_{L^1_t L^2_x} \\ & \leq & \norm{[\abs{w + v^{\varepsilon, n}} - \abs{w^n + v^{\varepsilon, n}}] \ast \phi_\varepsilon}_{L^\infty_t L^\infty_x} \norm{v^{\varepsilon, n}}_{L^\infty_t L^2_x} \\ & \leq & \norm{w - w^n}_{L^\infty_t L^\infty_x} \norm{v^{\varepsilon, n}}_{L^\infty_t L^2_x} \\ & \lesssim & \norm{w - w^n}_{L^\infty_t H^1_x({\mathbb T})} \xrightarrow{n \to \infty} 0, \end{eqnarray*} where the Cauchy-Schwarz inequality was used for the first estimate, the embedding $L^\infty_t \hookrightarrow L^1_t$, Young’s inequality and the inverse triangle inequality for the second estimate and the embedding $C([0,T], H^1({\mathbb T})) \hookrightarrow L^\infty_t L^\infty_x$ together with \eqref{eqn:Linfty_L2_uniform_bound} for the last estimate. By the same techniques, one obtains the convergence of the fourth term to zero.
Finally, for the last term in the definition of $\tilde{G}_2^\varepsilon$, one has \begin{eqnarray*} & & \norm{([\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}] \ast \phi_\varepsilon) w^n }_{L^1_t L^2_x} \\ & \leq & \norm{\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}}_{L^1_t L^2_x} \norm{w^n}_{L^\infty H^1_x({\mathbb T})} \\ & \lesssim & \norm{\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}}_{L^1_t L^2_x}, \end{eqnarray*} where Hölder’s inequality, the embedding $C([0,T], H^1({\mathbb T})) \hookrightarrow L^\infty_t L^\infty_x$ and Young’s inequality were used for the first estimate and \eqref{eqn:Linfty_H1_uniform_bound} for the second estimate. Observe that by the inverse triangle inequality, the bound \begin{equation*} \abs{\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}} \leq 2 \min \set{\abs{w - w^n}, \abs{v^\varepsilon}} \leq 2 \abs{v^\varepsilon} \end{equation*} holds pointwise (in $t$ and $x$). This implies that \begin{equation*} \abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n} \xrightarrow{n \to \infty} 0 \end{equation*} and hence, by the theorem of dominated convergence for the space variable, \begin{equation*} g_n(t) \coloneqq \norm{\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}}_{L^2_x} \xrightarrow{n \to \infty} 0 \qquad \forall t \in [0, T]. \end{equation*} Moreover, for all $t \in [0, T]$, we have $g_n(t) \leq 2 \norm{v^\varepsilon(\cdot, t)}_2$ and $\norm{v^\varepsilon}_{L^1_t L^2_x} \lesssim \norm{v^\varepsilon}_{L^\infty_t L^2_x} < \infty$. Hence, reapplying the theorem of dominated convergence for the time variable yields \begin{equation*} \norm{([\abs{w + v^\varepsilon} - \abs{w} - \abs{w^n + v^\varepsilon} + \abs{w^n}] \ast \phi_\varepsilon) w^n }_{L^1_t L^2_x} \xrightarrow{n \to \infty} 0 \end{equation*} as claimed. \end{proof}
Notice that \eqref{eqn:norm_estimate} together with the local well-posedness of NLS \eqref{eqn:cauchy_modnls} from Theorem \ref{thm:lwp_modnls} imply that NLS \eqref{eqn:cauchy_modnls} is globally well-posed, i.e. Theorem \ref{thm:mainthm2} is proved.
\begin{rem} \label{rem:failure_global} Observe, that in the case $\alpha \neq 2$, the proof would proceed roughly unchanged up to Equation \eqref{eqn:modsoln_bound}. However, we could replace the differential inequality \eqref{eqn:diff_ineq} by \begin{equation*} \iv{2} \frac{\mathrm{d}}{\mathrm{d}{t}} \norm{v^{\varepsilon, n}(\cdot, t)}_2^2 \lesssim \norm{w^n}_{L^\infty_t L^\infty_x}^{\alpha - 1} \norm{v^{\varepsilon, n}}_{L^2_x}^2 + \norm{w^n}_{L^\infty_t L^\infty_x} \norm{v^{\varepsilon, n} }_{L^{\alpha}_x}^\alpha \end{equation*} and this bound is not sufficient to exclude a blow-up of the $L^2$-norm. \end{rem}
\appendix \section{Quadratic and Subquadratic NLS on the torus} To prove global existence of solutions to the Cauchy problem of the quadratic and subquadratic nonlinear Schrödinger equation on ${\mathbb T}$ (that is \eqref{eqn:cauchy_nls_torus} with $\alpha \in [1, 2]$), we will employ the mass and energy conservation laws. The justification of conservation laws requires solutions which are differentiable in time. Again, time regularity will be obtained from regularity in space. To that end we will smoothen out the rough quadratic nonlinearity in such a way that the solutions of the resulting equation still admit suitable conservation laws. The regularization is slightly different from the one used in the proof of Theorem \ref{thm:mainthm2}. Let us mention that the ideas presented here are borrowed from \cite{ginibre1979} where the same problem was studied on ${\mathbb R}^d$, using a contraction argument and conservation laws. Since our setting is based on the torus, we have to work with Bourgain spaces. For the convenience of the reader, we present some of the arguments in detail.
Observe that, if $w$ is a sufficiently nice $2 \pi$-periodic function and $\varepsilon > 0$, then \begin{eqnarray*} (w \ast \phi_\varepsilon)(x) & = & \int_{-\infty}^\infty w(y) \phi_\varepsilon(x - y) \mathrm{d}{y} = \sum_{n \in {\mathbb Z}} \int_{(2n - 1) \pi}^{(2n + 1)\pi} w(y) \phi_\varepsilon(x - y) \mathrm{d}{y} \\ & = & \int_{-\pi}^{\pi} w(y) \sum_{n \in {\mathbb Z}} \phi_\varepsilon(x - y - 2n \pi) \mathrm{d}{y}. \end{eqnarray*} Hence, convolution of $w$ with $\phi_\varepsilon$ on ${\mathbb R}$ corresponds to convolution of $w$ with the periodization of $\phi_\varepsilon$ on ${\mathbb T}$. For the rest of the paper we will slightly abuse the notation and denote this periodization also by $\phi_\varepsilon$. In the same spirit we will use from now on $\ast$ to denote the convolution on ${\mathbb T}$.
The smooth version of \eqref{eqn:cauchy_nls_torus} for $\alpha \in [1, 2]$ reads as \begin{equation} \label{eqn:cauchy_nls_smooth_T} \left\{ \begin{IEEEeqnarraybox}[][c]{rCl} \mathrm{i} w_t (x, t) + \partial_x^2 w (x,t) \pm (\abs{w \ast \phi_\varepsilon}^{\alpha - 1}(w \ast \phi_\varepsilon)) \ast \phi_\varepsilon & = & 0 \qquad (x, t) \in {\mathbb T} \times {\mathbb R}, \\ w(\cdot, 0) & = & w_0 \ast \phi_\varepsilon \end{IEEEeqnarraybox} \right. \end{equation} and the corresponding Duhamel’s formula is (cf. \cite[Equations (2.14), (2.13), (2.11) and (1.15)]{ginibre1979}) \begin{equation} \label{eqn:duhamel_nls_smooth_T} w(\cdot, t) = e^{\mathrm{i} t \partial_{x}^{2}} (w_0 \ast \phi_\varepsilon) \pm \mathrm{i} \int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left[\left( \abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon (\cdot, \tau)\right] \mathrm{d}{\tau}. \end{equation} From now on, we denote by $\mathcal{F}$ and $\mathcal{F}^{(-1)}$ the \emph{Fourier transform} and the inverse Fourier transform, on the torus, respectively. We use the symmetric choice of constants and write also \begin{eqnarray*} \hat{f}(\xi) & \coloneqq & (\mathcal{F} f)(\xi) = \iv{\sqrt{2 \pi}} \int_{-\pi}^\pi e^{-\mathrm{i} \xi \cdot x} f(x) \mathrm{d}{x}, \\ \check{g}(x) & \coloneqq & \left(\mathcal{F}^{(-1)} g\right)(x) = \iv{\sqrt{2 \pi}} \sum_{\xi \in {\mathbb Z}} e^{\mathrm{i} \xi \cdot x} g(\xi). \end{eqnarray*} One has $\mathcal{F} (f \ast g) = \sqrt{2 \pi} \hat{f} \hat{g}$. Furthermore, let $\jb{\xi} \coloneqq \sqrt{1 + \abs{\xi}^2}$ for any $\xi \in {\mathbb R}$ and $J^s w \coloneqq \mathcal{F}^{(-1)} \jb{\cdot}^s \mathcal{F} w$ for any $w \in (C^\infty({\mathbb T}))'$.
\subsection{Prerequisites} In this subsection, we present some technical results from the literature, needed for treatment of the quadratic nonlinearity. \begin{lem} \label{lem:transference} Let $p \in [1, \infty]$ and $\varepsilon \geq 0$. Then for any $w \in L^p({\mathbb T})$ one has \begin{equation*} \norm{w \ast \phi_\varepsilon}_{L^p({\mathbb T})} \leq \norm{w}_{L^p({\mathbb T})}. \end{equation*} \end{lem}
\begin{lem} \label{lem:ft_heat_one} Let $s \in {\mathbb R}$ and $w \in H^s({\mathbb T})$. Then \begin{equation*} \norm{w \ast \phi_\varepsilon}_{H^s({\mathbb T})} \leq \norm{w}_{H^s({\mathbb T})} \qquad \text{and} \qquad \norm{w \ast \phi_\varepsilon}_{\dot{H}^s({\mathbb T})} \leq \norm{w}_{\dot{H}^s({\mathbb T})} \qquad \forall \varepsilon \geq 0 \end{equation*} where we denote by $\dot{H}^s({\mathbb T})$ the homogeneous Sobolev norm on the torus. Furthermore, if $\varepsilon > 0$, then \begin{equation*} \norm{w \ast \phi_\varepsilon}_{H^s({\mathbb T})} \lesssim_{\varepsilon, s} \norm{w}_{L^2({\mathbb T})}. \end{equation*} \end{lem}
\begin{lem} \label{lem:app_BA} (Cf. \cite[Theorem 3.16]{brezis2011}.) Let $w^n \xrightarrow{n \to \infty} w$ in $L^2({\mathbb T})$ and assume that $\sup_{n \in {\mathbb N}} \norm{w^n}_{H^1({\mathbb T})} < \infty$. Then $w \in H^1({\mathbb T})$ and \begin{equation} \label{eqn:h1_bnd} \norm{w}_{H^1({\mathbb T})} \leq \liminf_{n \to \infty} \norm{w^n}_{H^1({\mathbb T})}, \qquad \norm{w}_{\dot{H}^1({\mathbb T})} \leq \liminf_{n \to \infty} \norm{w^n}_{\dot{H}^1({\mathbb T})}, \end{equation} and $w^n \rightharpoonup w$ in $H^1({\mathbb T})$, i.e. for any $u \in H^1({\mathbb T})$ one has \begin{equation} \label{eqn:weak_conv} \lim_{n \to \infty} \dup{w^n}{u}_{H^1({\mathbb T})} = \lim_{n \to \infty} \sum_{k \in {\mathbb Z}} \jb{k}^2 \overline{\hat{w}^n_k} \hat{u}_k = \dup{w}{u}_{H^1({\mathbb T})}. \end{equation} If additionally $\norm{w^n}_{H^1} \xrightarrow{n \to \infty} \norm{w}_{H^1}$, then $w^n \xrightarrow{n \to \infty} w$ in $H^1({\mathbb T})$. \end{lem}
In the following we are going to use the $X^{s,b}$ spaces on the torus where $s, b\in{\mathbb R}$. They are defined via the norm (see equation (3.49) in \cite{erdogan2016}) \begin{equation}
\|w\|_{X^{s,b}}=\|\langle k\rangle ^{s}\langle \tau+k^{2}\rangle ^{b}\hat{w}(\tau,k)\|_{L^{2}_{\tau}l^{2}_{k}}. \end{equation}
\begin{lem}[$X^{0, \frac{3}{8}} \hookrightarrow L^4({\mathbb T} \times {\mathbb R})$] (See \cite[Proposition 2.13]{tao2006}.) We have \label{lem:bourgain_lebesgue_embedding} \begin{equation*} \norm{w}_{L^4({\mathbb T} \times {\mathbb R})} \lesssim \norm{w}_{X^{0, \frac{3}{8}}} \end{equation*} for any $w \in \mathcal{S}({\mathbb R}, C^\infty({\mathbb T}))$. \end{lem}
\begin{lem}[$X_\delta^{s, b} \hookrightarrow C(H^s)$] \label{lem:bs_sobolev_embedding} (Cf. \cite[Lemma 3.9]{erdogan2016}.) Let $b > \iv{2}$ and $s \in {\mathbb R}$. \todo{On what does the implicit constant depend?}{Then} \begin{equation*} \norm{w}_{C([0, \delta], H^s({\mathbb T}))} \lesssim \norm{w}_{X_\delta^{s, b}}. \end{equation*} \end{lem}
\begin{lem}[Linear Schrödinger evolution in $X_\delta^{s, b}$] \label{lem:bs_linear_estimate} (Cf. \cite[Lemma 3.10]{erdogan2016}.) Let $b, s \in {\mathbb R}$, $\delta \in (0, 1]$ and $\eta$ a smooth cut-off in time. Then \begin{equation*} \norm{\eta(t) e^{\mathrm{i} t \partial_{x}^{2}} w_0}_{X_{\delta}^{s, b}} \lesssim \norm{w_0}_{H^s({\mathbb T})} \qquad \forall w_0 \in H^s({\mathbb T}). \end{equation*} \end{lem}
\begin{lem}[Treating the integral term in $X_\delta^{s, b}$] \label{lem:bs_integral_estimate} (Cf. \cite[Lemma 3.12]{erdogan2016}.) Let $b \in \left(\iv{2}, 1 \right]$, $s \in {\mathbb R}$ and $\delta \leq 1$. Set $b' \coloneqq b - 1$. Then \begin{equation*} \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} F(\tau) \mathrm{d}{\tau} }_{X_\delta^{s, b}} \lesssim_b \norm{F}_{X_\delta^{s, b'}} \qquad \forall F \in X_\delta^{s, b'}. \end{equation*} \end{lem}
\begin{lem}[Changing $b$ in $X_\delta^{s, b}$] \label{lem:bs_change_b} (Cf. \cite[Lemma 3.11]{erdogan2016}.) Let $b, b' \in \left(-\iv{2}, \iv{2} \right)$ with $b' < b$, $s \in {\mathbb R}$ and $\delta \in (0, 1]$. Then \begin{equation*} \norm{w}_{X_\delta^{s, b'}} \lesssim \delta^{b - b'} \norm{w}_{X_\delta^{s, b}} \qquad \forall w. \end{equation*} \end{lem} The next proposition appears in \cite{erdogan2016} for the case of the cubic nonlinearity and $\varepsilon = 0$. Since we need the corresponding result for (sub)quadratic nonlinearities which are more complicated than the algebraic cubic nonlinearity, we present the proof, too.
\begin{prop}[Control of the nonlinearity in $X_\delta^{s, b}$] \label{prop:bs_nonlinearity_control} (Cf. \cite[Proposition 3.26]{erdogan2016}.) Let $s \geq 0$ and $\varepsilon > 0$ or $\varepsilon = s = 0$. Then, for all $w_1, w_2$ we have \begin{eqnarray*} & & \norm{ \left(\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon - \left(\abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon }_{X_\delta^{s, -\frac{3}{8}}} \\ & \lesssim_{\varepsilon, s} & \left(\norm{w_1}^{\alpha - 1}_{X_\delta^{0, \frac{3}{8}}} + \norm{w_2}^{\alpha - 1}_{X_\delta^{0, \frac{3}{8}}}\right) \left(\norm{w_1 - w_2}_{X_\delta^{0, \frac{3}{8}}} \right). \end{eqnarray*} \end{prop} \begin{proof} Fix $w_1, w_2$. Then, by Plancherel theorem and duality in $L^2({\mathbb R} \times {\mathbb T})$, one has \begin{eqnarray*} & & \norm{ \left(\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon - \left(\abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon }_{X_\delta^{s, -\frac{3}{8}}} \\ & = & \sup_{\norm{w}_{X_\delta^{-s, \frac{3}{8}}} = 1} \abs{\dup{ \left(\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon) - \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon}{w}_{L^2({\mathbb R} \times {\mathbb T})}}. \end{eqnarray*} Fix any $w \in X_\delta^{-s, \frac{3}{8}}$ with $\norm{w}_{X_\delta^{-s, \frac{3}{8}}} = 1$. 
Then \begin{eqnarray*} & & \abs{\dup{ \left(\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon) - \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon }{w}_{L^2({\mathbb R} \times {\mathbb T})}} \\ & = & \abs{\dup{J^s \left[ \left(\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon) - \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon \right]}{J^{-s} w}_{L^2({\mathbb R} \times {\mathbb T})}} \\ & \leq & \norm{ \left(\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon) - \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon)\right) \ast (J^s \phi_\varepsilon) }_{L^\frac{4}{3}({\mathbb R} \times {\mathbb T})} \\ & & \cdot \norm{J^{-s} w}_{L^4({\mathbb R} \times {\mathbb T})} \\ & \lesssim_{\varepsilon, s} & \norm{\abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon) - \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon) }_{L^\frac{4}{3}({\mathbb R} \times {\mathbb T})} \underbrace{\norm{J^{-s} w}_{X_\delta^{0, \frac{3}{8}}} }_{= \norm{w}_{X_\delta^{-s, \frac{3}{8}}} = 1} \\ & \leq & \norm{ \abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} (w_1 \ast \phi_\varepsilon) - \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} (w_2 \ast \phi_\varepsilon) }_{L^\frac{4}{3}({\mathbb R} \times {\mathbb T})} \\ & \leq & \norm{\left( \abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1} + \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1} \right) ((w_1 - w_2) \ast \phi_\varepsilon) }_{L^\frac{4}{3}({\mathbb R} \times {\mathbb T})}, \end{eqnarray*} where, for the first estimate, we used Hölder’s inequality and Young’s inequality, Lemma \ref{lem:bourgain_lebesgue_embedding} for the second, and the size estimate \eqref{eqn:size_estimate} for the last inequality. 
Applying Hölder’s inequality again yields the upper bound \begin{equation*} \left(\norm{ \abs{w_1 \ast \phi_\varepsilon}^{\alpha - 1}}_{L^4({\mathbb R} \times {\mathbb T})} + \norm{ \abs{w_2 \ast \phi_\varepsilon}^{\alpha - 1}}_{L^4({\mathbb R} \times {\mathbb T})} \right) \norm{ (w_1 - w_2) \ast \phi_\varepsilon }_{L^2({\mathbb R} \times {\mathbb T})}. \end{equation*} For the first factor, we apply Hölder’s and Young’s inequalities as well as the embedding from Lemma \ref{lem:bourgain_lebesgue_embedding} and arrive at the upper bound of \begin{equation*} \norm{w_1}_{L^4({\mathbb R} \times {\mathbb T})}^{\alpha - 1} + \norm{w_2}_{L^4({\mathbb R} \times {\mathbb T})}^{\alpha - 1} \lesssim \norm{w_1}_{X^{0, \frac{3}{8}}}^{\alpha - 1} + \norm{w_2}_{X^{0, \frac{3}{8}}}^{\alpha - 1}. \end{equation*} For the second factor we use Young’s inequality and the definition of the norm in $X^{0, \frac{3}{8}}$ to arrive at the final estimate \begin{equation*} \Big(\norm{w_1}_{X^{0, \frac{3}{8}}}^{\alpha - 1} + \norm{w_2}_{X^{0, \frac{3}{8}}}^{\alpha - 1}\Big)\norm{w_1 - w_2}_{X^{0, \frac{3}{8}}}. \qedhere \end{equation*} \end{proof}
\subsection{Results} First, we consider local wellposedness: \begin{thm} \label{thm:lwp_torus} (Cf. \cite[Theorem 3.27]{erdogan2016} for the cubic NLS.) Let $\varepsilon > 0$ and $s \geq 0$ or $\varepsilon = s = 0$. Then the (smoothened) (sub)quadratic NLS \eqref{eqn:cauchy_nls_smooth_T} is \emph{locally} well-posed in $H^s({\mathbb T})$. \end{thm}
\begin{proof} It suffices to show that the right-hand side of \eqref{eqn:duhamel_nls_smooth_T} defines a contractive self-mapping $\mathcal{T}: M(R, \delta) \to M(R, \delta)$ for some $R, \delta > 0$, where \begin{equation*} M(R, \delta) \coloneqq
\set{w \in Y \big| \, \norm{w}_Y \leq R} \end{equation*} and $Y$ is a suitable subspace of $C([0, \delta], H^s({\mathbb T}))$.
We consider the case $s \geq 1$ first. Put $Y = C([0, \delta], H^s({\mathbb T}))$. Due to $e^{\mathrm{i} t \partial_{x}^{2}}$ being an isometry on $H^s({\mathbb T})$, for any $t \in {\mathbb R}$, and Lemma \ref{lem:ft_heat_one} we have \begin{eqnarray*} & & \norm{\mathcal{T} w}_Y \\ & \leq & \norm{e^{\mathrm{i} t \partial_{x}^{2}} (w_0 \ast \phi_\varepsilon)}_{H^s({\mathbb T})} + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_{x}^{2}} \left[ \left(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon \right] \mathrm{d}{\tau}}_Y \\ & \leq & \norm{w_0}_{H^s({\mathbb T})} + \delta \norm{(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)) \ast \phi_\varepsilon}_Y. \end{eqnarray*} This suggests the choice $R \approx \norm{w_0}_{H^s}$. Fix $\tau \in [0, \delta]$. Then, due to Lemma \ref{lem:ft_heat_one} and the embedding $H^s({\mathbb T}) \hookrightarrow L^\infty({\mathbb T})$, we have that \begin{eqnarray*} & & \norm{((\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)) \ast \phi_\varepsilon)(\cdot, \tau) }_{H^s({\mathbb T})} \\ & \lesssim_{\varepsilon, s} & \norm{(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon))(\cdot, \tau)}_{L^2({\mathbb T})} \\ & \leq & \norm{(w \ast \phi_\varepsilon)(\cdot, \tau) }_{L^\infty({\mathbb T})}^{\alpha - 1} \norm{(w \ast \phi_\varepsilon)(\cdot, \tau)}_{L^2({\mathbb T})} \\ & \lesssim & R^\alpha. \end{eqnarray*} By the above, the condition $\norm{\mathcal{T} w}_Y \leq R$ is satisfied, if $\delta \lesssim_{\varepsilon, s} R^{1 - \alpha}$. The contraction property of $\mathcal{T}$ is shown in the same way, possibly requiring a smaller implicit constant in the last inequality.
In the case $s \in [0, 1)$ and $\varepsilon > 0$, consider any $b \in \left(\iv{2}, \frac{5}{8} \right)$ and put $Y = X_{\delta}^{s, b}$ (by Lemma \ref{lem:bs_sobolev_embedding} one indeed has $Y \hookrightarrow C([0, \delta], H^s({\mathbb T}))$). Then, by the triangle inequality and Lemmata \ref{lem:bs_linear_estimate} and \ref{lem:bs_integral_estimate} we have \begin{eqnarray*} & & \norm{\mathcal{T} w}_{X_{\delta}^{s, b}} \\ & \leq & \norm{e^{\mathrm{i} t \partial_{x}^{2}} (w_0 \ast \phi_\varepsilon)}_{X_{\delta}^{s, b}} + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_x^2} \left[ \left( \abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon) \right) \ast \phi_\varepsilon \right] \mathrm{d}{\tau}}_{X_{\delta}^{s, b}} \\ & \lesssim & \norm{w_0}_{H^s({\mathbb T})} + \norm{\left(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon}_{X_\delta^{s, b - 1}}. \end{eqnarray*} This estimate suggests $R \approx \norm{w_0}_{H^s({\mathbb T})}$. For the second summand, apply Lemma \ref{lem:bs_change_b} and Proposition \ref{prop:bs_nonlinearity_control} (with $w = 0$) to obtain the upper bound \begin{eqnarray} \nonumber \norm{ \left(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon}_{X_\delta^{s, b - 1}} & \lesssim & \delta^{1 - b - \frac{3}{8}} \norm{ \left(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)\right) \ast \phi_\varepsilon}_{X_\delta^{s, -\frac{3}{8}}} \\ \label{eqn:bourgain_lebesgue_control} & \lesssim & \delta^{1 - b - \frac{3}{8}} \norm{w}_{X_\delta^{0, \frac{3}{8}}}^{\alpha - 1} \norm{w}_{X_\delta^{s, \frac{3}{8}}} \\ \nonumber & \leq & \delta^{1 - b - \frac{3}{8}} \norm{w}_{X_\delta^{s, \frac{3}{8}}}^\alpha \leq \delta^{1 - b - \frac{3}{8}} R^\alpha. \end{eqnarray} As the exponent of $\delta$ is positive, we can choose $\delta$ small enough to make $\mathcal{T}$ a self-mapping of $M(R, \delta)$. 
The fact that $\mathcal{T}$ is contractive is proven similarly, possibly requiring a smaller $\delta$.
The remaining case $\varepsilon = s = 0$ is treated exactly as the last case. \end{proof}
In order to prove the conservation laws, we need to be able to approximate by smooth solutions. \begin{lem}[Smooth solutions for smooth initial data] \label{lem:smoothness_smooth_nls_T} (Cf. \cite[Proposition 3.11]{tao2006}.) Let $\varepsilon > 0$ and $w_0 \in L^2({\mathbb T})$, and let $w$ denote the unique solution of \eqref{eqn:duhamel_nls_smooth_T}. Then $w \in C([0, \delta], H^\infty({\mathbb T}))$ and for any $s > \iv{2}$ one has \begin{equation} \label{eqn:gronwall_nls_smooth} \norm{w}_{L_t^\infty H_x^s} \leq C \norm{w_0}_{L^2} e^{Ct \norm{w}^{\alpha - 1}_{L_t^\infty H^1}} \end{equation} for some $C = C(\varepsilon, s) > 0$. \end{lem} \begin{proof} As $w$ is the solution to \eqref{eqn:duhamel_nls_smooth_T}, one immediately has \begin{eqnarray*} \norm{w(\cdot, t)}_{H^s} & \leq & \norm{w_0 \ast \phi_\varepsilon}_{H^s} + \int_0^t \norm{(\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)) \ast \phi_\varepsilon}_{H^s} \mathrm{d}{\tau} \\ & \lesssim_{\varepsilon, s} & \norm{w_0}_{L^2} + \int_0^t \norm{\abs{w \ast \phi_\varepsilon}^{\alpha - 1} (w \ast \phi_\varepsilon)}_{L^2} \mathrm{d}{\tau} \\ & \leq & \norm{w_0}_{L^2} + \norm{w \ast \phi_\varepsilon}_{L^\infty_t L^\infty_x}^{\alpha - 1} \int_0^t \norm{w \ast \phi_\varepsilon}_{L^2} \mathrm{d}{\tau} \\ & \lesssim & \norm{w_0}_{L^2} + \norm{w}_{L^\infty_t H^1}^{\alpha - 1} \int_0^t \norm{w}_{H^s} \mathrm{d}{\tau}. \end{eqnarray*} Now \eqref{eqn:gronwall_nls_smooth} follows from Lemma \ref{lem:gronwall_integral}. \end{proof}
\begin{thm} \label{thm:nls_smooth_global_hs} Let $\varepsilon > 0$ and $s \in [1, \infty)$. Then the smoothened NLS \eqref{eqn:cauchy_nls_smooth_T} is \emph{globally} well-posed in $H^s({\mathbb T})$. \end{thm} \begin{proof} Local well-posedness has already been shown in Theorem \ref{thm:lwp_torus} and it remains to show that the solution $w$ exists globally. By the blow-up alternative, it suffices to see that $\norm{w(\cdot, t)}_{H^s({\mathbb T})}$ cannot explode. Moreover, by Lemma \ref{lem:smoothness_smooth_nls_T} it suffices to consider $s = 1$. By the same lemma, one has that $w \in C([0, \delta], H^\infty({\mathbb T}))$ and in particular, $w \in C^1([0, \delta], H^1({\mathbb T}))$. Hence, the energy conservation (cf. \cite[Equations (3.14) and (1.18)]{ginibre1979}) \begin{equation} \label{eqn:energy} E_\varepsilon(w(\cdot, t)) \coloneqq \int_{{\mathbb T}} \iv{2} \abs{\nabla w(x, t)}^2 \mp \iv{\alpha + 1} \abs{(w \ast \phi_\varepsilon) (x, t)}^{\alpha + 1} \mathrm{d}{x} = E_\varepsilon(w_0 \ast \phi_\varepsilon) \end{equation} is applicable to $w$. But \begin{equation} \label{eqn:homogeneous_h1_energy} \norm{w(\cdot, t)}_{H^1({\mathbb T})}^2 = \norm{w_0}_2^2 + 2 E_\varepsilon(w_0 \ast \phi_\varepsilon) \pm \frac{2}{\alpha + 1} \norm{w(\cdot, t)}_{\alpha + 1}^{\alpha + 1} \end{equation} and so $\norm{w(\cdot, t)}_{\dot{H}^1({\mathbb T})}$ is controlled by $E_\varepsilon(w_0 \ast \phi_\varepsilon)$ in the defocusing case. In the focusing case we can assume w.l.o.g. that $\norm{w(\cdot, t)}_{\dot{H}^1({\mathbb T})}^2$ is an unbounded function of $t$, (otherwise, there is nothing to show) and say that $\norm{w(\cdot, t)}_{\dot{H}^1({\mathbb T})}^2$ is large. Then, by the Gagliardo-Nirenberg inequality from \cite[Chapter 8, Eqn. 
(42)] {brezis2011}, we have \begin{equation} \label{eqn:app_gn} \norm{w(\cdot, t)}_{\alpha + 1}^{\alpha + 1} \lesssim \norm{w(\cdot, t)}_2^{\frac{\alpha + 3}{2}} \norm{w(\cdot, t)}_{H^1({\mathbb T})}^{\frac{\alpha - 1}{2}} \leq \iv{2} \norm{w(\cdot, t)}_{H^1({\mathbb T})}^2, \end{equation} where above we additionally used the mass conservation \begin{equation*} \norm{w(\cdot, t)}_{L^2({\mathbb T})} = \norm{w(\cdot, 0)}_{L^2({\mathbb T})}. \end{equation*} Hence, inserting \eqref{eqn:app_gn} into \eqref{eqn:homogeneous_h1_energy} and rearranging the inequality shows that the quantity $\norm{w(\cdot, t)}_{H^1({\mathbb T})}^2$ is bounded, in contradiction to the assumption. This completes the proof. \end{proof}
\begin{thm} \label{thm:gwp_nls_T_l2} (Cf. \cite[Theorem 3.28]{erdogan2016} for the cubic NLS.) The Cauchy problem for the (sub)quadratic periodic NLS (\eqref{eqn:cauchy_nls_torus} with $\alpha \in [1, 2]$) is \emph{globally} well-posed in $L^2({\mathbb T})$ and the solution $w$ enjoys mass conservation $\norm{w(\cdot, t)}_{L^2({\mathbb T})} = \norm{w_0}_{L^2({\mathbb T})}$. \end{thm} \begin{proof} Local well-posedness has already been shown in Theorem \ref{thm:lwp_torus}. Let $w$ denote this local solution. By the blow-up alternative, it suffices to show mass conservation. To that end, let us denote by $w^\varepsilon$ the global solution of \eqref{eqn:cauchy_nls_smooth_T} for $\varepsilon > 0$ from Theorem \ref{thm:nls_smooth_global_hs}. We will show that for any $b \in \left(\iv{2}, \frac{5}{8}\right)$ one has $\norm{w^\varepsilon - w}_{X_\delta^{0, b}} \to 0$ as $\varepsilon \to 0+$. To that end, notice that \begin{eqnarray} \label{eqn:l2_conv} & & \norm{w^\varepsilon - w}_{X_\delta^{0, b}} \\ \nonumber & \leq & \norm{e^{\mathrm{i} t \partial_{x}^{2}} (w_0 \ast \phi_\varepsilon - w_0) }_{X_\delta^{0, b}} \\ & & + \norm{\int_0^t e^{\mathrm{i} (t - \tau) \partial_x^2} \left[ (\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon)) \ast \phi_\varepsilon - \abs{w}^{\alpha - 1}w \right] \mathrm{d}{\tau} }_{X_\delta^{0, b}} \\ \nonumber & \lesssim & \norm{w_0 \ast \phi_\varepsilon - w_0}_{L^2({\mathbb T})} + \norm{ (\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon)) \ast \phi_\varepsilon - \abs{w}^{\alpha - 1}w }_{X_\delta^{0, b - 1}} \\ \nonumber & \lesssim & \norm{w_0 \ast \phi_\varepsilon - w_0}_{L^2({\mathbb T})} + \delta^{1 - b} \norm{ (\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon)) \ast \phi_\varepsilon - \abs{w}^{\alpha - 1}w }_{X_\delta^{0, 0}}, \end{eqnarray} where we used the fact that $w$ and $w^\varepsilon$ solve the corresponding 
fixed-point equations and Lemmata \ref{lem:bs_linear_estimate}, \ref{lem:bs_integral_estimate} and \ref{lem:bs_change_b}.
For the first summand, observe that, by Plancherel’s theorem, \begin{equation*} \norm{w_0 \ast \phi_\varepsilon - w_0}_{L^2({\mathbb T})} = \norm{ \left(\hat{w}_0(k) (\sqrt{2 \pi} \hat{\phi}_\varepsilon(k) - 1) \right)_k }_{l^2({\mathbb Z})} \end{equation*} and the right-hand side above converges to $0$ as $\varepsilon \rightarrow 0+$ by the dominated convergence theorem and the definition of $\phi_\varepsilon$.
For the second summand, note that $X_\delta^{0, 0} = L^2([0, \delta] \times {\mathbb T})$ and hence \begin{eqnarray*} & & \norm{ (\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon)) \ast \phi_\varepsilon - \abs{w}^{\alpha - 1}w }_{L^2([0, \delta] \times {\mathbb T})} \\ & \leq & \norm{ (\abs{w}^{\alpha - 1}w) \ast \phi_\varepsilon - \abs{w}^{\alpha - 1}w }_{L^2([0, \delta] \times {\mathbb T})} \\ & & + \norm{ (\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon) - \abs{w}^{\alpha - 1}w) \ast \phi_\varepsilon }_{L^2([0, \delta] \times {\mathbb T})}. \end{eqnarray*} The first summand above goes to zero due to $(\phi_\varepsilon)_\varepsilon$ being an approximation to the identity on $L^{2 \alpha}({\mathbb T})$. The other summand is further estimated by \begin{eqnarray} \nonumber & & \norm{ (\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon) - \abs{w}^{\alpha - 1}w) \ast \phi_\varepsilon }_{L^2([0, \delta] \times {\mathbb T})} \\ \nonumber & \leq & \norm{ \abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} (w^\varepsilon \ast \phi_\varepsilon) - \abs{w}^{\alpha - 1}w }_{L^2([0, \delta] \times {\mathbb T})} \\ \nonumber & \lesssim & \norm{\left(\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} + \abs{w}^{\alpha - 1} \right) (w^\varepsilon \ast \phi_\varepsilon - w) }_{L^2([0, \delta] \times {\mathbb T})} \\ \nonumber & \leq & \norm{\left(\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} + \abs{w}^{\alpha - 1} \right) [(w^\varepsilon - w) \ast \phi_\varepsilon] }_{L^2([0, \delta] \times {\mathbb T})} \\ & & \label{eqn:remaining_term} + \norm{\left(\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} + \abs{w}^{\alpha - 1} \right) (w \ast \phi_\varepsilon - w) }_{L^2([0, \delta] \times {\mathbb T})}. \end{eqnarray} Let us introduce the set $A^\varepsilon = \set{\abs{w^\varepsilon} \ast \phi_\varepsilon \leq 1}$. 
Then the first summand above is further estimated by \begin{eqnarray*} & & \norm{\left(\abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} + \abs{w}^{\alpha - 1} \right) [(w^\varepsilon - w) \ast \phi_\varepsilon] }_{L^2([0, \delta] \times {\mathbb T})} \\ & \leq & \norm{ \left(\mathbbmss{1}_{A^\varepsilon} \abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} + \mathbbmss{1}_{A^0} \abs{w}^{\alpha - 1} \right) [(w^\varepsilon - w) \ast \phi_\varepsilon] }_{L^2([0, \delta] \times {\mathbb T})} \\ & & + \norm{ \left(\mathbbmss{1}_{(A^\varepsilon)^c} \abs{w^\varepsilon \ast \phi_\varepsilon}^{\alpha - 1} + \mathbbmss{1}_{(A^0)^c} \abs{w}^{\alpha - 1} \right) [(w^\varepsilon - w) \ast \phi_\varepsilon] }_{L^2([0, \delta] \times {\mathbb T})} \\ & \leq & \norm{w^\varepsilon - w}_{L^2([0, \delta] \times {\mathbb T})} + \norm{w}_{L^4([0, \delta] \times {\mathbb T})} \norm{w^\varepsilon - w}_{L^4([0, \delta] \times {\mathbb T})} \\ & \lesssim & \left(1 + \norm{w}_{X_\delta^{0, b}} \right) \norm{w^\varepsilon - w}_{X_\delta^{0, b}}, \end{eqnarray*} where we used Hölder’s and Young’s inequalities for the penultimate estimate and Lemma \ref{lem:bourgain_lebesgue_embedding} for the last step. Recall that in front of this term is $\delta^{1 - b}$ and, w.l.o.g., $\delta \ll 1$. Hence we can just move it to the left-hand side of \eqref{eqn:l2_conv}. The treatment of the last remaining term \eqref{eqn:remaining_term} does not require any new techniques.
By the above, $\norm{w^\varepsilon - w}_{X_\delta^{0, b}} \to 0$ as $\varepsilon \to 0+$. Applying Lemma \ref{lem:bs_sobolev_embedding}, we see that \begin{eqnarray*} \norm{w}_{C([0, T], L^2({\mathbb T}))} & \leq & \limsup_{\varepsilon \to 0+} \left[ \norm{w^\varepsilon - w}_{X_\delta^{0, b}} + \norm{w^\varepsilon}_{C([0, T], L^2({\mathbb T}))} \right] \\ & \leq & \limsup_{\varepsilon \to 0+} \left[ \norm{w_0 \ast \phi_\varepsilon}_{L^2({\mathbb T})} \right] = \norm{w_0}_{L^2({\mathbb T})} \end{eqnarray*} and hence the solution $w$ indeed enjoys mass conservation. This finishes the proof. \end{proof} In addition to mass conservation, we also have conservation of the energy. \begin{thm} \label{thm:gwp_cauchy_quadratic_nls_torus} (Cf. \cite[Theorem 3.1]{ginibre1979} and \cite[Theorem 2.1]{lebowitz1988}.) The Cauchy problem for the (sub)quadratic periodic NLS (\eqref{eqn:cauchy_nls_torus} with $\alpha \in [1, 2]$) is \emph{globally} well-posed in $H^1({\mathbb T})$ and the solution $w$ enjoys energy conservation $E(w(\cdot, t)) = E(w_0)$. \end{thm} \begin{rem} In \cite{lebowitz1988} it is claimed that the quadratic NLS is globally well-posed on the torus. They refer to \cite{ginibre1979}, where it is done on the real line. While our proof of Theorem \ref{thm:gwp_cauchy_quadratic_nls_torus} borrows some ideas from \cite{ginibre1979}, we believe that in order to be able to do the torus case, one needs the result of Bourgain \cite{bourgain1993a}, in particular, the Bourgain spaces, which appeared 5 years after \cite{lebowitz1988}. \end{rem} \begin{proof} Let $w_0 \in H^1({\mathbb T}) \subseteq L^2({\mathbb T})$. By Theorem \ref{thm:gwp_nls_T_l2}, the (sub)quadratic periodic NLS has the unique global solution $w \in C_{\text{b}}({\mathbb R}, L^2({\mathbb T}))$. It remains to show that $w \in C_{\text{b}}({\mathbb R}, H^1({\mathbb T}))$. 
To show that for any $t \in {\mathbb R}$ one has $w(\cdot, t) \in H^1({\mathbb T})$ we first prove that \begin{equation} \label{eqn:ts_h1_bnd} \sup_{\varepsilon > 0} \norm{w^\varepsilon}_{C({\mathbb R}, H^1({\mathbb T}))} < \infty. \end{equation} By calculations similar to those in the proof of Theorem \ref{thm:gwp_nls_T_l2}, it suffices to prove the corresponding bound for the energy $E_\varepsilon(w^\varepsilon(\cdot, t))$.
To that end, let $w^\varepsilon$ be the unique global solution of the modified NLS \eqref{eqn:cauchy_nls_smooth_T} for $\varepsilon > 0$ from Theorem \ref{thm:nls_smooth_global_hs}. The energy conservation from Equation \eqref{eqn:energy} implies \begin{equation*} E_\varepsilon(w^\varepsilon(\cdot, t)) = E_\varepsilon(w_0 \ast \phi_\varepsilon) = \iv{2} \norm{w_0 \ast \phi_\varepsilon}_{\dot{H}^1({\mathbb T})}^2 \mp \iv{\alpha + 1} \norm{w_0 \ast \phi_\varepsilon}_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1}. \end{equation*} Observe that by Lemma \ref{lem:ft_heat_one} the first summand above satisfies \begin{equation*} \norm{w_0 \ast \phi_\varepsilon}_{\dot{H}^1({\mathbb T})}^2 \leq \norm{w_0}_{\dot{H}^1({\mathbb T})}^2. \end{equation*} If the sign of the second summand is negative (focusing case), there is nothing left to do. If the sign is positive (defocusing case), one has \begin{equation*} \norm{w_0 \ast \phi_\varepsilon}_{\alpha + 1}^{\alpha + 1} \leq \norm{w_0}_{\alpha + 1}^{\alpha + 1} \leq \norm{w_0}_{L^\infty({\mathbb T})}^{\alpha - 1} \norm{w_0}_{L^2({\mathbb T})}^2 \leq \norm{w_0}_{H^1({\mathbb T})}^{\alpha + 1} \end{equation*} by Lemma \ref{lem:transference}. Therefore, the bound \eqref{eqn:ts_h1_bnd} holds.
Assume for now that $t \in [0, \delta]$, where $\delta$ is the guaranteed time of existence of $w$ in $L^2({\mathbb T})$. From the proof of Theorem \ref{thm:gwp_nls_T_l2}, one has that \begin{equation} \label{eqn:l2_unif_conv} \lim_{\varepsilon \to 0+} \norm{w^\varepsilon - w}_{C([0, T], L^2({\mathbb T}))} = 0. \end{equation} Hence, from Equations \eqref{eqn:ts_h1_bnd} and \eqref{eqn:l2_unif_conv} and Lemma \ref{lem:app_BA} it follows that \begin{equation*} \norm{w(\cdot, t)}_{H^1({\mathbb T})} \leq \liminf_{\varepsilon \to 0+} \norm{w^\varepsilon(\cdot, t)}_{H^1({\mathbb T})} < \infty. \end{equation*} Observe that, by the above, we have \begin{eqnarray*} & & \norm{w^\varepsilon(\cdot, t) \ast \phi_\varepsilon - w(\cdot, t) }_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} \\ & \lesssim & \norm{(w^\varepsilon(\cdot, t) - w(\cdot, t)) \ast \phi_\varepsilon}_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} + \norm{w(\cdot, t) \ast \phi_\varepsilon - w(\cdot, t) }_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} \\ & \leq & \left(\norm{w^\varepsilon(\cdot, t)}_{L^\infty}^{\alpha - 1} + \norm{w(\cdot, t)}_{L^\infty}^{\alpha - 1}\right) \norm{w^\varepsilon(\cdot, t) - w(\cdot, t)}_{L^2({\mathbb T})}^2 \\ & & + \norm{w(\cdot, t) \ast \phi_\varepsilon - w(\cdot, t) }_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} \xrightarrow{\varepsilon \to 0+} 0 \end{eqnarray*} and hence \begin{equation*} E_0(w(\cdot, t)) \leq \liminf_{\varepsilon \to 0+} E_\varepsilon(w^\varepsilon(\cdot, t)) \leq E_0(w_0). \end{equation*} Interchanging $0$ and $t$ shows the reverse inequality and proves the energy conservation $E_0(w_0) = E_0(w(\cdot, t))$.
Reiterating the argument proves that $w \in L^\infty({\mathbb R}, H^1({\mathbb T}))$. It remains to show that $w \in C({\mathbb R}, H^1({\mathbb T}))$. To that end, observe that $t \mapsto w(\cdot, t)$ is weakly continuous in $L^2({\mathbb T})$. But, by the above, $\sup_{t \in {\mathbb R}} \norm{w(\cdot, t)}_{H^1({\mathbb T})} < \infty$ and hence $t \mapsto w(\cdot, t)$ is weakly continuous in $H^1({\mathbb T})$. By the observation \begin{equation*} \norm{w(\cdot, t) - w(\cdot, s)}_{H^1({\mathbb T})}^2 = \norm{w(\cdot, t)}_{H^1({\mathbb T})}^2 + \norm{w(\cdot, s)}_{H^1({\mathbb T})}^2 - 2 \operatorname{Re} \dup{w(\cdot, t)}{w(\cdot, s)}_{H^1({\mathbb T})}, \end{equation*} it is enough to show that $t \mapsto \norm{w(\cdot, t)}_{H^1({\mathbb T})}$ is continuous. (See \cite[Proposition 3.32]{brezis2011} for this result in a more general setting.)
To that end, observe that by the mass and energy conservation we have \begin{eqnarray*} \norm{w(\cdot, t)}_{H^1({\mathbb T})}^2 & = & 2 E(w(\cdot, t)) \pm \frac{2}{\alpha + 1} \norm{w(\cdot, t) }_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} + \norm{w(\cdot, t)}_{L^2({\mathbb T})}^2 \\ & = & 2 E_0(w_0) \pm \frac{2}{\alpha + 1} \norm{w(\cdot, t) }_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} + \norm{w_0}_{L^2({\mathbb T})}^2. \end{eqnarray*} Moreover, for any $t, s \in {\mathbb R}$ we have \begin{eqnarray*} & & \abs{\norm{w(\cdot, t)}_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1} - \norm{w(\cdot, s)}_{L^{\alpha + 1}({\mathbb T})}^{\alpha + 1}} \\ & \lesssim & \int_{{\mathbb T}} \abs{w(x, t) - w(x, s)} \left(\abs{w(x, t)}^\alpha + \abs{w(x, s)}^\alpha \right) \mathrm{d}{x} \\ & \lesssim & \norm{w}_{L^\infty({\mathbb R}, H^1({\mathbb T}))}^\alpha \norm{w(\cdot, t) - w(\cdot, s)}_{L^2({\mathbb T})}. \end{eqnarray*} The fact that $w \in C_{\text{b}}({\mathbb R}, L^2({\mathbb T}))$ concludes the argument. \end{proof}
\section*{Acknowledgments} Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) – Project-ID 258734477 – SFB 1173. Dirk Hundertmark thanks Alfried Krupp von Bohlen und Halbach Foundation for their financial support.
\printbibliography
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Infinite-dimensional stochastic differential equations related to Bessel random point fields }
\author{Ryuichi Honda, Hirofumi Osada \\ {\small (Accepted in Stochastic Processes and Their Applications)} }
\address{Faculty of Mathematics, Kyushu University, Fukuoka 819-0395, Japan }
\begin{abstract}
We solve the infinite-dimensional stochastic differential equations (ISDEs) describing an infinite number of Brownian particles in $ \mathbb{R}^+$ interacting through the two-dimensional Coulomb potential. The equilibrium states of the associated unlabeled stochastic dynamics are Bessel random point fields. To solve these ISDEs, we calculate the logarithmic derivatives, and prove that the random point fields are quasi-Gibbsian. \end{abstract}
\begin{keyword}
Interacting Brownian particles \sep Bessel random point fields \sep random matrices \sep infinite-dimensional stochastic differential equations \sep Coulomb potentials \sep hard edge scaling limit
\MSC 82C22 \sep 15A52 \sep 60J60 \sep 60K35 \sep 82B21
\end{keyword} \end{frontmatter}
\section{ Introduction }\label{s:1}
The Bessel random point fields $ \mub $ ($ -1 < \alpha < \infty $) are probability measures on the configuration space $ \SSS $ over $ \SS = \ZI $, whose $ n $-point correlation functions $ \rbn $ (see \eqref{:20c}) with respect to the Lebesgue measure are given by \begin{align} \label{:10a} & \rbn (x_1,\ldots,x_n) = \det [\Ka (x_i,x_j)]_{1 \le i,j \le n} .\end{align} Here, $ \Ka (x,y)$ is a continuous function called the Bessel kernel defined with the Bessel function $ J_{\alpha }$ of order $ \alpha $ such that for $ x\not=y $ \begin{align}\label{:10b}
\Ka (x,y) & =
\frac{J_{\alpha } (\sqrt{x}) \sqrt{y} J_{\alpha }' (\sqrt{y}) -
\sqrt{x} J_{\alpha }' (\sqrt{x}) \sqrt{y} J_{\alpha }(\sqrt{y})
}{2(x-y)} \\ \notag & = \frac{\sqrt{x} J_{\alpha +1} (\sqrt{x}) J_{\alpha } (\sqrt{y}) -
J_{\alpha } (\sqrt{x}) \sqrt{y} J_{\alpha +1}(\sqrt{y})
}{2(x-y)} \end{align} and that for $ x=y $ \begin{align}& \label{:10bb} \Ka (x,x) = \frac{1}{4}\{ J_{\alpha } (\sqrt{x}) ^2 - J_{\alpha +1} (\sqrt{x}) J_{\alpha -1} (\sqrt{x}) \} .\end{align} Note that $ 0 \le \Ka \le \text{Id}$ as an operator on $ L^{2} (\SS ,dx ) $. By definition $ \mub $ are determinantal random point fields with Bessel kernels $ \Ka $ (see \cite{soshi.drpf}).
It is known that these random point fields arise as a scaling limit at the hard left edge of the distributions $ \mub ^{\n }$ of the spectrum of the Laguerre ensemble. The random point fields $ \mub $ represent the thermodynamic limit of the $ \n $-particle systems $ \mub ^{\n }$, whose labeled densities $ \mm _{\alpha }^{\n }(\mathbf{x}) d\mathbf{x}$ are given by \begin{align}\label{:13}& \mm _{\alpha }^{\n} (\mathbf{x}) = \frac{1}{\mathcal{Z} _{\alpha }^{\n }}
e^{-\sum_{i=1}^{\n }x_i/4\n } \prod_{j=1}^{\n }x_j^{\alpha }
\prod_{k<l}^{\n } |x_k-x_l|^{2 }
.\end{align} Very loosely, by taking $ \n $ to infinity, we obtain the following informal expression for the $ \mub $: \begin{align}\label{:14}& \mub (d\mathbf{x}) = \frac{1}{\mathcal{Z} _{\alpha }^{\infty }}
\prod_{j=1}^{\infty}x_j^{\alpha }
\prod_{k<l}^{\infty} |x_k-x_l|^{2 } \prod_{m=1}^{\infty} dx_m .\end{align} Hence we regard the $ \mub $ as random point fields
with free potentials $ \Phi _{\alpha } (x) = - \alpha \log x $ and interaction potential $ \Psi (x) = - 2\log |x|$. Unlike Ruelle's class of interaction potentials, one cannot justify this using the Dobrushin-Lanford-Ruelle (DLR) equations. Instead, we will proceed in terms of logarithmic derivatives in \tref{l:23}.
We next turn to the stochastic dynamics associated with the $ \mub ^{\n }$. To prevent the particles from hitting the origin, we suppose that $ 1 \le \alpha $ (\lref{l:81}). Then, from Eq.\! \eqref{:13}, it can be seen that the natural $ \n $-particle stochastic dynamics $ \mathbf{X}^{\n } = (X_t^{\n ,1},\ldots,X_t^{\n , \n})$ are given by the stochastic differential equations (SDEs) \begin{align}\label{:15}& dX_t^{\n ,i} = dB_t^{i} + \{ - \frac{1}{8\n } + \frac{\alpha }{2X_t^{\n ,i} } + \sum _{ j\not = i }^{\n } \frac{1}{X_t^{\n ,i} - X_t^{\n ,j}} \} dt \quad (1 \le i \le \n ) .\end{align} Hence, taking $ \n $ to infinity, we come to the ISDEs \begin{align} \label{:16} & dX_t^i = dB_t^i + \{ \frac{\alpha }{2X_t^i } + \sum _{ j\not = i }^{\infty} \frac{1}{X_t^i - X_t^j} \} dt \quad (i \in \N ) .\end{align}
The purpose of this paper is to solve these ISDEs in such a way that the equilibrium states of the associated unlabeled dynamics $ \mathsf{X}_t = \sum_{i=1}^{\infty} \delta _{X_t^i}$ are Bessel random point fields $ \mub $.
For a given free potential $ \Phi $ and interaction potential $ \Psi $, the interacting Brownian motions in infinite dimensions are the stochastic dynamics given by ISDEs of the form \begin{align}\label{:17a}& dX^i_t = dB^i_t + \frac{\beta }{2} \nabla \Phi (X_t^i) dt + \frac{\beta}{2} \sum_{j\not=i} \nabla \Psi (X^i_t,X^j_t) dt \quad (i\in\N ) .\end{align} Here, $ \{ B^i \}_{i\in\N } $ is a sequence of independent copies of $ d$-dimensional Brownian motions.
The study of interacting Brownian motions in infinite dimensions
was initiated by Lang \cite{lang.1}, \cite{lang.2}, and continued by Shiga \cite{shiga}, Fritz \cite{Fr}, Tanemura \cite{T2}, and others. In these works, $ \Psi $ is assumed to be a Ruelle type potential: that is, $ \Psi $ is super-stable and integrable at infinity. In addition, $ \Psi $ is assumed to be of class $ C^3_0$ (\cite{lang.1,lang.2,shiga,Fr}) or to decay exponentially at infinity with a hard core (\cite{T2,T-R}). Hence, polynomial decay potentials are excluded,
even from Ruelle's category.
Recently, an interesting class of random point fields has appeared from random matrix theory. This class includes the sine, Airy, and Bessel random point fields in one-dimensional space and the Ginibre random point field in two dimensions. These represent the thermodynamic limits of the distributions of Gaussian random matrices. There are many other such random point fields that emerge from random matrix theory, but these examples are of particular note. The sine, Airy, and Bessel random point fields describe the universality classes called bulk, soft-edge, and hard-edge scaling limits, respectively. The Ginibre random point field is rotation and translation invariant, and thus is the typical example in two dimensions.
In these random point fields, the interactions always have logarithmic potentials and therefore represent the outer side of the classical theory of interacting Brownian motions in infinite dimensions. In \cite{o.tp, o.isde, o.rm, o.rm2}, the second author (H.O.) developed the theory applicable to these examples. This theory asserts that the quasi-Gibbs property and the existence of logarithmic derivative $ \dmu $ of random point fields $ \mu $ together with marginal assumptions such as non-collision and non-explosion properties of tagged particles imply the existence of (weak) solutions of the ISDEs in Eq. \eqref{:26r}. In \cite{o.isde, o.rm, o.rm2}, he also gave a sufficient condition of the quasi-Gibbs property and the existence of a logarithmic derivative.
Nevertheless, verifying this sufficient condition for random point fields appearing in random matrix theory is a difficult problem, and the proof depends crucially on the specific property of each model.
In \cite{o.isde, o.rm}, H.O.\! proved these properties for the sine and Ginibre random point fields and solved ISDEs related to these random point fields in the sense of weak solutions. In \cite{o-t.airy}, H.O.\! and Tanemura prove the quasi-Gibbs property and calculate the logarithmic derivative of the Airy random point fields. In \cite{o-t.tail}, they develop a general theory on ISDEs that asserts the existence and pathwise uniqueness of strong solutions of ISDEs under the assumptions of the quasi-Gibbs property, the existence of logarithmic derivative, and other marginal assumptions.
The most important assumptions for the theories
in both \cite{o.tp,o.isde,o.rm,o.rm2} and \cite{o-t.tail}
are the same. These are the quasi-Gibbs property and the existence of a logarithmic derivative $ \dmu $ of the random point fields $ \mu $. Once these have been established, we can solve the ISDEs of Eq. \eqref{:26r} in the sense of weak solutions using \cite{o.tp, o.isde, o.rm, o.rm2} and in the sense of pathwise unique, strong solutions by \cite{o-t.tail}. Hence, ensuring that these two assumptions hold is an important issue.
In the present paper, we prove the quasi-Gibbs property and calculate the logarithmic derivative for the Bessel random point field. Applying the general theories in \cite{o.tp, o.isde, o.rm, o.rm2, o-t.tail}, we then solve the ISDE for the Bessel random point field with $ \beta = 2 $, which describes the remaining universality class in one dimension.
This paper is organized as follows: In \sref{s:2}, we establish the mathematical framework and state the main results (Theorems \ref{l:21}--\ref{l:24}). In \sref{s:3}, we prove Theorems \ref{l:21} and \ref{l:22} using Theorems \ref{l:23} and \ref{l:24} in combination with the general theory developed in \cite{o.tp,o.isde,o.rm,o.rm2,o-t.tail}. In \sref{s:4}, we set forth \tref{l:41} in preparation for \sref{s:5}, where we calculate the logarithmic derivatives of Bessel random point fields and prove \tref{l:23}. In \sref{s:6}, we prove that these are quasi-Gibbsian (\tref{l:24}). In \sref{s:7}, we prove \lref{l:50}. In \sref{s:8}, we prove \lref{l:81}.
\section{ Set up and main results}\label{s:2} Let $ \SS = \ZI $ and $ \Sr = \{ x\in \SS;\, x < r \} $. Let $$ \SSS = \{ \mathsf{s} = \sum _i \delta _{s_i}\, ;\, s_i \in \SS ,\, \mathsf{s} ( \Sr ) < \infty \text{ for all } r\in\N \} ,$$ where $ \delta _{a} $ stands for the delta measure at $ a $. We endow $ \SSS $ with the vague topology, under which $ \SSS $ is a Polish space. $ \SSS $ is called the configuration space over $ \SS $. We write $ \sss (x)=\sss (\{ x \} )$. Let \begin{align}\label{:20a}& \SSSsi = \{ \sss \in \SSS \, ;\, \, \sss (x)\le 1 \text{ for all }x \in \SS ,\, \, \sss (\SS )= \infty \} .\end{align} By definition, $ \SSSsi $ is the set of the configurations consisting of an infinite number of single point measures.
A symmetric locally integrable function $ \map{\rho ^n }{\SS ^n}{\ZI } $ is called the $ n $-point correlation function of a probability measure $ \mu $ on $ \SSS $ w.r.t.\ the Lebesgue measure if $ \rho ^n $ satisfies \begin{align}\label{:20c}& \int_{A_1^{k_1}\ts \cdots \ts A_m^{k_m}} \rho ^n (x_1,\ldots,x_n) dx_1\cdots dx_n
= \int _{\SSS } \prod _{i = 1}^{m} \frac{\mathsf{s} (A_i) ! } {(\mathsf{s} (A_i) - k_i )!} d\mu
\end{align} for any sequence of disjoint bounded measurable subsets $ A_1,\ldots,A_m \subset \SS $ and a sequence of natural numbers $ k_1,\ldots,k_m $ satisfying $ k_1+\cdots + k_m = n $. When $ \mathsf{s} (A_i) - k_i < 0$, according to our interpretation, ${\mathsf{s} (A_i) ! }/{(\mathsf{s} (A_i) - k_i )!} = 0$ by convention. It is known that under a mild condition $ \{ \rho ^n \}_{n \in \N }$ determines the measure $ \mu $ \cite{soshi.drpf}.
Let $ \mub $ be Bessel random point fields. By definition $ \mub $ are probability measures on $ \SSS $ whose $ n $-point correlation functions $ \rbn $ are given by \eqref{:10a}.
Let $ \map{\ulab }{\SS ^{\mathbb{N}} }{\SSS }$ be such that $ \ulab ((s_i)) = \sum_{i=1}^{\infty} \delta _{s_i}$. We call $ \ulab $ the unlabeling map.
A (weak) solution $ (\mathbf{X},\mathbf{B})$ of an ISDE starting at $ \mathbf{s}$ is called a strong solution if $ \mathbf{X}$ is a function of Brownian motion $ \mathbf{B}$ and the starting point $ \mathbf{s}$.
For a pair of Radon measures $ \mu $ and $ \nu $, we write $ \mu \prec \nu $ if $ \mu $ is absolutely continuous with respect to $ \nu $.
\begin{thm}\label{l:21} Assume that $ 1 \le \alpha < \infty $. The following then holds. \\ \thetag{1} For each $ \alpha $, there exists a set $ \SSSdys $ such that \begin{align}\label{:21a}& \mub (\SSSdys )= 1, \quad \SSSdys \subset \SSSsi ,\end{align} and that, for all $ \mathbf{s}\in \ulab ^{-1}(\SSSdys )$, there exists a $ \ZI ^{\N }$-valued continuous process $ \mathbf{X}=(X^i)_{i\in\N }$, and $ \R ^{\N }$-valued Brownian motion $ \mathbf{B}=(B^i)_{i\in\N }$ satisfying
\begin{align} \label{:21b} & dX_t^i = dB_t^i + \{ \frac{\alpha }{2X_t^i } + \sum _{ j\not = i }^{\infty} \frac{1}{X_t^i - X_t^j} \} dt \quad (i \in \N ) ,\\\label{:21c}& \mathbf{X}_0 = \mathbf{s} .\end{align}
Moreover, $ \mathbf{X}$ satisfies \begin{align}\label{:21f}& P (\ulab (\mathbf{X}_t) \in \SSSdys ,\ 0\le \forall t < \infty ) = 1 .\end{align} \thetag{2} For $ \mub \circ \lab ^{-1}$-a.s.\!\! $ \mathbf{s}$, $ (\mathbf{X},\mathbf{B})$ above is a strong solution of \eqref{:21b} and \eqref{:21c} such that \begin{align}\label{:21g}& \mub \circ \ulab (\mathbf{X}_t) ^{-1} \prec \mub \quad \text{ for all } t \in [0,\infty) .\end{align} Furthermore, the $ \mub $-strong uniqueness holds in the sense that any family of weak solutions satisfying \eqref{:21g} becomes the strong solution for $ \mub \circ \lab ^{-1}$-a.s.\!\! $ \mathbf{s}$, and that any two strong solutions $ (\mathbf{X}, \mathbf{B})$ and $ (\mathbf{X}',\mathbf{B})$ defined on the same Brownian motion $ \mathbf{B}$ starting at $ \mathbf{s} = \lab (\mathsf{s}) $ satisfying \eqref{:21g} are pathwise unique \begin{align}\label{:21h}& P(\mathbf{X}_t=\mathbf{X}_t' \text{ for all }t \ ) = 1 \end{align} for $ \mub \circ \lab ^{-1}$-a.s.\!\! $ \mathbf{s}$. Here $ \map{\lab }{\SSS }{[0,\infty)^{\N }}$ is the label introduced by \rref{r:21} \thetag{2}. \end{thm}
\begin{rem}\label{r:21} \thetag{1} When $ -1 < \alpha < 1 $, the left most particle hits the origin. Hence a coefficient coming from the boundary condition will appear in the ISDEs. Since we suppose $ 1 \le \alpha $, particles never hit the origin (see \lref{l:81}). It would be an interesting problem to study the case $ -1 < \alpha < 1$ where the boundary condition would appear. \\ \thetag{2} The correspondence $ \mathsf{s}\mapsto \mathbf{s}=(s_i)$ is called a label. In the case of Bessel random point fields, there exists a natural label such that $ s_{i} < s_{i+1}$ for all $ i \in \mathbb{N}$. As we see later, all the particles $ X_t^i$ never collide with each other for all $ t \in [0,\infty)$. Hence the initial label is kept forever. In particular, if we take $ X_0^i< X_0^{i+1}$ for all $ i \in \mathbb{N}$ initially, then $ X_t^i< X_t^{i+1}$ for all $ i \in \mathbb{N}$ and $ t \in [0,\infty)$. We denote this label by $ \lab $ in \thetag{2} of \tref{l:21}. \\\thetag{3} After submitting the first version of the manuscript, a general theory on the existence and uniqueness of strong solutions of infinite-dimensional stochastic differential equations has been developed and completed (see \cite{o-t.tail}).
When the first version was submitted, no preprint of the general theory was available.
It is now clarified and confirmed that the main assumptions required in \cite{o-t.tail} follow from the results of the present paper (Theorems \ref{l:23} and \ref{l:24}).
We noticed that the existence and uniqueness of strong solutions is obtained immediately by combining the general theory in \cite{o-t.tail} and the results in Theorems \ref{l:23} and \ref{l:24}. Hence, in the revised version, we newly add \thetag{2} of \tref{l:21}.
The original \tref{l:21} is renumbered as \tref{l:21} \thetag{1}. \end{rem}
A diffusion with state space $ S_0$ is a family of continuous stochastic processes with the strong Markov property starting at each point of the state space $ S_0$. In general, the notion of the Markov property depends on the filtration. We always consider the natural filtration in the present paper \cite{fot2}.
\begin{thm} \label{l:22} Assume that $ 1 \le \alpha < \infty $. Let $ \SSSSdys = \ulab ^{-1}(\SSSdys )$. Let $ \mathbf{P}_{\mathbf{s}}$ be the distribution of $ \mathbf{X}$ given by \tref{l:21}. Then $\{\mathbf{P}_{\mathbf{s}} \}_{\mathbf{s}\in\SSSSdys }$ is a diffusion with state space $ \SSSSdys $. \end{thm}
We deduce \tref{l:21} and \tref{l:22} from a general theory developed in \cite{o.tp,o.isde,o.rm,o.rm2,o-t.tail}. The key point for this is to calculate {\em the logarithmic derivative} of the measure $ \mub $ and to prove {\em the quasi-Gibbs property} of $ \mub $. These two notions play an important role in the proof of \tref{l:21} and \tref{l:22}.
The logarithmic derivative of $ \mub $ will be calculated in \tref{l:21}. We will use \tref{l:41} to prove \tref{l:21} in \sref{s:5}. The quasi-Gibbs property of $ \mub $ will be proved in \tref{l:24}. \tref{l:24} will be proved in \sref{s:6}.
To introduce the notion of the logarithmic derivative of random point fields we recall the definitions of reduced Palm measures and Campbell measures.
Let $ \mu $ be a probability measure on $ (\SSS , \mathcal{B}(\SSS ))$. A probability measure $ \mu _{\mathbf{x}} $ is called the reduced Palm measure conditioned at $ \mathbf{x} =(x_1,\ldots,x_k) \in \Sk $ if $ \mu _{\mathbf{x}} $ is the regular conditional probability defined by \begin{align}\label{:22x}&
\mu _{\mathbf{x}} = \mu (\cdot - \sum_{i=1}^{k} \delta _{x_i} | \ \sss ( x_i )\ge 1 \text{ for }i=1,\ldots,k) .\end{align}
Let $ \rho ^{k}$ be the $ k $-point correlation function of $ \mu $ with respect to the Lebesgue measure. Let $ \muk $ be the measure on $ \SkS $ defined by
\begin{align}\label{:22y}& \muk (A\ts B )= \int_{A}\mu _{\mathbf{x}} (B) \rho ^{k}( \mathbf{x} ) d \mathbf{x} .\end{align} Here we set $ d \mathbf{x} = dx_1\cdots dx_k $ for $ \mathbf{x} =(x_1,\ldots,x_k) \in \Sk $. The measure $ \muk $ is called the $ k$-Campbell measure.
\begin{dfn}\label{dfn:1} We call $ \dmu \in \Llocone (\muone ) $ the logarithmic derivative of $ \mu$ if $ \dmu $ satisfies \begin{align}\label{:22z}& \int_{\SoneSSS } \dmu f d \muone = - \int_{\SoneSSS } \PD{f(x,\sss )}{x} d \muone \quad \text{ for all } f \in C^{\infty}_{0} ((0,\infty))\ot C_b(\SSS ) .\end{align} \end{dfn} Very loosely, \eqref{:22z} can be written as $ \dmu = {\partial \log \muone (x,\sss )}/{\partial x} $. This intuitive expression is the reason why we call $ \dmu $ the logarithmic derivative of $ \mu $.
\begin{thm} \label{l:23} Assume that $ 1 \le \alpha < \infty $. Then $ \mub $ has the logarithmic derivative $ \dlog ^{\mub } \in \Lloctwo (\mub ^{1}) $ defined by \begin{align}\label{:b23a}& \dlog ^{\mub }(x,\mathsf{y})= \frac{\alpha }{x} + \sum_{i\in\N }\frac{2}{x-y_i} .\end{align} Here $\mathsf{y}=\sum_{i\in\N }\delta _{y_i}$. \end{thm}
\begin{rem}\label{r:23} Since we suppose $ 1 \le \alpha $, particles never hit the origin in \tref{l:23}. If $ -1 < \alpha < 1 $, then the left most particle hits the origin. Hence in the definition of logarithmic derivative, it would be more natural to take $ C^{\infty}_{0} ([0,\infty))\ot C_b(\SSS ) $ as a space of test functions. In this case, the logarithmic derivative contains a term arising from the boundary condition. Although it would be interesting to study this case, we do not pursue this here. \end{rem}
We next introduce the notion of the quasi-Gibbs property.
For two measures $ \nu _1,\nu _2 $ on a measurable space $ (\Omega , \mathcal{B})$ we write $ \nu _1 \le \nu _2 $ if $ \nu _1(A)\le \nu _2(A)$ for all $ A\in\mathcal{B}$. We say a sequence of finite Radon measures $ \{ \nu ^{\n } \} $ on a Polish space $ \Omega $ converge weakly to a finite Radon measure $ \nu $ if $ \lim_{ \n \to \infty} \int fd\nu ^{\n } = \int f d\nu $ for all $ f \in C_b(\Omega ) $.
Let $ \{ b_r \}_{r=1}^{\infty} $ be an increasing sequence of natural numbers. Let \begin{align*}& \text{$ \SSS _r^m = \{ \mathsf{x} \in \SSS \, ;\,
\mathsf{x} (\SS _{b_r} ) = m \} $ and
$ \Lambda _r^m = \Lambda (\cdot \cap \SSS _r^m )$} ,\end{align*} where $ \Lambda $ is the Poisson random point field
whose intensity is the Lebesgue measure.
We denote by $ \mathcal{H}_{ r }(\mathsf{x} ) $ the Hamiltonian on $ \SS _{b_r}$ such that \begin{align}\label{:qg5}& \mathcal{H}_{ r }(\mathsf{x} ) = \sum_{x_i\in \SS _{b_r}} \Phi (x_i) + \frac{1}{2} \sum_{x_i,x_j \in \SS _{b_r} ,\, i\not= j}
\Psi (x_i,x_j) .\end{align} Here we set $ \mathsf{x} = \sum_i \delta _{x_i}$. Let $ \map{\pi _r, \pi _r^c}{\SSS }{\SSS }$ be the maps such that \begin{align}& \notag \text{$ \pi _r (\mathsf{s})(\cdot ) = \mathsf{s}(\cdot \cap \SS _{b_r} )$ and
$ \pi _r^c (\mathsf{s})(\cdot ) = \mathsf{s}(\cdot \cap \SS _{b_r}^c )$. } \end{align}
\begin{dfn}\label{dfn:2} A probability measure $ \mu $ is said to be a $ (\Phi , \Psi ) $-quasi Gibbs measure if there exists an increasing sequence of natural numbers $ \{ b_r \}_{r=1}^{\infty} $ such that, for each $ r,m \in \N $ and for $ \mu _{r,k}^m $-a.e.\, $\sss \in \SSS $, there exists a sequence of Borel subsets $ \SSS _{r,k}^m $ satisfying \begin{align} & \notag \SSS _{r,k}^m \subset \SSS _{r,k+1}^m\subset \SSS _{r}^m \quad \text{ for all }k , \\ \notag & \limi{k} \mu (\cdot \cap \SSS _{r,k}^m ) = \mu (\cdot \cap \SSS _{r}^m ) \quad \text{ weakly} ,\end{align} and that $ \mu _{r,k}^m = \mu (\cdot \cap \SSS _{r,k}^m )$ satisfy, for each $ r,m , k \in \N $ and for $ \mu _{r,k}^m $-a.e.\, $\sss \in \SSS $,
\begin{align}\label{:qg2}& \cref{;2y}^{-1} e^{- \mathcal{H}_{ r }(\mathsf{x} ) } \Lambda _r^m (d\mathsf{x})
\le \mu _{r,k,\mathsf{s}}^{m}(d\mathsf{x}) \le \cref{;2y}
e^{- \mathcal{H}_{ r }(\mathsf{x} )} \Lambda _r^m (d\mathsf{x}) .\end{align} Here $\Ct \label{;2y} = \cref{;2y}(r,m,k,\pi _{ r }^c (\mathsf{s}))$ is a positive constant and $ \mu _{r,k,\mathsf{s}}^{m}$ is the regular conditional probability measure of $ \mu _{r,k}^m$ defined by \begin{align} \label{:qg4} & \mu _{r,k,\mathsf{s}}^{m}(d\mathsf{x}) = \mu _{r,k}^m(\pi _{ r }
\in d\mathsf{x} | \ \pi _{ r }^c ) (\mathsf{s}) .\end{align} \end{dfn}
The notion of quasi-Gibbsian is first introduced in \cite{o.rm}. The original definition of the quasi-Gibbs measures
is slightly more general than, but essentially the same as, the present version. We adopt here a restrictive version for the sake of simplicity.
We remark that we do not assume the symmetry of the interaction potential $ \Psi $. Hence we take the ordered summation of $ \Psi (x_i,x_j)$, and put $ 1/2$ in the sum of \eqref{:qg5}.
\begin{thm} \label{l:24} Let $ -1 < \alpha < \infty $.
Then $ \mub $ is a $ (\alpha \log x , 2 \log |x-y| ) $-quasi Gibbs measure. \end{thm}
Combining \tref{l:24} with a general theory \cite[Corollary 2.1]{o.rm}, we obtain a natural unlabeled $ \mub $-reversible diffusion $ (\mathsf{X},\mathsf{P})$.
\begin{thm} \label{l:25} Let $ -1 < \alpha < \infty $. Let $ \mathcal{E}^{\mub } $ and $ \di ^{\mub }$ be as in \eqref{:26j} and \eqref{:26J}
with $ k=0 $ and $ \mu = \mub $. Then $ (\mathcal{E}^{\mub },\di ^{\mub })$ is closable on $ L^2 (\SSS , \mub )$. There exists a diffusion $ (\mathsf{X},\mathsf{P})$ associated with the closure of $ (\mathcal{E}^{\mub },\di ^{\mub })$ on $ L^2 (\SSS , \mub )$. \end{thm}
\begin{rem}\label{r:25} \thetag{1} If $ -1 < \alpha < 1$, then the left most particle hits the origin. \\ \thetag{2} We write the diffusion $ \mathsf{X}$ in \tref{l:25} as $ \mathsf{X}_t = \sum_{i\in\N } \delta_{X_t^i}$. Since the particles never collide with each other, the infinite-dimensional labeled paths $ (X_t^i)_{i\in\N }$ are well defined. Then with suitable labeling of the unlabeled particles $ \mathsf{X}$ at time $ t=0$, the solution of the ISDE $ (X_t^i-X_0^i)_{i\in\N }$ becomes an infinite-dimensional additive functional of the unlabeled diffusion $ (\mathsf{X},\mathsf{P})$. We remark that this additive functional is {\em not} a Dirichlet process because {\em no} coordinate functions $ x_i $ ($i\in\N $)
belong to the domain of the Dirichlet form even if locally.
\\ \thetag{3} There are other approaches for this kind of unlabeled stochastic dynamics related to random matrix theory. See \cite{sp.2}, \cite{kt.cmp}, \cite{kt.11}, \cite{boro-ols.12}, \cite{boro-gorin.13}, and \cite{ols.11}. These approaches are more algebraic, and restricted to one dimensional system with inverse temperature $ \beta = 2 $. \\\thetag{4} Let $ \kpath $ be the map from $ C([0,\infty);\SS ^{\mathbb{N}})$
to $ C([0,\infty);\SSS )$ defined by \begin{align}\label{:20b}& \kpath (\mathbf{X}) = \{ \sum_{i=1}^{\infty} \delta _{X_t^i}\}_{t\in[0,\infty)} = \{\ulab (\mathbf{X}_t) \}_{t\in[0,\infty)} ,\end{align} where $ \mathbf{X} = \{(X^i_t)_{i=1}^k \} $. We set $ \mathsf{X} = \kpath (\mathbf{X})$. We call $ \mathbf{X} $ (resp. $ \mathsf{X} $) the labeled process (unlabeled process). Then the relation between the labeled process $ \mathbf{X}$ in \tref{l:21} and the unlabeled process $ \mathsf{X}$ in \tref{l:25} is that $ \kpath (\mathbf{X}) = \mathsf{X}$ for a suitable version of the processes with quasi-everywhere starting points. This identity is a corollary of \cite[Theorem 2.4]{o.tp}. \end{rem}
\section{Proof of Theorems \ref{l:21} and \ref{l:22}}\label{s:3} The purpose of this section is to prove Theorems \ref{l:21} and \ref{l:22}. For this we will use \tref{l:23} and \tref{l:24}, and a result from \cite{o.isde} in a reduced form being sufficient for the present problem.
Let $ \mathbf{X}=(X_t^i)_{i\in\mathbb{N}}$ be a labeled process as before. For $ k \in \{ 0 \}\cup \N $, the process $ (X_t^1,\ldots,X_t^k, \sum_{j>k}^{\infty}\delta_{X_t^j})$ is said to be a $ k$-labeled process. When $ k=0$, the $ k$-labeled process equals the unlabeled process $ \kpath (\mathbf{X}) $.
We introduce Dirichlet forms describing the $ k $-labeled process. For a subset $ A \subset \SS $ we define the map $ \map{\pi _{A }}{\SSS }{\SSS } $ by $ \pi _{A } (\sss ) = \sss ( A \cap \cdot ) $. We say a function $ \map{f}{\SSS }{\R } $ is local if $ f $ is $ \sigma[\pi _{ A }]$-measurable for some compact set $ A \subset \SS $. We say $ f $ is smooth if $ \tilde{f} $ is smooth, where $ \tilde{f}((s_i)) $ is the permutation invariant function in $ (s_i) $ such that $ f (\sss ) = \tilde{f} ((s_i)) $ for $ \sss = \sum _i \delta _{s_i} $.
Let $ \di $ be the set of all local, smooth functions on $ \SSS $. For $ f,g \in \di $ we set $ \map{\DDD [f,g]}{\SSS }{\R } $ by
\begin{align} \label{:30a} & \DDD [f,g](\sss ) = \frac{1}{2} \sum _{ i } \PD{\widetilde{f}(\mathbf{s})}{s_{i}} \PD{\widetilde{g}(\mathbf{s})}{s_{i}} .\end{align} Here $ \sss = \sum_{i}\delta _{s_i}$ and $ \mathbf{s}=(s_i)$. For given $ f $ and $ g $ in $ \di $, it is easy to see that the right-hand side of \eqref{:30a} depends only on $ \sss $. So $ \DDD [f,g]$ is well defined.
For $ f,g \in C_0^{\infty}(\Sk )\ot \di $ let $\nabla ^{k}[f,g]$ be the function on $ \SkS $ defined by \begin{align}\label{:26h}& \nabla ^{k} [f,g ] (\mathbf{x},\sss ) = \frac{1}{2} \sum _{j=1}^{k} \PD{f(\mathbf{x}, \sss )}{x_{j}}\PD{g(\mathbf{x}, \sss )}{x_{j}} .\end{align} where $\mathbf{x} =(x_j)\in \Sk $. We set $ \DDDk $ for $ k\ge 1$ by \begin{align}\label{:26i}& \DDDk [f,g](\mathbf{x},\sss ) = \nabla ^{k}[f,g] (\mathbf{x},\sss ) + \DDD [f(\mathbf{x},\cdot ),g(\mathbf{x},\cdot )](\sss ) .\end{align}
Let $ (\Eak ,\dik )$ be the bilinear form defined by \begin{align}\label{:26j} & \Eak (f,g) = \int_{\SkS } \DDDk [f,g] d\muk ,\\\label{:26J}& \dik = \{ f \in C_0^{\infty}(\Sk )\ot \di \cap L^2(\SS ^k \ts \SSS , \muk ) \, ;\, \, \Eak (f,f) < \infty \} .\end{align} When $ k=0 $, we take $ \DDDzero =\DDD $, $ \mu ^{0} = \mu $, and $ \Eazero = \Ea $. We set $ \Lm = L^2(\SSS , \mu)$ and $ \Lmuk = L^{2}(\SkS ,\muk ) $ and so on.
We assume that there exists a probability measure $ \mu $ on $ \SSS $ with correlation functions $ \{ \rho ^{k} \}_{k\in\N } $ satisfying \Ass{A.1}--\Ass{A.5}: \\ \Ass{A.1} $ \rho ^{k}$ is locally bounded for each $ k \in \N $. \\ \Ass{A.2} There exists a logarithmic derivative $ \dmu $ in the sense of \eqref{:22z}. \\% \Ass{A.3} $(\Eak ,\dik )$ is closable on $ \Lmuk $ for each $ k\in\{ 0 \}\cup \N $. \\ \Ass{A.4} $\mathrm{Cap}^{\mu } (\{\SSSsi \}^c) = 0 $. \\ \Ass{A.5} There exists a $ T>0 $ such that for each $ R>0 $ \begin{align}\label{:26p}&
\liminf_{r\to \infty}\ ( \{ \int_{|x|\le {r+R}} \rho ^1 (x)dx \} \{ \int_{\frac{r}{\sqrt{(r+R )T}}}^{\infty} e^{-u^{2}/2}du \} ) = 0 .\end{align}
Let $ (\Eak ,\dak )$ be the closure of $ (\Eak ,\dik )$ on $ \Lmuk $. It is known \cite[Lemma 2.3]{o.tp} that $ (\Eak ,\dak )$ is quasi-regular and that the associated diffusion $ (\PPk , \mathsf{X}^k )$ exists. We refer to \cite{mr} for the definition and necessary background of quasi-regular Dirichlet forms. We remark that $ \mathrm{Cap}^{\mu }$ in \Ass{A.4} is
the capacity of the Dirichlet space $(\Eazero ,\d ^{\mu } ,\Lm )$.
The assumptions \Ass{A.4} and \Ass{A.5} have clear dynamical interpretations. Indeed, \Ass{A.4} means that particles never collide with each other. Moreover, \Ass{A.5} means that no labeled particle ever explodes \cite{o.tp}.
We quote two theorems from \cite{o.isde}. \begin{thm}[{\cite[Theorem 26]{o.isde}}]\label{l:31} Assume \Ass{A.1}--\Ass{A.5}. Then there exists an $ \SSS _0 $ such that \begin{align}\label{:26q}& \mu (\SSS _0 )= 1, \quad \SSS _0 \subset \SSSsi ,\end{align} and that, for all $ \mathbf{s}\in \ulab ^{-1}(\SSS _0 )$, there exists an $\SS ^{\N }$-valued continuous process $ \mathbf{X}=(X^i)_{i\in\N }$, and $(\R )^{\N }$-valued Brownian motion $ \mathbf{B}=(B^i)_{i\in\N }$ satisfying \begin{align}\label{:26r}& dX^i_t = dB^i_t +\frac{1}{2}\dmu (X^i_t,\mathsf{X}^{i*}_t)dt \quad (i\in \N ) ,\\ \label{:26s}& \mathbf{X}_0 = \mathbf{s} .\end{align}
Moreover, $ \mathbf{X}$ satisfies \begin{align}\label{:26t}& P (\ulab (\mathbf{X}_t) \in \SSS_0 ,\ 0\le \forall t < \infty ) = 1 .\end{align} \end{thm}
\begin{thm}[{\cite[Theorem 27]{o.isde}}] \label{l:32} Let $ \SSSS _0 $ be the subset of $ \SS ^{\N }$ defined by $ \SSSS _0 =\ulab ^{-1}(\SSS _0 )$. Let $ \mathbf{P}_{\mathbf{s}}$ be the distribution of $ \mathbf{X}$ given by \tref{l:31}. Then $\{\mathbf{P}_{\mathbf{s}}\}_{\mathbf{s}\in\SSSS _0 }$ is a diffusion with state space $ \SSSS _0 $. \end{thm}
We take $ \mu = \mub $. Then the assumptions \Ass{A.1}, \Ass{A.4}, and \Ass{A.5} are easily checked as we see in the next lemma.
\begin{lem} \label{l:33} $ \mub $ satisfy \Ass{A.1}, \Ass{A.4}, and \Ass{A.5}. \end{lem}
\aaaaa
\Ass{A.1} and \Ass{A.5} are clear because
the correlation functions $\{\rbn \}$ of $\mub $
are given by the equation \eqref{:10a}
and the kernels $ \Kan $ are locally bounded in $ (0,\infty)$
and bounded in $ [1,\infty)$.
\Ass{A.4} follows from \cite[Theorem 2.1]{o.col}
because the kernel $ \Kan $ is locally Lipschitz continuous. \bbbbb
We next deduce Theorems \ref{l:21} and \ref{l:22} from Theorems \ref{l:23} and \ref{l:24}.
\noindent {\em Proof of \tref{l:21} \thetag{1} and \tref{l:22}. } We will use Theorems \ref{l:31} and \tref{l:32} to prove \tref{l:21} \thetag{1} and \tref{l:22}. For this we check the assumptions \Ass{A.1}--\Ass{A.5} with a help of Theorems \ref{l:23} and \ref{l:24}.
The assumption \Ass{A.2} follows from \tref{l:23}.
From \lref{l:33} we already know that $ \mub $ satisfy \Ass{A.1}, \Ass{A.4}, and \Ass{A.5}.
From \tref{l:24} we see that $ \mub $ are quasi-Gibbsian with continuous potentials. In \cite[Lemma 3.6]{o.rm}, it was proved that, when potentials are upper semi-continuous, the closability in \Ass{A.3} for $ k=0 $ follows from the quasi-Gibbs property. Then we have \Ass{A.3} for $ k=0 $. The closability for general $ k \ge 1 $ also follows from the quasi-Gibbs property of $ \mub $ in a similar fashion. Hence we obtain \Ass{A.3} for $ \mub $.
We have thus seen that the assumptions \Ass{A.1}--\Ass{A.5} are fulfilled. Hence, \tref{l:21} \thetag{1} and \tref{l:22} follow from Theorems \ref{l:31} and \ref{l:32}, respectively. \qed
We next prove \thetag{2} of \tref{l:21} using a result in \cite{o-t.tail}.
\noindent {\em Proof of \tref{l:21} \thetag{2}. } We deduce \tref{l:21} \thetag{2} from Theorem 9.3 in \cite{o-t.tail}. We check the assumptions in \cite[Theorem 9.3]{o-t.tail}. These are labeled in \cite{o-t.tail} as follows: \As{A1}--\As{A4}, \As{A5'}, \As{A8'}, \As{A9}, \As{E1}, \As{F1}, and \As{F2}.
We see that \As{A3}, \As{A1}, \As{A4}, and \As{A5'} in \cite{o-t.tail} follow from \As{A.1}, \As{A.2}, \As{A.4}, and \As{A.5} in the present paper, respectively. We deduce \As{A2} in \cite{o-t.tail} from \tref{l:24} immediately. \As{A8'} follows from \eqref{:10bb}. Assumption \As{A9} in \cite{o-t.tail} asserts that $ \mub $ is tail trivial. This was proved in \cite{o-o.tt}.
For $ \mathsf{k},r \in \N $, let $ a_{\mathsf{k}}(r) = \mathsf{k} \sqrt{r}$. Set $ a_{\mathsf{k}} = \{ a_{\mathsf{k}} (r)\}_{r\in\N } $ and $ \mathbf{a} = \{ a_{\mathsf{k}} \}_{\mathsf{k}\in\N } $. Let $ \mathsf{K}[\mathbf{a}] = \cup_{r=1}^{\infty} \mathsf{K}[a_{\mathsf{k}}]$, where $ \mathsf{K}[a_{\mathsf{k}}]= \{ \mathsf{s}; \mathsf{s}(\Sr ) \le a_{\mathsf{k}}(r) \text{ for all } r \in \N \} $. We then deduce from \eqref{:10bb} that \begin{align}& \notag \mub (\mathsf{K}[\mathbf{a}]) = 1 .\end{align} This corresponds to \As{E1} in \cite{o-t.tail}. Assumptions \As{F1} and \As{F2} depend on a nonnegative integer $ \ell $ in \cite[Theorem 9.3]{o-t.tail}. We take $ \ell = 1 $ here. We can easily check \As{F1} and \As{F2} by a straightforward calculation. Indeed, $ \sigma $ and $ b $ in \As{F1} and \As{F2} in \cite{o-t.tail} become $ \sigma = 1 $ and $ b = 2\dlog ^{\mub } $. Hence, $ \partial_x \sigma = 0 $ and, from \tref{l:23}, \begin{align*}& \partial_x b (x,\mathsf{s}) = -\frac{2\alpha }{x^2} - \sum_{i\in\N } \frac{4}{(x-s_i)^2} .\end{align*} This implies \As{F1} and \As{F2} immediately. We thus complete the proof. \qed
The rest of the paper is devoted to the proofs of Theorems \ref{l:23} and \ref{l:24}.
\section{Logarithmic derivative of random point fields. } \label{s:4}
Let $ \mu $ be a probability measure on $ \SSS $ with locally bounded $ n $-point correlation function $ \rho ^{n}$ for each $ n \in \N $. Let $ \muone $ be the measure defined by \eqref{:22y} with $ k=1 $. In this section we present a sufficient condition for the existence of the logarithmic derivative $ \dmu $ in $ \Llocp (\muone )$.
Let $ \Sr = \{ x \in \SS \, ;\, |x| < \rrr \}$ and $\Sr ^{n}$ denote the $ n$-product of $ \Sr $. Here and after, $ \cdot ^n$ denotes the $ n$-product of the set $ \cdot $. Let $ \{ \muN \} $ be a sequence of probability measures on $ \SSS $. We assume that their $ n $-point correlation functions $ \{\rho ^{\n ,n}\} $ satisfy for each $ r\in \N $ \begin{align} \label{:40a}& \limi{\n } \rho ^{\n ,n} (\mathbf{x})= \rho ^{n} (\mathbf{x}) \quad \text{ uniformly on $\Sr ^{n}$} ,\\\label{:40b}& \sup_{ \n \in\N } \sup_{\mathbf{x}\in\Sr ^{n}} \rho ^{\n ,n} (\mathbf{x}) \le \cref{;40b} ^{-n} n ^{\cref{;40c}n} ,\end{align} where $ 0 < \Ct \label{;40b}(r) < \infty $ and $ 0 < \Ct \label{;40c}(r)< 1 $ are constants independent of $ n \in \N $.
Let $\map{\g }{\SS ^{2} }{\R }$ be measurable functions. For $ (x,\mathsf{y}) \in \SS \times \SSS $ and $ s > 0 $ we set \begin{align} & \label{:40c}
\ggNs (x,\mathsf{y}) = \sum_{|x-y_i|< s }\gN (x,y_i) ,\quad
\rrNs (x,\mathsf{y}) = \sum_{s \le |x-y_i|}\gN (x,y_i) ,\end{align} where $ \mathsf{y}=\sum_{i}\delta_{y_i}$. As for $ \rrNs $, we define only for $ \mathsf{y}$ such that $ \mathsf{y} (\SS ) < \infty $ in order to make the sum
$\rrNs (x,\mathsf{y}) = \sum_{s \le |x-y_i|}\gN (x,y_i)$ finite. We note that $ \ggNs + \rrNs $ is independent of $ s $.
Let $ \map{\uu , \uN }{\SS }{\R }$ and $ 1 < \hat{p} < \infty $. Assume that $ \muN $ has a logarithmic derivative $ \dmuN $ for each $ \n $ satisfying the following. \begin{align}\label{:40d}& \dmuN (x,\mathsf{y})= \uN (x) + \ggNs (x,\mathsf{y}) + \rrNs (x,\mathsf{y}) ,\\\label{:40e}& \limi{\n }\uN =\uu \quad \text{ in } L^{\hat{p}}_{\mathrm{loc}}(\SS ,dx) ,\\\label{:40f}& \limi{s}\limsupi{\n }
\int_{\Sr \ts \SSS } |\rrNs (x,\mathsf{y}) |^{\hat{p}} d\muNone = 0 .\end{align}
We quote: \begin{thm}[{\cite[Theorem 45]{o.isde}}] \label{l:41} Let $ 1 < p < \hat{p} $. Assume \eqref{:40a}--\eqref{:40f}. Then the logarithmic derivative $\dmu $ exists in $ \Llocp (\muone )$ and is given by
\begin{align}\label{:41a}& \dmu (x,\mathsf{y})= \uu (x) + \limi{s} \ggs (x,\mathsf{y}) .\end{align} The convergence $ \lim \ggs $ takes place in $ \Llocp (\muone )$. \end{thm} \begin{rem}\label{r:41} \tref{l:41} is a special case of \cite[Theorem 45]{o.isde}. In \cite[Theorem 45]{o.isde} extra terms such as $ \vN $ and $ \www $ appeared. These terms vanish here. However, this is not the case for the Ginibre random point field and the Airy random point fields. \end{rem}
In practice, checking the condition \eqref{:40f} is the hardest part of the proof, so we quote a sufficient condition for it in terms of correlation functions.
\begin{lem} \label{l:42}
We set $ \SS _{s\infty}^{x} = \{ y \in \SS ;s \le |x-y| < \infty \}$. Let $ \rN _{x}$ be the $ n$-point correlation function of the reduced Palm measure $ \muNx $. Then \eqref{:40f} with $ \hat{p} = 2 $ follows from the following: \begin{align} \label{:42A}& \2
|\int_{\SS _{s\infty}^{x}} \gN (x,y)\rNone (y) dy | = 0 ,\\ \label{:42B}&
\2 |\int_{\SS _{s\infty}^{x}}\gN (x,y)
\{ \rNone _{x} (y)-\rNone (y)\}dy | = 0 ,\\\label{:42C}&
\2 |\int_{\SS _{s\infty}^{x}}|\gN (x,y)|^{2} \rNone (y)dy \\ \notag &\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad - \int_{(\SS _{s\infty}^{x})^{2}}
\gN (x,y)\cdot \gN (x,z)\rNtwo (y,z) dydz |= 0 ,\\% \label{:42D}& \2
|\int_{\SS _{s\infty}^{x}}|\gN (x,y)|^{2} \{ \rNone _{x} (y)-\rNone (y)\} dy \notag \\ & \quad \quad -\int_{(\SS _{s\infty}^{x})^{2} }
\gN (x,y)\cdot \gN (x,z) \{ \rNtwo _{x} (y,z)-\rNtwo (y,z)\}dydz |= 0 .\end{align} \end{lem} \aaaaa \lref{l:42} is a special case of \cite[Lemma 52]{o.isde}. \bbbbb
\section{Finite particle approximations and proof of \tref{l:23}} \label{s:5} In this section, we prove \tref{l:23}. For this we use \tref{l:41}. We will check the assumptions \eqref{:40a}--\eqref{:40f} posed in \tref{l:41}.
We begin by giving finite particle approximations for Bessel random point fields. Let $ \{ L^{[\alpha ]}_{n} \} $ denote the Laguerre polynomials. Then by definition \begin{align}\label{:51a}&
L^{[\alpha ]}_n(x) =\sum^n_{m=0} (-1)^m \binom{n+\alpha }{n-m}\frac{x^m}{m!} .\end{align} The associated monic polynomials $ \{ p^{[\alpha ]}_{n} \} $ are given by \begin{align}\label{:51aa}& p^{[\alpha ]}_n (x) = (-1)^n n! L^{[\alpha ]}_n(x) = (-1)^n \Gamma( n +1) L^{[\alpha ]}_n(x) .\end{align}
Let $ \w (x) = x^{\alpha }e^{-x}$. Then it is known \cite[301p, 302p]{sansone} that for $ m,n \in \{ 0 \}\cup \mathbb{N} $ \begin{align}\label{:51b}&
\int_{0}^{\infty}
L^{[\alpha ]}_m (x) L^{[\alpha ]}_n (x) \, \w (x) dx =
\delta_{m,n}
\frac{\Gamma( n+\alpha +1)}{\Gamma(n+1)}
.\end{align} From \eqref{:51aa} and \eqref{:51b} we immediately deduce that \begin{align}\label{:51B}&
\int_{0}^{\infty}
p^{[\alpha ]}_{\n -1} (x) ^2 \, \w (x) dx =
\Gamma( \n +\alpha ) \Gamma( \n ) .\end{align}
Let $ \map{\kan }{(0,\infty)^2}{\mathbb{R}}$ be such that \begin{align}\label{:51C}&
\kan (x,y)=
\sqrt{\w (x)\w (y)} \sum ^{\n -1}_{m = 0}
\frac{p^{[\alpha ]}_m (x) p^{[\alpha ]}_m (y) }
{\int_{0}^{\infty} p^{[\alpha ]}_m (z) ^2 \w (z)dz } .\end{align} Then we deduce from the Christoffel-Darboux formula \cite[Proposition 5.1.3.]{forrester} that for $ x\not=y$ \begin{align}\label{:51D}& \kan (x,y) = \frac{\sqrt{\w (x)\w (y)} } {\int_{0}^{\infty} p^{[\alpha ]}_{\n -1} (z) ^2 \w (z)dz }
\frac { p^{[\alpha ]}_{\n }(x) p^{[\alpha ]}_{\n -1} (y) -
p^{[\alpha ]}_{\n -1}(x) p^{[\alpha ]}_{\n } (y) }
{x-y} .\end{align} From \eqref{:51aa}--\eqref{:51B} combined with
a straightforward calculation, we obtain that \begin{align}\label{:51Zz}& \kan (x,y) = \sqrt{\w (x)\w (y)}
\frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )}
\frac { L^{[\alpha ]}_{\n -1}(x) L^{[\alpha ]}_{\n } (y)
-
L^{[\alpha ]}_{\n }(x) L^{[\alpha ]}_{\n -1} (y) }
{x-y} .\end{align}
We now introduce the rescaled kernels $ \Kan $ as follows. \begin{align}\label{:51d}& \Kan (x,y) = \frac{1}{4\n } \kan (\frac{x}{4\n },\frac{y}{4\n }) .\end{align} Let $ \mubN $ be the determinantal random point field over $ (0,\infty)$ generated by $ (\Kan , dx)$. Then by construction the $ n$-point correlation functions of $ \mubN $ are given by \begin{align}\label{:51e}& \rbN (x_1,\ldots,x_n) = \det [\Kan (x_i,x_j)]_{i,j=1,\ldots,n} .\end{align}
It is known that $ \mubN (\sss (\SS )= \n )= 1 $, and so the labeled density function of $ \mubN $ is $ \rbNN $ up to the normalizing constant. Moreover, by the standard theory of random matrices, using the calculation of the Vandermonde determinant, we obtain \begin{align}\label{:51f}& \rbNN (x_1,\ldots,x_{\n }) = c_{\n } e^{-\sum_{i=1}^{\n }x_i/4\n } \prod_{j=1}^{\n }x_j^{\alpha }
\prod_{k<l}^{\n } |x_k-x_l|^{2 } \end{align} with the normalizing constant $ c_{\n }$ \cite{forrester}. We easily deduce from \eqref{:51f} that the logarithmic derivative $ \dlog ^{\mubN } $ of $ \mubN $ is given by \begin{align}\label{:51h}& \dlog ^{\mubN }(x,\mathsf{y}) = -\frac{1}{4\n } + \frac{\alpha }{x} + \sum_{i=1}^{\n -1} \frac{2 }{x-y_i} .\end{align}
\begin{lem} \label{l:51}
Let $ 1 \le \alpha < \infty $.
Then $ \{ \mubN \} $ satisfy \eqref{:40a}--\eqref{:40e} with \begin{align}\label{:51p}& u^{\n } (x) = -\frac{1}{4\n } + \frac{\alpha }{x} , \quad g(x,y)= \frac{2}{x-y} .\end{align} \end{lem}
\aaaaa It is known (\cite[290p]{forrester}) that, for each $ (x,y)\in (0,\infty)^2 $, \begin{align}\label{:51g}& \limi{\n } \Kan (x,y) = \Ka (x,y) .\end{align} Furthermore, one can easily see that the convergence takes place compact uniformly in $ (x,y)\in [0,\infty)^2 $. We deduce \eqref{:40a} from \eqref{:51e} and \eqref{:51g} immediately.
The condition \eqref{:40b} follows from \eqref{:51e} and \eqref{:51g}. In fact, from \eqref{:51g} and the definition \eqref{:51d} of the kernel $ \Kan $, we deduce that the norm $ \mathsf{k}_{\alpha ,i}^{\n ,n } (x_1,\ldots,x_n) $ of the $ i$th row vector of the matrix $ [\Kan (x_i,x_j)]_{i,j=1,\ldots,n} $ satisfies the following inequality. \begin{align}\label{:51H}& \sup_{(x_1,\ldots,x_n) \in S_r^n} \mathsf{k}_{\alpha ,i}^{\n ,n } (x_1,\ldots,x_n) \le \cref{;51} n^{1/2} .\end{align} Here $ \Ct \label{;51} = \cref{;51} (r) $ is a positive constant independent of $ \n $ and $ n $. Hence from Hadamard's inequality we deduce that \begin{align}\label{:51i}&
\sup_{(x_1,\ldots,x_n) \in S_r^n}
|\det [\Kan (x_i,x_j)]_{i,j=1,\ldots,n} | \le \cref{;51}^n n^{n/2} .\end{align}
The conditions \eqref{:40c}--\eqref{:40e} are obvious from construction and \eqref{:51h}. \bbbbb
By \lref{l:51}, it only remains to prove \eqref{:40f}. Taking \lref{l:42} into account, we will deduce \eqref{:40f} from \eqref{:42A}--\eqref{:42D}. The key point of this is the estimate \eqref{:52a} in \lref{l:52}, which controls the 1-point correlation functions $ \rbNone $ of $ \mubN $. To prove \eqref{:52a} we prepare a bound on $ \rbNone $.
\begin{lem} \label{l:50} Let $ \alpha > -1 $ and $ \omega > 1 $. Then for all $ \n \in \N $ \begin{align}\label{:52o}& \rbNone (x) \le \frac{\cref{;b52f} }{\sqrt{x}} \quad \text{ for } 1 \le x \le 4\n \omega .\end{align} Here $ \Ct = \cref{;b52f} (\alpha , \omega ) \label{;b52f}$
is a positive constant independent of $ x $ and $ \n $. \end{lem}
This lemma follows from an asymptotic formula of Hilb's type from \cite[Theorem 8.22.4, 199 p.]{szego}. Since the proof is long, although straightforward, we postpone it to the Appendix (\sref{s:7}).
The next result is the most significant step of the proof.
\begin{lem} \label{l:52} The condition \eqref{:42A} is satisfied. Furthermore, it holds that \begin{align}\label{:52a}& \2
\int_{\SS _{s\infty}^{x}} \frac{1}{|x-y|}\rbNone (y) dy = 0 .\end{align} \end{lem} \aaaaa Since \eqref{:42A} follows from \eqref{:52a}, we only prove \eqref{:52a}.
We divide $ \SS _{s\infty}^{x} $ into two parts $ \SS _{s\infty}^{x}\cap [s,\omega \n ]$ and $ \SS _{s\infty}^{x}\cap [\omega \n , \infty )$, where $ \omega $ is a positive constant.
We begin with the first case $ \SS _{s\infty}^{x}\cap [s,\omega \n ]$.
From \eqref{:52o} we deduce that \begin{align} \label{:52g}&
\sup _{\n \in \mathbb{N}} \sup_{x\in\Sq }
\int_{\SS _{s\infty}^{x}\cap [s,\omega \n ]} \frac{\rbNone (y)}{|x-y|} dy \le
\sup_{x\in\Sq } \int_{\SS _{s\infty}^{x}}
\frac{\cref{;b52f}}{|x-y|\sqrt{|y|}} dy
\to 0 \end{align} as $ s \to \infty $.
As for the second case $ \SS _{s\infty}^{x}\cap [\omega \n , \infty )$, we see that for $ r < \omega \n $ \begin{align}\label{:52q} & \sup_{x \in S_r}
\int_{\SS _{s\infty}^{x}\cap [\omega \n , \infty )}
\frac{\rbNone (y)}{|x-y|} dy \leq
\frac{1}{ \n \omega - r } \int_{0}^{\infty} \rbNone (y) dy
= \frac{\n }{ \n \omega - r } .\end{align} Then we deduce from \eqref{:52q} that \begin{align}\label{:52h} & \lim_{s \to \infty} \limsup_{\n \to \infty} \sup_{x \in S_r}
\int_{\SS _{s\infty}^{x}\cap [\omega \n , \infty )}
\frac{\rbNone (y)}{|x-y|} dy \le \frac{1}{\omega } .\end{align}
Combining \eqref{:52g} and \eqref{:52h}, we deduce that
\begin{align} \label{:52z}& \2
\int_{\SS _{s\infty}^{x}}
\frac{\rbNone (y)}{|x-y|} dy \le \frac{1}{\omega } .\end{align} Taking $ \omega > 0 $ to be arbitrarily large in \eqref{:52z} yields \eqref{:52a}. \bbbbb
We next prepare two properties \eqref{:82k} and \eqref{:53a} of determinantal kernels. We will repeatedly use these in the sequel.
Let $ \mubNx $ be the reduced Palm measure of $ \mubN $ conditioned at $ x $ and let $ \rbNxn $ be its $ n $-point correlation function as before. Then $ \mubNx $ has a determinantal structure with kernel \begin{align}\label{:82k}& \Kanx (y,z)= \Kan (y,z) - \frac{\Kan (y,x)\Kan (x,z) }{ \Kan (x,x)} .\end{align} This relation follows from a general theorem on determinantal random point fields \cite[Theorem 1.7]{shirai-t}.
Applying the Schwarz inequality to \eqref{:51C}, we deduce from \eqref{:51d} that \begin{align}\label{:53a}&
|\Kan (x,y) |
\le \sqrt{\Kan (x,x)} \sqrt{\Kan (y,y)}
= \sqrt{\rbNone (x)} \sqrt{\rbNone (y)} .\end{align} Here the equality in \eqref{:53a} follows from \eqref{:51e} with $ n = 1$.
\begin{lem} \label{l:53}
The condition \eqref{:42B} is satisfied. Furthermore, it holds that \begin{align}\label{:53d}& \2
\int_{\SS _{s\infty}^{x}} \frac{| \rbNxone (y)- \rbNone (y) | }{|x-y|} dy = 0 .\end{align} \end{lem} \aaaaa From \eqref{:10a}, \eqref{:82k}, and \eqref{:53a}, we deduce that \begin{align}\label{:82u}
|\rbNxone (y)- \rbNone (y) | &
= | \frac{\Kan (y,x)\Kan (x,y)}{\Kan (x,x)} |
\le \Kan (y,y) = \rbNone (y) .\end{align} Hence \eqref{:53d} is immediate from \eqref{:52a}. The condition \eqref{:42B} follows from \eqref{:53d} immediately. \bbbbb
\begin{lem} \label{l:54} The condition \eqref{:42C} is satisfied. Furthermore, it holds that \begin{align}\label{:54a}&
\2 \int_{\SS _{s\infty}^{x}} \frac{\rbNone (y)}{|x-y|^{2}} dy
+ \int_{(\SS _{s\infty}^{x})^{2}} \frac{ \rbNtwo (y,z) }{|x-y||x-z|}dydz = 0 .\end{align} \end{lem}
\aaaaa From \lref{l:52} we easily deduce that \begin{align}\label{:54b}&
\2 \int_{\SS _{s\infty}^{x}} \frac{\rbNone (y)}{|x-y|^{2}} dy = 0 .\end{align} From \eqref{:51e} and \eqref{:53a} we see that \begin{align}\label{:54c}
\rbNtwo (y,z) & = \rbNone (y)\rbNone (z) - \Kan (y,z)\Kan (z,y)
\le 2 \rbNone (y)\rbNone (z) .\end{align} Hence from \eqref{:54c} and Fubini's theorem, we deduce that \begin{align}\label{:54d} \int_{(\SS _{s\infty}^{x})^2}
\frac{\rbNtwo (y,z) }{|x-y||x-z|} dydz \le
& \int_{(\SS _{s\infty}^{x})^2}
\frac{ 2 \rbNone (y)\rbNone (z) }{|x-y||x-z|} dydz \\ \notag \le \,
& 2 \, (\int_{\SS _{s\infty}^{x}} \frac{ \rbNone (y)}{|x-y|} dy)^2 .\end{align} Then from \eqref{:54d} and \lref{l:52} we deduce that \begin{align}\label{:54D}& \2 \int_{(\SS _{s\infty}^{x})^2}
\frac{\rbNtwo (y,z) }{|x-y||x-z|} dydz = 0 .\end{align}
From \eqref{:54b} and \eqref{:54D}, we conclude \eqref{:54a}. This implies \eqref{:42C}. \bbbbb
\begin{lem} \label{l:55} The condition \eqref{:42D} is satisfied. Furthermore, it holds that \begin{align}\label{:55a}& \2
\int_{\SS _{s\infty}^{x}}\frac{| \rbNxone (y)-\rbNone (y) |}{|x-y|^{2}}
dy \\ & \quad \quad \notag
+ \int_{(\SS _{s\infty}^{x})^{2} }
\frac{| \rbNxtwo (y,z)-\rbNtwo (y,z) |}{|x-y||x-z|} dydz = 0 .\end{align}\end{lem}
\aaaaa We deduce from \lref{l:53} that \begin{align}\label{:55b}& \2 \int_{\SS _{s\infty}^{x}}
\frac{| \rNone _{x} (y)-\rNone (y)|}{|x-y|^{2}} dy = 0 .\end{align}
To estimate the second term of \eqref{:55a} we observe that
\begin{align}\label{:55c}& \rbNxtwo (y,z)- \rbNtwo (y,z) \\ \notag = & - \Kan (y,x)\Kan (x,y) \frac{\Kan (z,z)}{\Kan (x,x)} - \Kan (z,x)\Kan (x,z) \frac{\Kan (y,y)}{\Kan (x,x)} \\ \notag & + \Kan (y,x)\Kan (x,z)\Kan (z,y) \frac{1}{\Kan (x,x)} + \Kan (y,z)\Kan (z,x)\Kan (x,y) \frac{1}{\Kan (x,x)} .\end{align} Then applying \eqref{:53a} to each term of the right-hand side, we obtain \begin{align}\label{:55d}&
|\rbNxtwo (y,z)- \rbNtwo (y,z)|
\le 4\rbNone (y)\rbNone (z) .\end{align} Hence from \eqref{:55d} and Fubini's theorem, we deduce that \begin{align}\label{:55e} \int_{(\SS _{s\infty}^{x})^{2} }
\frac{ | \rbNxtwo (y,z)-\rbNtwo (y,z) | }{|x-y||x-z|}dydz \le & \int_{(\SS _{s\infty}^{x})^{2} }
\frac{4\rbNone (y)\rbNone (z)}{|x-y||x-z|} dydz \\ \notag = & \, 4 \, \left\{
\int_{\SS _{s\infty}^{x}}\frac{\rbNone (y)}{|x-y|} dy \right\}^2 .\end{align} Then from \eqref{:55e} and \lref{l:52}, we see that \begin{align}\label{:55E}& \2 \int_{(\SS _{s\infty}^{x})^{2} }
\frac{ | \rbNxtwo (y,z)-\rbNtwo (y,z) | }{|x-y||x-z|}dydz = 0 .\end{align} From \eqref{:55b} and \eqref{:55E}, we obtain \eqref{:55a}. This implies \eqref{:42D}. \bbbbb
\noindent {\em Proof of Theorem \ref{l:23}. } From \lref{l:51} we see that $ \mub $ satisfies \eqref{:40a}--\eqref{:40e}.
From \lref{l:52}--\lref{l:55}, we deduce that $ \mub $ satisfies \eqref{:42A}--\eqref{:42D}.
This combined with \lref{l:42} yields \eqref{:40f}. We thus see that all the conditions \eqref{:40a}--\eqref{:40f} of \tref{l:41} are fulfilled. Hence from \tref{l:41}, we obtain \tref{l:23} with the logarithmic derivative $ \dlog $ given by \eqref{:41a}. \qed
\section{Proof of \tref{l:24}}\label{s:6}
In this section we prove \tref{l:24}. For this we use \cite[Theorem 2.2]{o.rm2}. We prepare a result from \cite{o.rm2}, which is a special case of \cite[Theorem 2.2]{o.rm2}.
In the next theorem, we take $ \SS = \ZI $ or $ \SS = \mathbb{R}$. Let $ \mu $ be a random point field on $ \SS $. We assume three conditions.
\noindent \Ass{B.1} The random point field $ \mu $ has a locally bounded, $ n $-point correlation function $ \rho ^n $ for each $ n \in \N $.
\noindent \Ass{B.2} There exists a sequence of random point fields $\{ \muN \}_{\n \in \N }$ over $ \SS $ satisfying the following.
\noindent \thetag{1} The $ n $-point correlation functions $ \rN $ of $ \muN $ satisfy
\begin{align} \label{:61a} & \lim _{\n \to \infty } \rN (\mathbf{x}_n) = \rho ^n (\mathbf{x}_n) \quad \text{ a.e.} \quad \text{ for all $ n \in \N $,}
\\ \label{:61b} & \sup \{ \rN (\mathbf{x}_n) ; \n \in \N ,\, \mathbf{x}_n \in \Sr ^n \} \le \{ \cref{;70} n ^{\cref{;62} }\} ^n \quad \text{ for all $ n,r\in \N $} ,\end{align} where $ \mathbf{x}_n = (x_1,\ldots,x_n) \in S ^{n}$, $ \Ct \label{;70}=\cref{;70}(r) >0$, and $ \Ct \label{;62} = \cref{;62} (r) < 1 $ are constants depending on $ r \in \N $.
\noindent \thetag{2} \ $ \muN (\mathsf{s}( S ) = \nN ) = 1 $ for each $ \n $, where $ \nN \in \N $ are strictly increasing.
\noindent \thetag{3} $ \muN $ is a $ (\PhiN ,\PsiN ) $-canonical Gibbs measure for each $ \n $.
\noindent \thetag{4} $ \PhiN $ satisfy the following. \begin{align}\label{:61c}& \limi{\n } \PhiN (x) = \Phi (x) \text{ for a.e.\ \!\! $ x $,} \quad
\infN \inf _{x \in S } \PhiN (x) > -\infty .\end{align}
Let $ \mathsf{x} =\sum _{i}\delta_{x _i }$. For $1 \le r < s \le \infty $ let $ \map{\mathsf{v}_{\ell ,rs} }{\SSS }{\mathbb{R}}$ such that \begin{align}\label{:62b}& \mathsf{v}_{\ell ,rs} (\mathsf{x} )= \beta \big\{\sum _{x _i \in \SS _s \backslash \SS _r }
\frac{1}{{ x }_i^{\ell } } \big\} \quad (\ell \ge 1) .\end{align}
Note that the sum in \eqref{:62b} makes sense for $ \muN $-a.s. $ \mathsf{x} $ even if $ s=\infty $. Indeed, by \thetag{2} of \Ass{B.2}, the total number of particles is $ \nN $ under $ \muN $. Hence, $ \mathsf{v}_{\ell ,rs} (\mathsf{x} )$ is well defined and finite for $ \muN $-a.s. $ \mathsf{x} $, for all $ \n \in \N $.
\noindent \Ass{B.3} There exists an $ \ell _{0}$ such that $ \ell _{0}\in \N $ and that \begin{align} \label{:62d}&
\supN \{ \int_{1\le |x |<\infty }
\frac{1 }{\ | x |^{\ell _{0}}} \, \rNone (x )dx \} < \infty \\\intertext{and that, for each $ 1 \le \ell < \ell _{0}$, } \label{:62f} &
\limi{s} \supN \| \sup _{\n \in \N } \mathsf{v}_{\ell ,s\infty }
\, \|_{L^1(\SSS , \mu ^{\n })} = 0 .\end{align} When $ \ell _{0} = 1 $, we interpret that \eqref{:62f} always holds. The following is a special case of \cite[Theorem 2.2]{o.rm2}. We remark that the assumptions \Ass{B.1}, \Ass{B.2} and \Ass{B.3} correspond to \thetag{H.1}, \thetag{H.2} and \thetag{H.4} in \cite[Theorem 2.2]{o.rm2}, respectively.
\begin{thm}[{\cite[Theorem 2.2]{o.rm2}}]\label{l:62} Assume \Ass{B.1}, \Ass{B.2} and \Ass{B.3}. Then $ \mu $ is a $ (\Phi , \Psi ) $-quasi-Gibbs measure. \end{thm}
\noindent {\em Proof of \tref{l:24}. } We check the assumptions \Ass{B.1}, \Ass{B.2}, and \Ass{B.3} in \tref{l:62}. We take $ \mu = \mub $ and $ \mu ^{\n }= \mubN $. Then \Ass{B.1} and \Ass{B.2} are satisfied. Furthermore, we take $ \ell _{0} = 1 $. Then from \eqref{:10b} and \lref{l:52}, we deduce \eqref{:62d} easily. Hence we conclude \tref{l:24} from \tref{l:62}. \qed
\section{Appendix: Proof of \lref{l:50}. }\label{s:7}
In this section we prove \lref{l:50}. Let $ -1 < \alpha < \infty $. Let $ L^{[\alpha ]}_{\n } $ denote the Laguerre polynomial and $ \w (x) = e^{- x} x^{\alpha }$ as before. Let \begin{align}\label{:71z}&
\Ma (x) = \{ \mathsf{w} _{\alpha +1}^{\frac{1}{2}} L^{[\alpha +1]}_{\n -1} \,
\w ^{\frac{1}{2}} L^{[\alpha ]}_{\n -1}
- \w ^{\frac{1}{2}}L^{[\alpha ]}_{\n } \,
\mathsf{w} _{\alpha +1}^{\frac{1}{2}}
L^{[\alpha +1]}_{\n -2} \,
\} (x) .\end{align}
Then from a straightforward calculation we obtain the following.
\begin{lem} \label{l:71} There exists a positive constant $ \Ct \label{;72}$ such that \begin{align}\label{:71a}&
\rbNone (y) \le
\frac{\cref{;72}}{\sqrt{y}} \,
\frac{1}{\n ^{\alpha - \frac{1}{2}}}
\Ma (\frac{y}{4\n }) \end{align} for all $ \n \in \mathbb{N}$ and $ y \in (0,\infty)$. \end{lem}
\begin{proof} From \eqref{:51d} and \eqref{:51e}, we see that \begin{align}\label{:71b}& \rbNone (y) = \Kan (y,y) = \frac{1}{4\n } \kan (\frac{y}{4\n },\frac{y}{4\n }) .\end{align} Hence we will estimate $ \kan (x,x) $. Taking $ y \to x $ in \eqref{:51Zz}, we deduce that \begin{align}\label{:71c} \kan (x,x) & = \w (x) \frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )} \{ - \frac{d L^{[\alpha ]}_{\n } }{dx} L^{[\alpha ]}_{\n -1}
+ L^{[\alpha ]}_{\n } \frac{d L^{[\alpha ]}_{\n -1} }{dx}
\,
\} (x) \\ \notag & = \w (x) \frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )} \{ L^{[\alpha +1]}_{\n -1} L^{[\alpha ]}_{\n -1}
- L^{[\alpha ]}_{\n } L^{[\alpha +1]}_{\n -2} \,
\} (x)
\\ \notag & =
\frac{\w (x)^{\frac{1}{2}}}{\mathsf{w}_{\alpha +1}(x)^{\frac{1}{2}}}
\frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )} \{ \mathsf{w} _{\alpha +1}^{\frac{1}{2}}L^{[\alpha +1]}_{\n -1}
\w ^{\frac{1}{2}} L^{[\alpha ]}_{\n -1}
- \w ^{\frac{1}{2}}L^{[\alpha ]}_{\n }
\mathsf{w} _{\alpha +1}^{\frac{1}{2}} L^{[\alpha +1]}_{\n -2} \,
\} (x) \\ \notag & =
\frac{1}{\sqrt{x}}
\frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )}
\Ma (x) .\end{align} Here we used the formula $ \frac{d L^{[\alpha ]}_{\n } }{dx}= - L^{[\alpha +1]}_{\n -1} $ (see \cite[102 p]{szego}) for the second line, and \eqref{:71z} for the last line.
Taking $ x = \frac{y}{4\n }$ in \eqref{:71c}, we obtain that \begin{align}\label{:71d}&
\frac{1}{4\n }\kan (\frac{y}{4\n },\frac{y}{4\n }) = \frac{1}{\sqrt{4\n y}}
\frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )} \Ma (\frac{y}{4\n }) .\end{align} Clearly, there exists a positive constant $ \cref{;72} $ such that \begin{align}\label{:71e}& \frac{1}{\sqrt{4\n }} \frac{\Gamma (\n +1)} {\Gamma (\n + \alpha )} \le \cref{;72} \n ^{-\alpha + \frac{1}{2}} \quad \text{ for all } \n \in \mathbb{N} .\end{align} From \eqref{:71d} and \eqref{:71e} we obtain \eqref{:71a}. \end{proof}
From \lref{l:71}, our next task is to prove \begin{align}\label{:72z}&
\Ma (\frac{y}{4\n }) = O (\n^{\alpha -\frac{1}{2}}) .\end{align} Here the bound $ O (\n^{\alpha -\frac{1}{2}})$ is taken to be uniform in $ \cref{;Hil} \n ^{-1} \le x \le \omega $. For this we quote an asymptotic formula of Hilb's type from \cite{szego}.
\begin{lem}[{\cite[Theorem 8.22.4, 199 p.]{szego}}] \label{l:72} Let $ -1 < \alpha < \infty $. Let $ \Ct \label{;Hil},\, \omega > 0 $ be fixed. Then each Laguerre polynomial $ L^{[\alpha ]}_{\n } $ satisfies, for all $ \cref{;Hil} \n ^{-1} \le x \le \omega $,
\begin{align}\label{:72a}&
\w (x)^{\frac{1}{2}}
L^{[\alpha ]}_{\n } (x) = \Ana
J_{\alpha } ( \sqrt{4Nx} ) + x^{\frac{5}{4}} O (\n ^{\alpha /2 -\frac{3}{4}}) .\end{align}
Here $ N = \n + (\alpha + 1)/2$ and the bound $ O (\n ^{\alpha /2 -\frac{3}{4}})$ holds uniformly in $ \cref{;Hil} \n ^{-1} \le x \le \omega $. Furthermore, $ \Ana $ is defined by \begin{align}\label{:72b}&
\Ana =
\frac{\Gamma (\n + 1+ \alpha )}{ N ^{\alpha / 2} \Gamma (\n + 1 )} .\end{align} \end{lem} \begin{proof} This lemma follows from \thetag{8.22.4} and \thetag{8.22.5} in Szeg\"{o} \cite[Theorem 8.22.4, 199 p.]{szego} immediately. We remark that we use the first equation of \thetag{8.22.5} in Szeg\"{o} \cite[Theorem 8.22.4, 199 p.]{szego} as well as \thetag{8.22.4} in \cite{szego}. \end{proof}
From \lref{l:72} we see the following.
\begin{lem} \label{l:73} For all $ \n \in \mathbb{N }$ and $ \cref{;Hil} \n ^{-1} \le x \le \omega $
\begin{align}\label{:73a}&
\Ma (\frac{y}{4\n }) = O(\n^{\alpha + \frac{1}{2}})
\\ \notag & \quad \cdot
\left[
\left\{ J_{\alpha }((\frac{ N -1}{\n }y)^{\frac{1}{2}}) + (\frac{y}{4\n })^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}}) \right\}
\left\{ J_{\alpha +1}((\frac{ N -1}{\n }y)^{\frac{1}{2}}) + (\frac{y}{4\n })^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}}) \right\}
\right.
\\ \notag & \quad \quad \left. -
\left\{ J_{\alpha +1}((\frac{ N -2}{\n }y)^{\frac{1}{2}}) + (\frac{y}{4\n })^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}}) \right\} \left \{ J_{\alpha }((\frac{ N }{\n }y)^{\frac{1}{2}}) + (\frac{y}{4\n })^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}}) \right\}
\right] .\end{align} \end{lem} \begin{proof}
From \eqref{:72a} we easily deduce that, for all $ \cref{;Hil}\n ^{-1} \le x \le \omega $, \begin{align}\label{:91Q}
\w (x)^{\frac{1}{2}} L^{[\alpha ]}_{\n } (x)
& = \Ana \left\{ J_{\alpha } ( \sqrt{4Nx} ) + x^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}}) \right\} .\end{align}
Then taking $ x = y /4\n $ in \eqref{:91Q} we deduce that
\begin{align} \label{:91q} \w (\frac{y}{4\n })^{\frac{1}{2}}
& L^{[\alpha ]}_{\n } (\frac{y}{4\n })
=
\Ana
\left\{ J_{\alpha }((\frac{ N }{\n }y)^{\frac{1}{2}}) + (\frac{y}{4\n })^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}}) \right\} .\end{align}
A simple calculation shows that \begin{align}\label{:91R}& \Ana = O(\n^{\frac{1}{2}\alpha }) .\end{align} Hence we deduce \eqref{:73a} from \eqref{:71z}, \eqref{:91q} and \eqref{:91R} immediately. \end{proof}
\noindent {\em Proof of \lref{l:50}. } We will calculate the right-hand side of \eqref{:73a}.
Recall that $ N = \n + \frac{1}{2} (\alpha + 1) $. Then $ \frac{N}{\n }- \frac{N-1}{\n }= \frac{1}{\n } $. Hence from Taylor expansion of $ J_{\alpha }(\sqrt{y}) $
and $ J_{\alpha +1}(\sqrt{y}) $,
we deduce that
\begin{align}\label{:991}&
J_{\alpha }((\frac{ N -1}{\n }y)^{\frac{1}{2}})
J_{\alpha +1}((\frac{ N -1}{\n }y)^{\frac{1}{2}}) - J_{\alpha }((\frac{ N }{\n }y)^{\frac{1}{2}} ) J_{\alpha +1}((\frac{ N -2}{\n }y)^{\frac{1}{2}} ) \\ \notag & =
\frac{1}{\n } \left[
J_{\alpha }((\frac{ N -1}{\n }y)^{\frac{1}{2}} ) \, \n \left\{
J_{\alpha +1}((\frac{ N -1}{\n }y)^{\frac{1}{2}} ) -
J_{\alpha +1}((\frac{ N -2}{\n }y)^{\frac{1}{2}}) \right\}
\right.
\\ &\left. \quad \quad \quad - \n \left\{ J_{\alpha }((\frac{ N }{\n }y)^{\frac{1}{2}} ) -
J_{\alpha }((\frac{ N -1}{\n }y)^{\frac{1}{2}}) \right\}
J_{\alpha +1}((\frac{ N -2}{\n }y)^{\frac{1}{2}})
\right]
\\ \notag & = O(\frac{1}{\n }) \left[ \{ \frac{d}{dy }
J_{\alpha }(\sqrt{y}) \} \cdot J_{\alpha +1}(\sqrt{y})
- J_{\alpha }(\sqrt{y}) \{ \frac{d}{dy } J_{\alpha +1}(\sqrt{y}) \} \right] \\ \notag & = O (\frac{1}{\n } ) .\end{align} We also see that \begin{align}\label{:992}& J_{\alpha }((\frac{ N }{\n }y)^{\frac{1}{2}})
(\frac{y}{4\n })^{\frac{5}{4}} O(\frac{1}{\n ^{\frac{3}{4}}})
= O (1) \frac{1}{y^{\frac{1}{4}}}
(\frac{y}{4\n })^{\frac{5}{4}}
O(\frac{1}{\n ^{\frac{3}{4}}})
= O (\frac{1}{\n } ) .\end{align}
Here we used $ |J_{\alpha } (t)| \le O(1) t^{-1/2}$ and $ \frac{y}{4\n } = O(1)$.
Substituting \eqref{:991} and \eqref{:992} together with similar relations into \eqref{:73a}, we deduce that \begin{align}\label{:993}
\Ma (\frac{y}{4\n })
= & O (\n ^{\alpha + \frac{1}{2}}) O (\frac{1}{\n})
= O (\n ^{\alpha - \frac{1}{2}}) .\end{align} This together with \lref{l:71} completes the proof of \lref{l:50}. \bbbbb
\section{Appendix 2: No particles hit the origin. } \label{s:8} In this section we prove that no particles hit the origin if $ 1 \le \alpha $. For this it is enough to prove that the capacity of the set of configurations with at least one particle at the origin is zero.
\begin{lem} \label{l:81} Let $ \mathsf{A} = \{ \mathsf{s}\in\mathsf{S};\, \mathsf{s}(\{ 0 \} ) \ge 1 \} $. Suppose $ \alpha \ge 1 $. Then \begin{align}\label{:81a}& \mathrm{Cap}^{\mu }(\mathsf{A}) = 0 .\end{align} Here $ \mathrm{Cap}^{\mu }$ is the capacity associated with the Dirichlet space $(\Eazero ,\d ^{\mu } ,\Lm )$ as before. \end{lem} \begin{proof} Set $ \mathcal{D}_{\circ ,r} = \{ f\in \di ; \text{$ f $ is $ \sigma [\pi _r]$-measurable} \} $, where $ \pi _r (\mathsf{s})= \mathsf{s} (\cdot \cap \SS _{b_r})$ and $ b_r $ are
as in \dref{dfn:2}. Let $ \d _r^{\mu } $ be the closure of $\mathcal{D}_{\circ ,r} $ with respect to $ (\Eazero ,\d ^{\mu } ) $ on $ \Lm $. It is then clear that
\begin{align}\label{:82a}& \d _r^{\mu } \subset \d ^{\mu } .\end{align} We can regard $ (\Eazero ,\d _r^{\mu } ) $ on $ \Lm $ as a quasi-regular Dirichlet form on $ L^2(\mathsf{\SS} _r, \mu _r)$, where $ \mathsf{\SS} _r$ is the configuration space over $ \SS _{b_r }$, and $ \mu _r = \mu \circ \pi _r^{-1}$ is regarded as a random point field on $ \SS _{b_r }$.
Let $ \mathrm{Cap}_r^{\mu }$ be the capacity associated with the Dirichlet form $ (\Eazero ,\d _r^{\mu } ) $ on $ L^2(\mathsf{\SS} _r, \mu _r)$. We then obtain from \eqref{:82a} that for any $ r \in \N $ \begin{align}\label{:81b}& \mathrm{Cap}^{\mu } (\mathsf{A})\le \mathrm{Cap} _r^{\mu }(\mathsf{A}) .\end{align} Fix $ r \in \N $ and set $ \mathsf{A}_r^m = \mathsf{A}\cap \{ \mathsf{s}(\Sr )=m \} $. We then see that $ \mathsf{A} = \sum_{m=1}^{\infty}\mathsf{A}_r^m $. Hence we deduce from \eqref{:81b} and the sub-additivity of capacity that \begin{align}\label{:81c}& \mathrm{Cap} _r^{\mu }(\mathsf{A} ) \le \sum_{m=1}^{\infty} \mathsf{Cap} _r^{\mu } (\mathsf{A}_r^m ) .\end{align} Taking \eqref{:81b} and \eqref{:81c} into account, we deduce \eqref{:81a} from \begin{align}\label{:81d}& \mathrm{Cap} _r^{\mu }(\mathsf{A}_r^m ) = 0 \quad \text{ for all } m \in \N .\end{align}
Let $ \mr ^m $ be the symmetric labeled density of $ \mu \circ \pi _r^{-1}$ on $ \Sr ^m $. Then
\begin{align}\label{:81e}& \mr ^m (x_1,\ldots,x_m) \le \rbm (x_1,\ldots,x_m) \quad \text{ on } \Sr ^m .\end{align}
Since $ \mu $ is a determinantal random point field with kernel \eqref{:10b}, we see that \begin{align}\label{:81f}&
|\Ka (x_i,x_j)| \le \{\Ka (x_i,x_i) \Ka (x_j,x_j)\}^{1/2}= \{\rbone (x_i) \rbone (x_j)\}^{1/2} .\end{align} We then obtain from \eqref{:10a} and \eqref{:81f} that \begin{align}\label{:81g}& \rbm (x_1,\ldots,x_m) \le m! \prod_{i=1}^m \rbone (x_i) .\end{align} From \eqref{:81e} and \eqref{:81g} combined with the expression \eqref{:10bb}, there exists a positive constant $ \Ct \label{;81g}$ depending on $ r,m \in \N $ such that \begin{align}\label{:81h}& \mr ^m (x_1,\ldots,x_m) \le \cref{;81g} \prod_{i=1}^m x_i^{\alpha } \quad \text{ for all } (x_1,\ldots,x_m)\in \Sr ^m .\end{align} From \eqref{:81h} and a direct calculation using $ 1 \le \alpha $, we obtain \eqref{:81d}. This completes the proof of \eqref{:81a}. \end{proof}
\small{
}
\end{document} |
\begin{document}
\begin{abstract}
In this paper, we examine the locality condition for non-splitting and determine the level of uniqueness of limit models that can be recovered in some stable, but not superstable, abstract elementary classes. In particular we prove:
\begin{theorem1}\label{uniqueness theorem} Suppose that $\mathcal{K}$ is an abstract elementary class satisfying \begin{enumerate} \item the joint embedding and amalgamation properties with no maximal model of cardinality $\mu$. \item stability in $\mu$. \item $\kappa^*_\mu(\mathcal{K})<\mu^+$. \item continuity for non-$\mu$-splitting (i.e. if $p\in\operatorname{ga-S}(M)$ and $M$ is a limit model witnessed by $\langle M_i\mid i<\alpha\rangle$ for some limit ordinal $\alpha<\mu^+$ and there exists $N \prec M_0$ so that $p\restriction M_i$ does not $\mu$-split over $N$ for all $i<\alpha$, then $p$ does not $\mu$-split over $N$). \end{enumerate}
For $\theta$ and $\delta$ limit ordinals $<\mu^+$ both with cofinality $\geq\kappa^*_\mu(\mathcal{K})$, if $\mathcal{K}$ satisfies symmetry for non-$\mu$-splitting (or just $(\mu,\delta)$-symmetry), then, for any $M_1$ and $M_2$ that are $(\mu,\theta)$ and $(\mu,\delta)$-limit models over $M_0$, respectively, we have that $M_1$ and $M_2$ are isomorphic over $M_0$.
\end{theorem1}
Note that no tameness is assumed.
\end{abstract}
\maketitle
\section{Introduction} Because the main test question for developing a classification theory for abstract elementary classes (AECs) is Shelah's Categoricity Conjecture \cite[Problem D.1]{Ba},
the development of independence notions for AECs has often started with an assumption of categoricity (\cite{sh576, vas3, V1} and others). Consequently, the independence relations that result are superstable or stronger (see, for instance, good $\lambda$-frames and the superstable prototype \cite[Example II.3.(A)]{shelahaecbook}). However, little progress has been made to understand stable, but not superstable AECs.
A notable exception is the work on $\kappa$-coheir of Boney and Grossberg \cite{bgcoheir}, which only requires stability in the guise of `no weak $\kappa$-order property.' In this paper, we add to the understanding of strictly stable AECs with a different approach and under different assumptions than \cite{bgcoheir}. In particular, our analysis uses towers and the standard definition of Galois-stability. Moreover, we work without assuming any of the strong locality assumptions (tameness, type shortness, etc.) of \cite{bgcoheir}. We hope that this work will lead to further exploration in this context.
The main tool in our analysis is a tower, which was first conceived to study superstable AECs (see, for instance \cite{ShVi} or \cite{Va1}). The `right analogue' of superstability in AECs has been the subject of much research. Shelah has commented that this notion suffers from `schizophrenia,' where several equivalent concepts in first-order seem to bifurcate into distinct notions in nonelementary settings; see the recent Grossberg and Vasey \cite{grva} for a discussion of the different possibilities (and a surprising proof that they are equivalent under tameness).
Common to much analysis of superstable AECs is the uniqueness of limit models. Uniqueness of limit models was first proved to follow from a categoricity assumption in \cite{sh394, Sh:600, ShVi, Va1, Va1e}. Later, $\mu$-superstability, which was isolated by Grossberg, VanDieren, and Villaveces \cite[Assumption 2.8(4)]{gvv}, was shown to imply uniqueness of limit models under the additional assumption of $\mu$-symmetry \cite{Va2}. $\mu$-superstability was modeled on the local character characterization of superstability in first-order and was already known to follow from categoricity \cite{ShVi}. The connection between $\mu$-symmetry and structural properties of towers \cite{Va2} inspired recent research on $\mu$-superstable classes: \cite{Va4, VaVa}. Moreover, years of work culminating in the series of papers \cite{ShVi, Va1, Va1e, Va2, Va4, VaVa} have led to the extraction of a general scheme for proving the uniqueness of limit models (note that amalgamation is generally assumed in these papers, but this is not true of \cite{ShVi, Va1, Va1e}). In this paper we witness the power of this new scheme by adapting the technology developed in \cite{Va2} to cover $\mu$-stable, but not $\mu$-superstable classes. We suspect that this new technology of towers will likely be used to answer other problems in classification theory (in both first order and non-elementary settings).
This paper focuses on the question to what degree the uniqueness of limit models can be recovered if we assume the class is Galois-stable in $\mu$, but not $\mu$-superstable, by refocusing the question from ``\emph{Are all} $(\mu, \alpha)$-limit models isomorphic (over the base)?'' to ``\emph{For which $\alpha, \beta < \mu^+$ are } $(\mu, \alpha)$-limit models and $(\mu, \beta)$-limit models isomorphic (over the base)?'' Based on first-order results (summarized in \cite[Section 2]{gvv}), we have the following conjecture.
\begin{conjecture}\label{stab-conj} Suppose $\mathcal{K}$ is an AEC with $\mu$-amalgamation and is $\mu$-stable. The set $$\{ \alpha < \mu^+ : \cf(\alpha)=\alpha\text{ and } (\mu, \alpha)\text{-limit models are isomorphic to }(\mu, \mu)\text{-limit models} \}$$ is a non-trivial interval of regular cardinals. Moreover, the minimum of this set, denoted by $\kappa^*_\mu(\mathcal{K})$, is an important measure of the complexity of $\mathcal{K}$. \end{conjecture}
Our main result (restated from the abstract)\footnote{WB: The theorem1 environment is now hardcoded to be `Theorem 1.2.' If the label here changes, we should change that environment} proves this conjecture under certain assumptions.
\begin{theorem}\label{uniqueness theorem} Suppose that $\mathcal{K}$ is an abstract elementary class satisfying \begin{enumerate} \item the joint embedding and amalgamation properties with no maximal model of cardinality $\mu$. \item stability in $\mu$. \item $\kappa^*_\mu(\mathcal{K})<\mu^+$. \item continuity for non-$\mu$-splitting (i.e. if $p\in\operatorname{ga-S}(M)$ and $M$ is a limit model witnessed by $\langle M_i\mid i<\alpha\rangle$ for some limit ordinal $\alpha<\mu^+$ and there exists $N \prec M_0$ so that $p\restriction M_i$ does not $\mu$-split over $N$ for all $i<\alpha$, then $p$ does not $\mu$-split over $N$). \end{enumerate}
For $\theta$ and $\delta$ limit ordinals $<\mu^+$ both with cofinality $\geq\kappa^*_\mu(\mathcal{K})$, if $\mathcal{K}$ satisfies symmetry for non-$\mu$-splitting (or just $(\mu,\delta)$-symmetry), then, for any $M_1$ and $M_2$ that are $(\mu,\theta)$ and $(\mu,\delta)$-limit models over $M_0$, respectively, we have that $M_1$ and $M_2$ are isomorphic over $M_0$.
\end{theorem}
Assumption \ref{assm} collects these assumptions together, and we discuss them following that statement. In this statement, the ``measure of complexity'' from Conjecture \ref{stab-conj} is $\kappa^*_\mu(\mathcal{K})$, a generalization of the first-order $\kappa(T)$ (see Definition \ref{kappastar-def}). An important feature of this work is that it explores the underdeveloped field of strictly stable AECs.
We end with a short comment contextualizing this paper within the body of work on limit models. The general arguments for investigating the uniqueness of limit models have appeared before (see \cite{Va1, gvv}). One use is that they give a version of saturated models without dealing with smaller models and give a sense of how difficult it is to create saturated models. Many works of AECs take a `local approach' of analyzing $\mathcal{K}_\lambda$ (the models of size $\lambda$) to derive structure on $\mathcal{K}_{\lambda^+}$ (see \cite[Chapter II]{shelahaecbook} or \cite{sh576} for the most prominent examples). Because not even the existence of models of size $<\lambda$ is assumed, Galois saturation (which quantifies over smaller models) cannot be used, and limit models have become the standard substitute. Moreover, we expect that limit models will take on a greater importance in the context of strictly stable AECs, especially those without assumption of tameness. Of the various analogues for AECs (see \cite[Theorem 1.2]{grva}), most have seen extensive analysis, but only in the context of tameness. One of the remaining notions (solvability; see \cite[Chapter IV]{shelahaecbook}) seems to have no weakening to the strictly stable context. What remains are $\mu$-superstability and the uniqueness of limit models. Thus, it is reasonable to assume that understanding strictly stable AECs will require understanding the connection between `$\mu$-stability' (Assumption \ref{assm} here) and limit models. Theorem \ref{uniqueness theorem} is a step towards this understanding.
After circulating this paper but before publication, Vasey and Mazari-Armida used our results to make further progress in the field. Vasey used Theorem \ref{uniqueness theorem} in his work to characterize stable AECs \cite{v-stab-AEC}, especially in terms of unions of sufficiently saturated models being saturated \cite[Theorem 11.11]{v-stab-AEC}. Additionally, Vasey \cite[Theorem 3.7]{v-stab-AEC} gives some natural conditions for Assumption \ref{assm}.(\ref{wc-split}) below, which he calls the weak continuity of splitting. On the other hand, Mazari-Armida identified naturally occurring strictly stable AECs. By analyzing limit models of different cofinalities, he demonstrated that the class of torsion-free abelian groups and the class of finitely Butler groups, both with the pure subgroup relation, are strictly stable AECs \cite{marcos}.
Section \ref{background-sec} reviews key definitions and facts with Assumption \ref{assm} being the key hypotheses throughout the paper. Section \ref{relfulltow-sec} discusses the notion of relatively full towers. Section \ref{redtow-sec} discusses reduced towers and proves the key lemma, Theorem \ref{reduced are continuous}. Section \ref{ulm-sec} concludes with a proof of the main theorem, Theorem \ref{uniqueness theorem}.
We would like to thank Rami Grossberg and Sebastien Vasey for comments on earlier drafts of this paper that led to a vast improvement in presentation.
\section{Background} \label{background-sec} We refer the reader to \cite{Ba}, \cite{GV2}, \cite{gvv}, \cite{Va1}, and \cite{Va2} for definitions and notations of concepts such as Galois-stability, $\mu$-splitting, etc. We reproduce a few of the more specialized definitions and results here.
Grossberg, VanDieren, and Villaveces \cite[Assumption 2.8]{gvv} isolated a notion they call `$\mu$-superstability'\footnote{We do not use this here, but the definition of $\mu$-superstability strengthens Assumption \ref{assm} by requiring that $\kappa^*_\mu(\mathcal{K})$ be $\omega$.} by examining consequences of categoricity from \cite{sh394} and \cite{ShVi}. The key feature in this assumption is that there are no infinite splitting chains (as forbidden in \cite[Theorem 2.2.1]{ShVi}). We weaken $\mu$-superstability by only forbidding long enough splitting chains. How long is `long enough' is measured by $\kappa^*_\mu(\mathcal{K})$, which is a relative of \cite[Definition 4.3]{GV2} and universal local character \cite[Definition 3.5]{bgcoheir}. Following \cite{bgcoheir}, we add the `*' to this symbol to denote that the chain is required to have the property that $M_{i+1}$ is universal over $M_i$.
\begin{definition} \label{kappastar-def} We define $\kappa^*_\mu(\mathcal{K})$ to be the minimal, regular $\kappa<\mu^+$ so that for every increasing and continuous sequence $\langle M_i\in\mathcal{K}_\mu\mid i\leq\alpha \rangle$ with $\alpha\geq \kappa$ regular which satisfies
for every $i<\alpha$, $M_{i+1}$ is universal over $M_i$, and for every non-algebraic $p\in\operatorname{ga-S}(M_\alpha)$, there exists $i<\alpha$ such that $p$ does not $\mu$-split over $M_i$. If no such $\kappa$ exists, we say $\kappa^*_\mu(\mathcal{K})=\infty$.
We call $\kappa^*_\mu(\mathcal{K})$ the `universal local character for $\mu$-nonsplitting for $\mathcal{K}$,' or simply the `universal local character' for short when $\mu$ and $\mathcal{K}$ are fixed. \end{definition}
In \cite[Theorem 4.13]{GV2}, Grossberg and VanDieren show that if $\mathcal{K}$ is a tame stable abstract elementary class satisfying the joint embedding and amalgamation properties with no maximal models, then there exists a single bound for $\kappa^*_\mu(\mathcal{K})$ for all sufficiently large $\mu$ in which $\mathcal{K}$ is $\mu$-stable. This proof works by considering the $\chi$-order property of Shelah. We can also give a direct bound assuming tameness.
\begin{proposition} Let $\mathcal{K}$ be an AEC with amalgamation that is $\lambda$-stable and $(\lambda, \mu)$-tame. Then $\kappa^*_\mu(\mathcal{K}) \leq \lambda$. \end{proposition}
Note that the proof does not require the extensions to be universal.
\begin{proof} Let $\langle M_i \in K_\mu : i \leq \alpha \rangle$ be an increasing, continuous chain with $\cf(\alpha) \geq \lambda$ and $p \in \operatorname{ga-S}(M_\alpha)$. By \cite[Claim 3.3.(1)]{sh394} and $\lambda$-stability, there is $N_0 \prec M_\alpha$ of size $\lambda$ such that $p$ does not $\lambda$-split over $N_0$. By tameness, $p$ does not $\mu$-split over $N_0$. By the cofinality assumption, there is $i_* < \alpha$ such that $N_0 \prec M_{i_*}$. By monotonicity, $p$ does not $\mu$-split over $M_{i_*}$. \end{proof}
This definition motivates our main assumption. We use this collection only to group these items together and will explicitly list Assumption \ref{assm} when it is part of a result's hypothesis.
\begin{assumption}\label{assm} \mbox{} \begin{enumerate} \item $\mathcal{K}$ satisfies the joint embedding and amalgamation properties with no maximal model of cardinality $\mu$. \item $\mathcal{K}$ is stable in $\mu$. \item $\kappa^*_\mu(\mathcal{K})<\mu^+$. \item \label{wc-split}$\mathcal{K}$ satisfies (limit) continuity for non-$\mu$-splitting (i.e. if $p\in\operatorname{ga-S}(M)$ and $M$ is a limit model witnessed by $\langle M_i\mid i<\theta\rangle$ for some limit ordinal $\theta<\mu^+$ and there exists $N$ so that $p\restriction M_i$ does not $\mu$-split over $N$ for all $i<\theta$, then $p$ does not $\mu$-split over $N$). \end{enumerate}
\end{assumption}
A few comments on the assumption are in order. Note that tameness is not assumed in this paper. Amalgamation is commonly assumed in the study of limit models, although \cite{ShVi,Va1, Va1e} replace it with more nuanced results about amalgamation bases. Stability in $\mu$ is necessary for the conclusion of Theorem \ref{uniqueness theorem} to make sense; otherwise, there are no limit models! We have argued (both in principle and in practice) that varying the local character cardinal is the right generalization of superstability to stability in this context. However, we have kept the ``continuity cardinal'' to be $\omega$; this is the content of Assumption \ref{assm}.(\ref{wc-split}). This seems necessary for the arguments\footnote{The first author claimed in the discussion following \cite[Lemma 9.1]{extendingframes} that only long continuity was necessary. However, after discussion with Sebastien Vasey, this seems to be an error.}. It seems reasonable to hope that some failure of continuity for non-splitting will lead to a nonstructure result, but this has not yet been achieved.
The assumptions are (trivially) satisfied in any superstable AEC and, therefore, any categorical AEC. However, in this context, the result is already known. For a new example, we look to the context of strictly stable homogeneous structures as developed in Hyttinen \cite[Section 1]{hyttinen}. In the homogeneous contexts, Galois types are determined by syntactic types. Armed with this, Hyttinen studies the normal syntactic notion of nonsplitting under a stable, unsuperstable hypothesis \cite[Assumption 1.1]{hyttinen}, and shows that syntactic splitting satisfies continuity and (more than) the universal local character of syntactic nonsplitting is $\aleph_1$.\footnote{It shows that it is at most $\aleph_1$. However, if it were $\aleph_0$, the class would be superstable, contradicting the assumption.} It is easy to see that the syntactic version of nonsplitting implies our nonsplitting, which already implies $\kappa^*_\mu(\mathcal{K}) = \aleph_1$. The following argument shows that, if $N$ is limit over $M$, the converse holds as well, which is enough to get the limit continuity for our semantic definition of splitting. Since the context of homogeneous model theory is very tame, we don't worry about attaching a cardinal to non-splitting because they are all equivalent.
Suppose that $N$ is a limit model over $M$, witnessed by $\langle N_i \mid i < \alpha \rangle$, and $p \in \operatorname{ga-S}(N)$ syntactically splits over $M$. Then, since Galois types are syntactic, there are $b, c \in N$ such that $\tp(b/M) = \tp(c/M)$ and, for an appropriate $\phi$, $\phi(x, b, m) \wedge \neg \phi(x, c, m) \in p$. We can find $\beta, \beta' < \alpha$ such that $b \in N_\beta$ and $c \in N_{\beta'}$. Since $b$ and $c$ have the same type, we can find an amalgam $N_* \succ N_\beta$ and $f:N \to_{M} N_*$ such that $f(b) = c$. Since $N$ is universal over $N_{\beta'}$, we can find $h:N_* \to_{N_{\beta'}} N$. This gives us an isomorphism $h \circ f:N_\beta \cong h(f(N_\beta))$ and we claim that this witnesses the semantic version of splitting: $c \in N_{\beta'}$, so $c = h(c) = h(f(b)) \in h(f(N_\beta))$ and, thus, $\neg \phi(x, c, m) \in p \upharpoonright h(f(N_\beta))$. On the other hand, $\phi(x, c, m) = h\circ f(\phi(x, b, m)) \in h \circ f( p \upharpoonright N_\beta)$. Thus, we have witnessed $h \circ f(p \upharpoonright N_\beta) \neq p \upharpoonright h(f(N_\beta))$.
Note if $\kappa^*_\mu(\mathcal{K}) = \mu$, then the conclusion of Theorem \ref{uniqueness theorem} is uninteresting, but the results still hold: any two limit models whose lengths have the same cofinality are isomorphic on general grounds. Also, we assume joint embedding, etc. only in $\mathcal{K}_\mu$. However, to simplify presentation, we work as though these properties held in all of $\mathcal{K}$ and, thus, we work inside a monster model. This will allow us to write $\tp(a/M)$ rather than $\tp(a/M;N)$ and witness Galois type equality with automorphisms. The standard technique of working inside of a $(\mu,\mu^+)$-limit model can translate our proofs to ones not using a monster model.
Under these assumptions, it is possible to construct towers. This is the key technical tool in this construction. Towers were introduced in Shelah and Villaveces \cite{ShVi} and expanded upon in \cite{Va1} and subsequent works.
Recall that, if $I$ is well-ordered, then it has a successor function which we will denote $+1$ (or $+_I 1$ if necessary). Also, we typically restrict our attention to well-ordered $I$.
\begin{definition}[{\cite[Definition I.5.1]{Va1}}]\
\begin{enumerate}
\item A \emph{tower indexed by $I$ in $\mathcal{K}_\mu$} is a triple $\mathcal{T} = \langle \bar M, \bar a, \bar N \rangle$ where
\begin{itemize}
\item $\bar M=\langle M_i\in\mathcal{K}_\mu\mid i\in I\rangle$ is an increasing sequence of limit models;
\item $\bar a=\langle a_{i}\in M_{i+1}\backslash M_i\mid i+1\in I\rangle$ is a sequence of elements;
\item $\bar N=\langle N_{i} \in K_\mu \mid i+1\in I\rangle$ such that $N_i \prec M_i$ with $M_i$ universal over $N_i$; and
\item $\tp(a_i/M_i)$ does not $\mu$-split over $N_i$. \end{itemize} \item A tower $\mathcal{T} = \langle \bar M, \bar a, \bar N\rangle$ is \emph{continuous} iff $\bar M$ is, i.e., $M_i = \cup_{j<i} M_j$ for all limit $i \in I$. \item $\mathcal{K}^*_{\mu, I}$ is the collection of all towers indexed by $I$ in $\mathcal{K}_\mu$.
\end{enumerate} \end{definition}
Note that continuity is not required of all towers.
We will switch back and forth between the notation $\mathcal{K}^*_{\mu,\alpha}$ where $\alpha$ is an ordinal and $\mathcal{K}^*_{\mu,I}$ where $I$ is a well ordered set (of order type $\alpha$) when it will make the notation clearer. When we deal with relatively full towers, we will find the notation using $I$ to be more convenient for book-keeping purposes.
For $\beta<\alpha$ and $\mathcal{T}=(\bar M,\bar a,\bar N)\in\mathcal{K}^*_{\mu,\alpha}$ we write $\mathcal{T}\restriction \beta$ for the tower made up of the sequences $\bar M\restriction \beta:=\langle M_i\mid i<\beta\rangle$, $\bar a\restriction\beta:=\langle a_i\mid i+1<\beta\rangle$, and $\bar N\restriction \beta:=\langle N_i\mid i+1<\beta\rangle$.
We will construct increasing chains of towers. Here we define what it means for one tower to extend another:
\begin{definition} For $I$ a sub-ordering of $I'$ and
towers $(\bar M,\bar a,\bar N)\in\mathcal{K}^*_{\mu,I}$ and $(\bar M',\bar a',\bar N')\in\mathcal{K}^*_{\mu,I'}$, we say $$(\bar M,\bar a,\bar N)\leq (\bar M',\bar a',\bar N')$$ if $\bar a=\bar a'\restriction I$, $\bar N=\bar N'\restriction I$, and for $i\in I$, $M_i\preceq_{\mathcal{K}}M'_i$ and whenever $M'_i$ is a proper extension of $M_i$, then $M'_i$ is universal over $M_i$. If for each $i\in I$, $M'_i $ is universal over $M_i$ we will write $(\bar M,\bar a,\bar N)< (\bar M',\bar a',\bar N')$. \end{definition}
For $\gamma$ a limit ordinal $<\mu^+$ and $\langle I_j\mid j<\gamma\rangle$ a sequence of well ordered sets with $I_j$ a sub-ordering of $I_{j+1}$, if $\langle(\bar M^j,\bar a,\bar N)\in\mathcal{K}^*_{\mu,I_j}\mid j<\gamma\rangle$ is a $<$-increasing sequence of towers, then the union $\mathcal{T}$ of these towers is determined by the following: \begin{itemize} \item for each $\beta\in \bigcup_{j<\gamma}I_j$,
$M_\beta:=\bigcup_{\beta\in I_j;\; j<\gamma}M^j_\beta$
\item the sequence $\langle a_\beta\mid \exists (j<\gamma)\; \beta+1,\beta\in I_j\rangle$, and
\item the sequence $\langle N_\beta\mid \exists (j<\gamma)\; \beta+1,\beta\in I_j\rangle$
\end{itemize} is a tower in $\mathcal{K}^*_{\mu,\bigcup_{j<\gamma}I_j}$, provided that $\mathcal{K}$ satisfies the continuity property for non-$\mu$-splitting and that $\bigcup_{j<\gamma} I_j$ is well ordered. Note that it is our desire to take increasing unions of towers that leads to the necessity of the continuity property.
We also need to recall a few facts about directed systems of partial extensions of towers
that are implicit in \cite{Va1}. These are helpful tools in the inductive construction of towers and are used in other work (see, e.g., \cite[Facts 2 and 3]{Va2}): Fact \ref{successor stage prop} will get us through the successor step of inductive constructions of directed systems, and Fact \ref{limit stage prop} describes how to pass through the limit stages. An explicit proof of Fact \ref{limit stage prop} appears as \cite[Fact 3]{Va2}, and we provide a proof of Fact \ref{successor stage prop} below. Two important notes:
\begin{itemize}
\item These facts do not require that the towers be continuous.
\item The work in \cite{Va1} does not assume amalgamation, so more care had to be taken in working with large limit models (in place of the monster model) and towers made of amalgamation bases. The amalgamation assumption in this (and other) papers significantly simplifies the situation.
\end{itemize}
\begin{fact}[\cite{Va1}]\label{successor stage prop} Suppose $\mathcal{T}$ is a tower in $\mathcal{K}^*_{\mu,\alpha}$ and $\mathcal{T}'$ is a tower of length $\beta<\alpha$ with $\mathcal{T}\restriction \beta<\mathcal{T}'$, if $f\in\Aut_{M_\beta}(\mathfrak{C})$ and $M''_\beta$ is a limit model universal over $M_{\beta}$ such that $\tp(a_\beta/M''_\beta)$ does not $\mu$-split over $N_\beta$ and $f(\bigcup_{i<\beta}M'_i)\prec_{\mathcal{K}}M''_\beta$, then the tower $\mathcal{T}''\in\mathcal{K}^*_{\mu,\beta+1}$ defined by $f(\mathcal{T}')$ concatenated with the model $M''_\beta$, element $a_\beta$ and submodel $N_\beta$ is an extension of $\mathcal{T}\restriction (\beta+1)$. \end{fact}
\begin{proof} This is a routine verification from the definitions. $\mathcal{T}''\upharpoonright \beta$ is isomorphic to the tower $\mathcal{T}'$ and we are given the required nonsplitting and that, for $i < \beta$, $f(M_i')\prec M_\beta''$, so we have that $\mathcal{T}'' \in \mathcal{K}^*_{\mu, \beta+1}$. Similarly, $f$ fixes $\mathcal{T} \upharpoonright \beta$ (as $f\in\Aut_{M_\beta}(\mathfrak{C})$ and each $M_i\prec M_\beta$ for $i<\beta$), so $\mathcal{T} \upharpoonright \beta < \mathcal{T}'$ implies $\mathcal{T} \upharpoonright \beta < \mathcal{T}'' \upharpoonright \beta$. To extend this to $\mathcal{T} \upharpoonright (\beta+1) < \mathcal{T}''\upharpoonright (\beta+1) = \mathcal{T}''$, we note that $M_\beta''$ is universal over $M_\beta$ by assumption. \end{proof}
\begin{fact}[\cite{Va1}]\label{limit stage prop} Fix $\mathcal{T}\in\mathcal{K}^*_{\mu,\alpha}$ for $\alpha$ a limit ordinal. Suppose $\langle \mathcal{T}^i\in\mathcal{K}^*_{\mu,i}\mid i<\alpha\rangle$ and $\langle f_{i,j}\mid i\leq j<\alpha\rangle$ form a directed system of towers. Suppose \begin{itemize} \item each $\mathcal{T}^i$ extends $\mathcal{T}\restriction i$ \item $f_{i,j}\restriction M_i=id_{M_i}$ \item $M^{i+1}_{i+1}$ is universal over $f_{i,i+1}(M^i_i)$. \end{itemize} Then there exists a direct limit $\mathcal{T}^\alpha$ and mappings $\langle f_{i,\alpha}\mid i<\alpha\rangle$ to this system so that $\mathcal{T}^\alpha\in\mathcal{K}^*_{\mu,\alpha}$, $\mathcal{T}^\alpha$ extends $\mathcal{T}$, and $f_{i,\alpha}\restriction M_i=id_{M_i}$. \end{fact}
Finally, to prove results about the uniqueness of limit models, we will additionally need to assume that non-$\mu$-splitting satisfies a symmetry property over limit models. We refine the definition of symmetry from \cite[Definition 3]{Va2} for non-$\mu$-splitting; this localization only requires symmetry to hold when $M_0$ is $(\mu, \delta)$-limit over $N$.
\begin{definition}\label{mu-delta symmetry} Fix $\mu\geq\LS(\mathcal{K})$ and $\delta$ a limit ordinal $<\mu^+$. We say that an abstract elementary class exhibits \emph{$(\mu,\delta)$-symmetry for non-$\mu$-splitting} if whenever models $M,M_0,N\in\mathcal{K}_\mu$ and elements $a$ and $b$ satisfy the conditions \ref{limit sym cond}-\ref{last} below, then there exists $M^b$ a limit model over $M_0$, containing $b$, so that $\tp(a/M^b)$ does not $\mu$-split over $N$. See Figure \ref{fig:sym}. \begin{enumerate} \item\label{limit sym cond} $M$ is universal over $M_0$ and $M_0$ is a $(\mu,\delta)$-limit model over $N$. \item\label{a cond} $a\in M\backslash M_0$. \item\label{a non-split} $\tp(a/M_0)$ is non-algebraic and does not $\mu$-split over $N$. \item\label{last} $\tp(b/M)$ is non-algebraic and does not $\mu$-split over $M_0$.
\end{enumerate}
\end{definition}
\begin{figure}
\caption{A diagram of the models and elements in the definition of $(\mu,\delta)$-symmetry. We assume the type $\tp(b/M)$ does not $\mu$-split over $M_0$ and $\tp(a/M_0)$ does not $\mu$-split over $N$. Symmetry implies the existence of $M^b$ a limit model over $M_0$ so that $\tp(a/M^b)$ does not $\mu$-split over $N$.}
\label{fig:sym}
\end{figure}
Note that $(\mu, \delta)$-symmetry is the same as $(\mu, \cf \delta)$-symmetry.
\section{Relatively Full Towers} \label{relfulltow-sec}
One approach to proving the uniqueness of limit models is to construct a continuous relatively full tower of length $\theta$, and then conclude that the union of the models in this tower is a $(\mu,\theta)$-limit model. In this section we confirm that this approach can be carried out in this context, even if we remove continuity along the relatively full tower.
\begin{definition}[{\cite[Definition 3.2.1]{ShVi}}]\label{strong type
defn}
For $M$ a $(\mu,\theta)$-limit model, \index{strong
types}\index{Galois-type!strong}\index{$\operatorname{\mathfrak{S}t}(M)$}\index{$(p,N)$}
let $$\operatorname{\mathfrak{S}t}(M):=\left\{\begin{array}{ll} (p,N) &
\left|\begin{array}{l} N\prec_{\mathcal{K}}M;\\ N\text{ is a }(\mu,\theta)\text{-limit model};\\ M\text{ is universal over }N;\\ p\in \operatorname{ga-S}(M)\text{ is non-algebraic}\\ \text{and }p\text{ does not }\mu\text{-split over }N. \end{array}\right\} \end{array}\right . $$ Elements of $\operatorname{\mathfrak{S}t}(M)$ are called {\em strong types.}
Two strong types $(p_1,N_1)\in\operatorname{\mathfrak{S}t}(M_1)$ and $(p_2,N_2)\in\operatorname{\mathfrak{S}t}(M_2)$ are \emph{parallel} iff for every $M'$ of cardinality $\mu$ extending $M_1$ and $M_2$ there exists $q\in\operatorname{ga-S}(M')$ such that $q$ extends both $p_1$ and $p_2$ and $q$ does not $\mu$-split over $N_1$ nor over $N_2$.
\end{definition}
\begin{definition}[Relatively Full Towers]\label{def:relativefulltowers}
Suppose that $I$ is a well-ordered set. Let $(\bar M,\bar a,\bar N)$ be a tower indexed by $I$ such that
each $M_i$ is a $(\mu,\sigma)$-limit model. For each $i$, let $\langle M^\gamma_{i}\mid \gamma<\sigma\rangle$ witness that $M_{i}$ is a $(\mu,\sigma)$-limit model.\\ The tower $(\bar M,\bar a,\bar N)$ is \emph{full relative to $(M^\gamma_{i})_{\gamma<\sigma,i\in I}$} iff \begin{enumerate} \item \label{niceorder-def} there exists a
cofinal sequence $\langle i_\alpha\mid\alpha<\theta\rangle$ of $I$
of order type $\theta$ such that
there are $\mu\cdot \omega$ many elements between $i_\alpha$ and
$i_{\alpha+1}$ and \item\label{strong type condition} for every $\gamma<\sigma$ and every $(p,M^\gamma_{i})\in\operatorname{\mathfrak{S}t}(M_{i})$ with $i_\alpha\leq i<i_{\alpha+1}$, there exists $j\in I$ with $i\leq j< i_{\alpha+1}$ such that $(\tp(a_j/M_j),N_j)$ and $(p,M^\gamma_{i})$ are parallel. \end{enumerate} \end{definition}
The following proposition will allow us to use relatively full towers to produce limit models. The fact that relatively full towers yield limit models was first proved in \cite{Va1} and in \cite{gvv} and later improved in \cite[Proposition 4.1.5]{Dr}. We notice here that the proof of \cite[Proposition 4.1.5]{Dr} does not require that the tower be continuous and does not require that $\kappa^*_\mu(\mathcal{K})=\omega$. We provide the proof for completeness.
\begin{proposition}[Relatively full towers provide limit
models]\label{relatively full is limit} Let $\theta$ be a limit ordinal
$<\mu^+$ satisfying $\theta=\mu\cdot\theta$. Suppose that $I$ is a
well-ordered set
as in Definition \ref{def:relativefulltowers}.(\ref{niceorder-def}).
Let $(\bar M,\bar a,\bar N)\in\mathcal{K}_{\mu,I}^*$ be a tower made up of $(\mu,\sigma)$-limit models, for some fixed $\sigma$ with $\kappa^*_\mu(\mathcal{K})\leq\cf(\sigma)<\mu^+$. If $(\bar M,\bar a,\bar N)\in\mathcal{K}^*_{\mu,I}$ is full relative to $(M^\gamma_i)_{i\in I,\gamma<\sigma}$, then $M:=\bigcup_{i\in I}M_i$ is a $(\mu,\theta)$-limit model over $M_{i_0}$. \end{proposition}
\begin{proof} Because the sequence $\langle i_\alpha\mid \alpha<\theta\rangle$ is cofinal in $I$ and $\theta=\mu\cdot\theta$, we can rewrite $M:=\bigcup_{i\in I}M_i=\bigcup_{\beta<\theta}M_{i_{\beta}}=\bigcup_{\alpha<\theta}\bigcup_{\delta<\mu}M_{i_{\mu\alpha+\delta}}$.
For $\alpha<\theta$ and $\delta<\mu$, notice \begin{equation}\label{special equation} M_{i_{\mu\alpha+\delta+1}}\text{ realizes every type over }M_{i_{\mu\alpha+\delta}}. \end{equation} To see this take $p\in\operatorname{ga-S}(M_{i_{\mu\alpha+\delta}})$. By our assumption that $\cf(\sigma)\geq\kappa^*_\mu(\mathcal{K})$, $p$ does not $\mu$-split over $M^\gamma_{i_{\mu\alpha+\delta}}$ for some $\gamma<\sigma$. Therefore $(p,M^\gamma_{i_{\mu\alpha+\delta}})\in\operatorname{\mathfrak{S}t}(M_{i_{\mu\alpha+\delta}})$. By definition of relatively full towers, there is an $a_k$ with $i_{\mu\alpha+\delta}\leq k<i_{\mu\alpha+\delta+1}$ so that $(\tp(a_k/M_k),N_k)$ and $(p,M^\gamma_{i_{\mu\alpha+\delta}})$ are parallel. Because $M_{i_{\mu\alpha+\delta}}\prec_{\mathcal{K}}M_k$, by the definition of parallel strong types, it must be the case that $a_k\models p$.
By a back and forth argument we can conclude from $(\ref{special equation})$ that $M_{i_{\mu\alpha+\mu}}$ is universal over $M_{i_{\mu\alpha}}$. Thus $M$ is a $(\mu,\theta)$-limit model.
To see the details of the back-and-forth argument mentioned in the previous paragraph, first translate $(\ref{special equation})$ to the terminology of \cite{Ba}: $(\ref{special equation})$ witnesses that $\bigcup_{\beta<\mu}M_{i_{\mu\alpha+\beta}}$ is $1$-special over $M_{i_{\mu\alpha}}$. Then, refer to the proof of Lemma 10.5 of \cite{Ba}.
\end{proof}
\section{Reduced Towers} \label{redtow-sec}
The proof of the uniqueness of limit models from \cite{sh394, gvv, Va1, Va1e} is two dimensional. In the context of towers, the relatively full towers are used to produce a $(\mu,\theta)$-limit model, but to conclude that this model is also a $(\mu,\omega)$-limit model, a $<$-increasing chain of $\omega$-many continuous towers of length $\theta+1$ is constructed. We adapt this construction to prove Theorem \ref{uniqueness theorem}. Instead of creating a chain of $\omega$-many towers, we produce a chain of $\delta$-many towers, and instead of each tower in this chain being continuous, we only require that these towers are continuous at limit ordinals of cofinality at least $\kappa^*_\mu(\mathcal{K})$.
The use of towers should be compared with the proof of the uniqueness of limit models in \cite[Section II.4]{shelahaecbook} (details are given in \cite[Section 9]{extendingframes}). Both proofs create a `square' of models, but do so in a different way. The proof here will proceed by starting with a 1-dimensional tower of models and then, in the induction step, extend this tower to fill out the square. In contrast, the induction step of \cite[Lemma II.4.8]{shelahaecbook} adds single models at a time. This seems like a minor distinction (or even just a difference in how the induction step is carried out), but there is a real distinction in the resulting squares. In \cite{shelahaecbook}, the construction is `symmetric' in the sense that $\theta$ and $\delta$ are treated the same. However, in the proof presented here, this symmetry is broken and one could `detect' which side of the square was laid out initially by observing where continuity fails.
In \cite{gvv, Va1, Va1e, Va2}, the continuity of the towers is achieved by restricting the construction to reduced towers, which under the stronger assumptions of \cite{gvv, Va1, Va1e, Va2} are shown to be continuous. We take this approach and notice that continuity of reduced towers at certain limit ordinals can be obtained with the weaker assumptions of Theorem \ref{uniqueness theorem}, in particular $\kappa^*_\mu(\mathcal{K})<\mu^+$.
\begin{definition}\label{reduced defn}\index{reduced towers} A tower $(\bar M,\bar a,\bar N)\in\mathcal{K}^*_{\mu,\alpha}$ is said to be \emph{reduced} provided that for every $(\bar M',\bar a,\bar N)\in\mathcal{K}^*_{\mu,\alpha}$ with $(\bar M,\bar a,\bar N)\leq(\bar M',\bar a,\bar N)$ we have that for every $i<\alpha$, $$(*)_i\quad M'_i\cap\bigcup_{j<\alpha}M_j = M_i.$$ \end{definition}
The proofs of the following three results about reduced towers only require that the class $\mathcal{K}$ be stable in $\mu$ and that $\mu$-splitting satisfies the continuity property. Although \cite{ShVi} works under stronger assumptions than we do currently, none of these results use anything beyond Assumption \ref{assm}. In particular, $\kappa^*_\mu(\mathcal{K})=\omega$ holds in \cite{ShVi}, but is not used.
\begin{fact}[{\cite[Theorem 3.1.13]{ShVi}}]\label{density of reduced}\index{reduced towers!density of} Let $\mathcal{K}$ satisfy Assumption \ref{assm}. There exists a reduced $<$-extension of every tower in $\mathcal{K}^*_{\mu,\alpha}$.
\end{fact}
\begin{fact}[{\cite[Theorem 3.1.14]{ShVi}}]\label{union of reduced is reduced} Let $\mathcal{K}$ satisfy Assumption \ref{assm}. Suppose $\langle (\bar M,\bar a,\bar N)^\gamma\in\mathcal{K}^*_{\mu,\alpha}\mid \gamma<\beta\rangle$ is a $<$-increasing sequence of reduced towers that is continuous in the sense that, for each limit $\gamma<\beta$, the tower $(\bar M,\bar a,\bar N)^\gamma$ is the union of the towers $(\bar M,\bar a,\bar N)^\zeta$ for $\zeta<\gamma$. Then the union of the sequence of towers $\langle (\bar M,\bar a,\bar N)^\gamma\in\mathcal{K}^*_{\mu,\alpha}\mid \gamma<\beta\rangle$ is itself a reduced tower. \end{fact}
In fact the proof of Fact \ref{union of reduced is reduced} gives a slightly stronger result which allows us to take the union of an increasing chain of reduced towers of increasing index sets and conclude that the union is still reduced.
\begin{fact}[{\cite[Lemma 5.7]{gvv}}]\label{monotonicity} Let $\mathcal{K}$ satisfy Assumption \ref{assm}. Suppose that $(\bar M,\bar a,\bar N)\in\mathcal{K}^*_{\mu,\alpha}$ is reduced. If $\beta<\alpha$, then $(\bar M,\bar a,\bar N)\restriction \beta$ is reduced. \end{fact}
The following theorem is related to \cite[Theorem 3]{Va2}, which additionally assumes that $\kappa^*_\mu(\mathcal{K}) = \omega$; in other words, it assumes that $\mathcal{K}$ is $\mu$-superstable. Instead, we allow for strict stability (that is, $\kappa^*_\mu(\mathcal{K})$ to be uncountable) at the cost of only guaranteeing continuity at limits of large cofinality. In particular, the proof is similar to the proof of $(a)\to(b)$ in \cite[Theorem 3]{Va2}, but we crucially allow our towers to be discontinuous at $\gamma$ where $\cf(\gamma)<\kappa^*_\mu(\mathcal{K})$. We provide the details where the proof differs.
\begin{theorem}\label{reduced are continuous} Suppose $\mathcal{K}$ satisfies Assumption \ref{assm}. Let $\alpha$ be an ordinal and $\delta$ be a limit ordinal so that $\kappa^*_\mu(\mathcal{K})\leq\cf(\delta)<\alpha$. If $\mathcal{K}$ satisfies $(\mu, \delta)$-symmetry for non-$\mu$-splitting and
$(\bar M,\bar a,\bar N)\in\mathcal{K}^*_{\mu,\alpha}$ is reduced, then the tower $(\bar M,\bar a,\bar N)$ is continuous at $\delta$ (i.e., $M_\delta=\bigcup_{\beta<\delta}M_\beta$).
\end{theorem}
\begin{proof} Suppose the theorem is false. Then we can find a reduced tower $\mathcal{T} := (\bar M, \bar a, \bar N) \in \mathcal{K}^*_{\mu, \alpha}$ that is a counterexample of minimal length at $\delta$ in the sense that: \begin{enumerate}
\item $M_\delta \neq \cup_{i<\delta} M_i$ and
\item if $(\bar M', \bar a', \bar N') \in \mathcal{K}^*_{\mu, \alpha'}$ is reduced and discontinuous at $\delta$, then $\alpha \leq \alpha'$. \end{enumerate} Notice that Fact \ref{monotonicity} implies that $\alpha=\delta+1$. Let $b\in M_\delta\backslash \bigcup_{i<\delta}M_i$ witness the discontinuity of the tower at $\delta$.
By Fact \ref{density of reduced} and Fact \ref{union of reduced is reduced}, we can build $\mathcal{T}^i=(\bar M^i,\bar a^i,\bar N^i)\in\mathcal{K}^*_{\mu,\delta}$ for $i \leq \delta$ such that $\mathcal{T}^0 = \mathcal{T}\upharpoonright \delta$ and $\langle \mathcal{T}^i \mid i \leq \delta\rangle$ is a $<$-increasing, continuous chain. By $\delta$-applications of Fact \ref{density of reduced} in between successor stages of the construction, we can require that for $\beta<\delta$ \begin{align}\label{limit at successor}\begin{split} M^{i+1}_{\beta}\text{ is a }(\mu,\delta)\text{-limit over }M^i_{\beta}\\ \text{and consequently }M^{i+1}_{\beta}\text{ is a }(\mu,\delta)\text{-limit over }N_{\beta}. \end{split} \end{align} Let $\displaystyle{M^\delta_{diag}:=\bigcup_{i<\delta,\;\beta<\delta}M^i_\beta}$. Figure \ref{fig:Mdeltas} is an illustration of these models.
\begin{figure}
\caption{$(\bar M,\bar a,\bar N)$ and the towers $(\bar M,\bar a,\bar N)^j$ extending $(\bar M,\bar a,\bar N)\restriction\delta$.}
\label{fig:Mdeltas}
\end{figure}
There are two cases depending on whether $b$ is in $M^\delta_{diag}$ or not. Both cases lead to a contradiction of our assumption that $\mathcal{T}$ is reduced.
{\bf Case 1:} $b\in M^\delta_{diag}$\\ The first case will contradict our assumption that $(\bar M,\bar a,\bar N)$ is reduced. We have that $\mathcal{T}^\delta$ is an extension of $\mathcal{T}\upharpoonright \delta$ and that $M^\delta_{diag}$ contains $b$. Let $M^\delta_\delta$ be an extension of $M^\delta_{diag}$ that is also a universal extension of $M_\delta$. Then $\mathcal{T}^\delta {}^\frown \langle M^\delta_\delta \rangle$ is an extension of $\mathcal{T}$. Since $b \in M^\delta_{diag}$, there is some $j < \delta$ so $b \in M^\delta_j$. Because $\mathcal{T}$ is reduced, we have that $$M_j^\delta \cap \bigcup_{i < \alpha} M_i = M_j.$$ Notice that the left-hand side contains $b$ (since $b\in M^\delta_j\cap M_\delta$), but the right-hand side does not, a contradiction.
{\bf Case 2:} $b\notin M^\delta_{diag}$\\ Then $\tp(b/M^\delta_{diag})$ is non-algebraic. Consider the sequence $\langle \check M_i\mid i<\delta\rangle$ defined by $\check M_i:=M^i_i$ if $i$ is a successor and $\check M_i:=\bigcup_{j<i}M^j_j$ for $i$ a limit ordinal. Notice that $(\ref{limit at successor})$ implies that this sequence witnesses that $M^\delta_{diag}$ is a $(\mu,\delta)$-limit model. Because $M^\delta_{diag}$ is a $(\mu,\delta)$-limit model, by our assumption that $\cf(\delta)\geq\kappa^*_\mu(\mathcal{K})$ and monotonicity of non-splitting, there exists a successor ordinal $i^*<\delta$ so that \begin{equation}\label{i* equation} \tp(b/M^\delta_{diag})\text{ does not }\mu\text{-split over }M^{i^*}_{i^*}. \end{equation} Our next step in Case (2) is to consider the tower formed by the diagonal elements in Figure \ref{fig:Mdeltas}. In particular, let $\mathcal{T}^{diag}$ be the sequence $(M^i_i, a_i, N_i)_{i<\delta}$. We claim that $\mathcal{T}^{diag} \in \mathcal{K}^*_{\mu, \delta}$ and that $\mathcal{T}^{diag}$ extends $\mathcal{T}\upharpoonright \delta$.
We will now use $\mathcal{T}^{diag}$ to construct a tower containing $b$ that extends $\mathcal{T}\upharpoonright \delta$. First we find an approximation, $\mathcal{T}^b$, which is a tower of length $i^*+1$ that contains $b$ and extends $\mathcal{T}^{diag}\restriction(i^*+2)$. Then through a directed system of mappings, we move this tower so that the result is as desired.
To define $\mathcal{T}^b$, first notice that by (\ref{limit at successor}), $M^{i^*}_{i^*}$ is a $(\mu,\delta)$-limit over $N_{i^*}$. Now, referring to the Figure \ref{fig:sym}, apply $(\mu,\delta)$-symmetry to $a_{i^*}$ standing in for $a$, $M^{i^*}_{i^*}$ representing $M_0$, $N_{i^*}$ as $N$, $M^\delta_{diag}$ as $M$, and $b$ as itself. We can conclude that there exists $M^b$ containing $b$, a limit model over $M^{i^*}_{i^*}$, for which $\tp(a_{i^*}/M^b)$ does not $\mu$-split over $N_{i^*}$. Define the tower $\mathcal{T}^b\in\mathcal{K}^*_{\mu,i^*+2}$ by the sequences $\bar a\restriction (i^*+1)$, $\bar N\restriction (i^*+1)$ and $\bar M'$ with $M'_j:=M^j_j$ for $j\leq i^*$ and $M'_{i^*+1}:=M^b$. Notice that $\mathcal{T}^b$ is an extension of $\mathcal{T}^{diag}\restriction(i^*+2)$ containing $b$.
Next, we will explain how we can use this tower to find a tower $\mathring\mathcal{T}^\delta\in\mathcal{K}^*_{\mu,\delta}$ extending $\mathcal{T}^{diag}$ with $b\in \bigcup_{j<\delta}\mathring M^\delta_{j}$. This will be enough to contradict our assumption that $\mathcal{T}$ was reduced.
We want to build $\langle \mathring\mathcal{T}^j, f_{j,k}\mid i^*+2\leq j\leq k\leq\delta\rangle$ a directed system of towers so that for $j \geq i^*+2$ \begin{enumerate} \item\label{base} $\mathring\mathcal{T}^{i^*+2}=\mathcal{T}^b$ \item $\mathring\mathcal{T}^j\in\mathcal{K}^*_{\mu,j}$ for $j\leq\delta$ \item $\mathcal{T}^{diag}\restriction j \leq\mathring\mathcal{T}^j$ for $j\leq\delta$ \item $f_{j,k}(\mathring\mathcal{T}^j)\leq\mathring\mathcal{T}^k\restriction j$ for $j\leq k<\delta$ \item\label{id condition} $f_{j,k}\restriction M^{j}_j=id_{M^{j}_j}$ $j\leq k<\delta$ \item\label{limit M'} $\mathring M^{j+1}_{j+1}$ is universal over $f_{j,j+1}(\mathring M^j_j)$ for $j<\delta$ \item\label{b in} $b\in\mathring M^{j}_{i^*+1}$ for $j\leq\delta$ \item\label{non splitting} $\tp(f_{j,k}(b)/M^{k}_{k})$ does not $\mu$-split over $M^{i^*}_{i^*}$ for $j<k<\delta$. \end{enumerate}
{\bf Construction:} We will define this directed system by induction on $k$, with $i^*+2\leq k\leq\delta$. The base and successor cases are exactly as in the proof of Theorem 5 of \cite{Va2}. The only difference in the construction here is at limit stages in which $\mathcal{T}^{diag}$ is not continuous. Therefore we will concentrate on the details of the construction for stages $k$ and $k+1$ where $k<\delta$ is a limit ordinal for which $\mathcal{T}^{diag}$ is discontinuous at $k$.
{\bf Construction, Case 1:} $k$ is a limit ordinal at which $\mathcal{T}^{diag}$ is discontinuous.\\ First, let $\grave \mathcal{T}^k$ and $\langle\grave f_{j,k}\mid i^*+2\leq j<k\rangle$ be a direct limit of the system defined so far. We use the $\grave{}$ notation since these are only approximations to the tower and mappings that we are looking for. We will have to take some care to find a direct limit that contains $b$ in order to satisfy Condition \ref{b in} of the construction. By Fact \ref{limit stage prop} and our induction hypothesis, we may choose this direct limit so that for all $j<k$ \begin{equation*} \grave f_{j,k}\restriction M^{j}_j=id_{M^{j}_j}. \end{equation*} Consequently $\grave M^k_j:=\grave f_{j,k}(\mathring M^j_j)$ is universal over $M^{j}_j$, and $\bigcup_{j<k}\grave M^k_j$ is a limit model witnessed by Condition \ref{limit M'} of the construction. Additionally,
the tower $\grave\mathcal{T}^k$ composed of the models $\grave M^k_j$, extends $\mathcal{T}^{diag}\restriction k$.
We will next show that for every $j<k$, \begin{equation}\label{limit non split eqn} \tp(\grave f_{i^*+2,k}(b)/M^j_j)\text{ does not }\mu\text{-split over }M^{i^*}_{i^*}. \end{equation} To see this, recall that for every $j<k$, by the definition of a direct limit, $\grave f_{i^*+2,k}(b)=\grave f_{j,k}(f_{i^*+2,j}(b))$. By Condition \ref{non splitting} of the construction, we know \begin{equation*} \tp(f_{i^*+2,j}(b)/M^{j}_{j})\text{ does not }\mu\text{-split over }M^{i^*}_{i^*}. \end{equation*} Applying $\grave f_{j,k}$ to this implies $\tp(\grave f_{i^*+2,k}(b)/M^j_j)$ does not $\mu$-split over $M^{i^*}_{i^*}$, establishing $(\ref{limit non split eqn})$.
Because $M^{j+1}_{j+1}$ is universal over $M^j_j$ by construction, we can apply the continuity of non-splitting to $(\ref{limit non split eqn})$, yielding \begin{equation}\label{grave f} \tp(\grave f_{i^*+2,k}(b)/\bigcup_{j<k}M^j_j)\text{ does not }\mu\text{-split over }M^{i^*}_{i^*}. \end{equation}
Because $\grave f_{i^*+2,k}$ fixes $M^{i^*+1}_{i^*+1}$, $\tp(b/M^{i^*+1}_{i^*+1})=\tp(\grave f_{i^*+2,k}(b)/M^{i^*+1}_{i^*+1})$. We can then apply the uniqueness of non-splitting extensions (see \cite[Theorem I.4.12]{Va1}) to $(\ref{grave f})$ to see that $\tp(\grave f_{i^*+2,k}(b)/\bigcup_{j<k}M^j_j)=\tp(b/\bigcup_{j<k}M^j_j)$. Thus we can fix $g$ an automorphism of the monster model fixing $\bigcup_{j<k}M^j_j$ so that $g(\grave f_{i^*+2,k}(b))=b.$
We will then define $\mathring \mathcal{T}^k$ to be the tower $g(\grave\mathcal{T}^k)$, and the mappings for our directed system will be $f_{j,k}:=g\circ\grave f_{j,k}$ for all $ i^*+2\leq j<k$.
Notice that by our induction hypothesis we have that $b\in \mathring M^{i^*+2}_{i^*+1}$. Then, by definition of a direct limit we have $\grave f_{i^*+2,k}(b)\in \grave M^k_{i^*+1}$. Therefore $g(\grave f_{i^*+2,k}(b))=b\in \mathring M^k_{i^*+1}$, satisfying Condition \ref{b in} of the construction. Furthermore for all $j<k$, we have that $f_{j,k}(b)=b$. Therefore by $(\ref{i* equation})$ and monotonicity of non-splitting, Condition \ref{non splitting} of the construction holds.
Notice that $\mathcal{T}^{diag}$ being discontinuous at $k$ does not impact this stage of the construction since we only require that $\mathring \mathcal{T}^k$ be a tower of length $k$ and therefore $\mathring \mathcal{T}^k$ need not contain models extending $M^k_k$. The discontinuity plays a role at the next stage of the construction.
{\bf Construction, Case 2:} $k+1$ is the successor of a limit ordinal $k$ at which $\mathcal{T}^{diag}$ is discontinuous.\\ Suppose that
$\mathcal{T}^{diag}$ is discontinuous at $k$ and that $\mathring\mathcal{T}^k\in\mathcal{K}^*_{\mu,k}$ has been defined.
By our choice of $i^*$, we have $\tp(b/\bigcup_{l<\delta}M^{l}_l)$ does not $\mu$-split over $M^{i^*}_{i^*}$. So in particular by monotonicity of non-splitting, we notice: \begin{equation}\label{Mjj non-split}
\tp(b/M^{k+1}_{k})\text{ does not }\mu\text{-split over }M^{i^*}_{i^*}. \end{equation}
Using the definition of towers (i.e. $M^{k+1}_{k}$ is a $(\mu,\delta)$-limit over $N_{k}$ and $\tp(a_k/M^{k+1}_k)$ does not $\mu$-split over $N_k$) and the choice of $i^*$, we can apply $(\mu,\delta)$-symmetry to $a_{k}$, $M^{k+1}_{k}$,
$ \bigcup_{l<\delta}M^{l}_l$, $b$ and $N_{k}$ which will yield $M^b_{k}$ a limit model over $M^{k+1}_{k}$ containing $b$
so that $\tp(a_{k}/M^b_{k})$ does not $\mu$-split over $N_{k}$ (see Figure \ref{fig:successor}).
\begin{figure}
\caption{A diagram of the application of $(\mu,\delta)$-symmetry in the successor stage of the directed system construction in the proof of Theorem \ref{reduced are continuous}. We have $\tp(b/ \bigcup_{l<\delta}M^{l}_l)$ does not $\mu$-split over $M^{k+1}_{k}$ and $\tp(a_{k}/M^{k+1}_{k})$ does not $\mu$-split over $N_{k}$. Symmetry implies the existence of $M^b_k$, a limit model over $M^{k+1}_{k}$, so that $\tp(a_{k}/M^b_k)$ does not $\mu$-split over $N_{k}$.}
\label{fig:successor}
\end{figure} Notice that $M^b_k$ has no relationship to $\mathring \mathcal{T}^k$. In particular, it does not contain $\bigcup_{l<k}\mathring M^l_l$. Fix $M'$ to be a model of cardinality $\mu$ extending both $\bigcup_{l<k}\mathring M^l_l$ and $M^{k+1}_{k}$. Since $M^b_{k}$ is a limit model over $M^{k+1}_{k}$ which is a limit model over $M^k_k$, there exists $f:M'\rightarrow M^{k+1}_{k}$ with $f\restriction M^{k}_{k}=id_{M^{k}_{k}}$ so that $M^b_{k}$ is also universal over $f(\bigcup_{l<k}\mathring M^l_l)$. Because $\tp(b/M^k_k)$ does not $\mu$-split over $M^{i^*}_{i^*}$ and $f$ fixes $M^k_k$, we know that
$\tp(f(b)/M^k_k)$ does not $\mu$-split over $M^{i^*}_{i^*}$. But because $f(b)$ and $b$ both realize the same types over $M^{i^*+1}_{i^*+1}$, we can conclude by the uniqueness of non-splitting extensions that $\tp(f(b)/M^k_k)=\tp(b/M^k_k)$; so there is $g\in\Aut_{M^k_k}(\mathfrak{C})$ with $g(f(b))=b$. Since $M^b_k$ is universal over $M^k_k$ and $b\in M^b_k$, we can choose $g$ so that $g(f(M'))\prec_{\mathcal{K}}M^b_k$.
Take $\mathring M^{k+1}_{k}$ to be an extension of $M^b_{k}$ which is also universal over $M^{k+1}_{k+1}$, and set $f_{k,k+1}:=g\circ f$. To see that Condition \ref{non splitting} of the construction holds, just apply monotonicity and the fact that $f_{k,k+1}(b)=b$ to $(\ref{i* equation})$. See Figure \ref{fig:reduced tower}.
\begin{figure}
\caption{The construction of $\mathring \mathcal{T}^{k+1}$ (dotted) from $\mathring \mathcal{T}^k$ (bold) with $g\circ f$ fixing $M^k_k$ and $b$.}
\label{fig:reduced tower}
\end{figure}
It is easy to check by invariance and the induction hypothesis that $\mathring \mathcal{T}^{k+1}$ defined by the models $\mathring M^{k+1}_l:=f_{k,k+1}(\mathring M^k_l)$ for $l< k$ satisfies the remaining requirements on $\mathring \mathcal{T}^{k+1}$. Then the rest of the directed system can be defined by the induction hypothesis and the mappings $f_{l,k+1}:=f_{k,k+1}\circ f_{l,k}$ for $i^*+2\leq l<k$.
This completes the construction.
{\bf Case (2), continued:} Now that we have a tower $\mathring\mathcal{T}^\delta$ extending $\mathcal{T}\restriction\delta$ which contains $b$, we are in a situation similar to the proof in Case $(1)$. To contradict that $\mathcal{T}$ is reduced, we need only lengthen $\mathring\mathcal{T}^\delta$ to a discontinuous extension of the entire tower $(\bar M,\bar a,\bar N)$ by taking the $\delta^{th}$ model to be some extension of $\bigcup_{i<\delta}\mathring M^i_i$ which is also universal over $M_\delta$. This discontinuous extension of $(\bar M,\bar a,\bar N)$ along with $b\in\mathring M^{\delta}_{i^*+1}$ witness that $(\bar M,\bar a,\bar N)$ cannot be reduced.
\end{proof}
Although not used here, the converse of this theorem is also true, as in \cite{Va2}. Note that the following does not have any assumption about $\kappa^*_\mu(\mathcal{K})$.
\begin{proposition} Suppose $\mathcal{K}$ satisfies Assumption \ref{assm}.(1), (2), and (4). Suppose further that, for every reduced tower $(\bar M, \bar a, \bar N) \in \mathcal{K}_{\mu, \alpha}^*$, $\bar M$ is continuous at limit ordinals of cofinality $\delta$. Then $\mathcal{K}$ satisfies $(\mu, \delta)$-symmetry for non-$\mu$-splitting. \end{proposition}
\begin{proof} The proof is an easy adaptation of \cite[Theorem 3.$(b)\to(a)$]{Va2}. The same argument works; the only adaptations are to require that every limit model in fact be a $(\mu, \delta)$-limit model and that the tower $\mathcal{T}$ be of length $\delta+1$\footnote{In a happy coincidence, the notation in that proof already agrees with this change.}. \end{proof}
\section{Uniqueness of Long Limit Models} \label{ulm-sec} We now begin the proof of Theorem \ref{uniqueness theorem}, which we restate here. \begin{theorem1} Suppose that $\mathcal{K}$ is an abstract elementary class satisfying Assumption \ref{assm}.
For $\theta$ and $\delta$ limit ordinals $<\mu^+$ both with cofinality $\geq\kappa^*_\mu(\mathcal{K})$, if $\mathcal{K}$ satisfies symmetry for non-$\mu$-splitting (or just $(\mu,\delta)$-symmetry), then, for any $M_1$ and $M_2$ that are $(\mu,\theta)$ and $(\mu,\delta)$-limit models over $M_0$, respectively, we have that $M_1$ and $M_2$ are isomorphic over $M_0$.
\end{theorem1} The structure of the proof of Theorem \ref{uniqueness theorem} from this point on is similar to the proof in \cite[Theorem 1.9]{gvv}. For completeness we include the details here, and emphasize the points of departure from \cite[Theorem 1.9]{gvv}.
We construct an array of models which will produce a model that is both a $(\mu,\theta)$- and a $(\mu,\delta)$-limit model. Let $\theta$ be an ordinal as in the definition of relatively full tower so that $\cf(\theta)\geq\kappa^*_\mu(\mathcal{K})$ and let $\delta=\kappa^*_\mu(\mathcal{K})$. The goal is to build an array of models with $\delta+1$ rows so that the bottom row of the array is a relatively full tower indexed by a set of cofinality $\theta+1$ continuous at $\theta$.
To do this, we will be adding elements to the index set of the towers row by row, so that at stage $\beta$ of our construction the tower that we build is indexed by $I_\beta$,
described below.
The index sets $I_\beta$ will be defined inductively so that $\langle I_\beta\mid \beta<\delta+1\rangle$ is an increasing and continuous chain of well-ordered sets. We fix $I_0$ to be an index set of order type $\theta+1$ and will denote it by $\langle i_\alpha\mid\alpha\leq\theta\rangle$. We will refer to the members of $I_0$ by name in many stages of the construction. These indices serve as anchors for the members of the remaining index sets in the array. Next we demand that for each $\beta<\delta$, $\{j\in I_\beta\mid i_\alpha<j<i_{\alpha+1}\}$ has order type $\mu\cdot \beta$, and that each $I_\beta$ has supremum $i_\theta$. An example of such $\langle I_\beta\mid \beta\leq\delta\rangle$ is $I_\beta=\theta\times(\mu\cdot \beta)\cup\{i_\theta\}$ ordered lexicographically, where $i_\theta$ is an element $\geq$ each $i\in \bigcup_{\beta<\delta}I_\beta$. Also, let $I=\bigcup_{\beta<\delta}I_\beta$.
To prove Theorem \ref{uniqueness theorem}, we need to prove that, for a fixed $M\in\mathcal{K}$ of cardinality $\mu$, any $(\mu,\theta)$-limit and $(\mu,\delta)$-limit model over $M$ are isomorphic over $M$. Since all $(\mu, \theta)$-limits over $M$ are isomorphic over $M$ (and the same holds for $(\mu, \delta)$-limits), it is enough to construct a single model that is simultaneously $(\mu, \theta)$-limit and $(\mu, \delta)$-limit over $M$. Let us begin by fixing a limit model $M\in\mathcal{K}_\mu$. We define, by induction on $\beta\leq\delta$, a $<$-increasing and continuous sequence of towers $(\bar M,\bar a,\bar N)^\beta$ such that \begin{enumerate} \item $\mathcal{T}^0:=(\bar M,\bar a,\bar N)^0$ is a tower with $M^0_0=M$. \item $\mathcal{T}^\beta:=(\bar M,\bar a,\bar N)^\beta\in\mathcal{K}^*_{\mu,I_\beta}$. \item\label{realizing types} For every $(p,N)\in\operatorname{\mathfrak{S}t}(M^\beta_i)$ with $i_\alpha\leq i< i_{\alpha+1}$ there is $j\in I_{\beta+1}$ with $i_\alpha< j<i_{\alpha+1}$ so that $(\tp(a_j/M^{\beta+1}_j),N^{\beta+1}_j)$ and $(p,N)$ are parallel.
\end{enumerate} See Figure \ref{fig:arrayconstruction}.
\begin{figure}
\caption{The chain of length $\delta$ of towers of increasing index sets $I_j$ of cofinality $\theta+1$. The symbol $\lll$ indicates that there are $\mu$ many new indices between $i_\alpha$ and $i_{\alpha+1}$ in $I_{j+1}\backslash I_j$. The elements indexed by these indices realize all the strong types over the model $M^j_{i_\alpha}$. The notation $\prec_{u}$ is an abbreviation for a universal extension.}
\label{fig:arrayconstruction}
\end{figure}
Given $M$, we can find a tower $(\bar M,\bar a,\bar N)^0\in\mathcal{K}^*_{\mu,I_0}$ with $M\preceq_{\mathcal{K}}M^0_0$ because of the existence of universal extensions and because $\kappa^*_\mu(\mathcal{K})<\mu^+$. At successor stages we first take an extension of $(\bar M,\bar a,\bar N)^\beta$ indexed by $I_{\beta+1}$ and realizing all the strong types over the models in
$(\bar M,\bar a,\bar N)^\beta$. This tower may not be reduced, but by Fact \ref{density of reduced}, it has a reduced extension. At limit stages take unions of the chain of towers defined so far.
Notice that by Fact \ref{union of reduced is reduced}, the tower $\mathcal{T}^\delta$ formed by the union of all the $(\bar M,\bar a,\bar N)^\beta$ is reduced. Furthermore, by Theorem \ref{reduced are continuous} every one of the reduced towers $\mathcal{T}^j$ is continuous at $\theta$ because $\cf(\theta)\geq\kappa^*_\mu(\mathcal{K})$. Therefore $M^\delta_{i_\theta}=\bigcup_{k<\theta}M^\delta_{i_k}$, and by the definition of the ordering $<$ on towers, the last model in this tower ($M^\delta_{i_\theta}$) is a $(\mu,\delta)$-limit model witnessed by $\langle M^j_{i_\theta}\mid j<\delta\rangle$. Since $M^1_{i_\theta}$ is universal over $M$, we have that $M^\delta_{i_\theta}$ is $(\mu, \delta)$-limit over $M$.
Next to see that $M^\delta_{i_\theta}$ is also a $(\mu,\theta)$-limit model, notice that $\mathcal{T}^\delta$ is relatively full by condition \ref{realizing types} of the construction and the same argument as \cite[Claim 5.11]{gvv}. Therefore by
Theorem \ref{reduced are continuous} and our choice of $\delta$ with $\cf(\delta)\geq\kappa^*_\mu(\mathcal{K})$, the last model $M^\delta_{i_\theta}$ in this relatively full tower is a $(\mu,\theta)$-limit model over $M$.
This completes the proof of Theorem \ref{uniqueness theorem}.
\end{document} |
\begin{document}
\title{The von Neumann Model of Measurement in Quantum Mechanics}
\classification{03.65.Ta, 03.65.Wj} \keywords{}
\author{Pier A. Mello}{
address={Instituto de F\'isica, Universidad Nacional Aut\'onoma de M\'exico, Apdo. Postal 20-364, 01000 M\'exico, D. F., M\'exico} }
\begin{abstract}
We describe how to obtain information on a quantum-mechanical system by coupling it to a probe and detecting some property of the latter, using a model introduced by von Neumann, which describes the interaction of the system proper with the probe in a dynamical way.
We first discuss single measurements, where the system proper is coupled to one probe with arbitrary coupling strength. The goal is to obtain information on the system detecting the probe position. We find the reduced density operator of the system, and show how L\"uders rule emerges as the limiting case of strong coupling.
The von Neumann model is then generalized to two probes that interact successively with the system proper. Now we find information on the system by detecting the position-position and momentum-position correlations of the two probes. The so-called ``Wigner's formula" emerges in the strong-coupling limit, while ``Kirkwood's quasi-probability distribution" is found as the weak-coupling limit of the above formalism. We show that successive measurements can be used to develop a state-reconstruction scheme.
Finally, we find a generalized transform of the state and the observables based on the notion of successive measurements.
\end{abstract}
\maketitle
\section{INTRODUCTION}
In a general quantum measurement one obtains information on the system of interest by coupling it to an {\em auxiliary degree of freedom}, or {\em probe}, and then {\em detecting some property of the latter} using a measuring device. This procedure, which was described by von Neumann in his classic book \cite{Neumann-MathFounQuanMech:55}, will be referred to as von Neumann's model (vNM). Within the vNM, the combined system --system proper plus probe-- is given a dynamical description.
An example of the idea involved in the vNM is given by the Stern-Gerlach experiment, which is described in every textbook on QM (see, e.g., \cite{ballentine:99,peres-book}).
\begin{figure}
\caption{The Stern-Gerlach experiment, designed to find information on the
$z$-component of the spin of a particle by detecting its position.}
\label{fig1}
\end{figure}
In this experiment, presented schematically in Fig. \ref{fig1}, the observable we want to obtain information about is the $z$-component of the spin of a particle. The auxiliary degree of freedom, or probe, is its position ${\bf r}$ --still a microscopic quantity-- after it leaves the magnet, and this is what is recorded by a detecting device, which in this case is a position detector. E.g., for $s=1/2$, we may wish to find information on the two components
$\langle \psi_{spin}|\mathbb{P}^z_{\pm}| \psi_{spin}\rangle =|\langle\pm |\psi_{spin}\rangle|^2$ of the original state.
Another example is provided by Cavity Quantum Electrodynamics (QED) experiments \cite{brune-et-al-1992,davidovich-et-al-1996,guerlin_et_al_2007}.
\begin{figure}
\caption{A Cavity QED experiment, designed to find information on the number of photons inside a cavity, by sending atoms through the cavity and subsequently detecting them.}
\label{fig2}
\end{figure}
Here the observable is the number $n$ of photons in a cavity. As shown schematically in Fig. \ref{fig2}, the probes are atoms successively sent through the cavity: after they leave the cavity, they are detected by a measuring device.
In what follows we first discuss, in the next section, the Stern-Gerlach problem, which has become a paradigm for models of measurement in QM.
The discussion will pave the way for the analysis, in Sec. \ref{single}, of single measurements in QM, where the system proper is coupled to one probe with arbitrary coupling strength. The main goal is to study what information we can obtain on the system by detecting the probe position \cite{johansen-mello:2008}. As a by-product of the analysis, we obtain the reduced density operator of the system proper after its interaction with the probe, and derive the so-called L\"uders rule \cite{Lueders-UEbeZust:51} as the limiting case of strong coupling.
We then generalize the vNM to two probes that interact successively with the system proper \cite{johansen-mello:2008}. Again, we study what information we can obtain on the system by detecting the position-position and momentum-position correlations of the two probes. Indeed, we describe a state reconstruction scheme based on the procedure of successive measurements \cite{johansen-mello:2008,amir-pier}. We obtain the so-called ``Wigner's formula" \cite{Wigner-ProbMeas:63} in the strong-coupling limit of the above formalism, and Kirkwood's quasi-probability distribution \cite{Kirkwood-QuanStatAlmoClas:33} in the weak-coupling limit. We also find a generalized transform of the state and the observables based on the notion of successive measurements, and in terms of complex quasi-probabilities.
\section{The Stern-Gerlach experiment} \label{stern-gerlach}
The Stern-Gerlach experiment, Fig. \ref{fig1}, has become a paradigm for models of measurement in QM. Although the experiment was first performed as early as 1922, is explained in every textbook on QM (see, e.g., Refs. \cite{ballentine:99,peres-book}), and has seen various refinements in the literature (as in Refs. \cite{peres-1980,alstrom-1982,platt-1992}), surprisingly its complete (non-relativistic) solution has been given only recently \cite{scully-1987}.
By a complete solution we mean one that takes into account the {\em translational} and {\em transverse} motions of the atom, and a {\em confined} magnetic field ${\bf B}({\bf r})$ that satisfies Maxwell's equations
\begin{equation} {\bf \nabla \cdot B =0}, \hspace{1cm} {\bf \nabla \times B =0} . \label{divB=0,rotB=0} \end{equation}
Here we shall work with a model of the complete problem which has a simple exact solution and still shows the physical characteristic we want to exhibit, i.e., bending of the trajectory depending on the $z$-projection of the spin.
We shall: i) assume ${\bf B}\equiv 0$ outside the gaps; ii) assume that only $B_z$ is significant; iii) assume $B_z(z) \approx B'(z)z$ inside the gaps; iv) simulate the translational motion in the $y$ direction using a $t$-dependent interaction which lasts for the time the particle is inside the gap of the magnet. This could be achieved by adopting a frame of reference moving with the particle; v) ignore the $x$ degree of freedom.
We then consider the model Hamiltonian
\begin{subequations} \begin{eqnarray} \hat{H}(t) &=& \frac{\hat{p}_z^2}{2m} -{\bf \mu}{\bf \cdot B} \; \theta_{t_1,\tau}(t) \\ &=& \frac{\hat{p}_z^2}{2m} - \left[\mu_B B'_z(0) \tau\right]\frac{\theta_{t_1,\tau}(t)}{\tau} \hat{\sigma}_z \; \hat{z} \;, \\ &=& \frac{\hat{p}_z^2}{2m} - \epsilon g(t) \hat{\sigma}_z \; \hat{z}\; , \end{eqnarray} \label{H(t) 1} \end{subequations}
where $\hat{\sigma}_z$ is one of the Pauli matrices, and $\epsilon = \mu_B B'_z(0) \tau$. The function $\theta_{t_1,\tau}(t)$ is nonzero and equal to unity only inside the time interval $(t_1-\tau/2, t_1+\tau/2)$, i.e.,
\begin{eqnarray} \theta_{t_1,\tau}(t) &=& \left\{ \begin{array}{cc} 1, & t \in (t_1-\tau/2, t_1+\tau/2) \\ 0, & t \notin (t_1-\tau/2, t_1+\tau/2) \end{array} \right . \label{theta} \end{eqnarray}
and
\begin{equation} g(t)= \frac{\theta_{t_1,\tau}(t)}{\tau} \; , \hspace{5mm} \int_{0}^{\infty} g(t)dt =1, \;\;\; (\tau \ll t_1) . \label{g(t)} \end{equation}
We further use the simplification $g(t) \approx \delta(t-t_1)$ and write our model Hamiltonian as
\begin{subequations} \begin{eqnarray} \hat{H}(t) &=& \frac{\hat{p}_z^2}{2m} -\epsilon \delta(t-t_1) \hat{\sigma}_z \hat{z} \\ &=&\hat{H}_0 + \hat{V}(t) . \label{H(t) 2} \\ \hat{H}_0 &=& \hat{K}_z \;\;\;\; {\rm is \; the \; kinetic \; energy \; operator \; for \; the}\; z \; {\rm variable}. \label{H0=Kz} \end{eqnarray} \label{H(t),H0} \end{subequations}
In the nomenclature introduced in the Introduction, $\hat{\sigma}_z$ is the {\em observable for the system proper} and $\hat{z}, \hat{p}_z$ are the {\em probe canonical variables}. From now on we adopt the nomenclature that we {\em measure} (perhaps a better word could be ``premeasure") the observable $\hat{\sigma}_z$, by {\em detecting}, with a suitable instrument, either $\hat{z}$ or $\hat{p}_z$.
We shall solve the Schr\"odinger equation
\begin{subequations} \begin{eqnarray}
i\hbar\frac{\partial |\psi(t)\rangle}{\partial t}
&=& \hat{H}(t) |\psi(t)\rangle \; , \label{schr. eqn SG} \end{eqnarray}
with the initial condition
\begin{eqnarray}
|\psi(0)\rangle &=& |\psi_{spin}^{(0)}\rangle |\chi^{(0)}\rangle \; . \label{initial condition SG} \end{eqnarray} \end{subequations}
Here, $|\psi_{spin}^{(0)}\rangle$ is the initial state of the system proper and
$|\chi^{(0)}\rangle$ the initial state of the probe.
It will be advantageous to use the interaction picture (for a textbook presentation, see, e.g., Ref. \cite{messiah}), in which we have the following relations
\begin{subequations} \begin{eqnarray}
|\psi(t) \rangle_I &=& \hat{U}_0^{\dagger}(t)|\psi(t) \rangle \label{inter picture a} \\
&=& \hat{U}_0^{\dagger}(t)\hat{U}(t)|\psi(0) \rangle \label{inter picture b} \\
&\equiv& \hat{U}_I (t) |\psi(0) \rangle \label{inter picture c} \\ \hat{U}_I (t)&=& \hat{U}_0^{\dagger}(t)\hat{U}(t) \label{inter picture d} \\ i\hbar \frac{d \hat{U}_I (t)}{dt} &=& \hat{V}_I(t)\hat{U}_I (t)\; ; \;\;\;\; \hat{U}_I (0)=\hat{I} \; . \label{inter picture e} \end{eqnarray}
Here, $\hat{U}_0(t)$, $\hat{U}(t)$, are the evolution operators in the Schr\"odinger picture associated with $H_0$ and $\hat{H}$, respectively, and $\hat{U}_I(t)$ the evolution operator in the interaction picture; $\hat{V}_I(t)$ is the interaction in the interaction picture, i.e.,
\begin{eqnarray} \hat{V}_I(t)&=& {\rm e}^{\frac{i}{\hbar}\hat{H}_0 t}\hat{V}(t) {\rm e}^{-\frac{i}{\hbar}\hat{H}_0 t} \label{inter picture VI} \\ &=& -\epsilon \delta (t-t_1){\rm e}^{\frac{i}{\hbar}\hat{H}_0 t_1} \hat{\sigma}_z \hat{z} {\rm e}^{-\frac{i}{\hbar}\hat{H}_0 t_1} \label{inter picture f} \\ &\equiv& -\epsilon \delta (t-t_1) \hat{W} \; . \end{eqnarray} \label{inter picture} \end{subequations}
The solution for $\hat{U}_I(t)$ is
\begin{subequations} \begin{eqnarray} \hat{U}_I(t) &=& {\rm e}^{\frac{i\epsilon}{\hbar}\int_0^t{\delta(t'-t_1)dt' \cdot \hat{W}}} \label{UI(t)a} \\ \hat{U}_{I,f} &=& {\rm e}^{\frac{i}{\hbar}\hat{H}_0 t_1} {\rm e}^{\frac{i\epsilon}{\hbar}\hat{\sigma}_z \hat{z}} {\rm e}^{-\frac{i}{\hbar}\hat{H}_0 t_1} \; . \end{eqnarray} \label{UI(t)} \end{subequations}
In the last line we have the ``final" evolution operator in the interaction picture, i.e., after the interaction has ceased to act. From Eq. (\ref{inter picture d}) we find the final evolution operator in the Schr\"odinger picture as
\begin{equation} U_f \equiv U(t>t_1) = e^{-\frac{i}{\hbar}\hat{K}_z(t-t_1)} e^{\frac{i}{\hbar}\epsilon \hat{\sigma}_z \hat{z}} e^{-\frac{i}{\hbar}\hat{K}_z t_1} \; , \label{Uf} \end{equation}
whose physical interpretation is very clear: we first have free evolution from $t=0$ to $t_1$; the interaction acts at $t=t_1$, and we have free evolution again thereafter.
The final state vector, i.e., for $t>t_1$, is then
\begin{equation}
|\Psi(t)\rangle_f
= \sum_{\sigma = \pm 1} \mathbb{P}_{\sigma}|\psi_{spin}^{(0)}\rangle e^{-\frac{i}{\hbar}\hat{K}_z(t-t_1)} e^{\frac{i}{\hbar}\epsilon \sigma \hat{z}}
|\chi(t_1)\rangle \label{Psif} \end{equation}
We have introduced the projector $\mathbb{P}_{\sigma}$ onto the state with eigenvalue $\sigma=+1,-1$ of $\hat{\sigma}_z$. We observe that the component
$\mathbb{P}_{\sigma}|\psi_{spin}^{(0)}\rangle$ of the original spin state gets entangled with the probe state
$e^{\frac{i}{\hbar}\epsilon \sigma \hat{z}}|\chi(t_1)\rangle$
(we denote by $|\chi(t_1)\rangle$ the probe state which has evolved freely from $t=0$ to $t=t_1^{-}$) which, in the $z$-representation, is
\begin{equation} e^{\frac{i}{\hbar}\epsilon \sigma z}\chi(z,t_1) \; , \label{z-boost} \end{equation}
meaning that it got a boost $p_{\sigma}=\epsilon \sigma$ in the $z$ direction. This is precisely the essential physical effect occurring in the Stern-Gerlach experiment.
\subsection{The probability of detecting a probe position $z$ as a function of time} \label{sec:pf(z,t)}
At $t=0$, the probability density $p(z,t)$ of a probe position $z$ is just
$|\chi^{(0)}(z)|^2$, where $\chi^{(0)}(z)$ is the original $z$-wave function. As time goes on, the wave packet spreads in time until just before the interaction occurs, i.e., until $t=t_1^{-}$ (see the schematic illustration in Fig. \ref{fig3}).
\begin{figure}
\caption{The probability $p(z,t)$ of a probe position $z$ as a function of time in the Stern-Gerlach experiment, as explained in the text. }
\label{fig3}
\end{figure}
After the interaction has taken place at $t=t_1$, i.e., for $t>t_1$, that probability is given by
\begin{subequations} \begin{eqnarray} p_f(z,t)
&=& \; _f \langle \Psi| \mathbb{P}_z |\Psi \rangle_f \label{pf(z,t) a} \\ &=& \sum_{\sigma = \pm 1}
\langle \psi_{spin}^{(0)}|\mathbb{P}_{\sigma}|\psi_{spin}^{(0)}\rangle
\left|\langle z |e^{-\frac{i}{\hbar}\hat{K}_z(t-t_1)} e^{\frac{i}{\hbar}\epsilon \sigma \hat{z}}
|\chi(t_1)\rangle \right|^2 \label{pf(z,t) b} \\ &=& \sum_{\sigma = \pm 1} W_{\sigma} ^{(\hat{\sigma}_z)}
|\chi_f(z,\sigma;t)|^2 \; , \label{pf(z,t) c} \end{eqnarray} \label{pf(z,t)} \end{subequations}
where $W_{\sigma} ^{(\hat{\sigma}_z)}
=\langle \psi_{spin}^{(0)}|\mathbb{P}_{\sigma}|\psi_{spin}^{(0)}\rangle$ is the Born probability for the value $\sigma$ of the spin projection in the original system state. We have also defined the $\sigma$-dependent probe wave function for $t>t_1$
\begin{subequations} \begin{eqnarray} \chi_f(z,\sigma;t)
&=&\langle z |e^{-\frac{i}{\hbar}\hat{K}_z(t-t_1)} e^{\frac{i}{\hbar}\epsilon \sigma \hat{z}}
|\chi(t_1)\rangle \\ &\equiv& \int U_0(z,z';t-t_1) e^{\frac{i}{\hbar}\epsilon \sigma z'} \chi(z', t_1) dz' \end{eqnarray} \label{chi-f(z,sigma,t)} \end{subequations}
which consists of the probe wave function which evolves freely up to $t=t_1$, it gets multiplied by the plane wave $e^{\frac{i}{\hbar}\epsilon \sigma z'}$ at $t=t_1$ (the boost referred to above), and evolves freely thereafter, through the free evolution operator indicated as $U_0$. Thus, because of the interaction occurring at $t=t_1$, the $\sigma=1$ component of this state receives a positive boost in the $z$ direction, and the $\sigma=-1$ component receives a negative boost, so that after $t=t_1$ they travel in opposite directions. From Eq. (\ref{pf(z,t) c}), the $\sigma=1$ component occurs with a weight $W_{1} ^{(\hat{\sigma}_z)}$, and the $\sigma=-1$ component with a weight $W_{-1} ^{(\hat{\sigma}_z)}$, as is also indicated in Fig. \ref{fig3}.
\subsubsection{Conditions generally required \cite{brune-et-al-1992,imoto-et-al} for the measurement of the observable of a system, when detecting a property of the probe }
What are called $\hat{A}_s$ and $\hat{A}_{probe}$ in Refs. \cite{brune-et-al-1992,imoto-et-al}, are translated as
\begin{subequations} \begin{eqnarray} \hat{A}_s = \hat{\sigma}_z \label{} \\ \hat{A}_{probe}= \hat{z} \; , \label{} \end{eqnarray} \label{} \end{subequations}
respectively, in our notation for the present Stern-Gerlach problem. The requirements established in these references are:
i)
\begin{equation} \hat{V} = f(\hat{A}_s) \label{} \end{equation}
Here, indeed: $\hat{V}=-\epsilon \delta(t-t_1) \hat{\sigma}_z \hat{z}$
ii)
\begin{equation} [\hat{V},\hat{A}_{probe}] \neq 0 \; , \label{} \end{equation}
so that $\langle \hat{A}_{probe} \rangle$ changes, enabling one to get information on $\hat{A}_s$ by detecting $\hat{A}_{probe}$. Although here $[\hat{V}, \hat{z}] =0$, we have $[\hat{K}_z, \hat{z}] \neq 0$; the kinetic energy $\hat{K}_z$ causes a displacement of the two wave packets, as illustrated in Fig. \ref{fig3}: if we wait long enough, the two packets separate, and we can measure $\sigma = \pm 1$.
In order to have {\em a QM non-demolition measurement}, these references also establish the additional conditions:
iii)
\begin{subequations} \begin{eqnarray} \left[\hat{V},\hat{A}_s \right] &=& 0 \; ; \;\;\;\;\; {\rm here, \; indeed,} \; \left[\hat{V},\hat{\sigma}_z \right] = 0 \label{} \\ \left[ \hat{H}_s,\hat{A}_s \right] &=& 0 \; ; \;\;\;\;\; {\rm here},\;\; H_s=0, \;\;\; {\rm so \; that} \;\; [\hat{H}_s,\hat{\sigma}_z ]=0 \label{} \end{eqnarray}
which, taken together, give
\begin{eqnarray} \left[ \hat{H},\hat{A}_s \right] &=& 0 \; ; \;\;\;{\rm here,\; indeed}, \; \left[\hat{H},\hat{\sigma}_z \right] = 0 , \end{eqnarray} \label{} \end{subequations}
with the consequence that starting with
$|\psi_{spin}^{(0)} \rangle = |\sigma = 1\rangle$, say, the state will not get a component $|\sigma = -1\rangle$.
\subsection{The probability of detecting a probe momentum $p_z$ as a function of time} \label{pf(pz,t)}
At $t=0$, the probability density $p(p_z,t)$ of a probe momentum $p_z$ is
$|\tilde{\chi}^{(0)}(p_z)|^2$, where $\tilde{\chi}^{(0)}(p_z)$, the original wave function in the momentum representation, is the Fourier transform of the original wave function in $z$-space. As time goes on, the wave packet conserves its shape until just before the interaction occurs, i.e., until $t=t_1^{-}$ (see the schematic illustration in Fig. \ref{fig4}).
\begin{figure}
\caption{The probability $p(p_z,t)$ of a probe momentum $p_z$ as a function of time in the Stern-Gerlach experiment, as explained in the text. }
\label{fig4}
\end{figure}
After the interaction has taken place at $t=t_1$, i.e., for $t>t_1$, that probability is given by
\begin{subequations} \begin{eqnarray} p_f(p_z,t)
&=& \; _f \langle \Psi| \mathbb{P}_{p_z} |\Psi \rangle_f \\ &=& \sum_{\sigma = \pm 1}
\langle \psi_{spin}^{(0)}|\mathbb{P}_{\sigma}|\psi_{spin}^{(0)}\rangle
\left|\langle p_z |e^{-\frac{i}{\hbar}\hat{K}_z(t-t_1)} e^{\frac{i}{\hbar}\epsilon \sigma \hat{z}}
|\chi(t_1)\rangle \right|^2 \\ &=& \sum_{\sigma = \pm 1} W_{\sigma} ^{(\hat{\sigma}_z)}
\left|\tilde{\chi}^{(0)}(p_z- \epsilon\sigma)\right|^2 \; . \end{eqnarray} \end{subequations}
Just as in Eq. (\ref{pf(z,t)}), $W_{\sigma} ^{(\hat{\sigma}_z)}$ is the Born probability for the value $\sigma$ of the spin projection in the original system state; $\tilde{\chi}^{(0)}(p_z- \epsilon\sigma)$ is the original wave function in momentum space, evaluated at the displaced value $p_z- \epsilon\sigma$. Thus, at $t=t_1^+$ the $p_z$ probability density splits into the two pieces indicated in Fig. \ref{fig4}, corresponding to the two values of $\sigma$, with weights $W_{+1} ^{(\hat{\sigma}_z)}$ and $W_{-1} ^{(\hat{\sigma}_z)}$, respectively, and remains unaltered thereafter.
\subsubsection{Conditions generally required \cite{brune-et-al-1992,imoto-et-al} for the measurement of the observable of a system, when detecting a property of the probe }
In this case, the $\hat{A}_s$ and $\hat{A}_{probe}$ of Refs. \cite{brune-et-al-1992,imoto-et-al} are
\begin{subequations} \begin{eqnarray} \hat{A}_s &=& \hat{\sigma}_z \label{} \\ \hat{A}_{probe}&=& \hat{p}_z \; , \label{} \end{eqnarray} \label{} \end{subequations}
respectively, in our present notation. We recall that these references require:
i)
\begin{equation} \hat{V} = f(\hat{A}_s) \; ; \label{} \end{equation}
indeed, this is the case, because $\hat{V}=-\epsilon \delta(t-t_1) \hat{\sigma}_z \hat{z}$.
ii)
\begin{equation} [\hat{V},\hat{A}_{probe}] \neq 0 \; . \label{} \end{equation}
Here, indeed, $[\hat{V}, \hat{p}_z] \propto [\hat{z},\hat{p}_z] \neq0$.
For {\em a QM non-demolition measurement}, these references require:
iii)
\begin{subequations} \begin{eqnarray} \left[\hat{V},\hat{A}_s \right] &=& 0 \; ; \;\;\;{\rm here, \; indeed:} \; \left[\hat{V},\hat{\sigma}_z \right] = 0 \label{} \\ \left[ \hat{H}_s,\hat{A}_s \right] &=& 0 \; ; \;\;\;{\rm here}, \; H_s=0, \;\;\; {\rm so \; that} \; [\hat{H}_s,\hat{\sigma}_z ]=0, \label{} \end{eqnarray}
so that
\begin{eqnarray} \left[ \hat{H},\hat{A}_s \right] &=& 0 \; ; \;\;\;{\rm here, indeed:} \; \left[\hat{H},\hat{\sigma}_z \right] = 0 . \end{eqnarray} \label{} \end{subequations}
Just as in the previous case, the consequence is that starting with
$|\psi_{spin}^{(0)} \rangle = |\sigma = 1\rangle$, say, the state will not get a component $|\sigma = -1\rangle$.
\section{Single measurements in Quantum Mechanics} \label{single}
We first consider the measurement of an observable $\hat{A}$ using {\em one probe} and detecting a property of it. We are using the nomenclature introduced right after Eqs. (\ref{H(t),H0}).
For $\hat{A}$ we write the spectral representation
\begin{equation} \hat{A} = \sum_n a_n {\mathbb P}_{a_n} \; , \label{sp. repr. A} \end{equation}
where the eigenvalues $a_n$ are allowed to be degenerate and $\mathbb{P}_{a_n}$ are the eigenprojectors.
We assume the system to be coupled to a probe, considered, for simplicity, to be one-dimensional, whose position and momentum are represented by the Hermitian operators $\hat{Q}$ and $\hat{P}$. The system-probe interaction is taken to be \cite{Neumann-MathFounQuanMech:55}
\begin{equation} \hat{V}(t) = \epsilon \; g(t) \hat{A} \hat{P}\; , \hspace{5mm} t_1 > 0\; , \label{V_single} \end{equation}
with an {\em arbitrary} interaction strength \cite{Peres-QuanLimiDeteWeak:89} $\epsilon$.
This interaction could be translated to the one for the Stern-Gerlach experiment discussed in Sec. \ref{pf(pz,t)} using the correspondence $\hat{A} \Rightarrow \hat{\sigma}_z$, $\hat{P} \Rightarrow \hat{z}$, $\hat{Q} \Rightarrow -\hat{p}_z$, as illustrated in Fig. \ref{fig5} below. The delta function interaction of the previous section was generalized to $g(t)$ (see Eq. (\ref{g(t)})), a narrow function with finite support, centered at $t=t_1$, so that
\begin{subequations} \begin{eqnarray} \int_{0}^{t}g(t') dt' &\equiv& G(t), \\ G(0)&=&0, \;\;\;\;G(\infty)= 1 . \end{eqnarray} \label{g(t) 1} \end{subequations}
We disregard the intrinsic evolution of the system and the probe, and assume that $\hat{V}(t)$ of Eq. (\ref{V_single}) represents the full Hamiltonian, i.e.,
\begin{equation} \hat{H}(t) = \epsilon g(t) \hat{A}\hat{P}\; , \hspace{5mm} t_1 > 0\; . \label{H(t)_single} \end{equation}
The evolution operator is then given by
\begin{equation} \hat{U}(t) = {\rm e}^{-\frac{i}{\hbar} \int_0^t \hat{H}(t')dt'} = {\rm e}^{-\frac{i}{\hbar} \epsilon \; G(t)\hat{A}\hat{P}} . \label{U_single} \end{equation}
If the density operator of the system plus the probe at $t=0$ is the direct product $ \rho^{(0)} = \rho_{s}^{(0)} \otimes \rho_{\pi}^{(0)} $ ($\pi$ stands for ``probe"), after the interaction has ceased to act, i.e., for $t \gg t_1$, it is given by
\begin{equation} \rho^{(\hat{A})}_f = \sum_{n n' } \mathbb{P}_{a_n} \rho_{s}^{(0)} \mathbb{P}_{a_{n'}} (e^{-\frac{i}{\hbar}\epsilon a_n \hat{P}} \rho_{\pi}^{(0)} e^{\frac{i}{\hbar}\epsilon a_{n'} \hat{P}}) \; . \label{rho_t>t1 single} \end{equation}
From this expression we notice that, because of the interaction, the system and the probe are now correlated. Also notice the presence of the displacement operator $\exp(-(i/\hbar)\epsilon a_{n} \hat{P})$ in this last equation.
Now the idea is that at time $t>t_1$, i.e., after the system-probe interaction is over, we detect the probe position $\hat{Q}$ to obtain information on the system proper. This we study in what follows.
\subsection{The $\hat{Q}$ probability density after the interaction} \label{pf(Q) 0}
According to Born's rule, the $Q$ probability density for $t>t_1$ is given by
\begin{equation} p_f^{(\hat{A})}(Q) = {\rm Tr} \left(\rho_f^{(\hat{A})} \mathbb{P}_Q \right) ={\sum}_n W_{a_n}^{(\hat{A})} \; p_0(Q-\epsilon a_n) \; , \label{pf(Q)} \end{equation}
where
\begin{equation} W_{a_n}^{(\hat{A})} ={\rm Tr} ( \rho_s^{(0)} \mathbb{P}_{a_{n}} ) \label{Wan 0} \end{equation}
is the {\em Born probability} for the result $a_n$ in the original system state, and
\begin{equation} p_0(Q-\epsilon a_n)
=\langle Q-\epsilon a_n |\rho_{\pi}^{(0)}| Q-\epsilon a_n \rangle \label{p0(Q-ean)} \end{equation}
is the original $Q$ probability density $p_0(Q)$ (which has a width $= \sigma_Q$), but displaced by $\epsilon a_n$.
In this problem we may take the point of view that knowing the system state $\rho_s^{(0)}$ before the process, and thus $W_{a_n}^{(\hat{A})}$, we can predict the detectable quantity $p_f^{(\hat{A})}(Q)$. We may also adopt the more interesting viewpoint that, {\em detecting $p_f^{(\hat{A})}(Q)$, we can retrieve information on the system state}. We examine this latter attitude below.
Before doing that, we illustrate in Fig. \ref{fig5} the result (\ref{pf(Q)}) for the case of the Stern-Gerlach experiment studied in the last section, by means of the translation given right before Eq. (\ref{g(t) 1}).
\begin{figure}\label{fig5}
\end{figure}
An illustrative example of a more general case is presented in Fig. \ref{fig6a,b}; for the values of the parameters indicated in the figure, Fig. 6a corresponds to the case of ``strong coupling", while Fig. 6b to that of ``weak coupling".
\begin{figure}
\caption{Illustrative example of the probability density of the pointer position $Q$ of Eq. (\ref{pf(Q)}). We have assumed seven eigenvalues $a_n$ for the system observable $\hat{A}$, with Born probabilities [Eq. (\ref{Wan 0})] given by: 0.1, 0.2, 0.2, 0.15, 0.2, 0.05 and 0.1. We chose the particular values: (a) $\epsilon =1$ for the interaction strength and $\sigma_Q=0.05$ for the width of the probe state prior to the measurement; this is a case of ``strong coupling"; (b) $\epsilon =1$ for the interaction strength and $\sigma_Q=1$ for the width of the probe state prior to the measurement; this is a case of ``weak coupling".}
\label{fig6a,b}
\end{figure}
In the above scheme, what we detect is the probe-position probability $p_f^{(\hat{A})}(Q)$. As it can be seen from Fig. \ref{fig6a,b}, it is only in the idealized limit of very {\em strong coupling}, $\epsilon/\sigma_Q \gg 1$, that $p_f^{(\hat{A})}(Q)$ ``mirrors" the eigenvalues $a_n$ of the observable which we want to have information about. In this limit, $p_f^{(\hat{A})}(Q)$ {\em integrated around $a_n$ gives Born's probability $W_{a_n}^{(\hat{A})}$ of $a_n$}. In this high-resolution limit this is thus the information we can retrieve about the system proper, by detecting $Q$. We shall study below what information can be extracted from experiments with arbitrary resolution. Surprisingly, we shall find cases where it is advantageous to use low resolution (see Sec. \ref{state reconstruction}, last paragraph)!
\subsection{The average of $Q$ after the interaction} \label{<Q> 0}
From Eqs. (\ref{pf(Q)}) and (\ref{Wan 0}) one finds \cite{aharonov_et_al} that the average of the probe position $\hat{Q}$ in units of $\epsilon$, after the interaction is over, is given by
\begin{equation} \frac{1}{\epsilon}\langle \hat{Q} \rangle_f^{(\hat{A})} = {\sum}_n a_n W^{(\hat{A})}_{a_n} = {\sum}_n a_n {\rm Tr} (\rho_s^{(0)} \mathbb{P}_{a_n} ) = {\rm Tr} (\rho_s^{(0)} \hat{A} ) =\langle\hat{A}\rangle_0, \;\;\;\;\; \forall \epsilon \; . \label{<Q>} \end{equation}
We have assumed the original $Q$ distribution to be centered at $Q=0$. As a result, detecting the average probe position $\langle \hat{Q}\rangle_f^{(\hat{A})}$ after the interaction is over {\em allows extracting the Born average} $\langle\hat{A}\rangle_0$ of the observable $\hat{A}$ in the original state of the system. It is remarkable that this result is valid for {\em arbitrary} coupling strength $\epsilon$. For example, in the two situations illustrated in Fig. \ref{fig6a,b} we would obtain the same result for $\langle \hat{Q} \rangle_f^{(\hat{A})}/\epsilon$.
Similar results can be found for higher-order moments. E.g., for the second moment of $\hat{Q}$ one finds
\begin{equation} \frac{1}{\epsilon^2} \left[ \langle \hat{Q}^2 \rangle_f^{(\hat{A})} -\sigma_{Q}^2 \right] = {\rm Tr} (\rho_s^{(0)} \hat{A}^2 ) = \langle \hat{A}^2 \rangle_0 \; , \;\;\; \forall \epsilon , \label{<Q^2>} \end{equation}
implying that detecting $\langle \hat{Q}^2 \rangle_f^{(\hat{A})}$ and knowing $\sigma_{Q}^2$ allows extracting the second moment of the observable $\hat{A}$ in the original state of the system, i.e., $\langle \hat{A}^2 \rangle_0$.
More in general, if we detect the final $Q$ probability density (\ref{pf(Q)}), i.e.,
\begin{equation} p_f^{(\hat{A})}(Q) = \sum_n (\rho_s^{(0)})_{nn} \; p_0(Q-\epsilon a_n) \; , \label{pf(Q) vs rho_nn} \end{equation}
we obtain information on the {\em diagonal elements} $(\rho_s^{(0)})_{nn}$ of the original density operator, but not on the off-diagonal ones. Alternatively, we can write this result in terms of the characteristic function
\begin{equation} \tilde{p}_f^{(\hat{A})}(k) = \left[\sum_n (\rho_s^{(0)})_{nn} {\rm e}^{ik\epsilon a_n}\right] \tilde{p}_0(k) = \left\langle {\rm e}^{ik\epsilon \hat{A}}\right\rangle_0 \; \tilde{p}_0(k) \; , \label{pf(k)} \end{equation}
implying that if we detect $p_f^{(\hat{A})}(Q)$ and infer $\tilde{p}_f^{(\hat{A})}(k)$, we can extract $\left\langle {\rm e}^{ik\epsilon \hat{A}}\right\rangle_0$. Results (\ref{<Q>}) and (\ref{<Q^2>}) are particular cases of Eq. (\ref{pf(k)}).
In Sec. \ref{state reconstruction} we shall find a procedure to extract all of the matrix elements of the original density operator, using the notion of successive measurements.
\subsection{Measuring projectors} \label{measuring projectors}
A particular case of great interest is the measurement of a projector, like $\mathbb{P}_{a_{\nu}}$, so that the Hamiltonian of Eq. (\ref{H(t)_single}) becomes
\begin{equation} \hat{H}(t) = \epsilon \; g(t) \hat{\mathbb{P}}_{a_{\nu}} \hat{P}\; , \hspace{5mm} t_1 > 0\; . \label{V_single 1} \end{equation}
We designate the eigenvalues of $\hat{\mathbb{P}}_{a_{\nu}}$ by $\tau = 1,0$, and its eigenprojectors by $(\hat{\mathbb{P}}_{a_{\nu}})_{\tau}$. Then
\begin{subequations} \begin{eqnarray} (\hat{\mathbb{P}}_{a_{\nu}})_1 &=& \hat{\mathbb{P}}_{a_{\nu}} \; ; \hspace{16mm} \hat{\mathbb{P}}_{a_{\nu}} (\hat{\mathbb{P}}_{a_{\nu}})_1 = 1 \cdot (\hat{\mathbb{P}}_{a_{\nu}})_1 \\ (\hat{\mathbb{P}}_{a_{\nu}})_0 &=& I - \hat{\mathbb{P}}_{a_{\nu}} \; ; \hspace{1cm} \hat{\mathbb{P}}_{a_{\nu}} (\hat{\mathbb{P}}_{a_{\nu}})_0 = 0 \cdot (\hat{\mathbb{P}}_{a_{\nu}})_0 \; . \end{eqnarray} \label{Pa,nu} \end{subequations}
For these eigenvalues and eigenprojectors, the probe-position probability density of Eq. (\ref{pf(Q)}) gives
\begin{equation} p_f^{(\hat{\mathbb{P}}_{a_{\nu}})}(Q) = {\rm Tr} \left[\rho_s^{(0)} (\hat{\mathbb{P}}_{a_{\nu}})_0 \right] \; p_0(Q) +{\rm Tr} \left[\rho_s ^{(0)} (\hat{\mathbb{P}}_{a_{\nu}})_1 \right] \; p_0(Q-\epsilon) \; . \label{pf(Q) for proj} \end{equation}
This result is illustrated in Fig. \ref{fig7} for the strong-coupling case, in which $p_f^{(\hat{\mathbb{P}}_{a_{\nu}})}(Q)$ consists of two peaks centered at $Q/\epsilon =0$ and $Q/\epsilon =1$.
\begin{figure}
\caption{Illustrative example (qualitative) of the probability density of the probe position $Q$ of Eq. (\ref{pf(Q) for proj}) for the strong-coupling case. The result consists of two peaks, centered at $Q/\epsilon =0$ and $Q/\epsilon =1$. Also shown is the average of the probe position which, in units of $\epsilon$, is independent of $\epsilon$ and lies between 0 and 1. }
\label{fig7}
\end{figure}
From Eq. (\ref{pf(Q) for proj}), or from Eq. (\ref{<Q>}), we find
\begin{equation} \frac{1}{\epsilon}\langle \hat{Q} \rangle_f^{(\hat{\mathbb{P}}_{a_{\nu}})} = \sum_{\tau =0}^1 \tau \; {\rm Tr}[\rho_s^{(0)}(\hat{\mathbb{P}}_{a_{\nu}})_{\tau}] = \left\{
\begin{array}{l} {\rm Tr} (\rho_s^{(0)} \hat{\mathbb{P}}_{a_{\nu}}) =W_{a_{\nu}}^{(\hat{A})} \\ {\rm Tr} [\rho_s^{(0)} (\hat{\mathbb{P}}_{a_{\nu}})_{\tau = 1}] =W_{\tau=1}^{(\hat{\mathbb{P}}_{a_{\nu}})} \end{array}
\right. \; , \label{<Q> for proj} \end{equation}
a result independent of $\epsilon$. Thus the final average probe position, in units of $\epsilon$, gives {\em directly} the probability $W_{a_{\nu}}^{(\hat{A})}$ of $a_{\nu}$ of the observable $\hat{A}$ in the original state (see the first row of Eq. (\ref{<Q> for proj}), where we used $\sum_{\tau}\tau(\hat{\mathbb{P}}_{a_{\nu}})_{\tau}=\hat{\mathbb{P}}_{a_{\nu}}$); this is illustrated by the arrow in Fig. \ref{fig7}. In contrast, we would not know how to extract the $W^{(\hat{A})}_{a_n}$'s from the sum in Eq. (\ref{<Q>}) if $\hat{A}$ is not a projector. We can also say that inferring the probability of the eigenvalue $\tau=1$ (second row in (\ref{<Q> for proj})), i.e., the probability of ``yes" of the observable $\hat{\mathbb{P}}_{a_{\nu}}$, from a detection of $\langle \hat{Q} \rangle_f^{(\hat{\mathbb{P}}_{a_{\nu}})}/ \epsilon$ (LHS of (\ref{<Q> for proj})), is equivalent to retrieving the diagonal elements
$\langle a_{\nu}|\rho_s^{(0)}|a_{\nu}\rangle$ of the original density operator of the system (first row in (\ref{<Q> for proj})). As we shall see in Sec. \ref{state reconstruction}, the extension of this last idea to successive measurements will allow retrieving the full density operator.
\subsection{The reduced density operator of the system proper after the interaction} \label{final reduced rho of system}
We compute the reduced density operator of the system proper after the interaction with the probe is over, by tracing $\rho_f^{(\hat{A})}$ of Eq. (\ref{rho_t>t1 single}) over the probe, with the result
\begin{subequations} \begin{eqnarray} \rho_{s,f}^{(\hat{A})} &=& \sum_{n n'} \left(\mathbb{P}_{a_n} \rho_s^{(0)} \mathbb{P}_{a_{n'}}\right) {\rm Tr}_{\pi}\left[ e^{-\frac{i}{\hbar}\epsilon a_n \hat{P}} \rho_{\pi}^{(0)} e^{\frac{i}{\hbar}\epsilon a_{n'} \hat{P}} \right] \label{reduced rho a} \\ &=& \sum_{n,n'} g(\epsilon(a_n - a_{n'}))\; \mathbb{P}_{a_n} \rho_{s}^{(0)} \mathbb{P}_{a_{n'}} \; , \label{reduced rho b} \end{eqnarray} \label{reduced rho} \end{subequations}
where we have defined the characteristic function of the probe momentum distribution as
\begin{equation} g(\beta) = \left\langle {\rm e}^{-\frac{i}{\hbar}\beta \hat{P}} \right\rangle_{\pi}^{(0)} = {\rm Tr} \left[ \rho_{\pi}^{(0)} e^{-\frac{i}{\hbar}\beta \hat{P}} \right]; \; \; \;\; \beta=\epsilon(a_n - a_{n'}) \; . \label{g(beta)} \end{equation}
As an example, for the particular case of a Gaussian state for the probe we have
\begin{subequations} \begin{eqnarray} \chi(Q) &=& \frac{e^{-\frac{Q^2}{4\sigma^2_{Q}}}}{(2\pi\sigma^2_{Q})^{1/4}} \label{g(beta) gaussian a} \\ g(\epsilon(a_n - a_{n'})) &=& \int \chi^{*}(Q-\epsilon a_{n'}) \chi(Q-\epsilon a_{n}) dQ \label{g(beta) gaussian b}\\ &=&e^{-\frac{\epsilon^2}{8\sigma_Q^2}(a_n - a_{n'})^2} = e^{-\frac12 \left(\frac{\epsilon \sigma_P}{\hbar}\right)^2 (a_n-a_{n'})^2} \; . \label{g(beta) gaussian c} \end{eqnarray} \label{g(beta) gaussian} \end{subequations}
The result (\ref{reduced rho}) is valid for an arbitrary state of the probe and an arbitrary coupling strength $\epsilon/\sigma_Q$. In the strong-coupling limit $\epsilon/\sigma_Q \to \infty$, $\rho_{s,f}^{(\hat{A})}$ reduces to
\begin{equation} \rho_{s,f}^{(\hat{A})} = \sum_n \mathbb{P}_{a_n} \rho_s^{(0)} \; \mathbb{P}_{a_n}\; . \label{reduced rho strong coupling} \end{equation}
This is called the von Neumann-L\"uders rule, originally postulated by L\"uders \cite{Lueders-UEbeZust:51}, and then given a dynamical derivation in Ref. \cite{Bell+Nauenberg-MoraAspeQuanMech:66} using vNM.
We have thus reproduced the result of a {\em non-selective projective measurement} \cite{Johansen-Quantheosuccproj:07} of the observable $\hat{A}$, as a limiting case of our general formalism.
Notice that $\rho_{s,f}^{(\hat{A})}$ and $\rho_s^{(0)}$ are not connected by a unitary transformation: indeed, their eigenvalues have changed. For example, in the particular case in which the initial system state is the pure state
$\rho_s^{(0)}= |\psi_s^{(0)}\rangle \langle \psi_s^{(0)}|$, the initial eigenvalues are $1,0, \cdots,0$. On the other hand, one eigenstate of $\rho_{s,f}^{(\hat{A})}$ is
$\mathbb{P}_{a_{\nu}}|\psi_s^{(0)}\rangle$, fulfilling the eigenvalue equation
\begin{equation}
\rho_{s,f}^{(\hat{A})}\left(\mathbb{P}_{a_{\nu}}|\psi_s^{(0)}\rangle\right)
= \langle \psi_s^{(0)}| \mathbb{P}_{a_{\nu}}| \psi_s^{(0)}\rangle
\left(\mathbb{P}_{a_{\nu}}|\psi_s^{(0)}\rangle\right) \; , \label{e-value of rho-f} \end{equation}
so that the corresponding eigenvalue is
$\langle \psi_s^{(0)} |\mathbb{P}_{a_{\nu}}| \psi_s^{(0)}\rangle$. This is not a contradiction, because it is the density operator for the whole system, i.e., the system proper plus the probe, that evolves unitarily (see Eqn. (\ref{rho_t>t1 single})), while here we are dealing with the reduced density operator, which is the full density operator traced over the probe.
We computed above the reduced density matrix for the system proper, $\rho_{s,f}^{(\hat{A})}$, after the system-probe interaction; then the probe is detected. We now wish to make a model for the probe-detector ($\pi-D$) interaction, taking place at some time $t_2>t_1$, and investigate if the resulting reduced density matrix for the system proper is still the same as the one given above, in Eq. (\ref{reduced rho}). Assume that this new interaction {\em does not involve the system proper} $s$. The final density operator after the interaction with the detector, to be called $\rho_f^{(\rm{after \; inter. \; with \; detector})}$, will contain a new evolution operator $U_{\pi D}$, that involves the probe and the detector, but not the system $s$. Tracing $\rho_f^{(\rm{after \; inter. \; with \; detector})}$ over the probe {\em and} the detector, we obtain
\begin{subequations} \begin{eqnarray} \rho_{s,f}^{(\rm{after \; inter. \; with \; detector})} &=& {\rm Tr}_{\pi D}\left( \rho_f^{(\rm{after \; inter. \; with \; detector})} \right)\\ &=& \sum_{n n'} \left(\mathbb{P}_{a_n} \rho_s^{(0)} \mathbb{P}_{a_{n'}}\right) {\rm Tr}_{\pi D} \left[ U_{\pi D} e^{-\frac{i}{\hbar}\epsilon a_n \hat{P}} \rho_{\pi}^{(0)} e^{\frac{i}{\hbar}\epsilon a_{n'} \hat{P}} \hat{\rho}_D^{(0)}
U^{\dagger}_{\pi D} \right] \nonumber \\ \\ &=& \sum_{nn'} g(\epsilon(a_n - a_{n'}))\; \mathbb{P}_{a_n} \rho_{s}^{(0)} \mathbb{P}_{a_{n'}} \; , \end{eqnarray} \label{reduced rho-s tracing pi and D} \end{subequations}
giving the same answer as before, Eq. (\ref{reduced rho}). We stress that this result holds when the probe-detector interaction does not involve $s$.
\section{Successive Measurements in Quantum Mechanics} \label{successive}
We now generalize the problem of the previous section to describe the measurement of two observables in succession: $\hat{A}$, as defined in Eq. (\ref{sp. repr. A}), at time $t_1$, and then \begin{equation} \hat{B} = \sum_m b_m \mathbb{P}_{b_m} \; , \label{sp. repr. B} \end{equation} (the $b_m$'s may also be degenerate), at some later time $t_2$. For this purpose, we assume that we employ two probes, which are the ones that we detect; their momentum and coordinate operators are $\hat{P}_i$, $\hat{Q}_i$, $i=1,2$. The interaction of the system with the probes defines the Hamiltonian
\begin{equation} \hat{H} (t) = \epsilon_1 g_1(t) \hat{A} \hat{P}_1 + \epsilon_2 g_2(t) \hat{B} \hat{P}_2 \; . \label{V 2meas} \end{equation}
Again, we have disregarded the intrinsic Hamiltonians of the system and of the two probes. The functions $g_1(t)$ and $g_2(t)$ are narrow non-overlapping functions, centered around $t=t_1$ and $t=t_2$, respectively (see Eqs. (\ref{g(t) 1})), with $0 < t_1 < t_2 $.
The unitary evolution operator is given by
\begin{equation} \hat{U}(t) ={\rm e}^{-\frac{i}{\hbar}\epsilon_2 G_2(t) \hat{B} \hat{P}_2} {\rm e}^{-\frac{i}{\hbar}\epsilon_1 G_1(t) \hat{A} \hat{P}_1}. \label{U 2meas} \end{equation}
If the density operator of the system plus the probes at $t=0$ is assumed to be the direct product $ \rho^{(0)} = \rho_{s} \otimes \rho_{\pi_1} \otimes \rho_{\pi_2} $, for $t \gg t_2$, i.e., after the second interaction has ceased to act, it is given by
\begin{eqnarray} && \rho_f^{(\hat{B} \leftarrow \hat{A})}= \sum_{nn'mm'} (\mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}} \rho_{s}^{(0)} \;\mathbb{P}_{a_{n'}} \mathbb{P}_{b_{m'}}) \nonumber \\ && \hspace{5mm}\cdot \left( e^{-\frac{i}{\hbar}\epsilon_1 a_n \hat{P_1}} \rho_{\pi_1}^{(0)} e^{\frac{i}{\hbar}\epsilon_1 a_{n'}\hat{P_1}} \right) \left( e^{-\frac{i}{\hbar}\epsilon_2 b_m \hat{P_2}} \rho_{\pi_2}^{(0)} e^{\frac{i}{\hbar}\epsilon_2 b_{m'} \hat{P_2}} \right) \; . \label{rhof two probes} \end{eqnarray}
At $t \gg t_2$ we detect the two probe positions and momenta in order to obtain information about the system. Two examples are considered below.
We first detect the two probe positions $\hat{Q}_1$ and $\hat{Q}_2$. Their correlation can be calculated as
\begin{eqnarray} \langle \hat{Q}_1 \hat{Q}_2 \rangle_f^{(\hat{B} \leftarrow \hat{A})} = {\rm Tr} \left[\rho_f^{(\hat{B} \leftarrow \hat{A})}\hat{Q}_1 \hat{Q}_2 \right]\; , \label{<Q1Q2> 0} \end{eqnarray}
with the result \cite{johansen-mello:2008,amir-pier}
\begin{equation} \frac{\langle \hat{Q}_1 \hat{Q}_2 \rangle_f ^{(\hat{B} \leftarrow \hat{A})}}{\epsilon_1 \epsilon_2} = \Re \sum_{nm} a_{n} b_{m} W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)\; , \label{<Q1Q2> 1} \end{equation}
where $\Re$ stands for the real part. We have defined
\begin{equation} W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) =\sum_{n'} \lambda(\epsilon_1(a_n-a_{n'})) {\rm Tr}\left[\rho_s^{(0)} (\mathbb{P}_{a_{n'}} \mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}})\right] \label{Wmn} \end{equation}
and
\begin{subequations} \begin{eqnarray} \hspace{1cm}\lambda(\beta) &=& g(\beta)+ 2 h(\beta) \; , \label{lambda(beta)} \\ g(\beta) &=& \left\langle {\rm e}^{-\frac{i}{\hbar}\beta \hat{P}_1}\right\rangle_{\pi_1}^{(0)}, \label{g(beta)} \\ h(\beta) &=& \frac{1}{\beta}\left\langle {\rm e}^{-\frac{i}{2\hbar}\beta \hat{P}_1} \hat{Q}_1 {\rm e}^{-\frac{i}{2\hbar}\beta \hat{P}_1} \right\rangle_{\pi_1}^{(0)} \; .
\label{h(beta)} \end{eqnarray} \label{lambda,g,h} \end{subequations}
Here, $\langle \cdots \rangle_{\pi_1}^{(0)}$ indicates an average over the initial state of probe 1. Notice that $\langle \hat{Q}_1 \hat{Q}_2 \rangle_f^{(\hat{B} \leftarrow \hat{A})}$ may be a complicated function of $\epsilon_1$; however, it is linear in $\epsilon_2$, the strength associated with the last measurement, just as for a single measurement we found, in Eq. (\ref{<Q>}), that $\langle \hat{Q} \rangle_f^{(\hat{A})} \propto \epsilon$. We also have the result (see Ref. \cite{amir-pier} for the relevant conditions)
\begin{equation} \lambda(0)=1 \; . \label{l(0)=1} \end{equation}
The following comments are in order at this point. Knowing the original system state $\rho_s^{(0)}$, the {\em auxiliary function} $\Re W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ appearing in Eq. (\ref{<Q1Q2> 1}) allows predicting the {\em detectable} quantity $\langle \hat{Q}_1 \hat{Q}_2 \rangle_f^{(\hat{B} \leftarrow \hat{A})}$. It extends to two measurements the Born probability $W^{(\hat{A})}_{a_{n}}={\rm Tr} (\rho_s^{(0)} \mathbb{P}_{a_n})$ of Eq. (\ref{Wan 0}) which, for single measurements, allows predicting the detectable quantity $\langle \hat{Q} \rangle_f^{(\hat{A})} =\epsilon {\sum}_n a_n W^{(\hat{A})}_{a_n} $ of Eq. (\ref{<Q>}). More interestingly, {\em detecting} $\langle \hat{Q}_1 \hat{Q}_2 \rangle_f^{(\hat{B} \leftarrow \hat{A})}$, we investigate {\em what information can we retrieve on the system state}. This is the point of view that will be taken in the next section.
As the next example, we consider again the same Hamiltonian of Eq. (\ref{V 2meas}) but, after the second interaction has acted, i.e., for $t \gg t_2$, we detect, on a second sub-ensemble, the momentum $\hat{P}_1$ of the first probe instead of its position, and the position $\hat{Q}_2$ of the second probe. The resulting correlation between $\hat{P}_1$ and $\hat{Q}_2$ is \cite{johansen-mello:2008,amir-pier}
\begin{equation} \frac{1}{\epsilon_1 \epsilon_2}\langle \hat{P}_1 \hat{Q}_2 \rangle
^{(\hat{B} \leftarrow \hat{A})}
=\frac{1}{2\sigma^2_{Q_1}} \Im \sum_{nm} a_{n} b_{m} \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) \; , \label{<P1Q2> 0} \end{equation}
where
\begin{equation} \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) =\sum_{n'} \tilde{\lambda}(\epsilon_1(a_n-a_{n'})) {\rm Tr}\left[\rho_s^{(0)} (\mathbb{P}_{a_{n'}} \mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}})\right] \; , \label{Wtilde} \end{equation}
and
\begin{subequations} \begin{eqnarray} \tilde{\lambda}(\beta) &=&\frac{\bar{\lambda}(\beta)}{\bar{\lambda}(0)}\;, \label{tilde-lambda} \\ \bar{\lambda}(\beta) &=& \frac{1}{\beta}\frac{\partial g(\beta)}{\partial \beta} \; . \label{bar-lambda} \end{eqnarray} \label{tilde-bar-lambda} \end{subequations}
Just as in the first example, the {\em auxiliary function} $\Im \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ allows predicting the {\em detectable} quantity $\langle \hat{P}_1 \hat{Q}_2 \rangle_f^{(\hat{B} \leftarrow \hat{A})}$. Alternatively, detecting $\langle \hat{P}_1 \hat{Q}_2 \rangle_f^{(\hat{B} \leftarrow \hat{A})}$, we shall investigate {\em what information can we retrieve on the system state}.
We illustrate in Fig. \ref{fig8} the measurements of Eqs. (\ref{<Q1Q2> 1}) and (\ref{<P1Q2> 0}) for the particular case of the Stern-Gerlach experiment studied in section \ref{stern-gerlach}, using the following translation of observables:
\begin{equation} \begin{array}{cc} \hat{A} \Rightarrow \hat{\sigma}_z & \hat{B} \Rightarrow \hat{\sigma}_x \\ \hat{P}_1 \Rightarrow \hat{z} & \hat{P}_2 \Rightarrow \hat{x} \\ \hat{Q}_1 \Rightarrow -\hat{p}_z & \hat{Q}_2 \Rightarrow -\hat{p}_x \end{array} \label{translation to SG for 2 probes} \end{equation}
\begin{figure}\label{fig8}
\end{figure}
In what follows we examine some properties of the auxiliary functions $W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ and $ \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ defined in Eqs. (\ref{Wmn}) and (\ref{Wtilde}) (for more details, see Refs. \cite{johansen-mello:2008,amir-pier}).
1) In the {\em strong-coupling limit}, $\epsilon_1 \to \infty$, we find
\begin{equation}
\left. \begin{array}{c} W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) \\ \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) \end{array} \right\}
\to \; {\cal W}_{b_{m}a_{n}}^{(\hat{B} \leftarrow \hat{A})} \equiv {\rm Tr}\left[\rho_s^{(0)} (\mathbb{P}_{a_{n}} \mathbb{P} _{b_{m}} \mathbb{P}_{a_{n}} )\right] \; . \label{W e to infty} \end{equation}
This is the joint probability distribution given by the so-called {\em Wigner's rule} \cite{Wigner-ProbMeas:63} (it is real and non-negative; notice its dependence on the order in which the two successive measurements are performed), which is obtained for {\em projective measurements} as
\begin{eqnarray}
P(b_m, a_n) &=& P(b_m|a_n) P(a_n)
=|\langle b_m|a_n \rangle|^2 |\langle a_n|\psi \rangle|^2 \nonumber \\
&=& \langle \psi| \mathbb{P}_{a_{n}} \mathbb{P} _{b_{m}}
\mathbb{P}_{a_{n}} |\psi \rangle \; . \label{wigner rule} \end{eqnarray}
We have assumed a pure state and no degeneracy, and $P(b_m|a_n)$ denotes a conditional probability.
2) In the {\em weak-coupling limit}, $\epsilon_1 \to 0$, we find
\begin{equation}
\left. \begin{array}{c} W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) \\ \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) \end{array} \right\}
\to \; {\cal K}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}} \equiv{\rm Tr}\left[\rho_s^{(0)} ( \mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}})\right] . \label{W e to 0} \end{equation}
This is the Kirkwood--Dirac joint {\em quasi-probability}, which is complex, in general \cite{Kirkwood-QuanStatAlmoClas:33,Dirac-AnalBetwClasQuan:45,Steinberg-Condprobquantheo:95,Johansen+Luis-NoncWeakMeas:04} (see also Ref. \cite{feynmann87}).
In this limit $\epsilon_1 \to 0$, the probes correlation can be written in various forms as follows
\begin{subequations} \begin{eqnarray} \frac{1}{\epsilon_1 \epsilon_2}\langle \hat{Q}_1 \hat{Q}_2 \rangle^{(\hat{B}\leftarrow \hat{A})} &=& \sum_{nm} a_n b_m \frac12{\rm Tr}\left[\rho_s^{(0)} (\mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}} + \mathbb{P}_{a_{n}} \mathbb{P}_{b_{m}} ) \right] \label{<Q1Q2> e1 to 0 a} \\ &=& \sum_{nm} a_n b_m W^{MH}_{b_{m} a_{n}} = \sum_{nm} a_n b_m \langle \hat{S}_{mn} \rangle_0 \label{<Q1Q2> e1 to 0 b} \\ &=& \frac12 {\rm Tr}\left[\rho_s^{(0)} (\hat{B}\hat{A}+\hat{A}\hat{B} )\right] \label{<Q1Q2> e1 to 0 c} \end{eqnarray}
where
\begin{eqnarray} W^{MH}_{b_{m} a_{n}} &=& \frac12{\rm Tr}\left[\rho_s^{(0)} (\mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}} +\mathbb {P}_{a_{n}} \mathbb{P}_{b_{m}}) \right] = \langle \hat{S}_{mn} \rangle_0 \; , \label{W(MH)} \\ \hat{S}_{mn} &\equiv& \frac12 \left( \mathbb{P}_{b_{m}} \mathbb{P}_{a_{n}} +\mathbb{P}_{a_{n}}\mathbb{P}_{b_{m}} \right) \; ; \label{Smn} \end{eqnarray} \label{<Q1Q2> e1 to 0} \end{subequations}
$W^{MH}_{b_{m} a_{n}}$ is the real part of the Kirkwood quasi-probability distribution \cite{Kirkwood-QuanStatAlmoClas:33,Dirac-AnalBetwClasQuan:45,Steinberg-Condprobquantheo:95,Johansen+Luis-NoncWeakMeas:04}, also called the ``Margenau--Hill (MH) distribution'' \cite{Margenau+Hill-CorrbetwMeasQuan:61}: it may take negative values, and thus cannot be regarded as a joint probability in the classical sense.
We remark that, if $[\mathbb{P}_{b_{m}}, \mathbb{P}_{a_{n}}] \neq 0$, then the operator $\hat{S}_{mn}$ of Eq. (\ref{Smn}) has at least one negative eigenvalue. For this pair of variables, i.e., $a_n$, $b_m$, and for the particular state of the system which is the eigenstate that gives rise to this negative eigenvalue, the Margenau-Hill distribution of Eq. (\ref{W(MH)}) is negative, i.e., $W^{MH}_{b_{m} a_{n}}=\langle \hat{S}_{mn} \rangle_0<0$, and the probes correlation $\langle \hat{Q}_1 \hat{Q}_2 \rangle/\epsilon_1\epsilon_2$ {\em may} lie outside the range $[(a_{n}b_{m})_{min}, (a_{n}b_{m})_{max}]$ (see Eq. (\ref{<Q1Q2> e1 to 0 b})). We illustrate this point with an example.
Consider a Hilbert space of dimensionality $N=2$, and two operators $\hat{A}$ and $\hat{B}$ having the following eigenvalues and eigenvectors:
\begin{subequations} \begin{eqnarray} a_n &=& 1,0 \; ; \hspace{1cm}
| 1 \rangle = \left[ \begin{array}{c} 1 \\ 0 \end{array} \right], \hspace{9mm}
| 0 \rangle = \left[ \begin{array}{c} 0 \\ 1 \end{array} \right], \\ b_m &=& 1,0 \; ; \hspace{7mm} \;\;\;
| 1 ) = \frac{1}{\sqrt{2}} \left[ \begin{array}{c} 1 \\ 1 \end{array} \right], \;\;
| 0 ) = \frac{1}{\sqrt{2}} \left[ \begin{array}{r} 1 \\ -1 \end{array} \right]. \end{eqnarray} \label{e-values,e-vectors N2 for MH<0} \end{subequations}
From Eq. (\ref{<Q1Q2> e1 to 0 b}) we have
\begin{equation} \frac{1}{\epsilon_1 \epsilon_2} \langle \hat{Q}_1 \hat{Q}_2 \rangle^{(\hat{B}\leftarrow \hat{A})} = \sum_{n,m=0}^1 a_n b_m \langle S_{mn} \rangle_0 = \langle S_{11} \rangle_0 \; . \label{<Q1Q2> e=0 N=2} \end{equation}
For the two bases of Eq. (\ref{e-values,e-vectors N2 for MH<0}), we find $S_{11}$ and its eigenvalues as
\begin{equation} S_{11} = \frac14 \left[
\begin{array}{cc} 2 & 1 \\ 1 & 0 \end{array}
\right] \;\;\;
\begin{array}{c}
\lambda_+ = \frac14 (1+ \sqrt{2}) > 0 \Rightarrow | \psi_+ \rangle \\
\lambda_- = \frac14 (1- \sqrt{2}) < 0 \Rightarrow | \psi_{-} \rangle \end{array}
\label{S11 and its eigenvalues} \end{equation}
For the state $| \psi_{-}\rangle$, the position-position correlation of Eq. (\ref{<Q1Q2> e=0 N=2}) becomes
\begin{equation} \frac{1}{\epsilon_1 \epsilon_2}\langle \hat{Q}_1 \hat{Q}_2 \rangle^{(\hat{B}\leftarrow \hat{A})}
= \left\langle \psi_- \right|S_{11}\left| \psi_- \right\rangle =\frac14 (1- \sqrt{2}) < 0 \end{equation} which lies outside the interval defined by the possible values $0,1$ of the product $a_n b_m$.
3) For an intermediate, {\em arbitrary} $\epsilon_1$, $W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ and $\tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ can be regarded, as already noted above, as two auxiliary functions, $\Re W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ being tailored for predicting $\langle \hat{Q}_1 \hat{Q}_2 \rangle$, and $\Im \tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ for $\langle \hat{P}_1 \hat{Q}_2 \rangle$. In the next section we shall see that for some special $\hat{A}$'s and $\hat{B}$'s we can realize the inverse case: from the measurable quantities $\langle \hat{Q}_1 \hat{Q}_2 \rangle$ and $\langle \hat{P}_1 \hat{Q}_2 \rangle$ we can reconstruct $\rho_s^{(0)}$.
4) If the projectors $\mathbb{P}_{a_{n}}$, $\mathbb{P}_{b_{m}}$ appearing in Eqs. (\ref{Wmn}) and (\ref{Wtilde}) commute, i.e., $ \left[\mathbb{P}_{a_{n}}, \mathbb{P}_{b_{m}}\right] = 0, \;\;\forall n,m\; , $ then we find, for {\em arbitrary} $\epsilon_1$
\begin{equation} W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) =\tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) ={\rm Tr}\left[\rho_s^{(0)} ( \mathbb{P}_{b_{m}}\mathbb{P}_{a_{n}} )\right], \;\;\;\; \forall \epsilon_1 \; . \label{W=tilde W in comm case} \end{equation}
This result is the standard, real and non-negative, quantum-mechanical definition of the joint probability of $a_n$ and $b_m$ for commuting observables.
We also find that the correlation of the two probe positions measured in units of $\epsilon_1 \epsilon_2$ coincides, for an arbitrary coupling strength $\epsilon_1$, with the standard result for the correlation of the two observables $\hat{A}$ and $\hat{B}$, i.e.,
\begin{equation} \frac{1}{\epsilon_1 \epsilon_2 }\langle Q_1 Q_2 \rangle = {\rm Tr}\left[\rho_s^{(0)} (\hat{A}\hat{B})\right]\;, \;\;\; \forall \epsilon_1 \; . \label{<Q1Q2>=<AB> in comm case} \end{equation}
5) For the particular case in which $\pi_1$ is described by a pure Gaussian state, we find (see also Eqs. (\ref{g(beta) gaussian}))
\begin{subequations} \begin{eqnarray} \lambda(\beta) &=& \tilde{\lambda}(\beta) =g(\beta) =e^{-\frac{\beta^2}{8\sigma_{Q_1}^2}} \label{} \\ W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1) &=&\tilde{W}^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)\; . \end{eqnarray} \label{g,gtilde,gaussian case} \end{subequations}
This is the case studied in Ref. \cite{johansen-mello:2008}.
6) As a particular situation of property 4) above, suppose we {\em measure successively the same observable} $\hat{A}$. We thus set $\hat{B}=\hat{A}$ in the formalism. From Eq. (\ref{Wmn}) we find
\begin{eqnarray} W^{(\hat{A} \leftarrow \hat{A})}_{a_{\bar{n}}a_{n}}(\epsilon_1) &=&\sum_{n'} \lambda(\epsilon_1(a_n-a_{n'})) {\rm Tr}_s\left[\rho_s^{(0)} (\mathbb{P}_{a_{n'}} \mathbb{P}_{a_{\bar{n}}} \mathbb{P}_{a_{n}})\right] \nonumber \\ &=& {\rm Tr}_s \left(\rho_s^{(0)}\mathbb{P}_{a_{n}}\right) \delta_{a_{\bar{n}},a_n} \; . \label{W for B=A} \end{eqnarray}
Eq. (\ref{<Q1Q2> 1}) then gives
\begin{equation} \frac{\langle Q_1 Q_2 \rangle_f^{(\hat{A} \leftarrow \hat{A})}} {\epsilon_1 \epsilon_2 } = \sum_{n,\bar{n}} a_n a_{\bar{n}} \Re W^{(\hat{A} \leftarrow \hat{A})}_{a_{\bar{n}}a_{n}}(\epsilon_1) = \sum_n W_{a_n}^{(\hat{A})} a_n^2 = \langle \hat{A^2}\rangle_0 \; . \label{<Q1Q2> for B=A} \end{equation}
For simplicity, we restrict ourselves to the case in which, at $t=0$, the system proper $s$ and the two probes $\pi_1$, $\pi_2$ are described by pure states, and
\begin{equation}
|\Psi \rangle_0 = |\psi \rangle_s^{(0)} |\chi \rangle_{\pi_1}^{(0)}
|\chi \rangle_{\pi_2}^{(0)} \; . \label{Psi0} \end{equation}
Then, for $t \gg t_2$, i.e., after the second interaction, the state vector is given by
\begin{subequations} \begin{eqnarray}
|\Psi \rangle_f &=& {\rm e}^{-\frac{i}{\hbar}\epsilon_2 \hat{A} \hat{P}_2} {\rm e}^{-\frac{i}{\hbar}\epsilon_1 \hat{A} \hat{P}_1}
|\Psi \rangle_{0} \label{Psi_f a} \\
&=& \sum_n \left(\mathbb{P}_{a_n} |\psi \rangle_s^{(0)}\right) \left({\rm e}^{-\frac{i}{\hbar}\epsilon_1 a_n \hat{P}_1}
|\chi \rangle_{\pi_1}^{(0)}\right) \left({\rm e}^{-\frac{i}{\hbar}\epsilon_2 a_n \hat{P}_2}
|\chi \rangle_{\pi_2}^{(0)}\right) \; . \label{Psi_f b} \end{eqnarray} \label{Psi_f} \end{subequations}
The joint probability density (jpd) of the eigenvalues $Q_1, Q_2$ of the two position operators for times $t>t_2$, when the two interactions have ceased to act, is then
\begin{subequations} \begin{eqnarray} p_f(Q_1,Q_2) &=&
_f\langle \Psi | \mathbb{P}_{Q_1} \mathbb{P}_{Q_2}
|\Psi \rangle _f \label{p(Q1,Q2) a} \\
&=& \sum_n W_{a_n}^{(\hat{A})} \left|\chi_{\pi_1}^{(0)}(Q_1-\epsilon_1 a_n) \right|^2
\left|\chi_{\pi_2}^{(0)}(Q_2-\epsilon_2 a_n) \right|^2 \label{p(Q1,Q2) b} \\ &=& \sum_n W_{a_n}^{(\hat{A})} \frac{{\rm e}^{-\frac{(Q_1-\epsilon_1 a_n)^2}{2\sigma_{Q_1}^2}}} {\sqrt{2\pi \sigma_{Q_1}^2}} \frac{{\rm e}^{-\frac{(Q_2-\epsilon_2 a_n)^2}{2\sigma_{Q_2}^2}}} {\sqrt{2\pi \sigma_{Q_2}^2}} \; . \label{p(Q1,Q2) c} \end{eqnarray} \label{p(Q1,Q2) 1} \end{subequations}
In Eq. (\ref{p(Q1,Q2) c}) we have assumed the original pure states for the probes to be Gaussian. Also,
\begin{equation} W_{a_n}^{(\hat{A})}
=\; ^{(0)}{_s\langle} \psi |\hat{\mathbb P}_{a_n}|\psi\rangle_s^{(0)} \label{Wan} \end{equation}
is the Born probability for the value $a_n$ in the original system state, and we wrote
\begin{equation}
\langle Q_1 | {\rm e}^{-\frac{i}{\hbar}\epsilon_1 a_n \hat{P}_1}
|\chi \rangle_{\pi_1}^{(0)} = \chi_{\pi_1}^{(0)}(Q_1-\epsilon_1 a_n) \; , \label{chi(Q-ea)} \end{equation}
and similarly for probe $\pi_2$.
Clearly, result (\ref{<Q1Q2> for B=A}) can be verified from the jpd of Eq. (\ref{p(Q1,Q2) c}). From this jpd we also obtain
\begin{subequations} \begin{eqnarray} \langle \hat{Q}_i\rangle &=& \epsilon_i \langle \hat{A} \rangle_0 \; , \hspace{15mm} i=1,2, \label{<Qi> for A=B} \\ \langle \hat{Q}_i^2\rangle &=& \epsilon_i^2 \langle \hat{A}^2 \rangle_0 + \sigma_{Q_i}^2 \; , \;\;\; i=1,2. \label{<(Qi)2> for A=B} \end{eqnarray} \label{<Qi>,<(Qi)2> for A=B} \end{subequations}
It is useful to consider the correlation coefficient between the two probe positions, which is defined as
\begin{subequations} \begin{eqnarray} C(Q_1,Q_2) &=& \frac{\langle Q_1 Q_2 \rangle - \langle Q_1 \rangle \langle Q_2 \rangle} {\sqrt{[\langle Q_1^2 \rangle - \langle Q_1 \rangle^2] [\langle Q_2^2 \rangle - \langle Q_2 \rangle^2]}} \label{Q1Q2 corr. coeff. for B=A a} \\ &=& \frac{({\rm var}\hat{A})_0} {\sqrt{({\rm var}\hat{A})_0 + \left(\frac{\sigma_{Q_1}}{\epsilon_1}\right)^2} \sqrt{({\rm var}\hat{A})_0 + \left(\frac{\sigma_{Q_2}}{\epsilon_2}\right)^2}} \label{Q1Q2 corr. coeff. for B=A b} \\ &\to& 1, \;\;\;\;\; \rm{as} \;\;\;\;\;\sigma_{Q_i} / \epsilon_i \to 0 \; . \label{Q1Q2 corr. coeff. for B=A c} \end{eqnarray} \label{Q1Q2 corr. coeff. for B=A} \end{subequations}
In the second line, (\ref{Q1Q2 corr. coeff. for B=A b}), we have used results (\ref{<Qi>,<(Qi)2> for A=B}) for a pure Gaussian state of the probes. As a result, in the strong-coupling limit $\sigma_{Q_i}/\epsilon_i \to 0$, the outcomes for the probe position 1 and probe position 2 become completely correlated, which is the result we would have expected. This is illustrated schematically in Fig. \ref{fig9}.
\begin{figure}
\caption{Schematic illustration of the jpd of the two probe positions $p_f(Q_1,Q_2)$ of Eq. (\ref{p(Q1,Q2) c}), when we measure subsequently the {\em same} observable $\hat{A}$. The illustration is for the strong-coupling limit $\sigma_{Q_i}/\epsilon_i \ll 1$, in which $Q_1$ and $Q_2$ are strongly correlated. }
\label{fig9}
\end{figure}
7) Relation of successive measurements to {\em weak values}.
The weak value of the observable $\hat{A}$, with pre-selection $|\psi \rangle$
and post-selection $|\phi \rangle$, is defined as \cite{aharonov_et_al,Johansen+Luis-NoncWeakMeas:04}
\begin{subequations} \begin{eqnarray} (\hat{A})_W
&=& \frac{\langle \phi|\hat{A}|\psi \rangle}
{\langle \phi |\psi \rangle} \label{WV A a} \\ &=&
\frac{\langle \psi | \phi \rangle \langle \phi|\hat{A}|\psi \rangle}
{\langle \psi | \phi \rangle\langle \phi |\psi \rangle} =
\frac{\langle \psi | \hat{\mathbb{P}}_{\phi}\hat{A}|\psi \rangle}
{\langle \psi | \hat{\mathbb{P}}_{\phi} | \psi \rangle} \label{WV A b} \\ &\Rightarrow& \frac{ {\rm Tr}\left(\hat{\rho}_s^{(0)}\hat{\mathbb{P}}_{\phi}\hat{A} \right)} {{\rm Tr}\left(\hat{\rho}_s^{(0)}\hat{\mathbb{P}}_{\phi}\right) } \label{WV A c} \\ &=& \sum_n a_n \frac{\langle \hat{\mathbb{P}}_{\phi} \hat{\mathbb{P}}_{a_n}\rangle} {\langle \hat{\mathbb{P}}_{\phi} \rangle} \label{WV A d} \end{eqnarray} \label{WV A} \end{subequations}
We see that the weak value can be regarded as the correlation function between the observable $\hat{A}$ and the projector $\hat{\mathbb{P}}_{\phi}$ \cite{Johansen04,johansen-arxiv-09}. Eq. (\ref{WV A c}) gives the generalization of the definition for a state described by a density operator. Eq. (\ref{WV A d}) expresses the weak value as the sum over the states of $a_n$ times a ``complex probability" of $a_n$ conditioned by $\phi$.
Consider now a successive measurement experiment in which the two observables are $\hat{A}$ and $\hat{\mathbb{P}}_{\phi}$. The Hamiltonian of Eq. (\ref{V 2meas}) becomes
\begin{equation} \hat{H}(t) = \epsilon_1 g_1(t) \hat{A} \hat{P}_1
+ \epsilon_2 g_2(t) \hat{\mathbb{P}}_{\phi} \hat{P}_2, \;\;\;\;\; 0 < t_1 < t_2. \label{H(t) for WV} \end{equation}
One can show \cite{johansen-mello-arxiv-09,johansen_to_be_published} that the position-position and momentum-position correlation of the two probes are related to the real and imaginary parts of the weak value as
\begin{subequations} \begin{eqnarray} \lim_{\epsilon_1 \to 0} \frac{\langle \hat{Q}_1 \hat{Q}_2 \rangle^{(\hat{\mathbb{P}}_{\phi}\Leftarrow \hat{A})}} {\epsilon_1\langle \hat{Q}_2\rangle^{(\hat{\mathbb{P}}_{\phi}\Leftarrow \hat{A})}} &=& \frac12 \frac{\langle \hat{A} \hat{\mathbb{P}}_{\phi} + \hat{\mathbb{P}}_{\phi} \hat{A}\rangle} {\langle \hat{\mathbb{P}}_{\phi} \rangle} = \Re \left[(\hat{A})_W \right] \; , \label{<Q1Q2> and WV} \\ \lim_{\epsilon_1 \to 0} \frac{\langle \hat{P}_1 \hat{Q}_2 \rangle^{(\hat{\mathbb{P}}_{\phi}\Leftarrow \hat{A})}} {\epsilon_1\langle \hat{Q}_2\rangle^{(\hat{\mathbb{P}}_{\phi}\Leftarrow \hat{A})}} &=& \frac{1}{2\sigma^2_{Q_1}} \frac{\langle \hat{\mathbb{P}}_{\phi} \hat{A} - \hat{A} \hat{\mathbb{P}}_{\phi} \rangle} {2 i \langle \hat{\mathbb{P}}_{\phi} \rangle} = 2 \sigma^2_{P_1} \Im \left[(\hat{A})_W \right] \; . \label{<P1Q2> and WV} \end{eqnarray} \label{succ meas and WV} \end{subequations}
\section{STATE RECONSTRUCTION SCHEME BASED ON SUCCESSIVE MEASUREMENTS} \label{state reconstruction}
We now use the above formalism to describe a state tomography scheme. We build on previous work \cite{Johansen-Quantheosuccproj:07,johansen-mello:2008} to identify a set of observables which, when measured in succession, provide complete information about the state of a quantum system described in an $N$-dimensional Hilbert space.
For this purpose we consider, in our Hilbert space, two orthonormal bases, whose vectors are denoted by $|k \rangle$ and $|\mu)$, respectively, with $k, \mu=1,\ldots, N$. Latin letters will be used to denote the first basis while Greek letters will be used for the second basis. Given $k$, there is only one vector; given $\mu$, there is also only one vector: i.e., we have no degeneracy.
We assume the two bases are mutually non-orthogonal, i.e.,
\begin{equation}
\langle k |\mu ) \neq 0, \hspace{5mm} \forall k, \mu . \label{mutual non-orthogonality} \end{equation}
Then the two bases have no common eigenvectors.
\begin{figure}
\caption{Illustration, for $N=3$, of the fact that if the two orthonormal bases have one common eigenvector, they cannot be mutually non-orthogonal. }
\label{fig10}
\end{figure}
This is illustrated in Fig. \ref{fig10} for dimensionality $N=3$. The basic vectors of the first basis are $\hat{i},\hat{j},\hat{k}$. Those of the second basis are $\hat{u},\hat{v},\hat{w}$. Assume they have one common eigenvector: e.g., $\hat{k}=\hat{w}$. Then $\hat{u},\hat{v} \perp \hat{k}$ and $\hat{i},\hat{j} \perp \hat{w}$, contradicting the assumption (\ref{mutual non-orthogonality}) that the two bases are mutually non-orthogonal.
We now consider a successive-measurement experiment in which the two observables are the rank-one projectors $\mathbb{P}_{k}$ and $\mathbb{P}_{\mu}$ onto the $k$- and $\mu$-state of the first and second basis, respectively. The Hamiltonian of Eq. (\ref{V 2meas}) becomes
\begin{equation} \hat{H} (t) = \epsilon_1 g_1 (t) \mathbb{P}_{k} \hat{P}_1 + \epsilon_2 g_2 (t) \mathbb{P}_{\mu} \hat{P}_2 \; . \label{V(t)proj} \end{equation}
The projectors $\mathbb{P}_{k}$ and $\mathbb{P}_{\mu}$ possess two eigenvalues: $0$ and $1$. We denote by $\tau$ and $\sigma$ the eigenvalues of $\mathbb{P}_{k}$ and $\mathbb{P}_{\mu}$, respectively, and the corresponding eigenprojectors by $(\mathbb{P}_{k})_{\tau}$ and $(\mathbb{P}_{\mu})_{\sigma}$. They satisfy relations of the type presented in Eqs. (\ref{Pa,nu}).
In the present case, Eq. (\ref{<Q1Q2> 1}) for the probes position-position correlation function gives
\begin{subequations} \begin{eqnarray} \frac{1}{\epsilon_1 \epsilon_2} \langle \hat{Q}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k} )} &=& \Re \sum_{\tau,\sigma = 0}^1 \tau \sigma \; W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{\sigma \tau} (\epsilon_1) \label{<Q1Q2> P P a} \\ &=& \Re W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1). \label{<Q1Q2> P P b} \end{eqnarray} \label{<Q1Q2> P P} \end{subequations}
In Eq. (\ref{<Q1Q2> P P a}), $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{\sigma \tau} (\epsilon_1)$ is the particular case of the quantity $W^{(\hat{B} \leftarrow \hat{A})}_{b_{m}a_{n}}(\epsilon_1)$ of Eq. (\ref{Wmn}) when $\hat{A}$, $\hat{B}$, $a_n$, and $b_m$ are replaced by $\mathbb{P}_{k}$, $\mathbb{P}_{\mu}$, $\tau$, and $\sigma$, respectively, i.e.,
\begin{equation} W^{(\mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{\sigma \tau} (\epsilon_1) =\sum_{\tau'=0}^1 \lambda(\epsilon_1(\tau-\tau')) {\rm Tr}\left[\rho_s (\mathbb{P}_{k})_{\tau'} (\mathbb{P}_{\mu})_{\sigma} (\mathbb{P}_{k})_{\tau} \right] \; , \end{equation}
and, in particular,
\begin{subequations} \begin{eqnarray} W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1) &=& {\rm Tr}(\rho_{s}^{(0)}\mathbb{P}_{k}\mathbb{P}_{\mu} \mathbb{P}_{k}) +\lambda(\epsilon_1) \sum_{k' (\neq k)} {\rm Tr} (\rho_{s}^{(0)} \mathbb{P}_{k'}\mathbb{P}_{\mu} \mathbb{P}_{k}) \; , \label{W11 1 a} \\ &=& \sum_{k'}G_{kk'}(\epsilon_1) {\rm Tr} (\rho_{s}^{(0)} \mathbb{P}_{k'}\mathbb{P}_{\mu} \mathbb{P}_{k}) \; , \label{W11 1 b} \end{eqnarray} \label{W11 1} \end{subequations}
where we have used Eq. (\ref{l(0)=1}) and we have defined
\begin{equation} G_{kk'}(\epsilon_1) = \delta_{k, k'} + \lambda(\epsilon_1)(1-\delta_{k, k'}) . \label{Gkk'} \end{equation}
An important result is that Eq. (\ref{W11 1}) can be inverted to obtain $\rho_s^{(0)}$, giving \cite{johansen-mello:2008,amir-pier}
\begin{equation}
\langle k| \rho_{s}^{(0)} | k' \rangle = \sum_{\mu} \frac{W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{1 1} (\epsilon_1)} {G_{kk'}(\epsilon_1)} \cdot
\frac{(\mu|k'\rangle}
{(\mu|k\rangle} \; . \label{rho elements from W(b,a) 1} \end{equation}
It is clear that Eq. (\ref{rho elements from W(b,a) 1}) requires
$(\mu|k\rangle \neq 0 \; \forall \mu, k$, i.e., that the two bases must be mutually non-orthogonal. Eq. (\ref{rho elements from W(b,a) 1}) is the main result of this chapter.
As a consequence, we see that the set of complex quantities $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$, $\forall k,\mu$, contains all the information about the state of the system $\rho_s^{(0)}$. If we could infer $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$, $\forall k,\mu$, from measurement outcomes, we could reconstruct the full $\rho_s^{(0)}$.
Notice that both the real and imaginary parts of the {\em complex} quantities $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ are needed for tomography. However, from the {\em detected} position-position correlations $\langle \hat{Q}_1 \hat{Q}_2 \rangle ^{(\mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} /\epsilon_1 \epsilon_2$, we directly extract only $\Re W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$, as we see from Eq. (\ref{<Q1Q2> P P b}). In order to find $\Im W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$, we use the momentum-position correlation. Our aim is to show that the measured quantities, the position-position and momentum-position correlation functions, are informationally complete: that is, one can reconstruct $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1}$ from these quantities.
We recall, from Eq. (\ref{<Q> for proj}), the analogous situation in the single-measurement case, where the measurable quantities $\langle \hat{Q} \rangle_f^{(\hat{\mathbb{P}}_{a_{\nu}})}$ allow the reconstruction of the diagonal matrix elements of $ \rho_s^{(0)}$, i.e., $ (1/\epsilon)\langle \hat{Q} \rangle_f^{(\hat{\mathbb{P}}_{a_{\nu}})} = W_{\tau=1}^{(\hat{\mathbb{P}}_{a_{\nu}})}
=\langle a_{\nu} | \rho_s^{(0)} |a_{\nu} \rangle $. The extension of this result to the two-probe case is Eq. (\ref{rho elements from W(b,a) 1}), in conjunction with Eq. (\ref{<Q1Q2> P P b}) (that relates $\Re W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ to measurable quantities), and Eq. (\ref{y}) below (that relates $\Im W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ to measurable quantities).
In the present case, Eq. (\ref{<P1Q2> 0}) for the probes momentum-position correlation function gives
\begin{equation} \frac{1}{\epsilon_1 \epsilon_2} \langle \hat{P}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k} )} = 2 \sigma_{P_1}^2 \Im \widetilde{W}^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{1 1} (\epsilon_1), \label{<P1Q2> P P} \end{equation}
where
\begin{equation} \widetilde W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1) = {\rm Tr} (\rho_{s}^{(0)}\mathbb{P}_{k}\mathbb{P}_{\mu} \mathbb{P}_{k}) +\tilde\lambda(\epsilon_1) \sum_{k'(\neq k)} {\rm Tr} (\rho_{s}^{(0)} \mathbb{P}_{k'}\mathbb{P}_{\mu} \mathbb{P}_{k})\; . \label{tildeW P P 0} \end{equation}
Although the function $\widetilde{W}$ is in general not equal to the function $W$ (except when the probes are described by pure Gaussian states, as in Ref. \cite{johansen-mello:2008}), we now prove that it contains the necessary information to find the imaginary part of $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1}$, and therefore enables a complete state reconstruction.
If we write
\begin{subequations} \begin{eqnarray} W^{(\mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} = x_{\mu k} + i y_{\mu k}, \label{x,y} \\ \widetilde{W}^{(\mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} = \tilde{x}_{\mu k} + i \tilde{y}_{\mu k}, \label{xtilde,ytilde} \end{eqnarray} \label{x,y,xtilde,ytilde} \end{subequations}
the correlation functions, Eqs. (\ref{<Q1Q2> P P b}) and (\ref{<P1Q2> P P}), become
\begin{subequations} \begin{eqnarray} \frac {\langle \hat{Q}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k} )}} {\epsilon_1 \epsilon_2} &=& x_{\mu k}, \label{<Q1Q2>=x} \\ \frac {\langle \hat{P}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k} )}} {\epsilon_1 \epsilon_2} &=& 2 \sigma_{P_1}^2 \tilde{y}_{\mu k}. \label{<P1Q2>=ytilde} \end{eqnarray} \label{<Q1Q2>,<Q1P2>} \end{subequations}
Our aim is to express $y_{\mu k}$ in terms of the measured quantities of Eqs. (\ref{<Q1Q2>,<Q1P2>}). We go back to the expressions (\ref{W11 1}) and (\ref{tildeW P P 0}) for $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ and $\widetilde{W}^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$. The quantities $\lambda(\epsilon_1)$, $\tilde{\lambda}(\epsilon_1)$ are known if the state of the probe ${\pi}_1$ is known (see Eqs. (\ref{lambda,g,h}) and (\ref{tilde-bar-lambda})). We write them as
\begin{subequations} \begin{eqnarray} \lambda(\epsilon_1) &=& \lambda_r(\epsilon_1) + i \lambda_i(\epsilon_1), \label{lambda r,i} \\ \tilde{\lambda}(\epsilon_1) &=& \tilde{\lambda}_r(\epsilon_1) + i \tilde{\lambda}_i(\epsilon_1). \label{lambda_tilde r,i} \end{eqnarray} \label{lambda r,i;lambda_tilde r,i} \end{subequations}
On the other hand, the traces appearing in Eqs. (\ref{W11 1}) and (\ref{tildeW P P 0}) are unknown; we write them as
\begin{subequations} \begin{eqnarray}
{\rm Tr} (\rho_{s}^{(0)}\mathbb{P}_{k}\mathbb{P}_{\mu} \mathbb{P}_{k}) &=&
|\langle k | \mu) |^2
\langle k |\rho_s | k \rangle
= |\langle k | \mu)|^2 \sum_{\mu'} x_{\mu' k} \label{r0} \\ \sum_{k'(\neq k)} {\rm Tr} (\rho_{s}^{(0)} \mathbb{P}_{k'}\mathbb{P}_{\mu} \mathbb{P}_{k}) &=& r_{\mu k}+is_{\mu k} . \label{r,s} \end{eqnarray} \label{r0,r,s} \end{subequations}
Using Eq. (\ref{rho elements from W(b,a) 1}), we wrote Eq. (\ref{r0}) in terms of measured quantities only.
We introduce the definitions (\ref{x,y,xtilde,ytilde}), (\ref{lambda r,i;lambda_tilde r,i}), (\ref{r0,r,s}) in Eqs.~(\ref{W11 1}) and (\ref{tildeW P P 0}), which then give
\begin{subequations} \begin{eqnarray} x_{\mu k}
&=& |\langle k | \mu)|^2 \sum_{\mu'} x_{\mu' k} + \lambda_r r_{\mu k} -\lambda_i s_{\mu k} , \\ y_{\mu k} &=& \lambda_i r_{\mu k} + \lambda_r s_{\mu k},
\\ \tilde{y}_{\mu k} &=& \tilde{\lambda}_i r_{\mu k} + \tilde{\lambda}_r s_{\mu k}. \end{eqnarray} \end{subequations}
For every pair of indices $\mu, k$ we now have a system of three linear equations in the three unknowns $r_{\mu k}$, $s_{\mu k}$ and $y_{\mu k}$, which can thus be expressed in terms of the measured quantities $\tilde{y}_{\mu k}$ and the $x_{\mu k}$ of Eqs. (\ref{<Q1Q2>,<Q1P2>}). The result for $y_{\mu k}$ is
\begin{equation} y_{\mu k} = \frac{\Im \{\lambda(\epsilon_1)\tilde\lambda^\ast(\epsilon_1)\}} {\Re \{\lambda(\epsilon_1)\tilde\lambda^\ast(\epsilon_1)\}}
\;\Big(x_{\mu k}-|\langle k|\mu)|^2 \sum_{\mu'} x_{\mu' k}\Big)
+\frac{|\lambda(\epsilon_1)|^2}{\Re \{\lambda(\epsilon_1)\tilde\lambda^\ast(\epsilon_1)\}}\;\tilde{y}_{\mu k} . \label{y} \end{equation}
We have thus achieved our goal of expressing $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$, and hence $\rho_s^{(0)}$ of Eq.~(\ref{rho elements from W(b,a) 1}), in terms of the measured correlations of Eqs.~(\ref{<Q1Q2>,<Q1P2>}). This completes our procedure.
It is interesting to examine the strong- and weak-coupling limits of the above procedure. In the strong-coupling limit, $\epsilon_1 \to \infty$, from Eq. (\ref{Gkk'}) we see that $G_{k'k}(\epsilon_1) \to \delta_{k' k}$, and Eq. (\ref{rho elements from W(b,a) 1}) gives
\begin{equation}
\langle k| \rho_{s}^{(0)} | k \rangle \to \sum_{\mu} \; {\rm Tr}\left(\rho_s^{(0)} \mathbb{P}_{k} \mathbb{P}_{\mu} \mathbb{P}_{k} \right) = \sum_{\mu}\; {\cal W}_{\mu k}^{(\mathbb{P}_{\mu}\leftarrow \mathbb{P}_{k})} = {\rm Tr}_s (\rho_s^{(0)} \mathbb{P}_{k} ) \; , \label{rho retrieval strong coupling} \end{equation}
in terms of Wigner's joint probability defined in Eq. (\ref{W e to infty}). Notice that in this limit only the diagonal elements $\rho_s^{(0)}$ can be retrieved. This is the limit in which Wigner's formula (\ref{W e to infty}) arises.
In the weak-coupling limit, $\epsilon_1 \to 0$, we have $G_{k'k}(\epsilon_1) \to 1$, and Eq. (\ref{rho elements from W(b,a) 1}) gives
\begin{equation}
\langle k| \rho_{s}^{(0)} | k' \rangle \to \sum_{\mu} {\rm Tr}_s (\rho _s^{(0)} \mathbb{P}_{\mu}\mathbb{P}_{k})
\frac{(\mu|k'\rangle}
{(\mu|k\rangle} =\sum_{\mu} {\cal K}_{\mu k}^{(\mathbb{P}_{\mu}\leftarrow \mathbb{P}_{k})}
\frac{(\mu|k'\rangle}
{(\mu|k\rangle},
\label{rho retrieval weak coupling} \end{equation}
in terms of Kirkwood's joint quasi-probability defined in Eq. (\ref{W e to 0}). The result (\ref{rho retrieval weak coupling}) was first obtained in Ref. \cite{Johansen-Quantheosuccproj:07}.
At first glance it seems that, in an $N$-dimensional Hilbert space, the present scheme for state reconstruction requires the measurement of the $2N^2$ different correlations $\langle \hat{Q}_1 \hat{Q}_2 \rangle^{(\mathbb{P}_{\mu}\leftarrow \mathbb{P}_{k})}$ and $\langle \hat{P}_1 \hat{Q}_2 \rangle^{(\mathbb{P}_{\mu}\leftarrow \mathbb{P}_{k})}$. However, Hermiticity and the unit value of the trace of the density matrix $\rho_s^{(0)}$ impose $N^2+1$ restrictions among its matrix elements, so that $\rho_s^{(0)}$ can be expressed in terms of $N^2-1$ independent parameters. These restrictions eventually imply that only $N^2-1$ of these correlations are actually independent and thus the measurement of only $N^2-1$ correlations is required.
Let us take as an example $N=2$. Labelling the states as $k=0,1$ and $\mu=+,-$, it is enough to measure the correlations
\begin{equation} \langle \hat{Q}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{+} \leftarrow \mathbb{P}_{0} )}, \hspace{2mm} \langle \hat{Q}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{-} \leftarrow \mathbb{P}_{0} )} , \hspace{2mm} {\rm and} \hspace{2mm} \langle \hat{P}_1 \hat{Q}_2 \rangle ^{( \mathbb{P}_{-} \leftarrow \mathbb{P}_{0} )} \; . \label{correlations needed for N=2} \end{equation}
In terms of these correlations and using the notation of Eqs. (\ref{<Q1Q2>,<Q1P2>}), the density matrix elements of our system prior to the measurement are given by
\begin{subequations} \begin{eqnarray} \rho_{00}^{(0)} &=& x_{+,0} + x_{-,0} \\ \rho_{11}^{(0)} &=& 1 - x_{+,0} - x_{-,0} \\ \rho_{01}^{(0)} &=& \frac{x_{+,0} - x_{-,0} - 2i y_{-,0}}{g(\epsilon_1)} \\ \rho_{10}^{(0)} &=& \frac{x_{+,0} - x_{-,0} + 2i y_{-,0}}{g(\epsilon_1)} \; . \end{eqnarray} \label{rho in terms of the x and y correlations N=2 Gaussian} \end{subequations}
We have used Gaussian states for the probes, for which $y_{\mu k}=\tilde{y}_{\mu k}$ (see Eqs. (\ref{g,gtilde,gaussian case})).
The relations appearing in Eqs. (\ref{W11 1}) between $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{11} (\epsilon_1)$ and $\rho_s^{(0)}$
shed light on the strong- and weak-coupling limits of the retrieval scheme described above. We consider again the case $N=2$, for Gaussian states for the probes, and for the case in which the states $|k\rangle$, $k=0,1$, are eigenstates of the Pauli matrix $\sigma_z$ and the states $|\mu )$, $\mu=+,-$, are eigenstates of $\sigma_x$.
In the strong-coupling limit $\epsilon_1 \to \infty$,
\begin{equation} W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{11} (\epsilon_1) \to {\rm Tr}\left(\rho_s^{(0)} \mathbb{P}_{k} \mathbb{P}_{\mu} \mathbb{P}_{k} \right)
=|\langle k|\mu)|^2 \rho_{kk}^{(0)}, \end{equation}
which, for $N=2$, gives
\begin{subequations} \begin{eqnarray} W^{(+\leftarrow 0)}_{11} (\epsilon_1) \to \frac12 \rho^{(0)}_{00} , \;\;\; W^{(-\leftarrow 0)}_{11} (\epsilon_1) \to \frac12 \rho^{(0)}_{00} \\ W^{(+\leftarrow 1)}_{11} (\epsilon_1) \to \frac12 \rho^{(0)}_{11} , \;\;\; W^{(-\leftarrow 1)}_{11} (\epsilon_1) \to \frac12 \rho^{(0)}_{11} \; . \end{eqnarray} \end{subequations}
Thus, $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{11} (\epsilon_1)$ only contains information on the diagonal elements of $\rho_s^{(0)}$, so that only $\rho^{(0)}_{00}$ and $\rho^{(0)}_{11}$ can be retrieved.
In the weak-coupling limit $\epsilon_1 \to 0$,
\begin{equation} W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})} _{11} (\epsilon_1) \to {\rm Tr}\left( \rho_s^{(0)}\mathbb{P}_{\mu} \mathbb{P}_{k} \right) ={\cal K}_{\mu k}
=\langle k|\rho_s^{(0)}|\mu)(\mu |k\rangle \;, \end{equation}
and for $N=2$
\begin{subequations} \begin{eqnarray} W^{(+\leftarrow 0)}_{11} (\epsilon_1) &\to& \frac{\rho^{(0)}_{00} +\rho^{(0)}_{01} }{2} , \;\;\;\; W^{(-\leftarrow 0)}_{11} (\epsilon_1) \to \frac{\rho^{(0)}_{00} -\rho^{(0)}_{01} }{2} \\ W^{(+\leftarrow 1)}_{11} (\epsilon_1) &\to& \frac{\rho^{(0)}_{10} +\rho^{(0)}_{11} }{2} , \;\;\;\; W^{(-\leftarrow 1)}_{11} (\epsilon_1) \to \frac{\rho^{(0)}_{10} -\rho^{(0)}_{11}}{2} \; . \end{eqnarray} \end{subequations}
We thus see that the four $\rho_s^{(0)}$ matrix elements can be retrieved.
In conclusion, to reconstruct a QM state using the successive-measurement scheme, it is better to perform measurements with weak coupling $\epsilon_1$, rather than with strong coupling.
For the case of an arbitrary $\epsilon_1$ we have the relations (\ref{rho in terms of the x and y correlations N=2 Gaussian}) giving the $\rho_s^{(0)}$ matrix elements in terms of the position-position and momentum-position correlations. Recall that $ g(\epsilon_1)=\exp{\left(-\frac{\epsilon_1^2}{8 \sigma_{Q_1}^2}\right)} $. Even if $g\neq 0$, but $g \ll 1$, a small experimental uncertainty in extracting $\langle Q_1 Q_2\rangle^{(\mu \leftarrow k)}$ and $\langle P_1 Q_2\rangle^{(\mu \leftarrow k)}$, which give $W^{(\mu \leftarrow k)}_{1 1} (\epsilon_1) = x_{\mu, k}+ iy_{\mu, k}$, is divided by a small number $g \ll 1$ when $k \neq k'$, and this makes the error in extracting $\rho^{(0)}_{kk'}$ large. Again, we see that, in general, it is advantageous to use a weak coupling rather than a strong coupling.
\section{A quasi-distribution and a generalized transform of observables} \label{generalized transform}
Conceptually, one attractive feature of the tomographic approach we have described is that the quantities $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ that enter the reconstruction formula, Eq. (\ref{rho elements from W(b,a) 1}), can be interpreted as a {\em quasi-probability}, as we now explain.
Let $\hat{O}$ be an observable associated with an $N$-dimensional quantum system. Using Eq. (\ref{rho elements from W(b,a) 1}), we can express its expectation value as
\begin{subequations} \begin{eqnarray} {\rm Tr}_s(\hat{\rho}_s^{(0)}\hat{O})
= \sum_{k k'} \langle k| \rho_s^{(0)} |k'\rangle \langle k'| \hat{O} |k\rangle = \sum_{k \mu} W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1) \; O(\mu, k; \epsilon_1) \; , \label{<O> (W11,O(mu,k)) a} \end{eqnarray}
where we have defined the ``transform" of the operator $\hat{O}$ as
\begin{eqnarray} O(\mu, k; \epsilon_1) =\sum_{k'}
\frac{(\mu|k'\rangle}
{(\mu|k\rangle}
\frac{\langle k' | \hat{O} | k \rangle} {G_{k' k}(\epsilon_1)} \; . \label{O(mu,k)} \end{eqnarray} \label{<O> (W11,O(mu,k))} \end{subequations}
Eqs. (\ref{<O> (W11,O(mu,k))}) have a structure similar to that of a number of transforms found in the literature, that express the quantum mechanical expectation value of an observable in terms of its transform and a quasi-probability distribution.
For example, the Wigner transform of an observable and the Wigner function of a state are defined in the phase space $(q,p)$ of the system, $q$ and $p$ labelling the states of the coordinate and momentum bases, respectively.
In the present case, the transform (\ref{O(mu,k)}) of the observable is defined for the pair of variables $(\mu, k)$, $\mu$ and $k$ labelling the states of each of the two bases. As Eq. (\ref{<O> (W11,O(mu,k)) a}) shows, the quantity $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ plays the role of a {\em quasi-probability} for the system state $\hat{\rho}_s^{(0)}$, and is also defined for the pair of variables $(\mu, k)$. It can be thought of as the joint quasi-probability (in general complex \cite{feynmann87}) of two non-degenerate observables, the two bases being their respective eigenbases. Since any pair of mutually orthogonal bases can be used, we have a whole family of transforms that can be employed to retrieve the state.
In the literature it has been discussed how Wigner's function can be considered as a representation of a quantum state (see, e.g., Ref. \cite{schleich-2001}, Chs. 3 and 4), in the sense that i) it allows retrieving the density operator, and ii) any quantum-mechanical expectation value can be evaluated from it. Similarly, and for the same reasons, in the present context the quasi-probability $W^{( \mathbb{P}_{\mu} \leftarrow \mathbb{P}_{k})}_{1 1} (\epsilon_1)$ can also be considered as a representation of a quantum state.
\section{Conclusions} \label{concl}
In this series of lectures we described how to obtain information on a quantum-mechanical system, by coupling it to an auxiliary degree of freedom, or probe, which is then detected by some measuring instrument. The model used in the analysis is the one introduced by von Neumann, in which the interaction of the system proper with the probe is described in a dynamical way.
For single measurements we used the standard von Neumann model, which employs one probe coupled to the system with an arbitrary coupling strength; for successive measurements, we generalized von Neumann's model employing two probes.
In the case of single measurements, we investigated the average, the variance and the full distribution of the probe position after the interaction with the system, and their relation with the properties of the latter. An interesting outcome of the analysis is the reduced density operator of the system which, in the limit of strong coupling between the system and the probe, was shown to reduce to the von-Neumann-L\"uders rule, a result which is frequently obtained in a ``non-dynamical" way, as a result of non-selective projective measurements.
In the case of successive measurements, we studied how to obtain information on the system by detecting the position-position and momentum-position correlations of the two probes. We saw that the so-called ``Wigner's formula", as well as ``Kirkwood's quasi-probability distribution", emerge in the strong- and in the weak-coupling limits, respectively, of the above formalism. We investigated the successive measurement of the same observable and showed how, in the strong-coupling limit, the result behaves in the expected manner. The relation of the weak-value theory to successive measurements was briefly mentioned.
Furthermore, we described a quantum state tomography scheme which is applicable to a system described in a Hilbert space of arbitrary finite dimensionality, which is constructed from sequences of two measurements. The scheme consists of measuring the various pairs of projectors onto two bases --which have no mutually orthogonal vectors.
Finally, we found a generalized transform of the state and the observables based on the notion of successive measurements. The result has a structure similar to that of a number of transforms found in the literature, like the Wigner function, that express the quantum-mechanical expectation value of an observable in terms of its transform and a quasi-probability distribution.
In a recent investigation, the question was posed whether it is possible to find appropriate measurements of the system position and momentum that would allow the reconstruction of the Wigner function of the system state. It was found that the types of measurements needed are successive measurements of projectors associated with position and momentum, of the type envisaged by von Neumann's model which was discussed here. A preliminary account of that investigation can be found in Ref. \cite{mello-revzen_2013}.
\begin{theacknowledgments}
The author acknowledges financial support from Conacyt, Mexico (under Contract No. 79501) and from the Sistema Nacional de Investigadores, Mexico.
\end{theacknowledgments}
\IfFileExists{\jobname.bbl}{}
{\typeout{}
\typeout{******************************************}
\typeout{** Please run "bibtex \jobname" to obtain}
\typeout{** the bibliography and then re-run LaTeX}
\typeout{** twice to fix the references!}
\typeout{******************************************}
\typeout{}
}
\end{document}
\begin{document}
\title{Stability of properties of locales under groups} \author{Christopher Townsend} \maketitle \begin{abstract} Given a particular collection of categorical axioms, aimed at capturing properties of the category of locales, we show that if $\mathcal{C}$ is a category that satisfies the axioms then so too is the category $[ G, \mathcal{C}]$ of $G$-objects, for any internal group $G$. To achieve this we prove a general categorical result: if an object $S$ is double exponentiable in a category with finite products then so is its associated trivial $G$-object $(S, \pi_2: G \times S \rTo S)$. The result holds even if $S$ is not exponentiable.
An example is given of a category $\mathcal{C}$ that satisfies the axioms, but for which there is no elementary topos $\mathcal{E}$ such that $\mathcal{C}$ is the category of locales over $\mathcal{E}$.
It is shown, in outline, how the results can be extended from groups to groupoids.
\end{abstract}
\section{Introduction}
Given a category $\mathcal{C}$ with finite products and an internal group $G$, a categorical axiom is said to be \emph{$G$-stable} provided if it is true of $\mathcal{C}$ then so too is it true of $[ G , \mathcal{C}]$, the category of $G$-objects. An example is the property of having equalizers. A non-example is the property `every epimorphism splits' which holds in the category $\mathbf{Set}$, if the axiom of choice is true, but for any group $G$, $G \rTo 1$ is an epimorphic $G$-homomorphism which is split if and only if $G$ is trivial.
A set of categorical axioms, investigated in \cite{towhofman}, \cite{closedsubgroup}, \cite{towaxioms} and \cite{towslice}, captures various properties of the category of locales. Certain aspects of locale theory can be developed axiomatically: proper and open maps are pullback stable, the Hofmann-Mislove result shown, the closed subgroup theorem holds, Plewe's result that triquotient surjections are of effective descent proved, the patch construction developed, etc. The purpose of this paper is to explore the question of whether the axioms are $G$-stable for an internal group $G$. The answer is that, with a minor modification that does not weaken the theory, the axioms are $G$-stable. The minor modification is that the existence of coequalizers is no longer an axiom. Intuitively a modification of this sort is needed as constructing coequalizers in $[G,\mathcal{C}]$ appears to require coequalizers that are stable under products, and this stability is an additional property not true of the category of locales.
Once we have established that the axioms are $G$-stable we then establish a new result which is that not every category that satisfies the axioms is a category of locales for some topos. Any open or compact localic group that is not \'{e}tale complete (in the sense of Moerdijk, e.g. Section 7 of \cite{MoerClassTop}) provides an example.
Our next step is to verify that even without any coequalizers in $\mathcal{C}$, key results about coequalizers still hold. Specifically we show that triquotient surjections are coequalizers and that for every open or compact group $G$, there is a connected components adjunction $[ G , \mathcal{C}] \pile{\rTo \\ \lTo} \mathcal{C}$.
Finally we include some comments on how it is easy to extend the results from internal groups to groupoids, given that the axioms are slice stable (\cite{towslice}).
\section{Preliminary categorical definitions and main categorical result}\label{prelim} Let $\mathcal{C}$ be a category with finite products and $S$ an object of $\mathcal{C}$. We use the notation $S^X$ for the presheaf \begin{eqnarray*} \mathcal{C}^{op} &\rTo &\mathbf{Set} \\ Y &\rMapsto &\mathcal{C}(Y\times X,S) \end{eqnarray*} It can be verified, using Yoneda's lemma, that $S^{X}$ is the exponential ${\bf y}S^{ {\bf y}X}$ in the presheaf category $[\mathcal{C}^{op},\mathbf{Set}]$, so the notation is reasonable (where $\bf{y}$ is the Yoneda embedding). We use $\mathcal{C}_S^{op}$ as notation for the full subcategory of $[\mathcal{C}^{op},\mathbf{Set}]$ consisting of objects of the form $S^X$; there is a contravariant functor $\mathcal{C} \rTo^{S^{(\_)}} \mathcal{C}_S^{op}$
Our first lemma is rather simple. \begin{lemma}\label{hannah} Let $\mathcal{C}$ be a category with finite products and $\mathbb{T}=(T,\eta,\mu)$ a monad on $\mathcal{C}$. Then for any two $\mathbb{T}$-algebras $(X,a: TX \rTo X)$ and $(S,s: TS \rTo S)$ the diagram \begin{eqnarray*} (S,s)^{(X,a)}\rTo^{(S,s)^a} (S,s)^{(TX,\mu_X)} \pile{\rTo^{(S,s)^{\mu_X}} \\ \rTo_{(S,s)^{Ta}}} (S,s)^{(TTX,\mu_{TX})} \end{eqnarray*} is an equalizer in $(\mathcal{C}^{\mathbb{T}})^{op}_{(S,s)}$. \end{lemma} \begin{proof} If $\epsilon: (S,s)^{(Y,b)} \rTo (S,s)^{(TX,\mu_X)}$ is a natural transformation such that $(S,s)^{\mu_X}\epsilon=(S,s)^{Ta}\epsilon$ then define $\bar{\epsilon}:(S,s)^{(Y,b)}\rTo (S,s)^{(X,a)}$ by setting $\bar{\epsilon}_{(Z,c)}(u)$ to \begin{eqnarray*} Z \times X \rTo^{Id_Z \times \eta_X} Z \times TX \rTo^{\epsilon_{(Z,c)}(u)}S\text{.} \end{eqnarray*} This is well defined (i.e. defines a $\mathbb{T}$-algebra homomorphism from $(Z,c) \times (X,a)$ to $(S,s)$) because \begin{eqnarray*} (Z,c) \times (TTX,\mu_X) \pile{ \rTo^{Id_Z \times \mu_X} \\ \rTo_{Id_Z \times Ta}} (Z,c) \times (TX, \mu_X) \rTo^{Id_Z \times a} (Z,c) \times (X,a) \end{eqnarray*} is a coequalizer in $\mathcal{C}^{\mathbb{T}}$ (it is $U$-split, by $Id_Z \times \eta_X$ and $Id_Z \times \eta_{Ta}$, where $U: \mathcal{C}^{\mathbb{T}} \rTo \mathcal{C}$ is the forgetful functor and $U$, being monadic, creates coequalizers for $U$-split forks). \end{proof} Recall that an adjunction $L\dashv R:\mathcal{D}\pile{\rTo \\ \lTo} \mathcal{C}$ between categories, both with finite products, satisfies \emph{Frobenius reciprocity} provided the map $L(R(X)\times W)\rTo^{(L\pi_{1},L\pi_{2})}LRX\times LW\rTo^{\varepsilon _{X}\times Id_{LW}}X\times LW$ is an isomorphism for all objects $W$ and $X$ of $\mathcal{D}$ and $\mathcal{C}$ respectively. 
For example any morphism $f:X\rTo Y$ of a cartesian category $\mathcal{C}$ gives rise to a pullback adjunction $\Sigma_f \dashv f^*: \mathcal{C}/X \rTo \mathcal{C}/Y$ that satisfies Frobenius reciprocity. For another example if $G=(G,m:G \times G \rTo G, e : 1 \rTo G, i:G \rTo G)$ is a group object in a category $\mathcal{C}$ with finite products, then the adjunction $G \times (\_) \dashv U : \mathcal{C} \rTo $$ [ G , \mathcal{C}]$ satisfies Frobenius reciprocity. Here $G \times (\_)$ sends an object $X$ of $\mathcal{C}$ to the $G$-object $(G\times X, m \times Id_X)$ and $U$ is the forgetful functor (forget the $G$ action). The counit of this adjunction, at a $G$-object $(X,a)$, is given by the $G$-homomorphism $a : G \times X \rTo X$ so establishing Frobenius reciprocity for the adjunction amounts to finding, for any object $Y$ of $\mathcal{C}$ and any $G$-object $(X,a)$, an inverse for $(G \times X \times Y, m \times Id_X \times Id_Y) \rTo^{(a\pi_{23},\pi_{13})} (X,a) \times (G \times Y,m \times Id_Y)$. The inverse is given by $ X \times G \times Y \rTo^{(\pi_2,a(i\pi_2,\pi_1),\pi_3)} G \times X \times Y$. Another way to establish Frobenius reciprocity is to recall that for any $G$-object $(X,a:G\times X \rTo X)$ there is a $G$-isomorphism $(G,m) \times (X, \pi_2) \cong (G,m) \times (X,a)$.
The adjunctions $G \times (\_) \dashv U$ are key to the considerations of this paper so we recall a couple of basic facts: (1) $U$ is monadic and (2) if $G_1$ and $G_2$ are two internal groups then to prove that $G_1$ is isomorphic to $G_2$ it is sufficient to exhibit an equivalence of categories $\psi: [ G_1 , \mathcal{C}] \rTo^{\simeq} $$[ G_2 , \mathcal{C}]$ which commutes with the adjunction; that is, there exists a natural isomorphism $\beta: \psi G_1 \times (\_) \rTo^{\cong} G_2 \times (\_)$. To see (2) notice that $G_i = [U \circ G_i \times (\_)](1)$ for $i=1,2$ and $\psi$ (together with the natural isomorphism $\beta$) commute with the two monad structures. The next lemma is a generalisation of change of base: \begin{lemma}\label{Frob} Let $\mathcal{C}$ and $\mathcal{D}$ be two categories with finite products and $L: \mathcal{D} \rTo \mathcal{C}$ and $R: \mathcal{C} \rTo \mathcal{D}$ two functors such that $L \dashv R$ and the adjunction satisfies Frobenius reciprocity. Then for any object $S$ of $\mathcal{C}$, $L \dashv R$ extends contravariantly to an adjunction $\mathcal{D}_{RS}^{op}\pile{\rTo \\ \lTo} \mathcal{C}^{op}_S$\end{lemma} This lemma is essentially originally shown in \cite{towgeom}. In the case that the adjunction is a pullback adjunction arising from a locale map and $S$ is the Sierpi\'{n}ski locale, the morphisms of $\mathcal{C}_{S}^{op}$ can be used to represent dcpo homomorphisms and the adjunction established by the lemma shows how to move dcpo homomorphisms between sheaf toposes; this is how the lemma can be viewed as a generalisation of change of base. Consult \cite{towaxioms} for more detail. \begin{proof} Precomposition with $L$ and $R$ defines for any adjunction $L \dashv R$ an adjunction between presheaf categories, $[\mathcal{D}^{op},\mathbf{Set}] \pile{ \rTo \\ \lTo } $$ [\mathcal{C}^{op},\mathbf{Set}]$. 
But the Frobenius condition implies for $W$ and $X$ of $\mathcal{D}$ and $\mathcal{C}$ respectively that $S^XL\cong RS^{RX}$ and $S^WR\cong {S}^{LW}$ and so the adjunction restricts to $\mathcal{D}_{RS}^{op}\pile{\rTo \\ \lTo} \mathcal{C}^{op}_S$ which can be seen to extend (via $S^{(\_)}$) the adjunction $L \dashv R$. The unit of the extension is given by $S^{\epsilon}$ and the counit by $RS^{\eta}$ where $\eta$ (respectively $\epsilon$) is the unit (counit) of $L \dashv R$. \end{proof} It is an exercise, based on the result just given, to verify that if $\delta : RS^{RX} \rTo RS^W$ then the adjoint transpose of $\delta$, written $\bar{\delta}: S^X \rTo S^{LW}$, is defined by setting, for any $u : Z \times X \rTo S$, $\bar{\delta}_Z(u)$ to be \begin{eqnarray*} Z \times LW \rTo^{[(\epsilon_Z \times Id_{LW})L(\pi_1,\pi_2)]^{-1}} L(RZ \times W) \rTo^{\widetilde{\delta_Z(Ru)}}S \end{eqnarray*} where $\tilde{(\_)}$ is the action of taking adjoint transpose under $L \dashv R$. Given this observation and our observation that the adjunction $G \times (\_) \dashv U : \mathcal{C} \pile{ \rTo \\ \lTo } $$[ G , \mathcal{C} ]$ satisfies Frobenius reciprocity the following corollary is almost immediate: \begin{corollary}\label{nathan} Let $\mathcal{C}$ be a category with finite products and $G$ an internal group. For any objects $Y$ and $S$ of $\mathcal{C}$ and $(X,a)$ a $G$-object, \begin{eqnarray*} Nat[S^{X},S^Y] \cong Nat[(S,\pi_2)^{(X,a)},(S,\pi_2)^{(G \times Y, m \times Id_Y)}] \end{eqnarray*} naturally in both arguments. The mate of $\delta:S^X \rTo S^Y$, evaluated at $u: (Z,c) \times (X,a) \rTo (S,\pi_2)$ is given by \begin{eqnarray*} Z \times G \times Y \rTo^{(c(i \pi_2 , \pi_1),\pi_3)} Z \times Y \rTo^{\delta_Z(u)} S \end{eqnarray*} i.e. $(z,g,y)$ is in $\bar{\delta}_{(Z,c)}(u)$ if and only if $(g^{-1}z,y)$ is in $\delta(u)$. 
\end{corollary} \begin{proof} In addition to the comments in the preamble, observe that the adjoint transpose of $\delta_Z(u)$ (under $G \times (\_) \dashv U$) is given by $G \times Z \times Y \rTo^{\pi_{23}} Z \times Y \rTo^{\delta(u)} S$ because $S$ has the trivial action. \end{proof}
Why are we so interested in these natural transformations? Essentially because they are by construction the points of the double exponential $S^{S^X}$; if that double exponential exists. The category of locales provides an example of a category where exponentials do not always exist (not all locales are locally compact) but for which double exponentiation (at the Sierpi\'{n}ski locale at least) does always exist, \cite{victow}. Therefore there is good reason to investigate double exponentiation categorically in the absence of an assumption of cartesian closedness and these natural transformations play a central role. Let us make this more precise, beginning with a definition.
\begin{definition} An object $S$ in a category $\mathcal{C}$ with finite products is \emph{double exponentiable} provided for every other object $X$ the exponential $({\bf y}S)^{S^X}$ exists in $[\mathcal{C}^{op},\mathbf{Set}]$ and is representable. \end{definition}
If an object is double exponentiable then a strong \emph{double exponential} monad can be defined on $\mathcal{C}$; its functor part sends an object $X$ to the object that represents $({\bf y}S)^{S^X}$ and the rest of the monad structure and the strength are determined by the universal property of the double exponential. The key universal property can be expressed by saying that if $P(X)$ is the functor part of the double exponential monad, evaluated at $X$, then for any other object $Y$, there is a bijection, natural in $X$ and $Y$, between morphisms $Y \rTo P(X)$ and natural transformations $S^X \rTo S^Y$. Notice that if $S$ is double exponentiable, the opposite of the Kleisli category of the double power monad, $\mathcal{C}^{op}_P$, can be identified with $\mathcal{C}^{op}_S$ (i.e. the full subcategory of $[\mathcal{C}^{op},\mathbf{Set}]$ consisting of objects of the form $S^X$). Composition of Kleisli arrows is just composition of natural transformations. We will treat the opposite of the Kleisli category as this full subcategory below without notating the equivalence.
We can now prove a categorical proposition which is of general interest and is the main technical insight of this paper.
\begin{proposition}\label{main} Let $\mathcal{C}$ be a category with finite products, $G$ an internal group and $S$ a double exponentiable object. Then $(S,\pi_2)$ is a double exponentiable object in $[G,\mathcal{C}]$. \end{proposition} \begin{proof} Let $(X,a)$ be a $G$-object. Our first observation is that $PX$ (i.e. the object representing ${\bf y}S^{S^X}$) can be made into a $G$-object by defining $a^{P}$ to be $G \times PX\rTo^{t_{G,X}} P(G \times X) \rTo^{P(a)}PX$, where $t$ is the strength on $P$. This follows by application of the definition of strength ($t_{1,X}\cong Id_{PX}$ and $t_{X \times Y, Z} = t_{X, Y\times Z} (Id_X \times t_{Y,Z})$). For any other $G$-object $(Y,b)$, $G$-homomorphisms from $Y$ to $PX$ correspond to natural transformations $\delta: S^X \rTo S^Y$ with the property that $\delta^G S^a = S^b\delta$. So to conclude the proof all we need to do is to show that such natural transformations are in bijection with natural transformations $(S,\pi_2)^{(X,a)}\rTo (S,\pi_2)^{(Y,b)}$. (The bijection must be natural in $(Y,b)$, but this aspect is straightforward and is not commented on further.)
Lemma \ref{hannah}, with $\mathbb{T}$ the monad induced by $ G\times (\_) \dashv U$, shows that natural transformations $\epsilon:(S,\pi_2)^{(X,a)}\rTo (S,\pi_2)^{(Y,b)}$ are in (natural) bijection with natural transformations $\epsilon':(S,\pi_2)^{(X,a)}\rTo (S,\pi_2)^{(G \times Y, m \times Id_Y)}$ such that $(S,\pi_2)^{m \times Id_Y} \epsilon' = (S,\pi_2)^{Id_G \times b} \epsilon' $ and the corollary shows \begin{eqnarray*} Nat[S^{X},S^Y] \cong Nat[(S,\pi_2)^{(X,a)},(S,\pi_2)^{(G \times Y, m \times Id_Y)}]\text{.} \end{eqnarray*} Since this last bijection is natural we can see that the mate of $S^b\delta$ is $(S,\pi_2)^{Id_G \times b} \bar{\delta}$ where $\bar{\delta}$ is the mate of $\delta:S^X \rTo S^Y$. So to complete the proof all that is required is a verification that $\overline{\delta^G S^a} = (S,\pi_2)^{m \times Id_Y} \bar{\delta}$.
Say we are given a $G$-homomorphism $u:(Z,c) \times (X,a) \rTo (S,\pi_2)$. By the corollary we have that $(z,g_1,g_2,y)$ belongs to $[(S,\pi_2)^{m \times Id_Y} \bar{\delta}]_{(Z,c)}(u)$ if and only if $(g_2^{-1}g_1^{-1}z,y)$ belongs to $\delta_Z(u)$. Now since $u$ is a $G$-homomorphism $u(z,gx)=u(g^{-1}z,x) $ and so by applying naturality of $\delta$ at $Z \times G \rTo^{c(i \pi_2, \pi_1)} Z$ we have $\delta_{Z \times G}( u ( Id_Z \times a))(z,g,y)=\delta_Z(u)(g^{-1}z,y)$. But then $\overline{\delta^G S^a}(u)$ is given by \begin{diagram} Z \times G \times G \times Y & \rTo & Z \times G \times Y & \rTo & Z \times Y & \rTo^{\delta_Z(u)} & S\\ (z,g_1,g_2,y) & \mapsto & (g_1^{-1}z,g_2,y) & \mapsto & (g_2^{-1}g_1^{-1}z,y) & & \\ \end{diagram}
\end{proof}
One of our categorical axioms, to follow, is that the category in question must be order enriched. Finite limits are assumed to be order enriched finite limits; that is, their universal property is an order isomorphism, not just a bijection. The above analysis works equally well with order isomorphisms in place of bijections; therefore, \begin{proposition}\label{mainenriched} Let $\mathcal{C}$ be an order enriched category with finite products, $G$ an internal group and $S$ a double exponentiable object. Then $(S,\pi_2)$ is a double exponentiable object in $[G,\mathcal{C}]$. \end{proposition}
We need to discuss \emph{order internal} lattices in the context of an order enriched category; i.e. lattices such that the meet and join operations are adjoints to the diagonal (so being a lattice, join semilattice, meet semilattice, distributive lattice etc is a property of the object, not additional structure on the object). The following lemma will be needed: \begin{lemma}\label{meet} If $\mathcal{C}$ is an order enriched category with finite products, then for any order internal meet semilattice $A$, if $A_0 \pile{ \rInto^i \\ \lOnto_q}A$ is a splitting of an inflationary idempotent $\psi: A \rTo A$ (i.e. $Id_A \sqsubseteq \psi=iq$ and $Id_{A_0}=qi$), then $A_0$ is an order internal meet semilattice and $i$ is a meet semilattice homomorphism. Further, $q$ preserves the top element (i.e. $q1_A = 1_{A_0}$). \end{lemma} \begin{proof} Define $1_{A_0} : 1 \rTo A_0$ to be $q1_A$ (so $q$ preserves top) and $\sqcap_{A_0} : A_0 \times A_0 \rTo A_0$ to be $q\sqcap_A (i \times i ) : A \times A \rTo A$. It can be verified that $!^{A_0} \dashv 1_{A_0}$ and $\Delta_{A_0} \dashv \sqcap_{A_0} $ and so $A_0$ is an order internal meet semilattice. To prove $i$ is a meet semilattice homomorphism we need to show (i) that $i$ preserves the top element and (ii) $iq\sqcap_{A}(i \times i) = \sqcap_A (i \times i)$. For (i) notice that $Id_A \sqsubseteq i 1_{A_0} !^A$ because $Id_A \sqsubseteq iq$ and so $i1_{A_0}=1_A$ by uniqueness of right adjoints. For (ii), as $Id_A \sqsubseteq iq$ it just needs to be checked that $iq\sqcap_{A}(i \times i) \sqsubseteq \sqcap_A (i \times i)$; equivalently, $\Delta_A iq \sqcap_A ( i \times i ) \sqsubseteq i \times i$ since $\Delta_A \dashv \sqcap_A$. But this last inequality is clear because $\Delta_A iq = (i \times i )(q \times q) \Delta_A$, $\Delta_A \sqcap_A \sqsubseteq Id_{A \times A}$ and $(i \times i )(q \times q) (i \times i)= i \times i $. \end{proof}
If, further, $\mathcal{C}$ has finite coproducts and is distributive (i.e. the canonical map $X \times Y + X \times Z \rTo X \times (Y + Z) $ is an isomorphism for any three objects $X$, $Y$ and $Z$ and $ X\times 0 \cong 0$ for any $X$) then for any object $S$, $\mathcal{C}^{op}_S$ has products; $S^X \times S^Y$ is given by $S^{X+Y}$ and the final object is $S^0$.
If additionally, $S$ is an order internal lattice and is double exponentiable then provided $\mathcal{C}$ has equalizers (and so is cartesian) two submonads of $P$ can be defined; a lower one, whose points are those natural transformations $S^X \rTo S^Y$ that are join semilattice homomorphisms and an upper one, whose points are meet semilattice homomorphisms. By reversing the order enrichment you switch between the lower and upper submonads. By construction the opposite of the Kleisli categories of the lower and upper monads can be identified with subcategories $\mathcal{C}_S^{op}$; they have the same objects and have as morphisms those natural transformations that are join (respectively meet) semilattice homomorphisms. Notice that all objects of the opposites of the Kleisli categories are order internal lattices which are distributive if $S$ is. See \cite{towhofman} for more detail on the construction of the lower and upper submonads.
Our final categorical definition is that of an object which behaves like the Sierpi\'{n}ski space. Given a cartesian order enriched category, an object $\mathbb{S}$ is a \emph{Sierpi\'{n}ski object} if it is an order internal distributive lattice such that given a pullback \begin{diagram} a^{\ast }(i) & \rTo & 1 \\ \dInto & & \dInto_i \\ X & \rTo^{a} & \mathbb{S} \end{diagram} $a$ is uniquely determined by $a^{\ast }(i)\rTo X$ for $ i:1\rInto \mathbb{S}$ equal to either $0_{\mathbb{S}}$ or $1_{\mathbb{S}}$. If a Sierpi\'{n}ski object is double exponentiable then we use $\mathbb{P}$ for the double exponential monad and call it a \emph{double power} monad; $P_L$ and $P_U$ are used for the lower and upper power monads, when these can be defined as submonads of $\mathbb{P}$.
\section{The axioms}\label{axioms} {\bf Axiom 1.} \emph{$\mathcal{C}$ is an order enriched category with order enriched finite limits and finite coproducts.}
{\bf Axiom 2.} \emph{For any morphism $f:X \rTo Y$ of $\mathcal{C}$ the pullback functor $f^* : \mathcal{C}/Y \rTo \mathcal{C}/X$ preserves finite coproducts.}
The property of being order enriched and having finite limits is $G$-stable, for any internal group $G$, as finite limits are created in $\mathcal{C}$ and the order enrichment on $[G,\mathcal{C}]$ can be taken from $\mathcal{C}$. Given Axiom 2, $[G,\mathcal{C}]$ has coproducts since if $(X,a)$ and $(Y,b)$ are two $G$-objects then \begin{eqnarray*} G \times ( X+ Y) \rTo^{\cong} (G \times X ) + (G \times Y )\rTo^{a +b } X+Y \end{eqnarray*} makes $X+Y$ into a $G$-object that can be easily checked to be a coproduct. The nullary case is similar. If $f$ is a morphism of $G$-objects (i.e. a $G$-homomorphism) then pullback along $f$ preserves coproduct in $[G,\mathcal{C}]$ since $G$-object pullback and coproduct are created in $\mathcal{C}$. Therefore, \begin{lemma} Axioms 1 and 2 are jointly $G$-stable for any internal group $G$. \end{lemma}
{\bf Axiom 3.} \emph{$\mathcal{C}$ has a Sierpi\'{n}ski object, $\mathbb{S}$.}
It is immediate that this axiom is $G$-stable, for any order enriched cartesian category $\mathcal{C}$, because pullbacks are created in $\mathcal{C}$. The canonical Sierpi\'{n}ski object in $[G,\mathcal{C}]$ is $(\mathbb{S},\pi_2)$.
{\bf Axiom 4.} \emph{$\mathbb{S}$ is double exponentiable.}
That this axiom is $G$-stable follows from Proposition \ref{main}. Notice from the proposition that the morphisms of the Kleisli category $ [G , \mathcal{C} ]_{\mathbb{P}_G}$ can be identified with natural transformations $\delta: \mathbb{S}^X \rTo \mathbb{S}^Y$ with the property $\mathbb{S}^b\delta=\delta^G \mathbb{S}^a$. It is easy to see that the lower (upper) Kleisli maps correspond to $\delta $s that are join (meet) semilattice homomorphisms.
{\bf Axiom 5.} \emph{For any objects $X$ and $Y$, any natural transformation $\alpha : \mathbb{S}^X\rTo \mathbb{S}^Y$ that is also a distributive lattice homomorphism, is of the form $\mathbb{S}^f$ for some unique $f:Y \rTo X$.}
If $\epsilon:(S,\pi_2)^{(Y,b)} \rTo (S,\pi_2)^{(X,a)} $ is a natural transformation that is also a distributive lattice homomorphism then the corresponding natural transformation $\delta: \mathbb{S}^X \rTo \mathbb{S}^Y$ is also a distributive lattice homomorphism and so, assuming the axiom, is equal to $\mathbb{S}^f$ for some unique $f: Y \rTo X$. However, by applying the uniqueness part of the axiom, we see that $f$ is a $G$-homomorphism. It is routine to then check, using the order isomorphism established in Proposition \ref{main}, that if $\delta$ is of the form $\mathbb{S}^f$ then $\epsilon$ must be $(\mathbb{S},\pi_2)^f$. This shows that the axiom is $G$-stable.
{\bf Axiom 6.} \emph{(i) Inflationary idempotents split in the Kleisli category $\mathcal{C}_{P_L}$.}
\emph{(ii) Deflationary idempotents split in the Kleisli category $\mathcal{C}_{P_U}$.}
\cite{towhofman} shows that these conditions are equivalent to the assumption that the monad $P_L$ (respectively $P_U$) is KZ (respectively coKZ).
Say $\alpha: \mathbb{S}^X \rTo \mathbb{S}^X$ is an inflationary idempotent join semilattice homomorphism that splits as $ \mathbb{S}^{X_0}\pile{ \rInto^{\theta} \\ \lOnto_{\gamma}} \mathbb{S}^X$ in (the opposite of) the lower Kleisli category; so $\theta$ and $\gamma$ are both join semilattice homomorphisms. Then, in the presence of Axiom 5, $\theta$ must be equal to $\mathbb{S}^q$ for some unique $q$. This follows as Lemma \ref{meet} shows that $\theta$ is a meet semilattice homomorphism. Notice also, by the `Further' part of that lemma, that $\gamma$ preserves top.
\begin{lemma} Axiom 6 is $G$-stable (given Axioms 1-5). \end{lemma} In summary the proof follows by applying our description of the double power Kleisli morphisms of $[G,\mathcal{C}]$ in terms of the double power Kleisli morphisms of $\mathcal{C}$. \begin{proof} If $(X,a)$ is a $G$-object and $\delta: \mathbb{S}^X \rTo \mathbb{S}^X$ an idempotent inflationary join semilattice homomorphism such that $\mathbb{S}^a\delta=\delta^G \mathbb{S}^a$, then $\delta$ factors as $\mathbb{S}^X \rOnto^{\gamma} \mathbb{S}^{X_0} \rInto^{\mathbb{S}^q} \mathbb{S}^X$; see the preamble to the statement of the lemma. Further $\delta^G$ factors as $\mathbb{S}^{Id_G \times q} \gamma^G$. Consider $\nu: \mathbb{S}^{X_0}\rTo \mathbb{S}^{G \times X_0}$ defined as $\gamma^G \mathbb{S}^a\mathbb{S}^q$. The two squares in the following diagram commute since $\gamma$ is a (split) epimorphism and $\mathbb{S}^{Id_G \times q}$ is a (split) monomorphism: \begin{diagram} \mathbb{S}^X & \rTo^{\gamma} & \mathbb{S}^{X_0} & \rTo^{\mathbb{S}^q } & \mathbb{S}^X \\ \dTo^{\mathbb{S}^{a}} & & \dTo_{\nu} & & \dTo_{\mathbb{S}^a} \\ \mathbb{S}^{G \times X} & \rTo^{\gamma^G} & \mathbb{S}^{G \times X_0} & \rTo^{\mathbb{S}^{Id_G \times q}} & \mathbb{S}^{G \times X} \\ \end{diagram} We now claim that $\nu$ is a meet semilattice homomorphism. To see this, by the `Further' part of Lemma \ref{meet}, we see that $\nu$ preserves top because both $\gamma$ and $\gamma^G$ preserve top. To establish preservation by $\nu$ of binary meets one needs but to check that $\sqcap_{\mathbb{S}^{G \times X_0}} (\nu \times \nu)\sqsubseteq \nu \sqcap_{\mathbb{S}^{X_0}}$. Now from Lemma \ref{meet} we know that $\sqcap_{\mathbb{S}^{X_0}} = \gamma \sqcap_{\mathbb{S}^X} ( \mathbb{S}^q \times \mathbb{S}^q)$ (and similarly for $\sqcap_{\mathbb{S}^{G \times X_0}}$). Therefore: \begin{eqnarray*} \nu \sqcap_{\mathbb{S}^{X_0}} & = & \gamma^G \mathbb{S}^a\mathbb{S}^q \gamma \sqcap_{\mathbb{S}^X} ( \mathbb{S}^q \times \mathbb{S}^q) \\
& \sqsupseteq & \gamma^G \mathbb{S}^a \sqcap_{\mathbb{S}^X} ( \mathbb{S}^q \times \mathbb{S}^q) \\
& = & \gamma^G \sqcap_{\mathbb{S}^{G \times X}} (\mathbb{S}^a \times \mathbb{S}^a )( \mathbb{S}^q \times \mathbb{S}^q) \\
& = & \gamma^G \sqcap_{\mathbb{S}^{G \times X} } (\mathbb{S}^{Id_G \times q} \times \mathbb{S}^{Id_G \times q}) ( \nu \times \nu) \\
& = & \sqcap_{\mathbb{S}^{G \times X_0} } ( \nu \times \nu) \text{.}\\ \end{eqnarray*} Since then $\nu$ is a distributive lattice homomorphism it is of the form $\mathbb{S}^t$ for some (unique) $t: G \times X_0 \rTo X_0$ and it is readily checked that $(X_0,t)$ is a $G$-object. By construction $\gamma$ and $\mathbb{S}^q$ commute with $\mathbb{S}^a$ and $\mathbb{S}^t$ and so correspond to morphisms of $[G,\mathcal{C}]^{op}_{\mathbb{P}_G}$ (i.e. natural transformations relative to the category of $G$-objects). This proves stability of 6(i); part (ii) is order dual. \end{proof}
{\bf Axiom 7.} \emph{For any equalizer diagram } \begin{diagram} E & \rTo^{e} & X & \pile { \rTo^f \\ \rTo_g} & Y \end{diagram} in $\mathcal{C}$ the diagram \begin{diagram} \mathbb{S}^{X}\times \mathbb{S}^{X}\times \mathbb{S}^{Y} & \pile{ \rTo^{\sqcap (Id\times \sqcup )(Id\times Id\times \mathbb{S}^{f})} \\ \rTo_{\sqcap (Id\times \sqcup )(Id\times Id\times \mathbb{S}^{g})} } & \mathbb{S}^{X} & \rTo^{\mathbb{S}^{e}} & \mathbb{S}^{E} \end{diagram} \emph{is a coequalizer in $\mathcal{C}_{\mathbb{P}}^{op}$}.
Note that Axiom 7 does not break the symmetry given by the order enrichment. A short calculation using the distributivity assumption on $ \mathbb{S}$ shows that the composite $\sqcup (Id\times \sqcap )$ could have been used in the place of $\sqcap (Id\times \sqcup )$.
Stability of this axiom is also straightforward as $\mathbb{S}^e$ is an epimorphism in $\mathcal{C}_{\mathbb{P}}^{op}$. In more detail say $(E,d)\rTo^e (X,a)$ is an equalizer of $f,g:(X,a) \pile{ \rTo \\ \rTo } (Y,b) $ in $[ G , \mathcal{C}]$ and $(Z,c)$ is a $G$-object, then for any $\delta : \mathbb{S}^X \rTo \mathbb{S}^Z$ which has $\mathbb{S}^c \delta = \delta^G \mathbb{S}^a$ we also have $\mathbb{S}^c \delta' \mathbb{S}^e= (\delta')^G \mathbb{S}^d\mathbb{S}^e$ if $\delta$ factors as $\delta'\mathbb{S}^e$ because $e$ is a $G$-homomorphism. $\delta'$ must then correspond to a morphism of $[G,\mathcal{C}]^{op}_{\mathbb{P}_G}$.
\begin{definition} A category $\mathcal{C}$ satisfying the axioms is called a \emph{category of spaces}. \end{definition} \begin{example} The category of locales relative to an elementary topos $\mathcal{E}$, written $\mathbf{Loc}_{\mathcal{E}}$, is a category of spaces. The axioms are all known properties of the category of locales; e.g. \cite{towaxioms} and \cite{towhofman}. \end{example}
For clarity, collecting together the various observations already made: \begin{theorem} The axioms are $G$-stable for any internal group $G$; in other words if $\mathcal{C}$ is a category of spaces then so is $[G,\mathcal{C}]$ for any internal group $G$. \end{theorem}
\section{Categories of spaces that are not categories of locales} In this section we provide a class of examples which shows that not every category of spaces is a category of locales for some elementary topos $\mathcal{E}$. To give this example we must first recall a few basic definitions and results about categories of spaces and a proposition about the representation of geometric morphisms as certain adjunctions between categories of locales. \begin{definition} (1) A morphism $f:X \rTo Y $ of a category of spaces is \emph{open} if there exists $\exists_f : \mathbb{S}^X \rTo \mathbb{S}^Y$ left adjoint to $\mathbb{S}^f$ such that $\exists_f\sqcap_{\mathbb{S}^X}(Id_{\mathbb{S}^X} \times \mathbb{S}^f)=\sqcap_{\mathbb{S}^Y}(\exists_f \times Id_{\mathbb{S}^Y})$ (Frobenius condition). (2) An object $X$ of a category of spaces is \emph{open} if $!:X \rTo 1$ is an open map. (3) An object $X$ of a category of spaces is \emph{discrete} if it is open and $\Delta : X \rTo X \times X$ is open.
\end{definition} In the case where the category of spaces is a category of locales, the usual meanings are recovered; \cite{towaxioms}. Any elementary topos $\mathcal{E}$ can be identified with the full subcategory of $\mathbf{Loc}_{\mathcal{E}}$ consisting of discrete objects. One easily checks that all isomorphisms are open maps (notice: $\exists_{\phi^{-1}}=\mathbb{S}^{\phi}$ for any isomorphism $\phi$), and the property of being an open map is stable under composition, relative to any category of spaces; $\exists_{fg}=\exists_f\exists_g$ for any composable pair of morphisms $f$ and $g$. Further, open maps are pullback stable (\cite{towaxioms}) and the usual Beck-Chevalley condition holds for any pullback square (where an open map is being pulled back).
\begin{lemma} If $\mathcal{C}$ is a category of spaces and $G=(G,m,e,i)$ is an internal group then a $G$-homomorphism $f:(X,a) \rTo (Y,b)$ is open relative to $[ G , \mathcal{C} ]$ if and only if $f: X \rTo Y $ is open relative to $\mathcal{C}$. \end{lemma} \begin{proof} If $f$ is open as a $G$-homomorphism then there is a natural transformation $ (\mathbb{S}, \pi_2)^{(X,a)} \rTo (\mathbb{S}, \pi_2)^{(Y,b)} $ left adjoint to $(\mathbb{S}, \pi_2)^f$ and satisfying the Frobenius condition. But this natural transformation corresponds to a natural transformation $\mathbb{S}^X \rTo \mathbb{S}^Y $ which can be seen to witness that $f$ is open relative to $\mathcal{C}$. In the other direction if $f$ is open relative to $\mathcal{C}$ then there is $\exists_f : \mathbb{S}^X \rTo \mathbb{S}^Y $ left adjoint to $\mathbb{S}^f$ witnessing that $f$ is an open map of $\mathcal{C}$. So to complete the proof we can just check that the diagram \begin{diagram} \mathbb{S}^X & \rTo^{\exists_f} & \mathbb{S}^Y \\ \dTo^{\mathbb{S}^a} & & \dTo_{\mathbb{S}^b} \\ \mathbb{S}^{G \times X} & \rTo^{\exists_f^G} & \mathbb{S}^{G \times Y}\\ \end{diagram} commutes, since then $\exists_f$ corresponds to a natural transformation $ (\mathbb{S}, \pi_2)^{(X,a)} \rTo (\mathbb{S}, \pi_2)^{(Y,b)} $ relative to $[ G , \mathcal{C}]$, which can be seen to witness that $f$ is open as a $G$-homomorphism.
To prove that the square commutes, notice that $b: G \times Y \rTo Y $ factors as $G\times Y \rTo^{(\pi_1,b)} G \times Y \rTo^{\pi_2^Y} Y$ where the first factor is an isomorphism, and so
\begin{eqnarray*} \mathbb{S}^b \exists_f & = & \mathbb{S}^{(\pi_1,b)} \mathbb{S}^{\pi_2^Y}\exists_f \\
& = & \mathbb{S}^{(\pi_1,b)} \exists_{Id_G \times f} \mathbb{S}^{\pi_2^X} \\
& = & \exists_{(\pi_1,b(i \times Id_Y))} \exists_{Id_G \times f} \mathbb{S}^{\pi_2^X} \\
& = & \exists_{Id_G \times f} \exists_{(\pi_1,a(i \times Id_X))} \mathbb{S}^{\pi_2^X} \\
& = & \exists^G_ f \mathbb{S}^{(\pi_1,a )} \mathbb{S}^{\pi_2^X} \\
& = & \exists^G_ f \mathbb{S}^a \\ \end{eqnarray*} where the second line is by Beck-Chevalley applied to the pullback square that is formed by pulling $f:X \rTo Y$ back along $\pi_2^Y : G \times Y \rTo Y$, the third and fifth lines use $\exists_{\phi^{-1}}=\mathbb{S}^{\phi}$ for any isomorphism $\phi$, and the fourth line follows because $f$ is a $G$-homomorphism.
\end{proof} If $G$ is a group in a category of spaces $\mathcal{C}$ then we use $BG$ for the full subcategory of $[ G , \mathcal{C}]$ consisting of discrete objects; the lemma can be applied to show that $BG$ is the full subcategory that consists of those $G$-objects $(X,a)$ such that $X$ is discrete relative to $\mathcal{C}$. So, in the case $\mathcal{C}=\mathbf{Loc}$, $BG$ recovers its usual meaning: $G$-sets.
\begin{proposition}\label{geommorph} Let $\mathcal{F}$ and $\mathcal{E}$ be two elementary toposes. There is an equivalence between the category of order enriched Frobenius adjunctions $L \dashv R : \mathbf{Loc}_{\mathcal{F}} \pile{\rTo \\ \lTo } \mathbf{Loc}_{\mathcal{E}}$ such that $R$ preserves the Sierpi\'{n}ski locale and the category of geometric morphisms from $\mathcal{F}$ to $\mathcal{E}$. Every such Frobenius adjunction is determined up to isomorphism by the restriction of its right adjoint to discrete objects. \end{proposition} \begin{proof} This is the main result of \cite{towgeom}. \end{proof} If $\mathcal{F} \rTo \mathcal{E}$ is a geometric morphism then we use $\Sigma_f \dashv f^*$ for the corresponding adjunction between categories of locales. We are now in a position to give our example.
\begin{example} It is not the case that every category of spaces arises as the category of locales for some elementary topos. Let $G$ be a localic group, and say $\psi: [ G , \mathbf{Loc}] \rTo^{\simeq} \mathbf{Loc}_{\mathcal{E}}$ for some elementary topos $\mathcal{E}$ (such that the equivalence sends the Sierpi\'{n}ski locale relative to $\mathcal{E}$ to the canonical Sierpi\'{n}ski object of $[ G , \mathbf{Loc}]$). It follows that the discrete objects of $ \mathbf{Loc}_{\mathcal{E}}$ can be identified with the discrete objects of $[ G , \mathbf{Loc}]$; but these last are $BG$. It follows that $BG \simeq \mathcal{E}$ and therefore that there is an equivalence $\phi: \mathbf{Loc}_{\mathcal{E}} \rTo^{\simeq} \mathbf{Loc}_{BG}$. So there is an adjunction \begin{eqnarray*} \mathbf{Loc} \pile{\rTo^{G \times (\_)} \\ \lTo_U} [ G , \mathbf{Loc}] \pile{ \rTo^{\psi} \\ \lTo_{\psi^{-1}}} \mathbf{Loc}_{\mathcal{E}} \pile{ \rTo^{\phi} \\ \lTo_{\phi^{-1}}} \mathbf{Loc}_{BG} \end{eqnarray*} which satisfies Frobenius reciprocity and whose right adjoint preserves the Sierpi\'{n}ski locale. Further the restriction of the right adjoint of this adjunction to discrete locales is the forgetful functor and so by the last proposition this adjunction must be isomorphic to the adjunction $\Sigma_{p_G} \dashv p_G^*$ determined by the canonical point $p_G : \mathbf{Set} \rTo BG$ of $BG$.
For any open localic group we know that the geometric morphism $p_G : \mathbf{Set} \rTo BG$ is an open surjection (see Lemma C5.3.6 of \cite{Elephant} and the comments before it). But locales descend along open surjections (Theorem C5.1.5 of \cite{Elephant}) and the definition of locales descending along $p_G$ is that the functor $\rho: \mathbf{Loc}_{BG} \rTo $$ [ \hat{G} , \mathbf{Loc}]$, induced by $p_G^* : \mathbf{Loc}_{BG} \rTo \mathbf{Loc}$ (i.e. $U\rho=p_G^*$), is an equivalence, where $\hat{G}$ is the \'{e}tale completion of $G$ (see e.g. Lemma C5.3.16 of \cite{Elephant} for a bit more detail). Therefore there exists an equivalence of categories $[ G , \mathbf{Loc} ] \simeq [ \hat{G} , \mathbf{Loc}]$ which commutes with the canonical adjunction back to $\mathbf{Loc}$. This is sufficient to show that $G \cong \hat{G}$ (see the comments before Lemma \ref{Frob}); i.e. that $G$ is \'{e}tale complete. Since not every open localic group is \'{e}tale complete, it is not the case that every category of spaces is a category of locales over some topos. \end{example}
\section{Making do without coequalizers} \subsection{Making do: inside $\mathcal{C}$} An achievement of the axiomatic approach to locale theory is that it covers Plewe's result that localic triquotient surjections are effective descent morphisms (which generalises the more well known results that localic proper and open surjections are effective descent morphisms). To prove the result one needs to show that triquotient surjections are regular epimorphisms and, on the surface, this appears to require some coequalizers of the ambient category $\mathcal{C}$. We now show how to avoid this requirement.
\begin{definition} Given a morphism $p: Z \rTo Y$ in a category of spaces, a \emph{triquotient assignment on $p$} is a natural transformation $p_{\#}: \mathbb{S}^Z \rTo \mathbb{S}^Y$ satisfying
(i) $\sqcap_{\mathbb{S}^Y}( p_{\#} \times Id_{\mathbb{S}^Y}) \sqsubseteq p_{\#}\sqcap_{\mathbb{S}^Z} (Id_{\mathbb{S}^Z} \times \mathbb{S}^p )$ and
(ii) $ p_{\#}\sqcup_{\mathbb{S}^Z} (Id_{\mathbb{S}^Z} \times \mathbb{S}^p )\sqsubseteq \sqcup_{\mathbb{S}^Y}( p_{\#} \times Id_{\mathbb{S}^Y})$.
Further $p$ is a \emph{triquotient surjection} if it has a triquotient assignment $p_{\#}$ such that $p_{\#}\mathbb{S}^p = Id_{\mathbb{S}^Y}$. \end{definition}
Consult \cite{towaxioms} for more detail on triquotient assignments and the role they play in the axiomatic approach. In particular note that the usual `Beck-Chevalley for pullback squares' result holds: if $p_{\#}$ is a triquotient assignment on $p:Z \rTo Y$ then for any $f: X \rTo Y$ there is a triquotient assignment $(\pi_1)_{_\#}$ on $\pi_1 :X \times_Y Z \rTo X$ such that $(\pi_1)_{_\#}\mathbb{S}^{\pi_2} = \mathbb{S}^f p_{\#}$. Notice that if $p:Z \rTo Y$ is a triquotient surjection witnessed by the triquotient assignment $p_{\#} : \mathbb{S}^Z \rTo \mathbb{S}^Y$, then $p_{\#}(1)=1$ and $p_{\#}(0)=0$. Conversely if $p: Z \rTo Y$ has a triquotient assignment $p_{\#}$ with $p_{\#}(1)=1$ and $p_{\#}(0)=0$ then $p_{\#}(\mathbb{S}^p(b))=p_{\#}(0 \sqcup \mathbb{S}^p(b))\sqsubseteq p_{\#}(0) \sqcup b = b$ and order dually $b \sqsubseteq p_{\#}(\mathbb{S}^p(b))$ and so $p$ is a triquotient surjection. Using this characterization of triquotient surjection it is clear from Beck-Chevalley for pullback squares that triquotient surjections are pullback stable. We now prove that triquotient surjections are regular epimorphisms. \begin{proposition} If $\mathcal{C}$ is a category of spaces and $p: Z \rTo Y$ a triquotient surjection then $p$ is a regular epimorphism. \end{proposition} \begin{proof} Let $p_1,p_2: Z \times_Y Z \pile{\rTo \\ \rTo} Z$ be the kernel pair of $p$. The diagram \begin{diagram} \mathbb{S}^Y & \pile{\rTo^{\mathbb{S}^p} \\ \lTo_{p_{\#}}} & \mathbb{S}^Z & \pile{ \rTo^{\mathbb{S}^{p_2}} \\ \rTo^{\mathbb{S}^{p_1} } \\ \lTo_{(p_1)_{\#}}} & \mathbb{S}^{Z \times_Y Z }\\ \end{diagram} is a split fork in $\mathcal{C}_{\mathbb{P}}^{op}$. For any $q: Z \rTo W$ with $qp_1=qp_2$ we therefore have that $\mathbb{S}^q$ factors (uniquely) as $\mathbb{S}^p \alpha $ for some natural transformation $\alpha$ (it is given by $p_{\#}\mathbb{S}^q$). By Axiom 5 it therefore only remains to check that $\alpha$ is a distributive lattice homomorphism. 
Since we have already observed $p_{\#}$ preserves $0$ and $1$ we just need to show that $\alpha$ preserves binary meet and join, and for this it is sufficient to check $p_{\#}\mathbb{S}^q(c_1) \sqcap p_{\#}\mathbb{S}^q(c_2) \sqsubseteq p_{\#}\mathbb{S}^q(c_1 \sqcap c_2)$ and $ p_{\#}\mathbb{S}^q(c_1 \sqcup c_2) \sqsubseteq p_{\#}\mathbb{S}^q(c_1) \sqcup p_{\#}\mathbb{S}^q(c_2)$. But \begin{eqnarray*} p_{\#}\mathbb{S}^q(c_1) \sqcap p_{\#}\mathbb{S}^q(c_2) & \sqsubseteq & p_{\#}(\mathbb{S}^q c_1 \sqcap \mathbb{S}^p p_{\#} \mathbb{S}^q c_2) \\ & = & p_{\#}(\mathbb{S}^q c_1 \sqcap (p_1)_{\#} \mathbb{S}^{p_2} \mathbb{S}^q c_2) \text{ (Beck-Chevalley)}\\ & = & p_{\#}(\mathbb{S}^q c_1 \sqcap (p_1)_{\#} \mathbb{S}^{p_1} \mathbb{S}^q c_2) \text{ (since $qp_1=qp_2$)} \\ & = & p_{\#}(\mathbb{S}^q c_1 \sqcap \mathbb{S}^q c_2) \text{ ($p_1$ triquotient surjection)}\\ & = & p_{\#}\mathbb{S}^q ( c_1 \sqcap c_2)\\ \end{eqnarray*} and $ p_{\#}\mathbb{S}^q(c_1 \sqcup c_2) \sqsubseteq p_{\#}\mathbb{S}^q(c_1) \sqcup p_{\#}\mathbb{S}^q(c_2)$ follows by an order dual proof and so we are done. \end{proof} Further details on the axiomatic proof that triquotient surjections are of effective descent are contained in \cite{towaxiomsdraft}.
\subsection{Making do: maps between $\mathcal{C}$s} If $G$ is an internal group in a category of spaces we have established that $[G,\mathcal{C}]$ is a category of spaces. Since we have also recalled in Proposition \ref{geommorph} that geometric morphisms can be represented as certain adjunctions between categories of spaces it would be odd if there was not a natural `connected components' adjunction $\Sigma_G \dashv G^*$; i.e. \begin{eqnarray*} [G,\mathcal{C}] \pile{\rTo^{\Sigma_G} \\ \lTo_{G^*} } \mathcal{C} \end{eqnarray*} where $G^*$ sends an object $X$ of $\mathcal{C}$ to the trivial $G$-object $(X,\pi_2)$. But for $\Sigma_G$ to exist it would appear that coequalizers are required, since $\Sigma_G(X,a)$ must (by uniqueness of left adjoints) be isomorphic to the coequalizer of $a$ and $\pi_2$. We now show, for open groups at least, that in fact $\Sigma_G$ can always be defined. (Order dually, $\Sigma_G$ will always exist for compact groups.) The proof does not require $G$ to be a group, only a monoid, but it is not clear what this extra level of generality offers us. To prove this result we need three lemmas, the first of which is a simple order enriched result: \begin{lemma}\label{hannahs} If \begin{eqnarray*} C \rTo^c A \pile{\rTo^a \\ \rTo_b } B \end{eqnarray*} is a fork diagram in an order enriched category $\mathcal{C}$ (i.e. $ac=bc$), then if there exists $q:A\rTo C$ and $t:B\rTo A$ such that $ta=cq \sqsupseteq Id_A$, $qc=Id_C$ and $tb \sqsubseteq Id_A$, then $c$ is the equalizer of $a$ and $b$.
\end{lemma} The result that $c$ is an equalizer is similar to the familiar result that split forks are coequalizers, used in Beck's monadicity theorem. An order enriched monadicity theorem can be written down, based on this result. \begin{proof} Say $d:D \rTo A$ has $ad=bd$. Then $cqd \sqsupseteq d$ and $cqd = tad = tbd \sqsubseteq d$; so, $cqd=d$ showing that $d$ factors via $c$, clearly uniquely as $c$ is split. This establishes an order isomorphism as the action of morphism composition preserves order. \end{proof}
For the next lemma observe that if $p:Z \rTo X$ is an open map with a section $s: X \rTo Z$ (i.e. $ps=Id_X$) then $\mathbb{S}^p \sqsubseteq \exists_p$. This is trivial to establish because $Id_{\mathbb{S}^Z} \sqsubseteq \mathbb{S}^p \exists_p$ since $\mathbb{S}^p$ is right adjoint to $\exists_p$. In particular we observe that for any open object $Y$, arbitrary object $X$, and map $g:X \rTo Y$, we have that $\mathbb{S}^{(g,Id_X)} \sqsubseteq \exists_{\pi_2}$, where $\pi_2 : Y \times X \rTo X$, which is open because it is the pullback of the open map $!:Y \rTo 1$. Our next lemma builds on this last observation. \begin{lemma} If $g: Z_1 \rTo Z_2$ is a map between two open objects and $X$ is some other object of $\mathcal{C}$, then $\exists_{\pi_2^{Z_1}}\mathbb{S}^{g \times Id_X} \sqsubseteq \exists_{\pi_2^{Z_2}}$ (where $\pi_2^{Z_1}: Z_1 \times X \rTo X$ and $\pi_2^{Z_2}: Z_2 \times X \rTo X$). \end{lemma} \begin{proof} As $\exists_{\pi_2^{Z_1}}$ is left adjoint to $\mathbb{S}^{\pi_2^{Z_1}}$, the proof can be completed by showing $\mathbb{S}^{g \times Id_X} \sqsubseteq \mathbb{S}^{\pi_2^{Z_1}}\exists_{\pi_2^{Z_2}}$, which is equivalent to showing $\mathbb{S}^{g \times Id_X} \sqsubseteq \exists_{\pi_{23}} \mathbb{S}^{\pi_{13}}$ by Beck-Chevalley on the pullback square \begin{diagram} Z_2 \times Z_1 \times X & \rTo^{\pi_{13}} & Z_2 \times X \\ \dTo^{\pi_{23}} & & \dTo_{\pi_2^{Z_2}} \\ Z_1 \times X & \rTo_{\pi_2^{Z_1}} & X \\ \end{diagram} But the proof is then complete by our observations in the preamble because $\mathbb{S}^{g \times Id_X}$ factors as $\mathbb{S}^{(g,Id_{Z_1 \times X})}\mathbb{S}^{\pi_{23}}$. \end{proof} This leads us to our first result about open monoids; that is, on monoid objects $(M,m:M\times M \rTo M,e: 1 \rTo M)$, internal to $\mathcal{C}$, such that $M$ is open. 
\begin{lemma} If $M$ is an open monoid then for any $M$-object $(X,a:M \times X \rTo X)$, $\mathbb{S}^X\rTo^{\mathbb{S}^a}\mathbb{S}^{M \times X} \rTo^{\exists_{\pi_2}}\mathbb{S}^X$ is (a) inflationary and (b) idempotent. \end{lemma} \begin{proof} (a) Immediate because $\exists_{\pi_2}$ is greater than $\mathbb{S}^{(e!,Id_X)}$ and $Id_{\mathbb{S}^X}$ factors as $\mathbb{S}^{(e!,Id_X)}\mathbb{S}^a$.
(b) By Beck-Chevalley on the pullback square
\begin{diagram} M \times M \times X & \rTo^{Id_M \times a} & M \times X \\ \dTo^{\pi_{23}} & & \dTo_{\pi_2} \\ M \times X & \rTo^a & X \\ \end{diagram} and using $a(Id_M \times a) = a (m \times Id_X)$ we have \begin{eqnarray*} \exists_{\pi_2} \mathbb{S}^a \exists_{\pi_2} \mathbb{S}^a & = & \exists_{\pi_2} \exists_{\pi_{23}} \mathbb{S}^{Id_M
\times a} \mathbb{S}^a \\ & = & \exists_{\pi_2} \exists_{\pi_{23}} \mathbb{S}^{m \times Id_X} \mathbb{S}^a \\ & \sqsubseteq & \exists_{\pi_2} \mathbb{S}^a \end{eqnarray*} where the last line is by the lemma (take $g=m$). This completes the proof of (b), given that (a) shows that $\exists_{\pi_2} \mathbb{S}^a$ is inflationary.
\end{proof}
The next result establishes the aim of this subsection. \begin{proposition}\label{nathan} If $M$ is an open monoid then $M^*: \mathcal{C} \rTo $$ [ M , \mathcal{C}]$ has a left adjoint, $\Sigma_M $. \end{proposition} \begin{proof} For any $M$-object $(X,a)$ consider the map $\exists_{\pi_2}\mathbb{S}^a$, which we have established is an inflationary idempotent. So by applying Axioms 5 and 6 we have a diagram \begin{eqnarray*} \mathbb{S}^{\Sigma_M(X,a)} \pile{ \rInto^{\mathbb{S}^{q^X}} \\ \lOnto_{\tau} } \mathbb{S}^X \pile{ \rTo^{\mathbb{S}^a} \\ \rTo^{\mathbb{S}^{\pi_2}} \\ \lTo_{\exists_a}} \mathbb{S}^{M \times X} \end{eqnarray*} which, by Lemma \ref{hannahs}, is an equalizer in $\mathcal{C}^{op}_{P_L}$. From this it follows that $q^X$ is the coequalizer of $a$ and $\pi_2$; if $t: X \rTo Z$ composes equally with $a$ and $\pi_2$ then $\mathbb{S}^t$ factors uniquely via $\mathbb{S}^{q^X}$ so it just remains to check, as in earlier proofs, that $\tau \mathbb{S}^t$ is a meet semilattice homomorphism. Certainly it preserves top (as $\tau$ does); the manipulation below shows that $\mathbb{S}^{q^X}( \tau \mathbb{S}^t (a_1) \sqcap \tau \mathbb{S}^t (a_2) ) \sqsubseteq \mathbb{S}^{q^X} \tau \mathbb{S}^t ( a_1 \sqcap a_2)$ from which it is clear that $\tau \mathbb{S}^t$ is a meet semilattice homomorphism (post compose the inequality with $\tau$). 
\begin{eqnarray*} \mathbb{S}^{q^X}( \tau \mathbb{S}^t (a_1) \sqcap \tau \mathbb{S}^t (a_2) ) & = & \mathbb{S}^{q^X} \tau \mathbb{S}^t (a_1) \sqcap \mathbb{S}^{q^X} \tau \mathbb{S}^t (a_2) ) \\ & = & \exists_a \mathbb{S}^{\pi_2} \mathbb{S}^t (a_1) \sqcap \exists_a \mathbb{S}^{\pi_2} \mathbb{S}^t (a_2) \\ & = & \exists_a \mathbb{S}^a \mathbb{S}^t (a_1) \sqcap \exists_a \mathbb{S}^a \mathbb{S}^t (a_2) \\ & \sqsubseteq & \mathbb{S}^t (a_1) \sqcap \mathbb{S}^t (a_2) \\ & = & \mathbb{S}^t (a_1 \sqcap a_2) \\ & \sqsubseteq & \mathbb{S}^{q^X}\tau \mathbb{S}^t (a_1 \sqcap a_2)\\ \end{eqnarray*} \end{proof} \section{Extending to groupoids} In this section we outline how the above arguments extend to groupoids. We start by establishing some notation for slice categories. If $f : Y \rTo X$ is a morphism of a category $\mathcal{C}$ then we use $Y_f$ as notation for $f$ when considered as an object of the slice category $\mathcal{C}/X$. We use $Y_X$ as notation for the object $\pi_2 : Y \times X \rTo X $. Now any morphism $g:Z \rTo X$ of a cartesian category $\mathcal{C}$ gives rise to a pullback adjunction $\Sigma_g \dashv g^*: \mathcal{C}/Z \rTo \mathcal{C}/X$ that satisfies Frobenius reciprocity. So by the change of base result (Lemma \ref{Frob}) there is an adjunction, which we will write $g^{\#} \dashv g_*$, between $(\mathcal{C}/Z)_{S_Z}^{op} $ and $(\mathcal{C}/X)_{S_X}^{op} $. In the case that $X=1$ observe that for any $\delta : S^A \rTo S^B$ we have $g_*g^{\#}(\delta)=\delta^X$.
If $\mathbb{G}=(G_1 \pile{\rTo^{d_0} \\ \rTo_{d_1} } G_0, m: G_1 \times_{G_0} G_1 \rTo G_1, s:G_0 \rTo G_1, i: G_1 \rTo G_1)$ is a groupoid relative to an order enriched cartesian category $\mathcal{C}$, with object of objects $G_0$ and object of morphisms $G_1$, then there is an adjunction $\mathcal{C}/G_0 \pile{ \rTo^{\Sigma_{d_1} d_0^*}\\ \lTo_U} [ \mathbb{G} ,\mathcal{C}]$. It satisfies Frobenius reciprocity and $U$ is monadic. A $\mathbb{G}$-object consists of $(X_f, a: \Sigma_{d_1} d_0^* X_f \rTo X_f)$ where $f: X \rTo G_0$ and $a$ is a morphism over $G_0$ that satisfies the usual unit and associative identities (the domain of $\Sigma_{d_1} d_0^* X_f$ is $G_1 \times_{G_0} X$). By taking adjoint transpose across $\Sigma_{d_1} \dashv d_1^*$ it is well known that having such an $a$ on $X_f$ is equivalent to having a morphism $a': d_0^* X_f \rTo d_1^* X_f$ of $\mathcal{C}/G_1$ such that $s^*a'$ is isomorphic to the identity and $m^*a' \cong \pi_2^*a' \nu_{X_f}\pi_1^*a'$ (where $\nu:\pi_1^*d_1^* \rTo^{\cong} \pi_2^* d_0^*$ is the canonical isomorphism). If $(X_f,a)$ and $(Y_g,b)$ are two $\mathbb{G}$-objects then a morphism $h:X_f\rTo Y_g$ of $\mathcal{C}/G_0$ is a $\mathbb{G}$-homomorphism if and only if $(d_1^*h)a'=b'(d_0^*h)$.
For any object $S$ of $\mathcal{C}$, $S_{G_0}$ can be made into a $\mathbb{G}$-object by defining the trivial action on it: $Id_S \times d_1 : S \times G_1 \rTo S \times G_0$. This $\mathbb{G}$-object is written $\mathbb{G}^*S$.
To summarise the technical difference we have to account for when generalising from groups to groupoids, observe that the role of $\delta^G$, i.e. $!^G_* (!^G)^{\#}(\delta)$, must be taken by $({d_1})_*d_0^{\#}(\delta)$. So the exponential in the presheaf category, something that is determined by the categorical structure of $\mathcal{C}$ alone, must be replaced by an endofunctor relative to $\mathcal{C}$ that contains information about the groupoid. However, once this replacement is made it is easy to see how to make the generalisation.
It does not appear to be possible to generalise Proposition \ref{main} from groups to groupoids. If it were possible then the property of being double exponentiable would be stable under slicing, since for any object $X$ the category of $\mathbb{G}$-objects is the same as the slice of $\mathcal{C}$ over $X$ when $\mathbb{G}$ is taken to be the trivial groupoid $X \pile{\rTo^{Id_X} \\ \rTo_{Id_X} } X$. But, see \cite{towslice}, proving slice stability of double exponentiability appears to require something like Axiom 7 (or, at least, that the double exponentiation functor preserves coreflexive equalizers).
\begin{proposition} Let $S$ be a double exponentiable object in an order enriched cartesian category $\mathcal{C}$ such that double exponentiation is stable under slicing. For any internal groupoid $\mathbb{G}$, $\mathbb{G}^*S$ is a double exponentiable object of $[ \mathbb{G} ,\mathcal{C}]$. \end{proposition} By `stable under slicing' we mean that for any morphism $g : Z \rTo X$ the canonical morphism $g^*\mathbb{P}_X \rTo \mathbb{P}_Z g^*$, determined by the fact that $\Sigma_g \dashv g^*$ satisfies Frobenius reciprocity, is an isomorphism. \begin{proof} Let $(X_f,a)$ be a $\mathbb{G}$-object. Then $P_{G_0}(X_f)$ can be made into a $\mathbb{G}$-object by using \begin{eqnarray*} d_0^*P_{G_0}(X_f) \rTo^{\cong} P_{G_1}d_0^*(X_f) \rTo^{P_{G_1}a'} P_{G_1}d_1^*(X_f) \rTo^{\cong} d_1^*P_{G_0}(X_f)\text{.} \end{eqnarray*} $\mathbb{G}$-homomorphisms from $(Y_g, c)$ to $P_{G_0}(X_f)$ correspond to natural transformations $\delta : S_{G_0}^{X_f} \rTo S_{G_0}^{Y_g}$ such that $({d_1})_*d_0^{\#}(\delta)S_{G_0}^{a}=S_{G_0}^{c}\delta$ (equivalently $d_0^{\#}(\delta)S_{G_1}^{a'}=S_{G_1}^{c'}d_1^{\#}\delta$, by adjoint transpose via $d_1^{\#} \dashv (d_1)_*$). Since $\mathcal{C}/G_0 \pile{ \rTo^{\Sigma_{d_1} d_0^*}\\ \lTo_U} [ \mathbb{G} ,\mathcal{C}]$ satisfies Frobenius reciprocity we know that there is an order isomorphism \begin{eqnarray*} Nat[S_{G_0}^{X_f},S_{G_0}^{Y_g}] \cong Nat[(\mathbb{G}^*S)^{(X_f,a)},(\mathbb{G}^*S)^{(G_1 \times_{G_0} Y,m \times Id_Y)}] \end{eqnarray*} natural in $Y_g$ and so the result follows as in Section \ref{prelim} from the explicit description of this order isomorphism and application of Lemma \ref{hannah} with $\mathbb{T}$ the monad induced by $ \Sigma_{d_1} d_0^* \dashv U$. \end{proof} Let us recall the slice stability result that we need to proceed. The proof is clear from \cite{towslice}. \begin{proposition} If $\mathcal{C}$ is a category of spaces then for any object $X$, so is $\mathcal{C}/X$. 
The canonical Sierpi\'{n}ski object relative to $\mathcal{C}/X$ is $\mathbb{S}_X$ and pullback commutes with double exponentiation. \end{proposition} By combining these last two propositions we have that $\mathbb{G}^*\mathbb{S}$ is a double exponentiable object in $[ \mathbb{G} ,\mathcal{C}]$, if $\mathcal{C}$ is a category of spaces. Checking that the remaining axioms are $\mathbb{G}$-stable is a straightforward re-application of the arguments deployed in Section \ref{axioms}, given that \cite{towslice} shows that the axioms hold in $\mathcal{C}/G_0$. So we have shown in outline: \begin{theorem} If $\mathcal{C}$ is a category of spaces and $\mathbb{G}$ an internal groupoid, then $[\mathbb{G},\mathcal{C}]$ is a category of spaces. \end{theorem}
\end{document} |
\begin{document}
\title{\LARGE \bf
A Hitting Time Analysis for Stochastic Time-Varying Functions with Applications to Adversarial Attacks on Computation of Markov Decision Processes
}
\begin{abstract} {Stochastic time-varying optimization is an integral part of learning in which the shape of the function changes over time in a non-deterministic manner. This paper considers multiple models of stochastic time variation and analyzes the corresponding notion of hitting time for each model, i.e., the period after which optimizing the stochastic time-varying function reveals informative statistics on {the} optimization of the target function. The studied models of time variation are motivated by adversarial attacks on the computation of value iteration in Markov decision processes. In this application, the hitting time quantifies the extent to which the computation is robust to adversarial disturbance. We {develop} upper bounds {on the} hitting time by analyzing the contraction-expansion transformation appearing in the time-variation models. We prove that the hitting time of the value function in the value iteration with a probabilistic contraction-expansion transformation is logarithmic in terms of the inverse of a desired precision.} {In addition,} the hitting time is analyzed for optimization of unknown {continuous or discrete} time-varying functions whose noisy evaluations are revealed over time. {The upper bound for a continuous function is super-quadratic (but sub-cubic) in terms of the inverse of a desired precision and the upper bound for a discrete function is logarithmic in terms of the cardinality of the function domain.} Improved bounds for convex functions are obtained and we show that such functions are learned faster than non-convex functions. Finally, we study a time-varying linear model with additive noise, where hitting time is bounded with the notion of shape dominance.
\end{abstract} \keywords{Stochastic time-varying functions, stochastic operators, hitting time, probabilistic contraction-expansion mapping, probabilistic Banach fixed-point theorem, adversarial Markov decision process}
\section{Introduction and Related Work} \label{introduction} In many practical applications of optimization, such as those in the training of neural networks~\cite{sun2019optimization,gu2020implicit}, online advertising~\cite{bottou2013counterfactual}, decision-making process of power systems~\cite{mulvaney2020load,park2020homotopy}, and the real-time state estimation of nonlinear systems~\cite{rao2003constrained}, the parameters of the problem are often uncertain and change over time \cite{ajalloeian2020inexact}.
To put the time-varying and uncertainty of the systems into perspective in optimization problems, time-varying {or online optimization aims} to find the solution trajectories determined by \begin{align} x^*_t = \argmin_{x \in \mathcal{X}} \left\{ f_t(x) = \mathbb{E} F_t(x, \xi) \right\}, \quad t \in \{1, 2, \dots \} \label{eq:varyingform}, \end{align} where the random variable $\xi$ models the uncertainty in the objective that comes from disturbance, inexactness of model, use of small batches, or injected noise, {and where $\argmin$ denotes any global minimizer of the input function}. Note that the expectation $\mathbb{E}$ over $\xi$ can only be evaluated approximately since the probability distribution is unknown, {and therefore} the target function $f_t$ should be approximated by observed samples. The estimate of the target function may not capture the shape of the target function given {a} limited number of observed samples. {However,} there is a point of time, {named} \emph{hitting time}, after which optimizing the estimated target function results in optimizing the target function up to some precision and confidence level. The hitting time captures the stochastic complexity of the time-varying problem in \eqref{eq:varyingform}.
\begin{table*}[htb]
\centering
\caption{\label{tab:table1}Comparison of Selected Theorems in Sections II-III}
{\begin{tabular}{ccc}
\toprule
Theorem & Assumptions & Hitting Time Definition \\
\midrule
\ref{theorem_hitting_time} & Assumptions \ref{assump:noise_N}-\ref{assump:granularity}, bounded difference functions & \eqref{hitting_time} \\
\ref{theorem_hitting_time_convex} & Assumptions \ref{assump:noise_N}-\ref{assump:gradient_lower_bound}, convex bounded difference functions & \eqref{hitting_time} \\
\ref{theorem_discrete_upper_bound} & Assumptions \ref{assump:noise_N} and \ref{assump:minimum_distance} & \eqref{hitting_time2} \\
\ref{theorem_unimodal_discrete} & Assumptions \ref{assump:noise_N} and \ref{assump:minimum_distance}, unimodal functions & \eqref{hitting_time2} \\
\ref{thm:hitting} & linear dynamics and shape dominance & \eqref{eq:hitting-time-def} \\
\bottomrule
\end{tabular}}
\end{table*} \subsection{Motivating Applications} In order to motivate the analysis of hitting time for time-varying probabilistic transformations, {we first explain its} applications in Markov Decision Process (MDP) and reinforcement learning (RL). Consider an MDP with the set of states (state space) $\mathcal{S}$, the set of actions (action space) $\mathcal{A}$, the time-invariant state transition $h$ such that $s_{k+1} = h(s_k, a_k, w_k)$, where $w_k$ for $k \in \{0, 1, \dots \}$ is a sequence of independent and identically distributed (i.i.d.) random variables, and the
immediate reward $r(s_k, a_k, w_k)$ received after taking action $a_k$ in state $s_k$. A state-contingent decision policy is a mapping $\mu: \mathcal{S} \rightarrow \mathcal{A}$. Given a discount factor $0 < q < 1$ and a policy $\mu$, the value function $V^\mu: \mathcal{S} \rightarrow \mathcal{R}$ is defined as \begin{equation} \label{eq:sum_MDP0}
V^\mu (s) = \mathbb{E} \left [ \sum_{k = 0}^\infty q^k \cdot r(s_k, \mu(s_k), w_k) \bigg | s_0 = s \right ], \end{equation} where expectation is taken over $w_k$ for $k \geq 0$. Then, the optimal value function $V^*$ is defined by \begin{equation} \label{eq:sum_MDP}
V^*(s) = \max_{\mu} V^\mu (s). \end{equation}
For a finite action space, any policy $\mu^*$ given by \begin{equation} \label{eq:optimal_action}
\mu^*(s) = \argmax_{a \in \mathcal{A}} \ \mathbb{E} \big [ r(s, a, w) + q \cdot V^*(h(s, a, w)) \big ] \end{equation} is optimal in the sense that $V^*(s) = V^{\mu^*} (s)$,
which gives rise to the Bellman equation \begin{equation} \label{eq:bellman_equation}
V^*(s) = \max_{a \in \mathcal{A}} \ \mathbb{E} \big [ r(s, a, w) + q \cdot V^*(h(s, a, w)) \big ] \quad \forall s \in \mathcal{S}, \end{equation} where $w$ is a random variable with the same distribution as $w_k$ for some $k$. Define the Bellman operator $\mathcal{T}$ as \begin{equation} \label{eq:Bellman_operator}
(\mathcal{T}V)(s) = \max_{a \in \mathcal{A}} \ \mathbb{E} \big [ r(s, a, w) + q \cdot V(h(s, a, w)) \big ]. \end{equation}
Starting from an arbitrary $V_0$, the value iteration method constructs a sequence $\{V_0, V_1, V_2, \dots\}$ with $V_{t + 1} = \mathcal{T}(V_t)$ for $t \in \{0, 1, \dots\}$. { It is well known that the Bellman operator is a contraction mapping, which guarantees convergence to $V^*$. The optimal value function $V^*$ is unknown in MDP and RL applications. The value function $V_t$ is a time-varying function and may never be exactly equal to $V^*$. Moreover, $V_t$ is rarely computed exactly and is subject to adversarial attacks. We will introduce multiple models of attack and analyze the corresponding notion of hitting time for each model to be able to study the convergence of $V_t$.}
\subsection{Related Work}
\subsubsection{Approximate Dynamic Programming}
{ The field of approximate dynamic programming encompasses a wide range of techniques that overcome the curse of dimensionality in the computation of the Bellman operator. The adversarial attack model studied in this paper is motivated by the following approaches: \begin{enumerate}[label=\Roman*., wide, labelindent=10pt]
\item \textbf{Approximation in computing expectation:}
There are different approaches to circumventing the costly computation of expectation in \eqref{eq:Bellman_operator}, e.g.,
a) assuming certainty equivalence by replacing stochastic quantities with deterministic ones {to arrive at} a deterministic optimization,
b) using Monte Carlo tree search and adaptive simulation to determine which expectations associated {with} actions should be computed more accurately \cite{dimitri2017dynamic, chang2013simulation, coulom2006efficient, browne2012survey, fu2017markov}.
Both of these approaches introduce some errors in {the} expectation.
\item \textbf{Approximation in maximization:}
The maximization in {the} Bellman operator in \eqref{eq:Bellman_operator} can be over a large number of actions, possibly a continuous action space with {an} infinite number of actions.
In addition to the discretization of the action space, nonlinear programming techniques are prone to errors especially when they are used in an online fashion.
\item \textbf{Approximation of value function:}
Due to the large number of states in many recent applications of Markov decision processes and reinforcement learning,
parametric feature-based approximation methods{, such as} neural network architectures, are used for value function representation \cite{dimitri2017dynamic, van1998learning, tsitsiklis1996feature, van2006performance, busoniu2010reinforcement}.
The parameterization of the value function is another source of error in value iteration that can cause expansion in value iteration \cite{tsitsiklis1996feature, van1998learning}.
\item \textbf{Adversarial value iteration:}
The emergence of cloud, edge, and fog computing means that large-scale MDP and RL problems will likely be solved by distributed servers \cite{satyanarayanan2017emergence, li2018learning, mach2017mobile}.
This swift shift to edge reinforcement learning brings a host of new adversarial attack challenges that can be catastrophic in critical applications of autonomous vehicles and Internet of Things (IoT) in general \cite{isakov2019survey, ansari2020security, xiao2019edge}.
\end{enumerate}} The first three causes {have been} studied extensively in the literature \cite{powell2009you}, {while there is no mathematical analysis of} adversarial attacks on {the computation of the value functions}.
\subsubsection{Reinforcement Learning in Time-varying Environment} {Consider} a reinforcement learning framework in which the model is being learned or {there is} a time-varying environment whose state transition probabilities and rewards change over time~\cite{liu2018solution}. An example of a time-varying environment is the changing environment at which autonomous vehicles interact with each other, human drivers, and pedestrians. In the context of reinforcement learning and Markov decision processes, this gradual change is translated into time-varying reward functions and transition probabilities. The relevance of time-varying functions to MDP and RL problems presented above is one of the many problems that can be described by time-varying functions whose hitting time analysis is of interest. {Other applications of} a time-varying framework, {such as bandit optimization, model predictive control, and empirical risk minimization, are discussed in } \cite{fenghitting}.
\subsubsection{Scenario-based Approach for Optimization}
{ Scenario-based approach for optimization~\cite{calafiore2005uncertain,campi2008exact,8299432} is concerned with decision making based on seen cases while having the ability to generalize to new situations.
In this context, a bound on the violation probability captures the generalization of time-invariant decisions.
The hitting time defined in this paper is related to the violation probability. Our work departs from this line of research in that we study a sequence of time-varying functions instead of a time-invariant function, which can potentially be corrupted by an adversary, and seek to constantly adjust our understanding of the optimal solution. The hitting time captures the time-varying aspect in our setting.
} \subsubsection{Dynamical Systems}
{ Our work is also related to asynchronous dynamical systems \cite{hassibi1999control}, which have been extensively studied in the literature. Despite the mathematical resemblance, our work is different from this line of research since our focus is on analyzing the associated hitting times of different models and the dynamics considered in this work may not even be linear.
}
\subsection{Contributions} We propose a probabilistic model of adversarial attacks, in which {both} expansion up to a constant and contraction occur with certain probabilities in iterates of {the} value iteration {method. We then study the hitting} time of such stochastic time-varying value functions in Section \ref{problem_statement}. {We develop an upper bound on the hitting time under a time-varying contraction mapping with additive noise and develop an upper bound on the distance between the fixed point and the value function.}
In the rest of {this paper}, different models of stochastic time variation for continuous and discrete functions are studied in Sections \ref{problem_statement} and \ref{sec:discrete}, respectively. In particular, probabilistic contraction-expansion mappings are studied in Section \ref{sec:prob_contraction_expansion},
time-varying probabilistic contraction-expansion mappings with additive noise are studied in Section \ref{sec:TV_prob_mapping}, time-varying continuous functions with additive noise are studied in Section \ref{section_optimization_TV_continuous_noise}, and improved bounds for convex functions with additive noise are studied in Section \ref{sec:improved_bounds_cts}.
Time-varying discrete functions with additive noise are studied in Section \ref{sec:problem_statement2}, improved bounds for unimodal functions with additive noise are studied in Section \ref{sec:hitting_time_analysis_convex2}, and a time-varying linear model with additive noise with the notion of shape dominance is studied in Section \ref{sec:linear-model}.
{We summarize the theorems and the associated assumptions as well as the hitting times definitions in Table \ref{tab:table1}. Finally, the }simulation results are presented in Section \ref{sec:simulation} and the {paper} is concluded in Section~\ref{sec:conclusion} in which a discussion of opportunities for future work is presented as well.
\section{The Hitting Time Analysis for Continuous Functions} \label{problem_statement} In this section, {three variants of stochastic time-varying models are studied and their hitting times are analyzed}. In the first model, a probabilistic contraction-expansion mapping is {analyzed}, where
the classical Banach fixed-point theorem cannot be applied to this model {due to the probabilistic contraction-expansion nature of the problem}.
In the {second} model, a time-varying probabilistic contraction-expansion mapping with additive noise is {investigated}.
{The above two models} are applicable to both continuous and discrete functions. In the last model, an unknown time-varying continuous function is observed with additive noise whose estimated function changes over time.
{To motivate the three stochastic time-varying models, we revisit the motivating example in the previous section, where a sequence of value functions $V_0, V_1, \dots $ is generated by the Bellman operator $\mathcal{T}$ defined in \eqref{eq:Bellman_operator}. Note that the theoretical proof of convergence {behind the} value iteration {method} depends heavily on the contraction mapping parameter $q$ and the fact that $d \big ( \mathcal{T}(V_{t + 1}), \mathcal{T}(V_t) \big ) \leq q \cdot d \big ( V_{t + 1}, V_t \big )$ deterministically{, where $d(\cdot, \cdot)$ is a translation-invariant distance function induced by a norm}. However, in an online implementation of the value iteration with large state or action spaces, the actual calculation in practice may result in the value iteration {method} not to satisfy the contraction condition $d \big ( \mathcal{T}(V_{t + 1}), \mathcal{T}(V_t) \big ) \leq q \cdot d \big ( V_{t + 1}, V_t \big )$ in some iterations. Instead, the distance may expand up to a factor greater than one in some iterations of the value iteration, i.e., $d \big ( \mathcal{T}(V_{t + 1}), \mathcal{T}(V_t) \big ) \leq Q \cdot d \big ( V_{t + 1}, V_t \big )$, where $Q \geq 1$. {In this problem,} the Bellman contraction mapping in value iteration may not be fixed anymore {and could} change over time. Hence, instead of applying the same transformation $\mathcal{T}$ in value iteration, a time-varying transformation $\mathcal{T}_t$ for $t \in \{0, 1, \dots\}$ may be applied to value iteration. Section~\ref{sec:prob_contraction_expansion} formalizes this observation. }
\subsection{Probabilistic Contraction-Expansion Mapping} \label{sec:prob_contraction_expansion}
Let $\left (X, \| \cdot \| \right )$ be a non-empty complete normed vector (linear) space, known as a Banach space, over the field $\mathbb{R}$ of real scalars, where $X$ is a vector space, e.g., a function space, together with a norm $\| \cdot \|$. The norm induces a translation invariant distance function, called canonical induced metric, as $d(f, g) = \|f - g\|$. Let $\| f \| = \langle f, f \rangle^{1/2}$, where the inner product of $f, g \in X$ in general is defined by $\langle f, g \rangle = \int f(x) g(x) dx$. Consider a contraction mapping $\mathcal{T}: X \rightarrow X$ with the property that for all $f, g \in X$, there exists {a scalar $q \in [0, 1)$} such that \begin{equation} \label{contraction_deterministic} d \big (\mathcal{T}(f), \mathcal{T}(g) \big ) \leq q \cdot d(f, g). \end{equation} {In light of} the Banach-Caccioppoli fixed-point theorem, {this} contraction mapping has its own unique fixed point, i.e., there exists $f^* \in X$ such that $\mathcal{T}(f^*) = f^*$. Furthermore, starting with an arbitrary function $f^0 \in X$, the sequence $\{f^n\}$ with $f^n = \mathcal{T}(f^{n - 1})$ for $n \geq 1$ converges to $f^*$; in other words, $f^n \rightarrow f^*$, where $d \big ( f^*, f^n \big ) \leq \frac{q^n}{1 - q} \cdot d(f^1, f^0)$.
Note that in all iterations of the above value iteration, {the mapping $\mathcal{T}$ operates as a contraction mapping according to \eqref{contraction_deterministic} with probability one.}
However, in the rest of this subsection, we consider a probabilistic version of the Banach fixed-point theorem, where the mapping either contracts or expands the distance between any two points in a probabilistic manner.
Consider the time-varying function $f_t \in X$ for $t \in \{ 0, 1, 2, \dots \}$ evolving over time according to \begin{equation} \label{time_varying_map_1}
f_{t + 1} = \overline{\mathcal{T}}(f_t), \quad t \in \{ 0, 1, 2, \dots \}, \end{equation} where $\overline{\mathcal{T}}$ is a probabilistic contraction-expansion mapping such that
\begin{equation} \label{probabilistic_contraction_expansion}
d \big (\overline{\mathcal{T}}(f_{t+1}), \overline{\mathcal{T}}(f_t) \big ) \leq \begin{cases}
q \cdot d(f_{t + 1}, f_t) & \text{w.p.} \quad p\\
Q \cdot d(f_{t + 1}, f_t) & \text{otherwise} \end{cases}, \ \ \forall t \in \mathbb{N}_0 \end{equation} {for some constants $q \in [0, 1)$, $Q \geq 1$, and $p \in (0, 1]$, where w.p. stands for ``with probability'' and $\mathbb{N}_0$ denotes the set of natural numbers including zero.}
{{The expansion in \eqref{probabilistic_contraction_expansion} is caused by an adversary in an attempt to move the function sequence away from the fixed point.} The contraction or expansion of $\overline{\mathcal{T}}$ is independent over time and $f^*$ is a fixed point of the mapping if $\overline{\mathcal{T}}(f^*) = f^*$.} The shape of the function $f_t$ changes over time, but there can be a time, called hitting time $T$, {at which $f_T$ reaches {a neighborhood of $f^*$,} as formally defined below.}
\begin{definition} \label{def:hitting_time_probabilistic} {Given $\epsilon > 0$ and $a \in (0, 1]$,} the hitting time $T(\epsilon, a)$ for the stochastic function sequence introduced in \eqref{time_varying_map_1} is defined as \begin{equation}
\label{hitting_time_NTVT}
T(\epsilon, a) = \min \big \{ T : \mathbb{P} \left \{ d \big ( f_t, f^* \big ) < \epsilon \right \} \geq 1 - a, \ \forall t \geq T \big \}, \end{equation} where $f^*$ is a fixed point {whose existence and uniqueness is proven in Theorem \ref{thm:probabilistic_banach} and $\mathbb{P}\{\cdot\}$ takes the probability of the input event}. \end{definition}
{ As a result, the complexity of optimizing {the} functions $f_t$ for $t < T$ can be irrelevant to the optimization complexity of {the} functions $f_t$ for $t \geq T$. Consequently, the hitting time $T$ together with {the} optimization complexity of {any} function $f_t$ for $t \geq T$ captures the complexity of optimizing the time-varying sequence of functions $\{ f_t \}$.}
In the following theorem, the limiting behavior of the function sequence $\{ f_t \}$ is studied and an upper bound {on the} hitting time {is derived}.
\begin{theorem} \label{thm:probabilistic_banach} \textbf{Probabilistic Banach Fixed-Point Theorem.}
Let $\left (X, \| \cdot \| \right )$ be a non-empty complete normed vector space with a probabilistic contraction-expansion mapping $\overline{\mathcal{T}}: X \rightarrow X$ defined in \eqref{probabilistic_contraction_expansion} {such that $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$}. Starting with an arbitrary element $f_0 \in X$, the sequence $\{ f_t \}$ defined in \eqref{time_varying_map_1} converges to an element $f^* \in X$ with {an associated confidence level $1 - a$}, where $f^*$ is a unique fixed point for {the} mapping $\overline{\mathcal{T}}$.
Furthermore, {for every $0 < L < \frac{\epsilon}{d \left ( f_1, f_0 \right )}$,} the hitting time {$T(\epsilon, a)$ satisfies the inequality}
\begin{equation} \label{theorem_equation_prob_Banach}
\begin{aligned}
T(\epsilon, a) \hspace{-0.5mm} \leq \hspace{-0.4mm} & \max \hspace{-0.5mm} \left\{ \hspace{-0.5mm} \frac{\ln \hspace{-0.5mm} \left ( \hspace{-1mm} \frac{a \cdot L^2 \hspace{-0.5mm}\cdot\hspace{-0.5mm} \big (1 - q \cdot p - Q \cdot (1 - p) \big ) \hspace{-0.5mm}\cdot\hspace{-0.5mm} \big ( 1 - q^2 \cdot p - Q^2 \cdot (1 - p) \big )}{ 1 + q \cdot p + Q \cdot (1 - p)} \hspace{-1mm} \right )}{\ln \big ( q^2 \cdot p + Q^2 \cdot (1 - p) \big )}\hspace{-0.5mm},\hspace{-0.5mm} \frac{\ln\hspace{-0.5mm} \left(\hspace{-0.5mm} \Big (\hspace{-0.5mm} \frac{\epsilon}{d \left ( f_1, f_0 \right )} \hspace{-0.5mm}-\hspace{-0.5mm} L \Big ) \hspace{-0.5mm}\cdot\hspace{-0.5mm} \Big (\hspace{-0.5mm} 1 - q \cdot p - Q \cdot (1 - p) \hspace{-0.5mm}\Big )\hspace{-0.5mm} \right )\hspace{-0.5mm}}{\ln \big (q \cdot p + Q \cdot (1 - p) \big )} \hspace{-0.5mm}\right\}\hspace{-0.5mm}.\hspace{-1mm}
\end{aligned} \end{equation} \end{theorem}
\begin{proof} In order to find an upper bound {on} the hitting time $T(\epsilon, a)$ defined in Definition \ref{def:hitting_time_probabilistic}, we first need to study {the convergence behavior of} the function sequence $\{f_t\}$ in \eqref{time_varying_map_1} under the probabilistic contraction-expansion mapping $\overline{\mathcal{T}}$. To this end, we prove that this function sequence is a Cauchy sequence with high probability. {Given arbitrary} integer values $n$ and $m$ such that $n > m$, {one can write} \begin{equation} \label{distance_probabilistic} \begin{aligned}
d \big ( f_n, f_m \big ) = d \big ( \overline{\mathcal{T}}^n(f_0), \overline{\mathcal{T}}^m(f_0) \big )
& \overset{(a)}{\leq} \hspace{-1.2mm} \sum_{i = 1}^{n - m} d \big ( \overline{\mathcal{T}}^{n - i + 1} (f_0), \overline{\mathcal{T}}^{n - i} (f_0) \big )
= \sum_{i = 1}^{n - m} d \big ( \overline{\mathcal{T}}^{n - i} (f_1), \overline{\mathcal{T}}^{n - i} (f_0) \big ) \\
& \overset{(b)}{\leq} \sum_{i = 1}^{n - m} {\left(\prod_{j = 1}^{n - i} B_j\right)} \cdot d \big (f_1, f_0 \big ) =
d \big (f_1, f_0 \big ) \cdot \sum_{i = 1}^{n - m} \prod_{j = 1}^{n - i} B_j, \end{aligned} \end{equation} where triangular inequality is applied $n - m - 1$ times in $(a)$ and the independent and identically distributed random variables $B_j$ for $j \in \{1, 2, \dots, n - 1\}$ used in $(b)$ have the distribution \begin{equation} \label{B_distribution}
B_j =
\begin{cases}
q & \text{w.p.} \quad p \\
Q & \text{otherwise}
\end{cases}. \end{equation} Next, we study the mean and variance of {the} random variable $S_{n, m} = \sum_{i = 1}^{n - m} \prod_{j = 1}^{n - i} B_j$ in \eqref{distance_probabilistic}. Using the independence of $B_j$ for $j \in \{1, 2, \dots, n - 1 \}$, the mean can be {upper-bounded} as \begin{equation}
\label{mean_prob}
\begin{aligned}
\mathbb{E} [S_{n, m}] = \mathbb{E} \left [ \sum_{i = 1}^{n - m} \prod_{j = 1}^{n - i} B_j \right ]
= \sum_{i = 1}^{n - m} \prod_{j = 1}^{n - i} \mathbb{E} \left [ B_j \right ]
= \sum_{i = 1}^{n - m} \big (q \cdot p + Q \cdot (1 - p) \big )^{n - i} \leq \frac{\big (q \cdot p + Q \cdot (1 - p) \big )^m}{1 - q \cdot p - Q \cdot (1 - p) }.
\end{aligned} \end{equation} On the other hand, ${\rm Var}\,(S_{n, m}) \leq \mathbb{E} \left [ S_{n, m}^2 \right ]$, {where ${\rm Var}\,(\cdot)$ takes the variance of the input random variable}, and the second moment of $S_{n, m}$ {will be} {upper-bounded} {next}. Note that \begin{equation}
\label{second_moment}
\begin{aligned}
S_{n, m} = B_1 \cdot B_2 \cdots B_m \cdot \big ( 1 + & B_{m + 1} + B_{m + 1} \cdot B_{m + 2} + \dots + B_{m + 1} \cdots B_{n - 1} \big ).
\end{aligned} \end{equation} Let $\bar{S}_{n, m} = 1 + B_{m + 1} + B_{m + 1} \cdot B_{m + 2} + \dots + B_{m + 1} \cdots B_{n - 1}$, where {$\bar{S}_{n, m}$ is a random variable independent of $B_j$ for $j \in \{1, 2, \dots, m \}$, and $\bar{S} = \lim_{n \rightarrow \infty} \bar{S}_{n, m}$. We leave out the subscript $m$ since the limits $\lim_{n \rightarrow \infty} \bar{S}_{n, m}$ and $\lim_{n \rightarrow \infty} \bar{S}_{n, m^\prime}$ are identically distributed for all $m,m^\prime \geq 0$. This is because $\bar{S}$ is an infinite sum and $\{B_j\}$ are i.i.d. random variables. }
Since $\mathbb{E}[B_j] > 0$ for $j \geq 1$, {we have} $\mathbb{E} [ \bar{S}_{n, m}^2 ] \leq \mathbb{E} [\bar{S}^2]$; hence, {it follows from \eqref{second_moment} that} \begin{equation}
\label{S_n_m_second_moment}
\begin{aligned}
\mathbb{E} \left [ S_{n, m}^2 \right ]
& = \mathbb{E} \left [ B_1^2 \right ] \cdots \mathbb{E} \left [ B_m^2 \right ] \cdot \mathbb{E} \left [ \bar{S}_{n, m}^2 \right ] \leq \mathbb{E} \left [ B_1^2 \right ] \cdots \mathbb{E} \left [ B_m^2 \right ] \cdot \mathbb{E} \left [ \bar{S}^2 \right ].
\end{aligned} \end{equation} In order to find an upper bound on $\mathbb{E} \left [ \bar{S}^2 \right ]$, we have \begin{equation}
\label{S_bar}
\begin{aligned}
\bar{S} = 1 + & B_{m + 1} \cdot (1 + B_{m + 2} + B_{m + 2} \cdot B_{m + 3} + B_{m + 2} \cdot B_{m + 3} \cdot B_{m + 4} + \dots) = 1 + B_{m + 1} \cdot \Tilde{S},
\end{aligned} \end{equation} where $\Tilde{S}$ is independent of $B_{m + 1}$, and {the} random variables $\bar{S}$ and $\Tilde{S}$ are identically distributed but not independent of each other. {By} taking expectation on both sides of $\bar{S}^2 = (1 + B_{m + 1} \cdot \Tilde{S})^2$, {and} using the independence of $\Tilde{S}$ and $B_{m + 1}$ and the fact that $\mathbb{E} \big [ \bar{S}^2 \big ] = \mathbb{E} \big [ \Tilde{S}^2 \big ]$, {one can obtain}
\begin{equation}
\label{second_moment_s_barr}
\begin{aligned}
& \mathbb{E} \left [ \bar{S}^2 \right ] = 1 + \mathbb{E} \left [ B_{m + 1}^2 \right ] \cdot \mathbb{E} \left [ \Tilde{S}^2 \right ] + 2 \mathbb{E} \left [ B_{m + 1} \right ] \cdot \mathbb{E} \left [ \Tilde{S} \right ] \Longrightarrow \mathbb{E} \left [ \bar{S}^2 \right ] = \frac{1 + 2 \mathbb{E} \left [ B_{m + 1} \right ] \cdot \mathbb{E} \left [ \Tilde{S} \right ]}{1 - \mathbb{E} \left [ B_{m + 1}^2 \right ]}.
\end{aligned} \end{equation} In the same way as finding the mean of $S_{n, m}$ in \eqref{mean_prob}, it is derived that $\mathbb{E} \left [ \Tilde{S} \right ] = \frac{1}{1 - q \cdot p - Q \cdot (1 - p)}$; furthermore, $\mathbb{E} \left [ B_{m + 1} \right ] = q \cdot p + Q \cdot (1 - p)$ and $\mathbb{E} \left [ B_{m + 1}^2 \right ] = q^2 \cdot p + Q^2 \cdot (1 - p)$. As a result, if $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$, Equation \eqref{second_moment_s_barr} results in \begin{equation}
\label{s_bar_second_momentt}
\mathbb{E} \left [ \bar{S}^2 \right ] = \frac{1 + q \cdot p + Q \cdot (1 - p)}{ \big (1 - q \cdot p - Q \cdot (1 - p) \big ) \cdot \big ( 1 - q^2 \cdot p - Q^2 \cdot (1 - p) \big )}. \end{equation} Using Equation \eqref{S_n_m_second_moment}, we have \begin{equation}
\label{variance_prob}
\begin{aligned}
{\rm Var}\,(S_{n, m})\leq \mathbb{E} \left [ S_{n, m}^2 \right ]
\hspace{-0.5mm}\leq\hspace{-0.5mm} \big ( q^2 \cdot p + Q^2 \cdot (1 - p) \big )^m \hspace{-0.5mm}\times\hspace{-0.5mm} \frac{1 + q \cdot p + Q \cdot (1 - p)}{ \big (1 - q \cdot p - Q \cdot (1 - p) \big ) \cdot \big ( 1 - q^2 \cdot p - Q^2 \cdot (1 - p) \big )}.
\end{aligned} \end{equation}
{So far, it has been shown} that $d \big ( \overline{\mathcal{T}}^n(f_0), \overline{\mathcal{T}}^m(f_0) \big ) \leq S_{n, m} \cdot d \big (f_1, f_0 \big )$, where $S_{n, m}$ is a random variable with {its} mean and variance {upper-bounded in} \eqref{mean_prob} and \eqref{variance_prob}, respectively. Using Chebyshev's inequality, for any $L > 0$, we have \begin{equation}\label{chebyshev_inequality}
\begin{aligned}
& \mathbb{P} \left \{ | S_{n, m} - \mathbb{E} [ S_{n, m} ] | \leq L \right \} \geq 1 - \frac{{\rm Var}\,(S_{n, m})}{L^2} \Longrightarrow \\
& \mathbb{P} \left \{ S_{n, m} \leq \frac{\big (q \cdot p + Q \cdot (1 - p) \big )^m}{1 - q \cdot p - Q \cdot (1 - p) } + L \right \} \hspace{-1mm}\geq\hspace{-0.5mm} 1 - \frac{ \big ( q^2 \cdot p + Q^2 \cdot (1 - p) \big )^m \cdot \big ( 1 + q \cdot p + Q \cdot (1 - p) \big )}{L^2 \cdot \big (1 - q \cdot p - Q \cdot (1 - p) \big ) \cdot \big ( 1 - q^2 \cdot p - Q^2 \cdot (1 - p) \big )}.
\end{aligned} \end{equation} As a result, for any $\epsilon > 0$ and {$a \in (0, 1]$}, we have {$d(f_n,f_m) = d \big ( \overline{\mathcal{T}}^n(f_0), \overline{\mathcal{T}}^m(f_0) \big ) \leq \epsilon$} with {the} confidence level $1 - a$ if $m$ satisfies the two inequalities \begin{subequations} \label{two_inequality_for_m} \begin{eqnarray}
\label{two_inequality_for_m_a}
\dfrac{ \big ( q^2 \cdot p + Q^2 \cdot (1 - p) \big )^m \cdot \big ( 1 + q \cdot p + Q \cdot (1 - p) \big )}{L^2 \cdot \big (1 - q \cdot p - Q \cdot (1 - p) \big ) \cdot \big ( 1 - q^2 \cdot p - Q^2 \cdot (1 - p) \big )} &\leq a \\
\label{two_inequality_for_m_b}
\left ( \dfrac{\big (q \cdot p + Q \cdot (1 - p) \big )^m}{1 - q \cdot p - Q \cdot (1 - p) } + L \right ) \cdot d \big ( f_1, f_0 \big ) &\leq \epsilon.
\end{eqnarray} \end{subequations} {Assume} that $d \big ( f_1, f_0 \big ) \neq 0$; otherwise, $f_0$ is a fixed point by definition. Hence, {for} $0 < L < \frac{\epsilon}{d \left ( f_1, f_0 \right )}$, {if $q \cdot p + Q \cdot (1 - p) < 1$ and $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$,} then the two inequalities in \eqref{two_inequality_for_m_a} and \eqref{two_inequality_for_m_b} are satisfied {when} \begin{equation}
\label{N_epsilon_value}
\begin{aligned}
m \geq \max \hspace{-0.5mm} \left\{ \hspace{-0.5mm} \frac{\ln \hspace{-0.5mm} \left ( \hspace{-1mm} \frac{a \cdot L^2 \hspace{-0.5mm}\cdot\hspace{-0.5mm} \big (1 - q \cdot p - Q \cdot (1 - p) \big ) \hspace{-0.5mm}\cdot\hspace{-0.5mm} \big ( 1 - q^2 \cdot p - Q^2 \cdot (1 - p) \big )}{ 1 + q \cdot p + Q \cdot (1 - p)} \hspace{-1mm} \right )}{\ln \big ( q^2 \cdot p + Q^2 \cdot (1 - p) \big )}\hspace{-0.5mm},\hspace{-0.5mm} \frac{\ln\hspace{-0.5mm} \left(\hspace{-0.5mm} \Big (\hspace{-0.5mm} \frac{\epsilon}{d \left ( f_1, f_0 \right )} \hspace{-0.5mm}-\hspace{-0.5mm} L \Big ) \hspace{-0.5mm}\cdot\hspace{-0.5mm} \Big (\hspace{-0.5mm} 1 - q \cdot p - Q \cdot (1 - p) \hspace{-0.5mm}\Big )\hspace{-0.5mm} \right )\hspace{-0.5mm}}{\ln \big (q \cdot p + Q \cdot (1 - p) \big )} \hspace{-0.5mm}\right\}.
\end{aligned} \end{equation}
{Now, for every $\epsilon>0$ and $a\in (0,1]$, let $N_\epsilon$ be the constant on the right-hand side of \eqref{N_epsilon_value}. Then, with probability $1-a$, it holds that $\lim_{n \rightarrow \infty} d(f_n,f_{N_\epsilon})\leq \lim_{n \rightarrow \infty} S_{n,N_\epsilon} \cdot d \big (f_1, f_0 \big ) \leq \epsilon$. For all $n > m > N_\epsilon$, since $\{B_j\}$ are nonnegative, it holds that $S_{n,m}=B_{1} \cdot B_{2} \cdots B_{N_{\epsilon}} \cdot\left(B_{N_{\epsilon}+1} \cdots B_{m}+\cdots+B_{N_{\epsilon}+1} \cdots B_{n-1}\right)\leq B_{1} \cdot B_{2} \cdots B_{N_{\epsilon}} \cdot (1+B_{N_{\epsilon}+1}+B_{N_{\epsilon}+1}B_{N_{\epsilon}+2}+\dots) = \lim_{n \rightarrow \infty} S_{n,N_\epsilon}$, which implies $d(f_n,f_m)\leq S_{n,m}\cdot d \big (f_1, f_0 \big ) \leq \epsilon$ as long as $\lim_{n \rightarrow \infty} S_{n,N_\epsilon} \cdot d \big (f_1, f_0 \big ) \leq \epsilon$. To conclude, the sequence $\{ f_t \}$ is a Cauchy sequence with probability $1-a$.} {Since} the {vector} space {$X$} is complete, the sequence $\{ f_t \}$ converges to an element $f^*$ in the space with high probability. Moreover, $f^*$ is a fixed point of the mapping $\overline{\mathcal{T}}$ since with high probability we have \begin{equation}
\label{fixed_point_probabilistic}
\overline{\mathcal{T}} (f^*)
= \overline{\mathcal{T}} ( \lim_{t \rightarrow \infty } f_t)
\overset{(a)}{=} \lim_{t \rightarrow \infty } \overline{\mathcal{T}} ( f_t) \\
= \lim_{t \rightarrow \infty } f_{t + 1} = f^*, \end{equation} where $(a)$ is true as the mapping $\overline{\mathcal{T}}$ is continuous due to \eqref{probabilistic_contraction_expansion}, which justifies bringing the limit outside the operator $\overline{\mathcal{T}}$. Lastly, there cannot be more than one fixed point for {the} mapping $\overline{\mathcal{T}}$, which can be proved by contradiction. Considering any pair of distinct fixed points $f_1^*$ and $f_2^*$, we have $d \big ( \overline{\mathcal{T}}(f_1^*), \overline{\mathcal{T}}(f_2^*) \big ) = d \big ( f_1^*, f_2^* \big )$ with probability {1}, which contradicts the fact that the distance between the mapped points contracts with a factor $q < 1$ with probability $p > 0$.
In {this} proof, both $q \cdot p + Q \cdot (1 - p) < 1$ and $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$ must be satisfied {to ensure that Equations \eqref{two_inequality_for_m_a} and \eqref{two_inequality_for_m_b} hold for a large enough $m$}. {However,} $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$ {implies} $q \cdot p + Q \cdot (1 - p) < 1$ {since one can write} \begin{equation}\label{eq:implication_of_assumption}
\begin{aligned}
(1 - p) \cdot (Q^2 - 2Q + 1) \geq 0
\Longrightarrow\quad& Q^2 \cdot (1 - p) - 2Q \cdot (1 - p) + 1 - p \geq 0 \\
\overset{(a)}{\Longrightarrow}\quad& Q^2 \cdot (1 - p)^2 - 2Q \cdot (1 - p) + 1 \geq p \cdot \big (1 - (1 - p) \cdot Q^2 \big ) \\
\overset{(b)}{\Longrightarrow}\quad& 1 - Q \cdot (1 - p) \geq p \cdot \sqrt{\frac{1 - Q^2 \cdot (1 - p)}{p}} \\
\overset{(c)}{\Longrightarrow}\quad& q \cdot p + Q \cdot (1 - p) < 1,
\end{aligned} \end{equation} where $p - p \cdot (1 - p) \cdot Q^2$ is added on both sides of inequality in $(a)$, the square root is taken from both sides in $(b)$, and $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$ is used in $(c)$ to draw the claimed conclusion.
\end{proof}
{Theorem \ref{thm:probabilistic_banach} states that if contraction of an operator in the iterates of the value iteration is compromised by an adversary via expansions in the iterates of value iteration, the value function sequence can still converge to the fixed point of the operator with high probability.
{The standard Banach fixed-point theorem is a special case of Theorem \ref{thm:probabilistic_banach} by setting $p = 1$ and $L=0$.} The analysis in the proof of this theorem suggests that the compromised operator being contractive on expectation is not enough for the convergence of the value function sequence with high probability since the introduced randomness to the operator by the adversary can lead to high variance in the elements of the value function sequence.
{Hence, the additional assumption $q^2 \cdot p + Q^2 \cdot (1 - p) < 1$
is required to bound such a variance rooted from the expansion caused by the adversary.} Furthermore, this theorem provides an upper bound on the number of rounds for value iteration to defeat the effect of the adversary that attempts to move the value function sequence away from the fixed point.
{If the adversary is not modeled, the user who expects a normal scenario may perform {fewer iterations} of the value iteration. This can lead to a highly inaccurate estimate of the fixed point in the presence of an adversary.}
}
\begin{remark}
{The parameter $L\in \left(0, \frac{\epsilon}{d \left ( f_1, f_0 \right )}\right)$ serves as an auxiliary parameter used in \eqref{chebyshev_inequality}. We observe that the first term in the upper bound \eqref{theorem_equation_prob_Banach} is decreasing with respect to $L$ and the second term is increasing with respect to $L$. By minimizing the bound \eqref{theorem_equation_prob_Banach} over $L$,
we have that {$T(\epsilon,a)$ has the order $\mathcal{O}\left(\frac{d \left ( f_1, f_0 \right )}{\epsilon}\right)$}.} \end{remark}
\subsection{Time-Varying Probabilistic Contraction-Expansion Mapping with Additive Noise} \label{sec:TV_prob_mapping}
Let $(X, \| \cdot \| )$ be the same complete normed vector space as in Section \ref{sec:prob_contraction_expansion}. Consider time-varying probabilistic contraction-expansion {mappings} $\overline{\mathcal{T}}_t(\cdot): X \rightarrow X$ for $t \in \{ 0, 1, 2, \dots \}$ with parameters $p_t, q_t,$ and $Q_t$, {i.e.,} \begin{equation}
{d \big (\overline{\mathcal{T}}_t(f), \overline{\mathcal{T}}_t(g) \big ) \leq \begin{cases}
q_t \cdot d(f, g) & \text{w.p.} \quad p_t\\
Q_t \cdot d(f, g) & \text{otherwise} \end{cases}, \ \ \forall t \in \mathbb{N}_0.} \end{equation} \noindent {By Theorem \ref{thm:probabilistic_banach}, starting with an arbitrary function $f^0 \in X$,} the sequence $\{f^n\}$ with $f^n = \overline{\mathcal{T}}_t(f^{n - 1})$ for $n \geq 1$, where the same probabilistic contraction-expansion mapping $\overline{\mathcal{T}}_t$ is applied repeatedly, converges to $f_t^*$ with high probability.
{ \begin{assumption}\label{assump:consecutive_fixed_point} The fixed points of every two consecutive {mappings} are at most $\epsilon_f > 0$ away from each other, i.e., $d \big ( f_t^*, f_{t - 1}^* \big ) \leq \epsilon_f$ for all $t \in \{ 1, 2, 3, \dots\}$. \end{assumption}}
{It is worth mentioning that, even under Assumption \ref{assump:consecutive_fixed_point}, there can be non-consecutive mappings $\overline{\mathcal{T}}_t$ and $\overline{\mathcal{T}}_{t'}$ whose fixed points {are arbitrarily far away from each other.}}
Note that in all iterations of the probabilistic value iteration, the same probabilistic contraction-expansion mapping $\overline{\mathcal{T}}_t$ is applied to the function sequence $\{f^n\}$. However, in the {remainder} of this subsection, we consider a time-varying and noisy version of the probabilistic Banach fixed-point theorem, where the {underlying} {mapping} changes over time and noise functions are added to the outcome of the mapping in each iteration.
Consider the time-varying function $f_t \in X$ for $t \in \{ 0, 1, 2, \dots \}$ evolving over time according to \begin{equation} \label{time_varying_map_2}
f_{t + 1} = \widetilde{\mathcal{T}}_t(f_t) = \overline{\mathcal{T}}_t(f_t) + w_t, \ \ \ t \in \{ 0, 1, 2, \dots \}, \end{equation} where $w_t \in X$ is {some additive} noise.
{\begin{assumption}\label{assump:noise}
The additive noise is uniformly upper-bounded by a constant $\epsilon_w>0$, i.e., $\| w_t \| \leq \epsilon_w$ for all $t \in \{0, 1, 2, \dots \}$. \end{assumption}}
{Note that the shape of the function $f_t$ {can change} over time and can be non-convex. However, the following theorem shows that an upper bound can be established for the distance between $f_t$ and the time-{varying} fixed point $f_t^*$.
}
\begin{theorem} \label{thm:contraction-expansion-noise} Consider {arbitrary} time-varying probabilistic contraction-expansion mappings $\overline{\mathcal{T}}_t$
with fixed points $f_t^*$, where $\sup_t \big ( q_t^2 \cdot p_t + Q_t^2 \cdot (1 - p_t) \big ) < 1$ {for $t \in \{0, 1, 2, \dots \}$.}
{Let} the time-varying function $f_t$ evolve over time according to the time-varying noisy probabilistic transformation in \eqref{time_varying_map_2}.
{Under Assumptions \ref{assump:consecutive_fixed_point} and \ref{assump:noise}, it holds that} \begin{equation}
\begin{aligned}
d \big ( f_t, f_t^* \big )
\leq P_t \cdot d \big ( f_0, f_0^* \big ) + S_t \cdot ( \epsilon_f + \epsilon_w ),
\end{aligned} \end{equation} where $P_t = \left ( \prod_{i = 0}^{t - 1} B_i \right )$ and $S_t = \left ( 1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right )$ are random variables with independent random variables $B_t$ having the distribution \begin{equation}\label{eq:B_t}
B_t =
\begin{cases}
q_t & \text{w.p.} \quad p_t\\
Q_t & \text{otherwise}
\end{cases}. \end{equation} The means and variances of $P_t$ and $S_t$ are {upper-bounded} as \begin{equation}
\begin{aligned}
\mathbb{E} \left [ P_t \right ]
& \leq \left ( \sup_t \big ( q_t \cdot p_t + Q_t \cdot (1 - p_t) \big ) \right )^t \xrightarrow[]{t \rightarrow \infty} 0, \\
{\rm Var}\,(P_t)
& \leq \left ( \sup_t \big ( q_t^2 \cdot p_t + Q_t^2 \cdot (1 - p_t) \big ) \right )^t \xrightarrow[]{t \rightarrow \infty} 0,
\end{aligned} \end{equation} and \begin{equation}
\begin{aligned}
\mathbb{E} \left [ S_t \right ]
& \leq \frac{1}{1 - \sup_t \big ( q_t \cdot p_t + Q_t \cdot (1 - p_t) \big )}, \\
{\rm Var}\, (S_t) & \leq \frac{\big ( \bar{q}^2 \cdot \bar{p} + \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \cdot \big ( 1 + \bar{q} \cdot \bar{p} + \bar{Q} \cdot (1 - \bar{p}) \big )}{\big ( 1 - \bar{q}^2 \cdot \bar{p} - \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \cdot \big ( 1 - \bar{q} \cdot \bar{p} - \bar{Q} \cdot (1 - \bar{p}) \big )},
\end{aligned} \end{equation} where {$\bar{q}$, $\bar{Q}$, and $\bar{p}$ satisfy} $\bar{q} \cdot \bar{p} + \bar{Q} \cdot (1 - \bar{p}) \geq \sup_{t \geq 1} \mathbb{E}[B_t]$ and $\bar{q}^2 \cdot \bar{p} + \bar{Q}^2 \cdot (1 - \bar{p}) \geq \sup_{t \geq 1} \mathbb{E}[B_t^2]$.
\end{theorem}
\begin{proof} Under the time-varying probabilistic contraction-expansion mappings with added noise functions introduced in \eqref{time_varying_map_2}, the distance between $f_t$ and $f_t^*$ can be {upper-bounded} as \begin{equation}
\label{distance_at_time_t}
\begin{aligned}
d \big ( f_t, f_t^* \big )
& = d \big ( \widetilde{\mathcal{T}}_{t - 1} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0), f_t^* \big )\\
& \overset{(a)}{=} d \big ( \overline{\mathcal{T}}_{t - 1} \big ( \widetilde{\mathcal{T}}_{t - 2} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0) \big ) + w_{t - 1}, f_t^* \big ) \\
& = \big \| \overline{\mathcal{T}}_{t - 1} \big ( \widetilde{\mathcal{T}}_{t - 2} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0) \big ) + w_{t - 1} - f_t^* \big \| \\
& \overset{(b)}{\leq} d \big ( \overline{\mathcal{T}}_{t - 1} \big ( \widetilde{\mathcal{T}}_{t - 2} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0) \big ), f_t^* \big ) + \| w_{t - 1} \| \\
& \overset{(c)}{\leq} d \big ( \overline{\mathcal{T}}_{t - 1} \big ( \widetilde{\mathcal{T}}_{t - 2} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0) \big ), f_{t - 1}^* \big ) + d \big ( f_{t - 1}^*, f_t^* \big ) + \| w_{t - 1} \| \\
& \overset{(d)}{\leq} B_{t - 1} \cdot d \big ( \widetilde{\mathcal{T}}_{t - 2} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0), f_{t - 1}^* \big ) + \epsilon_f + \epsilon_w,
\end{aligned} \end{equation} where {$\circ$ denotes the composition of linear operators,} the definition of {the} {mapping} $\widetilde{\mathcal{T}}_{t - 1}$ in \eqref{time_varying_map_2} is used in $(a)$, inequalities $(b)$ and $(c)$ are true by {the} triangular inequality, {and} $(d)$ {follows from Assumptions \ref{assump:consecutive_fixed_point} and \ref{assump:noise}} in addition to the probabilistic contraction-expansion {property of the} operator $\overline{\mathcal{T}}_{t - 1}$
and the fact that $\overline{\mathcal{T}}_{t- 1}(f_{t - 1}^*) = f_{t - 1}^*$. Furthermore, the independent random variables $B_t$ for $t \geq 0$ used in $(d)$ have the distribution {as specified in \eqref{eq:B_t}.}
Taking similar steps as in \eqref{distance_at_time_t}, we have \begin{equation}
\label{distance_at_time_t_2}
\begin{aligned}
d \big ( f_t, f_t^* \big )
& \leq B_{t - 1} \cdot \left ( B_{t - 2} \cdot d \big ( \widetilde{\mathcal{T}}_{t - 3} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0), f_{t - 2}^* \big ) + \epsilon_f + \epsilon_w \right ) + \epsilon_f + \epsilon_w \\
& \leq B_{t - 1} \cdot \Big ( B_{t - 2} \cdot \Big ( B_{t - 3} \cdot d \big ( \widetilde{\mathcal{T}}_{t - 4} \circ \cdots \circ \widetilde{\mathcal{T}}_0 (f_0), f_{t - 3}^* \big ) + \epsilon_f + \epsilon_w \Big ) + \epsilon_f + \epsilon_w \Big ) + \epsilon_f + \epsilon_w \\
& \leq \left ( \prod_{i = 0}^{t - 1} B_i \right ) \cdot d \big ( f_0, f_0^* \big ) + \left ( 1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right ) \cdot ( \epsilon_f + \epsilon_w ) \\
& \leq P_t \cdot d \big ( f_0, f_0^* \big ) + S_t \cdot ( \epsilon_f + \epsilon_w ),
\end{aligned} \end{equation} where $P_t = \left ( \prod_{i = 0}^{t - 1} B_i \right )$ and $S_t = \left ( 1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right )$ are random variables whose means and variances {will be} calculated below. Using the independence of random variables $B_t$ for $t \geq 0$, we have \begin{equation}
\label{mean_P_t}
\begin{aligned}
\mathbb{E} \left [ P_t \right ]
& = \mathbb{E} \left [ \prod_{i = 0}^{t - 1} B_i \right ]
= \prod_{i = 0}^{t - 1} \mathbb{E} \left [ B_i \right ]
= \prod_{i = 0}^{t - 1} \big ( q_i \cdot p_i + Q_i \cdot (1 - p_i) \big ) \leq \left ( \sup_t \big ( q_t \cdot p_t + Q_t \cdot (1 - p_t) \big ) \right )^t
\end{aligned} \end{equation} and \begin{equation}
\label{variance_P_t}
\begin{aligned}
{\rm Var}\,(P_t)
&= \mathbb{E} \left [ P_t^2 \right ] - \left ( \mathbb{E} \left [ P_t \right ] \right )^2 \\
& = \mathbb{E} \left [ \prod_{i = 0}^{t - 1} B_i^2 \right ] - \prod_{i = 0}^{t - 1} \big ( q_i \cdot p_i + Q_i \cdot (1 - p_i) \big )^2 \\
& \leq \prod_{i = 0}^{t - 1} \big ( q_i^2 \cdot p_i + Q_i^2 \cdot (1 - p_i) \big ) \\
& \leq \left ( \sup_t \big ( q_t^2 \cdot p_t + Q_t^2 \cdot (1 - p_t) \big ) \right )^t .
\end{aligned} \end{equation} Note that it is already shown in \eqref{eq:implication_of_assumption} that $q_t^2 \cdot p_t + Q_t^2 \cdot (1 - p_t) < 1$ {implies} $q_t \cdot p_t + Q_t \cdot (1 - p_t) < 1$, {and therefore it suffices to assume} that $\sup_t \big ( q_t^2 \cdot p_t + Q_t^2 \cdot (1 - p_t) \big ) < 1$. Furthermore, \begin{equation}
\label{mean_S_t}
\begin{aligned}
\mathbb{E} \left [ S_t \right ] &= \mathbb{E} \left [ 1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right ] \\
& = 1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} \mathbb{E} \left [ B_j \right ]\\
&= 1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} \big ( q_j \cdot p_j + Q_j \cdot (1 - p_j) \big ) \\
& \leq 1 + \sum_{i = 1}^{t - 1} \left ( \sup_j \big ( q_j \cdot p_j + Q_j \cdot (1 - p_j) \big ) \right )^{t - i} \\
& \leq \frac{1}{1 - \sup_j \big ( q_j \cdot p_j + Q_j \cdot (1 - p_j) \big )}
\end{aligned} \end{equation} and \begin{equation}
\label{variance_S_t}
\begin{aligned}
& \hspace{4.5mm} {\rm Var}\,(S_t)
= {\rm Var}\, \left (1 + \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right )
= {\rm Var}\, \left ( \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right ) \leq \mathbb{E} \left [ \left ( \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right )^2 \right ].
\end{aligned} \end{equation} Consider the sequence of independent and identically distributed random variables $\bar{B}_t$ for $t \in \{1, 2, \dots\}$ that have the distribution \begin{equation}
\bar{B}_t =
\begin{cases}
\bar{q} & \text{w.p. } \bar{p}\\
\bar{Q} & \text{otherwise}
\end{cases} \end{equation}
{such that} $\mathbb{E} [\bar{B}_t] \geq \sup_{i \geq 1} \mathbb{E}[B_i]$ and $\mathbb{E} [\bar{B}_t^2] \geq \sup_{i \geq 1} \mathbb{E}[B_i^2]$. Proceeding with \eqref{variance_S_t}, {one can write} \begin{equation} \label{eq:var_S}
\begin{aligned}
& \hspace{4.5mm} {\rm Var}\,(S_t)
\leq \mathbb{E} \left [ \left ( \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} B_j \right )^2 \right ]
\leq \mathbb{E} \left [ \left ( \sum_{i = 1}^{t - 1} \prod_{j = 1}^{t - i} \bar{B}_j \right )^2 \right ] \leq \mathbb{E} \left [ \left ( \sum_{i = 1}^{\infty} \prod_{j = 1}^{i} \bar{B}_j \right )^2 \right ] = \mathbb{E} \left [ \bar{S}^2 \right ],
\end{aligned} \end{equation} where $\bar{S} = \sum_{i = 1}^{\infty} \prod_{j = 1}^{i} \bar{B}_j$. We have $\mathbb{E} [\bar{S}] = \frac{\bar{q} \cdot \bar{p} + \bar{Q} \cdot (1 - \bar{p})}{1 - \bar{q} \cdot \bar{p} - \bar{Q} \cdot (1 - \bar{p})}$ and $\bar{S} = \bar{B}_1 \cdot (1 + \bar{B}_2 + \bar{B}_2 \cdot \bar{B}_3 + \cdots) = \bar{B}_1 \cdot (1 + \tilde{S})$, where $\tilde{S}$ is independent of $\bar{B}_1$, and {the} random variables $\bar{S}$ and $\tilde{S}$ are identically distributed but not independent of each other. Taking expectation on both sides of $\bar{S}^2 = \bar{B}_1^2 \cdot (1 + \tilde{S})^2$, and using the independence of $\tilde{S}$ and $\bar{B}_1$ and the fact that $\mathbb{E}[\bar{S}^2] = \mathbb{E}[\tilde{S}^2]$, we have \begin{equation} \label{eq:second_moment_S}
\begin{aligned}
& \mathbb{E}[\bar{S}^2] = \mathbb{E}[\bar{B}_1^2] \cdot \mathbb{E}[1 + 2\tilde{S} + \tilde{S}^2] = \big (\bar{q}^2 \cdot \bar{p} + \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \times \left (1 + \frac{2 \big ( \bar{q} \cdot \bar{p} + \bar{Q} \cdot (1 - \bar{p}) \big )}{1 - \bar{q} \cdot \bar{p} - \bar{Q} \cdot (1 - \bar{p})} + \mathbb{E}[\tilde{S}^2] \right ) \\
\Longrightarrow\ & \mathbb{E}[\bar{S}^2] = \frac{\big ( \bar{q}^2 \cdot \bar{p} + \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \cdot \big ( 1 + \bar{q} \cdot \bar{p} + \bar{Q} \cdot (1 - \bar{p}) \big )}{\big ( 1 - \bar{q}^2 \cdot \bar{p} - \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \cdot \big ( 1 - \bar{q} \cdot \bar{p} - \bar{Q} \cdot (1 - \bar{p}) \big )} .
\end{aligned} \end{equation} Putting \eqref{eq:var_S} and \eqref{eq:second_moment_S} together, {it can be concluded that} ${\rm Var}\,(S_t) \leq \frac{\big ( \bar{q}^2 \cdot \bar{p} + \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \cdot \big ( 1 + \bar{q} \cdot \bar{p} + \bar{Q} \cdot (1 - \bar{p}) \big )}{\big ( 1 - \bar{q}^2 \cdot \bar{p} - \bar{Q}^2 \cdot (1 - \bar{p}) \big ) \cdot \big ( 1 - \bar{q} \cdot \bar{p} - \bar{Q} \cdot (1 - \bar{p}) \big )}$, {which completes} the proof.
\end{proof}
{In the absence of the adversary, the probabilistic contraction-expansion mapping $\overline{\mathcal{T}}_t$ is purely a contraction with the rate $q_t$. We obtain the following corollary as a direct consequence of Theorem \ref{thm:contraction-expansion-noise}. \begin{corollary} \label{cor:thm:time-varying_operator} Consider {arbitrary} time-varying contraction mappings $\overline{\mathcal{T}}_t$ with {the} contraction constants $q_t$ and fixed points $f_t^*$. {Suppose that $q = \sup_t q_t < 1$ and that Assumption \ref{assump:consecutive_fixed_point} holds.} {{Let} the time-varying function $f_t$ evolve over time according to
\eqref{time_varying_map_2}.} For $\epsilon>0$, we define the hitting time as $T(\epsilon) = \min \left \{ T : d \big ( f_t, f_t^* \big ) < \epsilon, {\ \forall t \geq T} \right \}$. {If $\epsilon \in (\frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w), \frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w) + D]$, then}
\begin{equation}
T(\epsilon) \leq 1 + {\ln \left ( {\left ( \epsilon - \frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w) \right )}\Big/{D} \right )}\bigg/{\ln(q)}, \end{equation}
where $\epsilon_w$ is an upper bound on the norm of each noise function and $D>0$ is an upper bound {on $d \big ( f_0^*, f_0 \big )$.} \end{corollary} \begin{proof} When the time-varying mappings $\{\mathcal{T}_t\}$ are only contraction mappings, the random variable $B_t$ is equal to $q_t$ with probability $1$ in \eqref{eq:B_t}. As a result, Equation \eqref{distance_at_time_t_2} has the following form: \begin{equation}\label{distance_at_time_t_1} d \big ( f_t, f_t^* \big )\leq q^t \cdot d \big ( f_0, f_0^* \big ) + \frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w), \end{equation} where we use $q = \sup_t q_t$. {Since} the right-hand side of \eqref{distance_at_time_t_1} is
{decreasing in $t$, the hitting time $T(\epsilon)$ is upper-bounded by the minimum value of $t$ that satisfies $q^t \cdot d \big ( f_0, f_0^* \big ) + \frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w) \leq \epsilon$.}
The proof is completed by noticing that $d \big ( f_0^*, f_0 \big )$ is upper-bounded by a constant $D > 0$ and $\frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w) \leq \epsilon \leq \frac{1}{1 - q} \cdot (\epsilon_f + \epsilon_w) + D$. \end{proof} {Corollary \ref{cor:thm:time-varying_operator} formalizes how many iterations are required in the value iteration with additive noise and a time-varying contraction operator -- that can be caused by a time-varying environment -- to guarantee that the ultimate function value is in an $\epsilon$-neighborhood of the fixed point.} }
{ \begin{remark} Tighter bounds on the hitting time for Theorems \ref{thm:probabilistic_banach} and \ref{thm:contraction-expansion-noise} may be obtained by applying concentration inequalities involving higher moments instead of Chebyshev's inequality. However, since our bounds already have logarithmic dependence on the relevant parameters $p$, $Q$, $L$, $\epsilon$, and $d(f_1,f_0)$, they are sufficient for most practical purposes as long as those parameters do not scale exponentially with the problem size.
\end{remark} }
\subsection{Optimization of Time-Varying Functions with Additive Noise} \label{section_optimization_TV_continuous_noise}
Consider the unknown time-varying continuous function $f_t: \mathcal{D} \rightarrow \mathcal{R}$ with {the known} bounded Lipschitz constant $K_t$, {over the discrete-time horizon $t \in \{1, 2, \dots\}$}, where $\mathcal{D} \subset \mathbb{R}^d$ is a compact set and $\mathcal{R} \subset \mathbb{R}$. The goal is to $\epsilon$-optimize the unknown time-varying function $f_t$, i.e., to find {a} possibly time-varying point $\widehat{x}_t^*$ such that {$| f_t(\widehat{x}_t^*) - f_t(x_t^*) | \leq \epsilon$} for $\epsilon > 0$, where $x_t^* = \argmin_{x \in \mathcal{D}} f_t(x)$.
{Although the function $f_t$ is unknown, inquiries of the function values at given input points can be made in consecutive rounds, which are evaluated with added noise.}
{More precisely, at round $t \in \{1, 2, \dots\}$, we consider querying the function $f_t$ on the set of input points $\mathcal{P} = \{x_1, \dots, x_n \} \subset \mathcal{D}$,
and the revealed values are} \begin{equation} \label{eq_noise}
\widetilde{f}_t(x_i) = f_t(x_i) + N_t(x_i), \end{equation} where $N_t(x_i)$ {is some noise satisfying the following assumption. \begin{assumption}\label{assump:noise_N} The noise parameters $N_t(x_i)$ are bounded i.i.d. random variables with zero mean, i.e., $\mathbb{E}[N_t(x_i)] = 0$, for which there exists $L_N >0$ such that $\left[\sup\{N_t(x_i)\}-\inf\{N_t(x_i)\}\right]<L_N$ for all $t\in\{1, 2, \dots\}$ and $x_i \in \mathcal{P}$. \end{assumption} }
If the noise is disruptive enough, a single set of observed noisy function values $\widetilde{f}_t(x_i)$ for all $x_i \in \mathcal{P}$ may not represent the unknown target function accurately, making it impossible to $\epsilon$-optimize the function {with a small number of observations}. Furthermore, {since} the function {changes} over time, old observations may not be useful in $\epsilon$-optimizing the time-varying function {as $t$ increases}. Putting these two facts into perspective, the estimate of the target function $f_t$ at round $t - 1$, {namely} $\widehat{f}_{t - 1}$, {may need to be} updated with the new observation at round $t$,
while discarding inaccurate old observations. {We propose the following formula for estimating $f_t$:} \begin{equation} \label{eq_update} \begin{aligned}
\widehat{f}_t(x_i) = & \frac{\min\{t, T + 1\} - 1}{\min\{t, T\}} \cdot \widehat{f}_{t - 1}(x_i) + \frac{1}{\min\{t, T\}} \cdot \widetilde{f}_t(x_i) - \frac{1}{T} \cdot \widetilde{f}_{t - T}(x_i) \cdot \mathbbm{1}\{t > T\}, \end{equation} where $\mathbbm{1}\{\cdot\}$ is the indicator function. The parameter $T$, whose value is to be specified, {should be chosen such that old data is discarded} due to the time-varying nature of the function while not harming accurate estimation of the function value in the presence of noise. The computational cost of \eqref{eq_update} is {of} the same order as that of {the} moving average update in reinforcement learning, but in \eqref{eq_update} there is a need {for} storing the previous $T$ observations in order to have access to $\widetilde{f}_{t - T}(x_i)$.
The {estimation} function $\widehat{f}_t(x_i)$ changes over time and may not represent the target function {for small values of $t$}. However, there {may exist a hitting time $T$ that is used in \eqref{eq_update} after which} optimizing the estimated function $\widehat{f}_t$ $\epsilon$-optimizes the target function $f_t$ with an associated confidence level $1 - a$, where $0 < a \leq 1$. As a result, the complexity of $\epsilon$-optimizing the unknown time-varying target function $f_t$ {in the long run} is independent of the complexity of optimizing function $\widehat{f}_t$ up to the hitting time $T$. Consequently, the hitting time $T$ as well as the optimization complexity of $\widehat{f}_t$ for $t \geq T$ captures the difficulty of $\epsilon$-optimizing the target function $f_t$ rather than the cumulative optimization complexities of functions $\widehat{f}_t$ for $t < T$. Formally speaking, the hitting time $T(\epsilon, a)$ is defined below.
\begin{definition} \label{definition_hitting_time_continuous_noise} {Given $\epsilon > 0$ and $a \in (0, 1]$,} the hitting time $T(\epsilon, a)$ is defined as \begin{equation} \label{hitting_time} \begin{aligned}
T(\epsilon, a) = \min \Big \{ T : \mathbb{P} \big ( {\big | f_t(\widehat{x}_t^*) - f_t(x_t^*) \big | \leq \epsilon }\big ) \geq 1 - a, {\ \forall t \geq T } \Big \} , \end{aligned} \end{equation} where $\widehat{x}_t^* = \argmin_{x \in \mathcal{P}} \widehat{f}_t(x)$ and $x_t^* = \argmin_{x \in \mathcal{D}} f_t(x)$. \end{definition}
{To make the time-varying problem amenable to optimization, we also make the following assumption about the set of input points $\mathcal{P}$. \begin{assumption}\label{assump:granularity} For a given $\epsilon >0$, the set of input points $\mathcal{P}=\{x_1, x_2, \dots, x_n\}$ is a $\delta$-uniform grid of the function domain $\mathcal{D}$ such that $\delta < \frac{2\epsilon}{7\sqrt{d}K}$, where $K = \sup_{t \geq 1} K_t$ with $K_t$ being the Lipschitz constant of function $f_t$. \end{assumption} Recall that being a $\delta$-uniform grid means that $\mathcal{P}$ satisfies two properties: (i) ${\{x_i + \delta e_j, x_i - \delta e_j\}} \cap \mathcal{D} \subseteq \mathcal{P}$ for all $i \in {\{1, \dots, n\}}$ and
$j \in {\{1, \dots, d\}}$, where $e_1, \dots, e_d$ are the standard basis of $\mathbb{R}^d$, and {(ii)} for {every} $x \in \mathcal{D}$ there exists $x_i \in \mathcal{P}$ such that $\|x_i - x\| \leq \sqrt{d} \delta/2$. }
{ The fine granularity assumption, i.e., $\delta < \frac{2\epsilon}{7\sqrt{d}K}$, assures that there exists a grid point whose unknown function value at time $t$ is at least $\frac{\epsilon}{7}$ close to the minimum of function $f_t$.} Denote such points of the grid $\mathcal{P}$ by $\mathcal{N}_t(\frac{\epsilon}{7}) = \{x_i \in \mathcal{P}: f_t(x_i) - f_t(x_t^*) \leq \frac{\epsilon}{7} \}$ and let $\overline{\mathcal{N}}_t(\epsilon) = \{x_i \in \mathcal{P}: f_t(x_i) - f_t(x_t^*) > \epsilon \}$.
{Without loss of generality, we assume that $\overline{\mathcal{N}}_t(\epsilon) \neq \emptyset$;} otherwise, any point in $\mathcal{P}$ $\epsilon$-optimizes function $f_t$.
The following theorem presents an upper bound on {the} hitting time.
\begin{theorem} \label{theorem_hitting_time} Consider the unknown time-varying function $f_t$ with the property {
${| f_t(x) - f_{t - 1}(x) |} \leq \frac{\epsilon^3}{43 L_N^2 \cdot \ln(\frac{n}{a})}$, for all $t \geq 1$ and $ x \in \mathcal{D}$.
Given $\epsilon > 0$ and $a \in (0, 1]$, let Assumptions \ref{assump:noise_N} and \ref{assump:granularity} hold. Then, the hitting time $T(\epsilon, a)$ satisfies the inequality}
\begin{equation}\label{eq:thm_hitting_time}
T(\epsilon, a) \leq \frac{49L_N^2}{8 \epsilon^2} \cdot \ln \left ( \frac{n}{a} \right ) + 1. \end{equation}
\end{theorem}
\begin{proof} In order to find an upper bound on the hitting time $T(\epsilon, a)$,
{it is reasonable to assume that the function {variation over time} is {upper-bounded}; otherwise, there may not be enough time for learning the rapidly changing functions $\{f_t\}$.} {Assume that the time-variation} of the unknown time-varying target function $f_t$ is {upper-bounded} by \begin{equation} \label{time_varying_constraint}
{| f_t(x) - f_{t - 1}(x) |} \leq \frac{\epsilon}{7T}, \quad \forall t \geq 1, \forall x \in \mathcal{D}. \end{equation} Then, {under Assumption \ref{assump:granularity}}, the hitting event {defined} in \eqref{hitting_time} satisfies the following condition \begin{equation} \label{eq_subset}
\begin{aligned}
& \Bigg \{ \exists x_i \in \mathcal{N}_t(\frac{\epsilon}{7}) \text{ such that } \frac{1}{T} \cdot \hspace{-0.3cm}\sum_{s = t - T + 1}^t N_s(x_i) \leq \frac{2\epsilon}{7} \textbf{ and }
\frac{1}{T} \cdot \hspace{-0.3cm} \sum_{s = t - T + 1}^t N_s(x_i) \geq -\frac{2\epsilon}{7}, \forall x_i \in \overline{\mathcal{N}}_t(\epsilon) \Bigg \} \\
\subseteq & \left \{ {\big| f_t(\widehat{x}_t^*) - f_t(x_t^*) \big |} \leq \epsilon \right \}, \quad \forall t \geq T.
\end{aligned} \end{equation} The above equation {holds} true because \eqref{eq_noise} and \eqref{eq_update} result in $\widehat{f}_t(x_i) = \frac{1}{T} \cdot \sum_{s = t - T + 1}^t f_s(x_i) + \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(x_i)$ for $t \geq T$, and by \eqref{time_varying_constraint}, {one can write} \begin{equation} \label{value_difference} \begin{aligned}
& \widehat{f}_t(x_i) \leq f_t(x_i) + \frac{\epsilon}{7} + \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(x_i), \quad \forall x_i \in \mathcal{N}_t(\frac{\epsilon}{7}), \\
& \widehat{f}_t(\overline{x}_j) \geq f_t(\overline{x}_j) - \frac{\epsilon}{7} + \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(\overline{x}_j), \quad \forall \overline{x}_j \in \overline{\mathcal{N}}_t(\epsilon). \end{aligned} \end{equation} Furthermore, $f_t(\overline{x}_j) - f_t(x_i) > \frac{6\epsilon}{7}$ for all $\overline{x}_j \in \overline{\mathcal{N}}_t(\epsilon)$ and
$x_i \in \mathcal{N}_t(\frac{\epsilon}{7})$. Taking the difference of the two inequalities in \eqref{value_difference} yields that $\widehat{f}_t(\overline{x}_j) - \widehat{f}_t(x_i) > \frac{4\epsilon}{7} + \sum_{s = t - T + 1}^t N_s(\overline{x}_j) - \sum_{s = t - T + 1}^t N_s(x_i)$. If the event on the left-hand side of \eqref{eq_subset} is true, {then} $\widehat{f}_t(\overline{x}_j) - \widehat{f}_t(x_i) > 0$, which means {that} there exists $\widetilde{x}_t^* \in \mathcal{N}_t(\frac{\epsilon}{7})$ whose estimated function value is less than the estimated function value {at all points} $\overline{x}_j \in \overline{\mathcal{N}}_t(\epsilon)$. Note that the estimated function value {at} a point $\overline{x}_t^* \in \mathcal{P} \setminus \left ( \mathcal{N}_t(\frac{\epsilon}{7}) \cup \overline{\mathcal{N}}_t(\epsilon) \right )$ can be less than $\widehat{f}_t(\widetilde{x}_t^*)$, but such a point also $\epsilon$-optimizes the function $f_t$. Hence, $\widehat{x}_t^* = \argmin_{x \in \mathcal{P}} \widehat{f}_t(x)$ $\epsilon$-optimizes the function $f_t$, which means {that} the event on right-hand side of \eqref{eq_subset} is true.
{Denote the event on the left-hand side of \eqref{eq_subset} as $E_t$, whose probability can be lower-bounded as }
\begin{equation} \label{eq_left_hand_side}
{
\begin{aligned}
\mathbb{P}\{E_t\}
&{\overset{(a)}{\geq}}
\mathbb{P} \left \{ \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(x_i) \leq \frac{2\epsilon}{7}, x_i \in \mathcal{N}_t(\frac{\epsilon}{7}) \right \} \times \prod_{x_i \in \overline{\mathcal{N}}_t(\epsilon)} \mathbb{P} \left \{ \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(x_i) \geq -\frac{2\epsilon}{7} \right \} \\
&{\overset{(b)}{\geq}} \ \prod_{x_i \in \mathcal{P}} \left ( 1 - \exp \left ( - \frac{8 T \epsilon^2}{49L_N^2} \right ) \right ) \\
&{>} \ 1 - n \cdot \exp \left ( - \frac{8 T \epsilon^2}{49L_N^2} \right ),
\end{aligned}
} \end{equation} where $(a)$ is true as the added noise signals are independent of each other and $(b)$ {follows} from Hoeffding's inequality and possibly multiplying by positive terms that are less than one. Putting \eqref{eq_subset} and \eqref{eq_left_hand_side} together, we have \begin{equation} \label{eq_lower_bound_prob}
\begin{aligned}
& \mathbb{P} \left \{ {\big | f_t(\widehat{x}_t^*) - f_t(x_t^*) \big |} \leq \epsilon \right \} \geq 1 - n \cdot \exp \left ( - \frac{8 T \epsilon^2}{49L_N^2} \right ) , \forall t \geq T.
\end{aligned} \end{equation} If $1 - n \cdot \exp \left ( - \frac{8 T \epsilon^2}{49L_N^2} \right ) \geq 1 - a$ or equivalently $T \geq \frac{49L_N^2}{8\epsilon^2} \cdot \ln \left ( \frac{n}{a} \right ) $, we have \begin{equation} \label{eq_lower_bound_prob_2}
\begin{aligned}
\mathbb{P} \left \{ {\big| f_t(\widehat{x}_t^*) - f_t(x_t^*) \big |} \leq \epsilon \right \} \geq 1 - a, \quad \forall t \geq T.
\end{aligned} \end{equation} As a result, an upper bound on {the} hitting time $T(\epsilon, a)$ defined in \eqref{hitting_time} is provided as \begin{equation}
T(\epsilon, a) \leq \frac{49L_N^2}{8 \epsilon^2} \cdot \ln \left ( \frac{n}{a} \right ) + 1. \end{equation}
{We substitute the upper bound on $T(\epsilon,a)$ into \eqref{time_varying_constraint}. It follows that the above analysis is valid if} \begin{equation}
{| f_t(x) - f_{t - 1}(x) |} \leq \frac{8 \epsilon^3}{343 L_N^2 \cdot \ln(\frac{n}{a})}, \quad \forall t \geq 1, \forall x \in \mathcal{D}. \end{equation}
{This completes the proof.}
\end{proof}
\begin{remark}
Note that the cardinality of the $\delta$-grid with $\delta < \frac{2\epsilon}{7\sqrt{d}K}$ used in Theorem \ref{theorem_hitting_time}, {namely} $n = |\mathcal{P}|$, depends on $\epsilon$. As an example, if $\mathcal{D}$ can be written as the Cartesian product of $d$ intervals {of} length at most $M$ as $\mathcal{D} = \mathcal{D}_1 \times \mathcal{D}_2 \times \dots \times \mathcal{D}_d$, {then} the cardinality of the $\delta$-grid would be $n = \mathcal{O} \left ( \left (\frac{\sqrt{d}KM}{\epsilon}\right )^d \right )$, {and therefore} the upper bound {on} the hitting time in Theorem \ref{theorem_hitting_time} is given by $T(\epsilon, a) \leq \mathcal{O} \left ( \frac{dL_N^2}{\epsilon^2} \cdot \ln \left ( \frac{\sqrt{d}KM}{\sqrt[d]{a} \epsilon} \right ) \right )$. \end{remark}
{Theorem \ref{theorem_hitting_time} determines how fast the unknown function $f_t$ is allowed to change over time such that one can still learn the estimation function $\widehat{f}_t$ which is used to $\epsilon$-optimize the target function $f_t$ with a confidence level. The parameter $T$ in \eqref{eq_update} can be set to the upper bound provided in Theorem \ref{theorem_hitting_time} so that old inaccurate observations are discarded and at the same time enough observations are used for an accurate estimation of $f_t$. }
\subsection{Improved Bounds for Convex Functions} \label{sec:improved_bounds_cts} Consider the same framework as in Section \ref{section_optimization_TV_continuous_noise} {under additional assumptions to be stated here}. Let $f_t$ be a convex function for all $t \geq 1$. Denote the lower contour set of the convex function $f_t$ by $C_t(c) = \{x \in \mathcal{D}: f_t(x) - f_t(x_t^*) \leq c\}$ and the level set of the convex function $f_t$ by $L_t(c) = \{x \in \mathcal{D}: f_t(x) - f_t(x_t^*) = c\}$ for $c > 0$. Define $\overline{C}_t(c_1, c_2) = \{x \in \mathcal{D}: c_1 < f_t(x) - f_t(x_t^*) \leq c_2\}$ when $c_2 > c_1$.
{Let} $\mathcal{M}_t(c) = \{ x_i \in \mathcal{P}: x_i \in C_t(c) \}$ and $\overline{\mathcal{M}}_t(c_1, c_2) = \{ x_i \in \mathcal{P}: x_i \in \overline{C}_t(c_1, c_2) \}$.
{ \begin{assumption}\label{assump:homeomorphic} There exists $M>0$ such that $L_t(M)$ is homeomorphic to a $d$-dimensional sphere and is inside $\mathcal{D}$ for all $t \geq 1$. \end{assumption} } If $d = 1$ or {$d = 2$}, a {sphere} is defined as two distinct points {or a circle, respectively}. {Note that a lower bound on $M$ can be estimated up to a precision with high probability, but $M$ is assumed to be known to simplify the proof concepts.}
{ \begin{assumption}\label{assump:gradient_lower_bound} There exists $k>0$ such that
$\left \| \nabla f_t(x) \right \| \geq k$, for all $t\geq 1$ and $x \in \mathcal{D} \setminus C_t(\epsilon)$.
\end{assumption}
Intuitively, Assumption \ref{assump:gradient_lower_bound} requires every convex function $f_t$ to have enough curvature outside its lower contour set $C_t(\epsilon)$, so that $\| \nabla f_t(x) \|$ can be uniformly lower-bounded by a positive constant $k$ in $\mathcal{D} \setminus C_t(\epsilon)$ for all $t\geq 1$.}
Leveraging the new assumptions on {the} time-varying functions {$\{f_t\}$}, the following theorem presents a tighter upper bound on {the} hitting time compared to Theorem \ref{theorem_hitting_time}.
\begin{theorem} \label{theorem_hitting_time_convex} Consider the unknown time-varying convex function $f_t$ with {the} property
$ {| f_t(x) - f_{t - 1}(x) |} \leq \frac{ \epsilon^3}{43 L_N^2 \cdot \ln(\frac{n}{a})}$,
for all $t \geq 1$ and
$x \in \mathcal{D}$.
{Given $\epsilon > 0$ and $a \in (0, 1]$, suppose that Assumptions \ref{assump:noise_N}-\ref{assump:gradient_lower_bound} hold.
Then, the hitting time $T(\epsilon, a)$ is upper-bounded by the minimum $T$ satisfying the inequality}
\begin{equation} \label{eq:thm:convex}
\sum_{l = 0}^{l_m} n_l \cdot \exp \Big ( - \frac{2 T \big ( l + \frac{2}{7} \big )^2 \epsilon^2}{L_N^2} \Big ) \leq a, \end{equation} where { $\sum_{l = 0}^{l_m} n_l = n$ and $l_m \leq \lfloor \frac{M}{\epsilon} \rfloor - 3$ such that $n_l = \frac{m_l}{1 + m_l} \cdot n + 1$ for $l \in \{0, 1, \dots, l_m - 1 \}$ with $m_l = \frac{2^{d+ 1} \cdot K \cdot \epsilon}{k \cdot \big (M - (l + 4) \epsilon \big )}$}.
\end{theorem}
\begin{proof}
{Following the same logic as in \eqref{eq_subset} and leveraging the convexity of $\{f_t\}$, we obtain that the hitting event in \eqref{hitting_time} satisfies the condition}
\begin{equation} \label{eq_subset_convex}
\begin{aligned}
& \bigg \{ \exists x_i \in \mathcal{M}_t(\frac{\epsilon}{7}) \text{ such that } \frac{1}{T} \cdot \hspace{-0.3cm}\sum_{s = t - T + 1}^t \hspace{-0.3cm} N_s(x_i) \leq \frac{2\epsilon}{7} \textbf{ and } \frac{1}{T} \cdot \hspace{-0.3cm}\sum_{s = t - T + 1}^t \hspace{-0.3cm} N_s(x_i) \geq -\frac{2\epsilon}{7}, \forall x_i \in \overline{\mathcal{M}}_t \Big (\epsilon, 2 \epsilon \Big ) \textbf{ and } \\
& \ \
\frac{1}{T} \cdot\hspace{-0.3cm} \sum_{s = t - T + 1}^t N_s(x_i) \geq - \left ( l + \frac{2}{7} \right ) \epsilon, \forall x_i \in \overline{\mathcal{M}}_t \Big ( (l + 1) \epsilon, (l + 2)\epsilon\Big ), \forall 1 \leq l \leq \Big \lfloor \frac{M}{\epsilon} \Big \rfloor \bigg \} \\
\subseteq & \left \{ {\big| f_t(\widehat{x}_t^*) - f_t(x_t^*) \big |} \leq \epsilon \right \}, \quad \forall t \geq T.
\end{aligned} \end{equation}
{Denote the event on the left-hand side of \eqref{eq_subset_convex} as $E_t$, whose probability can be lower-bounded as} \begin{equation} \label{eq_left_hand_side_convex}
{{\small\begin{aligned}
\mathbb{P}\{E_t\}
&{\overset{(a)}{\geq}}
\mathbb{P} \left \{ \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(x_i) \leq \frac{2\epsilon}{7}, x_i \in \mathcal{M}_t(\frac{\epsilon}{7}) \right \} \times \prod_{x_i \in \overline{\mathcal{M}}_t \big (\epsilon, 2 \epsilon \big )} \mathbb{P} \left \{ \frac{1}{T} \cdot \sum_{s = t - T + 1}^t N_s(x_i) \geq -\frac{2\epsilon}{7} \right \} \\
&\hspace{0.5cm} \times\prod_{l = 1}^{ \lfloor \frac{M}{\epsilon} \rfloor} \prod_{x_i \in \overline{\mathcal{M}}_t \big ((l + 1) \epsilon, (l + 2) \epsilon \big )} \mathbb{P} \left \{ \frac{1}{T} \cdot \hspace{-2.5mm} \sum_{s = t - T + 1}^t N_s(x_i) \geq - \big (l + \frac{2}{7} \big ) \epsilon \right \} \\
&{\overset{(b)}{\geq}} \left [ 1 - \exp \left ( - \frac{8 T \epsilon^2}{49L_N^2} \right ) \right ]^{\overline{n}_0 + 1} \times \prod_{l = 1}^{l_m} \left [ 1 - \exp \left ( - \frac{2 T \big ( l + \frac{2}{7} \big )^2 \epsilon^2}{L_N^2} \right ) \right ]^{n_l} \\
&{\geq} \ 1 - \sum_{l = 0}^{l_m} n_l \cdot \exp \left ( - \frac{2 T \big ( l + \frac{2}{7} \big )^2 \epsilon^2}{L_N^2} \right )
\end{aligned}
}} \end{equation} where $(a)$ is true as the added noise signals are independent of each other and $(b)$ {follows from} Hoeffding's inequality, $\overline{n}_0$ is an upper bound on the number of grid points in the set $\overline{\mathcal{M}}_t \big (\epsilon, 2 \epsilon \big )$ and $n_0 = \overline{n}_0 + 1$, and $n_l$ is an upper bound on the number of grid points in the set $\overline{\mathcal{M}}_t \big ((l + 1)\epsilon, (l + 2)\epsilon \big )$,
where $l_m$ satisfies $\sum_{l = 0}^{l_m} n_l = n$ and $l_m \leq \lfloor \frac{M}{\epsilon} \rfloor - 3$. Note that the last nonzero $n_l$ is not a free parameter {since the sum of all $n_l$ should be $n$.}
Putting \eqref{eq_subset_convex} and \eqref{eq_left_hand_side_convex} together, we have $\mathbb{P} \left \{ {\big| f_t(\widehat{x}_t^*) - f_t(x_t^*) \big |} \leq \epsilon \right \} \geq 1 - a$ for all $ t \geq T$ {provided that} \begin{equation} \label{eq_lower_bound_prob_convex}
\begin{aligned}
\sum_{l = 0}^{l_m} n_l \cdot \exp \Big ( - \frac{2 T \big ( l + \frac{2}{7} \big )^2 \epsilon^2}{L_N^2} \Big ) \leq a,
\end{aligned} \end{equation}
which provides an upper bound on {the} hitting time $T(\epsilon, a)$ defined in \eqref{hitting_time}. As stated earlier {in \eqref{time_varying_constraint}}, the above analysis is true if $ {| f_t(x) - f_{t - 1}(x) |} \leq \frac{\epsilon}{7 T(\epsilon, a)}$ for all $t \geq 1$ and $x \in \mathcal{D}$. Using the general upper bound on {the} hitting time provided in Theorem~\ref{theorem_hitting_time}, the analysis holds if $ {| f_t(x) - f_{t - 1}(x) |} \leq \frac{ \epsilon^3}{43 L_N^2 \cdot \ln(\frac{n}{a})}$ for all $t \geq 1$ and $x \in \mathcal{D}$.
In the rest of the proof, the values of $n_l$ for $0 \leq l \leq l_m$ are computed. The key idea{s} behind finding these upper bounds {are} that the level sets $L_t \big ( (l + 1 ) \epsilon \big )$ for $0 \leq l \leq l_m + 2$ are nested {surfaces that are homeomorphic to a $d$-dimensional sphere} inside the function domain and {that} the minimum distance between any point of {a} level set and any other level set is controlled by $K$ and $k$. Let $Vol(\cdot)$ {denote} the volume of an input $d$-dimensional set and $A(\cdot)$ {denote} the area of an input $(d - 1)$-dimensional {surface}. {By convention,} the area of a { {$d$-dimensional} sphere for $d = 1$ and $d = 2$} is equal to {2 and the length of the sphere, respectively}.
{For every $l \in \{0, 1, \dots, l_m\}$, one can write}
\begin{equation}
\begin{aligned}
& n_l - 1 \leq \frac{2^d \cdot Vol\left ( \overline{C}_t\big ( (l + 1)\epsilon, (l + 3) \epsilon \big ) \right )}{\delta^d}
\leq \frac{2^d \cdot \frac{2\epsilon}{k} \cdot A\left ( P_t \big ( (l + 1)\epsilon, (l + 3) \epsilon \big ) \right )}{\delta^d}, \\
& \sum_{\overline{l} = l + 1}^{l_m} n_{\overline{l}} \geq \frac{Vol \left ( \overline{C}_t \big ((l + 3) \epsilon, M - \epsilon \big ) \right )}{\delta^d}
\geq \frac{\frac{M - (l + 4) \epsilon}{K} \cdot A\left ( P_t \big ( (l + 3) \epsilon, M - \epsilon \big ) \right )}{\delta^d},
\end{aligned} \end{equation} where {the term} $2^d$ comes from the {facts} that each $d$-dimensional cube {has} at most $2^d$ vertices and $P_t \big ( (l + 1)\epsilon, (l + 3) \epsilon \big ) \subset \overline{C}_t\big ( (l + 1)\epsilon, (l + 3) \epsilon \big )$ and $P_t \big ((l + 3) \epsilon, M - \epsilon \big ) \subset \overline{C}_t \big ((l + 3) \epsilon, M - \epsilon \big )$ are two $(d - 1)$-dimensional planes such that $A \left ( P_t \big ( (l + 1) \epsilon, (l + 3) \epsilon \big ) \right ) \leq A\left ( L_t \big ( (l + 3) \epsilon \big ) \right ) \leq A \left ( P_t \big ((l + 3) \epsilon, M - \epsilon \big ) \right )$. Then, \begin{equation} \begin{aligned}
& \frac{n_l - 1}{n - n_l} \leq \frac{n_l - 1}{\sum_{\overline{l} = l + 1}^{l_m} n_{\overline{l}}} \leq
\frac{2^{d+ 1} \cdot K \cdot \epsilon}{k \cdot \big (M - (l + 4) \epsilon \big )} = m_l
\ \Longrightarrow n_l \leq \frac{m_l}{1 + m_l} \cdot n + 1, \end{aligned} \end{equation}
{which completes the proof.} \end{proof}
\begin{remark}
{We note that, since the left-hand side of \eqref{eq:thm:convex} is monotone decreasing in $T$, a number $T$ satisfying \eqref{eq:thm:convex} always exists. By substituting the bound in \eqref{eq:thm_hitting_time} into \eqref{eq:thm:convex}, it can be verified that Theorem \ref{theorem_hitting_time_convex} provides a better bound than Theorem \ref{theorem_hitting_time} since some properties of convex functions are leveraged.} A comparison of the results {of} Theorems \ref{theorem_hitting_time} and \ref{theorem_hitting_time_convex} {along with the simulation details is} depicted in Figure \ref{figure_TV_epsilon}.
\begin{figure}
\caption{$\epsilon = L = 10^{-4}, n = 3.2 \times 10^{11}$, and $a$ varies.}
\label{figure_TV_a}
\caption{$a = 0.01$ and $\epsilon = L$ varies.}
\label{figure_TV_b}
\caption{A comparison of the upper bounds in Theorems \ref{theorem_hitting_time} and \ref{theorem_hitting_time_convex} when $M = K = 16, k = 2 \times 10^{-2}$, and $d = 2$. In Figure \ref{figure_TV_b}, the value of $n$ {depends} on $\epsilon$, which is taken into account for drawing the plots.}
\label{figure_TV_epsilon}
\end{figure}
\end{remark}
\section{The Hitting Time Analysis for Discrete Functions} \label{sec:discrete}
In this section, two variants of {stochastic time-varying models are studied for discrete functions}. In the first model, an unknown discrete function is observed with additive noise whose {estimation} function changes over time due to the {presence} of noise. In the second model, a time-varying linear model with additive noise is studied.
\subsection{Optimization of Functions with Additive Noise} \label{sec:problem_statement2}
Consider an unknown discrete function $f: \mathcal{X} \rightarrow \mathcal{R}$, where $\mathcal{X} \subset \mathbb{Z}^d$ is a bounded subset of $d$ integer tuples and $\mathcal{R} \subset \mathbb{R}$ is a subset of real numbers ($\mathbb Z$
denotes the set of integer
numbers). Denote the strict local minima and maxima, known collectively as strict local extrema, of the unknown function $f$ by $\mathcal{X}^*$ defined as \begin{equation}
\label{local_extrema_discrete}
\begin{aligned}
\mathcal{X}^* = & \{x^* \in \mathcal{X} : f(x^*) < f(x), \forall x \in \mathcal{B}(x^*) \} \cup \{x^* \in \mathcal{X} : f(x^*) > f(x), \forall x \in \mathcal{B}(x^*) \} \end{aligned} \end{equation} where $\mathcal{B}(x^*) = \cup_{j = 1}^d \{{x^* + h_j, x^* - h_j}\} \cap \mathcal{X}$ with ${h_1, \dots, h_d}$ being the standard basis of $\mathbb{Z}^d$. The goal is to find $\mathcal{X}^*$, the set of strict local extrema of the unknown function $f$. Although the function $f$ is unknown, inquiries of the function values at points in the domain can be made in consecutive rounds, which are evaluated with added noise signals that are mean zero, independent and identically distributed over time and over $\mathcal{X}$. Formally speaking, the revealed values of the target function $f$ at round $t \in \{1, 2, \dots\}$ are \begin{equation} \label{eq_noise2}
f_t(x) = f(x) + N_t(x), \quad \forall x \in \mathcal{X}, \end{equation} where $N_t(x)$ are {noise signals satisfying Assumption \ref{assump:noise_N}.}
Note that if the noise is disruptive enough, a single set of observed noisy function values $f_t(x)$ for all $x \in \mathcal{X}$ may not represent the unknown target function accurately, making it impossible to find local extrema of the function. {To address this issue, we estimate} the target function $f$ at round $t - 1$ {by leveraging} the new observations at round $t \in \{2, 3, \dots\}$ as \begin{equation} \label{eq_update2}
\widehat{f}_t(x) = \frac{t - 1}{t} \cdot \widehat{f}_{t - 1}(x) + \frac{1}{t} \cdot f_t(x), \quad \forall x \in \mathcal{X}. \end{equation} Note that the {estimation} function $\widehat{f}_t(x)$ changes over time and may not represent the shape of the unknown target function $f$ when $t$ is small. However, there {may exist a} hitting time $T$ after which {the {estimation} function $\widehat{f}_t$ shares the same set of local extrema as the target function $f$ with an associated confidence level $1 - a$, where $0 < a \leq 1$.}
As a result, the complexity of finding {the} local extrema of the target function $f$ may be irrelevant to the complexity of finding the local extrema of function $\widehat{f}_t$ before {the} hitting time $T$. Consequently, the complexity of finding the local extrema of the unknown target function $f$ is related to the hitting time $T$ as well as the computational complexity of optimizing function $\widehat{f}_T$. {Denote} the set of strict local extrema of $\widehat{f}_t$ by $\widehat{\mathcal{X}}^*_t$, {defined as} \begin{equation}
\label{estimated_local_extrema_discrete}
\begin{aligned}
\widehat{\mathcal{X}}^*_t = & \left \{\widehat{x}^* \in \mathcal{X} : \widehat{f}_t(\widehat{x}^*) < \widehat{f}_t(x), \forall x \in \mathcal{B}(\widehat{x}^*) \right \} \cup \left \{\widehat{x}^* \in \mathcal{X} : \widehat{f}_t(\widehat{x}^*) > \widehat{f}_t(x), \forall x \in \mathcal{B}(\widehat{x}^*) \right \}.
\end{aligned} \end{equation}
\begin{definition} \label{hitting_time_discrete_added_noise} {Given $a \in (0, 1],$} the hitting time $T(a)$ for an unknown discrete function $f$ is defined as \begin{equation} \label{hitting_time2} \begin{aligned}
T(a) = \min \left \{T: \mathbb{P} \left ( \widehat{\mathcal{X}}^*_t = \mathcal{X}^* \right ) \geq 1 - a, {\ \forall t \geq T } \right \}, \end{aligned} \end{equation} where $\mathcal{X}^*$ and $\widehat{\mathcal{X}}^*_t$ are defined in \eqref{local_extrema_discrete} and \eqref{estimated_local_extrema_discrete}, respectively.
\end{definition}
The hitting time $T(a)$ depends on the minimum distance of the function values of $f$ at point $x \in \mathcal{X}$ from the function values at its neighbor points. {This distance, denoted by} $\delta(x)$, {is defined as} \begin{equation} \label{delta_x_value}
\delta(x) = \min_{x' \in \mathcal{B}(x)} {| f(x) - f(x') |}. \end{equation}
{In order to simplify the analysis, we make the following assumption about the target function $f$. \begin{assumption}\label{assump:minimum_distance} The minimum distance $\delta(x)$ of function $f$ is uniformly lower-bounded by a positive number for all $x\in \mathcal{X}$, i.e., $\delta_m = \min_{x \in \mathcal{X}} \delta(x) > 0$. \end{assumption} Intuitively, Assumption \ref{assump:minimum_distance} ensures that function values of $f$ at adjacent points are different, so that their noisy values become distinguishable after enough observations.}
The following theorem presents an upper bound {on the} hitting time $T(a)$.
\begin{theorem} \label{theorem_discrete_upper_bound} Consider the time-varying function {$\widehat{f}_t$} in \eqref{eq_update2}.
{Under Assumptions \ref{assump:noise_N} and \ref{assump:minimum_distance}, given $a\in (0,1]$, the associated hitting time $T(a)$} defined in \eqref{hitting_time2}, {satisfies the inequality} \begin{equation} \label{UB_1}
T(a) \leq \frac{2L_N^2}{\delta_m^2} \cdot \ln \left ( \frac{2|\mathcal{X}|}{a} \right ), \end{equation}
{where $|\mathcal{X}|$ denotes the number of elements in the set $\mathcal{X}$.}
\end{theorem} \vspace*{2mm}
\begin{proof} In order to find an upper bound on the hitting time $T(a)$, note that the hitting event used in \eqref{hitting_time2} satisfies the condition \begin{equation} \label{eq_subset2}
\begin{aligned}
\left \{ \frac{1}{T} \cdot \Big | \sum_{t = 1}^{T} N_t(x) \Big | < \frac{\delta(x)}{2}, \ \forall x \in \mathcal{X} \right \}
\subseteq \left \{ \widehat{\mathcal{X}}^*_T = \mathcal{X}^* \right \}.
\end{aligned} \end{equation} The above equation holds because \eqref{eq_noise2} and \eqref{eq_update2} result in $\widehat{f}_T(x) = f(x) + \frac{1}{T} \cdot \sum_{t = 1}^{T} N_t(x)$, and if the magnitude of the {noise added} to the true value of function $f$ at point $x$ is less than {$\delta(x)/2$} for all $x \in \mathcal{X}$, {then} the set of local extrema of the function $\widehat{f}_T$ coincides with {the} set $\mathcal{X}^*$, the local extrema of function $f$. The probability of the event on the left-hand side of \eqref{eq_subset2} can be {lower-bounded} as \begin{equation} \label{eq_left_hand_side2}
\begin{aligned}
\mathbb{P} \left \{ \frac{1}{T} \cdot \Big | \sum_{t = 1}^{T} N_t(x) \Big | < \frac{\delta(x)}{2}, \ \forall x \in \mathcal{X} \right \}
\overset{(a)}{=} & \ \prod_{x \in \mathcal{X}} \mathbb{P} \left \{ \frac{1}{T} \cdot \Big | \sum_{t = 1}^{T} N_t(x) \Big | < \frac{\delta(x)}{2} \right \} \\
\overset{(b)}{\geq} & \ \prod_{x \in \mathcal{X}} \left ( 1 - 2 \exp \left ( - \frac{T \delta(x)^2}{2L_N^2} \right ) \right ) \\
>& 1 - 2 \sum_{x \in \mathcal{X}} \exp \left ( - \frac{T \delta(x)^2}{2L_N^2} \right ) \\
\geq & \ 1 - 2 |\mathcal{X}| \cdot \exp \left ( - \frac{T \delta_m^2}{2L_N^2} \right ),
\end{aligned} \end{equation} where $(a)$ holds because the added noise signals are independent from each other and $(b)$ follows from Hoeffding's inequality. Putting \eqref{eq_subset2} and \eqref{eq_left_hand_side2} together, we have \begin{equation} \label{eq_lower_bound_prob2}
\begin{aligned}
\mathbb{P} \left \{ \widehat{\mathcal{X}}^*_T = \mathcal{X}^* \right \} > 1 - 2 |\mathcal{X}| \cdot \exp \left ( - \frac{T \delta_m^2}{2L_N^2} \right ).
\end{aligned} \end{equation}
If $1 - 2 |\mathcal{X}| \cdot \exp \left ( - \frac{T \delta_m^2}{2L_N^2} \right ) \geq 1 - a$ or equivalently $T \geq \frac{2L_N^2}{\delta_m^2} \cdot \ln \left ( \frac{2|\mathcal{X}|}{a} \right ) $, we have {$\mathbb{P} \left \{ \widehat{\mathcal{X}}^*_T = \mathcal{X}^* \right \} > 1 - a$,} from which the upper bound in \eqref{hitting_time2} follows.
\end{proof}
\subsection{A Special Case for Unimodal Functions} \label{sec:hitting_time_analysis_convex2}
A function $f$ over a bounded set $\mathcal{X} \subset \mathbb{Z}$ is {called} unimodal if it {has only} one global minimum $x^*\in \mathcal{X}$ {and} $f(i) > f(j)$ for all $i<j \leq x^*$, {$i, j \in \mathcal{X}$}, while $f(i) < f(j)$ for all $x^*\leq i<j$. Assume that the unknown target function $f$ is unimodal over $\mathcal{X}$, which implies it has a single global minimum. As mentioned earlier, the time-varying function $\widehat{f}_t$ may not even be unimodal for small values of $t$ under disruptive noise, and therefore it could have multiple local extrema. However, the single global minimum of the function $f$ becomes known after the hitting time with an associated confidence level. In this section, a new notion of hitting time is proposed for unimodal functions that captures the complexity of finding the global minimum of the function and does not take the local extrema of the estimated function $\widehat{f}_t$ into account.
{Without loss of generality, we additionally assume that the noise signals $N_t(x)$ are continuous random variables. This implies that the estimation function $\widehat{f}_t$ has a single global minimum with probability $1$. } {Let} $\widehat{x}_t^* = \argmin_{x \in \mathcal{X}} \ \widehat{f}_t(x)$ {denote the global minimum}. The hitting time for a unimodal function $f$ is defined {below}. \begin{definition} \label{hitting_time_discrete_unimodal} {Given $a \in (0, 1],$} the hitting time $T_u(a)$ for {a} unimodal function $f$ with its global minimum at $x^* = \argmin_{x \in \mathcal{X}} \ f(x)$ and {its estimated global minimum} $\widehat{x}_t^* = \argmin_{x \in \mathcal{X}} \ \widehat{f}_t(x)$ is defined as \begin{equation} \label{hitting_time_convex2} \begin{aligned}
T_u(a) = \min \left \{T: \mathbb{P} \big ( \widehat{x}_t^* = x^* \big ) \geq 1 - a, \ \forall t \geq T \right \}. \end{aligned} \end{equation} \end{definition}
The distance of the function value at point $x \in \mathcal{X}$ from the minimum function value is denoted by $\Delta(x)$, which is defined as \begin{equation}
\Delta(x) =
\begin{cases}
f(x) - f(x^*), \quad & \text{if } x \in \mathcal{X} \setminus \{x^*\}, \\
\min \{ f(x^* - 1) - f(x^*), f(x^* + 1) - f(x^*) \}, & \text{if } x = x^* .
\end{cases} \end{equation}
The following theorem presents an upper bound {on the} hitting time for a unimodal function.
\begin{theorem} \label{theorem_unimodal_discrete} Consider the time-varying function {$\widehat f_t$} defined in \eqref{eq_update2} with $f$ being a unimodal function.
{Suppose that Assumptions \ref{assump:noise_N} and \ref{assump:minimum_distance} hold. Given $a\in (0,1]$, the associated hitting time $T_u(a)$}
satisfies the inequality $T_u(a) \leq T$, where $T$ is the smallest number such that \begin{equation} \label{UB_2}
\exp \left ( - \frac{\delta_m^2 T}{2L_N^2} \right ) + 2 \sum_{\substack{i \in \left [ \left \lfloor {{|\mathcal{X}|}/{2}} \right \rfloor \right ] }} \exp \left ( - \frac{i^2 \delta_m^2 T}{2L_N^2} \right ) \leq a. \end{equation}
\end{theorem}
\begin{proof} By construction, we have $\Delta(x) > 0$ for all $x \in \mathcal{X}$. In order to find an upper bound on the hitting time $T_u(a)$, {note that} the hitting event used in \eqref{hitting_time_convex2} satisfies the condition \begin{equation} \label{eq_subset_convex12}
\begin{aligned}
\bigg \{ & \frac{1}{T} \cdot \sum_{t = 1}^{T} N_t(x) > - \frac{\Delta(x)}{2}, \forall x \in \mathcal{X} \setminus \{ x^* \} \textbf{ and } \frac{1}{T} \cdot \sum_{t = 1}^{T} N_t(x^*) < \frac{\Delta(x^*)}{2} \bigg \} \subseteq \Big \{ \widehat{x}_T^* = x^* \Big \}.
\end{aligned} \end{equation}
{Denote the event on the left-hand side of \eqref{eq_subset_convex12} as $E_t$, whose probability can be lower-bounded as}
\begin{equation} \label{eq_left_hand_side_convex12} {\small
\begin{aligned}
{ \mathbb{P}\{E_t\}} &{\overset{(a)}{=}} \ \mathbb{P}\left \{ \frac{1}{T} \cdot \sum_{t = 1}^{T} N_t(x^*) < \frac{\Delta(x^*)}{2} \right \} \times \prod_{ \substack{x \in \mathcal{X} \setminus \{x^*\} }} \mathbb{P} \left \{ \frac{1}{T} \cdot \sum_{t = 1}^{T} N_t(x) > - \frac{\Delta(x)}{2} \right \} \\
&{\overset{(b)}{\geq}} \left ( 1 - \exp \left ( - \frac{T \Delta(x^*)^2}{2L_N^2} \right ) \right ) \times \hspace{-4mm} \prod_{\substack{x \in \mathcal{X} \setminus \{x^*\} }} \left ( 1 - \exp \left ( - \frac{T \Delta(x)^2}{2L_N^2} \right ) \right ) \\
&{>} \ 1 - \exp \left ( - \frac{T \Delta(x^*)^2}{2L_N^2} \right ) - \sum_{\substack{x \in \mathcal{X} \setminus \{x^*\}}} \exp \left ( - \frac{T \Delta(x)^2}{2L_N^2} \right ) \\
&{\overset{(c)}{\geq}} \ 1 - \exp \left ( - \frac{T \delta_m^2}{2L_N^2} \right ) - \sum_{\substack{x \in \mathcal{X} \setminus \{x^*\}}} \exp \left ( - \frac{T (x - x^*)^2 \delta_m^2}{2L_N^2} \right ) \\
&{\overset{(d)}{\geq}} \ 1 - \exp \left ( - \frac{T \delta_m^2}{2L_N^2} \right ) - 2 \sum_{\substack{i \in \left [ \left \lfloor {{|\mathcal{X}|}/{2}} \right \rfloor \right ] }} \exp \left ( - \frac{T i^2 \delta_m^2}{2L_N^2} \right ) \end{aligned}} \end{equation}
where $(a)$ holds true by the independence property of the added noise signals, $(b)$ is due to Hoeffding's inequality, $(c)$ is true because function $f$ is unimodal, $\Delta(x^*) \geq \delta_m$, and $\Delta(x) \geq (x - x^*) \delta_m$, and $(d)$ results from minimizing the equation with respect to {all possible values of $x^*$, which gives rise to $x^* = \left \lceil |\mathcal{X}|/{2} \right \rceil$} (taking the ceiling corresponding to the summation through $\left \lfloor {{|\mathcal{X}|}/{2}} \right \rfloor$). Putting \eqref{eq_subset_convex12} and \eqref{eq_left_hand_side_convex12} together concludes the proof.
\end{proof} \begin{remark}
{A number $T$ that satisfies \eqref{UB_2} must exist because the left-hand side of \eqref{UB_2} approaches $0$ when $T\to\infty$. Also, by substituting the bound in \eqref{UB_1} into \eqref{UB_2}, it can be verified that Theorem \ref{theorem_unimodal_discrete} provides a better bound than Theorem \ref{theorem_discrete_upper_bound} as the properties of unimodal functions are leveraged.} A comparison of the results of Theorems \ref{theorem_discrete_upper_bound} and \ref{theorem_unimodal_discrete} {along with the details of the simulation model is} depicted in Figure \ref{fig:unimodal_vs_general1}.
\end{remark}
\begin{figure}
\caption{$|\mathcal{D}| = 10000$ and $a$ varies.}
\label{fig:sub1}
\caption{$a = 0.01$ and $|\mathcal{D}|$ varies.}
\label{fig:sub2}
\caption{A comparison of the upper bounds in Theorems \ref{theorem_discrete_upper_bound} and \ref{theorem_unimodal_discrete} when $L_N = 0.02$ and $\delta_m = 0.01$.}
\label{fig:unimodal_vs_general1}
\end{figure}
\subsection{Time-Varying Linear Model with Additive Noise} \label{sec:linear-model}
{In this section, we study a linear model of time-variation and analyze the hitting time under shape-dominant operators.}
{Consider} the Hilbert space $L^2(\mathcal{X})$, where the inner product of $f$ and $g \in L^{2}(\mathcal{X})$ is defined by $\langle f, g\rangle = \int_{\mathcal{X}} f(x) g(x) dx$. We use the same inner product notation when {the domain $\mathcal{X}$ is a discrete set}. For any nonzero functions $f, g\in L^2$, there exists a bounded linear transformation $\mathcal{T}: L^2(\mathcal{X}) \to L^2(\mathcal{X})$ such that $\mathcal{T} f = g$. In fact, one such transformation is given by $\mathcal{T} h = \frac{\langle f, h\rangle }{\langle f, f\rangle} g$. Since the zero function is trivial to optimize, the restriction to linear transformation is a general framework that captures the varying nature of nonlinear functions.
We further note that for any scalar $\lambda > 0$, the functions $f$ and $\lambda f$ share the same set of local minima. Rescaling by a positive number does not affect the complexity of the optimization problem. Hence, restricting the linear operators $\mathcal{T}$ to have norm $1$ incurs no loss of generality.
In practice, the functions to be minimized are often not specified exactly, due to the rounding error of numerical computation or the inexact nature of the model. We model this limitation by {the} random perturbation $w$ sampled from some distribution. Given a sequence of linear operators {$\{\mathcal{A}_t\}$}
such that $\@ifstar{\oldnorm}{\oldnorm*}{\mathcal{A}_{ {t}}} = \sup_{f \neq 0} \frac{\@ifstar{\oldnorm}{\oldnorm*}{\mathcal{A}_{ {t}} f}}{\@ifstar{\oldnorm}{\oldnorm*}{f}} = 1$ together with the perturbations {$\{w_t\}$}, consider the following model of linear time variation: \begin{equation}\label{eq:noisy_linear_operator} f_{t+1} = \mathcal{T}_t f_t = \mathcal{A}_t f_t + w_t, \quad \text{for } t \in \{0, 1, \dots\}. \end{equation} \emph{What properties the operators {$\{\mathcal{T}_t\}$} should satisfy in order for $f_{t}$ to almost reach a target function $f^*$ at time {$t=T$}?} We will provide an answer using the notion of shape dominant operator. To understand the importance of this problem, suppose that at time $t=0$, we optimize $f_0$ around a poor local minimum $x_0^*$. If at $t= {T}$, the function $f_{ {T}}$ becomes convex with a unique global minimum $x_{ {T}}^*$, then no matter how optimization is carried out for $f_1$ through $f_{ {T}-1}$, minimizing $f_{ {T}}$ will yield the same solution $x_{ {T}}^*$, which is globally optimal. The effect of minimizing $f_{ {T}}$ cancels out the sub-optimality at time {$t=0$}. Moreover, under some technical conditions, the global solution at time ${ {T}}$ can be used to find global solutions at future times using tracking methods~\cite{ding2021escaping, fattahi2020absence, massicot2019line}. In other words, the shape of $f_{ {T}}$ affects the complexity of online optimization in the long run.
{Now, we introduce the notion of shape dominant operator. Consider time-varying functions $\{f_t\}$ defined on a finite discrete set $\mathcal{X}= \{x_1, \ldots, x_n\}\subset \mathbb{Z}^d$. Equivalently, $f_t$ can be viewed as a vector in $\mathbb{R}^{n}$. For the noisy linear operator $\mathcal{T}_t$ defined in \eqref{eq:noisy_linear_operator}, let $A_t$ denote the associated matrix of the linear operator $\mathcal{A}_t$ represented under the standard basis, for $t\in\{1,2,\dots\}$. Let $P(A_t, w_t)$ denote the joint distribution of $A_t$ and $w_t$. } \begin{definition}\label{def:shape-dominant} The joint distribution $P(A, w)$ is said to be $(\delta, \sigma, f^*, \phi^*)$ shape dominant if the following conditions hold with probability $1$: {1) the unit vector $f^*$ is the eigenvector of $A$ associated with eigenvalue $1$; 2) the unit vector $\phi^*$ is the eigenvector of $A^\top$ associated with eigenvalue $1$; 3) $\langle f^*, \phi^*\rangle \neq 0$; 4) all other eigenvalues of $A$ have {absolute values} less than $1-\delta$; 5) conditioned on $A$, the noise $w$ has zero mean and is sub-Gaussian with parameter $\sigma^2$ in the sense that for all $u\in \mathbb{R}^{n}$ with $\@ifstar{\oldnorm}{\oldnorm*}{u} \leq 1$, {it holds that} $\mathbb{E}[\exp(s u^\top w)] \leq \exp\left(\frac {\sigma^2s^2}{2}\right)$.} \end{definition}
\begin{theorem}\label{thm:shape-dominant}
{For the time-varying operator $\mathcal{T}_t$ defined in \eqref{eq:noisy_linear_operator}, suppose that $P(A_t, w_t)$ is $(\delta, \sigma_t, f^*, \phi^*)$ shape dominant and independent for all $t \in \{0,1, \dots, T-1 \}$,} then, \begin{align}
{f_T} = \frac{\langle \phi^*, f_0 + \sum_{t=0}^{ {T-1}} w_t \rangle}{\langle \phi^*, f^*\rangle} f^* + v + w, \end{align} where $\@ifstar{\oldnorm}{\oldnorm*}{v}\leq (1-\delta)^{ {T}} \left(\@ifstar{\oldnorm}{\oldnorm*}{f_0} + \frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle}\right)$ and $w$ is sub-Gaussian with parameter $\sigma^2 = \left( 1 + \frac{1}{\langle \phi^*, f^*\rangle^2} \right) \sum_{t=0}^{ {T-1}}(1-\delta)^{2( {T}-t)}\sigma_t^2$. \end{theorem}
\begin{proof}
{Consider the subspace $\mathcal{G} = \{g \in \mathbb{R}^{n}, \langle \phi^*, g\rangle=0\}$.} Since $\langle \phi^*, f^*\rangle \neq 0$, we have $f^* \notin \mathcal{G}$. Since $\phi^*$ is the eigenvector of {$A_t^\top$}, the following holds for all $g \in \mathcal{G}$ \begin{align} \langle \phi^*, {A_t}g\rangle = \langle {A_t}^\top \phi^*, g\rangle = \langle \phi^*, g\rangle = 0. \end{align} Therefore, $ {A_t} g \in \mathcal{G}$, and $\mathcal{G}$ is an invariant subspace of {$A_t$} in $\mathbb{R}^{n}$ {for $t\in \{0,1, \dots, T-1 \}$}. Let a basis of $\mathcal{G}$ be given by $\{g_1, \ldots, g_{n-1}\}$. Then, $B = \{f^*, g_1, \ldots, g_{n-1}\}$ is a basis of $\mathbb{R}^{n}$, under which the linear operator {$A_t$} takes the form \begin{align}
{A_t} = \begin{bmatrix*} 1 & 0 & \ldots & 0 \\ 0 \\ \vdots & & {A_t^\prime} \\ 0 \\ \end{bmatrix*}, \label{eq:A-decompose} \end{align} where {$A_t^\prime$} is a random matrix in $\mathbb{R}^{(n-1) \times (n-1)}$. With a slight abuse of notation, we regard {$A_t'$} as a linear transformation from $\mathcal{G}$ to $\mathcal{G}$. Note that $ {\@ifstar{\oldnorm}{\oldnorm*}{A_t'}} \leq 1-\delta$ because all other eigenvalues of {$A_t$} have norm less than $1-\delta$. Under the basis $B$, $f_0$ has the representation {$f_0 =\frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle} f^* + g$, where $g\in \mathcal{G}$.} As a result, \begin{equation} \begin{aligned}
{f_T} & = \mathcal{T}_{ {T-1}} \circ \cdots \circ \mathcal{T}_0 f_0\\ & = A_{ {T-1}} \cdots A_0 f_0 + \sum_{ {t=0}}^{ {T-1}} A_{ {T-1}} \cdots {A_{t+1} w_t} \\ & = \frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle} f^* + A_{ {T-1}}' \ldots A_0' g + {\sum_{t=0}^{T-1} A_{T-1} \cdots A_{t+1} w_t}. \end{aligned} \end{equation} The norm estimate gives rise to \begin{equation} \begin{aligned} \@ifstar{\oldnorm}{\oldnorm*}{A_{ {T-1}}' \ldots A_0' g} &\leq (1 - \delta)^{ {T}} \cdot \@ifstar{\oldnorm}{\oldnorm*}{g} \leq (1 - \delta)^{ {T}} \cdot \left( \@ifstar{\oldnorm}{\oldnorm*}{f_0} + \@ifstar{\oldabs}{\oldabs*}{\frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle}}\right) , \end{aligned} \end{equation} where the triangle inequality is used. Similarly, one can write {$w_t = \frac{\langle \phi^*, w_t\rangle}{\langle \phi^*, f^*\rangle} f^* + h_t$, where $h_t \in \mathcal{G}$.} We have \begin{equation} A_{ {T-1}} \cdots {A_{t+1} w_t} = \frac{\langle \phi^*, {w_t}\rangle}{\langle \phi^*, f^*\rangle} f^* + A'_{ {T-1}} \cdots {A'_{t+1} h_t}. 
\end{equation} For all $u\in \mathbb{R}^{n}$ with $\@ifstar{\oldnorm}{\oldnorm*}{u} \leq 1$, it holds that \begin{equation} \begin{aligned} & \quad \, \mathbb{E} \left [\exp \left (s \left \langle u, A_{ {T-1}}' \cdots {A_{t+1}' h_t} \right \rangle \right ) \right ] \\ & = \mathbb{E} \left [\exp \left (s \left \langle {A_{t+1}'^\top} \cdots A_{ {T-1}}'^\top u, {h_t} \right \rangle \right ) \right ] \\ & = \mathbb{E} \left [\exp \left (s \left \langle A_{ {t+1}}'^\top \cdots A_{ {T-1}}'^\top u, {w_t} - \frac{\langle \phi^*, {w_t}\rangle}{\langle \phi^*, f^*\rangle} f^* \right \rangle \right ) \right ] \\ & = \mathbb{E} \Bigg[\exp \left (s \left \langle A_{ {t+1}}'^\top \cdots A_{ {T-1}}'^\top u , {w_t} \right \rangle \right ) \times \exp \left (s \left \langle - \frac{\langle A_{ {t+1}}'^\top \cdots A_{ {T-1}}'^\top u, f^* \rangle}{\langle \phi^*, f^*\rangle} \phi^*, {w_t} \right \rangle \right )\Bigg] \\ & \leq \exp\left(\frac {\sigma_{ {t}}^2s^2 \@ifstar{\oldnorm}{\oldnorm*}{A_{ {t+1}}'^\top \cdots A_{ {T-1}}'^\top u}^2}{2}\right) \times \exp \left( \frac{\sigma_{ {t}}^2 s^2}{2} \left(\frac{\langle A_{ {t+1}}'^\top \cdots A_{ {T-1}}'^\top u, f^* \rangle}{\langle \phi^*, f^*\rangle}\right)^2\right)\\ & \leq \exp\left(\frac {\sigma_{ {t}}^2s^2(1-\delta)^{2( {T-t})}\left( 1 + \frac{1}{\langle \phi^*, f^*\rangle^2} \right)}{2}\right), \end{aligned} \end{equation}
{which} implies that $ {A'_{T-1} \cdots A'_{t+1} h_t}$ is sub-Gaussian with parameter $\sigma_{ {t}}^2(1-\delta)^{2( {T-t})}\left( 1 + \frac{1}{\langle \phi^*, f^*\rangle^2} \right)$, and thereby, $ {\sum_{t=0}^{T-1} A_{T-1}' \cdots A_{t+1}' h_t}$ is sub-Gaussian with parameter $\sigma^2 = \left( 1 + \frac{1}{\langle \phi^*, f^*\rangle^2} \right) {\sum_{t=0}^{T-1}(1-\delta)^{2(T-t)}\sigma_t^2}$.
{This completes the proof.} \end{proof}
Theorem~\ref{thm:shape-dominant} states that if the time-varying model is given by shape dominant operators, the function {$f_T$} decomposes into the sum of dominating shape $f^*$, a bias term $v$ that gradually fades away, and a cumulating noise term that discounts noise in previous iterations. We provide a bound {on the} hitting time {below}. \begin{theorem}\label{thm:hitting} Under the same assumptions made in Theorem~\ref{thm:shape-dominant}, {for a given $\epsilon >0$, define the associated hitting time $T(\epsilon)$ as}
\begin{align} \label{eq:hitting-time-def}
{T(\epsilon) = \min \big \{T: \exists \lambda\in \mathbb{R} \text{ s.t. }\@ifstar{\oldnorm}{\oldnorm*}{f_T - \lambda f^*} < \epsilon \big \}}.
\end{align}
{Then, for all $T> \frac{\log{2\left(\@ifstar{\oldnorm}{\oldnorm*}{f_0} + \@ifstar{\oldabs}{\oldabs*}{\frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle}}\right)} - \log \epsilon}{\log \frac{1}{1-\delta}}$, it holds that} {\small{\begin{equation}
{ \mathbb{P}(T(\epsilon) \geq T) \leq C_n \exp \left( - \frac{\epsilon^2}{32 \left( 1 + \frac{1}{\langle \phi^*, f^*\rangle^2} \right) \sum_{t=0}^{T-1}(1 - \delta)^{2(T-t)}\sigma_t^2} \right) , } \end{equation}}}
\noindent where $C_n$ is a universal constant depending only on $n$. \end{theorem}
\begin{proof}
{By Theorem~\ref{thm:shape-dominant}, for a fixed number $T$, we have the following decomposition for $f_T$:} \begin{equation}
f_{ {T}} = \frac{\langle \phi^*, f_0 + {\sum_{t=0}^{T-1} w_t} \rangle}{\langle \phi^*, f^*\rangle} f^* + v^{( {T})} + w^{( {T})}, \end{equation} where $\@ifstar{\oldnorm}{\oldnorm*}{v^{( {T})}} < (1-\delta)^{ {T}} \left(\@ifstar{\oldnorm}{\oldnorm*}{f_0} + \@ifstar{\oldabs}{\oldabs*}{\frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle}}\right)$ and {$w^{(T)} = \sum_{t=0}^{T-1} A_{T-1}' \cdots A_{t+1}' h_t$} is sub-Gaussian with parameter $\sigma^2 = \left( 1 + \frac{1}{\langle \phi^*, f^*\rangle^2} \right) {\sum_{t=0}^{T-1}(1-\delta)^{2(T-t)}\sigma_t^2}$.
From the definition of the hitting time $T(\epsilon)$ in \eqref{eq:hitting-time-def}, we have \begin{equation}
{\mathbb{P}(T(\epsilon) < T) \geq \mathbb{P}\left(\@ifstar{\oldnorm}{\oldnorm*}{v^{(T)}} < \epsilon/2, \@ifstar{\oldnorm}{\oldnorm*}{w^{(T)}} < \epsilon/2\right).} \end{equation}
When $ {T}> \frac{\log{2\left(\@ifstar{\oldnorm}{\oldnorm*}{f_0} + \@ifstar{\oldabs}{\oldabs*}{\frac{\langle \phi^*, f_0\rangle}{\langle \phi^*, f^*\rangle}}\right)} - \log \epsilon}{\log \frac{1}{1-\delta}}$, the bound $\@ifstar{\oldnorm}{\oldnorm*}{v^{( {T})}} < \epsilon/2$ is satisfied. Since $w^{( {T})}$ is sub-Gaussian with parameter $\sigma^2$, the tail-bound for $w^{( {T})}$ yields {\small \begin{equation} { { \mathbb{P} \left(\@ifstar{\oldnorm}{\oldnorm*}{w^{({T})}} < \epsilon/2\right) = 1 - \mathbb{P}\left(\@ifstar{\oldnorm}{\oldnorm*}{{w^{{(T)}}}} > \epsilon/2\right) \geq 1 - C_n \exp \left( -\frac{\epsilon^2}{32 \sigma^2} \right) , }} \end{equation}}
\noindent where $C_n$ is a universal constant depending only on $n$.
{This completes the proof.} \end{proof}
To understand the above bound, consider a fixed {time $T$}. When {$\sigma_t$} decreases, the bound becomes smaller. As a result, with a smaller random perturbation, it is more likely to reach the target function faster.
{When} $\epsilon$ increases, the bound {also} becomes smaller, which matches the intuition that a larger neighborhood is easier to reach than a smaller one.
\begin{remark}
{The analysis in this section}
can be generalized {to} continuous functions by working through eigenfunctions as opposed to eigenvectors.
We briefly discuss this in the special case where {${L}^2(\mathcal{X})$} has a finite number of bases.
Let the inner product be $\langle f , g \rangle = \int_{ {\mathcal{X}}} f(x) \cdot g(x) dx$ and the function space to have an orthonormal basis given by the set of functions $\{ u_1, u_2, \dots, u_n \}$ {such that}
\begin{equation}
\label{orthonormal_basis}
\langle u_i, u_j \rangle = \int_{ {\mathcal{X}}} u_i(x) \cdot u_j(x) dx =
\begin{cases}
1 & \text{if } i = j\\
0 & \text{if } i \neq j
\end{cases}. \end{equation} Note that any function can be decomposed into a linear combination of the basis functions, i.e., {$f(x) = \sum_{j = 1}^n a_j \cdot u_j(x)$,} where the coefficients can be stacked into a column vector $a = [a_1, a_2, \dots, a_n]^T$. Define the matrix $A$ representing the linear operator $\mathcal{T}$ with {the} elements \begin{equation}
\label{A_matrix}
A_{ij} = \langle u_i, \mathcal{T} (u_j) \rangle = \int_{ {\mathcal{X}}} u_i(x) \cdot \mathcal{T} \big (u_j(x) \big ) dx. \end{equation}
{There exists a vector $b = [b_1, b_2, \dots, b_n]^T$ such that applying the operator $\mathcal{T}$ on the decomposed form of $f(x)$ yields} \begin{equation}
\label{apply_operator}
\mathcal{T} \big ( f(x) \big )
= \sum_{j = 1}^n a_j \cdot \mathcal{T} \big ( u_j(x) \big )
= \sum_{j = 1}^n b_j \cdot u_j(x). \end{equation} Taking the inner product of both sides of the above equation with an arbitrary basis function $u_i$ {leads to} \begin{equation}
\sum_{j = 1}^n a_j \hspace{-0.25mm} \cdot \hspace{-0.25mm} \big \langle u_i, \mathcal{T} \big ( u_j \big ) \big \rangle
= \sum_{j = 1}^n b_j \hspace{-0.25mm} \cdot \hspace{-0.25mm} \langle u_i, u_j \rangle \Rightarrow \sum_{j = 1}^n a_j \hspace{-0.25mm} \cdot \hspace{-0.25mm} A_{ij} = b_i. \end{equation} The above equation is the matrix multiplication $Aa = b$, which is the matrix {associated with} $\mathcal{T}$ acting upon the function $f(x)$ expressed in the orthonormal basis. If $f(x)$ is an eigenfunction of transformation $\mathcal{T}$ with eigenvalue $\lambda$, we have $Aa = \lambda a$. {Hence, the results of Theorem \ref{thm:hitting} can be applied to continuous functions in a function space with a finite number of bases.} The extension to the case with an infinite, but countable, number of bases is similar under some technical assumptions.
\end{remark}
\section{Simulation Results} \label{sec:simulation}
In this section, the adversarial attack on the computation of value iteration is simulated for an agent interacting with an environment depicted in Figure \ref{fig:ad_VI}. \begin{figure}
\caption{{(a)} the agent interacts with an environment, {(b) the agent} has a set of four actions in each state.}
\label{fig:ad_VI}
\end{figure} \begin{figure}
\caption{A comparison of value iteration convergence in the absence and presence of an adversary.}
\caption{The effect of an adversary versus the number of states.}
\caption{The effect of an adversary on the convergence of value iteration.}
\label{first}
\label{fig:ad_VI_sub1}
\label{second}
\label{fig:ad_VI_sub2}
\label{fig:unimodal_vs_general}
\end{figure} The agent can take any of the four actions Up, Down, Right, and Left in each of the non-terminal states. By taking an action, the agent {moves} one block toward the desired action $90\%$ of the time, or {moves} one block to the right or left of the desired taken action uniformly at random $10\%$ of the time. The agent bounces back to its original state before taking an action if movement in the direction described above is not possible due to the walls marked with diagonal strips or exiting the environment. The agent is incurred a cost of $0.02$ by each move and there are two terminal states in which the agent receives an immediate reward of +1 and -1 as shown in Figure \ref{fig:ad_VI}. In order to determine the optimal path for the agent starting from any of the states, the value function is calculated using synchronous value iteration. In our simulated example, an adversary contaminates the value function by expanding up to $Q = 1.8$ in a random direction, withholding the contraction, $20\%$ of the time. As a result, the distance of the time-varying value function from the true value function based on the $L^2$-norm is affected negatively as depicted in Figure \ref{fig:ad_VI_sub1}, where the starting function is the all-zero function in our simulations and the average and standard deviations are estimated by 1000 rounds of independent runs of the value iteration. Furthermore, the negative effect of the adversary is {worsened} by increasing the cardinality of the state space in the studied example. In order to show this, the number of intermediate blocks in Figure \ref{fig:ad_VI} is changed from {1} to {10}, i.e., the number of states is changed from {9} to 27, and the distance between the value function at the tenth iterate and the true value function is depicted in Figure \ref{fig:ad_VI_sub2}. 
As {shown} in Figure \ref{fig:ad_VI_sub2}, $\mathbb{E} \big [d(V_{10}^a, V^*) \big ] - d(V_{10}, V^*)$ has an increasing trend as the number of states increases, where $V_{10}^a$ is value function at the tenth iterate in the presence of an adversary and $V_{10}$ is the corresponding function in the absence of an adversary, and the dependence of value function on the number of states is eliminated to keep the notations simple.
\section{Conclusion and Future work} \label{sec:conclusion}
Multiple models of stochastic time variation along with their corresponding notions of hitting time are studied in this {paper}. In particular, we develop
a probabilistic Banach fixed-point theorem {that proves the convergence of the value iteration method with} a probabilistic contraction-expansion transformation {with an associated confidence level, which} finds applications to adversarial attacks on computation of {the} value iteration {method}. {We prove that the hitting time of the value function in the value iteration method with a probabilistic contraction-expansion transformation is logarithmic in terms of the inverse of a desired precision.} Furthermore, we {develop upper bounds on} the hitting time for optimization of unknown discrete and continuous time-varying functions whose noisy evaluations are revealed over time. {The upper bound for a discrete function is logarithmic in terms of the cardinality of the function domain and the upper bound for a continuous function is super-quadratic (but sub-cubic) in terms of the inverse of a desired precision.} In this framework, we show that convex functions are learned faster than non-convex functions. Finally, {an upper bound on} the hitting time is {developed} for a time-varying linear model with additive noise {under} the notion of shape dominance for discrete functions. {Future research directions include:} studying how an environment with time-varying parameters {modeled by} transition probabilities and rewards affects the Bellman transformation and its fixed point, obtaining upper bounds on the {rate} of change {of the} time-varying parameters such that the time-varying fixed points are achievable after a hitting time{, and studying} the effect of an adversary in applications of reinforcement learning whose computations are performed via edge computing.
\end{document} |
\begin{document}
\begin{titlepage}
\title{ Are there Floquet Quanta?}
\begin{abstract}
The Zeldovich hypothesis is revised and the meaning of quasi energy spectra is discussed. The observation of Floquet resonance for microobjects in quickly oscillating external fields might bring new information about the time scale of hypothetical quantum jumps.
\vskip 1cm
PACS number(s): 42.50.Lc, 33.55.Be, 33.80.Ps, 42.62.-b.
\vskip 1cm
{\it Preprint} CINVESTAV: CIEA-FIS/97-06
\vskip 1cm
\end{abstract}
\end{titlepage}
When considering the absorption (emission) spectra one usually has in mind a static (stationary) system. "In itself" (i.e., when isolated from the rest of the universe) it is described by a time independent Hamiltonian. When submerged in external fields, however, it starts to radiate: the differences between the eigenvalues of the Hamilton operator define the energies of emitted (absorbed) quanta.
The physical reality though, is not limited to static (stationary) systems. In fact, if any physical theory was at all formulated, this is only since we live in a variable universe, where the external fields can be changed and the experiments can be performed. An intriguing question thus arises: can a system with a time dependent Hamiltonian have a similar resonance capacities as the static systems?
A known attempt to give an answer belongs to Zeldovich ~\cite{Zel} and concerns the systems with periodic Hamiltonians. The unitary evolution operators $U(t,t_0)$ obey:
\begin{equation} {dU(t,t_0) \over dt} = - i H(t) U(t,t_0) \end{equation}
\begin{equation} H(t) = H(t+T) \quad \quad (t \in {\bf R}) \end{equation}
According to the idea of Zeldovich the properties of the periodic system (1-2) are determined by its Floquet operator, i.e., the unitary operator $U(T)=U(T,0)$ describing the evolution within the complete period $T$. Obviously:
\begin{equation} U(T) = e^{-iTF} \end{equation} where the self-adjoint operator $F$ is called the Floquet Hamiltonian. The hypothesis of Zeldovich tells that the eigenvalues of $F$, though not energies themselves (the proposed term is {\it quasienergies} ~\cite{Zel}), determine the resonance spectrum of the periodic system (1) modulo multiples of $\hbar \omega$ (where $\omega=2 \pi / T$).
The idea, though intuitive, leaves some questions open. In the first place, the definition of $F$ is non-unique. Every $U(T)$ in (3) admits an infinity of Floquet Hamiltonians (corresponding to the $n \hbar \omega$ tolerance in the spectrum) and it is not obvious which $F$, if any, has the energy interpretation. In fact, in some recently studied cases, the Floquet generator which most naturally describes the evolution, precisely cannot enter into a conservative balance with the external radiation. The first such cases were found in ~\cite{BD1,Dav} by observing that for the charged Schr\"odinger's particle in a magnetic field ${\bf B}(t)$ uniformly rotating around a fixed vector ${\bf n}$ ($|{\bf n}|=1$), the evolution operator becomes:
\begin{equation} U(t,0) = e^{-i \omega t {\bf n} \cdot {\bf M}} e^{-itF} \end{equation} where $F$ is a linear combination of three 1-dimensional oscillators, $F=H_1+H_2-H_3$ (one sign negative!). Every $t=nT=2n \pi / \omega$ the first factor in (4) reduces to 1, and so, $F$ is a natural Floquet Hamiltonian. However, $F$ cannot be the right counterpart for the radiative energy (otherwise, the system could emit an infinite energy at the cost of falling down into negative $F$-levels). A similar phenomenon occurs for the molecular rotator whose electron states resemble the 'Troyan asteroids' ~\cite{IBB1}. In both cases the 'quasi energy crash' excludes a good energy interpretation for $F$. Note, that the difficulty, apparently, escaped the attention of Zeldovich himself, who wrote about the "..transitions from the lowest quasienergy eigenstate (...) into an excited state..." (see ~\cite{Zel}, p.1007). A part of the problem is attended in the new study of the "Troyan case" ~\cite{IBB2} (the resonance hypothesis of Zeldovich ~\cite{Zel} is confirmed by the first order perturbation, though the stability of the ground-top state is still an open problem).
The phenomenon of the 'top state' is not the only puzzle. In fact, in some simple models the insufficiency of the Floquet Hamiltonian to describe the {\it complete resonance} is immediately obvious. The simplest case occurs if $H(t)$ is a periodic operator-valued step-function taking a finite number of steps:
\begin{equation} H(t)=H_1, H_2, ... H_n \quad \quad (periodic \quad pattern) \end{equation} in time lapses $\tau_1, \tau_2, ... \tau_n$ ($T=\tau_1+...+\tau_n$ being the $H(t)$-period). The Floquet Hamiltonian $F$ then is the Baker-Campbell-Hausdorff exponent:
\begin{equation} e^{-i \tau _n H_n}...e^{-i \tau _1 H_1}=e^{-i (\tau _1 + ...+ \tau_n) F} \end{equation}
According to the quasi-energy hypothesis ~\cite{Zel}, $F$ should define the radiation spectrum for the periodic process (5). This seems true if the jumps in (5) are very fast ($T$ small). However, if the time lapses $\tau_1,...,\tau_n$ are long enough comparing with the typical absorption (emission) time, then the absorbed (emitted) quanta will 'see' either only $H_1$, or $H_2$, etc, without 'noticing' $F$. In this way, the Floquet spectrum is linked with a deeper question about the effective time of the absorption (emission) processes. The problem is intimately related to the {\it epicycle structure} of the evolution operator $U(t,0)$. Put $G(t,0)=U(t,0) e^{itF} \quad \Rightarrow$
\begin{equation} U(t,0)=G(t,0) e^{-itF} \end{equation}
where $e^{-itF}$ represents the "main evolution trend" while $G(t,0)$ is a 'closed loop operation' returning to 1 for every $t=nT$. The operator (7), in general, does not allow for the stationary states, though it permits the existence of periodic ones. Indeed, suppose $F$ has a point-spectrum with a sequence of eigenvectors $\phi_1, \phi_2, ...$ belonging to the eigenvalues $\omega_1, \omega_2,...$ The state trajectories $\phi_n(t)$ originated by $\phi_n$'s then are:
\begin{equation} \phi_n(t)=U(t,0) \phi_n = G(t,0) e^{-itF} \phi_n = G(t,0) e^{-i \omega_n t} \phi_n \end{equation}
The assumptions of Zeldovich ~\cite{Zel} mean that the Floquet photon does not interact at all with the "circulating part" $G(t,0)$; it penetrates "right to the bottom" of the dynamical process (8), where it simply replaces $\phi_n$ by $\phi_m$; the loop operator $G(t,0)$ acts as before. To have an exactly soluble model, consider the 1-dimensional oscillator:
\begin{equation} H(t) = {p^2 \over 2} + \beta(t)^2 {q^2 \over 2} \end{equation} where $\beta(t)$ is a periodic function. The epicycle structure is most regular if the Hamiltonian (9) causes an 'evolution loop' (a process in which all motion trajectories simultaneously close, and the entire $U(\tau,0)$ turns proportional to 1 after a finite number of periods $\tau=nT $ ~\cite{Loop1}). The quasi-energy spacing of the loop is $ \Delta F=\hbar \omega_F$, where $\omega_F= 2 \pi l/\tau$ ($l=0,\pm1,\pm2,...$). The general cases of Floquet spectra have been most carefully studied for the rotating fields ~\cite{Paul, BD1,Dav,IBB1,IBB2}; the oscillating case was elaborated for ion traps whenever the use of Mathieu functions was accessible ~\cite{Paul}; the exact numerical study of more general cases is still fragmentary. Our Fig.1 plots the numerically determined Floquet frequencies $\omega_F$ for rectangular and sinusoidal $\beta(t)$. The loop processes occur whenever $\omega_F$ crosses the multiple of $2 \pi / nT$; in all cases the Floquet photon "feels" only the global form of the trajectory (7-8).
Note that the pulsating systems (9) can be produced in the laboratory if $\beta(t)$ corresponds to the intensity of a homogeneous, time dependent magnetic field ~\cite{Loop1,BD1} of a cylindrical solenoid, ${\bf B}(t)={\bf n} B(t)$, $B(t+T)=B(t)$ ({\bf n} is a unit vector). The vector potential is ${\bf A}({\bf x},t)=(1/2) {\bf x} \times {\bf B} = (1/2) B(t) {\bf x} \times {\bf n}$ and the Schr\"odinger's particle of charge $e$ and mass $m$ obeys the Hamiltonian:
\begin{equation} H(t)={1 \over 2m} [{\bf p}-{e \over c} {\bf A}]^2 \end{equation}
or in the simplified variables $q_1=x \sqrt{m}/ \hbar$, $q_2=y \sqrt{m}/ \hbar$, $q_3=z \sqrt{m}/ \hbar$, $p_1=p_x/\sqrt{m}$, $p_2=p_y/\sqrt{m}$, $p_3=p_z/\sqrt{m}$ :
\begin{equation} H(t)={1 \over 2} p_3^2 - \beta (t) M_3 + [{p_1^2 \over 2}+{p_2^2 \over 2}+ \beta (t)^2 ({q_1^2 \over 2}+{q_2^2 \over 2})] \end{equation} where the axes $x$, $y$, $z$ are respectively orthogonal or parallel to the unit vector ${\bf n}$ and the 'manipulation function' $\beta(t)=e\hbar B(t)/2mc$ can simulate pulses of any shape in (9). The Floquet phenomenon (10-11) too, has its extremely regular forms. Thus e.g., Fig.2 represents a loop case generated by 24 periods of the sinusoidal field :
\begin{equation}
{\bf B}(t) = {\bf n} B_0 \sin \omega t, \quad \quad |{\bf n}|=1 \end{equation} How does such a system interact with external radiation? While the resonant response to coherent fields of Floquet frequency is beyond any doubt (compare the semiclassical approach of Rabi et al. ~\cite{Rabi}) the research on magnetic resonance might indicate the domination of multi-photon processes ~\cite{Mult1}. The absorption (emission) of single quanta is a distinct phenomenon leading to some less typical problems.
The {\it time of events} in quantum theory is the subject of unfinished discussions ~\cite{Tate}. The question about the {\it minimal time} for an act of absorption (emission) is seldom addressed (if not discouraged) by the present day formalism. By applying the quantum equations 'to the letter', one might conclude that the emission, absorption, decay are virtual processes, never indeed concluded. Opposite arguments (returning to the pioneer ideas!) indicate, that the acts of absorption (emission) {\it indeed happen} ~\cite{Cook,Jum1}: they are sudden jumps from 'potentiality' to 'actuality'; a kind of spontaneous reductions of quantum state, leaving no slightest doubt that the absorption {\it has occurred!} In the lab scale the jumps, apparently, are not restricted by any {\it minimal time} (though the {\it expected time} might be finite: see the anti-bunching phenomenon ~\cite{Anti1}). Would the picture be similar for the Floquet absorption?
To avoid 'doctrinal constraints' we shall stick to intuitive ideas ~\cite{Cook}. Assume that a photon penetrates into a solenoid where a microobject is kept under the influence of a magnetic 2-step pattern: $B_1, B_2, B_1, B_2,...$ Let the magnetic steps $B_1, B_2$ last 1 min. each, with $T=2$ min. Of course, within the first 1 min. the system can absorb any photon of energy $\Delta E_1= \hbar \omega_1$ whereas during the next 1 min. it can absorb any photon of energy $\Delta E_2=\hbar \omega_2 $ ($\omega_i=e B_i/2mc, \quad i=1,2$). In contrast, the absorption of a 'Floquet photon' is the summary effect of the entire period of $H(t)$: so, it should not occur until the magnetic field indeed accomplishes the 2-pulse pattern. (The best argument is the {\it reductio ad absurdum}. If the mechanism generating the double $B(t)$ pulse had a sudden defect and if $B(t)$ failed to produce the 2-nd step $B_2$, the Floquet frequency would never be absorbed! Should the Floquet photon be absorbed during the first step, how could it 'know' that the second step will indeed occur?) This suggests the {\it minimal time} $T=2$ min. needed for the 'Floquet absorption'. Quite similarly, for the general process (7-8) the photon would have to wait until the ``loop evolution'' $G(t,0)$ closes up, to 'see' the global aspect $e^{-itF}$ behind. Can the single photon absorption be so incredibly slow?
To find an answer, the only method is an experiment: one has to place a sample of identical quantum objects in the oscillating magnetic field (10-12). The sample should then be bombarded by an external photon beam of Floquet frequency. To distinguish the Floquet resonance to single quanta from the parametric resonance to coherent fields (which can involve multiphoton processes ~\cite{Mult1}) it might be necessary to apply monochromatic but perfectly incoherent photon beams, so that the photons drop separately onto the sample. (To create such a beam is a separate challenge but is not fundamentally impossible). The resonance absorption should be also checked for the ``instantaneous spectra'' of $H(t)$. Now, the exclusive presence of the (diffused) instantaneous levels of $H(t)$ would mean that the acts of absorption are much quicker than the period of the external field (the absorbed photons have simply no time to get involved in the Floquet process). In turn, the appearance of the sharply defined 'Floquet lines' would confirm the existence of 'slow absorption' correcting the ideas about quantum jumps ~\cite{Cook}. An analogous conclusion should hold for the 'Troyan rotators' ~\cite{IBB1, IBB2}.
An essential difficulty is the orders of magnitude. The pulsating fields of electromagnets might turn out 'too slow' to make the problem more than academic (for low $\omega_F$ it might be impossible to bombard the sample with single Floquet quanta!) If faster, the system would generate an extra radiation spoiling the approximation (9). Note, though, that clean and fast oscillating fields operate in the nodal points of the laser beam traps ~\cite{Laz1,Laz2,Laz3}. For powerful laser beams $\simeq 10^{15} Watt/cm^2$ the magnetic fields, in principle, can approach $10^6 G$, comparable to the newest achievements of the macroscopic technology ~\cite{Magn}. (Indeed, we find it strange that the laser beam traps are so seldom used; they might mark some natural time scales for the atomic phenomena!). Thus, e.g., two monochromatic, perpendicularly crossed standing waves, have the vector potential:
\begin{equation} {\bf A}_{\bf mn} ({\bf x},t) = {1 \over 2} A [{\bf m} \sin({\omega \over c} {\bf n} \cdot {\bf x}) - {\bf n} \sin({\omega \over c} {\bf m} \cdot {\bf x}) ] \sin(\omega t) \end{equation} ({\bf n}, {\bf m}, {\bf s} are three orthogonal unit vectors) hosting the sinusoidally pulsating field (12) on the nodal line ${\bf m} \cdot {\bf x}={\bf n} \cdot {\bf x}=0$ ~\cite{BD2}. The equivalence to the 'solenoid model' (10-11) and to the oscillator (9) is local; but it should hold as long as the charged particle is maintained in vicinity of the nodal line (the typical wavelengths of the lasers are $\simeq 10^{-6} m$, while the atomic size $\simeq 10^{-9} m$). The stability thresholds are another obstacle (though only for charged microobjects). To keep a charged particle in the oscillating field (12-13) the ratio of the amplitude/frequency cannot be too high (otherwise the particle is expelled ~\cite{BD1,BD2}). For the sinusoidal pulses (12), the crucial parameter is $\alpha=eB/2mc \omega$ and the stability condition is ~\cite{BD2}:
\begin{equation}
|\alpha| < 0.5735... \end{equation}
For a neutron, there is no threshold (14), and the new techniques ~\cite{Magn} make it possible to apply strong fields to examine the Floquet spectra. In the case of the oscillating laser fields the situation is even better due to the high frequencies. Of course, to create a high intensity standing wave with an exact nodal line (13) is a non-trivial task (but must all efforts of the experimental physics be always dedicated to particle acceleration?)
An interesting class of traps is obtained by superposing two standing waves $A_{\bf ms}$, $A_{\bf ns}$ so that the nodal lines intersect and the phase difference is $\pi/2$. The resulting field has a net of nodal points hosting the rotating magnetic field ~\cite{BD1}:
\begin{eqnarray} {\bf A}_{\rm rot} ({\bf x},t) & = & {A \over 2} [{\bf m} \sin({\omega \over c} {\bf n} \cdot {\bf x}) - {\bf n} \sin({\omega \over c} {\bf m} \cdot {\bf x}) ] \cos(\omega t) \nonumber \\
& & + {A \over 2} [{\bf s} \sin({\omega \over c} {\bf n} \cdot {\bf x}) - {\bf n} \sin({\omega \over c} {\bf s} \cdot {\bf x}) ] \sin(\omega t) \\
& \matrix{\simeq \cr _{{\bf x} \rightarrow 0} \cr} & {1 \over 2} {\bf x} \times {\bf B} (t); \quad \quad {\bf B} (t) = {A \omega \over c} [ {\bf m} \cos \omega t + {\bf s} \sin \omega t] \nonumber \end{eqnarray}
To assure that the nodal lines of two standing waves (13) intersect exactly is again a formidable challenge - but if achieved it would permit to observe the effects of strong and fast rotating magnetic fields in micro scale. An interesting experiment would be to place a spin $1/2$ particle (electron, neutron) in the rotating magnetic field (15) and check for the magnetic resonance (not to $\omega$ as described in ~\cite{Rabi,Mult1} but to the Floquet frequency $\omega_F$ !). The 'instantaneous Hamiltonian' is:
\begin{equation} H(t) = - \mu {\bf B} (t) \cdot {\bf \sigma} \end{equation}
The transition to the 'rotating frame' ~\cite{Rabi,BD1,IBB2} yields the evolution operator:
\begin{equation} U(t,0)=e^{-i \omega t {\bf n} \cdot {\bf \sigma} /2} e^{-itF} \end{equation} where $F$ is the new time independent Hamiltonian and simultaneously, the most natural Floquet generator for (16):
\begin{equation} F = -\mu \hbar B \sigma_x + \omega \hbar \sigma_z /2 = \left( \matrix{ \hbar \omega/2 & -\mu B \cr -\mu B & - \hbar \omega /2 \cr} \right) \end{equation} with two eigenvalues: $\lambda_{\pm} = \pm \sqrt{ \mu^2 B^2 + (\hbar \omega /2)^2}$ [independent of the particular representation (18)]. As already noticed, $F$ may have no good energy interpretation (compare ~\cite{BD1}). This indeed happens for the generator (18) which conserves a non-trivial spectrum for $B \rightarrow 0$. Knowing that the quasi-energies are defined modulo $\hbar \omega$ one immediately gets the right spacing for the magnetic Floquet-resonance of (16):
\begin{equation} \Delta E = \hbar \omega ( \sqrt{1 + ({2 \mu B \over \hbar \omega})^2}-1) \end{equation} with the correct limiting values:
\begin{eqnarray} \Delta E & \simeq & 2 \mu B {\mu B \over \hbar \omega}, \quad \quad 2 \mu B \ll \hbar \omega \nonumber \\
& \simeq & 2 \mu B - \hbar \omega, \quad \quad 2 \mu B \gg \hbar \omega \end{eqnarray}
As before, the observation of the magnetic Floquet line (19-20) would mean the existence of the slow absorption with $\tau > 2 \pi/\omega$, whereas the domination of the instantaneous line with $\Delta E=2 \mu B$ would testify that the absorption (emission) times are much shorter than the trap oscillation period $T=2 \pi/\omega$.
An important experiment would be to check the Zeeman and magnetic resonances for microsystems in the linearly oscillating fields. Suppose a microobject with a spherically symmetric Hamiltonian $H_0$ is kept in the field (10-12). If the terms quadratic in $B(t)$ are negligible (approximately true for Zeeman if $B \simeq 10^4 G$; exactly for the magnetic resonance ~\cite{Mult1}), the Floquet Hamiltonian $F$ is identical with the unperturbed $H_0$ (the contributions from $-{\bf M} \cdot {\bf B}(t)$ cancel. The conclusion holds also for the Anandan-Hagen term ~\cite{Aha}). In contrast, the instantaneous Hamiltonians $H(t) = H_0 - {\bf M} \cdot {\bf B}(t)$ should show the (variable) Zeeman spectra. Assume now a sample of microobjects in the field (12) is additionally bombarded by an incoherent photon beam. Then, the existence of ordinary spectral terms, without Zeeman corrections would mean the domination of the Floquet mechanism (slow absorption). Should the Floquet spectrum disappear for too low $\omega$ (slow field oscillations), it would mean the absence of too slow absorption-emission acts. To the contrary, the ``diffuse lines'' corrected by $ - {\bf M} \cdot {\bf B}(t)$ (for variable $B(t)$) will mean 'quick' emission-absorption processes, confined to very short time intervals (the conclusion seems valid even if the 'separately dropping' photons are not available!) Henceforth, the absence of such lines for high $\omega$ could mean the existence of a {\it minimal time} for absorption blinding the vision of the 'instantaneous Hamiltonians' $H(t)$. If there is a minimal absorption time of a few nanoseconds compatible with the antibunching observations ~\cite{Anti1}, then even lower frequencies can be used to blind the instantaneous spectra of $H(t)$.
{\bf Acknowledgements.} The support of CONACYT, M\'exico, is acknowledged.
\centerline{\bf Figure Captions} \vskip 1cm
{\bf FIG. 1 .-} The spectral Floquet frequency $\omega_F$ for an oscillator (7) driven by: ({\it a}) time independent $\beta (t) = \beta_0$; ({\it b}) by a sequence of rectangular pulses $\beta(t)=\beta_0, 0, \beta_0, 0,...$ in the time lapses $T/2, T/2, T/2,...$; ({\it c}) by the sinusoidally varying $\beta(t)=\beta_0 \sin \omega t$. Below, three cases of uncharacteristic epicycles (in form of closed loops) generated on the phase plane: ({\it a}) the constant $\beta_0=\beta_{0a}=1.57079...$; ({\it b}) by rectangular pulses with $\beta_0=\beta_{0b}=2.15375...$; ({\it c}) by the sinusoidal pulses with $\beta_0=\beta_{0c}=2.21231...$
\vskip 1cm
{\bf FIG. 2 .-} Specially regular Floquet process in form of an evolution loop generated by 24 periods of the sinusoidal magnetic field ${\bf B}(t)=(B_0+B_1 \sin \omega t) {\bf n}$ with $\beta_0=0.78539..., \quad \beta_1=0.94595...$ in the plane orthogonal to {\bf n}. The loop effect is shared by the classical and quantum motions. How does the phenomenon interact with an external radiation?
\end{document} |
\begin{document}
\title{\textbf{Partial actions and cyclic Kummer's theory}} \begin{abstract} \noindent We introduce a theory of cyclic Kummer extensions of commutative rings for partial Galois extensions of finite groups, extending some of the well-known results of the theory of Kummer extensions of commutative rings developed by A. Z. Borevich. In particular, we provide necessary and sufficient conditions to determine when a partial $n$-kummerian extension is equivalent to either a radical or an $I$-radical extension, for some subgroup $I$ of the cyclic group $C_n$. \end{abstract}
\noindent \textbf{2010 AMS Subject Classification:} Primary 13B05. Secondary 13A50, 16W22.\\ \noindent \textbf{Key Words:} Partial Kummer extensions, Galois extensions, cocycles, coboundary.
\section{Introduction} The theory of Kummer extensions of commutative rings was introduced by A. Z. Borevich in \cite{B} and it is proved that every Kummer extension of a ring $R$ with group $G$ has a decomposition into a direct sum of $R$-submodules, which are images of homomorphisms defined in terms of characters of $G$. Further, the author introduced the term radical extension and showed that every cyclic Kummer extension is equivalent to one of them. Explicitly,
it is proved in \cite[Theorem 2, section 8]{B} that every cyclic extension $T$ of a Kummerian ring $R$ with Galois group $G$ is $G$-equivalent to the radical extension $S_{Q, \varphi, \chi}$, for some $R$-module $Q$ of rank one.
On the other hand, the theory of partial Galois extensions of commutative rings was introduced and studied in \cite{DFP}, extending some of the well-known results given in the celebrated paper by Chase, Harrison and Rosenberg in \cite{CHR}. In particular, given a partial Galois extension $R\subseteq S,$ in \cite[Theorem 5.1]{DFP} the authors established a one-to-one correspondence between the subgroups of $G$ and the separable $R$-subalgebras $T$ of $S$ which are $\alpha$-strong and such that $H_T$, the subgroup of $G$ such that the elements of $T$ stay fixed by the partial action $\alpha$, is a subgroup of $G$. In \cite{BCMP} the authors complete \cite[Theorem 5.1]{DFP} by showing that for any normal subgroup $H$ of $G$ the subring $S^{\alpha_H}$ of $S$ is a partial Galois extension of $R$ with Galois group $G/H.$ In the case that $G$ is abelian, this leads to the construction of the inverse semigroup of equivalence classes of partial Galois abelian extensions of $R$ with the same group $G$, called the Harrison inverse semigroup and denoted by $\mathcal{H}_{\rm par}(G, R)$; this semigroup contains the Harrison group defined in \cite{H}. Moreover, as in the classical case, the study of $\mathcal{H}_{\rm par}(G, R)$ is reduced to the cyclic case.
Starting from the partial Galois theory for abelian groups, it is possible to get a partial cyclic Kummer theory. For this, our principal goal in this work is to generalize the results of \cite[section 2]{B}, \cite[Theorem 1, section 3]{B} and \cite[Theorem 2, section 8]{B} to the partial context. Thus we connect invertible modules with one-dimensional partial cocycles; our new ingredients include saturated sets and $I$-radical extensions, which can be seen as subalgebras of Borevich's radical extensions.
The paper is organized as follows. After the introduction, in Section 2 we present some preliminary facts on partial actions and partial Galois cohomology of groups. In Section 3 we connect invertible modules with one-dimensional partial cocycles (see Section \ref{coho}). In Section 4 we present a partial Kummer theory. In the first part we show that a partial $n$-kummerian extension is a sum of invertible modules described in Section \ref{IMCC}, where the sum covers all the characters of $G$. In the second part, given $m\in \mathbb{N}$ and $I\subseteq \{0,\dots, m-1\}$ we introduce the notion of Borevich's $I$-radical extension and in Proposition \ref{iradd} we give necessary and sufficient conditions to determine when these extensions have an algebra structure. In the third part we determine which partial cyclic Kummer extensions can be parametrized by $I$-radical extensions, and show that the study of this kind of partial action can be reduced to the global case.
Throughout this work the word ring means an associative ring with an identity element. For a commutative ring $R$ we say that an $R$-module is f.g.p if it is finitely generated and projective, faithfully projective if it is faithful and f.g.p. Moreover, unadorned $\otimes$ means $\otimes_R.$ The Picard group of $R$ is denoted by \textbf{Pic}($R$) and it consists of all $R$-isomorphism classes of f.g.p $R$-modules of rank 1, with binary operation given by $[P][Q]=[P\otimes Q]$. Recall that its identity is $[R]$ and the inverse of $[P]$ in \textbf{Pic}($R$) is $[P^*]$, where $P^*=Hom_R(P, R)$. Finally for a ring $A$ and $X,Y\subseteq A$ the set $\mathcal{U}(A)$ is the group of invertible elements of $A$ and $XY$ denotes the set of finite sums of elements of the form $xy$ (or $yx$) for $x \in X$ and $y \in Y.$ \section{Preliminaries} In this section we recall some basic notions which will be used in the paper.
\subsection{Partial Actions of groups} Let $k$ be a commutative ring and $G$ be a group. Following \cite{DE} we say that a \textit{partial action} $\alpha$ of $G$ on a $k$-algebra $S$ is a family of $k$-algebra isomorphisms $\alpha=\{\alpha_g \colon S_{g^{-1}}\to S_g\}_{g\in G}$, which will be denoted by $(S,\alpha),$ where, for each $g\in G$, $S_g$ is an ideal of $S$ such that \begin{itemize} \item[(i)] $S_1=S,\, \alpha_1=id_S$, where 1 is the identity element of the group $G,$ \item[(ii)] $\alpha_g(S_{g^{-1}}\cap S_h)=S_g\cap S_{gh}, \, \forall g,h \in G$, \item[(iii)] $\alpha_g \circ \alpha_h(x)=\alpha_{gh}(x),$ $ \forall x\in S_{h^{-1}}\cap S_{{(gh)}^{-1}},\, \forall g,h \in G$. \end{itemize} Conditions (ii) and (iii) are equivalent to the fact that $\alpha_{gh}$ is an extension of $\alpha_g \circ\alpha_h;$ moreover we say that $\alpha$ is {\it global} if $\alpha_{gh}=\alpha_g \circ \alpha_h,$ for all $g,h\in G.$ Two classical examples of partial actions are the following. \begin{exe} (Induced partial action) Let $\beta$ be a global action of $G$ on a ring $T$ and $S$ a unital ideal of $T.$ For $g\in G$ we set $S_g=S\cap \beta_g(S)$ and $\alpha_g=\beta_g\mid_{S_{g^{-1}}};$ then the family $\alpha=\{\alpha_g \colon S_{g^{-1}}\to S_g\}_{g\in G}$ is a partial action of $G$ on $S.$ \end{exe} \begin{exe}\label{ext0} (Extension by zero) Let $G$ be a group, $S$ a ring, and $H$ a subgroup of $G$ acting (globally) on $S$ with action $\beta.$ Set $S_g=\{0\}$ for all $g\in G\setminus H.$ Then $\beta^0=\{\beta_g \colon S_{g^{-1}}\to S_g\}_{g\in G}$ is a partial action of $G$ on $S$ and is called the extension by zero of $\beta.$ \end{exe}
\begin{defn}\label{equiva}
Let $S$ and $S'$ be two rings with partial actions $\alpha$ and $\alpha',$ respectively. We say that $(S, \alpha)$ and $(S',\alpha')$ are \emph{$G$-isomorphic}, which is denoted by $(S,\alpha)\overset{par}{\sim} (S',\alpha')$, if there is a $k$-algebra isomorphism $f: S\rightarrow S'$ such that for all $g \in G$: \begin{enumerate} \item [(i)] $f(S_g)=S'_g$, \item [(ii)]$ f \circ \alpha_g= \alpha'_g \circ f$ in ${S_{g^{-1}}}$. \end{enumerate} \end{defn} For our purposes we will assume hereafter that $G$ is finite and $\alpha$ is unital, that is, every ideal $S_g$ is unital, with its identity element denoted by $1_g,$ by \cite[Theorem 4.5]{DE} this condition is equivalent to say that $\alpha$ possesses a globalization. (see \cite[p. 79]{DFP} for more details).
The {\it ring of subinvariants of $S$} is the set $S^{\alpha}=\{a \in S\mid \alpha_{g}(a1_{g^{-1}})=a1_g,\, \forall g\in G\};$ if $\alpha$ is global, then $S^\alpha$ is denoted by $S^G.$ Let $R$ be a unital subring of $S$ with $1_R=1_S.$ Then following
\cite{DFP}, we say that $S\supseteq R$ is a \textit{partial Galois extension} if \begin{enumerate} \item[(i)] $R=S^\alpha$; \item[(ii)] For some $m\in \mathbb{N}$ there exist elements $x_i,y_i\in S, 1\leq i\leq m$, such that \begin{equation*}\label{G2} \sum_{i=1}^mx_i\alpha_{g}(y_i1_{g^{-1}})=\delta_{1, g},\, \text{for each}\, g \in G. \end{equation*} \end{enumerate} The elements $x_i,y_i$ in (ii) are called \textit{partial Galois coordinates} of $S$ over $R$. \begin{rem}\label{isogal} Let $(T, \beta)$ be a globalization of $(S, \alpha).$ Then by \cite[Theorem 3.3]{DFP}, $S\supseteq R$ is a partial Galois extension, if and only if, $T\supseteq T^G$ is a Galois extension, moreover by \cite[Proposition 2.3]{DFP} there is a $T^G$-bilinear map $\psi\colon T\to T $ such that $\psi {\mid_R}: R\to T^G$ is a ring isomorphism whose inverse is given by $x\mapsto x1_S,$ for all $x\in T^G.$ \end{rem}
We give the following. \begin{lem} Let $\alpha$ be a unital partial action of $G$ on $S$ and $R$ a subring of $S.$ Then $S/R$ is a partial Galois extension, if and only if, \begin{itemize} \item $R=S^\alpha.$ \item For some $m\in \mathbb{N}$ there exist elements $x_i,y_i\in S, 1\leq i\leq m$, such that \end{itemize} \begin{equation}\label{galequiv}\sum_{i=1}^m\alpha_{g}(x_i1_{g^{-1}})\alpha_{h}(y_i1_{h^{-1}})=\delta_{g, h}1_g,\, \text{for all}\,\, g, h \in G.\end{equation} \end{lem} \proof $(\Rightarrow)$ Suppose that $S/R$ is a partial Galois extension, then $S^\alpha=R.$ Take $g,h\in G,$ then there is $m\in \mathbb{N}$ and $x_i,y_i\in S, 1\leq i\leq m$, such that $\sum_{i=1}^mx_i\alpha_{l}(y_i1_{l^{-1}})=\delta_{1, l},$ for all $l\in G$. Hence, $\sum\limits_{i=1}^m\alpha_g(x_i1_{g^{-1}})\alpha_{gl}(y_i1_{(gl)^{-1}})=\alpha_g(\delta_{1,l}1_{g^{-1}})$. In particular, taking $l=g^{-1}h$ we get $$\sum\limits_{i=1}^m\alpha_{g}(x_i1_{g^{-1}})\alpha_{h}(y_i1_{h^{-1}})=\alpha_{g}(\delta_{1,g^{-1}h}1_{g^{-1}})=\delta_{g,h}1_g,$$ as desired. For the part $(\Leftarrow)$ take $h=1.$
\endproof
\subsubsection{Partial cohomology of groups}\label{coho}
Now we recall from \cite{DK} some notions about partial cohomology of groups.
\begin{defn} Let $(S,\alpha)$ be a partial action of $G.$ Given $n\in \mathbb{N}$, an $n$-cochain of $G$ with values in $S$ is a function $f\colon G^n\to S$, such that $f(g_1,\dots,g_n)\in \mathcal{U}(S1_{g_1}1_{g_1g_2}\cdots 1_{g_1g_2\cdots g_n})$. A $0$-cochain is an element of $\mathcal{U}(S)$. \end{defn}
\begin{rem} Let $C^n(G,\alpha,S)$ denote the set of all $n$-cochains. This set is an abelian group and its identity is the map $(g_1,\dots, g_n)\mapsto 1_{g_1}1_{g_1g_2}\cdots 1_{g_1g_2\cdots g_n}$ and the inverse of $f\in C^n(G,\alpha,S)$ is $f^{-1}(g_1,\dots, g_n)=f(g_1,\dots, g_n)^{-1}$, where $f(g_1,\dots, g_n)^{-1}$ is the inverse of $f(g_1,\dots, g_n)$ in $S1_{g_1}1_{g_1g_2}\cdots 1_{g_1g_2\cdots g_n}$ for each $g_1,\dots, g_n\in G$. \end{rem}
\begin{defn}[The coboundary homomorphism] Let $n\in \mathbb{N}, n>0, \, f\in C^n(G,\alpha, S)$ and $g_1,\dots , g_{n+1}\in G$, set \small \begin{align*} (\delta^nf)(g_1,\dots , g_{n+1})=&\alpha_{g_1}\left(f(g_2,\dots , g_{n+1})1_{g_1^{-1}}\right)\prod_{i=1}^nf(g_1,\dots, g_i,g_{i+1},\dots , g_{n+1})^{(-1)^i}\\ &f(g_1,\dots , g_n)^{(-1)^{n+1}}. \end{align*} \normalsize \end{defn}
By \cite[Proposition 1.5]{DK} the map $\delta^n: C^n(G,\alpha, S) \to C^{n+1}(G,\alpha, S)$ is a group homomorphism such that $$(\delta^{n+1}\delta^nf)(g_1,g_2,\dots, g_{n+2})=1_{g_1}1_{g_1g_2}\cdots 1_{g_1g_2\cdots g_{n+2}},$$ for any $n\in \mathbb{N}, \, f\in C^n(G,\alpha, S)$ and $g_1,g_2,\dots , g_{n+2}\in G$.
\begin{defn} Let $n\in \mathbb{N}$. We define the groups $Z^n(G,\alpha,S):=\ker \delta^n$ of partial $n$-cocycles, $B^n(G,\alpha,S)={\rm Im}\, \delta^{n-1}$ of partial $n$-coboundaries, and $H^n(G,\alpha,S)=\frac{\ker \delta^n}{{\rm Im}\, \delta^{n-1}}$ of partial $n$-cohomologies of $G$ with values in $S$, $n\geq 1$. \end{defn}
\begin{exe} \begin{align*} B^1(G,\alpha, S)&=\{f\in C^1(G,\alpha, S)\mid f(g)=\alpha_g(t1_{g^{-1}})t^{-1}, \, \text{for some}\quad t\in \mathcal{U}(S)\};\\ Z^1(G,\alpha, S)&=\{f\in C^1(G,\alpha, S)\mid f(gh)1_g=f(g)\alpha_g(f(h)1_{g^{-1}}), \, \forall g,h \in G\}. \end{align*} \end{exe}
Two cocycles $f,f'\in Z^n(G,\alpha, S)$ are called \textit{cohomologous} if they differ by an $n$-coboundary.\\
Notice that for $f\in Z^1(G,\alpha, S)$ we get that \begin{equation}\label{invf} f^{-1}(gh)1_g=\alpha_g(f^{-1}(h)1_{g^{-1}})f^{-1}(g), \end{equation} for all $g,h\in G.$
\section{Invertible Modules Connected with $Z^1(G,\alpha, S)$}\label{IMCC} In this section we connect invertible modules with one-dimensional cocycles (see Section \ref{coho}) in the partial context. Thus we extend the results of \cite[Section 2]{B} to the framework of partial actions.
From now on in this work $S$ will denote a commutative algebra over $k,$ $G$ an abelian group, $(S, \alpha)$, a unital partial action of $G$ on $S$ and $R$ a subring of $S$ such that $S\supseteq R$ is a partial Galois extension with coordinate system $\{x_i,y_i\in S, 1\leq i\leq m\},$ for some $m\in \mathbb{N}.$
An $R$-submodule $X$ of $S$ is called \emph{invertible} if there exists a submodule $Y$ of $S$ such that $XY=R.$ We denote by ${\rm Inv}_R(S)$ the group consisting of invertible $R$-submodules of $S.$
The trace map $tr_{S/R}\colon S\to R$ is defined by $tr_{S/R}(s)=\sum_{g \in G}\alpha_{g}(s1_{g^{-1}}),$ for all $s\in S.$ By \cite[Remark 3.4]{DFP}, in the ring $S$ there exists $w\in S$ such that \begin{equation}\label{tr1}tr_{S/R}(w)=1.\end{equation}
Take $w\in S$ given by \eqref{tr1}, we associate to each one-dimensional cocycle $f\in Z^1(G,\alpha, S)$ the element $\widehat{f}\in End_R(S)$, by setting \begin{equation}\label{hat}\widehat{f}(x)=\sum_{g \in G}f^{-1}(g)\alpha_{g}(wx1_{g^{-1}}), \quad x\in S.\end{equation}
\begin{prop} The map $\widehat{f}$ satisfies $\widehat{f}\circ \widehat{f}=\widehat{f}$ and $Q_f=Im(\widehat{f})$ is a f.g.p $R$-module. \end{prop}
\begin{proof} It is clear that $\widehat{f}\in End_R(S).$ Now, let $x\in S$. Then, \begin{align*} \widehat{f}\circ \widehat{f}(x)&=\widehat{f}\left(\sum_{g \in G}f^{-1}(g)\alpha_{g}(wx1_{g^{-1}})\right)\\ &=\sum_{h \in G}f^{-1}(h)\alpha_{h}\left(w\sum_{g \in G}f^{-1}(g)\alpha_{g}(wx1_{g^{-1}})1_{h^{-1}}\right)\\ &=\sum_{g, h}f^{-1}(h)\alpha_{h}(w1_{h^{-1}})\alpha_{h}(f^{-1}(g)1_{h^{-1}})\alpha_{h}[\alpha_{g}(wx1_{g^{-1}})1_{h^{-1}}]\\ &=\sum_{g, h}f(h)^{-1}\alpha_{h}(w1_{h^{-1}})\alpha_{h}(f(g)^{-1}1_{g^{-1}})\alpha_{hg}(wx1_{(hg)^{-1}})1_{h}\\ &=\sum_{g, h}f(h)^{-1}\alpha_{h}(f(g)^{-1}1_{g^{-1}})\alpha_{h}(w1_{h^{-1}})\alpha_{hg}(wx1_{(hg)^{-1}})\\ &\stackrel{\eqref{invf}}=\sum_{g, h}f^{-1}(hg)\alpha_{h}(w1_{h^{-1}})\alpha_{hg}(wx1_{(hg)^{-1}})\\ &\stackrel{l=hg}=\sum_{h, l}f^{-1}(l)\alpha_{h}(w1_{h^{-1}})\alpha_{l}(wx1_{l^{-1}})\\ &=\left(\sum_{h}\alpha_{h}(w1_{h^{-1}})\right)\left(\sum_{l}f^{-1}(l)\alpha_{l}(wx1_{l^{-1}})\right)\\ &\stackrel{\eqref{tr1}}=\widehat{f}(x). \end{align*} The other assertion follows directly. \end{proof}
\begin{prop}\label{cond} Let $f\in Z^1(G,\alpha, S).$ Then
$$Q_f=\{a\in S\mid\alpha_{g}(a1_{g^{-1}})=f(g)a, \forall g\in G \}.$$ In particular if $e_p:G\to S$ is defined by $g\mapsto 1_g,$ for all $g\in G,$ then $Q_{e_p}=R.$ \end{prop}
\begin{proof} Let $a\in Q_f=Im(\widehat{f})$. Then, $a=\sum\limits_{h\in G}f(h)^{-1}\alpha_{h}(wx1_{h^{-1}}),$ for some $x\in S$. Then, \begin{align*} \alpha_{g}(a1_{g^{-1}})&=\sum_{h\in G}\alpha_{g}(f(h)^{-1}1_{g^{-1}})\alpha_{g}(\alpha_{h}(wx1_{h^{-1}})1_{g^{-1}})\\ &\stackrel{\eqref{invf}}=\sum_{h\in G}f^{-1}(gh)f(g)\alpha_{gh}(wx1_{(gh)^{-1}})1_g\\ &\stackrel{f(g)\in S_g}=f(g)\sum_{h}f^{-1}(gh)\alpha_{gh}(wx1_{(gh)^{-1}})\\ &=f(g)a. \end{align*} Conversely, assume that $f(g)a=\alpha_{g}(a1_{g^{-1}}),$ for all $g\in G$. Thus, \begin{align*} a1_g&=f^{-1}(g)\alpha_{g}(a1_{g^{-1}})\\ &=f^{-1}(g)\alpha_{g}(a1_S1_{g^{-1}})\\ &=f^{-1}(g)\alpha_{g}\left(a\sum_{h \in G}\alpha_{h}(w1_{h^{-1}})1_{g^{-1}}\right)\\ &=f^{-1}(g)\alpha_{g}\left(\sum_{h \in G}\alpha_{h}(w1_{h^{-1}})\alpha_{h}(\alpha_{h^{-1}}(a1_{h}))1_{g^{-1}}\right)\\ &=\sum_{h \in G}f^{-1}(g)\alpha_{g}[\alpha_{h}(w\alpha_{h^{-1}}(a1_{h})1_{h^{-1}})1_{g^{-1}}]\\ &=\sum_{h \in G}f^{-1}(g)\alpha_{gh}(w\alpha_{h^{-1}}(a1_{h})1_{(gh)^{-1}})1_{g}. \end{align*} Again by \eqref{invf} we get $$f^{-1}(g)1_g=f^{-1}((gh)h^{-1})1_{g}=f^{-1}(gh)\alpha_{gh}(f^{-1}(h^{-1})1_{gh}).$$ We have that, \begin{align*} a1_{g} &=\sum_{h \in G}f^{-1}(gh)\alpha_{gh}(wf^{-1}(h^{-1})\alpha_{h^{-1}}(a1_{h})1_{(gh)^{-1}}), \end{align*} for all $g\in G.$ In particular, taking $g=1$ we get that $a\in Q_f=Im(\widehat{f})$. \end{proof}
\begin{prop}\label{free} If $f\in B^1(G,\alpha, S),$ that is $f(g)=\alpha_{g}(u1_{g^{-1}})u^{-1}, $ for some $u\in \mathcal{U}(S),$ then $Q_f=Ru.$ Moreover, if the cocycles $f,f'\in Z^1(G,\alpha, S)$ are cohomologous, i.e., $f(g)=f'(g)\alpha_{g}(u1_{g^{-1}})u^{-1}$ for some $u\in \mathcal{U}(S),$ then $Q_f=Q_{f'}u$. \end{prop} \begin{proof} Since $f\in B^1(G,\alpha, S)$ there is $u\in \mathcal{U}(S)$ such that $f(g)=\alpha_{g}(u1_{g^{-1}})u^{-1}, $ for all $g\in G.$ We shall prove that $Q_f=Ru.$ Let $a\in Q_f$; then $au^{-1} \in R.$ Indeed, for $g\in G,$ we have by Proposition \ref{cond} that $$\alpha_g(au^{-1}1_{g^{-1}})=\alpha_g(a1_{g^{-1}})\alpha_g(u^{-1}1_{g^{-1}})=f(g)af^{-1}(g)u^{-1}=au^{-1}1_g,$$ and we get that $au^{-1} \in R,$ that is $a\in Ru.$ For the other inclusion, since $f(g)u=\alpha_{g}(u1_{g^{-1}})$ we have $u\in Q_f$ and thus $Ru\subseteq Q_f$. Now take $f,f'\in Z^1(G,\alpha, S)$ cohomologous and $a \in Q_f,$ then for any $g\in G$ \begin{align*} au^{-1} f'(g)&=af(g)\alpha_{g}(u^{-1}1_{g^{-1}})=\alpha_g(au^{-1}1_{g^{-1}}), \end{align*} and $au^{-1} \in Q_{f'},$ from this we get that $Q_f=Q_{f'}u,$ as desired. \end{proof}
Now we prove that the $R$-module $Q_f$ does not depend on the choice of an element $w$ with trace 1. Indeed, consider the $R$-homomorphism $\widetilde{f}\in End_R(S)$, defined by \begin{equation}\label{wildef}\widetilde{f}(x)=\sum_{g \in G}f^{-1}(g)\alpha_{g}(x1_{g^{-1}}),\end{equation} for all $x\in S.$ Then we have the following.
\begin{prop} \label{equal} Let $\widetilde{f}$ defined by \eqref{wildef}. Then $Im(\widetilde{f})=Q_f=Im(\widehat{f})$. \end{prop}
\begin{proof} First of all, we prove that $Im(\widetilde{f})\subseteq Im(\widehat{f})$. Indeed, for $x\in S$ we have that \begin{align*} \widehat{f}\circ \widetilde{f}(x)&=\widehat{f}\left(\sum_{g \in G}f^{-1}(g)\alpha_{g}(x1_{g^{-1}})\right)\\ &=\sum_{h \in G}f(h)^{-1}\alpha_{h}\left(w\sum_{g \in G}f^{-1}(g)\alpha_{g}(x1_{g^{-1}})\right)\\ &=\sum_{g, h}f(h)^{-1}\alpha_{h}(w1_{h^{-1}})\alpha_{h}(f^{-1}(g)\alpha_{g}(x1_{g^{-1}}))\\ &=\sum_{g, h}\alpha_{h}(w1_{h^{-1}})f(h)^{-1}\alpha_{h}(f^{-1}(g)\alpha_{g}(x1_{g^{-1}}))\\ &=\sum_{g, h}\alpha_{h}(w1_{h^{-1}})f(h)^{-1}\alpha_{h}(f^{-1}(g)1_{g^{-1}})\alpha_{h}(\alpha_{g}(x1_{g^{-1}})1_{h^{-1}})\\ &=\sum_{g, h}\alpha_{h}(w1_{h^{-1}})f^{-1}(hg)\alpha_{hg}(x1_{(hg)^{-1}})1_{h}\\ &\stackrel{l=hg}=\sum_{h, l}\alpha_{h}(w1_{h^{-1}})f^{-1}(l)\alpha_{l}(x1_{l^{-1}})1_{h}\\ &=\left(\sum_{h \in G}\alpha_{h}(w1_{h^{-1}})\right)\left(\sum_{l \in G}f^{-1}(l)\alpha_{l}(x1_{l^{-1}})\right)\\ &=\sum_{l \in G}f^{-1}(l)\alpha_{l}(x1_{l^{-1}})\\ &=\widetilde{f}(x). \end{align*} The other inclusion follows from the fact that $\widehat{f}(x)=\widetilde{f}(wx)$ for all $x\in S.$ \end{proof}
\begin{prop}\label{ann} The equality $Q_fS=S$ holds. Furthermore, \begin{equation} \label{equal1}\sum_{i=1}^m\widehat{f}(x_i)\widetilde{f^{-1}}(y_i)=1,\end{equation} where $x_1,\dots, x_m; y_1,\dots, y_m\in S$ is a partial Galois coordinate system of $S$ over $R.$ \end{prop}
\begin{proof} Let $x_1,\dots, x_m; y_1,\dots, y_m\in S$ be a partial Galois coordinate system of $S$ over $R$; then \begin{align*} \sum_{i=1}^m\widehat{f}(x_i)\widetilde{f^{-1}}(y_i)&=\sum_{i=1}^m\left(\sum_{g \in G}f(g)^{-1}\alpha_{g}(wx_i1_{g^{-1}})\right)\left(\sum_{h \in G}f(h)\alpha_{h}(y_i1_{h^{-1}})\right)\\ &=\sum_{g,h}\sum_{i=1}^mf^{-1}(g)f(h)\alpha_{g}(wx_i1_{g^{-1}})\alpha_{h}(y_i1_{h^{-1}})\\ &=\sum_{g,h}f^{-1}(g)\alpha_{g}(w1_{g^{-1}})f(h)\sum_{i=1}^m\alpha_{g}(x_i1_{g^{-1}})\alpha_{h}(y_i1_{h^{-1}})\\ &\stackrel{\eqref{galequiv}}=\sum_{g,h}f^{-1}(g)\alpha_{g}(w1_{g^{-1}})f(h)\delta_{g,h}\\ &=\sum_{g}f^{-1}(g)\alpha_{g}(w1_{g^{-1}})f(g)\\ &=\sum_{g}\alpha_{g}(w1_{g^{-1}})=1. \end{align*} With respect to the equality $Q_fS= S,$ we have that
\begin{equation*} Q_fS\subseteq S=\sum_{i=1}^m\widehat{f}(x_i)\widetilde{f^{-1}}(y_i)S\subseteq \sum_{i=1}^m\widehat{f}(x_i)S\subseteq Q_fS. \end{equation*}
\end{proof} The following is a consequence of Proposition \ref{ann}. \begin{cor}\label{faithful} $Q_f$ is a faithful $R$-module (in particular, $Q_f\ne 0$). \end{cor}
\begin{prop}\label{6} Let $f,f'\in Z^1(G,\alpha, S)$. Then $Q_fQ_{f'}=Q_{ff'}$. \end{prop}
\begin{proof} The fact that $Q_fQ_{f'}\subseteq Q_{ff'}$ follows from Proposition \ref{cond}. For the other inclusion, if $c=\widetilde{ff'}(z), z\in S$, by Proposition \ref{ann} we have that $z=\sum_is_iv_i,$ for some $s_i\in S$ and $ v_i\in Q_{f'}.$ Then \begin{align*} c=\widetilde{ff'}(z)&=\sum_{g, i}f(g)^{-1}f'(g)^{-1}\alpha_{g}(s_i1_{g^{-1}})\alpha_{g}(v_i1_{g^{-1}})\\ &\stackrel{Prop. \ref{cond}}=\sum_{g, i}f(g)^{-1}f'(g)^{-1}\alpha_{g}(s_i1_{g^{-1}})f'(g)v_i\\ &=\sum_{g, i}f(g)^{-1}\alpha_{g}(s_i1_{g^{-1}})v_i\\ &=\sum_i\widetilde{f}(s_i)v_i. \end{align*} Note that $\widetilde{f}(s_i)\in Q_f$ and $v_i\in Q_{f'}$ and thus $c\in Q_fQ_{f'}$. \end{proof}
\begin{prop} Let $f\in Z^1(G,\alpha,S)$. Then $Q_f\otimes_R Q_{f^{-1}}=R\xi\simeq R$ as $R$-modules, where $\xi=\sum\limits_{i=1}^m\widehat{f}(x_i)\otimes_R \widetilde{f^{-1}}(y_i)$. \end{prop}
\begin{proof} Note that $R\xi\subseteq Q_f\otimes_R Q_{f^{-1}}$. For the other inclusion, as $S$ is a partial Galois extension of $R$, the partial Galois coordinates $x_1,\dots , x_m; y_1,\dots, y_m$ generate the $R$-module $S$. Thus, $\widehat{f}(x_1),\dots, \widehat{f}(x_m)$ and $\widetilde{f^{-1}}(y_1),\dots, \widetilde{f^{-1}}(y_m)$ generate the $R$-modules $Q_f$ and $Q_{f^{-1}}$ respectively, and then $Q_f\otimes_R Q_{f^{-1}}$ is generated by $\{\widehat{f}(x_i)\otimes_R \widetilde{f^{-1}}(y_j)\mid 1\leq i,j\leq m\}.$ Hence it is enough to prove that $\widehat{f}(x_i)\otimes_R \widetilde{f^{-1}}(y_j)\in R\xi,$ for all $1\leq i,j\leq m$. Since $ff^{-1}(g)=1_g,$ for each $g\in G,$ we get by Proposition \ref{6} and Proposition \ref{free} that \begin{equation}\label{inv}Q_fQ_{f^{-1}}=Q_{ff^{-1}}=R,\end{equation} hence \begin{align*} \widehat{f}(x_i)\otimes_R \widetilde{f^{-1}}(y_j)&\stackrel{\eqref{equal1}}=\widehat{f}(x_i)\otimes_R \sum_{k=1}^m\widetilde{f^{-1}}(y_j)\widehat{f}(x_k)\widetilde{f^{-1}}(y_k)\\ &=\sum_{k=1}^m \widehat{f}(x_i)\widetilde{f^{-1}}(y_j)\widehat{f}(x_k)\otimes_R \widetilde{f^{-1}}(y_k)\\ &= \widehat{f}(x_i)\widetilde{f^{-1}}(y_j)\left(\sum_{k=1}^m\widehat{f}(x_k)\otimes_R \widetilde{f^{-1}}(y_k)\right)\\ &=\widehat{f}(x_i)\widetilde{f^{-1}}(y_j)\xi. \end{align*}
The module $R\xi$ is faithful, as the tensor product of two faithfully projective modules, and thus free of rank one, that is $R\xi\simeq R.$ \end{proof} \begin{rem}\label{r1} It follows from the equality \eqref{inv} that $Q_f \in {\rm Inv}_R(S),$ for all $f\in Z^1(G,\alpha, S).$ Then $[Q_f] \in {\textbf{ Pic}}(R),$ and thus $Q_{f^{-1}}$ is isomorphic with the $R$-module $Q_f^*=Hom_R(Q_f,R)$. \end{rem} Using the fact that $Q_f$ is faithfully projective and the method of localization we get that.
\begin{prop}\label{8} Let $M$ be an $R$-submodule of $S$, $f\in Z^1(G,\alpha,S).$ Then the $R$-homomorphism $\varphi:Q_f\otimes_R M\to Q_fM$, defined by $a\otimes x\mapsto ax$ ($a\in Q_f, \, x\in M$) is an isomorphism. In particular, the map $Z^1(G,\alpha,S)\ni f\mapsto [Q_f]\in {\bf Pic}(R)$ is a group homomorphism. \end{prop}
The following result is a consequence of Propositions \ref{8} and \ref{6}.
\begin{thm}\label{iso} Let $f,g$ be one-dimensional cocycles of $Z^1(G,\alpha,S)$. Then the $R$-homomorphism $\psi:Q_f\otimes_R Q_g\to Q_{fg}$ defined by $a\otimes b\mapsto ab$ for all $a\in Q_f,\, b\in Q_g$ is an isomorphism. \end{thm}
By Proposition \ref{free} and Theorem \ref{iso} we have a group homomorphism $$\lambda : H^1(G,\alpha,S) \ni {\rm cls}(f) \mapsto [Q_f] \in \textbf{Pic}(R).$$ We finish this section by giving a relation between $\lambda$ and the monomorphism $\varphi_1: H^1(G,\alpha,S) \to \textbf{Pic}(R)$ which is at the head of the seven-term exact sequence related to partial Galois extensions of commutative rings (see \cite{DPP, DPPR}). The map $\varphi_1$ sends ${\rm cls}(f)$ to the $R$-isomorphism class $[S_f^G],$ where $$S_f^G=\{a\in S\mid f(g)\alpha_g(a1_{g^{-1}})=a1_g, \forall g\in G \}.$$ Therefore $S_f^G= Q_{f^{-1}}$ thanks to Proposition \ref{cond}, and it follows that $\lambda({\rm cls}(f) )=\varphi_1({\rm cls}(f^{-1}))$ and we conclude that $\lambda$ is a monomorphism.
\section{Partial actions and Kummer Extensions}
In this section we present a partial Kummer theory. One of the main results in this section is that any partial $n$-kummerian ring extension is a sum of invertible modules induced by one-dimensional cocycles.
First, we recall the following.
\begin{defn}\label{kummer}Let $n\geq 2$ be a natural number.
A commutative ring $R$ is called $n$-kummerian if there exists an element $\omega\in \mathcal{U}(R),$ such that: \begin{itemize} \item [a)] $\omega^n=1.$ \item [b)] $1-\omega^i\in\mathcal{U}(R), $ for all $i\in \{1, \cdots, n-1\}.$ \end{itemize} \end{defn}
\begin{rem}\label{K} In $R$ we have that: $$\sum\limits_{i=0}^{n-1}\omega^i=0\hspace{2cm}\text{and}\hspace{2cm} \prod\limits_{i=1}^{n-1}(1-\omega^i)=n1_R.$$
\end{rem}
Let $R$ be an $n$-kummerian ring and $\omega$ as in Definition \ref{kummer}. Any group homomorphism $\chi: G\to \langle \omega \rangle$ is called a character of the group $G$. Let $\hat G=Hom(G,\langle \omega \rangle)$ be the set of all characters of $G$ in $\langle \omega \rangle$. We define a group structure on $\hat G$ as follows: For $\chi_1,\chi_2\in\hat G$, their product $\chi_1\chi_2$ is defined by $$(\chi_1\chi_2)(g)=\chi_1(g)\chi_2(g),\quad g\in G.$$
With this product $\hat G$ is a group isomorphic to $G$.
\begin{defn}\label{parkum} The partial Galois extension $S\supseteq R$ of $G$ is called partial $n$-kummerian if $G$ is an abelian Galois group of order $n$ and $R$ is an $n$-kummerian ring. \end{defn}
For $\chi\in \hat G$ we set \begin{equation}\label{chip} \chi_p: G\ni g\mapsto \chi(g)1_g\in S. \end{equation}
\begin{rem}Let $\omega\in R$ be as in Definition \ref{kummer} and $\Omega$ be the cyclic group generated by $\omega.$ Note that $ Im \,\chi_p\subseteq \bigcup_{g\in G}\Omega 1_g$ which is an inverse semigroup. \end{rem} \begin{rem}\label{kglob} Let $(T, \beta)$ be a globalization of $(S, \alpha)$ and $\psi$ be the ring isomorphism given in Remark \ref{isogal}. Then $\omega'=\psi(\omega)\in T^G$ satisfies a) and b) in Definition \ref{kummer}; in particular $T\supseteq T^G$ is an $n$-kummerian ring extension. Moreover for any $\chi\in \hat G,$ we have that $\psi_\chi:=\psi\circ \chi\in Hom(G, \langle \omega' \rangle)$ and any element of $ Hom(G, \langle \omega' \rangle)$ is of this form. \end{rem}
We have the following. \begin{lem}\label{summa} Let $S\supseteq R$ be a partial $n$-kummerian ring extension. Then \begin{enumerate} \item For any $g\in G, g\neq e,$ we have
\begin{equation}\label{equal0}\sum\limits_{\chi \in \hat G}\chi(g)=0, \end{equation} \item The set $\hat{G}_{\rm par}=\{\chi_p \mid \chi \in \hat G\}$ is a subgroup of $Z^1(G,\alpha,S).$ Moreover \begin{enumerate} \item The map $\mu_p=\hat G\ni \chi \mapsto \chi_p\in \hat{G}_{\rm par}$ is a group epimorphism with $\ker \mu_p=\{\chi\in\hat G\mid \chi(g)=1 \text{ if }\, 1_g\neq 0 \}$. In particular, if $1_g\neq 0$ for all $g\in G$ the groups $\hat G$ and $\hat{G}_{\rm par}$ are isomorphic. \item The map $\hat G_{\rm par}\ni \chi_p\mapsto Q_{\chi_p}\in{\rm Inv}_R(S)$, is a group homomorphism. \end{enumerate}
\item One has \begin{equation}\label{suma} S=\sum\limits_{\chi\in\hat G} Q_{\chi_p},\end{equation} and the sum in \eqref{suma} is direct if and only if $S\supseteq R$ is a (global) Galois extension.
\item For any $\chi\in \hat G$ we have $Q_{\psi_\chi}1_S \subseteq Q_{\chi_p}.$
\end{enumerate} \end{lem} \proof 1) By Remark \ref{kglob} and the proof of \cite[Theorem 1]{B} Section 3, we get $\sum\limits_{\chi \in \hat G}\psi _\chi(g)=0, $ for any $g\in G, g\neq e,$ the fact that $\psi$ is a ring isomorphism implies the result.
\noindent 2) It is clear that $\hat{G}_{\rm par}$ is a group. Now take $g,h\in G$ and $\chi_p\in \hat{G}_{\rm par},$ then $$ \chi_p (gh)1_g=\chi(g)1_g\chi(h)1_{gh}=\chi(g)1_g\alpha_g (\chi(h)1_h1_{g^{-1}}) =\chi_p (g)\alpha_g(\chi_p (h)1_{g^{-1}}), $$ and $\chi_p\in Z^1(G,\alpha,S).$ Now for part a), it is clear that the map is an epimorphism. Now since $R$ is $n$-kummerian we have \begin{align*} \chi\in \ker \mu_p &\Longleftrightarrow \chi(g)1_g=1_g, \forall g\in
G \\&\Longleftrightarrow 1_g(1-\chi(g))=0, \forall g\in
G \\&\Longleftrightarrow 1_g= 0 \vee \chi(g)=1, \forall g\in
G \end{align*} Finally, part b) is a consequence of Proposition \ref{6} and Remark \ref{r1}.
\noindent 3) Let $x\in S.$ Then
\begin{align*} \sum_{\chi_p}\widetilde{\chi}_p(x)&=\sum_{\chi_p,g}\chi^{-1}_p(g)\alpha_g( x1_{g^{-1}}) \\&=\sum_{\chi,g}\chi^{-1}(g)\alpha_g( x1_{g^{-1}})\\ &=\sum_g\left(\sum_{\chi}\chi^{-1}(g)\alpha_g(x1_{g^{-1}})\right) \\ &= \sum_g\left(\alpha_g(x1_{g^{-1}})\sum_{\chi}\chi^{-1}(g)\right)\\ &\stackrel{\eqref{equal0}}= nx, \end{align*} and we get that $nx\in \sum\limits_{\chi_p} Im \widetilde{\chi}_p=\sum\limits_{\chi_p} Q_{\chi_p},$ where the last equality follows from Proposition \ref{equal}. Since $(n1_R)^{-1} \in R$ and each $Q_{\chi_p}$ is an $R$-module we get $x\in\sum\limits_{\chi_p}Q_{\chi_p}, $ and $S=\sum\limits_{\chi\in\hat G} Q_{\chi_p}.$
\noindent Now, if $S/R$ is a Galois extension, then $\chi_p=\chi,$ for all $\chi \in \hat G$ and the sum in \eqref{suma} is direct thanks to \cite[Section 3, Theorem 1]{B}. Conversely, by Remark \ref{r1} we have that ${\rm rk}(Q_{\chi_p})=1,$ for all $\chi \in \hat G.$ Then ${\rm rk} (S)={\rm rk}\left(\bigoplus\limits_{\chi\in \hat G} Q_{\chi_p}\right )=n,$ and the result follows from \cite[Corollary 4.6]{DFP}.
\noindent 4) For $b\in Q_{\psi_\chi} ,$ then \begin{align*} \alpha_g((b1_S) 1_{g^{-1}})&=\beta_g(b)\beta_g(1_S)1_g=\beta_g(b)1_g=\psi_\chi(g)b1_g =\chi(g)b1_g=\chi_p(g)(b1_S), \end{align*} and we get $b1_S\in Q_{\chi_p}$ thanks to Proposition \ref{cond}.\endproof
\begin{rem} It follows by equation \eqref{suma} and Proposition \ref{6} that $S$ is a strong $\hat G$-system (see \cite[Definition 16]{NY}). Moreover, by 4) of Lemma \ref{summa} it follows that the map $T=\bigoplus\limits_{\chi \in \hat G}Q_{\psi_\chi} \ni t\mapsto t1_S\in \sum\limits_{\chi\in\hat G} Q_{\chi_p}=S$ is a ring epimorphism that preserves the homogeneous components. \end{rem}
\subsection{On Borevich's radical extensions}\label{Brad}
Here we recall the notion of Borevich's radical extensions.
Let $R$ be a commutative ring. For a non-zero $R$-module $Q$ and a natural number $i$ we denote $Q^{{\otimes} ^i}=\underbrace{Q\otimes \cdots \otimes Q}_{i-times},$ where $Q^{{\otimes} ^0}=R.$ Suppose that there is $m\in \mathbb{N}$ and an $R$-module homomorphism $\varphi\colon Q^{{\otimes} ^m}\to R.$ Let $S_{Q, \varphi}=\bigoplus\limits_{i=0}^{m-1}Q^{{\otimes} ^i}.$ We recall the construction of a product in $S_{Q, \varphi}$. (This process is the so-called factorization by $\varphi$; see \cite[Section 5]{B} for details.) \\
Consider the tensor $R$-algebra $R[Q]=\bigoplus\limits_{i=0}^\infty Q^{{\otimes} ^i},$ and define recursively the $R$-module homomorphism $\tilde \varphi: R[Q]\to S_{Q, \varphi}$ as follows: \\ For $x\in Q^{{\otimes} ^i}\subseteq R[Q]$ we set $ \tilde\varphi(x)=x,$ if $0\leq i\leq m-1.$ Now if $x=a_1\otimes \cdots \otimes a_i,$ with $ i\geq m$ \text { and } $\tilde\varphi_{\mid Q^{{\otimes} ^k}}$ is defined \text { for } $k<i,$ we set \begin{align*}\tilde \varphi(a_1\otimes \cdots \otimes a_i)&=\tilde\varphi( \varphi (a_1\otimes \cdots \otimes a_{m})\otimes a_{m+1}\otimes \cdots \otimes a_i) \\&=\tilde\varphi( \varphi (a_1\otimes \cdots \otimes a_{m}) a_{m+1}\otimes \cdots \otimes a_i). \end{align*} For elements $x,y\in S_{Q, \varphi}$ one defines $$x\bullet y=\tilde\varphi(x\otimes y).$$ with this product $S_{Q, \varphi}$ is a commutative $R$-algebra containing $R$ as a unital subring. Notice that, $S_{Q, \varphi}$ is graded by the cyclic group $C_m,$ and is strongly graded in the case that $\varphi$ is an isomorphism.
The $R$-algebra $S_{Q, \varphi}$ constructed above is called a \textit{radical extension of $R.$}
\begin{defn}\label{iraddef} Let $m\in \mathbb{N}$ and $I\subseteq \{0, \cdots, m-1\}.$ The $I$-radical extension of $R$ is the $R$-submodule of $S_{Q, \varphi}$ given by $S_{Q, \varphi, I}=\bigoplus\limits_{i\in I}Q^{{\otimes} ^i}.$ Moreover we say that $I$ is $m$-saturated if it is closed under the addition in $C_m.$ That is, $I$ is $m$-saturated, if and only if, $I$ viewed as a subset of $C_m$ is a subgroup. \end{defn}
Now we give a criterion to determine when $S_{Q, \varphi, I}$ is an $R$-algebra.
\begin{prop}\label{iradd} Let $I\subseteq \{0, \cdots, m-1\}$ and suppose that the map $\varphi$ as above is an isomorphism. Then the following statements hold: \begin{enumerate} \item $S_{Q, \varphi, I}$ is a commutative $R$-subalgebra of $S_{Q, \varphi},$ if and only if, I is m-saturated. In this case $S_{Q, \varphi, I}$ is a I-graded $R$-algebra and ${\rm rk}_R(S_{Q, \varphi, I})$ divides $m.$ \item Let $I\subseteq \{0,\cdots, m-1 \}$ be $m$-saturated and consider the $I$-radical extension $S_{Q, \varphi, I},$ then there exists a f.g.p $R$-module $Q'$ with ${\rm rk}(Q')=1,$ and a $R$-algebra isomorphism $S_{Q, \varphi, I}\simeq S_{Q', \varphi'}.$ \end{enumerate} \end{prop} \proof 1) Part ($\Leftarrow$) is clear. Now to prove ($\Rightarrow$), suppose that $S_{Q, \varphi, I}$ is a commutative $R$-subalgebra of $S_{Q, \varphi}.$ Let $i,j\in I,$ to show that $i+_m j\in I$ it is enough to show that $Q^{{\otimes} ^{i+_mj}}\subseteq S_{Q, \varphi, I},$ where $+_m$ denotes the addition in $C_m.$ Since $\varphi$ is an isomorphism then $S_{Q, \varphi}$ is a strongly graded $R$-algebra, and thus $Q^{{\otimes} ^{i+_mj}}=Q^{{\otimes} ^i}\bullet Q^{{\otimes} ^j}\subseteq S_{Q, \varphi, I} ,$ as desired.
\noindent 2) Take $n\in \{0,\cdots, m-1 \} $ such that $I=\langle n \rangle$ and write $Q'=Q^{\otimes ^n},$ then ${\rm rk}_R(Q')=1.$ Now consider the $R$-module homomorphism $\varphi: Q^{\otimes ^m}\to R,$ then there is an $R$-module homomorphism $\varphi': Q'^{\otimes ^{m'}}\to R$ where $m'=\frac {m}{{\rm gcd}\{m,n\}}$ is the cardinality of $I.$ Then the isomorphism $S_{Q, \varphi, I}\simeq S_{Q', \varphi'}$ is given by the identity. \endproof
Consider the $I$-radical extension $S_{Q, \varphi, I}$ of $R$ and define an action on $S_{Q, \varphi, I}$ as follows. \begin{equation*}\label{gactionI}\mu_g(x)=\chi^i(g)x,\text{ for all }g\in G,\, x\in Q^{{\otimes} ^i},\,\, i\in I.\end{equation*}
Then by Proposition \ref{iradd} we get. \begin{cor} Let $I\subseteq \{0,\cdots, m-1 \}$ be $m$-saturated. Then there is a radical extension $S_{Q', \varphi'}$ of $R$ such that $S_{Q, \varphi, I}$ and $S_{Q', \varphi'}$ are $G$-isomorphic. In particular, the ring of invariants of $S_{Q, \varphi, I}$ is $R.$ \end{cor}
\subsection{On Partial Cyclic Kummer Extensions}
It is observed in \cite[Section 5.2]{BCMP} that the study of partial Galois extensions of finite abelian groups can be reduced to the cyclic case, then we assume from now on that $G=\langle g \rangle$ is a cyclic group of order $n.$ Moreover we also assume that
$R$ is an $n$-kummerian ring for some $n\geq 2$ and $\omega\in R$ verifies the hypotheses of Definition \ref{kummer}. Further we fix $\chi\in \hat G$ with $\chi(g)=\omega$ a generating element.
Since $\chi$ has order $n$, Proposition \ref{8} implies that $[Q_{\chi}]\in {\bf Pic}_n(R),$ where ${\bf Pic}_n(R)$ denotes the subgroup of elements of ${\bf Pic}(R)$ whose order divides $n.$ Take a $R$-module isomorphism $\varphi: {Q^{{\otimes}^n}_{\chi}}\to R.$ Then, according to Section \ref{Brad}, we construct the radical extension $S_{Q, \varphi, \chi}.$
By \cite[Theorem 1]{B}, Section 8 we have that $S_{Q, \varphi, \chi}\supseteq R$ is a (global) cyclic Kummer extension of $R$ with Galois group $G,$ where the action is defined by \begin{equation*}\label{gaction}\mu_g(x)=\chi^i(g)x,\text{ for all }g\in G,\, x\in Q^{{\otimes} ^i},\,\, 0\leq i\leq n-1.\end{equation*} Moreover, by \cite[Theorem 2, Section 8]{B} every cyclic Kummer extension of $R$ with Galois group $G,$ is $G$-isomorphic to a radical extension of $R.$ Thus, it is natural to ask which partial kummer extensions are equivalent to either a radical or a $I$-radical extension of $R.$ In view of \eqref{suma} a necessary condition for this is that $S=\displaystyle\bigoplus\limits_{i\in X} Q_{\chi^i_p},$ for some $X\subseteq \{0,\cdots, n-1 \}.$
\begin{prop}\label{isaX} Let $X\subseteq \{0,\cdots, n-1 \}$ such that $S=\displaystyle\bigoplus\limits_{i\in X} Q_{\chi^i_p},$ then $S$ is an $R$-epimorphic image of the extension $S_{Q_{\chi_p}, \varphi}.$ In particular, there is a $R$-module isomorphism between $S$ and $S_{Q_{\chi_p}, \varphi, X}=\displaystyle\bigoplus\limits_{i\in X}Q_{\chi_p}^{{\otimes} ^i} .$
\end{prop} \proof Consider the radical extension $S_{Q_{\chi_p}, \varphi}=\displaystyle\bigoplus\limits_{i=0}^{n-1}Q_{\chi_p}^{{\otimes} ^i}.$
By Theorem \ref{iso} the map $\lambda^i\colon Q_{\chi_p}^{{\otimes} ^i}\to Q_{\chi^i_p}$ given by $$a_1\otimes \cdots \otimes a_i \to a_1\cdots a_i , \,\,\text{for all}\,\, i\in \{0,\cdots, n-1 \}$$
is a well defined $R$-module isomorphism. Now let $\lambda\colon \displaystyle\bigoplus\limits_{i=0}^{n-1}Q_{\chi_p}^{{\otimes} ^i} \to S $ be defined by $\lambda=\displaystyle\sum_{i=0}^{n-1}\tilde\lambda^i,$ where $\tilde\lambda^i=\lambda^i$ if $i\in X$ and zero otherwise, then $\lambda$ is a $R$-module epimorphism. The fact that $S=\displaystyle\bigoplus\limits_{i\in X} Q_{\chi^i_p}$ is a direct sum implies that the kernel of $\lambda$ is $\displaystyle\bigoplus\limits_{i\notin X}Q_{\chi_p}^{{\otimes} ^i}$ and thus $S$ and $S_{Q_{\chi_p}, \varphi, X}$ are isomorphic as $R$-modules, and the isomorphism is given by $\lambda_X=\displaystyle\sum_{i\in X}\tilde\lambda^i.$ \endproof Now we are interested in knowing if $S$ and $S_{Q_{\chi_p}, \varphi, X}=\displaystyle\bigoplus\limits_{i\in X}Q_{\chi_p}^{{\otimes} ^i} $ are isomorphic as $R$-algebras, but for this question to make sense we have to require, according to Proposition \ref{iradd}, that $X$ has to be a $n$-saturated set.
We have the following.
\begin{prop}\label{isat} Suppose that $S=\displaystyle\bigoplus\limits_{i\in I} Q_{\chi^i_p},$ where $I$ is an $n$-saturated set, then $\lambda_I$ gives an $I$-graded $R$-algebra isomorphism between $S$ and $S_{Q_{\chi_p}, \varphi, I}$. Conversely, if $\lambda_I$ is an $R$-algebra isomorphism, then $S=\displaystyle\bigoplus\limits_{i\in I} Q_{\chi^i_p}.$ \end{prop} \proof
We shall check that the map $\lambda_I^{-1}$ preserves products, where $\lambda_I$ is as in the proof of Proposition \ref{isaX}. Let $x=a_1\cdots a_i\in Q_{\chi^i_p}$ and $y=a_{i+1}\cdots a_{i+j}\in Q_{\chi^j_p},$ with $i,j\in I$ and $a_l\in Q_{\chi_p},$ for all $l\in\{1,\cdots, i+j\}.$ We consider two cases
{\bf Case 1} $i+j < n.$ In this case $\lambda_I^{-1}(xy)=a_1\otimes \cdots \otimes a_i\otimes a_{i+1}\otimes \cdots \otimes a_{i+j}=\lambda_I^{-1}(x)\bullet \lambda_I^{-1}(y).$
{\bf Case 2} $i+j \geq n.$ Here \begin{align*}\lambda_I^{-1}(xy)&=\lambda_I^{-1}(\underbrace{a_1\cdots a_i\cdots a_n}_{\in R} a_{n+1}\cdots a_{i+j})\\ &=a_1\cdots a_i\cdots a_n\lambda_I^{-1}(a_{n+1}\cdots a_{i+j})\\ &=a_1\cdots a_i\cdots a_n a_{n+1}\otimes \cdots \otimes a_{i+j}\\ &=(a_1\otimes \cdots \otimes a_i)\bullet (a_{i+1}\otimes \cdots \otimes a_{i+j})\\ &=\lambda_I^{-1}(x)\bullet \lambda_I^{-1}(y), \end{align*} as desired. The converse is clear. \endproof
Let $H$ be a subgroup of $G.$ Then $H$ acts partially on $S$ with partial action $\alpha_H=\{\alpha_h: S_{h^{-1}}\to S_h\}_{h\in H}$ and $S\supseteq S^{\alpha_H}$ is a partial Galois extension.
Notice that in general $R \subseteq S^{\alpha_H}$ for any subgroup $H$ of $G.$ The following fact shows that the equality holds exactly when $\alpha$ is an extension by zero of $\alpha_H.$ (see Example \ref{ext0}). \begin{prop}\label{casiglob} Let $S\supseteq R$ be a partial Galois extension and $H$ a subgroup of $G$ acting globally on $S$ with action $\beta.$ Then $R=S^H,$ if and only if, $\alpha$ is extension by zero of $\beta.$ \end{prop} \begin{proof} It is clear that if $\alpha$ is extension by zero of $\beta,$ then $R=S^H.$ Conversely suppose that $R=S^H,$ then by \cite[iv) Theorem 4.1]{DFP} there are $S$-module isomorphisms $\prod_{g\in G}S_g\simeq S\otimes S\simeq \prod_{h\in H}S_h$. We shall show that $S_g={0}$ for all $g\in G\setminus H.$ For this, let $\mathfrak{p}$ be a prime ideal of $S,$ then $$\sum_{h \in H}{\rm rk}_{R_\mathfrak{p}}((S_h)_\mathfrak{p})+\sum_{g \in G\setminus H}{\rm rk}_{R_\mathfrak{p}}((S_g)_{\mathfrak{p}})=\sum_{h \in H}{\rm rk}_{R_\mathfrak{p}}((S_h)_\mathfrak{p}).$$ Thus, for $g \in G\setminus H$ we have that $(S_g)_{\mathfrak{p}}=0$ which implies $S_g=0,$ as desired. \end{proof}
Now we give the main result of this work. \begin{thm}\label{partoglob} Let $S\supseteq R$ be a partial $n$-kummerian extension, then there is an $n$-saturated set $I$ of $\{1,\cdots, n\}$ such that $S=\displaystyle\bigoplus\limits_{i\in I} Q_{\chi^i_p},$ if and only if, there is a subgroup $H$ of $G$ of order $m$ such that $S\supseteq R$ is a global $m$-kummerian extension with Galois group $H$ and global action $\alpha_H.$ In this case $\alpha$ is the extension by zero of $\alpha_H.$ \end{thm} \begin{proof} Suppose that $S=\displaystyle\bigoplus\limits_{i\in I} Q_{\chi^i_p},$ where $I$ is an $n$-saturated set. Let $i_0\in\{1,\cdots, n-1\} $ such that $I=\langle i_0\rangle$ and write $H=\langle g^{i_0}\rangle,$ then $S=\displaystyle\bigoplus\limits_{i=0}^{m-1} Q_{\tilde\chi^i_p},$ where $m$ is the order of $H$ and $\tilde\chi=\chi^{i_0}.$ Then $S\supseteq S^{\alpha_H}$ is a partial Galois extension and by Proposition \ref{cond} we have \begin{equation}\label{equall}R=Q_{\chi^0_p}=Q_{\tilde\chi^0_p}=S^{\alpha_H}.\end{equation} Finally, since ${\rm rk}_RS={\rm rk}_R\left(\displaystyle\bigoplus\limits_{i=0}^{m-1} Q_{\tilde\chi^i_p}\right)=m=\mid H\mid$ we get that $H$ acts globally on $S,$ thanks to \cite[Corollary 4.6]{DFP}.
Conversely, suppose that there exists a subgroup $H$ of $G$ such that $S\supseteq R$ is a global $m$-kummerian extension with Galois group $H.$ Write $H=\langle g^{i_0}\rangle,$ then by 3) of Lemma \ref{summa} we have that $S=\displaystyle\bigoplus\limits_{i=0}^{m-1} Q_{\tilde\chi^i_p},$ where $\tilde\chi=\chi^{i_0}.$ Finally taking $I=\langle i_0\rangle$ an $n$-saturated set of $\{1,\cdots, n\}$ we obtain $S=\displaystyle\bigoplus\limits_{i\in I} Q_{\chi^i_p}.$ The final assertions follow from Proposition \ref{casiglob} and \eqref{equall}. \end{proof}
\begin{rem}\label{para} It follows from Proposition \ref{isat} and Theorem \ref{partoglob} that the study of partial Kummer extensions which are parametrized by $I$-radical extensions can be reduced to the global case. \end{rem}
\subsection{Some final examples and remarks}
As observed in Remark \ref{para} there are partial kummerian extensions that are not equivalent to radical extensions. We give two examples of them.
\begin{exe}\label{e1}\cite[Example 6.1]{DFP} Let $G=\langle \sigma \mid \sigma^4=1 \rangle ,$ and put $S=\mathbb{C}e_1\oplus \mathbb{C}e_2\oplus \mathbb{C}e_3\oplus \mathbb{C}e_4,$ where $e_1,e_2,e_3$ and $e_4$ are orthogonal idempotents with sum $1_S.$ Then there is a partial action $\alpha$ of $G$ on $S$ by setting $$S_e=S,\,\,\,\, S_g= \mathbb{C}e_1\oplus \mathbb{C}e_2,\,\,\, S_{g^2}= \mathbb{C}e_1\oplus \mathbb{C}e_3\,\,\,\, \text{and}\,\,\,\,S_{g^3}= \mathbb{C}e_2\oplus \mathbb{C}e_3$$ and defining $\alpha_1={\rm id}_S,$ and $$\alpha_g(e_2)=e_1,\,\,\,\alpha_g(e_3)=e_2, \,\,\,\alpha_{g^2}(e_1)=e_3,\,\,\,\alpha_{g^2}(e_3)=e_1\,\,\,\text{and}\,\,\, \alpha_{g^3}=\alpha^{-1}_g.$$ Then $\mathbb{C} \simeq \{(z,z,z)\mid z\in \mathbb{C}\}$ is 4-kummerian with $w=i$ and $S/\mathbb{C}$ is a partial $4$-kummerian extension. Moreover $\hat G={\rm hom}(G, \mathbb{C})$ is generated by $\chi,$ where $\chi(\sigma)=i.$ Then by Proposition \ref{cond} we have that
\begin{itemize} \item $Q_{e_g}= \{(r,r,r)\mid r\in R\}=\langle e_1+ e_2+ e_3\rangle;$ \item $Q_{\chi_p}=\{(r, ir,-r)\mid r\in R\}=\langle e_1+ ie_2 -e_3\rangle;$ \item$Q_{\chi_p^2}=Q_{\chi_p}Q_{\chi_p}=\{(r, -r, r)\mid r\in R\}=\langle e_1-e_2 +e_3\rangle;$ \item $Q_{\chi_p^3}=\{(r, -ir,r)\mid r\in R\}=\langle e_1-ie_2 +e_3\rangle,$
\end{itemize} and we have that \begin{align*} S&=\sum_{\chi \in \hat G}Q_{\chi_p}=Q_{e_g}\oplus Q_{\chi_p}\oplus Q_{\chi_p^2}=Q_{e_g}\oplus Q_{\chi_p}\oplus Q_{\chi_p^3}=Q_{\chi_p}\oplus Q_{\chi_p^2}\oplus Q_{\chi_p^3} \end{align*} Moreover the sum $Q_{e_g}+ Q_{\chi_p^2}+ Q_{\chi_p^3}$ is not direct.
\end{exe} \begin{exe}\label{e2}\cite[Example 6.2]{DFP} Let $G=\langle \sigma \mid \sigma^ 5=1 \rangle .$ Then there is a partial action of $G$ on $S=\mathbb{C}e_1\oplus \mathbb{C}e_2\oplus \mathbb{C}e_3\oplus \mathbb{C}e_4\oplus \mathbb{C}e_5,$ where $e_1,e_2,e_3, e_4$ and $e_5$ are orthogonal idempotents with sum $1_S,$ such that $S/R$ is a partial Galois extension, where $R=\{(r,r,s,s)\mid r,s\in \mathbb{C}\}$ is a 5-kummerian ring with $w$ a fifth primitive root of the unity. Then by Proposition \ref{cond} we have
\begin{itemize} \item $Q_{e_g}= R=\langle e_1+ e_2, e_3 + e_4\rangle;$ \item $Q_{\chi_p}=\{(r, wr,s, ws)\mid r,s\in \mathbb{C}\}=\langle e_1+ we_2, e_3 + we_4\rangle;$ \item$Q_{\chi_p^2}=\{(r, w^2r,s, w^2s)\mid r,s\in \mathbb{C}\}=\langle e_1+ w^2e_2, e_3 + w^2e_4\rangle;$ \item $Q_{\chi_p^3}=\{(r, w^3r,s, w^3s)\mid r,s\in \mathbb{C}\}=\langle e_1+ w^3e_2, e_3 + w^3e_4\rangle; $ \item $Q_{\chi_p^4}=\{(r, w^4r,s, w^4s)\mid r,s\in \mathbb{C}\}=\langle e_1+ w^4e_2, e_3 + w^4e_4\rangle; $ \end{itemize} and we have that $S=\sum_{\chi \in \hat G}Q_{\chi_p}=Q_{\chi^i_p}\oplus Q_{\chi^j_p},$ for all $1\leq i,j\leq 5, i\neq j.$
\end{exe}
Inspired by Example \ref{e1} and Example \ref{e2} we finish this work with the following. \\
\noindent {\bf Question:} Let $S\supseteq R$ be a partial $n$-kummerian extension with Galois cyclic group $G.$ Write $S=\sum\limits_{i=0}^{n-1} Q_{\chi^i_p},$ where $\hat G=\langle \chi \rangle,$ then is there a subset $X$ of $\{0,1,\cdots, n-1\}$ such that $S=\bigoplus\limits_{i\in X} Q_{\chi^i_p}$?
Notice that the answer to the question above is affirmative for all partial kummerian extensions that can be parametrized by radical extensions, but as observed in Example \ref{e1} and Example \ref{e2} there are other partial kummerian extensions for which the answer is also affirmative. In particular, these partial Galois extensions $S\supseteq R$ are such that ${\rm rk}_R(S)$ is well defined.
\end{document} |
\begin{document}
\title[Compositions of fold maps having simple structures with projections]{On simple classes of special generic maps and round fold maps and fold maps obtained by composing projections} \author{Naoki Kitazawa} \keywords{Singularities of differentiable maps; special generic maps and round fold maps. \\ \indent {\it \textup{2020} Mathematics Subject Classification}: Primary~57R45. Secondary~57R19.} \address{Institute of Mathematics for Industry, Kyushu University, 744 Motooka, Nishi-ku Fukuoka 819-0395, Japan\\
TEL (Office): +81-92-802-4402 \\
FAX (Office): +81-92-802-4405 \\ } \email{n-kitazawa@imi.kyushu-u.ac.jp} \urladdr{https://naokikitazawa.github.io/NaokiKitazawa.html} \begin{abstract}
{\it Fold} maps are fundamental tools in the theory of singularities of differentiable maps and its applications to geometry. They are higher dimensional variants of Morse functions. Classes of {\it special generic} maps and {\it round} fold maps are important classes of fold maps. {\it Special generic} maps are higher dimensional variants of Morse functions on homotopy spheres with exactly two {\it singular points}: canonical projections of unit spheres are special generic. {\it Round} fold maps are Morse functions obtained as doubles of Morse functions, or fold maps such that the restrictions to the singular sets are embeddings and that the images of the singular sets are concentric spheres. In the present paper, we discuss compositions of these maps with canonical projections. For example, we observe that these compositions for special generic maps of simple classes are regarded as round fold maps in considerable cases. We also present round fold maps we cannot represent in this way, seeming to be represented so. Note that such compositions are natural operations in related theory of differentiable maps.
\end{abstract}
\maketitle \section{Introduction.} \label{sec:1} \subsection{Fundamental notions and notation on differentiable maps and differential topology of manifolds and fold maps.} {\it Fold} maps are fundamental tools in the theory of singularities of differentiable maps and its applications to geometry. A {\it singular} point $p \in X$ of a differentiable map $c:X \rightarrow Y$ is a point at which the rank of the differential ${dc}_p$ is smaller than both the dimensions $\dim X$ and $\dim Y$. $S(c)$ denotes the set of all the singular points of $c$ (the {\it singular set} of $c$). $c(S(c))$ is the {\it singular value set} of $c$. $Y-c(S(c))$ is the {\it regular value set} of $c$. A point in $Y$ is a {\it singular {\rm (}regular{\rm )} value} if it is a point in the singular (resp. regular) value set of the map. \begin{Def} \label{def:1} A {\it fold} map is a smooth map at each singular point of which we can represent as $(x_1, \cdots, x_m) \mapsto (x_1,\cdots,x_{n-1},{\sum}_{j=n}^{m-i(p)}{x_j}^2-{\sum}_{j=m-i(p)+1}^m {x_j}^2)$ for suitable coordinates and an integer $0 \leq i(p) \leq \frac{m-n+1}{2}$. \end{Def} We call a singular point of a smooth map represented as in Definition \ref{def:1} for suitable coordinates near the singular point and the value at the point a {\it fold} point for the map. \begin{Prop} In Definition \ref{def:1} and for a fold point $p$ for a general smooth map, we can determine $i(p)$ uniquely for each singular point $p$. The restriction to the set consisting of all the singular points of a fixed index is an immersion of an {\rm (}$n-1${\rm )}-dimensional closed and smooth submanifold with no boundary in the $m$-dimensional manifold of the domain. \end{Prop}
We call $i(p)$ here the {\it index} of the singular point $p$. Note that a fold map is a Morse function if and only if $n=1$.
{\it Special generic} maps and {\it round} fold maps are important classes of fold maps. \begin{Def} \label{def:2} A {\it special generic} map is a fold map such that $i(p)=0$ for any singular point $p$. \end{Def} As simplest examples, Morse functions with exactly two singular points on homotopy spheres in the so-called Reeb's theorem, canonical projections of unit spheres, and so on, are special generic.
Hereafter, (boundary) connected sums of manifolds, are discussed in the smooth category.
${\mathbb{R}}^k$ denotes the $k$-dimensional Euclidean space, endowed with the Euclidean metric, and for $x \in {\mathbb{R}}^k$, $||x|| \geq 0$ denotes the distance between $x$ and the origin $0 \in {\mathbb{R}}^k$ or the norm where $x$ is seen as a vector. $D^k:=\{x \in {\mathbb{R}}^{k} \mid ||x|| \leq 1\}$ denotes the $k$-dimensional unit disc for $k>0$ and $S^k:=\{x \in {\mathbb{R}}^{k+1} \mid ||x||=1\}$ denotes the $k$-dimensional unit sphere. \begin{Ex} Let $m\geq n$ be positive integers. An $m$-dimensional closed manifold $M$ represented as a connected sum of $l>0$ manifolds $S^{l_j} \times S^{m-l_j}$ ($1 \leq l_j \leq n-1$) admits a special generic map $f:M \rightarrow {\mathbb{R}}^n$ such that $f {\mid}_{S(f)}$ is an embedding and that $f(M)$ is represented as a boundary connected sum of the $l$ manifolds $S^{l_j} \times D^{n-l_j}$ where $1 \leq j \leq l$ is an integer. \end{Ex}
Diffeomorphisms on a smooth manifold are assumed to be smooth. We define the {\it diffeomorphism group} of the manifold as the group of all the diffeomorphisms. The structure groups of bundles whose fibers are (smooth) manifolds are subgroups of the diffeomorphism groups unless otherwise stated. In other words the bundles are {\it smooth}. The class of {\it linear} bundles is a subclass of the class of smooth bundles. A bundle is {\it linear} if the fiber is regarded as a unit sphere or a unit disc in a Euclidean space and the structure group acts linearly in a canonical way.
\begin{Prop}[\cite{saeki}, \cite{saeki2}, and so on.] \label{prop:2} Let $m>n \geq 1$ be integers. An $m$-dimensional closed, connected and smooth manifold $M$ admits a special generic map into ${\mathbb{R}}^n$ if and only if the following three hold. \begin{enumerate} \item There exists a smooth surjection $q_f:M \rightarrow W_f$ onto an $n$-dimensional compact and smooth manifold $W_f$ and an immersion $\bar{f}:W_f \rightarrow {\mathbb{R}}^n$. \item There exists a small collar neighborhood $C(\partial W_f)$ such that the composition of $q_f {\mid}_{{q_f}^{-1}(C(\partial W_f))}$ with the canonical projection to $\partial W_f$ gives a trivial linear bundle whose fiber is diffeomorphic to $D^{m-n+1}$. \item $q_f {\mid}_{{q_f}^{-1}(W_f-{\rm Int}(C(\partial W_f)))}$ gives a smooth bundle whose fiber is diffeomorphic to $S^{m-n}$. \end{enumerate} Furthermore, we can take a special generic map $f$ as $\bar{f} \circ q_f$ satisfying $q_f(S(f))=\partial W_f$ if $M$ admits a special generic map into ${\mathbb{R}}^n$. \end{Prop}
In the present paper, we mainly consider a simple subclass of special generic maps.
We review {\it round} fold maps. \begin{Def}[\cite{kitazawa0.1}, \cite{kitazawa0.2}, \cite{kitazawa0.3}, \cite{kitazawa0.10}, and so on.] \label{def:3} A fold map $f$ on an $m$-dimensional closed, connected and smooth manifold $M$ into ${\mathbb{R}}^n$ is said to be {\it round} if either of the following holds. \begin{enumerate} \item $n=1$. $f {\mid}_{S(f)}$ is an embedding. There exist a regular value $a \in {\mathbb{R}}^n$ and
a pair $(\Phi:f^{-1}((-\infty,a]) \rightarrow f^{-1}([a,\infty)),{\phi}_{\Phi}: (-\infty,a] \rightarrow [a,\infty))$ such that $f {\mid}_{f^{-1}([a,\infty))} \circ \Phi={\phi}_{\Phi} \circ f {\mid}_{f^{-1}((-\infty,a])}$.
\item $n \geq 2$. $f {\mid}_{S(f)}$ is an embedding. There exist a diffeomorphism $\phi$ on ${\mathbb{R}}^n$ and an integer $l>0$ such that $\phi(f(S(f)))=\{x \in {\mathbb{R}}^n \mid ||x|| \in \mathbb{N}, 1 \leq ||x|| \leq l\}$. \end{enumerate} \end{Def} \begin{Def}[These notions are essentially first defined in \cite{kitazawa0.4}; we change the names of these notions here] \label{def:4} Let $f:M \rightarrow {\mathbb{R}}^n$ be a round fold map and $\phi$ be the diffeomorphism in Definition \ref{def:3}. \begin{enumerate} \item \label{def:4.1} $f$ is said to {\it have a globally trivial monodromy} if either of the following holds. \begin{enumerate} \item $n=1$.
\item $n \geq 2$ and for a diffeomorphism $\phi$ on ${\mathbb{R}}^n$ and an integer $l>0$ such that $\phi(f(S(f)))=\{x \in {\mathbb{R}}^n \mid ||x|| \in \mathbb{N}, 1 \leq ||x|| \leq l\}$, the composition of the restriction of $\phi \circ f$ to ${(\phi \circ f)}^{-1}(\{x \in {\mathbb{R}}^n \mid \frac{1}{2} \leq ||x||\})$ with a canonical map mapping $x$ to $\frac{1}{||x||} x$ gives a trivial bundle over the unit sphere. \end{enumerate} \item \label{def:4.2} $f$ is said to {\it have a componentwisely trivial monodromy} if either of the following holds. \begin{enumerate} \item $n=1$.
\item $n \geq 2$, and for a diffeomorphism $\phi$ on ${\mathbb{R}}^n$ and an integer $l>0$ such that $\phi(f(S(f)))=\{x \in {\mathbb{R}}^n \mid ||x|| \in \mathbb{N}, 1 \leq ||x|| \leq l\}$, the composition of the restriction of $\phi \circ f$ to ${(\phi \circ f)}^{-1}(\{x \in {\mathbb{R}}^n \mid k-\frac{1}{2} \leq ||x|| \leq k+\frac{1}{2} \})$ with a canonical map mapping $x$ to $\frac{1}{||x||} x$ gives a trivial bundle over the unit sphere for each integer $1 \leq k \leq l$. \end{enumerate} \end{enumerate} \end{Def} As a simplest example, canonical projections of unit spheres are round and have globally trivial monodromies and componentwisely trivial monodromies. \subsection{Main Theorems.} The following three are part of main theorems of the present paper. They are all on compositions of special generic maps or round fold maps satisfying good properties with suitable projections. Composing smooth maps with projections is a natural and, in various scenes, strong method in the singularity theory and geometric theory of differentiable maps. \cite{mather} is one of the pioneering papers on sophisticated methods of the singularity theory of differentiable maps and concentrates on generic properties on singular points or singularities of smooth maps obtained by composing given smooth maps with canonical projections. This is to some extent related to the present study where we do not need to understand the theory so much here. \cite{fukuda} considers compositions of smooth maps of a good class or the class of so-called {\it Morin} maps with canonical projections to the line or the $1$-dimensional Euclidean space to relate topological information of singular sets with the manifolds of the domains via classical and fundamental theory of Morse functions. The class of Morin maps contains the class of fold maps as a simple class. We do not concentrate on this here.
\cite{golubitskyguillemin} explains fundamental and sophisticated theory on the singularity theory and geometric theory of differentiable maps systematically including fundamental theory of Morse functions, fold maps and Morin maps, well-known Mather's sophisticated theory including theory related to \cite{mather}, and so on.
${\pi}_{m,n}:{\mathbb{R}}^m \rightarrow {\mathbb{R}}^n$ denotes the canonical projection to the first $n$ components where $m>n \geq 1$. \begin{MainThm} \label{mainthm:1} Let $m>n \geq 2$ and $l>0$ be integers. Let $M$ be an $m$-dimensional closed manifold and $f: M \rightarrow {\mathbb{R}}^n$ be a round fold map satisfying the following two properties. \begin{enumerate} \item The index of each singular point is $0$ or $1$. The number of connected components of the singular set is $l+1$. \item Preimages of regular values of $f$ are disjoint unions of copies of $S^{m-n}$ and the numbers of the connected components of preimages of regular values in the connected component containing $0$ are $l+1$ where $\phi$ denotes the diffeomorphism in {\rm Definition \ref{def:3}}. \end{enumerate} Then by composing $\phi \circ f$ with the canonical projection ${\pi}_{n,n^{\prime}}:{\mathbb{R}}^n \rightarrow {\mathbb{R}}^{n^{\prime}}$, we have a new round fold map satisfying the following three properties. \begin{enumerate} \item The index of each singular point is $0$ or $\min\{n-n^{\prime}+1,\frac{m-n^{\prime}+1}{2},(m-n^{\prime}+1)-(n-n^{\prime}+1)\}$. The number of singular points is $2(l+1)$ for $n^{\prime}=1$ and that of connected components of the singular set is $l+1$ for $n^{\prime} \geq 2$. \item Preimages of regular values are diffeomorphic to $S^{m-n^{\prime}}$ or represented as connected sums of copies of $S^{n-n^{\prime}} \times S^{m-n}$ and the numbers of the copies of $S^{n-n^{\prime}} \times S^{m-n}$ of preimages of regular values in the connected component containing $0$ are $l+1$. \item The resulting round fold map has a globally trivial monodromy. \end{enumerate} \end{MainThm} In the following two Main Theorems, we need the notion of a fold map {\it represented as a connected sum of fold maps} and a {\it summand} for the family of the fold maps, for example. We define these notions in Definition \ref{def:5}.
\begin{MainThm} \label{mainthm:2} In Proposition \ref{prop:2}, let $M$ admit a special generic map $f:M \rightarrow {\mathbb{R}}^n$ such that $n \geq 2$, and represented as a connected sum of a family of finitely many fold maps indexed by $j \in J$ satisfying the following three. \begin{enumerate} \item The images of the special generic maps regarded as summands for the family of the maps are smoothly immersed manifolds diffeomorphic to $F_j \times [-1,1]$ where $F_j$ is an {\rm (}$n-1${\rm )}-dimensional closed and connected manifold we can immerse into ${\mathbb{R}}^n$. \item $W_f$ in Proposition \ref{prop:2} is represented as a boundary connected sum of these manifolds $F_j \times [-1,1]$. \item For each manifold $F_j$, we can take an immersion before so that the composition of the immersion of $F_j$ with the canonical projection to ${\mathbb{R}}^{n_j}$ via ${\pi}_{n,n_j}$ is a round fold map into ${\mathbb{R}}^{n_j}$. \end{enumerate} Then by changing $\bar{f}$ to a suitable immersion and composing the new special generic map with ${\pi}_{n,\min \{n_j \mid j \in J.\}}$, we have a round map into ${\mathbb{R}}^{\min \{n_j \mid j \in J.\}}$. \end{MainThm} The last one is a corollary to existing and obtained results. Theorem \ref{thm:2} and Lemma \ref{lem:1} are in the next section and the former is also a main theorem. We do not introduce these two rigorously in the present section. \begin{MainThm} \label{mainthm:3} There exists a family of infinitely many $7$-dimensional closed, simply-connected and spin manifolds satisfying the following five. \begin{enumerate} \item The integral cohomology rings of these 7-dimensional manifolds are mutually isomorphic. \item Distinct 7-dimensional manifolds in the family are not homeomorphic. 
\item All manifolds in the family admit round fold maps having globally trivial monodromies and satisfying the property on preimages of regular values for round maps obtained in Theorem \ref{thm:2} with $(m,n,n^{\prime})=(7,6,4)$ and $(i_1,i_2)=(1,1)$. \item Previous round fold maps cannot be obtained in Theorem \ref{thm:2} with $(m,n,n^{\prime})=(7,6,4)$. \item Each of these round fold maps cannot be obtained by composing any fold map into ${\mathbb{R}}^5$ satisfying the following properties with the canonical projection. \begin{enumerate} \item A fold map is represented as a connected sum of finitely many fold maps. \item Each summand for the family of the previous fold maps is a round fold map into ${\mathbb{R}}^5$ as in the assumption of Main Theorem \ref{mainthm:1} or a special generic map in Lemma \ref{lem:1}. \end{enumerate} \end{enumerate} \end{MainThm} \subsection{The content of the present paper.} In the second section, we prove Main Theorem \ref{mainthm:1}. We also see that a main ingredient of the proof generalize a main argument of section 6 of \cite{saekisuzuoka}. We also introduce Theorem \ref{thm:1} as a previously obtained and related result by the author. As another main theorem, we prove Theorem \ref{thm:2}. In the third section, we prove Main Theorem \ref{mainthm:2}. Some of \cite{kitazawa0.4}--\cite{kitazawa0.6} are closely related to this. In the fourth section, we prove Main Theorem \ref{mainthm:3}. Related to this, we first give a further exposition on Theorem \ref{thm:1} as Theorem \ref{thm:3}. As Remark \ref{rem:3}, we also explain relations between the arguments and ones in \cite{kitazawa0.7}--\cite{kitazawa0.9}, which are on construction of new fold maps such that the compositions with suitable projections are given fold maps. In other words, we consider lifting given fold maps to new fold maps. 
This construction is an important and explicit work in considering lifting smooth maps to immersions, embeddings or more general generic smooth maps.
Hereafter, manifolds, maps between manifolds, and so on, are smooth unless otherwise stated. \section{A proof of Main Theorem \ref{mainthm:1} and related problems, results and remarks.}
\begin{proof}[A proof of Main Theorem \ref{mainthm:1}] The fact that we have a new round fold map is obvious from the definition of a fold map and fundamental theory.
The fact on the indices of singular points is obvious from the local forms of the round fold maps. We consider the preimage of a regular value $p \in {\mathbb{R}}^{n^{\prime}}$ such that the distance $||p||$ between the origin and $p$ satisfies $k<||p||<k+1$ for a non-negative integer $k\leq l$.
Consider the canonical inclusion of ${\mathbb{R}}^{n^{\prime}}$ into ${{\mathbb{R}}^{n^{\prime}}} \times \{0\} \subset {{\mathbb{R}}^{n^{\prime}}} \times {{\mathbb{R}}^{n-n^{\prime}}} = {\mathbb{R}}^n$. The preimage is regarded as the preimage of $\{p\} \times {{\mathbb{R}}^{n-n^{\prime}}}$ for the original round fold map. The preimage is also regarded as the boundary of the preimage of the set $\{tp\mid t \geq 1\} \times {\mathbb{R}}^{n-n^{\prime}}$ for the original round fold map: $tp$ is represented as a vector and regarded as a point canonically.
It is diffeomorphic to the boundary of the product of the following two manifolds where the corner is eliminated.
\begin{enumerate} \item The preimage of $\{tp\mid t \geq 1\} \times \{0\} \subset \{tp\mid t \geq 1\} \times {\mathbb{R}}^{n-n^{\prime}}$, which is diffeomorphic to a manifold obtained by removing the interiors of $l+1-k$ disjointly and smoothly embedded ($m-n+1$)-dimensional unit discs from a copy of $S^{m-n+1}$. \item $(\{tp\mid t \geq 1\} \times {\mathbb{R}}^{n-n^{\prime}}) \cap \partial (\phi \circ f)(M)$, which is diffeomorphic to $D^{n-n^{\prime}}$. \end{enumerate}
FIGURE \ref{fig:1} shows a case where $(n,l)=(2,1)$ and $p \in \mathbb{R}$ with $0<||p||<1$.
\begin{figure}
\caption{The image (the singular value) of $\phi \circ f$ for $(n,l)=(2,1)$ and $p \in \mathbb{R}$ with $0<||p||<1$.}
\label{fig:1}
\end{figure}
From the structure of the original round fold map, the bundle in Definition \ref{def:4} (\ref{def:4.1}) is regarded as the restriction of the bundle over the unit sphere $S^{n-1}$ to the equator and also regarded as the restriction of the bundle over an ($n-1$)-dimensional hemisphere (in the unit sphere) to the boundary. This completes the proof of the third statement.
This completes the proof.
\end{proof} The following present some examples of given round fold maps in Main Theorem \ref{mainthm:1}. \begin{Thm}[\cite{kitazawa0.1}, \cite{kitazawa0.2} and \cite{kitazawa0.10}.] \label{thm:1} Let $m>n \geq 1$ be integers. Let $M$ be an $m$-dimensional manifold represented as a connected sum of $l>0$ total spaces of bundles over $S^n$ whose fibers are diffeomorphic to $S^{m-n}$. Then there exists a round fold map $f: M \rightarrow {\mathbb{R}}^n$ having a componentwisely trivial monodromy satisfying the following two properties. \begin{enumerate} \item The index of each singular point is $0$ or $1$. The number of singular points is $2(l+1)$ for $n=1$ and that of connected components of the singular set is $l+1$ for $n \geq 2$. \item Preimages of regular values of $f$ for $n=1$ or $\phi \circ f$ for $n \geq 2$ are disjoint unions of copies of $S^{m-n}$ and the numbers of the connected components of preimages of regular values in the connected component containing $p$ are $l+1$ where $\phi$ denotes the diffeomorphism in Definition \ref{def:3}. Furthermore, $p=a$ if $n=1$ and $p=0$ if $n \geq 2$. \end{enumerate} \end{Thm}
\begin{Rem} \label{rem:1} Related to the argument here, Asano \cite{asano} considered and solved a more general problem for a good class of smooth maps into the plane on $4$-dimensional closed, connected and orientable manifolds. This study concentrates on the class of so-called {\it trisection maps} into the plane. The image of such a map is a smoothly embedded copy of the $2$-dimensional unit disc and the singular value set is, topologically, embedded concentric circles and may have finitely many {\it cusps} where the singular sets are $1$-dimensional closed and smooth submanifolds with no boundaries and where all singular points except (preimages of) cusps just before are fold points. One of the problems questions, for a so-called {\it generic} arc properly embedded in the image, to which $3$-dimensional closed manifold the preimage is diffeomorphic? For general theory of the class of trisection maps, see \cite{gaykirby} and as a recent work, see also \cite{baykursaeki} for example. \end{Rem} \begin{Def} \label{def:5} A fold map $f$ on an $m$-dimensional closed and connected manifold $M$ into ${\mathbb{R}}^n$ is said to be {\it represented as a connected sum} of two fold maps if there exist a hyperplane $H={\mathbb{R}}^{n-1} \times \{0\} \subset {\mathbb{R}}^n$ with its tubular neighborhood ${\mathbb{R}}^{n-1} \times [-1,1]=H \times [-1,1]$ and a diffeomorphism $\phi$ from a submanifold $S \times [-1,1] \subset {\mathbb{R}}^n$ regarded as the total space of a product bundle over $S$, diffeomorphic to ${\mathbb{R}}^{n-1}$, onto $H \times [-1,1]$, satisfying the following three where $f_S$ denotes the restriction of $f$ to $f^{-1}(S \times [-1,1])$. \begin{enumerate} \item The restriction of $\phi$ to $S \times \{t\}$ is a diffeomorphism onto $H \times \{t\}$ for any $t \in [-1,1]$. 
\item There exists a diffeomorphism ${\Phi}_H:S^{m-1} \times [-1,1] \rightarrow f^{-1}(S \times [-1,1])$ such that the restriction of ${\Phi}_H$ to $S^{m-1} \times \{t\}$ is a diffeomorphism onto $f^{-1}(S \times \{t\})$. \item ${\phi} \circ f_S \circ {\Phi}_H$ is the canonical projection of the ($m-1$)-dimensional unit sphere into the ($n-1$)-dimensional Euclidean space if we restrict this to each $S^{m-1} \times \{t\}$, the target to each ${\mathbb{R}}^{n-1} \times \{t\}$, and identify them with $S^{m-1}$ and ${\mathbb{R}}^{n-1}$ in canonical ways. \end{enumerate} \end{Def} By decomposing the fold map into two smooth maps and by attaching copies of the restriction of the canonical projection to the hemisphere which is the preimage of the subspace of the target regarded as the $n$-dimensional half-space in a suitable way, we have two fold maps such that the original manifold is represented as a connected sum of the two resulting manifolds. Note that the target of the projection of the hemisphere is the $n$-dimensional half-space. We call each of these fold maps a {\it summand} of $f$ for the pair of the fold maps. We can define these notions where we need to consider a family of fold maps consisting of more than $2$ fold maps and finitely many ones.
\begin{Lem} \label{lem:1} In Proposition \ref{prop:2}, let $M$ admit a special generic map $f:M \rightarrow {\mathbb{R}}^n$ such that $n \geq 2$, and that $W_f$ is represented as a boundary connected sum of finitely many copies of $S^{n-1} \times I$ where $I$ denotes a closed interval. Then by changing $\bar{f}$ to a suitable embedding and composing the new special generic map with ${\pi}_{n,n-1}$, we have a round fold map having a componentwisely trivial monodromy in the assumption of Main Theorem \ref{mainthm:1}. \end{Lem} \begin{proof} By a diffeomorphism on ${\mathbb{R}}^n$, we can map $\bar{f}(W_f)$ to a submanifold obtained by removing the interiors of finitely many copies of the unit disc smoothly and disjointly embedded into the interior of the unit disc whose center is the origin. We take the embedded copies so that they are discs of fixed diameters, that their centers are located at points on an axis, and that for distinct copies, the diameters are distinct. By composing the resulting special generic map with a suitable projection, we have a round fold map satisfying the conditions assumed in Main Theorem \ref{mainthm:1}. By arguments similar to the one in the proof of the last property of the resulting round fold map in Main Theorem \ref{mainthm:1}, the resulting round fold map has a componentwisely trivial monodromy. To complete the proof, we may also apply the main ingredient of section 6 of \cite{saekisuzuoka}. \end{proof} This special generic map is regarded as one represented as a connected sum of finitely many special generic maps: for each summand $f_a$ for the family of fold maps, $W_{f_a}$ is diffeomorphic to $S^{n-1} \times I$.
The following theorem is a newly obtained theorem in the present paper. \begin{Thm} \label{thm:2} In Proposition \ref{prop:2}, let $m>n \geq 2$ and $M$ admit a special generic map $f:M \rightarrow {\mathbb{R}}^n$ such that $W_f$ is diffeomorphic to the $n$-dimensional unit disc or a manifold of the form $S^k \times D^{n-k}$ where $k$ is an integer satisfying $1 \leq k \leq n-1$, or a special generic map $f:M \rightarrow {\mathbb{R}}^n$ represented as a connected sum of a family of ${\Sigma}_{k=1}^{n-1} i_k>1$ fold maps satisfying the following two. \begin{enumerate} \item $i_k$ is a non-negative integer. \item The images of exactly $i_k$ special generic maps regarded as summands for the family of the maps are smoothly immersed manifolds diffeomorphic to $S^{n-k} \times D^k$. $W_f$ in Proposition \ref{prop:2} is represented as a boundary connected sum of these manifolds. \end{enumerate} By changing $\bar{f}$ to a suitable embedding and composing the new special generic map with ${\pi}_{n,n^{\prime}}$, we have a round fold map having a globally trivial monodromy in the former case and a fold map satisfying the following two in the latter case. \begin{enumerate} \item The fold map is represented as a connected sum of finitely many fold maps and summands for the family of these fold maps satisfy the following two. \begin{enumerate} \item If $n^{\prime} \leq \max\{n-k \mid i_k>0\}$, then exactly one of the summands is a round fold map. \item For the remaining summands, they are special generic and the images of exactly $i_k$ special generic maps are smoothly embedded manifolds diffeomorphic to $S^{n-k} \times D^{n^{\prime}-n+k}$ for $n-n^{\prime}+1 \leq k \leq n-1$. $W_f$ in Proposition \ref{prop:2} is represented as a boundary connected sum of these manifolds. \end{enumerate} \item Moreover, the round fold map before satisfies the following properties. \begin{enumerate}
\item The singular value set can be of the form $\{x \in {\mathbb{R}}^{n^{\prime}} \mid ||x|| \in \mathbb{N}, 1 \leq ||x|| \leq {\Sigma}_{k=1}^{n-n^{\prime}} i_k+1.\}$ by composing the round fold map with a suitable diffeomorphism $\phi$ as in Definition \ref{def:3} if we need. \item Consider going straight from a point in the complementary set of the image to the origin in the target of the round fold map. Then in the $j$-th connected component of the intersection of the image and the regular value set, the preimage of a point there is diffeomorphic to a manifold in the following list. \begin{enumerate} \item If $j=1$, then it is diffeomorphic to $S^{m-n^{\prime}}$. \item If ${\Sigma}_{j^{\prime}=1}^{j_0} i_{j^{\prime}}+1 < j \leq {\Sigma}_{j^{\prime}=1}^{j_0+1} i_{j^{\prime}}+1$ where $0 \leq j_0< n-n^{\prime}-1$, then it is diffeomorphic to a manifold represented as a connected sum of the manifolds satisfying the following two. \begin{enumerate} \item The family of the manifolds consists of exactly $j-1$ manifolds. \item The family contains exactly $i_{j^{\prime}}$ manifolds diffeomorphic to $S^{j^{\prime}} \times S^{m-n^{\prime}-j^{\prime}}$ for $1 \leq j^{\prime}<j_0$ and exactly $j-{\Sigma}_{j^{\prime}=1}^{j_0} i_{j^{\prime}}-1$
manifolds diffeomorphic to $S^{j_0} \times S^{m-n^{\prime}-j_0}$. \end{enumerate} \item If ${\Sigma}_{j^{\prime}=1}^{n-n^{\prime}-1} i_{j^{\prime}}+1 < j \leq {\Sigma}_{j^{\prime}=1}^{n-n^{\prime}} i_{j^{\prime}}+1$, then it is diffeomorphic to the disjoint union of a manifold represented as a connected sum of the manifolds
satisfying the following two and $j-{\Sigma}_{j^{\prime}=1}^{n-n^{\prime}-1} i_{j^{\prime}}-1$ copies of $S^{m-n^{\prime}}$. \begin{enumerate} \item The family of the manifolds consists of exactly ${\Sigma}_{j^{\prime}=1}^{n-n^{\prime}-1} i_{j^{\prime}}$ manifolds.
\item The family contains exactly $i_{j^{\prime}}$ manifolds diffeomorphic to $S^{j^{\prime}} \times S^{m-n^{\prime}-j^{\prime}}$ for $1 \leq j^{\prime} \leq n-n^{\prime}-1$. \end{enumerate} \end{enumerate} \item The round fold map has a componentwisely trivial monodromy. \end{enumerate} \end{enumerate} \end{Thm} \begin{proof} We can change the original special generic map $f$ by changing $\bar{f}$ into a suitable immersion so that the following properties hold.
\begin{enumerate} \item In a generalized case of Definition \ref{def:5}, we have finitely many hyperplanes $S$ each of which is of the form ${\mathbb{R}}^{n-1} \times \{a_i\} \subset {\mathbb{R}}^n$ and we have a family of fold maps. \item Summands for the family satisfy the following properties. \begin{enumerate} \item For one of the summands, in a generalized case of Definition \ref{def:5}, we have ${\Sigma}_{k=1}^{k=n-n^{\prime}} i_k$ hyperplanes each of which is of the form ${\mathbb{R}}^{n-1} \times \{b_i\} \subset {\mathbb{R}}^n$ after composing the map with a suitable diffeomorphism on the target. Thus we have another family of fold maps. For the summands for the newly obtained family, there exist exactly $i_k$ special generic maps whose images are embedded manifolds diffeomorphic to $S^{n-k} \times D^k$ for $1 \leq k \leq n-n^{\prime}$. \item For the remaining summands, there exist exactly $i_k$ special generic maps whose images are embedded manifolds diffeomorphic to $S^{n-k} \times D^k$ for $n-n^{\prime}+1 \leq k \leq n-1$. We also regard $S^{n-k} \times \{0\} \subset {\mathbb{R}}^{n-k} \times \{0\} \times I_k \subset {\mathbb{R}}^{n-k} \times {\mathbb{R}}^{n-n^{\prime}} \times I_k \subset {\mathbb{R}}^{n^{\prime}-1} \times {\mathbb{R}}^{n-n^{\prime}} \times I_k$ and $S^{n-k} \times D^k$ as a smoothly and naturally embedded compact manifold in the interior of ${\mathbb{R}}^{n-k} \times {\mathbb{R}}^{k-1} \times I_k={\mathbb{R}}^{n-1} \times I_k$ where $I_k$ denotes a closed interval in $\mathbb{R}$.
\end{enumerate} \end{enumerate} After taking $\bar{f}$ suitably, we compose the resulting special generic map with the canonical projection to ${\mathbb{R}}^{n^{\prime}}$, identified canonically and suitably with ${\mathbb{R}}^{n^{\prime}-1} \times \{0\} \times I_k$. By applying Main Theorem \ref{mainthm:1} and Lemma \ref{lem:1} and respecting the structures of the special generic map and its image, we have a round map, which is also a desired map. This completes the proof. \end{proof} \begin{Rem} \label{rem:2} One of pioneering studies studying preimages of regular values for (so-called {\it generic}) maps obtained by composing given generic maps with projections is, presented in section 6 of \cite{saekisuzuoka}. This shows that preimages of regular values for (generic) maps obtained by composing special generic maps with projections are disjoint unions of copies of spheres. In Lemma \ref{lem:1}, specific cases of this explicit study appear. \end{Rem}
\section{A proof of Main Theorem \ref{mainthm:2} and related expositions.} Hereafter, for a set $X$, $\sharp X$ denotes the cardinality of $X$. \begin{proof}[A proof of Main Theorem \ref{mainthm:2}.] We can change the original special generic map $f$ by changing $\bar{f}$ into a suitable immersion so that the following properties hold.
\begin{enumerate} \item In a generalized case of Definition \ref{def:5}, we have $\sharp J-1$ hyperplanes $S$ each of which is of the form ${\mathbb{R}}^{n-1} \times \{a_i\} \subset {\mathbb{R}}^n$ and we have a family of fold maps. \item For the summands, there exist exactly $\sharp J$ special generic maps whose images are immersed manifolds diffeomorphic to $F_j \times I=F_j \times [-1,1]$ for each $j \in J$ and we can consider identifications between the domains of the immersions and $F_j \times I$ and make the situation satisfying the following two by virtue of the assumption on the immersion of $F_j$. \begin{enumerate} \item The compositions of the restrictions of the immersions to $F_j \times \{1\}$ and $F_j \times \{-1\}$ with the canonical projection to ${\mathbb{R}}^{n_j}$ are round fold maps. \item For the round fold maps just before, the disjoint union of singular value sets is of the form of the singular value set of a round fold map or, in short, concentric. \end{enumerate} \end{enumerate} After taking $\bar{f}$ suitably, we compose the resulting special generic map with the canonical projection to ${\mathbb{R}}^{\min \{n_j \mid j \in J.\}}$. Points which are not singular points of the special generic map are not singular for the resulting map. From the structure of the map and especially, that of the image of the special generic map, the resulting map is a desired round fold map. \end{proof} We discuss examples for Main Theorem \ref{mainthm:2}. For a manifold represented as a connected sum of finitely many copies of $S^{n^{\prime}} \times S^{n-n^{\prime}}$ where $n>n^{\prime} \geq 1$ are integers, we can construct an example of special generic maps into ${\mathbb{R}}^{n^{\prime}+1}$ satisfying the conditions in Lemma \ref{lem:1} and represented as a composition of an embedding into ${\mathbb{R}}^{n+1}$ with the canonical projection. First take a natural embedding into ${\mathbb{R}}^{n+1}$ and compose the embedding with the canonical projection.
By applying Lemma \ref{lem:1}, we have a round fold map into ${\mathbb{R}}^{n^{\prime}}$. Thus we can take such manifolds as $F_j$ in Main Theorem \ref{mainthm:2}.
Related to this, with a little effort, we can find manifolds admitting special generic maps appearing in the summand here in \cite{kitazawa0.4}, \cite{kitazawa0.5}, \cite{kitazawa0.6}, and so on. \section{A proof of Main Theorem \ref{mainthm:3} and related expositions.} \begin{Thm} \label{thm:3} In Theorem \ref{thm:1}, let $(m,n)=(7,4)$ and for any $m$-dimensional homotopy sphere $M$, we have a round fold map into ${\mathbb{R}}^n$ satisfying the properties. Furthermore, we can take $l=1$ if and only if $M$ is represented as the total space of a bundle over $S^4$ whose fiber is diffeomorphic to $S^3$, and we can take $l \geq 2$ for any $m$-dimensional homotopy sphere. \end{Thm} Exactly $16$ of all the $28$ types of $7$-dimensional oriented homotopy spheres are represented as total spaces of bundles over $S^4$ whose fibers are diffeomorphic to $S^3$. The unit sphere satisfies this property. According to \cite{calabi}, \cite{saeki}, \cite{saeki2}, and so on, a $7$-dimensional homotopy sphere admits a special generic map into ${\mathbb{R}}^n$ if and only if it is diffeomorphic to the unit sphere $S^7$ for $4 \leq n \leq 6$. This means that maps in Theorem \ref{thm:2} cannot be obtained via theory of section 6 of \cite{saekisuzuoka} (if $M$ is not diffeomorphic to $S^7$). \begin{proof}[A proof of Main Theorem \ref{mainthm:3}.] Existence of the family of infinitely many manifolds satisfying the first three properties is due to \cite{kitazawa0.11}
with \cite{wang}. The integral cohomology rings are isomorphic to that of ${\mathbb{C}P}^2 \times S^3$.
If at least one of the remaining two properties is false, then the cohomology ring of a manifold in the family must be isomorphic to that of a manifold represented as a connected sum of $S^2 \times S^5$ and $S^3 \times S^4$.
This completes the proof. \end{proof} \begin{Rem} \label{rem:3} \cite{kitazawa0.7}--\cite{kitazawa0.9} present fold maps such that preimages of regular values are disjoint unions of copies of a standard sphere we can represent as compositions of special generic maps with canonical projections of Euclidean spaces. This is a new explicit topic on so-called lifts of smooth maps.
In these studies, for a smooth map, we construct an explicit good smooth map such that the composition with a canonical projection of Euclidean space is the given map. Before the presented preprints by the author appeared, construction of explicit immersions and embeddings had been studied in various cases. \cite{haefliger} is one of pioneering studies. Later, related studies such as \cite{blankcurley}, \cite{levine}, \cite{nishioka}, \cite{saito}, \cite{saekitakase}, \cite{yamamoto}, and so on, have been published. \end{Rem} \begin{Rem} Special generic maps presented in the present paper and the class of such maps have been concentrated on and studied systematically in \cite{kitazawa0.12} for example. \end{Rem} \section{Acknowledgement.} The author is a member of and supported by JSPS KAKENHI Grant Number JP17H06128 "Innovative research of geometric topology and singularities of differentiable mappings". The author declares that data concerning the present study directly are available within the present paper.
\end{document} |
\begin{document}
\title{Realisation of adiabatic and diabatic CZ gates in superconducting qubits coupled with a tunable coupler}
\author{Huikai Xu} \thanks{H. Xu and W. Liu contributed equally to this work.} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China} \author{Weiyang Liu}
\thanks{H. Xu and W. Liu contributed equally to this work.} \affiliation{Shenzhen Insititute for Quantum Science and Engineering, Southern University of Science and Technology, Shenzhen 518055, China}
\author{Zhiyuan Li} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Jiaxiu Han} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Jingning Zhang} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Kehuan Linghu} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Yongchao Li} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Mo Chen} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Zhen Yang} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Junhua Wang} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Teng Ma} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Guangming Xue} \email{Corresponding author: xuegm@baqis.ac.cn} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\author{Yirong Jin } \email{Corresponding author: jinyr@baqis.ac.cn}
\affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China} \author{Haifeng Yu} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing 100193, China}
\begin{abstract}
High-fidelity two-qubit gates are fundamental for scaling up the number of superconducting qubits.
We use two qubits coupled via a frequency-tunable coupler which can adjust the coupling strength, and demonstrate the CZ gate using two different schemes, adiabatic and diabatic methods. The Clifford based Randomized Benchmarking (RB) method is used to assess and optimize the CZ gate fidelity. The fidelities of the adiabatic and diabatic CZ gates are $99.53(8)\%$ and $98.72(2)\%$, respectively. We also analyze the errors induced by the decoherence.
Compared to the $30$ ns duration of the adiabatic CZ gate, the duration of the diabatic CZ gate is $19$ ns, revealing a lower incoherence error rate $r'_{\rm{incoherent, int}} = 0.0197(5)$ than $r_{\rm{incoherent, int}} = 0.0223(3)$. \end{abstract}
\pacs{42.50.Ct, 03.67.Lx, 74.50.+r, 85.25.Cp}
\maketitle
A programmable superconducting information processor which consists of a two-dimensional array of 53 transmon qubits has been demonstrated to achieve the supremacy for a specific computational task\cite{Arut19}. A fast, high-fidelity gate scheme is the key to reaching this milestone.
For superconducting transmon/Xmon qubits\cite{koch07,Bare13}, there are a variety of proposals to realize two-qubit gates which can be divided into three main classes. The first class is implemented with frequency-tunable qubits with the static couplings. Interactions between qubits can be turned on and off by tuning the frequency of qubits.
In particular, by tuning the qubits to make the $|11\rangle$ state in resonance with $|02\rangle$ state, the controlled-Z (CZ) gates can be realized\cite{sch03,Stra03,dic09,Yama10,bare14}. Furthermore, parametrically modulated qubits make particular states in resonance to realize the $i$SWAP gate and the CZ gate\cite{cald18}. In such schemes, each qubit needs an additional magnetic flux bias line, which makes the circuit complicated when scaling up the number of qubits. The second class is implemented with frequency-fixed qubits and the static coupling strengths\cite{rige05,leek09,Hutc17}. The qubit gates are realized with all-microwave drive methods, such as the cross-resonance (CR) gate\cite{rige10,chow11,chow13,shel16,pole12,Groot10,Groot12}. This scheme is facing frequency crowding problems limiting the circuit integration. The last class is implemented with frequency-fixed or frequency-tunable qubits coupled with the additional tunable coupler. The general scheme is that two frequency-fixed qubits have a large frequency detuning to eliminate the ZZ interaction which can induce the single qubit gate errors, then a parametrically modulated coupler method is used to realize the $i$SWAP and CZ gates\cite{ganz20}. A more advantageous scheme is to treat the tunable coupler as a switch which can quickly turn on/off the interactions between adjacent qubits\cite{Ashhab06,Ashhab07,chen14,yan18,xu20,liX20,han20,Coll20,McKay19}. The coupler will turn off the interaction between adjacent qubits when the single qubit gate is implemented. In contrast, two-qubit gates are activated when the coupler turns on the interactions. Another advantage is that qubits do not need large frequency detuning and the frequency crowding problem can also be alleviated\cite{Arut19}.
By now, the CZ gate fidelity is as high as 99.7\%\cite{Kyaw20}. Fast gate and low operation errors are two keys to improve the two-qubit gates fidelity. In general, long gate time means more incoherence errors. However, shorter gate time will cause more operation errors. To solve this conflict problem, a fast adiabatic protocol is proposed\cite{bare14,moll18,rol19} and the gate time is about 30 $\sim$ 40 ns. Then, the nonadiabatic gates\cite{li2019,bare19,Foxen2020} are found to have fast gate times, which can eliminate incoherent errors.
In the present work, we investigate adiabatic and diabatic CZ gates in two Xmon qubits coupled with a tunable coupler system, as shown schematically in Fig.~1(a) and (b). Each qubit is capacitively coupled with a coupler which can be seen as a Xmon qubit. Two qubits are also directly coupled via the capacitance. The Hamiltonian of system can be written as \cite{yan18}: \begin{equation} \begin{split} H/\hbar = & \sum_{i=1,2,c}\omega_{i}a_{i}^{+}a_{i} +\frac{\alpha_{i}}{2}a_{i}^{+}a_{i}^{+}a_{i}a_{i} \\ & +\sum_{i \neq j}g_{ij}(a_{i}^{+}a_{j}+a_{i}a_{j}^{+}), \end{split} \end{equation} where $\omega _{i}\left(i = 1,2,c\right)$ are the frequencies of each qubits ($Q_1$, $Q_2$) and the coupler ($C$), $a_i^{+}$ and $a_i \left(i = 1,2, c\right)$ are creation and annihilation operators of $Q_1$, $Q_2$ and $C$, respectively. $g_{1c}(g_{2c})$ is the coupling strength between $C$ and $Q_1(Q_2)$ and $g_{12}$ is the direct coupling strength between $Q_1$ and $Q_2$. The maximum frequencies of $Q_1$, $Q_2$ and $C$ are $\omega_{1}^{\rm{max}}/2\pi = 4.508$ \rm{GHz}, $\omega_{2}^{\rm{max}}/2\pi = 4.701$ \rm{GHz} and $\omega_{c}^{\rm{max}}/2\pi = 5.419$ \rm{GHz} and the anharmonicities are $\alpha_{1}/2\pi = -290$ MHz, $\alpha_{2}/2\pi = -306$ MHz, and $\alpha_{c}/2\pi = -124$ MHz, respectively. The effective coupling strength\cite{yan18,koch07} between $Q_1$ and $Q_2$ is
\begin{figure}
\caption{(a) and (b) are the schematic electrical circuit and optical micrograph of three Xmon qubits($Q_1$, $Q_2$ and $C$). $Q_1$ and $Q_2$ are used as the computational qubits with the $XY$ and $Z$ control and coupled with the $\lambda/4$ resonators $R_1$ and $R_2$ for quantum state readout. Qubit $C$ can be seen as the tunable coupler($C$) with only the $Z$ control.
$i$SWAP experiment. The $Q_1$ is excited, then $Q_2$ is tuned to resonate with $Q_1$. Biasing the flux of $C$ can change the interaction strength between $Q_1$ and $Q_2$. (f) is the $i$SWAP oscillation versus flux bias of $C$. (g) Fourier transform of $i$SWAP oscillation in (f). The light line indicates the total coupling strength. $\tilde{g}$ can be adjusted from 0.40 to 40 MHz. }
\end{figure} \begin{equation} \tilde{g} \approx \frac{g_{1c}g_{2c}}{2} \left(\frac{1}{\Delta_{1c}}+\frac{1}{\Delta_{2c}}\right)+g_{12}, \end{equation}
where $\Delta_{ic} = \omega_{i}-\omega_{c} \left(i = 1,2,c\right)$. In experiments, the value of $g_{1c}\left(g_{2c}\right)$ can be extracted from the spectrum of qubits or coupler. Figure 2(a) is the spectrum of coupler $C$. Since coupler $C$ does not have the readout cavity, we use a novel method indicated in the inset of Fig. 2(a). The coupler $C$ could be driven by a microwave pulse through the XY control line of $Q_{1}$. If the microwave resonates with coupler $C$ which is excited from $|0\rangle$ to $|1\rangle$, the frequency of $Q_2$ will shift due to Lamb shift. Therefore, applying a $\pi$-pulse on qubit $Q_{2}$, the population of excited state $Q_{2}$ will be decreased. The frequency of the coupler $C$ coupled with qubits $Q_1$ and $Q_2$ is measured versus the amplitude of the bias pulse ($V_b$) of the coupler $C$, as shown in Fig. 2(a). The anti-crossing shows that the coupling strength $g_{1c}/2\pi\left(g_{2c}/2\pi\right)$ is $100$ MHz. The direct coupling strength value $g_{12}$ can be derived from $\tilde{g}$ which can be extracted from $i$SWAP experiment. As indicated by the inset of Fig. 2(c), setting up the frequencies of $Q_{1}$ and $Q_2$ on resonance ($4.110$ GHz) and then sweeping the amplitude $V_b$ and duration time $\tau$ of the bias pulse, a chevron pattern of $i$SWAP experiments can be measured, as shown in Fig. 2(b). The spectrum of two-qubit coupling strengths can be obtained by the Fourier transform of $i$SWAP time traces as shown in Fig. 2(c) and the light line in Fig. 2(c) indicates the coupling strength $\tilde{g}/2\pi$ which can be changed from 0.4 \rm{MHz} to 80 \rm{MHz} by adjusting the frequency of coupler $C$. Substituting $V_b$, $\tilde{g}$ and frequencies of $Q_1$, $Q_2$ and $C$ into Eq. (2), we can obtain the direct coupling strength of $Q_1(Q_2)$, $g_{12}/2\pi = 5$ MHz.
\begin{figure}
\caption{(a) The frequency of the coupler $C$ coupled with qubits $Q_1$ and $Q_2$ versus the amplitude of the bias pulse ($V_b$) of the coupler $C$. The inset is the schematic diagram. The red curves are numerical simulation results fitting the spectrum of qubit-coupler-qubit systems. Two double arrow segments indicate the anti-crossing points and the coupling strength $g_{1c}/2\pi = g_{2c}/2\pi = 100 \, \rm{MHz}$.
(b) The $i$SWAP oscillation when changing the amplitude $V_{b}$ of the bias pulse of the coupler $C$. The inset is the schematic of $i$SWAP experiment.
(c) Fourier transform of $i$SWAP oscillation in (b). The effective coupling strength $\tilde{g}/2\pi$ can vary from 0.40 MHz to 80 MHz. The red curve line shows the calculated results with $g_{12}/2\pi = 5 \, \rm{MHz}$.}
\end{figure}
To easily understand how to operate the qubit-coupler-qubit systems, we use $|Q_{1},C,Q_{2}\rangle$ to describe the energy-eigenstates. When performing CZ gates, the computational state $|101\rangle$ should be an adiabatic evolution from the idle points close to the region where $|101\rangle$ has coupling with another non-computational state (such as $|011\rangle$ or $|200\rangle$)\cite{yan18}, then back to the idling points with no leakage to these non-computational states. Then the state $|101\rangle$ will accumulate a conditional phase. In order to satisfy adiabatic evolution, the gate time should be greater than $1/g_{1c}$ to make sure that there is no leakage to another non-computational state. In our experiment, we firstly bias the $Q_1$, $Q_2$ and $C$ at the frequency of $f_{Q_{1}}^{01} = 4.283$ \rm{GHz}, $f_{Q_{2}}^{01} = 4.679$ \rm{GHz}, and $f_{c}^{01} = 5.419$ \rm{GHz}, respectively, as idling points where the effective ZZ interaction coupling is smaller than 500 KHz and two qubits have better coherence time (the $T_1$ time is $20.9$ \rm{$\mu$s} for $Q_1$, and $28.8$ \rm{$\mu$s} for $Q_2$). The average single-qubit gate fidelity of $Q_1$ and $Q_2$ are $99.6\%$ and $99.7\%$. Since $Q_1$ and $Q_2$ have stronger coupling strength with $C$, a half-period cosine shape pulse with duration time of 30 ns will satisfy adiabatic evolution condition. \begin{figure}
\caption{(a) and (b) Schematic of a Ramsey-type experiment measuring the conditional phase of CZ gates . Qubit $Q_1$ is prepared in its ground or excited state. Two $\pi/2$-pulses (the second $\pi/2$-pulses with the phase shift $\alpha$) are on qubit $Q_2$ with an interleaved a idle gate duration of $\tau$ while the CZ gate (half-period cosine shape pulse) on coupler $C$. (c) The oscillations of the population of qubit Q2 are measured by sweeping the amplitude $V_b$ of CZ gate and the second $X/2$ gate phase shift $\alpha$. The top and bottom figures are $Q_1$ in its ground state and its excited state respectively. (d) The initial phases $\phi_{Id}$ and $\phi_{X}$ of the oscillation traces in (c) are fitted as a function of $V_b$, which are displayed by the blue and green lines respectively. The conditional phase $\phi_c = \phi_{X} - \phi_{Id}$ can be extracted as a function of $V_b$(the yellow line). The gray criss-cross line indicates $\phi_c = \pi$ with a proper $V_b$.}
\end{figure}
\begin{figure}
\caption{Fidelity and error analysis of the adiabatic CZ gate. (a) Measured sequence fidelity (100 averages) as a function of the number of Cliffords $m$ for both the reference (blue) and interleaved (red) RB experiments. Error bars are the standard deviations from the mean. (b) Measured sequence purity (100 averages) as a function of the number of Cliffords $m$ for both the reference (green) and interleaved (yellow) PB experiments.}
\end{figure}
To calibrate the conditional phase, the Ramsey-type experiments are performed as shown in Fig. 3. Figures 3(a) and (b) are the control pulse sequence and the qubit $Q_1$ is prepared in the ground state $|0\rangle$ and excited state $|1\rangle$, respectively. Figure 3(c) shows the Ramsey oscillation results of qubit $Q_2$ versus the second $X/2$ gate phase $\alpha$ with the different amplitude $V_b$ of coupler $C$ flux pulse. The top and bottom panels correspond to $Q_1$ initialized in the ground and excited states, respectively. So the initial phases $\phi_{Id}$ and $\phi_{X}$ can be extracted by fitting the Ramsey oscillation trace. Then we can get the relationship between the conditional phase $\phi_c$ and the $V_b$. As shown in Fig. 3(d), the yellow line is $\phi_c$ versus the $V_b$. We can use $V_b = 0.2382$ V to get the conditional phase $\phi_c = \pi$.
$\phi_{Id}$ is a local dynamical phase accumulated on $Q_2$ which can be easily compensated by a virtual $Z$ gate \cite{mcka2017}. Exchange the roles of $Q_1$ and $Q_2$ in the same experiment, local phase on $Q_1$ can be measured too.
The Clifford based Randomized Benchmarking (RB) method can be used to assess and optimize the performance of adiabatic CZ gates\cite{bare14,Knill08,Kelly2014,Magesan11,Magesan12,Malley15}, which is performed by applying a gate sequence of $m$ two-qubit Clifford gates followed by an additional $\big(m + 1\big)$th gate to invert the whole sequence. Fig. 4(a) shows the sequence fidelity as a function of $m$ for both the reference and CZ-interleaved cases. The results are the average of 100 random samples. The parameters of adiabatic CZ gate are obtained by implementing Nelder-Mead (NM) optimization algorithm in 100 evaluations. We can obtain the decay constants $p_{\rm{ref}}$ and $p_{\rm{int}}$ from the exponential function $F = Ap^{m} + B$ fitting, and then the error rates per Clifford gate $r_{\rm{ref}}$ and $r_{\rm{int}}$ from $r = 3/4 (1-p)$. The CZ gate error rate can be extracted from $r_{\rm{CZ}} = 3/4(1-p_{\rm{int}}/p_{\rm{ref}})$ and the CZ gate fidelity from $F_{\rm{CZ}} = 1 - r_{\rm{CZ}}$. The average error rate per Clifford gate of our scheme is $r_{\rm{ref}}= 0.0242(2)$, and $r_{\rm{int}}= 0.0286(7)$ for interleaved, as shown in Fig. 4(a). The CZ gate error $r_{\rm{CZ}} = 0.0046(2)$. The fidelity of adiabatic CZ gate is $F_{\rm{\rm{CZ}}} = 99.53(8)\%$. To estimate the decoherence error, we also measure the two-qubit purity benchmarking (PB)\cite{wall2015,feng2016} as shown in Fig. 4(b). We also use exponential function $F = A^{'}u^{m-1} + B^{'}$ to fit sequence purity as a function of $m$, and obtain the purity error rate from $r_{\rm{incoherent}} = 3/4(1-\sqrt{u})$. The average purity error per Clifford gate for the interleaved sequence is $r_{\rm{incoherent, int}} = 0.0223(3)$. The average purity error of the reference sequence is $r_{\rm{incoherent, ref}} = 0.0154(8)$. We estimate that the incoherence error contribution is $r_{\rm{incoherent, ref}}/r_{\rm{ref}} = 64\%$ of total errors per two-qubit Clifford gate.
\begin{figure}
\caption{(a) The control sequence to measure leakage errors when implementing the diabatic CZ gates. (b) Leakage error results after CZ gates. (c) Schematic of a Ramsey-type experiment measuring the conditional phase of diabatic CZ gates. The pulse shape of diabatic CZ gates is a square-shaped pulse of duration $\tau = 18$ ns. Qubit $Q_1$ is prepared in its ground or excited state. (d) The results of the Ramsey oscillations under $V_b = 0.173 \, \rm{V}$ and $V_q = 0.174 \, \rm{V}$. The conditional phase $\phi_c = \pi$. (e) The conditional phase results after CZ gates.}
\end{figure} \begin{figure}
\caption{Fidelity and error analysis of the diabatic CZ gate. (a) Measured sequence fidelity (100 averages) as a function of the number of Cliffords $m$ for both the reference (blue) and interleaved (red) RB experiments. Error bars are the standard deviations from the mean. (b) Measured sequence purity (100 averages) as a function of the number of Cliffords $m$ for both the reference (green) and interleaved (yellow) PB experiments.}
\end{figure} To realize a faster CZ gate, the two-qubit gates need to have a stronger effective coupling strength $\tilde{g}$. From Fig. 2(c), the maximum of $\tilde{g}$ is up to $80 $ MHz. So we can choose $\tau \approx 18$ ns as the duration time of CZ gate. However, the evolution of CZ gate will not satisfy adiabatic conditions, which will induce the leakage error. To simplify diabatic CZ gate operation, the square-shaped pulse is used with distortion corrected\cite{Rol19}. So we have two main controllable parameters, the coupling $\tilde{g}$ and frequency detuning $\Delta = f_{Q_{2}}^{01}-f_{Q_{1}}^{01}$ to realize the diabatic CZ gates. To adjust $\Delta$ value in experiments, we fix $Q_1$ frequency at the idle points and change the flux bias pulse amplitude $V_q$ of $Q_2$. Before calibrating the conditional phase, we should exactly identify the leakage versus coupling strength $\tilde{g}$ and frequency detuning $\Delta$. As shown in Fig. 5(a), we initialize two qubits in the excited state and then perform the diabatic CZ gate. If there have the leakage occurs, the ground state population of lower frequency qubit will increase. By sweeping the voltage of $V_b$ and $V_q$, we can map out leakage errors. The result is shown in Fig. 5(b), where the dark region means the low-leakage errors. Then to calibrate the conditional phase by executing Ramsey-type experiments and we can get the relationships between $\phi_c$ and $V_b$, $V_q$. Figure 5(c) shows the experimental control pulse sequence which is similar to the adiabatic CZ gates except that the square-shaped pulse is applied and the values of $Q_2$ flux bias pulse amplitude $V_q$ are explored. The measurement result is shown in Fig. 5(e). The red regions are $\phi_c$ close to $\pi$ or $-\pi$. So we can pick up $V_b$ and $V_q$ values to satisfy $\phi_c = \pi$, then check leakage errors in Fig. 5(b). We find $V_b = 0.173 \, \rm{V}$ and $V_q = 0.174 \, \rm{V}$ satisfying both constraint conditions. 
Figure 5(d) shows the results of the qubit $Q_2$ Ramsey oscillations under these conditions. The phase difference of two oscillations is $\pi$, which means the conditional phase $\phi_c = \pi$.
Then RB is used to optimize the fidelity of CZ gate by NM algorithm. The best optimization result is shown in Fig. 6(a). The optimized CZ gate time is 19 ns. The average error rates per Clifford gate are $r'_{\rm{ref}}= 0.0280(7)$ and $r'_{\rm{int}}= 0.0407(2)$. The CZ gate error rate $r'_{\rm{CZ}} = 0.0127(8)$. The fidelity of diabatic CZ gate is $F_{\rm{\rm{CZ}}} = 98.72(2)\%$. As shown in Fig. 6(b), the purity error rates $r'_{\rm{incoherent,int}}$ and $r'_{\rm{incoherent,ref}}$ are 0.0197(5) and 0.0132(5), respectively. The incoherence error contributes $47\%$ of total errors per two-qubit Clifford gate.
In summary, we experimentally implemented a tunably coupled two-qubit system, and realized high fidelity CZ gates in two different schemes. The highest RB fidelities of the adiabatic and diabatic CZ gates we achieved were $99.53(8)\%$ and $98.72(2)\%$, respectively. The adiabatic CZ gate, defined by a cosine flux pulse on the coupler, requires fewer calibrations compared to the diabatic CZ gate, which was implemented by applying parallel square-shaped flux pulses both on the coupler and one frequency-tunable qubit. Purity benchmarking, which describes incoherence errors, was also studied. We found that incoherence errors contributed $64\%$ and $47\%$ of the total error per Clifford gate for adiabatic and diabatic CZ gates, respectively.
This work was supported by the NSFC of China (Grants No. 11890704, 12004042, 11674376), the NSF of Beijing (Grants No. Z190012), National Key Research and Development Program of China (Grant No. 2016YFA0301800) and the Key-Area Research and Development Program of Guang-Dong Province (Grants No. 2018B030326001).
\begin{thebibliography}{99}
\bibitem{Arut19} F. Arute, K. Arya, R. Babbush, D. Bacon, J. C. Bardin, R. Barends, R. Biswas, S. Boixo, F. G. S. L. Brandao, D. A. Buell, et al., Quantum supremacy using a programmable superconducting processor, Nature \textbf{574}, 505 (2019).
\bibitem{koch07} J. Koch, T. M. Yu, J. Gambetta, A. A. Houck, D. I. Schuster, J. Majer, A. Blais, M. H. Devoret, S. M. Girvin, and R. J. Schoelkopf, Charge-insensitive qubit design derived from the Cooper pair box, Phys. Rev. A \textbf{76}, 042319 (2007).
\bibitem{Bare13} R. Barends, J. Kelly, A. Megrant, D. Sank, E. Jeffrey, Y. Chen, Y. Yin, B. Chiaro, J. Mutus, C. Neill, et al., Coherent josephson qubit suitable for scalable quantum integrated circuits, Phys. Rev. Lett. \textbf{111}, 080502 (2013).
\bibitem{sch03} Norbert Schuch and Jens Siewert, Natural two-qubit gate for quantum computation using the XY interaction, Phys. Rev. A \textbf{67}, 032301 (2003).
\bibitem{Stra03} Frederick W. Strauch, Philip R. Johnson, Alex J. Dragt, C. J. Lobb, J. R. Anderson, and F. C. Wellstood, Quantum Logic Gates for Coupled Superconducting Phase Qubits, Phys. Rev. Lett. \textbf{91}, 167005 (2003).
\bibitem{dic09} L. DiCarlo, J. M. Chow, J. M. Gambetta, Lev S. Bishop, B. R. Johnson, D. I. Schuster, J. Majer, A. Blais, L. Frunzio, S. M. Girvin and R. J. Schoelkopf, Demonstration of two-qubit algorithms with a superconducting quantum processor, Nature \textbf{460}, 240 (2009).
\bibitem{Yama10} T. Yamamoto, M. Neeley, E. Lucero, R. C. Bialczak, J. Kelly, M. Lenander, Matteo Mariantoni, A. D. O'Connell, D. Sank, H. Wang, M. Weides, J. Wenner, Y. Yin, A. N. Cleland, and John M. Martinis, Quantum process tomography of two-qubit controlled-Z and controlled-NOT gates using superconducting phase qubits, Phys. Rev. B \textbf{82}, 184515 (2010).
\bibitem{bare14} R. Barends, J. Kelly, A. Megrant, A. Veitia, D. Sank, E. Jeffrey, T. C. White, J. Mutus, A. G. Fowler, B. Campbell, Y. Chen, Z. Chen, B. Chiaro, A. Dunsworth, C. Neill, P. O'Malley, P. Roushan, A. Vainsencher, J. Wenner, A. N. Korotkov, A. N. Cleland and John M. Martinis, Superconducting quantum circuits at the surface code threshold for fault tolerance, Nature \textbf{508}, 500 (2014).
\bibitem{cald18} S. A. Caldwell, N. Didier, C. A. Ryan, E. A. Sete, A. Hudson, P. Karalekas, R. Manenti, M. P. da Silva, R. Sinclair, E. Acala, N. Alidoust, J. Angeles, A. Bestwick, M. Block, B. Bloom, A. Bradley, C. Bui, L. Capelluto, R. Chilcott, J. Cordova, G. Crossman, M. Curtis, S. Deshpande, T. El Bouayadi, D. Girshovich, S. Hong, K. Kuang, M. Lenihan, T. Manning, A. Marchenkov, J. Marshall, R. Maydra, Y. Mohan, W. O'Brien, C. Osborn, J. Otterbach, A. Papageorge, J.-P. Paquette, M. Pelstring, A. Polloreno, G. Prawiroatmodjo, V. Rawat, M. Reagor, R. Renzas, N. Rubin, D. Russell, M. Rust, D. Scarabelli, M. Scheer, M. Selvanayagam, R. Smith, A. Staley, M. Suska, N. Tezak, D. C. Thompson, T.-W. To, M. Vahidpour, N. Vodrahalli, T. Whyland, K. Yadav, W. Zeng, and C. Rigetti, Parametrically activated entangling gates using transmon qubits, Phys. Rev. Applied \textbf{10}, 034050 (2018).
\bibitem{rige05} C. Rigetti, A. Blais, and M. Devoret, Protocol for universal gates in optimally biased superconducting qubits, Phys. Rev. Lett. \textbf{94}, 240502 (2005).
\bibitem{leek09} P. J. Leek, S. Filipp, P. Maurer, M. Baur, R. Bianchetti, J. M. Fink, M. Göppl, L. Steffen, and A. Wallraff, Using sideband transitions for two-qubit operations in superconducting circuits, Phys. Rev. B \textbf{79}, 180511 (2009).
\bibitem{Hutc17} M. D. Hutchings, J. B. Hertzberg, Y. Liu, N. T. Bronn, G. A. Keefe, Markus Brink, Jerry M. Chow, and B. L. T. Plourde, Tunable superconducting qubits with flux-independent coherence, Phys. Rev. Applied \textbf{8}, 044003 (2017).
\bibitem{rige10} Chad Rigetti and Michel Devoret. Fully microwave-tunable universal gates in superconducting qubits with linear couplings and fixed transition frequencies, Phys. Rev. B \textbf{81}, 134507 (2010).
\bibitem{chow11} J. M. Chow, A. D. Córcoles, Jay M. Gambetta, Chad Rigetti, B. R. Johnson, John A. Smolin, J. R. Rozen, George A. Keefe, Mary B. Rothwell, Mark B. Ketchen, and M. Steffen, Simple all-microwave entangling gate for fixed-frequency superconducting qubits, Phys. Rev. Lett. \textbf{107}, 080502 (2011).
\bibitem{chow13} Jerry M. Chow, Jay M. Gambetta, Andrew W. Cross, Seth T. Merkel, Chad Rigetti and M. Steffen, Microwave-activated conditional-phase gate for superconducting qubits, New Journal of Physics, \textbf{15}(11), 115012 (2013).
\bibitem{shel16} Sarah Sheldon, Easwar Magesan, Jerry M. Chow, and Jay M. Gambetta, Procedure for systematically tuning up crosstalk in the cross resonance gate, Phys. Rev. A \textbf{93}, 060302 (2016).
\bibitem{pole12} S. Poletto, J. M. Gambetta, S. T. Merkel, J. A. Smolin, J. M. Chow, A. D. Córcoles, G. A. Keefe, M. B. Rothwell, J. R. Rozen, D. W. Abraham, C. Rigetti, and M. Steffen, Entanglement of Two Superconducting Qubits in a Waveguide Cavity via Monochromatic Two-Photon Excitation, Phys. Rev. Lett. \textbf{109}, 240505 (2012).
\bibitem{Groot10} P. C. de Groot, J. Lisenfeld, R. N. Schouten, S. Ashhab, A. Lupascu, C. J. P. M. Harmans, and J. E. Mooij, Selective darkening of degenerate transitions demonstrated with two superconducting quantum bits,
Nature Physics \textbf{6}, 763 (2010).
\bibitem{Groot12} P. C. de Groot, S. Ashhab, A. Lupascu, L. DiCarlo, F. Nori, C. J. P. M. Harmans, and J. E. Mooij, Selective darkening of degenerate transitions for implementing quantum controlled-NOT gates, New J. Phys. \textbf{14}, 073038 (2012).
\bibitem{ganz20} M. Ganzhorn, G. Salis, D. J. Egger, A. Fuhrer, M. Mergenthaler, C. Müller, P. Müller, S. Paredes, M. Pechal, M. Werninghaus, and S. Filipp, Benchmarking the noise sensitivity of different parametric two-qubit gates in a single superconducting quantum computing platform, Phys. Rev. Research \textbf{2}, 033447 (2020).
\bibitem{Ashhab06} S. Ashhab, S. Matsuo, N. Hatakenaka, and F. Nori, Generalized switchable coupling for superconducting qubits using double resonance, Phys. Rev. B \textbf{74} , 184504 (2006).
\bibitem{Ashhab07} S. Ashhab and F. Nori, Switchable coupling for superconducting qubits using double resonance in the presence of crosstalk, Phys. Rev. B \textbf{76}, 132513 (2007).
\bibitem{chen14} Yu Chen, C. Neill, P. Roushan, N. Leung, M. Fang, R. Barends, J. Kelly, B. Campbell, Z. Chen, B. Chiaro, A. Dunsworth, E. Jeffrey, A. Megrant, J. Y. Mutus, P. J. J. O'Malley, C. M. Quintana, D. Sank, A. Vainsencher, J. Wenner, T. C. White, Michael R. Geller, A. N. Cleland, and John M. Martinis, Qubit Architecture with High Coherence and Fast Tunable Coupling, Phys. Rev. Lett. \textbf{113}, 220502 (2014).
\bibitem{yan18} Fei Yan, Philip Krantz, Youngkyu Sung, Morten Kjaergaard, Dan Campbell, Joel I.J. Wang, Terry P. Orlando, Simon Gustavsson, William and D. Oliver, A tunable coupling scheme for implementing high-fidelity two-qubit gates, Phys. Rev. Appl. \textbf{10}, 054062 (2018).
\bibitem{xu20} Yuan Xu, Ji Chu, Jiahao Yuan, Jiawei Qiu, Yuxuan Zhou, Libo Zhang, Xinsheng Tan, Yang Yu, Song Liu, Jian Li, Fei Yan, and Dapeng Yu, High-fidelity, high-scalability two-qubit gate scheme for superconducting qubits, arXiv:2006.11860v1
\bibitem{liX20} X. Li, T. Cai, H. Yan, Z. Wang, X. Pan, Y. Ma, W. Cai, J. Han, Z. Hua, X. Han, Y. Wu, H. Zhang, H. Wang, Yipu Song, Luming Duan, and Luyan Sun, A tunable coupler for suppressing adjacent superconducting qubit coupling, Phys. Rev. Applied \textbf{14}, 024070 (2020).
\bibitem{han20} X. Han, T. Cai, X. Li, Y. Wu, Y. Ma, J. Wang, H. Zhang, Y. Song, and L. Duan, Elimination of unwanted qubit interactions for parametric exchange two-qubit gates in a tunable coupling circuit, Phys. Rev. A \textbf{102}, 022619 (2020).
\bibitem{Coll20} Michele C. Collodo, Johannes Herrmann, Nathan Lacroix, Christian Kraglund Andersen, Ants Remm, Stefania Lazar, Jean-Claude Besse, Theo Walter, Andreas Wallraff, Christopher Eichler, Implementation of Conditional-Phase Gates based on tunable ZZ-Interactions, arXiv:2005.08863v1.
\bibitem{McKay19} David C. McKay, Stefan Filipp, Antonio Mezzacapo, Easwar Magesan, Jerry M. Chow, and Jay M. Gambetta, Universal Gate for Fixed-Frequency Qubits via a Tunable Bus, Phys. Rev. Applied \textbf{6}, 064007 (2016). \bibitem{Kyaw20} Thi Ha Kyaw, Tim Menke, Sukin Sim, Nicolas P. D. Sawaya, William D. Oliver, Gian Giacomo Guerreschi, and Alan Aspuru-Guzik, Quantum computer-aided design: digital quantum simulation of quantum processors, arXiv:2006.03070
\bibitem{moll18} Moll, N. et al., Quantum optimization using variational algorithms on near-term quantum devices, Quantum Sci. Technol. \textbf{3}, 030503 (2018).
\bibitem{rol19} M. A. Rol, F. Battistel, F. K. Malinowski, C. C. Bultink, B. M. Tarasinski, R. Vollmer, N. Haider, N. Muthusubramanian, A. Bruno, B. M. Terhal, and L. DiCarlo, Fast, highfidelity conditional-phase gate exploiting leakage interference in weakly anharmonic superconducting qubits, Phys. Rev. Lett. \textbf{123}, 120502 (2019).
\bibitem{li2019} Shaowei Li, Anthony D. Castellano, Shiyu Wang, Yulin Wu, Ming Gong, Zhiguang Yan, Hao Rong, Hui Deng, Chen Zha, Cheng Guo, Lihua Sun, Chengzhi Peng, Xiaobo Zhu and Jian-Wei Pan, Realisation of high-fidelity nonadiabatic CZ gates with superconducting qubits, npj Quantum Information, \textbf{5}(1), 84 (2019).
\bibitem{bare19} R. Barends, C. M. Quintana, A. G. Petukhov, Yu Chen, D. Kafri, K. Kechedzhi, R. Collins, O. Naaman, S. Boixo, F. Arute, K. Arya, D. Buell, B. Burkett, Z. Chen, B. Chiaro, A. Dunsworth, B. Foxen, A. Fowler, C. Gidney, M. Giustina, R. Graff, T. Huang, E. Jeffrey, J. Kelly, P. V. Klimov, F. Kostritsa, D. Landhuis, E. Lucero, M. McEwen, A. Megrant, X. Mi, J. Mutus, M. Neeley, C. Neill, E. Ostby, P. Roushan, D. Sank, K. J. Satzinger, A. Vainsencher, T. White, J. Yao, P. Yeh, A. Zalcman, H. Neven, V. N. Smelyanskiy, and John M. Martinis, Diabatic Gates for Frequency-Tunable Superconducting Qubits, Phys. Rev. Lett. \textbf{123}, 210501 (2019).
\bibitem{Foxen2020} B. Foxen, C. Neill, A. Dunsworth, P. Roushan, B. Chiaro, A. Megrant, J. Kelly, Zijun Chen, K. Satzinger, R. Barends, F. Arute, K. Arya, R. Babbush, D. Bacon, J.C. Bardin, S. Boixo, D. Buell, B. Burkett, Yu Chen, R. Collins, E. Farhi, A. Fowler, C. Gidney, M. Giustina, R. Graff, M. Harrigan, T. Huang, S.V. Isakov, E. Jeffrey, Z. Jiang, D. Kafri, K. Kechedzhi, P. Klimov, A. Korotkov, F. Kostritsa, D. Landhuis, E. Lucero, J. McClean, M. McEwen, X. Mi, M. Mohseni, J.Y. Mutus, O. Naaman, M. Neeley, M. Niu, A. Petukhov, C. Quintana, N. Rubin, D. Sank, V. Smelyanskiy, A. Vainsencher, T.C. White, Z. Yao, P. Yeh, A. Zalcman, H. Neven, and John M. Martinis, Demonstrating a Continuous Set of Two-Qubit Gates for Near-Term Quantum Algorithms, Phys. Rev. Lett. \textbf{125}, 120504 (2020).
\bibitem{mcka2017} D. C. McKay, C. J. Wood, S. Sheldon, J. M. Chow, and J. M. Gambetta, Efficient z gates for quantum computing, Phys. Rev. A \textbf{96}, 022330 (2017).
\bibitem{Knill08} E. Knill, D. Leibfried, R. Reichle, J. Britton, R. B. Blakestad, J. D. Jost, C. Langer, R. Ozeri, S. Seidelin, and D. J.Wineland, Randomized benchmarking of quantum gates, Phys. Rev. A \textbf{77}, 012307 (2008).
\bibitem{Kelly2014} J. Kelly, R. Barends, B. Campbell, Y. Chen, Z. Chen, B. Chiaro, A. Dunsworth, A. G. Fowler, I.-C. Hoi, E. Jeffrey, et al., Optimal quantum control using randomized benchmarking, Phys. Rev. Lett. \textbf{112}, 240504 (2014).
\bibitem{Magesan11} E. Magesan, J. M. Gambetta, and J. Emerson, Scalable and robust randomized benchmarking of quantum processes, Phys. Rev. Lett. \textbf{106}, 180504 (2011).
\bibitem{Magesan12} E. Magesan, J. M. Gambetta, B. R. Johnson, C. A. Ryan, J. M. Chow, S. T. Merkel, M. P. da Silva, G. A. Keefe, M. B. Rothwell, T. A. Ohki, M. B. Ketchen, and M. Steffen, Efficient measurement of quantum gate error by interleaved randomized benchmarking, Phys. Rev. Lett. \textbf{109}, 080505 (2012).
\bibitem{Malley15} P. J. J. O’Malley, J. Kelly, R. Barends, B. Campbell, Y. Chen, Z. Chen, B. Chiaro, A. Dunsworth, A. G. Fowler, I.-C. Hoi, E. Jeffrey, A. Megrant, J. Mutus, C. Neill, C. Quintana, P. Roushan, D. Sank, A. Vainsencher, J. Wenner, T. C. White, A. N. Korotkov, A. N. Cleland, and J. M. Martinis, Qubit metrology of ultralow phase noise using randomized benchmarking, Phys. Rev. Applied \textbf{3}, 044009 (2015).
\bibitem{wall2015} J. Wallman, C. Granade, R. Harper, and S. T. Flammia, Estimating the coherence of noise, New Journal of Physics, \textbf{17}(11):113020 (2015). \bibitem{feng2016} Guanru Feng, Joel J. Wallman, Brandon Buonacorsi, Franklin H. Cho, Daniel K. Park, Tao Xin, Dawei Lu, Jonathan Baugh, and Raymond Laflamme, Estimating the Coherence of Noise in Quantum Control of a Solid-State Qubit, Phys. Rev. Lett. \textbf{117}, 260501 (2016).
\bibitem{Rol19} M. A. Rol, F. Battistel, F. K. Malinowski, C. C. Bultink, B. M. Tarasinski, R. Vollmer, N. Haider, N. Muthusubramanian, A. Bruno, B. M. Terhal, and L. DiCarlo, Fast, High-Fidelity Conditional-Phase Gate Exploiting Leakage Interference in Weakly Anharmonic Superconducting Qubits, Phys. Rev. Lett. \textbf{123}, 120502 (2019).
\bibitem{Martinis14} J. M. Martinis and M. R. Geller, Fast adiabatic qubit gates using only $\sigma^z$ control, Phys. Rev. A \textbf{90}, 022307 (2014).
\end{thebibliography}
\end{document} |
\begin{document}
\title {On Multivalued Fixed-Point Free Maps on $\mathbb R^n$} \author{Raushan~Z.~Buzyakova} \address{Department of Mathematics and Statistics, The University of North Carolina at Greensboro, Greensboro, NC, 27402, USA} \email{rzbouzia@uncg.edu} \keywords{fixed point, hyperspace, multivalued function} \subjclass{54H25, 58C30, 54B20}
\begin{abstract}{ To formulate our results let $f$ be a continuous multivalued map from
$\mathbb R^n$ to $2^{\mathbb R^n}$ and $k$ a natural number such that $|f(x)|\leq k$ for all $x$. We prove that $f$ is fixed-point free if and only if its continuous extension $\tilde f:\beta \mathbb R^n\to 2^{\beta \mathbb R^n}$ is fixed-point free. If one wishes to stay within metric terms, the result can be formulated as follows: $f$ is fixed-point free if and only if there exists a continuous fixed-point free extension $\bar f: b\mathbb R^n\to 2^{b\mathbb R^n}$ for some metric compactification $b\mathbb R^n$ of $\mathbb R^n$. Using the classical notion of colorability, we prove that such an $f$ is always colorable. Moreover, a number of colors sufficient to paint the graph can be expressed as a function of $n$ and $k$ only. The mentioned results also hold if the domain is replaced by any closed subspace of $\mathbb R^n$ without any changes in the range. } \end{abstract}
\maketitle \markboth{Raushan Z. Buzyakova}{On Multivalued Fixed-Point Free Maps on $\mathbb R^n$} { }
\section{Introduction}\label{S:intro}
A series of topological results about fixed-point free maps are motivated by these two classical set-theoretical statements (see, in particular, \cite{BE}): \par
\noindent {\it S1. If $f:X\to X$ is a fixed-point free map, then there exists a finite cover $\mathcal F$ of $X$ such that $f(F)$ misses $F$ for each $F\in {\mathcal F}$; and
\par
\noindent S2. Let ${\mathcal P}(X)$ be the set of all non-empty subsets of $X$ and let $f: X\to {\mathcal P}(X)$ be a map with the property that $x\not \in f(x)$. If there exists a natural number $k$ such that $|f(x)|\leq k$ for all $x\in X$, then there exists a finite cover $\mathcal F$ of $X$ such that $F$ misses $\cup\{f(x):x\in F\}$ for each $F\in {\mathcal F}$. } \par
\noindent One of the first results of topological nature related to these statements was obtained by Katetov in \cite{K} by translating the first statement into this form: {\it For a discrete space $X$, a map $f:X\to X$ is fixed-point free if and only if its continuous extension $\tilde f:\beta X\to \beta X$ is fixed-point free.} This topological version suggests that if one wants to have a similar criterion for a non-discrete space $X$ one has to at least demand that elements of covers $\mathcal F$ in the statements under discussion be closed subsets of $X$. Thus, it is commonly accepted that when working with a topological space $X$ and a continuous map $f$ from a closed subspace $A$ of $X$ to $X$, any closed subset $F$ of $A$ that misses its image under $f$ is called a {\it color of $f$}. If there exists a finite cover (``coloring'') of $X$ by colors, $f$ is said to be {\it colorable}. Katetov's paper \cite{K} and van Douwen's work \cite{D} have made a significant impact on topologists' interest in the topic. A number of interesting results of topological nature in the direction of the first statement have been published since the mentioned papers (see, in particular, \cite[Section 3.2]{VM2} for references). In this paper we consider one of the natural topological versions of the second statement for Euclidean space $\mathbb R^n$ and its hyperspace $2^{\mathbb R^n}$. In \cite{bc} it is proved that a continuous fixed-point free map from a closed subspace of $\mathbb R^n$ to $\mathbb R^n$ is colorable. In this paper we consider fixed-point free multivalued maps on $\mathbb R^n$ and its closed subspaces. To formulate our main results we first introduce the necessary terminology related to multivalued maps.
For a topological space $X$, we use symbol $2^X$ to denote the space of all non-empty closed subsets of $X$ endowed with the Vietoris topology and symbol ${\rm exp}_k(X)$ to denote the subspace of $2^X$ that consists of only those
$A\in 2^X$ for which $|A|\leq k$. A multivalued map $f: X\subset Z\to 2^Z$ is {\it fixed-point free} if $x\not \in f(x)$ for every $x\in X$. A closed set $F\subset X$ is a {\it color} of a continuous map $f$ from $X\subset Z$ to $2^Z$ if $F$ misses $\cup \{f(x):x\in F\}$. If $X$ can be covered by finitely many colors of $f$ then $f$ is said to be {\it colorable}. The main results of the paper are Theorems \ref{thm:chromaticnumber} and \ref{thm:colorability}, which state that any continuous fixed-point free map from a closed subspace of $\mathbb R^n$ to ${\rm exp}_k(\mathbb R^n)$ is colorable and there exists a formula that computes a number of colors sufficient for painting in terms of $n$ and $k$ only. Using the main result we also show that a criterion similar to the Katetov's holds for multivalued maps as well. Namely, we show that a continuous map $f$ from a closed subspace $X$ of $\mathbb R^n$ to ${\rm exp}_k(\mathbb R^n)$ is fixed-point free if and only if its continuous extension $\tilde f: \beta X\to {\rm exp}_k(\beta\mathbb R^n)$ is fixed-point free. To justify the requirement on sizes of $f(x)$ in our main results let us consider one simple example. Define $f$ from $\omega\setminus \{0\}$ to the space of finite subsets of $\omega$
as follows: $f(n) =\{n+1,n+2,...,2n\}$. The map $f$ is continuous because $\omega$ and the space of finite subsets of $\omega$ are discrete. Since for every $n\in \omega\setminus \{0\}$, all elements of $\{n+1, n+2,...,2n\}$ must be of different color we conclude that $f$ is not colorable. This example justifies our requirement in the main results that the set $\{|f(x)|: x\in X\}$ is bounded by a positive integer. Before we dive into the technical part of the paper we would like to outline a short transparent argument of the main results of the paper for a fixed-point free map $f:\mathbb R\to {{\rm exp}}_2(\mathbb R)$. For this let $f_1(x) =\min f(x)$ and $f_2(x)=\max f(x)$. Since $f$ is fixed-point free, the maps $f_1$ and $f_2$ are fixed-point free real-valued maps. By \cite[Theorem 2.5]{b}, both functions are colorable. If one lets ${\mathcal F}_1$ and ${\mathcal F}_2$ be colorings of these maps then it is easy to verify that the family $\{A\cap B: A\in {\mathcal F}_1,\ B\in {\mathcal F}_2\}$ is a coloring of $f$. If one wishes to extend the argument for the case ${{\rm exp}}_3(\mathbb R)$ using a straightforward inductive approach, then one may find oneself dealing with open colors or with a single-valued map with the domain being a proper subset of the range. Existence of open colors in a single-valued case can be easily deduced from the definition of colorability when one deals with self-maps. However, if one works with maps from a subspace $X$ of $Y$ into $Y$, some work needs to be done. Nevertheless, the presented argument can be extended for ${{\rm exp}}_k(\mathbb R)$ for any $k$ with some more work and suitable references. Although our argument for any $n$ and $k$ that we present in the paper may seem different from the one just described, a closer look will reveal that it carries the same idea hidden behind technical details naturally arising when dealing with higher dimensions. Throughout the paper we will follow standard
notation and terminology as in \cite{Eng}.
\section{Results}\label{S:results}
For simplicity, but without loss of generality, most of our arguments related to $\mathbb R^n$ will be carried out for $\mathbb R^5$. This will free the letter ``n'' for other purposes. Since throughout our discussion we will juggle several spaces at a time we agree that by $\bar S$ we denote the closure of $S$ in $\mathbb R^k$ (where the value of $k$ is always understood from the context) while $cl_X(P)$ will denote the closure of $P$ in $X$. \par
\noindent A standard neighborhood in $2^X$ will be denoted as $$ \langle U_1,...,U_m\rangle = \{A\in 2^X: A\subset U_1\cup ...\cup U_m \mbox{ and } U_i\cap A\not =\emptyset \mbox{ for all } i=1,...,m\}, $$ where $U_1,...,U_m$ are open sets of $X$.
\par
\noindent By ${{\rm exp}}_n(X)$ we denote the subspace $\{F\in 2^X: |F|\leq n\}$.
\par
\noindent \begin{defin}\label{defin:brightcolor} Let $f$ be a continuous map from $X\subset Z$ to $2^Z$. A closed set $F\subset X$ is a bright color of $f$ if $F$ misses $cl_{Z}[\cup \{f(x):x\in F\}]$. \end{defin}
\par
\noindent \begin{pro}\label{pro:openbrightcolor} Suppose $Z$ is normal, $X$ is closed in $Z$, $f:X\to 2^Z$ is continuous, and $F$ is a bright color of $f$. Then there exists an open neighborhood $U$ of $F$ such that $cl_{X}(U)$ is a bright color of $f$. \end{pro} \begin{proof} Since $F$ is a bright color of $f$ and $X$ is closed in $Z$, the sets $F$ and $cl_Z(\cup \{f(x):x\in F\})$ are disjoint closed sets in $Z$. Since $Z$ is normal, there exist $V$ an open neighborhood of $cl_{Z}[\cup \{f(x):x\in F\}]$ in $Z$ and $W$ an open neighborhood of $F$ in $Z$ such that $cl_Z(W)$ misses $cl_Z(V)$. Consider the open set $\langle V\rangle$ in $2^Z$. Since $f$ is continuous and $f(F)\subset \langle V\rangle$ there exists an open neighborhood $U$ of $F$ in $X$ such that $U\subset W$ and $f(cl_Z(U))\subset \langle V\rangle$. Thus, $cl_Z(\cup\{f(x):x\in cl_Z(U)\})$ is in $cl_Z(V)$. The latter misses $cl_Z(W)$ and therefore $cl_Z(U)$. Therefore, $U$ is as desired. \end{proof}
\par
Proposition \ref{pro:openbrightcolor} implies that if $X$ is closed in $\mathbb R^k$ and
$\mathcal F$ is an $m$-sized bright coloring of a continuous map $f:X\to {{\rm exp}}_n(\mathbb R^k)$
then there exists an $m$-sized open cover $\mathcal U$ of $X$ the closures of whose elements are
bright colors of $f$. We will use this observation throughout the paper without formally
referencing it.
\par
In the following discussion that leads to the main result we will restrict ourselves to closed subspaces of $\mathbb R^5$. This is done to avoid accumulation of too many variables. All arguments are valid if one replaces ``$5$'' with any natural number.
\par
\noindent \begin{defin}\label{defin:statementS5n} $S(5,n)$ denotes the following statement: there exists the smallest integer $K(5,n)$ such that every continuous fixed-point free map $f$ from a closed subset $X\subset \mathbb R^5$ to ${{\rm exp}}_n(\mathbb R^5)$ is colorable by at most $K(5, n)$ many bright colors. \end{defin}
\par
\noindent \begin{lem}\label{fgh} Suppose $f,g,h: X\subset Z\to 2^Z$ are maps; and $g$ and $h$ are colorable by at most $N_g$ and $N_h$ (bright) colors, respectively. If $f(x)\subset g(x)\cup h(x)$ for every $x\in X$ then $f$ is colorable by at most $N_g\cdot N_h$ (bright) colors. \end{lem} \begin{proof} Let $\mathcal G$ and $\mathcal H$ be bright colorings of $g$ and $h$ of sizes $N_g$ and $N_h$, respectively. Put
${\mathcal F}=\{G\cap H: G\cap H\not =\emptyset, H\in {\mathcal H}, G\in {\mathcal G}\}$. Clearly, $|{\mathcal F}|\leq N_g\cdot N_h$. Since ${\mathcal G}$ and $\mathcal H$ are closed covers of $X$, so is $\mathcal F$. Fix $F=H\cap G$ in $\mathcal F$. Since, by hypothesis, $f(x)\subset g(x)\cup h(x)$, we conclude that $$ cl_Z[\cup\{f(x):x\in H\cap G\}]\subset cl_Z[\cup\{g(x):x\in H\cap G\}]\cup cl_Z[\cup\{h(x):x\in H\cap G\}]. $$ Since $\mathcal G$ and $\mathcal H$ are bright colorings, $cl_Z[\cup\{g(x):x\in H\cap G\}]$ and $cl_Z[\cup\{h(x):x\in H\cap G\}]$ miss $H\cap G$. Therefore, the left side of the above set inclusion formula misses $H\cap G$ as well, meaning $H\cap G$ is a bright color for $f$. \end{proof}
\par
In what follows, by $\pi_i$ we denote the projection of $\mathbb R^5$ onto its $i$-th coordinate axis. The next two statements (Lemmas \ref{lem:allsamenumberofmax} and \ref{lem:largefirstprojection}) hold if we replace $\pi_1$ by $\pi_i$ for any $i\in \{1,...,n\}$. However, we will carry out our arguments for $\pi_1$ for the already mentioned reason of avoiding unnecessary variables. \par
\noindent \begin{lem}\label{lem:allsamenumberofmax} Suppose $n>M\geq 1$; $A$ is closed in $\mathbb R^5$; $f:A\to {{\rm exp}}_n(\mathbb R^5)\setminus {{\rm exp}}_{n-1}(\mathbb R^5)$ is continuous and fixed-point free;
$|\{y\in f(x): \pi_1(y) = \max \pi_1(f(x))\}|=M$ for all $x\in A$; and $S(5,n-1)$ is true. Then $f$ is colorable by at most $[K(5,n-1)]^2$ bright colors. \end{lem} \begin{proof} Define $g$ and $h$ from $A$ to ${{\rm exp}}_{n-1}(\mathbb R^5)$ as follows: $$ g(x) = \{z\in f(x): \pi_1(z) =\max \pi_1 (f(x))\} \ {\rm and}\ h(x) = f(x)\setminus g(x). $$ Since $f(x)$ is finite, $\max \pi_1(f(x))$ exists. Hence $g(x)$ is defined. Since
$|\{y\in f(x): \pi_1(y) = \max \pi_1(f(x))\}|=M$ and $1\leq M<n$, we conclude that $0<|f(x)\setminus g(x)|<n$ and
$0<|g(x)|<n$. Therefore, $g$ and $h$ are well defined functions from $A$ to ${{\rm exp}}_{n-1}(\mathbb R^5)$. Observe that $f(x)= g(x)\cup h(x)$ for each $x$. Therefore, by Lemma \ref{fgh}, to reach the conclusion of our lemma we need to show that $g$ and $h$ are colorable by at most $K(5,n-1)$ bright colors each. Since we assume that $S(5, n-1)$ holds it suffices to show that $g$ and $h$ are continuous and fixed-point free. The latter property follows from the facts that $f$ is fixed-point free and $f(x) = g(x)\cup h(x)$.
To prove continuity of $g$ and $h$, fix $x\in A$ and open neighborhoods ${\mathcal U}_{g(x)}$ and ${\mathcal V}_{h(x)}$ of $g(x)$ and $h(x)$ in ${{\rm exp}}_{n-1}(\mathbb R^5)$. We need to find an open neighborhood of $x$ in $A$ whose images under $g$ and $h$ are in ${\mathcal U}_{g(x)}$ and ${\mathcal V}_{h(x)}$, respectively. Without loss of generality we may assume that the selected neighborhoods are standard, that is, in the form $$ {\mathcal U}_{g(x)}=\langle U_y: y\in g(x)\rangle\ {\rm and}\ {\mathcal V}_{h(x)}=\langle U_y: y\in f(x)\setminus g(x)\rangle, $$ where $U_y$ is a fixed bounded open neighborhood of $y$ in $\mathbb R^5$ for each $y\in f(x)$. We may also assume that the following properties hold.
\begin{description}
\item[\rm P1] $\min \pi_1(\overline U_y)> \max \pi_1 (\overline U_z)$ whenever $y\in g(x)$ and $z\in f(x)\setminus g(x)$.
\item[\rm P2] $\overline U_y\cap \overline U_z =\emptyset$ for any distinct $y,z\in f(x)$.
\end{description} The property P1 can be achieved since by the definition of $g$, the set $\pi_1(g(x))$ is a singleton and its element is strictly greater than any element of $\pi_1(f(x)\setminus g(x))$. Put ${\mathcal W}_{f(x)} = \langle U_y:y\in f(x)\rangle$. Clearly, ${\mathcal W}_{f(x)}$ is an open neighborhood of $f(x)$. By continuity of $f$, there exists an open $O$ of $x$ in $A$ such that $f(O)\subset {\mathcal W}_{f(x)}$. To finish the proof of continuity of $g$ and $h$ it suffices to show that $g(O)\subset {\mathcal U}_{g(x)}$ and $h(O)\subset {\mathcal V}_{h(x)}$. For this fix an arbitrary $x'\in O$. By the choice of $O$, we have $f(x')\in {\mathcal W}_{f(x)}$. Let $f(x') =\{z_1,...,z_n\}\in {\rm exp}_n(\mathbb R^5)\setminus {\rm exp}_{n-1}(\mathbb R^5)$ and $\pi_1(z_i) = \max \pi_1(f(x'))$ for any $i=1,...,M$. By the lemma's condition on $M$, we have $\pi_1(z_j)<\max \pi_1(f(x))$ for any $j=M+1,...,n$. By P1, we have \begin{description}
\item[\rm P3] $z_i\in \cup \{U_y:y\in g(x)\}$ for any $i=1,...,M$. \end{description} By P2 and P3, we have \begin{description}
\item[\rm P4] $z_j\in \cup \{U_y:y\in f(x)\setminus g(x)\}$ for any $j=M+1,...,n$. \end{description} By P2 and the fact that $f(x')\in {\mathcal W}_{f(x)}=\langle U_y:y\in f(x)\rangle$, we conclude that each $U_y$, participating in the definition of ${\mathcal W}_{f(x)}$, contains exactly one $z_i\in f(x')$. By P3 and P4, we have $\{z_1,...,z_M\}\in {\mathcal U}_{g(x)}$ and $\{z_{M+1},...,z_n\}\in {\mathcal V}_{h(x)}$. Since $g(x')=\{z_1,...,z_M\}$ and $h(x')=\{z_{M+1},...,z_n\}$, we are done. \end{proof}
\par
\noindent \begin{lem}\label{lem:largefirstprojection} Suppose $n>1$; $A$ is closed in $\mathbb R^5$; $f:A\to {{\rm exp}}_n(\mathbb R^5)\setminus {{\rm exp}}_{n-1}(\mathbb R^5)$ is continuous and fixed-point free;
$|\{y\in f(x): \pi_1(y) = \max \pi_1(f(x))\}|>1$ for all $x\in A$; and $S(5,n-1)$ is true. Then $f$ is colorable by at most $n\cdot [K(5,n-1)]^2$ bright colors. \end{lem} \begin{proof} For $m=1,...,n-1$, define $O_m\subset A$ as follows: $x\in O_m$ if and only if
$M_x=|\{y\in f(x):\pi_1(y)=\max \pi_1(f(x))\}|<n-m$.
\par
\noindent {\it Claim. $O_m$ is open.} \par
\noindent To prove the claim, fix $x\in O_m$ and let ${\mathcal V}_{f(x)}=\langle V_y:y\in f(x)\rangle$ be an open neighborhood of $f(x)$ such that the following hold: \begin{enumerate}
\item $V_y$ is a bounded neighborhood of $y$ for every $y\in f(x)$;
\item $V_y\cap V_z=\emptyset$ for any distinct $y,z\in f(x)$; and
\item $\min \pi_1(\overline V_y) >\max \pi_1(\overline V_z)$ whenever $\pi_1(y)=\max \pi_1(f(x))$
and $\pi_1(z)<\max \pi_1(f(x))$. \end{enumerate} It suffices to show now that $f^{-1}({\mathcal V}_{f(x)})\subset O_m$. For this pick $x'\in f^{-1}({\mathcal V}_{f(x)})$. We have $f(x')\in {\mathcal V}_{f(x)}$. By (2) and (3), $M_{x'}\leq M_x$. Hence, $M_{x'}<n-m$. By the definition of $O_m$, $x'\in O_m$. The claim is proved.
\par
We will construct our coloring inductively. For short put $K=[K(5,n-1)]^2$. \par
\noindent
{\it Step 1.} Put $A_1 = A\setminus O_1$. Thus, an element $x$ of $A$ is in $A_1$ if and only if $M_x\geq n-1$. Since $|\pi_1(f(x))|>1$ for every $x\in A$ we conclude that $M_x=n-1$ for every $x\in A_1$. Therefore, by Lemma \ref{lem:allsamenumberofmax}, there exists a $K$-sized open cover ${\mathcal U}_1$ of $A_1$ the closures of whose elements are bright colors for $f$.
\par
\noindent {\it Assumption.} Assume for $m-1$ an open family ${\mathcal U}_{m-1}$ of size at most $(m-1)\cdot K$ is defined and the following conditions are met: \begin{description}
\item[\rm P1] $\overline U$ is a bright color of $f$ for every $U\in {\mathcal U}_{m-1}$; and
\item[\rm P2] If $M_x\geq n-(m-1)$ then $x\in \bigcup {\mathcal U}_{m-1}$. \end{description}
\par
\noindent {\it Step $m<n$.} Put $A_m = A\setminus [O_m\cup (\bigcup {\mathcal U}_{m-1})]$. By Claim, the set $A_m$ is closed. Pick any $x\in A_m$. Then $x\not\in O_m$, meaning that $M_x\geq n-m$. Also $x\not\in \bigcup {\mathcal U}_{m-1}$, which, by P2, implies $M_x<n-(m-1)$. Thus, $n-m\leq M_x<n-m+1$. Therefore, $M_x=n-m$. Therefore, by Lemma \ref{lem:allsamenumberofmax}, there exists a $K$-sized open cover ${\mathcal V}_m$ of $A_m$ the closures of whose elements are bright colors for $f$. Put ${\mathcal U}_m = {\mathcal U}_{m-1}\cup {\mathcal V}_m$. Clearly, the size of this family is at most $m\cdot K$. Let us verify P1 and P2 for $m$. Property P1 holds since ${\mathcal U}_m$ is the union of two families satisfying P1. For P2, observe that $M_x\geq n-m$ if and only if $x\not\in O_m$. But ${\mathcal U}_m$ covers the complement of $O_m$.
\par
\noindent The construction is complete. It suffices to show now that $A=\bigcup {\mathcal U}_{n-1}$. For this pick $x\in A$. By the lemma's hypothesis, $M_x>1=n-(n-1)$. By P2, $x\in \bigcup {\mathcal U}_{n-1}$. \end{proof}
\par
The base step in the proof of our main theorem uses the following statement (\cite[Proposition 2.9]{bc}): {\it If $f$ is a continuous fixed-point free map from a closed subset $X$ of $\mathbb R^m$ to $\mathbb R^m$ then $f$ is colorable by at most $m+3$ bright colors.}
\par
\noindent \begin{thm}\label{thm:chromaticnumber} There exists an integer $K(m,n)$ such that every continuous fixed-point free map from a closed subspace $X$ of $\mathbb R^m$ into ${{\rm exp}}_n(\mathbb R^m)$ is colorable by at most $K(m,n)$ many bright colors. \end{thm} \begin{proof} As in previous statements, to avoid accumulation of extra variables, let us deal with $m=5$. Thus, we need to show that $K(5,n)$ exists for every natural number $n$. By \cite[Proposition 2.9]{bc}, $K(5,1)$ exists. Assume that $K(5,n-1)$ exists. To prove the conclusion of the theorem for $n$, fix any fixed-point free continuous map
$f$ from a closed subspace $X$ of $\mathbb R^5$ into ${{\rm exp}}_n(\mathbb R^5)$. Next define $L\subset X$ as follows: $x\in L$ if and only if $|f(x)|<n$. Then $L$ is closed and the range of
$f|_L$ is a subset of ${{\rm exp}}_{n-1}(\mathbb R^5)$ (notice that $L$ can be empty). Therefore, by our inductive assumption, there exists a $K(5,n-1)$-sized open cover ${\mathcal U}_L$ of $L$ the closures of whose elements are bright colors. Put $E=X\setminus \bigcup {\mathcal U}_L$. Then $E$ is closed and $|f(x)|=n$ for every $x\in E$.
For $1\leq i\leq n$, define $S_i$ as follows: $x\in S_i$ if and only if $x\in E$ and $|\pi_i(f(x))|=1$. Notice that $S_i$ can be empty. Clearly, $S_i$ is closed. Inductively, we will first cover
$\cup_{i\leq n} S_i =\{x\in E: |\pi_i(f(x))|=1 \mbox{ for some } i\}$ by bright colors and then we will cover the rest of $E$.
\par
\noindent {\it Step 1.} Put $E_i =\cap_{j\not =i} S_j$. Notice that $E_i$ can be empty.
Since $|f(x)|=n$ for every $x\in E$ we conclude that
$|\pi_i(f(x))|=n$ for all $x\in E_i$. Since $n>1$, by Lemma \ref{lem:allsamenumberofmax} (with $\pi_1$ replaced by $\pi_i$ and $M=1$), there exists a finite open cover ${\mathcal U}_1$ of $\cup_{i\leq n}E_i$ the closures of whose elements are bright colors for $f$.
\par
\noindent {\it Assumption.} Suppose an open finite family ${\mathcal U}_{k-1}$, where $1\leq k-1<n$, is defined and the following hold: \begin{description}
\item[\rm P1] For every at most $(k-1)$-sized subset $I$ of $\{1,...,n\}$ the inclusion $\cap_{j\not \in I}S_j \subset \bigcup {\mathcal U}_{k-1}$ holds;
\item[\rm P2] $\overline U$ is a bright color for every $U\in {\mathcal U}_{k-1}$. \end{description}
\par
\noindent
{\it Step $k<n$.} For every $k$-sized $I\subset \{1,...,n\}$ put $E_I = [\cap_{j\not \in I}S_j]\setminus [\bigcup {\mathcal U}_{k-1}]$. Pick $i^*\in I$. Then $|I\setminus \{i^*\}| = k-1$. By P1, the set $\bigcup {\mathcal U}_{k-1}$ contains $\cap \{S_j:j\in I\setminus \{i^*\}\}$. Since $E_I$ misses $\bigcup {\mathcal U}_{k-1}$, we conclude that
$|\pi_{i^*}(f(x))|>1$ for every $x$ in $E_I$. By Lemma \ref{lem:largefirstprojection}, there exists a finite open cover ${\mathcal U}_I$ of $E_I$ the closures of whose elements are bright colors. Put
${\mathcal U}_k =[\cup \{{\mathcal U}_I: I\subset \{1,...,n\},\ |I|=k\}]\cup {\mathcal U}_{k-1}$. Properties P1 and P2 clearly hold for $k$. The construction is complete.
\par
\noindent Let us show that $\{x\in E: |\pi_i(f(x))|=1 \mbox{ for some } i\}$ is covered by ${\mathcal U}_{n-1}$. For this pick any $z$ in this set. Put $I_z = \{i\leq n: |\pi_i(f(z))|>1\}$. Clearly, $|I_z| <n$. Since $z\in E$ we conclude that $|f(z)|=n$. Therefore, $I_z\not = \emptyset$. Therefore $z\in \cap_{j\not\in I_z} S_j$. By P1, $z\in \bigcup {\mathcal U}_{n-1}$.
To finish the proof we need to cover $E\setminus [\bigcup {\mathcal U}_{n-1}]$ by bright colors. For this observe that $E\setminus [\bigcup {\mathcal U}_{n-1}]$ contains only those $x$ for which $|\pi_i(f(x))|=n>1$ for all $i$. Therefore, by Lemma \ref{lem:allsamenumberofmax} (with $M=1$), the set in question is covered by bright colors. Since we always used Lemmas \ref{lem:allsamenumberofmax} and \ref{lem:largefirstprojection} to construct our coloring, the size of the coloring depends on $n$ and number $5$ only. Thus, $K(5,n)$ exists. \end{proof} \par
A non-technical version of the above result is the following theorem, where $n$ and $k$ denote any natural numbers. \par
\noindent \begin{thm}\label{thm:colorability} Any continuous fixed-point free map from a closed subspace $X$ of $\mathbb R^k$ into ${{\rm exp}}_n(\mathbb R^k)$ is brightly colorable. \end{thm}
\par
Observe that if a continuous map $f:X\to 2^Z$ has the property that $f(x)$ is compact in $Z$ for all $x$ then there exists the continuous extension $\tilde f:\beta X\to 2^{\beta Z}$. For our next discussion put ${\mathcal K}(X)=\{A\in 2^X: A \mbox{ is compact}\}$. In \cite{bc} it is proved that a continuous map $f$ from a closed subspace $X$ of $\mathbb R^k$ to $\mathbb R^k$ is fixed-point free if and only if its continuous extension $\tilde f: \beta X\to \beta \mathbb R^k$ is fixed-point free. It is natural to ask if the corresponding statement holds for a multivalued map $f: X\to {{\rm exp}}_n(\mathbb R^k)$ and its continuous extension $\tilde f: \beta X\to {{\rm exp}}_n(\beta \mathbb R^k)$. Observe first that the continuous extension exists since ${{\rm exp}}_n(\beta \mathbb R^k)$ is compact. The affirmative answer to this question follows from the next statement.
\par
\noindent \begin{pro}\label{pro:ftobetaf}
Let $X$ be a closed subspace of a normal space $Z$. If $\mathcal F$ is a bright coloring
of a continuous map $f:X\to {\mathcal K}(Z)$, then $\{\beta F:F\in {\mathcal F}\}$ is a bright coloring
of $\tilde f:\beta X\to {\mathcal K}({\beta Z})$. \end{pro} \begin{proof} Since $X$ is normal and $\mathcal F$ is a finite closed cover of $X$ the family $\{\beta F:F\in {\mathcal F}\}$ is a closed cover of $\beta X$. Therefore, we only need to show that $\beta F$ is a bright color for $\tilde f$. Since $F$ is a bright color for $f$ the set $F$ misses $cl_Z(\cup \{f(x): x\in F\})$. Since $F$ and $cl_Z(\cup \{f(x): x\in F\})$ are disjoint closed subsets of the normal space $Z$ we conclude that $\beta F$ misses $cl_{\beta Z}(\cup \{f(x): x\in F\})$. Since $\tilde f$ is the continuous extension of $f$ we conclude that $cl_{\beta Z}(\cup \{\tilde f(x): x\in \beta F\})=cl_{\beta Z}(\cup \{f(x): x\in F\})$. Thus $\beta F$ misses $cl_{\beta Z}(\cup \{\tilde f(x): x\in \beta F\})$, whence $\beta F$ is a bright color of $\tilde f$. \end{proof}
\par
\noindent \begin{thm}\label{thm:fpfcriterion} Let $f$ be a continuous map from a closed subspace $X$ of $\mathbb R^k$ to ${{\rm exp}}_n(\mathbb R^k)$. Then $f$ is fixed-point free if and only if $\tilde f:\beta X\to {{\rm exp}}_n(\beta \mathbb R^k)$ is fixed-point free. \end{thm} \begin{proof} Sufficiency is obvious. To prove necessity, let $\mathcal F$ be a bright coloring of $f$. By Proposition \ref{pro:ftobetaf}, $\{\beta F:F\in {\mathcal F}\}$ is a coloring for $\tilde f$. Since $\{\beta F:F\in {\mathcal F}\}$ covers $\beta X$ and $f(\beta F)$ misses $\beta F$ for each $F\in {\mathcal F}$, we conclude that $\tilde f$ does not fix any point. \end{proof}
\par
Using spectral techniques it is observed in \cite[Corollary 3.5.7]{VM2} that if $f$ is a continuous colorable self-map on a separable metric space $X$ then one can find a continuous fixed-point free extension $\bar{f}:bX\to bX$, where $bX$ is a metric compactification of $X$ of the same dimensionality as that of $X$. Using the same technique we will next outline a proof for the following metric version of Theorem \ref{thm:fpfcriterion} for natural numbers $n$ and $k$.
\par
\noindent \begin{thm}\label{thm:metricversion} Let $f:\mathbb R^k\to {{\rm exp}}_n(\mathbb R^k)$ be continuous. Then $f$ is fixed-point free if and only if there exists a continuous fixed-point free extension $\bar f:b\mathbb R^k\to {{\rm exp}}_n(b\mathbb R^k)$, where $b\mathbb R^k$ is some metric compactification of $\mathbb R^k$ of dimension $k$. \end{thm} \begin{proof} Sufficiency is obvious. Let us outline a proof of necessity. By Theorem \ref{thm:fpfcriterion}, $\tilde f:\beta \mathbb R^k\to {{\rm exp}}_n(\beta\mathbb R^k)\subset 2^{\beta\mathbb R^k}$ is fixed-point free. By the \v S\v cepin spectral theorem \cite{S}, we can find spectra $\{b_\alpha(\mathbb R^k), \pi^\gamma_\alpha, {\mathcal A}\}$ and $\{2^{b_\alpha(\mathbb R^k)}, p^\gamma_\alpha, {\mathcal A}\}$ with inverse limits $\beta \mathbb R^k$ and $2^{\beta \mathbb R^k}$, respectively, and a family of maps $\{f_\alpha:\alpha\in {\mathcal A}\}$ such that the following hold: \begin{enumerate}
\item $b_\alpha(\mathbb R^k)$ is a metric compactification of $\mathbb R^k$ of dimension $k$ for all $\alpha$.
\item $\pi^\gamma_\alpha$ and $p^\gamma_\alpha$ are identity maps on $\mathbb R^k$ and $2^{\mathbb R^k}$, respectively.
\item $f_\alpha\circ \pi_\alpha = p_\alpha\circ f$. \end{enumerate} Since $\tilde f$ is fixed-point free and $b_\alpha(\mathbb R^k)$ is compact for every $\alpha$ we may assume that $f_\alpha$ is fixed-point free for every $\alpha$. By (2) and (3), each $f_\alpha$ coincides with $f$ on $\mathbb R^k$. Therefore, each $\{f_\alpha, b_{\alpha}(\mathbb R^k)\}$ serves our purpose. \end{proof}
\par
We would like to finish the paper by commenting on a number of colors sufficient to paint a given graph. If one follows the argument of Theorem \ref{thm:chromaticnumber} or the argument for the reals outlined in the introduction one will quickly see that the size of the coloring constructed for the case ${{\rm exp}}_n(\mathbb R^k)$ is at least $k^n$. Therefore, it is natural to wonder if an estimate for the required number of colors can be represented as a polynomial of both $n$ and $k$. \par
\end{document} |
\begin{document}
\title{Algebraic stacks}
\begin{abstract} This is an expository article on the theory of algebraic stacks. After introducing the general theory, we concentrate on the example of the moduli stack of vector bundles, giving a detailed comparison with the moduli scheme obtained via geometric invariant theory. \end{abstract}
\section{Introduction}
The concept of algebraic stack is a generalization of the concept of scheme, in the same sense that the concept of scheme is a generalization of the concept of projective variety. In many moduli problems, the functor that we want to study is not representable by a scheme. In other words, there is no fine moduli space. Usually this is because the objects that we want to parametrize have automorphisms. But if we enlarge the category of schemes (following ideas that go back to Grothendieck and Giraud, and were developed by Deligne, Mumford and Artin) and consider algebraic stacks, then we can construct the ``moduli stack'', that captures all the information that we would like in a fine moduli space.
The idea of enlarging the category of algebraic varieties to study moduli problems is not new. In fact A. Weil invented the concept of abstract variety to give an algebraic construction of the Jacobian of a curve.
These notes are an introduction to the theory of algebraic stacks. I have tried to emphasize ideas and concepts through examples instead of detailed proofs (I give references where these can be found). In particular, section \ref{sectionversus} is a detailed comparison between the moduli \textit{scheme} and the moduli \textit{stack} of vector bundles.
First I will give a quick introduction in subsection \ref{quick}, just to give some motivations and get a flavour of the theory of algebraic stacks.
Section \ref{sectionstacks} has a more detailed exposition. There are mainly two ways of introducing stacks. We can think of them as 2-functors (I learnt this approach from N. Nitsure and C. Sorger, cf. subsection \ref{subsfunctors}), or as categories fibered on groupoids (This is the approach used in the references, cf. subsection \ref{subsgroupoids}). From the first point of view it is easier to see in which sense stacks are generalizations of schemes, and the definition looks more natural, so conceptually it seems more satisfactory. But since the references use categories fibered on groupoids, after we present both points of view, we will mainly use the second.
The concept of stack is merely a categorical concept. To do geometry we have to add some conditions, and then we get the concept of algebraic stack. This is done in subsection \ref{subsalgebraic}.
In subsection \ref{subsgroupspaces} we introduce a third point of view to understand stacks: as groupoid spaces.
In subsection \ref{subsproperties} we define for algebraic stacks many of the geometric properties that are defined for schemes (smoothness, irreducibility, separatedness, properness, etc...). In subsection \ref{subspoints} we introduce the concept of point and dimension of an algebraic stack, and in subsection \ref{subssheaves} we define sheaves on algebraic stacks.
In section \ref{sectionversus} we study in detail the example of the moduli of vector bundles on a scheme $X$, comparing the moduli stack with the moduli scheme.
Appendix A is a brief introduction to Grothendieck topologies, sheaves and algebraic spaces. In appendix B we define some notions related to the theory of 2-categories.
\subsection{Quick introduction to algebraic stacks} \label{quick}
We will start with an example: vector bundles (with fixed prescribed Chern classes and rank) on a projective scheme $X$ over an algebraically closed field $k$. What is the moduli stack ${\mathcal{M}}$ of vector bundles on $X$? I don't know a short answer to this, but instead it is easy to define what is a morphism from a scheme $B$ to the moduli stack ${\mathcal{M}}$. It is just a family of vector bundles parametrized by $B$. More precisely, it is a vector bundle $V$ on $B\times X$, flat over $B$, such that the restrictions to the slices $b\times X$ have prescribed Chern classes and rank. In other words, ${\mathcal{M}}$ has the property that we expect from a fine moduli space: the set of morphisms $\operatorname{Hom}(B,{\mathcal{M}})$ is equal to the set of families parametrized by $B$.
We will say that a diagram \begin{eqnarray} \label{commdiag} \xymatrix{ {B} \ar[r]^f \ar[rd]_{g} & {B'} \ar[d]^{g'} \\
& {{\mathcal{M}}} } \end{eqnarray} is commutative if the vector bundle $V$ on $B\times X$ corresponding to $g$ is isomorphic to the vector bundle $(f\times \operatorname{id}_X)^*V'$, where $V'$ is the vector bundle corresponding to $g'$. Note that in general, if $L$ is a line bundle on $B$, then $V$ and $V\otimes p^*_B L$ won't be isomorphic, and then the corresponding morphisms from $B$ to ${\mathcal{M}}$ will be different, as opposed to what happens with moduli schemes.
A $k$-point in the stack ${\mathcal{M}}$ is a morphism $u:\operatorname{Spec} k \to{\mathcal{M}}$, in other words, it is a vector bundle $V$ on $X$, and we say that two points are isomorphic if they correspond to isomorphic vector bundles. But we shouldn't think of ${\mathcal{M}}$ just as a set of points, it should be thought of as a category. The objects of ${\mathcal{M}}$ are points\footnote{ To be precise, we should consider also $B$-valued points, for any scheme $B$, but we will only consider $k$-valued points for the moment}, i.e. vector bundles on $X$, and a morphism in ${\mathcal{M}}$ is an isomorphism of vector bundles. This is the main difference between a scheme and an algebraic stack: a scheme is a \textit{set} of points, but an algebraic stack is a \textit{category}, in fact a \textit{groupoid} (i.e. a category in which all morphisms are isomorphisms). Each point comes with a group of automorphisms. Roughly speaking, a scheme (or more generally, an algebraic space \cite{Ar1}, \cite{K}) can be thought of as an algebraic stack in which these groups of automorphisms are all trivial. If $p$ is the $k$-point in ${\mathcal{M}}$ corresponding to a vector bundle $V$ on $X$, then the group of automorphisms associated to $p$ is the group of vector bundle automorphisms of $V$. This is why algebraic stacks are well suited to serve as moduli of objects that have automorphisms.
An algebraic stack has an atlas. This is a scheme $U$ and a surjective morphism $u:U \to {\mathcal{M}}$ (with some other properties). As we have seen, such a morphism $u$ is equivalent to a family of vector bundles parametrized by $U$, and we say that $u$ is surjective if for every vector bundle $V$ over $X$ there is at least one point in $U$ whose corresponding vector bundle is isomorphic to $V$. The existence of an atlas for an algebraic stack is the analogue of the fact that for a scheme $B$ there is always an \textit{affine} scheme $U$ and a surjective morphism $U \to B$ (if $\{U_i\to B\}$ is a covering of $B$ by affine subschemes, take $U$ to be the disjoint union $\coprod U_i$). Many local properties (smooth, normal, reduced...) can be studied by looking at the atlas $U$. It is true that in some sense an algebraic stack looks, locally, like a scheme, but we shouldn't take this too far. For instance the atlas of the classifying stack $BG$ (parametrizing principal $G$-bundles, cf. example \ref{quotient}) is just a single point. The dimension of an algebraic stack ${\mathcal{M}}$ will be defined as the dimension of $U$ minus the relative dimension of the morphism $u$. The dimension of an algebraic stack can be negative (for instance, $\dim (BG)=-\dim(G)$).
A coherent sheaf $L$ on an algebraic stack ${\mathcal{M}}$ is a law that, for each morphism $g:B \to {\mathcal{M}}$, gives a coherent sheaf $L_B$ on $B$, and for each commutative diagram like (\ref{commdiag}), gives an isomorphism between $f^* L_{B'}$ and $L_B$. The coherent sheaf $L_B$ should be thought of as the pullback ``$g^*L$'' of $L$ under $g$ (the compatibility condition for commutative diagrams is just the condition that $(g'\circ f)^*L$ should be isomorphic to $f^* {g'}^* L$).
Let's look at another example: the moduli quotient (example \ref{quotient}). Let $G$ be an affine algebraic group acting on $X$. For simplicity, assume that there is a normal subgroup $H$ of $G$ that acts trivially on $X$, and that $\overline G=G/H$ is an affine group acting freely on $X$ and furthermore there is a quotient by this action $X \to B$ and this quotient is a principal $\overline G$-bundle. We call $B=X/G$ the \textit{quotient scheme}. Each point corresponds to a $G$-orbit of the action. But note that $B$ is also equal to the quotient $X/\overline G$, because $H$ acts trivially and then $G$-orbits are the same thing as $\overline G$-orbits. We can say that the quotient scheme ``forgets'' $H$.
One can also define the \textit{quotient stack} $[X/G]$. Roughly speaking, a point $p$ of $[X/G]$ again corresponds to a $G$-orbit of the action, but now each point comes with an automorphism group: given a point $p$ in $[X/G]$, choose a point $x\in X$ in the orbit corresponding to $p$. The automorphism group attached to $p$ is the stabilizer $G_x$ of $x$. With the assumptions that we have made on the action of $G$, the automorphism group of any point is always $H$. Then the quotient stack $[X/G]$ is not a scheme, since the automorphism groups are not trivial. The action of $H$ is trivial, but the moduli stack still ``remembers'' that there was an action by $H$. Observe that the stack $[X/\overline G]$ is not isomorphic to the stack $[X/G]$ (as opposed to what happens with the quotient schemes). Since the action of $\overline G$ is free on $X$, the automorphism group corresponding to each point of $[X/\overline G]$ is trivial, and it can be shown that, with the assumptions that we made, $[X/\overline G]$ is represented by the scheme $B$ (this terminology will be made precise in section \ref{sectionstacks}).
\section{Stacks} \label{sectionstacks}
\subsection{Stacks as 2-functors. Sheaves of sets.} \label{subsfunctors}
Given a scheme $M$ over a base scheme $S$, we define its (contravariant) functor of points $\operatorname{Hom}_S(-,M)$ $$ \begin{array}{rccc} {\operatorname{Hom}_S(-,M):} &{({Sch} /S)}& \longrightarrow &{({Sets})} \\
& {B} &\longmapsto &{\operatorname{Hom}_S(B,M)} \end{array} $$ where $({Sch} /S)$ is the category of $S$-schemes, $B$ is an $S$-scheme, and $\operatorname{Hom}_S(B,M)$ is the set of $S$-scheme morphisms. If we give $({Sch}/S)$ the \'etale topology, $\operatorname{Hom}_S(-,M)$ is a sheaf. A sheaf of sets on $({Sch}/S)$ with the \'etale topology is called a space.
Then schemes can be thought of as sheaves of sets. Moduli problems can usually be described by functors. We say that a sheaf of sets $F$ is representable by a scheme $M$ if $F$ is isomorphic to the functor of points $\operatorname{Hom}_S(-,M)$. The scheme $M$ is then called the fine moduli scheme. Roughly speaking, this means that there is a one to one correspondence between families of objects parametrized by a scheme $B$ and morphisms from $B$ to $M$.
\begin{example}[Vector bundles] \label{defvectorbundle} \textup{ Let $X$ be a projective scheme over a Noetherian base $S$. We define the moduli functor $\underline{\Bund}'$ of vector bundles of fixed rank $r$ and Chern classes $c_i$ by sending the scheme $B$ to the set $\underline{\Bund}'(B)$ of isomorphism classes of vector bundles on $X\times B$, flat over $B$ with rank $r$ and whose restriction to the slices $X\times \{b\}$ have Chern classes $c_i$. These vector bundles should be thought of as families of vector bundles parametrized by $B$. A morphism $f:B'\to B$ is sent to $\underline{\Bund}'(f)=f^*:\underline{\Bund}'(B) \to \underline{\Bund}'(B')$, the map of sets induced by the pullback. Usually we will also fix a polarization $H$ in $X$ and restrict our attention to stable or semistable vector bundles with respect to this polarization, and then we consider the corresponding functors $\underline{\Bund}^{\prime s}$ and $\underline{\Bund}^{\prime ss}$. } \end{example}
\begin{example}[Curves] \textup{ The moduli functor $M_g$ of smooth curves of genus $g$ over $S$ is the functor that sends each scheme $B$ to the set $M_g(B)$ of isomorphism classes of smooth and proper morphisms $C \to B$ (where $C$ is an $S$-scheme) whose fibers are geometrically connected curves of genus $g$. Each morphism $f:B'\to B$ is sent to the map of sets induced by the pullback $f^*$. } \end{example}
None of these examples are sheaves (then none of these are representable), because of the presence of automorphisms. They are just presheaves (=functors). For instance, given a curve $C$ over $S$ with nontrivial automorphisms, it is possible to construct a family $f:{\mathcal{C}} \to B$ such that every fiber of $f$ is isomorphic to $C$, but ${\mathcal{C}}$ is not isomorphic to $B \times C$. This implies that $M_g$ doesn't satisfy the monopresheaf axiom.
This can be solved by taking the sheaf associated to the presheaf (sheafification). In the examples, this amounts to change isomorphism classes of families to equivalence classes of families, when two families are equivalent if they are locally (using the \'etale topology over the parametrizing scheme $B$) isomorphic. In the case of vector bundles, this is the reason why one usually declares two vector bundles $V$ and $V'$ on $X \times B$ equivalent if $V\cong V'\otimes p_B^* L$ for some line bundle $L$ on $B$. The functor obtained with this equivalence relation is denoted $\underline{\Bund}$ (and analogously for $\underline{\Bund}^{s}$ and $\underline{\Bund}^{ss}$).
Note that if two families $V$ and $V'$ are equivalent in this sense, then they are locally isomorphic. The converse is only true if the vector bundles are simple (only automorphisms are scalar multiplications). This will happen, for instance, if we are considering the functor $\underline{\Bund}^{\prime s}$ of stable vector bundles, since stable vector bundles are simple. In general, if we want the functor to be a sheaf, we have to use a weaker notion of equivalence, but this is not done because for other reasons there is only hope of obtaining a fine moduli space if we restrict our attention to stable vector bundles.
Once this modification is made, there are some situations in which these examples are representable (for instance, stable vector bundles on curves with coprime rank and degree), but in general they will still not be representable, because in general we don't have a universal family:
\begin{definition}[Universal family] Let $F$ be a representable functor, and let $\phi:F \to \operatorname{Hom}_S(-,X)$ be the isomorphism. The object of $F(X)$ corresponding to the element $\operatorname{id}_X$ of $\operatorname{Hom}_S(X,X)$ is called the universal family. \end{definition}
\begin{example}[Vector bundles] \textup{ If $V$ is a universal vector bundle (over $X\times M$, where $M$ is the fine moduli space), it has the property that for any family $W$ of vector bundles (i.e. $W$ is a vector bundle over $X\times B$ for some parameter scheme $B$) there exists a morphism $f:B\to M$ such that $(f\times \operatorname{id}_X)^* V$ is equivalent to $W$.} \end{example}
When a moduli functor $F$ is not representable and then there is no scheme $X$ whose functor of points is isomorphic to $F$, one can still try to find a scheme $X$ whose functor of points is an approximation to $F$ in some sense. There are two different notions:
\begin{definition}[Corepresents] \textup{\cite[p. 60]{S}, \cite[def 2.2.1]{HL}}. We say that a scheme $M$ corepresents the functor $F$ if there is a natural transformation of functors $\phi:F \to \operatorname{Hom}_S(-,M)$ such that \begin{itemize}
\item Given another scheme $N$ and a natural transformation $\psi:F \to \operatorname{Hom}_S(-,N)$, there is a unique natural transformation $\eta: \operatorname{Hom}_S(-,M)\to \operatorname{Hom}_S(-,N)$ with $\psi= \eta \circ \phi$. $$ \xymatrix{ {F} \ar[d]^{\phi} \ar[rd]^{\psi} \\
{\operatorname{Hom}_S(-,M)} \ar[r]^{\eta} & \operatorname{Hom}_S(-,N)\\ } $$ \end{itemize} \end{definition}
This characterizes $M$ up to unique isomorphism. Let $({Sch}/S)'$ be the functor category, whose objects are contravariant functors from $({Sch}/S)$ to $(Sets)$ and whose morphisms are natural transformation of functors. Then $M$ represents $F$ iff $\operatorname{Hom}_S(Y,M)= \operatorname{Hom}_{({Sch}/S)'}({\mathcal{Y}},F)$ for all schemes $Y$, where ${\mathcal{Y}}$ is the functor represented by $Y$. On the other hand, one can check that $M$ corepresents $F$ iff $\operatorname{Hom}_S(M,Y)= \operatorname{Hom}_{({Sch}/S)'}(F,{\mathcal{Y}})$ for all schemes $Y$. If $M$ represents $F$, then it corepresents it, but the converse is not true. From now on we will usually denote a scheme and the functor that it represents by the same letter.
\begin{definition}[Coarse moduli] A scheme $M$ is called a coarse moduli scheme if it corepresents $F$ and furthermore
\begin{itemize} \item For any algebraically closed field $k$, the map $\phi(k):F(\operatorname{Spec} k) \to \operatorname{Hom}_S(\operatorname{Spec} k, M)$ is bijective.
\end{itemize} \end{definition}
In both cases, given a family of objects parametrized by $B$ we get a morphism from $B$ to $M$, but we don't require the converse to be true.
\begin{example}[Vector bundles] \label{vb1} \textup{ There is a scheme $\mathfrak{M}^{ss}$ that corepresents $\underline{\Bund}^{ss}$. It fails to be a coarse moduli scheme because its closed points are in one to one correspondence with S-equivalence classes of vector bundles, and not with isomorphism classes of vector bundles. Of course, this can be solved `by hand' by modifying the functor and considering two vector bundles equivalent if they are S-equivalent. Once this modification is done, $\mathfrak{M}^{ss}$ is a coarse moduli space. }
\textup{ But in general $\mathfrak{M}^{ss}$ doesn't represent the moduli functor $\underline{\Bund}^{ss}$. The reason for this is that vector bundles have always nontrivial automorphisms (multiplication by scalar), but the moduli functor doesn't record information about automorphisms: recall that to a scheme $B$ it associates just the set of equivalence classes of vector bundles. To record the automorphisms of these vector bundles, we define $$ \begin{array}{rccc} {\mathcal{M}}: & ({Sch}/S) & \longrightarrow & ({groupoids}) \\
& B & \longmapsto & {\mathcal{M}}(B) \end{array} $$ where ${\mathcal{M}}(B)$ is the category whose objects are vector bundles $V$ on $X\times B$ of rank $r$ and with fixed Chern classes (note that the objects are vector bundles, not isomorphism classes of vector bundles), and whose morphisms are vector bundle isomorphisms (note that we use isomorphisms of vector bundles, not S-equivalence nor equivalence classes as before). This defines a 2-functor between the 2-category associated to $({Sch}/S)$ and the 2-category $({groupoids})$ .} \end{example}
\begin{definition} Let $({groupoids})$ be the 2-category whose objects are groupoids, 1-morphisms are functors between groupoids, and 2-morphisms are natural transformations between these functors. A presheaf in groupoids (also called a quasi-functor) is a contravariant 2-functor ${\mathcal{F}}$ from $({Sch}/S)$ to $({groupoids})$. For each scheme $B$ we have a groupoid ${\mathcal{F}}(B)$ and for each morphism $f:B'\to B$ we have a natural transformation of functors ${\mathcal{F}}(f)$ that is denoted by $f^*$ (usually it is actually defined by a pullback). \end{definition}
\begin{example}[Vector bundles] \label{bbund} \textup{\cite[1.3.4]{La}. ${\mathcal{M}}$ is a presheaf. For each object $B$ of $({Sch}/S)$ it gives the groupoid ${\mathcal{M}}(B)$ that we have defined in example \ref{vb1}. For each 1-morphism $f:B' \to B$ it gives the functor $F(f)=f^*:{\mathcal{M}}(B)\to {\mathcal{M}}(B')$ given by pull-back, and for every diagram \begin{eqnarray} \label{compo} B'' \stackrel{g}\longrightarrow B' \stackrel{f}\longrightarrow B \end{eqnarray} it gives a natural transformation of functors (a 2-isomorphism) $\epsilon_{g,f}:g^*\circ f^* \to (f\circ g)^*$. This is the only subtle point. First recall that the pullback $f^*V$ of a vector bundle (or more generally, any fiber product) is not uniquely defined: it is only defined up to unique isomorphism. First choose once and for all a pullback $f^*V$ for each $f$ and $V$. Then, given a diagram like \ref{compo}, in principle $g^*(f^*V)$ and $(f\circ g)^*V$ are not the same, but (because both solve the same universal problem) there is a canonical isomorphism (the unique isomorphism of the universal problem) $g^*(f^*V) \to (f\circ g)^*V$ between them, and this defines the natural transformation of functors $\epsilon_{g,f}:g^*\circ f^* \to (f\circ g)^*$. By a slight abuse of language, usually we won't write explicitly these isomorphisms $\epsilon_{g,f}$, and we will write $g^*\circ f^* = (f\circ g)^*$. Since they are uniquely defined this will cause no ambiguity.} \end{example}
Now we will define the concept of stack. First we have to choose a Grothendieck topology on $(Sch/S)$, either the \'etale or the fppf topology. Later on, when we define algebraic stack, the \'etale topology will lead to the definition of a Deligne-Mumford stack (\cite{DM}, \cite{Vi}, \cite{E}), and the fppf to an Artin stack (\cite{La}). For the moment we will give a unified description.
In the following definition, to simplify notation we denote by $X|_i$ the pullback $f^*_i X$ where $f_i:U_i \to U$ and $X$ is an object of
${\mathcal{F}}(U)$, and by $X_i|_{ij}$ the pullback $f^*_{ij,i} X_i$ where $f_{ij,i}:U_i \times_U U_j \to U_i$ and $X_i$ is an object of ${\mathcal{F}}(U_i)$. We will also use the obvious variations of this convention, and will simplify the notation using remark \ref{B2}.
\begin{definition}[Stack] \label{sheaf} A stack is a sheaf of groupoids, i.e. a 2-functor (presheaf) that satisfies the following sheaf axioms. Let $\{U_i \to U\}_{i\in I}$ be a covering of $U$ in the site $({Sch}/S)$. Then \begin{enumerate} \item (Glueing of morphisms) If $X$ and $Y$ are two objects of
${\mathcal{F}}(U)$, and $\varphi_i:X|_i\to Y|_i$ are morphisms such that $\varphi_i|_{ij}=\varphi_j|_{ij}$, then there exists a morphism $\eta:X\to Y$ such that $\eta|_i=\varphi_i$.
\item (Monopresheaf) If $X$ and $Y$ are two objects of ${\mathcal{F}}(U)$,
and $\varphi:X\to Y$, $\psi:X \to Y$ are morphisms such that $\varphi|_i=\psi|_i$, then $\varphi = \psi$.
\item \label{sheafthree} (Glueing of objects) If $X_i$ are objects of ${\mathcal{F}}(U_i)$ and
$\varphi_{ij}:X_j|_{ij}
\to X_i|_{ij}$ are morphisms satisfying the cocycle condition
$\varphi_{ij}|_{ijk}\circ \varphi_{jk}|_{ijk}= \varphi_{ik}|_{ijk}$, then there exists an object $X$ of ${\mathcal{F}}(U)$ and $\varphi_i:X|_i
\stackrel{\cong}\to X_i$ such that $\varphi_{ji}\circ \varphi_i|_{ij}=
\varphi_j|_{ij}$. \end{enumerate} \end{definition}
Let's stop for a moment and look at how we have enlarged the category of schemes by defining the category of stacks. We can draw the following diagram $$ \xymatrix{ & {Algebraic\,Stacks} \ar[r] & {Stacks} \ar[r] &{Presheaves\,of\, groupoids} \\ {{Sch}/S} \ar[r] \ar[ur] & {Algebraic\,Spaces} \ar[r] \ar[u] &{Spaces} \ar[r] \ar[u] &{Presheaves\,of\,sets} \ar[u] } $$ where $A \to B$ means that the category $A$ is a subcategory of $B$. Recall that a presheaf of sets is just a functor from $({Sch}/S)$ to the category $({Sets})$, a presheaf of groupoids is just a 2-functor to the 2-category $({groupoids})$. A sheaf (for example a space or a stack) is a presheaf that satisfies the sheaf axioms (these axioms are slightly different in the context of categories or 2-categories), and if this sheaf satisfies some geometric conditions (that we haven't yet specified), we will have an algebraic stack or algebraic space.
\subsection{Stacks as categories. Groupoids} \label{subsgroupoids}
There is an alternative way of defining a stack. From this point of view a stack will be a category, instead of a functor.
\begin{definition} A category over $({Sch}/S)$ is a category ${\mathcal{F}}$ and a covariant functor $p^{}_{\mathcal{F}}:{\mathcal{F}} \to ({Sch}/S)$. If $X$ is an object (resp. $\phi$ is a morphism) of ${\mathcal{F}}$, and $p^{}_{\mathcal{F}}(X)=B$ (resp. $p^{}_{\mathcal{F}}(\phi)=f$), then we say that $X$ lies over $B$ (resp. $\phi$ lies over $f$). \end{definition}
\begin{definition}[Groupoid] A category ${\mathcal{F}}$ over $({Sch}/S)$ is called a category fibered on groupoids (or just groupoid) if \begin{enumerate} \item \label{groupoidone} For every $f:B'\to B$ in $({Sch}/S)$ and every object $X$ with $p^{}_{\mathcal{F}}(X)=B$, there exists at least one object $X'$ and a morphism $\phi:X'\to X$ such that $p^{}_{\mathcal{F}}(X')=B'$ and $p^{}_{\mathcal{F}}(\phi)=f$. $$ \xymatrix{ {X'} \ar@{-->}[r]^{\phi} \ar@{-->}[d] & {X} \ar[d] \\ {B'} \ar[r]^{f} & {B} } $$
\item \label{groupoidtwo} For every diagram $$\xymatrix{ {X_3} \ar[rr]^{\psi} \ar[dd]& & {X_1} \ar[dd] \\
& {X_2} \ar[ru]^{\phi} \ar[dd] \\ {B_3} \ar '[r][rr]^{f\circ f'} \ar[rd]_{f'} & & {B_1} \\
& {B_2} \ar[ru]_f } $$ (where $p^{}_{\mathcal{F}}(X_i)=B_i$, $p^{}_{\mathcal{F}}(\phi)=f$, $p^{}_{\mathcal{F}}(\psi)=f\circ f'$), there exists a unique $\varphi:X_3 \to X_2$ with $\psi=\phi\circ \varphi$ and $p^{}_{\mathcal{F}}(\varphi)=f'.$ \end{enumerate} \end{definition}
Condition \ref{groupoidtwo} implies that the object $X'$ whose existence is asserted in condition \ref{groupoidone} is unique up to canonical isomorphism. For each $X$ and $f$ we choose once and for all such an $X'$ and call it $f^*X$. Another consequence of condition \ref{groupoidtwo} is that $\phi$ is an isomorphism if and only if $p^{}_{\mathcal{F}}(\phi)=f$ is an isomorphism.
Let $B$ be an object of $({Sch}/S)$. We define ${\mathcal{F}}(B)$, the fiber of ${\mathcal{F}}$ over $B$, to be the subcategory of ${\mathcal{F}}$ whose objects lie over $B$ and whose morphisms lie over $\operatorname{id}_B$. It is a groupoid.
The association $B\to {\mathcal{F}}(B)$ in fact defines a presheaf of groupoids (note that the 2-isomorphisms $\epsilon_{f,g}$ required in the definition of presheaf of groupoids are well defined thanks to condition \ref{groupoidtwo}). Conversely, given a presheaf of groupoids ${\mathcal{G}}$ on $(Sch/S)$, we can define the category ${\mathcal{F}}$ whose objects are pairs $(B,X)$ where $B$ is an object of $({Sch}/S)$ and $X$ is an object of ${\mathcal{G}}(B)$, and whose morphisms $(B',X')\to (B,X)$ are pairs $(f,\alpha)$ where $f:B'\to B$ is a morphism in $(Sch/S)$ and $\alpha:f^* X \to X'$ is an isomorphism, where $f^*={\mathcal{G}}(f)$. This gives the relationship between both points of view.
\begin{example}[Stable curves] \label{defstablecurve} \textup{\cite[def 1.1]{DM}. Let $B$ be an $S$-scheme. Let $g\geq 2$. A stable curve of genus $g$ over $B$ is a proper and flat morphism $\pi:C \to B$ whose geometric fibers are reduced, connected and one-dimensional schemes $C_b$ such that \begin{enumerate} \item The only singularities of $C_b$ are ordinary double points. \item If $E$ is a non-singular rational component of $C_b$, then $E$ meets the other components of $C_b$ in at least 3 points. \item $\dim H^1({\mathcal{O}}_{C_b})=g$. \end{enumerate} Condition 2 is imposed so that the automorphism group of $C_b$ is finite. A stable curve over $B$ should be thought of as a family of stable curves (over $S$) parametrized by $B$.}
\textup{ We define $\overline{\mathcal{M}}_g$, the groupoid over $S$ whose objects are stable curves over $B$ and whose morphisms are Cartesian diagrams $$ \xymatrix{ {X'} \ar[r] \ar[d] & {X} \ar[d] \\ {B'} \ar[r] & {B}} $$} \end{example}
\begin{example}[Quotient by group action] \label{quotient} \textup{\cite[1.3.2]{La}, \cite[example 4.8]{DM}, \cite[example 2.2]{E}. Let $X$ be an $S$-scheme (assume all schemes are Noetherian), and $G$ an affine flat group $S$-scheme acting on the right on $X$. We define the groupoid $[X/G]$ whose objects are principal $G$-bundles $\pi:E\to B$ together with a $G$-equivariant morphism $f:E\to X$. A morphism is a Cartesian diagram $$ \xymatrix{ {E'} \ar[r]^{p} \ar[d]_{\pi'} & {E} \ar[d]_{\pi} \\ {B'} \ar[r] & {B}} $$ such that $f\circ p= f'$.}
\end{example}
\begin{definition}[Stack] A stack is a groupoid that satisfies \begin{enumerate} \item (\textit{Prestack}). For all scheme $B$ and pair of objects $X$, $Y$ of ${\mathcal{F}}$ over $B$, the contravariant functor $$ \begin{array}{rccc} \operatorname{Iso}_B(X,Y): & ({Sch}/B)& \longrightarrow & ({Sets}) \\
& (f:B'\to B) & \longmapsto & \operatorname{Hom}(f^*X,f^*Y) \end{array} $$ is a sheaf on the site $(Sch/B).$
\item Descent data is effective (this is just condition \ref{sheafthree} in the definition \ref{sheaf} of sheaf). \end{enumerate} \end{definition}
\begin{example} \textup{ If $G$ is smooth and affine, the groupoid $[X/G]$ is a stack \cite[2.4.2]{La}, \cite[example 7.17]{Vi}, \cite[prop 2.2]{E}. Then also $\overline{\mathcal{M}}_g$ (cf. example \ref{defstablecurve}) is a stack, because it is isomorphic to a quotient stack of a subscheme of a Hilbert scheme by ${PGL(N)}$ \cite[thm 3.2]{E}, \cite{DM}. The groupoid ${\mathcal{M}}$ defined in example \ref{defvectorbundle} is also a stack \cite[2.4.4]{La}.} \end{example} From now on we will mainly use this approach. Now we will give some definitions for stacks.
\textbf{Morphisms of stacks}. A morphism of stacks $f:{\mathcal{F}} \to {\mathcal{G}}$ is a functor between the categories, such that $p_{\mathcal{G}} \circ f= p^{}_{\mathcal{F}}$. A commutative diagram of stacks is a diagram $$ \xymatrix{
& {{\mathcal{G}}} \ar[rd]^g \ar@2[d]^{\alpha} \\ {{\mathcal{F}}} \ar[ur]^f \ar[rr]_h & &{{\mathcal{H}}} } $$ such that $\alpha:g\circ f \to h$ is an isomorphism of functors. If $f$ is an equivalence of categories, then we say that the stacks ${\mathcal{F}}$ and ${\mathcal{G}}$ are isomorphic. We denote by $\operatorname{Hom}_S({\mathcal{F}},{\mathcal{G}})$ the category whose objects are morphisms of stacks and whose morphisms are natural transformations.
\textbf{Stack associated to a scheme}. Given a scheme $U$ over $S$, consider the category $({Sch}/U)$. Define the functor $p^{}_U:({Sch}/U)\to ({Sch}/S)$ which sends the $U$-scheme $f:B\to U$ to the composition $B\stackrel{f}\to U \to S$. Then $({Sch}/U)$ becomes a stack. Usually we denote this stack also by $U$. From the point of view of 2-functors, the stack associated to $U$ is the 2-functor that for each scheme $B$ gives the category whose objects are the elements of the set $\operatorname{Hom}_S(B,U)$, and whose only morphisms are identities.
We say that a stack is represented by a scheme $U$ when it is isomorphic to the stack associated to $U$. We have the following very useful lemmas:
\begin{lemma} \label{nonrepresentable} If a stack has an object with an automorphism other than the identity, then the stack cannot be represented by a scheme. \end{lemma}
\begin{proof} In the definition of stack associated with a scheme we see that the only automorphisms are identities. \end{proof}
\begin{lemma} \label{yoneda} \textup{\cite[7.10]{Vi}}. Let ${\mathcal{F}}$ be a stack and $U$ a scheme. The functor $$ u:\operatorname{Hom}_S(U,{\mathcal{F}}) \to {\mathcal{F}}(U) $$ that sends a morphism of stacks $f:({Sch}/U)\to {\mathcal{F}}$ to $f(\operatorname{id}_U)$ is an equivalence of categories. \end{lemma}
\begin{proof} Follows from the Yoneda lemma. \end{proof}
This useful observation that we will use very often means that an object of ${\mathcal{F}}$ that lies over $U$ is equivalent to a morphism (of stacks) from $U$ to ${\mathcal{F}}$.
\textbf{Fiber product}. Given two morphisms $f_1:{\mathcal{F}}_1\to {\mathcal{G}}$, $f_2:{\mathcal{F}}_2\to {\mathcal{G}}$, we define a new stack ${\mathcal{F}}_1 \times_{\mathcal{G}} {\mathcal{F}}_2$ (with projections to ${\mathcal{F}}_1$ and ${\mathcal{F}}_2$) as follows. The objects are triples $(X_1,X_2,\alpha)$ where $X_1$ and $X_2$ are objects of ${\mathcal{F}}_1$ and ${\mathcal{F}}_2$ that lie over the same scheme $U$, and $\alpha: f_1(X_1)\to f_2(X_2)$ is an isomorphism in ${\mathcal{G}}$ (equivalently, $p_{\mathcal{G}}(\alpha)=\operatorname{id}_U$). A morphism from $(X_1,X_2,\alpha)$ to $(Y_1,Y_2,\beta)$ is a pair $(\phi_1,\phi_2)$ of morphisms $\phi_i:X_i\to Y_i$ that lie over the same morphism of schemes $f:U \to V$, and such that $\beta \circ f_1(\phi_1) = f_2(\phi_2)\circ \alpha$. The fiber product satisfies the usual universal property.
\textbf{Representability}. A stack ${\mathcal{X}}$ is said to be representable by an algebraic space (resp. scheme) if there is an algebraic space (resp. scheme) $X$ such that the stack associated to $X$ is isomorphic to ${\mathcal{X}}$. If ``P'' is a property of algebraic spaces (resp. schemes) and ${\mathcal{X}}$ is a representable stack, we will say that ${\mathcal{X}}$ has ``P'' iff $X$ has ``P''.
A morphism of stacks $f:{\mathcal{F}}\to {\mathcal{G}}$ is said to be representable if for all objects $U$ in $({Sch}/S)$ and morphisms $U\to {\mathcal{G}}$, the fiber product stack $U\times_{\mathcal{G}} {\mathcal{F}}$ is representable by an algebraic space. Let ``P'' be a property of morphisms of schemes that is local in nature on the target for the topology chosen on $({Sch}/S)$ (\'etale or fppf), and it is stable under arbitrary base change. For instance: separated, quasi-compact, unramified, flat, smooth, \'etale, surjective, finite type, locally of finite type,... Then we say that $f$ has ``P'' if for every $U\to {\mathcal{G}}$, the pullback $U\times_{\mathcal{G}} {\mathcal{F}} \to U$ has ``P'' (\cite[p.17]{La}, \cite[p.98]{DM}).
\textbf{Diagonal}. Let $\Delta_{\mathcal{F}}:{\mathcal{F}} \to {\mathcal{F}}\times_S {\mathcal{F}}$ be the obvious diagonal morphism. A morphism from a scheme $U$ to ${\mathcal{F}} \times_S {\mathcal{F}}$ is equivalent to two objects $X_1$, $X_2$ of ${\mathcal{F}}(U)$. Taking the fiber product of these we have $$ \xymatrix{ {\operatorname{Iso}_U(X_1,X_2)} \ar[r] \ar[d]& {{\mathcal{F}}} \ar[d]^{\Delta_{\mathcal{F}}} \\ {U} \ar[r]^{(X_1,X_2)} & {{\mathcal{F}}\times_S {\mathcal{F}}}} $$ hence the group of automorphisms of an object is encoded in the diagonal morphism.
\begin{proposition} \label{diag} \textup{\cite[cor 2.12]{La}, \cite[prop 7.13]{Vi}}. The following are equivalent \begin{enumerate} \item The morphism $\Delta_{\mathcal{F}}$ is representable.
\item The stack $\operatorname{Iso}_U(X_1,X_2)$ is representable for all $U$, $X_1$ and $X_2$.
\item For all schemes $U$, every morphism $U\to {\mathcal{F}}$ is representable.
\item For all schemes $U$, $V$ and morphisms $U\to {\mathcal{F}}$ and $V\to {\mathcal{F}}$, the fiber product $U\times_{\mathcal{F}} V$ is representable. \end{enumerate} \end{proposition}
\begin{proof}
The implications $1 \Leftrightarrow 2$ and $3 \Leftrightarrow 4$ follow easily from the definitions.
$1 \Rightarrow 4$) Assume that $\Delta_{\mathcal{F}}$ is representable. We have to show that $U\times_{\mathcal{F}} V$ is representable for any $f:U\to {\mathcal{F}}$ and $g:V\to {\mathcal{F}}$. Check that the following diagram is Cartesian $$ \xymatrix{ {U\times_{\mathcal{F}} V} \ar[r] \ar[d]& {{\mathcal{F}}}\ar[d]^{\Delta_{\mathcal{F}}}\\ U\times_S V \ar[r]^{f\times g} &{{\mathcal{F}}\times_S {\mathcal{F}}}} $$ Then $U\times_{\mathcal{F}} V$ is representable.
$1 \Leftarrow 4$) First note that the Cartesian diagram defined by $h:U\to {\mathcal{F}}\times_S {\mathcal{F}}$ and $\Delta_{\mathcal{F}}$ factors as follows $$ \xymatrix{ {U\times^{}_{{\mathcal{F}}\times_S {\mathcal{F}}} {\mathcal{F}}} \ar[r] \ar[d] & {U\times^{}_{\mathcal{F}} U} \ar[r] \ar[d] &{{\mathcal{F}}} \ar[d] \\ {U} \ar[r]^{\Delta_U} & {U\times_S U} \ar[r] & {{\mathcal{F}}\times_S {\mathcal{F}}}} $$ Both squares are Cartesian and by hypothesis $U\times_{\mathcal{F}} U$ is representable, then $U\times^{}_{{\mathcal{F}}\times_S {\mathcal{F}}} {\mathcal{F}}$ is also representable.
\end{proof}
\subsection{Algebraic stacks} \label{subsalgebraic}
Now we will define the notion of algebraic stack. As we have said, first we have to choose a topology on $({Sch}/S)$. Depending on whether we choose the \'etale or fppf topology, we get different notions.
\begin{definition}[Deligne-Mumford stack] Let $({Sch}/S)$ be the category of $S$-schemes with the \'etale topology. Let ${\mathcal{F}}$ be a stack. Assume \begin{enumerate} \item The diagonal $\Delta_{\mathcal{F}}$ is representable, quasi-compact and separated.
\item There exists a scheme $U$ (called atlas) and an \'etale surjective morphism $u:U\to {\mathcal{F}}$. \end{enumerate} Then we say that ${\mathcal{F}}$ is a Deligne-Mumford stack. \end{definition}
The morphism of stacks $u$ is representable because of proposition \ref{diag} and the fact that the diagonal $\Delta_{\mathcal{F}}$ is representable. Then the notion of \'etale is well defined for $u$. In \cite{DM} this was called an algebraic stack. In the literature, algebraic stack usually refers to Artin stack (that we will define later). To avoid confusion, we will use ``algebraic stack'' only when we refer in general to both notions, and we will use ``Deligne-Mumford'' or ``Artin'' stack when we want to be specific.
Note that the definition of Deligne-Mumford stack is the same as the definition of algebraic space, but in the context of stacks instead of spaces. As with schemes, a stack such that the diagonal $\Delta_{\mathcal{F}}$ is quasi-compact and separated is called quasi-separated. We always assume this technical condition, as it is usually done both with schemes and algebraic spaces.
Sometimes it is difficult to find explicitly an \'etale atlas, and the following proposition is useful.
\begin{proposition} \label{represen} \textup{\cite[thm 4.21]{DM}, \cite{E}}. Let ${\mathcal{F}}$ be a stack over the \'etale site $({Sch}/S)$. Assume \begin{enumerate} \item The diagonal $\Delta_{\mathcal{F}}$ is representable, quasi-compact, separated and \textbf{unramified}.
\item There exists a scheme $U$ of finite type over $S$ and a \text{smooth} surjective morphism $u:U\to {\mathcal{F}}$. \end{enumerate} Then ${\mathcal{F}}$ is a Deligne-Mumford stack. \end{proposition}
Now we define the analogue for the fppf topology \cite{Ar2}.
\begin{definition}[Artin stack] Let $({Sch}/S)$ be the category of $S$-schemes with the fppf topology. Let ${\mathcal{F}}$ be a stack. Assume \begin{enumerate} \item The diagonal $\Delta_{\mathcal{F}}$ is representable, quasi-compact and separated.
\item There exists a scheme $U$ (called atlas) and a smooth (hence locally of finite type) and surjective morphism $u:U\to {\mathcal{F}}$. \end{enumerate} Then we say that ${\mathcal{F}}$ is an Artin stack. \end{definition}
For propositions analogous to proposition \ref{represen} see \cite[4]{La}.
\begin{proposition} \textup{\cite[prop 7.15]{Vi}, \cite[lemme 3.3]{La}}. If ${\mathcal{F}}$ is a Deligne-Mumford (resp. Artin) stack, then the diagonal $\Delta_{\mathcal{F}}$ is unramified (resp. finite type). \end{proposition}
Recall that $\Delta_{\mathcal{F}}$ is unramified (resp. finite type) if for every scheme $B$ and objects $X$, $Y$ of ${\mathcal{F}}(B)$, the morphism $\operatorname{Iso}_B(X,Y)\to B$ is unramified (resp. finite type). If $B=\operatorname{Spec} S$ and $X=Y$, then this means that the automorphism group of $X$ is discrete and reduced for a Deligne-Mumford stack, and it is just of finite type for an Artin stack.
\begin{example}[Vector bundles] \label{quotconstruction} \textup{The stack ${\mathcal{M}}$ is an Artin stack, locally of finite type \cite[4.14.2.1]{La}. The atlas is constructed as follows. Let $P^H_{r,c_i}$ be the Hilbert polynomial corresponding to sheaves on $X$ with rank $r$ and Chern classes $c_i$. Let $\operatorname{Quot}({\mathcal{O}}(-m)^{\oplus N}, P^H_{r,c_i})$ be the Quot scheme parametrizing quotients of sheaves on $X$ \begin{eqnarray} \label{quotmap} {\mathcal{O}}(-m)^{\oplus N} \twoheadrightarrow V, \end{eqnarray} where $V$ is a coherent sheaf on $X$ with Hilbert polynomial $P^H_{r,c_i}$. Let $R_{N,m}$ be the subscheme corresponding to quotients (\ref{quotmap}) such that $V$ is a vector bundle with $H^p(V(m))=0$ for $p>0$ and the morphism (\ref{quotmap}) induces an isomorphism on global sections $$ H^0({\mathcal{O}})^{\oplus N} \stackrel{\cong}{\longrightarrow} H^0(V(m)). $$ The scheme $R^{}_{N,m}$ has a universal vector bundle, induced from the universal bundle of the Quot scheme, and then there is a morphism $u^{}_{N,m}: R^{}_{N,m}\to {\mathcal{M}}$. Since $H$ is ample, for every vector bundle $V$, there exist integers $N$ and $m$ such that $R_{N,m}$ has a point whose corresponding quotient is $V$, and then if we take the infinite disjoint union of these morphisms we get a surjective morphism $$ u: \Big( \coprod_{N,m>0} R^{}_{N,m}\Big) \longrightarrow {\mathcal{M}}. $$ It can be shown that this morphism is smooth, and then it gives an atlas. Each scheme $R_{N,m}$ is of finite type, so the union is locally of finite type, which in turn implies that the stack ${\mathcal{M}}$ is locally of finite type. }
\end{example}
\begin{example}[Quotient by group action] \label{atlasquotient} \textup{The stack $[X/G]$ is an Artin stack \cite[4.14.1.1]{La}. If $G$ is smooth, an atlas is defined as follows (for more general $G$, see \cite[4.14.1.1]{La}): Take the trivial principal $G$-bundle $X\times G$ over $X$, and let the map $f:X\times G \to X$ be the action of the group. This defines an object of $[X/G](X)$, and by lemma \ref{yoneda}, it defines a morphism $u:X\to [X/G]$. It is representable, because if $B$ is a scheme and $g:B\to [X/G]$ is the morphism corresponding to a principal $G$-bundle $E$ over $B$ with an equivariant morphism $f:E\to X$, then $B\times_{[X/G]}X$ is isomorphic to the scheme $E$, and in fact we have a Cartesian diagram $$ \xymatrix{ {E} \ar[r]^{f} \ar[d]_{\pi} & {X} \ar[d]_{u} \\ {B} \ar[r]^{g} & {[X/G].} } $$ The morphism $u$ is surjective and smooth because $\pi$ is surjective and smooth for every $g$ (if $G$ is not smooth, but only separated, flat and of finite presentation, then $u$ is not an atlas, but if we apply the representation theorem \cite[thm 4.1]{La}, we conclude that there is a smooth atlas).}
\textup{ If either $G$ is \'etale over $S$ (\cite[example 4.8]{DM}) or the stabilizers of the geometric points of $X$ are finite and reduced (\cite[example 7.17]{Vi}), then $[X/G]$ is a Deligne-Mumford stack. In particular $\overline{\mathcal{M}}_g$ is a Deligne-Mumford stack.}
\textup{Note that if the action is not free, then $[X/G]$ is not representable by lemma \ref{nonrepresentable}. On the other hand, if there is a scheme $Y$ such that $X \to Y$ is a principal $G$-bundle, then $[X/G]$ is represented by $Y$.}
\textup{Let $G$ be a reductive group acting on $X$. Let $H$ be an ample line bundle on $X$, and assume that the action is polarized. Let $X^s$ and $X^{ss}$ be the subschemes of stable and semistable points. Let $Y=X{/\!\!/} G$ be the GIT quotient. Recall that there is a good quotient $X^{ss}\to Y$, and that the restriction to the stable part $X^s\to Y$ is a principal bundle. There is a natural morphism $[X^{ss}/G] \to X^{ss}{/\!\!/} G$. By the previous remark, the restriction $[X^s/G] \to Y^s$ is an isomorphism of stacks.}
\textup{If $X=S$ (with trivial action of $G$ on $S$), then $[S/G]$ is denoted $BG$, the classifying groupoid of principal $G$-bundles.}
\end{example}
\subsection{Algebraic stacks as groupoid spaces} \label{subsgroupspaces}
We will introduce a third equivalent definition of stack. First consider a category $C$. Let $U$ be the set of objects and $R$ the set of morphisms. The axioms of a category give us four maps of sets $$ \xymatrix{ {R} \ar@<0.5ex>[r]^{s} \ar@<-0.5ex>[r]_{t} & {U} \ar[r]^{e} & {R}} \qquad \xymatrix{ \save[]+<-5.5ex,-0.55ex>*{R\times^{}_{s,U,t} R}\restore \ar[r]^{m} & {R}} $$ where $s$ and $t$ give the source and target for each morphism, $e$ gives the identity morphism, and $m$ is composition of morphisms. If the category is a groupoid then we have a fifth morphism $$ \xymatrix{{R} \ar[r]^i & {R}} $$ that gives the inverse. These maps satisfy \begin{enumerate} \item $s\circ e= t\circ e = \operatorname{id}_R$, $s\circ i=t$, $t\circ i=s$, $s\circ m=s\circ p_2$, $t\circ m=t\circ p_1$.
\item \textit{Associativity}. $m\circ (m\times \operatorname{id}_R)=m\circ (\operatorname{id}_R \times m)$.
\item \textit{Identity}. Both compositions $$ R=R\times^{}_{s,U} U=U\times^{}_{U,t}R \xymatrix{ {}\ar@<0.5ex>[r]^{\operatorname{id}_R \times e} \ar@<-0.5ex>[r]_{e \times \operatorname{id}_R} & {}} R\times^{}_{s,U,t} R \xymatrix{ {}\ar[r]^{m} & {R}} $$ are equal to the identity map on $R$.
\item \textit{Inverse}. $m\circ (i\times \operatorname{id}_R)= e\circ s$, $m\circ (\operatorname{id}_R \times i)= e\circ t$. \end{enumerate}
\begin{definition}[Groupoid space] \textup{\cite[1.3.3]{La}, \cite[pp. 668--669]{DM}}. A groupoid space is a pair of spaces (sheaves of sets) $U$, $R$, with five morphisms $s$, $t$, $e$, $m$, $i$ with the same properties as above. \end{definition}
\begin{definition} \textup{\cite[1.3.3]{La}}. Given a groupoid space, define the groupoid over $({Sch}/S)$ as the category $[R,U]'$ over $({Sch}/S)$ whose objects over the scheme $B$ are elements of the set $U(B)$ and whose morphisms over $B$ are elements of the set $R(B)$. Given $f:B' \to B$ we define a functor $f^*: [R,U]'(B) \to [R,U]'(B')$ using the maps $U(B) \to U(B')$ and $R(B) \to R(B')$. \end{definition}
The groupoid $[R,U]'$ is in general only a prestack. We denote by $[R,U]$ the associated stack. The stack $[R,U]$ can be thought of as the sheaf associated to the presheaf of groupoids $B \mapsto [R,U]'(B)$ (\cite[2.4.3]{La}).
\begin{example}[Quotient by group action] \textup{Let $X$ be a scheme and $G$ an affine group scheme. We denote by the same letters the associated spaces (functors of points). We take $U=X$ and $R=X\times G$. Using the group action we can define the five morphisms ($t$ is the action of the group, $s=p_1$, $m$ is the product in the group, $e$ is defined with the identity of $G$, and $i$ with the inverse).}
\textup{ The objects of $[X\times G,X]'(B)$ are morphisms $f:B\to X$. Equivalently, they are trivial principal $G$-bundles $B\times G$ over $B$ and a map $B\times G \to X$ defined as the composition of the action of $G$ and $f$. The stack $[X\times G,X]$ is isomorphic to $[X/G]$.} \end{example}
\begin{example}[Algebraic stacks] \textup{ Let $R$, $U$ be a groupoid space such that $R$ and $U$ are algebraic spaces, locally of finite presentation (equivalently locally of finite type if $S$ is noetherian). Assume that the morphisms $s$, $t$ are flat, and that $\delta=(s,t):R\to U\times_S U$ is separated and quasi-compact. Then $[R,U]$ is an Artin stack, locally of finite type (\cite[cor 4.7]{La}).}
\textup{ In fact, any Artin stack ${\mathcal{F}}$ can be defined in this fashion. The algebraic space $U$ will be the atlas of ${\mathcal{F}}$, and we set $R=U\times_{\mathcal{F}} U$. The morphisms $s$ and $t$ are the two projections, $i$ exchanges the factors, $e$ is the diagonal, and $m$ is defined by projection to the first and third factor.} \end{example}
Let $\delta:R\to U\times_S U$ be an equivalence relation in the category of spaces. One can define a groupoid space, and $[R,U]$ is to be thought of as the stack-theoretic quotient of this equivalence relation, as opposed to the quotient space, used for instance to define algebraic spaces (for more details and the definition of equivalence relation see appendix A).
\subsection{Properties of Algebraic Stacks} \label{subsproperties}
So far we have only defined scheme-theoretic properties for representable stacks and morphisms. We can define some properties for arbitrary algebraic stacks (and morphisms among them) using the atlas.
Let ``P'' be a property of schemes, local in nature for the smooth (resp. \'etale) topology. For example: regular, normal, reduced, of characteristic $p$,... Then we say that an Artin (resp. Deligne-Mumford) stack has ``P'' iff the atlas has ``P'' (\cite[p.25]{La}, \cite[p.100]{DM}).
Let ``P'' be a property of morphisms of schemes, local on source and target for the smooth (resp. \'etale) topology, i.e. for any commutative diagram $$ \xymatrix{ {X'} \ar[r]^{p} \ar[dr]_{f''}& {Y'\times_Y X} \ar[r]^{g'} \ar[d]_{f'} & {X} \ar[d]^{f} \\ & {Y'} \ar[r]^{g} & {Y} } $$ with $p$ and $g$ smooth (resp. \'etale) and surjective, $f$ has ``P'' iff $f''$ has ``P''. For example: flat, smooth, locally of finite type,... For the \'etale topology we also have: \'etale, unramified,... Then if $f:{\mathcal{X}} \to {\mathcal{Y}}$ is a morphism of Artin (resp. Deligne-Mumford) stacks, we say that $f$ has ``P'' iff for one (and then for all) commutative diagram of stacks $$ \xymatrix{ {X'} \ar[r]^{p} \ar[dr]_{f''}& {Y'\times_Y {\mathcal{X}}} \ar[r]^{g'} \ar[d]_{f'} & {{\mathcal{X}}} \ar[d]^{f} \\ & {Y'} \ar[r]^{g} & {{\mathcal{Y}}} } $$ where $X'$, $Y'$ are schemes and $p$, $g$ are smooth (resp. \'etale) and surjective, $f''$ has ``P'' (\cite[pp. 27-29]{La}).
For Deligne-Mumford stacks it is enough to find a commutative diagram $$ \xymatrix{ {X'} \ar[r]^{p} \ar[d]_{f''}& {{\mathcal{X}}} \ar[d]^{f} \\
{Y'} \ar[r]^{g} & {{\mathcal{Y}}} } $$ where $p$ and $g$ are \'etale and surjective and $f''$ has ``P''. Then it follows that $f$ has ``P'' (\cite[p. 100]{DM}).
Other notions are defined as follows.
\begin{definition}[Substack] \label{substack} \textup{\cite[def 2.5]{La}, \cite[p.102]{DM}}. A stack ${\mathcal{E}}$ is a substack of ${\mathcal{F}}$ if it is a full subcategory of ${\mathcal{F}}$ and \begin{enumerate} \item If an object $X$ of ${\mathcal{F}}$ is in ${\mathcal{E}}$, then all isomorphic objects are also in ${\mathcal{E}}$.
\item For all morphisms of schemes $f:U\to V$, if $X$ is in ${\mathcal{E}}(V)$, then $f^* X$ is in ${\mathcal{E}}(U)$.
\item Let $\{U_i \to U\}$ be a cover of $U$ in the site
$({Sch}/S)$. Then $X$ is in ${\mathcal{E}}$ iff $X|_i$ is in ${\mathcal{E}}$ for all $i$. \end{enumerate} \end{definition}
\begin{definition} \textup{\cite[def 2.13]{La}}. A substack ${\mathcal{E}}$ of ${\mathcal{F}}$ is called open (resp. closed, resp. locally closed) if the inclusion morphism ${\mathcal{E}} \to {\mathcal{F}}$ is \textbf{representable} and it is an open immersion (resp. closed immersion, resp. locally closed immersion). \end{definition}
\begin{definition}[Irreducibility] \textup{\cite[def 3.10]{La}, \cite[p.102]{DM}}. An algebraic stack ${\mathcal{F}}$ is irreducible if it is not the union of two distinct and nonempty proper closed substacks. \end{definition}
\begin{definition}[Separatedness] \textup{\cite[def 3.17]{La}, \cite[def 4.7]{DM}}. An algebraic stack ${\mathcal{F}}$ is separated, if the (representable) diagonal morphism $\Delta_{\mathcal{F}}$ is universally closed (and hence proper, because it is automatically separated and of finite type).
A morphism $f:{\mathcal{F}} \to {\mathcal{G}}$ of algebraic stacks is separated if for all $U \to {\mathcal{G}}$ with $U$ affine, $U\times_{\mathcal{G}} {\mathcal{F}}$ is a separated (algebraic) stack. \end{definition}
For Deligne-Mumford stacks, $\Delta_{\mathcal{F}}$ is universally closed iff it is finite. There is a valuative criterion of separatedness, similar to the criterion for schemes. Recall that by Yoneda lemma (lemma \ref{yoneda}), a morphism $f:U\to {\mathcal{F}}$ between a scheme and a stack is equivalent to an object in ${\mathcal{F}}(U)$. Then we will say that $\alpha$ is an isomorphism between two morphisms $f_1,f_2:U\to {\mathcal{F}}$ when $\alpha$ is an isomorphism between the corresponding objects of ${\mathcal{F}}(U)$.
\begin{proposition}[Valuative criterion of separatedness (stacks)] \textup{\cite[prop 3.19]{La}, \cite[thm 4.18]{DM}}. An algebraic stack ${\mathcal{F}}$ is separated (over $S$) if and only if the following holds. Let $A$ be a valuation ring with fraction field $K$. Let $g^{}_1:\operatorname{Spec} A\to {\mathcal{F}}$ and $g^{}_2:\operatorname{Spec} A \to {\mathcal{F}}$ be two morphisms such that: \begin{enumerate} \item $f_{p^{}_{\mathcal{F}}}\circ g^{}_1= f_{p^{}_{\mathcal{F}}}\circ g^{}_2$.
\item There exists an isomorphism $\alpha: g^{}_1|_{\operatorname{Spec} K}
\to g^{}_2|_{\operatorname{Spec} K}$. \end{enumerate} $$ \xymatrix{
& & {{\mathcal{F}}} \ar[d]^{p^{}_{{\mathcal{F}}}} \\ {\operatorname{Spec} K} \ar@(u,l)[rru] \ar[r]^{i} & {\operatorname{Spec} A} \ar@<0.5ex>[ru]^{g^{}_1} \ar@<-0.5ex>[ru]_{g^{}_2} \ar[r] & S } $$ then there exists an isomorphism (in fact unique) $\tilde\alpha:
g^{}_1\to g^{}_2$ that extends $\alpha$, i.e. $\tilde\alpha|_{\operatorname{Spec} K}=\alpha$. \end{proposition}
\begin{remark} \label{dvr} \textup{ It is enough to consider complete valuation rings $A$ with algebraically closed residue field \cite[3.20.1]{La}. If furthermore $S$ is locally Noetherian and ${\mathcal{F}}$ is locally of finite type, it is enough to consider discrete valuation rings $A$ \cite[3.20.2]{La}.} \end{remark}
\begin{example} \textup{The stack $BG$ won't be separated if $G$ is not proper over $S$ \cite[3.20.3]{La}, and since we assumed $G$ to be affine, this won't happen if it is not finite.}
\textup{In general the moduli stack of vector bundles ${\mathcal{M}}$ is not separated. It is easy to find families of vector bundles that contradict the criterion.}
\textup{The stack of stable curves $\overline{\mathcal{M}}_g$ is separated \cite[prop 5.1]{DM}.} \end{example}
The criterion for morphisms is more involved because we are working with stacks and we have to keep track of the isomorphisms.
\begin{proposition}[Valuative criterion of separatedness (morphisms)] \textup{\cite[prop 3.19]{La}} A morphism of algebraic stacks $f:{\mathcal{F}} \to {\mathcal{G}}$ is separated if and only if the following holds. Let $A$ be a valuation ring with fraction field $K$. Let $g^{}_1:\operatorname{Spec} A\to {\mathcal{F}}$ and $g^{}_2:\operatorname{Spec} A \to {\mathcal{F}}$ be two morphisms such that: \begin{enumerate} \item There exists an isomorphism $\beta: f\circ g^{}_1\to f\circ g^{}_2$.
\item There exists an isomorphism $\alpha: g^{}_1|_{\operatorname{Spec} K}
\to g^{}_2|_{\operatorname{Spec} K}$.
\item $f(\alpha)=\beta|_{\operatorname{Spec} K}$. \end{enumerate} then there exists an isomorphism (in fact unique) $\tilde\alpha:
g^{}_1\to g^{}_2$ that extends $\alpha$, i.e. $\tilde\alpha|_{\operatorname{Spec} K}=\alpha$ and $f(\tilde\alpha)=\beta$. \end{proposition}
Remark \ref{dvr} is also true in this case.
\begin{definition} \textup{\cite[def 3.21]{La}, \cite[def 4.11]{DM}}. An algebraic stack ${\mathcal{F}}$ is proper (over $S$) if it is separated and of finite type, and if there is a scheme $X$ proper over $S$ and a (representable) surjective morphism $X\to {\mathcal{F}}$.
A morphism ${\mathcal{F}}\to {\mathcal{G}}$ is proper if for any affine scheme $U$ and morphism $U\to {\mathcal{G}}$, the fiber product $U\times_{\mathcal{G}} {\mathcal{F}}$ is proper over $U$. \end{definition}
For properness we only have a satisfactory criterion for stacks (see \cite[prop 3.23 and conj 3.25]{La} for a generalization for morphisms).
\begin{proposition}[Valuative criterion of properness] \textup{\cite[prop 3.23]{La}, \cite[thm 4.19]{DM}}. Let ${\mathcal{F}}$ be a separated algebraic stack (over $S$). It is proper (over $S$) if and only if the following condition holds. Let $A$ be a valuation ring with fraction field $K$. For any commutative diagram $$ \xymatrix{
& & {{\mathcal{F}}} \ar[d]^{p^{}_{\mathcal{F}}} \\ {\operatorname{Spec} K} \ar[r]^{i} \ar[rru]^{g} & {\operatorname{Spec} A} \ar[r] & S } $$ there exists a finite field extension $K'$ of $K$ such that $g$ extends to $\operatorname{Spec}(A')$, where $A'$ is the integral closure of $A$ in $K'$. $$ \xymatrix{
& & {{\mathcal{F}}} \ar[dd]^{p^{}_{\mathcal{F}}} \\ {\operatorname{Spec} K'} \ar[rru]^{g\circ u} \ar[d]_{u} \ar[r] & {\operatorname{Spec} A'} \ar[d] \ar@{-->}[ru] \\ {\operatorname{Spec} K} \ar[r]^{i} & {\operatorname{Spec} A} \ar[r] & S } $$ \end{proposition}
\begin{example}[Stable curves] \textup{The Deligne-Mumford stack of stable curves $\overline{\mathcal{M}}_g$ is proper \cite[thm 5.2]{DM}}. \end{example}
\subsection{Points and dimension} \label{subspoints}
We will introduce the concept of point of an algebraic stack and dimension of a stack at a point. The reference for this is \cite[chapter 5]{La}.
\begin{definition} Let ${\mathcal{F}}$ be an algebraic stack over $S$. The set of points of ${\mathcal{F}}$ is the set of equivalence classes of pairs $(K,x)$, with $K$ a field over $S$ (i.e. a field with a morphism of schemes $\operatorname{Spec} K \to S$) and $x:\operatorname{Spec} K \to {\mathcal{F}}$ a morphism of stacks. Two pairs $(K',x')$ and $(K'',x'')$ are equivalent if there is a field $K$ extension of $K'$ and $K''$ and a commutative diagram $$ \xymatrix{ {\operatorname{Spec} K} \ar[r] \ar[d] & {\operatorname{Spec} K'} \ar[d]^{x'} \\ {\operatorname{Spec} K''} \ar[r]^{x''} & {\mathcal{F}} } $$ Given a morphism ${\mathcal{F}} \to {\mathcal{G}}$ of algebraic stacks and a point of ${\mathcal{F}}$, we define the image of that point in ${\mathcal{G}}$ by composition. \end{definition}
Every point of an algebraic stack is the image of a point of an atlas. To see this, given a point represented by $\operatorname{Spec} K \to {\mathcal{F}}$ and an atlas $X\to {\mathcal{F}}$, take any point $\operatorname{Spec} K' \to X\times_{\mathcal{F}} \operatorname{Spec} K$. The image of this point in $X$ maps to the given point.
To define the concept of dimension, recall that if $X$ and $Y$ are locally Noetherian schemes and $f:X\to Y$ is flat, then for any point $x\in X$ we have $$ \dim_x(X)= \dim_x(f) + \dim_{f(x)}(Y), $$ with $\dim_x(f)=\dim_x(X_{f(x)})$, where $X_y$ is the fiber of $f$ over $y$.
\begin{definition} Let $f:{\mathcal{F}}\to {\mathcal{G}}$ be a representable morphism, locally of finite type, between two algebraic stacks. Let $\xi$ be a point of ${\mathcal{F}}$. Let $Y$ be an atlas of ${\mathcal{G}}$. Take a point $x$ in the algebraic space $Y\times_{\mathcal{G}} {\mathcal{F}}$ that maps to $\xi$, $$ \xymatrix{ {Y\times_{\mathcal{G}} {\mathcal{F}}} \ar[r] \ar[d]_{\tilde f} & {\mathcal{F}} \ar[d]^{f} \\ {Y} \ar[r] & {\mathcal{G}} } $$ and define the dimension of the morphism $f$ at the point $\xi$ as $$ \dim_\xi(f)=\dim_x(\tilde f). $$ \end{definition}
It can be shown that this definition is independent of the choices made.
\begin{definition} Let ${\mathcal{F}}$ be a locally Noetherian algebraic stack and $\xi$ a point of ${\mathcal{F}}$. Let $u: X\to {\mathcal{F}}$ be an atlas, and $x$ a point of $X$ mapping to $\xi$. We define the dimension of ${\mathcal{F}}$ at the point $\xi$ as $$ \dim_\xi({\mathcal{F}})=\dim_x(X)-\dim_x(u). $$ The dimension of ${\mathcal{F}}$ is defined as $$ \dim({\mathcal{F}})=\operatorname{Sup}_{\xi} (\dim_\xi({\mathcal{F}})). $$ \end{definition}
Again, this is independent of the choices made.
\begin{example}[Quotient by group action] \textup{ Let $X$ be a smooth scheme of dimension $\dim(X)$ and $G$ a smooth group of dimension $\dim(G)$ acting on $X$. Let $[X/G]$ be the quotient stack defined in example \ref{quotient}. Using the atlas defined in example \ref{atlasquotient}, we see that $$ \dim[X/G]=\dim(X)-\dim(G). $$ Note that we haven't made any assumption on the action. In particular, the action could be trivial. The dimension of an algebraic stack can then be negative. For instance, the classifying stack $BG$ defined in example \ref{quotient} has dimension $\dim(BG)=-\dim(G)$. } \end{example}
\subsection{Quasi-coherent sheaves on stacks} \label{subssheaves}
\begin{definition} \textup{\cite[def 7.18]{Vi}, \cite[def 6.11, prop 6.16]{La}.} A quasi-coherent sheaf ${\mathcal{S}}$ on an algebraic stack ${\mathcal{F}}$ is the following set of data: \begin{enumerate}
\item For each morphism $X\to {\mathcal{F}}$ where $X$ is a scheme, a quasi-coherent sheaf ${\mathcal{S}}_X$ on $X$.
\item For each commutative diagram $$ \xymatrix{ {X} \ar[r]^f \ar[dr] & {Y} \ar[d] \\
& {{\mathcal{F}}} } $$ an isomorphism $\varphi^{}_f: {\mathcal{S}}_X \stackrel{\cong}{\longrightarrow} f^*{\mathcal{S}}_Y$, satisfying the cocycle condition, i.e. for any commutative diagram \begin{eqnarray} \label{sheaf2} \xymatrix{ {X} \ar[r]^{f} \ar[dr] & {Y} \ar[d] \ar[r]^{g}& {Z} \ar[dl] \\
& {{\mathcal{F}}} } \end{eqnarray} we have $\varphi^{}_{g\circ f} = \varphi^{}_f \circ f^* \varphi^{}_g$. \end{enumerate}
We say that ${\mathcal{S}}$ is coherent (resp. finite type, finite presentation, locally free) if ${\mathcal{S}}_X$ is coherent (resp. finite type, finite presentation, locally free) for all $X$.
A morphism of quasi-coherent sheaves $h:{\mathcal{S}} \to {\mathcal{S}}'$ is a collection of morphisms of sheaves $h^{}_X:{\mathcal{S}}^{}_X \to {\mathcal{S}}'_X$ compatible with the isomorphisms $\varphi$.
\end{definition}
\begin{remark}\textup{ Since a sheaf on a scheme can be obtained by glueing the restriction to an affine cover, it is enough to consider affine schemes.} \end{remark}
\begin{example}[Structure sheaf] \textup{ Let ${\mathcal{F}}$ be an algebraic stack. The structure sheaf ${\mathcal{O}}_{\mathcal{F}}$ is defined by taking $({\mathcal{O}}_{\mathcal{F}})_X={\mathcal{O}}_X$.} \end{example}
\begin{example}[Sheaf of differentials] \textup{ Let ${\mathcal{F}}$ be a Deligne-Mumford stack. To define the sheaf of differentials $\Omega_{\mathcal{F}}$, if $U\to {\mathcal{F}}$ is an \'etale morphism we set $(\Omega_{\mathcal{F}})_U=\Omega_U$, the sheaf of differentials of the scheme $U$. If $V \to {\mathcal{F}}$ is another \'etale morphism and we have a commutative diagram $$ \xymatrix{ {U} \ar[r]^f \ar[dr] & {V} \ar[d] \\
& {{\mathcal{F}}} } $$ then $f$ has to be \'etale, so there is a canonical isomorphism $\varphi^{}_f :\Omega_{U/S} \to f^* \Omega_{V/S}$, and these canonical isomorphisms satisfy the cocycle condition.}
\textup{Once we have defined $(\Omega_{\mathcal{F}})_U$ for \'etale morphisms $U\to {\mathcal{F}}$, we can extend the definition for any morphism $X\to {\mathcal{F}}$ with $X$ an arbitrary scheme as follows: take an (\'etale) atlas $U=\coprod U_i \to {\mathcal{F}}$. Consider the composition morphism $$ X\times_{\mathcal{F}} U \stackrel{p_2}{\longrightarrow} U \longrightarrow {\mathcal{F}}, $$ and define $(\Omega_{\mathcal{F}})_{X\times_{\mathcal{F}} U}=p^*_2\Omega_U$. The cocycle condition for $\Omega_{U_i}$ and \'etale descent implies that $(\Omega_{\mathcal{F}})_{X\times_{\mathcal{F}} U}$ descends to give a sheaf $(\Omega_{{\mathcal{F}}})_X$ on $X$. It is easy to check that this doesn't depend on the atlas $U$ used, and that given a commutative diagram like (\ref{sheaf2}), there are canonical isomorphisms $\varphi$ satisfying the cocycle condition.}
\end{example}
\begin{example}[Universal vector bundle] \textup{ Let ${\mathcal{M}}$ be the moduli stack of vector bundles on a scheme $X$ defined in \ref{bbund}. The universal vector bundle $V$ on ${\mathcal{M}} \times X$ is defined as follows:}
\textup{ Let $B$ be a scheme and $f=(f_1,f_2): B \to {\mathcal{M}} \times X$ a morphism. By lemma \ref{yoneda}, the morphism $f_1:B\to {\mathcal{M}}$ is equivalent to a vector bundle $W$ on $B \times X$. We define $V_B$ as ${\tilde f}^*W$, where ${\tilde f}=(\operatorname{id}_B, f_2):B\to B\times X$. Let $$ \xymatrix{ {B'} \ar[r]^g \ar[dr]_{f'} & {B} \ar[d]^{f} \\
& {{\mathcal{M}} \times X} } $$ be a commutative diagram. Recall that this means that there is an isomorphism $\alpha:f \circ g \to f'$, and looking at the projection to ${\mathcal{M}}$ we have an isomorphism $\alpha^{}_1:f^{}_1\circ g \to f'_1$. Using lemma \ref{yoneda}, $f^{}_1\circ g$ and $f'_1$ correspond respectively to the vector bundles $(g\times \operatorname{id}_X)^*W$ and $W'$ on $B'\times X$, and (again by lemma \ref{yoneda}) $\alpha^{}_1$ gives an isomorphism between them. It is easy to check that these isomorphisms satisfy the cocycle condition for diagrams of the form (\ref{sheaf2}). }
\end{example}
\section{Vector bundles: moduli stack vs. moduli scheme} \label{sectionversus}
In this section we will compare, in the context of vector bundles, the new approach of stacks versus the standard approach of moduli schemes via geometric invariant theory (GIT).
Fix a scheme $X$, a positive integer $r$ and classes $c_i\in H^{2i}(X)$. All vector bundles over $X$ in this section will have rank $r$ and Chern classes $c_i$. We will also consider vector bundles on products $B\times X$ where $B$ is a scheme. We will always assume that these vector bundles are flat over $B$, and that the restrictions to the slices $\{p\}\times X$ are vector bundles with rank $r$ and Chern classes $c_i$. Fix also a polarization on $X$. All references to stability or semistability of vector bundles will mean Gieseker stability with respect to this fixed polarization.
Recall that the functor $\underline{\Bund}^{s}$ (resp. $\underline{\Bund}^{ss}$) is the functor from $(Sch/S)$ to $(Sets)$ that for each scheme $B$ gives the set of
\textit{equivalence} classes of vector bundles over $B\times X$, flat over $B$ and such that the restrictions $V|_b$ to the slices $\{b\}\times X$ are stable (resp. semistable) vector bundles with fixed rank and Chern classes, where two vector bundles $V$ and $V'$ on $B\times X$ are considered \textit{equivalent} if there is a line bundle $L$ on $B$ such that $V$ is isomorphic to $V'\otimes p^*_B L$.
\begin{theorem} There are schemes $\mathfrak{M}^{s}$ and $\mathfrak{M}^{ss}$, called moduli schemes, corepresenting the functors ${\underline{\Bund}}^{s}$ and ${\underline{\Bund}}^{ss}$. \end{theorem}
The moduli scheme $\mathfrak{M}^{ss}$ is constructed using the Quot schemes introduced in example \ref{quotconstruction} (for a detailed exposition of the construction, see \cite{HL}). Since the set of \textit{semistable} vector bundles is bounded, we can choose once and for all $N$ and $m$ (depending only on the Chern classes and rank) with the property that for any semistable vector bundle $V$ there is a point in $R=R_{N,m}$ whose corresponding quotient is isomorphic to $V$.
The scheme $R$ parametrizes vector bundles $V$ on $X$ together with a basis of $H^0(V(m))$ (up to multiplication by scalar). Recall that $N=h^0(V(m))$. There is an action of ${GL(N)}$ on $R$, corresponding to change of basis, but since two bases that only differ by a scalar give the same point on $R$, this ${GL(N)}$ action factors through ${PGL(N)}$. Then the moduli scheme $\mathfrak{M}^{ss}$ is defined as the GIT quotient $R {/\!\!/} {PGL(N)}$.
The closed points of $\mathfrak{M}^{ss}$ correspond to S-equivalence classes of vector bundles, so if there is a strictly semistable vector bundle, the functor ${\underline{\Bund}}^{ss}$ is not representable.
Now we will compare this scheme with the moduli stack ${\mathcal{M}}$ defined on example \ref{bbund}. We will also consider the moduli stack ${\mathcal{M}}^{s}$ defined in the same way, but with the extra requirement that the vector bundles should be stable. The moduli stack ${\mathcal{M}}^{s}$ is a substack (definition \ref{substack}) of ${\mathcal{M}}$. The following are some of the differences between the moduli scheme and the moduli stack:
\begin{enumerate}
\item The stack ${\mathcal{M}}$ parametrizes all vector bundles, but the scheme $\mathfrak{M}^{ss}$ only parametrizes semistable vector bundles.
\item From the point of view of the scheme $\mathfrak{M}^{ss}$, we identify two vector bundles if they are S-equivalent. On the other hand, from the point of view of the moduli stack, two vector bundles are identified only if they are isomorphic.
\item Let $V$ and $V'$ be two families of vector bundles parametrized by a scheme $B$, i.e. two vector bundles (flat over $B$) on $B\times X$. If there is a line bundle $L$ on $B$ such that $V$ is isomorphic to $V'\otimes p^*_B L$, then from the point of view of the moduli scheme, $V$ and $V'$ are identified as being the same family. On the other hand, from the point of view of the moduli stack, $V$ and $V'$ are identified only if they are isomorphic as vector bundles on $B\times X$.
\item The subscheme $\mathfrak{M}^{s}$ corresponding to stable vector bundles is sometimes representable by a scheme, but the moduli stack ${\mathcal{M}}^{s}$ is never representable by a scheme. To see this, note that any vector bundle has automorphisms different from the identity (multiplication by scalars) and apply lemma \ref{nonrepresentable}.
\end{enumerate}
Now we will restrict our attention to stable bundles, i.e. to the scheme $\mathfrak{M}^s$ and the stack ${\mathcal{M}}^s$. For stable bundles the notions of $S$-equivalence and isomorphism coincide, so the points of $\mathfrak{M}^s$ correspond to isomorphism classes of vector bundles. Consider $R^{s}\subset R$, the subscheme corresponding to stable bundles. There is a map $\pi :R^s \to \mathfrak{M}^s=R^s/{PGL(N)}$, and $\pi$ is in fact a principal ${PGL(N)}$-bundle (this is a consequence of Luna's \'etale slice theorem).
\begin{remark}[Universal bundle on moduli scheme]\textup{ The scheme $\mathfrak{M}^s$ represents the functor $\underline{\Bund}^s$ if there is a universal family. Recall that a universal family for this functor is a vector bundle $E$ on $\mathfrak{M}^s \times X$ such that the isomorphism class of
$E|_{p\times X}$ is the isomorphism class corresponding to the point $p\in \mathfrak{M}^s$, and for any family of vector bundles $V$ on $B\times X$ there is a morphism $f:B\to \mathfrak{M}^s$ and a line bundle $L$ on $B$ such that $V \otimes p^*_B L$ is isomorphic to $(f\times \operatorname{id})^*E$. Note that if $E$ is a universal family, then $E\otimes p^*_{\mathfrak{M}^s}L$ will also be a universal family for any line bundle $L$ on $\mathfrak{M}^s$.}
\textup{ The universal bundle for the Quot scheme gives a universal family $\widetilde V$ on $R^s\times X$, but this family doesn't always descend to give a universal family on the quotient $\mathfrak{M}^s$.}
\textup{ Let $X\stackrel{G}\longrightarrow Y$ be a principal $G$-bundle. A vector bundle $V$ on $X$ descends to $Y$ if the action of $G$ on $X$ can be lifted to $V$. In our case, if a certain numerical criterion involving $r$ and $c_i$ is satisfied (if $X$ is a smooth curve this criterion is $\operatorname{gcd}(r,c_1)=1$), then we can find a line bundle $L$ on $R^s$ such that the ${PGL(N)}$ action on $R^s$ can be lifted to $\widetilde V \otimes p^*_{R^s}L$, and then this vector bundle descends to give a universal family on $\mathfrak{M}^s \times X$. But in general the best that we can get is a universal family on an \'etale cover of $\mathfrak{M}^s$.} \end{remark}
Recall from example \ref{atlasquotient} that there is a morphism $[R^{ss}/{PGL(N)}] \to \mathfrak{M}^{ss}$, and that the morphism $[R^{s}/{PGL(N)}] \to \mathfrak{M}^{s}$ is an isomorphism of stacks.
\begin{proposition} \label{versus}
There is a commutative diagram of stacks $$ \xymatrix{ {[R^{s}/{GL(N)}]} \ar[rr]^{q} \ar[d]_{g}^{\simeq}& &{[R^{s}/{PGL(N)}]} \ar[d]^{h}_{\simeq} \\ {{\mathcal{M}}^{s}} \ar[rr]_{\varphi} & &{\;\mathfrak{M}^{s},} } $$ where $g$ and $h$ are isomorphisms of stacks, but $q$ and $\varphi$ are not. If we change ``stable'' by ``semistable'' we still have a commutative diagram, but the corresponding morphism $h^{ss}$ is not an isomorphism of stacks.
\end{proposition}
\begin{proof} The morphism $\varphi$ is the composition of the natural morphism ${\mathcal{M}}^{s} \to \underline{\Bund}^{s}$ (sending each category to the set of isomorphism classes of objects) and the morphism $\underline{\Bund}^{s} \to \mathfrak{M}^{s}$ given by the fact that the scheme $\mathfrak{M}^{s}=R^s{/\!\!/} {PGL(N)}$ corepresents the functor.
The morphism $h$ was constructed in example \ref{quotient}.
The key ingredient needed to define $g$ is the fact that the ${GL(N)}$ action on the Quot scheme lifts to the universal bundle, i.e. the universal bundle on the Quot scheme has a ${GL(N)}$-linearization. Let $$ \xymatrix{ {\widetilde{B}} \ar[r]^{f} \ar[d] & R^{ss} \\ {B} } $$ be an object of $[R^{ss}/{GL(N)}]$. Since $R^{ss}$ is a subscheme of a Quot scheme, it carries the restriction of the universal bundle, and this universal bundle has a ${GL(N)}$-linearization. Let $\widetilde E$ be the vector bundle on $\widetilde B\times X$ defined by the pullback of this universal bundle by $f\times \operatorname{id}$. Since $f$ is ${GL(N)}$-equivariant, $\widetilde E$ is also ${GL(N)}$-linearized. Since $\widetilde B \times X \to B\times X$ is a principal bundle, the vector bundle $\widetilde E$ descends to give a vector bundle $E$ on $B\times X$, i.e. an object of ${\mathcal{M}}^{ss}$. Let $$ \xymatrix{ & & R^{ss}\\ {\widetilde{B}} \ar[r]_{\phi} \ar[d] \ar[rru]^{f} & {\widetilde{B}'} \ar[d] \ar[ru]_{f'} \\ {B} \ar@{=}[r] & {B} } $$ be a morphism in $[R^{ss}/{GL(N)}]$. Consider the vector bundles $\widetilde E$ and $\widetilde E'$ defined as before. Since $f'\circ \phi=f$, we get an isomorphism of $\widetilde E$ with $(\phi \times \operatorname{id})^* \widetilde E'$. Furthermore this isomorphism is ${GL(N)}$-equivariant, and then it descends to give an isomorphism of the vector bundles $E$ and $E'$ on $B\times X$, and we get a morphism in ${\mathcal{M}}^{ss}$.
To prove that this gives an equivalence of categories, we construct a functor $\overline g$ from ${\mathcal{M}}^{ss}$ to $[R^{ss}/{GL(N)}]$. Given a vector bundle $E$ on $B\times X$, let $q:\widetilde B \to B$ be the ${GL(N)}$-principal bundle associated with the vector bundle ${p^{}_B}_* E$ on $B$. Let $\widetilde E=(q\times \operatorname{id})^*E$ be the pullback of $E$ to $\widetilde B\times X$. It has a canonical ${GL(N)}$-linearization because it is defined as a pullback by a principal ${GL(N)}$-bundle. The vector bundle ${p^{}_{\widetilde B}}_* \widetilde E$ is canonically isomorphic to the trivial bundle ${\mathcal{O}}^N_{\widetilde B}$, and this isomorphism is ${GL(N)}$-equivariant, so we get an \textit{equivariant} morphism $\widetilde B\to R^{ss}$, and hence an object of $[R^{ss}/{GL(N)}]$.
If we have an isomorphism between two vector bundles $E$ and $E'$ on $B\times X$, it is easy to check that it induces an isomorphism between the associated objects of $[R^{ss}/{GL(N)}]$.
It is easy to check that there are natural isomorphisms of functors
$g\circ \overline g \cong \operatorname{id}$ and $\overline g\circ g \cong \operatorname{id}$, and then $g$ is an equivalence of categories.
The morphism $q$ is defined using the following lemma, with $G={GL(N)}$, $H$ the subgroup consisting of scalar multiples of the identity, $\overline G={PGL(N)}$ and $Y=R^{ss}$.
\end{proof}
\begin{lemma} Let $Y$ be an $S$-scheme and $G$ an affine flat group $S$-scheme, acting on $Y$ on the right. Let $H$ be a normal closed subgroup of $G$. Assume that $\overline G=G/H$ is affine. If $H$ acts trivially on $Y$, then there is a morphism of stacks $$ [Y/G]\longrightarrow [Y/\overline G]. $$ If $H$ is nontrivial, then this morphism is not faithful, so it is not an isomorphism. \end{lemma}
\begin{proof} Let $$ \xymatrix{ {E} \ar[r]^{f} \ar[d]^{\pi} & Y \\ {B} } $$ be an object of $[Y/G]$. There is a scheme $E/H$ such that $\pi$ factors $$ E \stackrel{q}\longrightarrow E/H \stackrel{\pi'}\longrightarrow B. $$ To construct $E/H$, note that there is a local \'etale cover $U_i$ of $B$ and isomorphisms $\phi_i:\pi^{-1}(U_i)\to U_i\times G$, with transition functions $\psi_{ij}=\phi^{}_i \circ \phi^{-1}_j$. Since these isomorphisms are $G$-equivariant, they descend to give isomorphisms $\overline{\psi}_{ij}:U_j\times G/H \to U_i\times G/H$, and using these transition functions we get $E/H$. This construction shows that $\pi'$ is a principal $\overline G$-bundle. Furthermore, $q$ is also a principal $H$-bundle (\cite[example 4.2.4]{HL}), and in particular it is a categorical quotient.
Since $f$ is $H$-invariant, there is a morphism $\overline f: E/H \to Y$, and this gives an object of $[Y/\overline G]$.
If we have a morphism in $[Y/G]$, given by a morphism $g:E\to E'$ of principal $G$-bundles over $B$, it is easy to see that it descends (since $g$ is equivariant) to a morphism $\overline{g}:E/H \to E'/H$, giving a morphism in $[Y/\overline G]$.
This morphism is not faithful, since the automorphism $E\stackrel{\cdot z}{\longrightarrow} E$ given by multiplication on the right by a nontrivial element $z\in H$ is sent to the identity automorphism $E/H \to E/H$, and then $\operatorname{Hom}(E,E)\to \operatorname{Hom}(E/H,E/H)$ is not injective.
\end{proof}
If $X$ is a smooth curve, then it can be shown that ${\mathcal{M}}$ is a smooth stack of dimension $r^2(g-1)$, where $r$ is the rank and $g$ is the genus of $X$. In particular, the open substack ${\mathcal{M}}^{ss}$ is also smooth of dimension $r^2(g-1)$, but the moduli scheme $\mathfrak{M}^{ss}$ is of dimension $r^2(g-1)+1$ and might not be smooth. Proposition \ref{versus} explains the difference in the dimensions (at least on the smooth part): we obtain the moduli stack by taking the quotient by the group ${GL(N)}$, of dimension $N^2$, but the moduli scheme is obtained by a quotient by the group ${PGL(N)}$, of dimension $N^2-1$. The moduli scheme $\mathfrak{M}^{ss}$ is not smooth in general because in the strictly semistable part of $R^{ss}$ the action of ${PGL(N)}$ is not free. On the other hand, the smoothness of a stack quotient doesn't depend on the freeness of the action of the group.
\section{Appendix A: Grothendieck topologies, sheaves and algebraic spaces} \label{grothendiecktopologies}
The standard reference for Grothendieck topologies is SGA (\textit{S\'e\-mi\-naire de G\'eo\-m\'e\-trie Alg\'e\-bri\-que}). For an introduction see \cite{T} or \cite{MM}. For algebraic spaces, see \cite{K} or \cite{Ar1}.
An open cover in a topological space $U$ can be seen as a family of morphisms in the category of topological spaces $f_i:U_i \to U$, with the property that $f_i$ is an open inclusion and the union of their images is $U$, i.e.\ we are choosing a class of morphisms (open inclusions) in the category of topological spaces. A Grothendieck topology on an arbitrary category is basically a choice of a class of morphisms that play the role of ``open sets''. A morphism $f:V\to U$ in this class is to be thought of as an ``open set'' in the object $U$. The concept of intersection of open sets, for instance, can be replaced by the fiber product: the ``intersection'' of $f_1:U_1\to U$ and $f_2:U_2\to U$ is $f_{12}:U_1\times _U U_2 \to U$.
A category with a Grothendieck topology is called a site. We will consider two topologies on $({Sch}/S)$.
\textbf{fppf topology}. Let $U$ be a scheme. Then a cover of $U$ is a finite collection of morphisms $\{f_i:U_i\to U\}_{i\in I}$ such that each $f_i$ is a finitely presented flat morphism (for Noetherian schemes, this is equivalent to flat and finite type), and $U$ is the (set theoretic) union of the images of $f_i$. In other words, $\coprod U_i \to U$ is \textit{``fid\`element plat de pr\'esentation finie''}.
\textbf{\'Etale topology}. Same definition, but substituting flat by \'etale.
A presheaf of sets on $({Sch}/S)$ is a contravariant functor $F$ from $({Sch}/S)$ to $({Sets})$. Choose a topology on $({Sch}/S)$. We say that $F$ is a sheaf (or an $S$-space) with respect to that topology if for every cover $\{f_i:U_i\to U\}_{i\in I}$ in the topology the following two axioms are satisfied: \begin{enumerate} \item \textit{(Mono)} Let $X$ and $Y$ be two elements of $F(U)$. If
$X|_i=Y|_i$ for all $i$, then $X=Y$.
\item \textit{(Glueing)} Let $X_i$ be an object of $F(U_i)$ for each $i$ such that
$X_i|_{ij}=X_j|_{ij}$, then there exists $X \in F(U)$ such that
$X|_i=X_i$ for each $i$. \end{enumerate}
We have used the following notation: if $X\in F(U)$, then $X|_i$ is the element of $F(U_i)$ given by $F(f_i)(X)$, and if $X_i\in F(U_i)$, then $X_i|_{ij}$ is the element of $F(U_{ij})$ given by $F(f_{ij,i})(X_i)$ where $f_{ij,i}:U_i\times_U U_j \to U_i$ is the pullback of $f_j$.
We can define morphisms of $S$-spaces as morphisms of sheaves (natural transformation of functors with the obvious conditions). Note that a scheme can be viewed as an $S$-space via its functor of points, and a morphism between two such $S$-spaces is equivalent to a scheme morphism between the schemes (by the Yoneda embedding lemma), then the category of $S$-schemes is a full subcategory of the category of $S$-spaces.
\textbf{Equivalence relation and quotient space}. An equivalence relation in the category of $S$-spaces consists of two $S$-spaces $R$ and $U$ and a monomorphism of $S$-spaces $$ \delta:R \to U \times_S U $$ such that for all $S$-scheme $B$, the map $\delta(B):R(B)\to U(B)\times U(B)$ is the graph of an equivalence relation between sets. A quotient $S$-space for such an equivalence relation is by definition the sheaf cokernel of the diagram $$ \xymatrix{ {R} \ar@<0.5ex>[r]^{p_2\circ \delta} \ar@<-0.5ex>[r]_{p_1\circ \delta} & {U}} $$
\begin{definition} \textup{\cite[0]{La}}. An $S$-space $F$ is called an algebraic space if it is the quotient $S$-space for an equivalence relation such that $R$ and $U$ are $S$-schemes, $p_1\circ \delta$, $p_2\circ \delta$ are \'etale (morphisms of $S$-schemes), and $\delta$ is a quasi-compact morphism (of $S$-schemes). \end{definition}
Roughly speaking, an algebraic space is a quotient of a scheme by an \'etale equivalence relation. The following is an equivalent definition.
\begin{definition} \textup{\cite[def 1.1]{K}}. An $S$-space $F$ is called an algebraic space if there exists a scheme $U$ (atlas) and a morphism of $S$-spaces $u:U\to F$ such that \begin{enumerate} \item (The morphism $u$ is \'etale) For any $S$-scheme $V$ and morphism $V \to F$, the (sheaf) fiber product $U\times_F V$ is representable by a scheme, and the map $U\times_F V\to V$ is an \'etale morphism of schemes. \item (Quasi-separatedness) The morphism $U\times_F U \to U\times_S U$ is quasi-compact. \end{enumerate} \end{definition}
We recover the first definition by taking $R=U\times_F U$. Then roughly speaking, we can also think of an algebraic space as ``something'' that looks locally in the \'etale topology like an affine scheme, in the same sense that a scheme is something that looks locally in the Zariski topology like an affine scheme.
Algebraic spaces are used, for instance, to give algebraic structure to certain complex manifolds (for instance Moishezon manifolds) that are not schemes, but can be realized as algebraic spaces. All smooth algebraic spaces of dimension 1 and 2 are actually schemes. An example of a smooth algebraic space of dimension 3 that is not a scheme can be found in \cite{H}.
But the \'etale topology is useful even if we are only interested in schemes. The idea is that the \'etale topology is finer than the Zariski topology, and in many situations it is ``fine enough'' to do the analogue of the manipulations that can be done with the analytic topology of complex manifolds. As an example, consider the affine complex line $\operatorname{Spec}(\mathbb{C}[x])$, and take a (closed) point $x_0$ different from $0$. Assume that we want to define the function $\sqrt{x}$ in a neighborhood of $x_0$. In the analytic topology we only need to take a neighborhood small enough so that it doesn't contain a loop that goes around the origin, then we choose one of the branches (a sign) of the square root. In the Zariski topology this cannot be done, because all open sets are too large (have loops going around the origin, so the sign of the square root will change, and $\sqrt{x}$ will be multivalued). But take the 2:1 \'etale map $V= \operatorname{Spec}(\mathbb{C}[y,x,x^{-1}]/(y^2-x)) \to \operatorname{Spec}(\mathbb{C}[x])$. The function $\sqrt{x}$ can certainly be defined on $V$, it is just equal to the function $y$, so it is in this sense that we say that the \'etale topology is finer: $V$ is a ``small enough open subset'' because the square root can be defined on it.
\section{Appendix B: 2-categories}
In this section we recall the notions of 2-category and 2-functor. A 2-category $\mathfrak{C}$ consists of the following data \cite{Hak}:
\begin{enumerate} \item [(i)] A class of objects $\operatorname{ob}\mathfrak{C}$ \item [(ii)] For each pair $X$, $Y \in \operatorname{ob}\mathfrak{C}$, a category $\operatorname{Hom}(X,Y)$ \item [(iii)] \textit{horizontal composition of 1-morphisms and 2-morphisms}. For each triple $X$, $Y$, $Z \in \operatorname{ob}\mathfrak{C}$, a functor $$ \mu_{X,Y,Z}:\operatorname{Hom}(X,Y) \times \operatorname{Hom}(Y,Z) \to \operatorname{Hom} (X,Z) $$ \end{enumerate} with the following conditions \begin{enumerate} \item [(i')] \textit{(Identity 1-morphism)} For each object $X\in \operatorname{ob}\mathfrak{C}$, there exists an object $\operatorname{id}_X\in \operatorname{Hom}(X,X)$ such that $$ \mu_{X,X,Y}(\operatorname{id}_X,\;)=\mu_{X,Y,Y}(\;,\operatorname{id}_Y)=\operatorname{id}_{\operatorname{Hom}(X,Y)}, $$ where $\operatorname{id}_{\operatorname{Hom}(X,Y)}$ is the identity functor on the category $\operatorname{Hom}(X,Y)$ \item[(ii')] \textit{(Associativity of horizontal compositions)} For each quadruple $X$, $Y$, $Z$, $T\in \operatorname{ob}\mathfrak{C}$, $$ \mu_{X,Z,T}\circ (\mu_{X,Y,Z}\times \operatorname{id}_{\operatorname{Hom}(Z,T)})= \mu_{X,Y,T}\circ (\operatorname{id}_{\operatorname{Hom}(X,Y)}\times\mu_{Y,Z,T}) $$ \end{enumerate}
The example to keep in mind is the 2-category $\mathfrak{Cat}$ of categories. The objects of $\mathfrak{Cat}$ are categories, and for each pair $X$, $Y$ of categories, $\operatorname{Hom}(X,Y)$ is the category of functors between $X$ and $Y$.
Note that the main difference between a 1-category (a usual category) and a 2-category is that $\operatorname{Hom}(X,Y)$, instead of being a set, is a category.
Given a 2-category, an object $f$ of the category $\operatorname{Hom}(X,Y)$ is called a 1-morphism of $\mathfrak{C}$, and is represented with a diagram $$ \xymatrix { {\bullet} \ar[r]^f \save[]+<0ex,2.5ex>*{X}\restore & {\bullet}\save[]+<0ex,2.5ex>*{Y}\restore} $$ and a morphism $\alpha$ of the category $\operatorname{Hom}(X,Y)$ is called a 2-morphism of $\mathfrak{C}$, and is represented as $$ \xymatrix { {\bullet} \ar @(ur,ul)[rr]^f_{}="f" \ar @(dr,dl)[rr]_{f'}^{}="fp" \save[]+<0ex,2.5ex>*{X}\restore & &{\bullet} \save[]+<0ex,2.5ex>*{Y}\restore \ar @2^{\alpha} "f";"fp"} $$ Now we will rewrite the axioms of a 2-category using diagrams. \begin{enumerate} \item \textit{(Composition of 1-morphisms)} Given a diagram $$ \xymatrix {{\bullet} \ar[r]^f \save[]+<0ex,2.5ex>*{X}\restore & {\bullet} \ar[r]^g \save[]+<0ex,2.5ex>*{Y}\restore & {\bullet} \save[]+<0ex,2.5ex>*{Z}\restore} \quad\text{there exists}\quad \xymatrix {{\bullet} \ar[r]^{g\circ f} \save[]+<0ex,2.5ex>*{X}\restore & {\bullet}\save[]+<0ex,2.5ex>*{Z}\restore} $$ (this is (iii) applied to objects) and this composition is associative: $(h\circ g) \circ f= h\circ (g\circ f)$ (this is (ii') applied to objects).
\item \textit{(Identity for 1-morphisms)} For each object $X$ there is a 1-morphism $\operatorname{id}_X$ such that $f\circ \operatorname{id}_X =\operatorname{id}_Y \circ f=f$ for every $f:X\to Y$ (this is (i')).
\item \label{three} \textit{(Vertical composition of 2-morphisms)} Given a diagram $$ \xymatrix
{{\bullet} \ar @(ur,ul)[rr]^f_{}="f" \ar [rr]|g^{}="g"_{}="g2" \ar @(dr,dl)[rr]_h^{}="h" \save[]+<0ex,2.5ex>*{X}\restore & &{\bullet} \save[]+<0ex,2.5ex>*{Y}\restore \ar @2^{\alpha} "f";"g" \ar @2^{\beta} "g2";"h"} \quad\text{there exists}\quad \xymatrix { {\bullet} \ar @(ur,ul)[rr]^f_{}="f" \ar @(dr,dl)[rr]_h^{}="g" \save[]+<0ex,2.5ex>*{X}\restore & &{\bullet} \save[]+<0ex,2.5ex>*{Y}\restore \ar @2^{\beta\circ\alpha} "f";"g"} $$ and this composition is associative $(\gamma\circ\beta)\circ\alpha = \gamma\circ(\beta\circ\alpha)$.
\item \textit{(Horizontal composition of 2-morphisms)} Given a diagram $$ \xymatrix { {\bullet} \ar @(ur,ul)[rr]^f_{}="f" \ar @(dr,dl)[rr]_{f'}^{}="fp" \save[]+<0ex,2.5ex>*{X}\restore & &{\bullet} \save[]+<0ex,2.5ex>*{Y}\restore
\ar @(ur,ul)[rr]^{g}_{}="g" \ar @(dr,dl)[rr]_{g'}^{}="gp"
& &{\bullet} \save[]+<0ex,2.5ex>*{Z}\restore \ar @2^{\alpha} "f";"fp" \ar @2^{\beta} "g";"gp"} \quad\text{there exists}\quad \xymatrix { {\bullet} \ar @(ur,ul)[rrr]^{g\circ f}_{}="gf" \ar @(dr,dl)[rrr] _{g'\circ f'}^{}="gpfp" \save[]+<0ex,2.5ex>*{X}\restore & & &{\bullet} \save[]+<0ex,2.5ex>*{Z}\restore \ar @2^{\beta\ast\alpha} "gf";"gpfp"} $$ (this is (iii) applied to morphisms) and it is associative $(\gamma\ast \beta)\ast\alpha=\gamma\ast(\beta\ast\alpha)$ (this is (ii') applied to morphisms).
\item \textit{(Identity for 2-morphisms)} For every 1-morphism $f$ there is a 2-morphism $\operatorname{id}_f$ such that $\alpha\circ\operatorname{id}_f=\operatorname{id}_g\circ\alpha= \alpha$ for every 2-morphism $\alpha:f\to g$ (this and item \ref{three} are (ii)). We have $\operatorname{id}_g \ast \operatorname{id}_f=\operatorname{id}_{g\circ f}$ (this means that $\mu_{X,Y,Z}$ respects the identity).
\item \textit{(Compatibility between horizontal and vertical composition of 2-morphisms)} Given a diagram $$ \xymatrix
{{\bullet} \ar @(ur,ul)[rr]^f_{}="f" \ar [rr]|{f'}^{}="f1"_{}="f2" \ar @(dr,dl)[rr]_{f''}^{}="fpp" \save[]+<0ex,2.5ex>*{X}\restore & &
{\bullet} \ar @(ur,ul)[rr]^g_{}="g" \ar [rr]|{g'}^{}="g1"_{}="g2" \ar @(dr,dl)[rr]_{g''}^{}="gpp" \save[]+<0ex,2.5ex>*{Y}\restore & &{\bullet} \save[]+<0ex,2.5ex>*{Z}\restore \ar @2^{\alpha} "f";"f1" \ar @2^{\alpha'} "f2";"fpp" \ar @2^{\beta} "g";"g1" \ar @2^{\beta'} "g2";"gpp"} $$ then $(\beta'\circ \beta)\ast(\alpha'\circ \alpha)=(\beta'\ast\alpha') \circ(\beta\ast\alpha)$ (this is (iii) applied to morphisms). \end{enumerate} Two objects $X$ and $Y$ of a 2-category are called equivalent if there exist two 1-morphisms $f:X\to Y$, $g:Y\to X$ and two 2-isomorphisms (invertible 2-morphism) $\alpha:g\circ f \to \operatorname{id}_X$ and $\beta:f\circ g \to \operatorname{id}_Y$.
A commutative diagram of 1-morphisms in a 2-category is a diagram $$ \xymatrix{
& {\bullet} \ar[rd]^g \save[]+<0ex,2.5ex>*{Y}\restore
\ar @2[d]^{\alpha} \\ {\bullet} \ar[ru]^f \ar[rr]_{h}
\save[]-<3ex,0ex>*{X}\restore & & {\bullet} \save[]+<3ex,0ex>*{Z}\restore} $$ such that $\alpha:g\circ f \to h$ is a 2-isomorphism.
\begin{remark} \textup{ Since 2-functors only respect composition of 1-morphisms up to a 2-isomorphism (condition 3), sometimes they are called pseudofunctors or lax functors.} \end{remark}
\begin{remark} \textup{ Note that we don't require $g\circ f=h$ to say that the diagram is commutative, but just require that there is a 2-isomorphism between them. This is the reason why 2-categories are used to describe stacks.} \end{remark}
On the other hand, a diagram of 2-morphisms will be called commutative only if the compositions are actually equal. Now we will define the concept of covariant 2-functor (a contravariant 2-functor is defined in a similar way).
A covariant 2-functor $F$ between two 2-categories $\mathfrak{C}$ and $\mathfrak{C'}$ is a law that for each object $X$ in $\mathfrak{C}$ gives an object $F(X)$ in $\mathfrak{C'}$. For each 1-morphism $f:X\to Y$ in $\mathfrak{C}$ gives a 1-morphism $F(f):F(X)\to F(Y)$ in $\mathfrak{C'}$, and for each 2-morphism $\alpha:f\Rightarrow g$ in $\mathfrak{C}$ gives a 2-morphism $F(\alpha):F(f)\Rightarrow F(g)$ in $\mathfrak{C'}$, such that \begin{enumerate} \item \textit{(Respects identity 1-morphism)} $F(\operatorname{id}_X)=\operatorname{id}_{F(X)}$.
\item \textit{(Respects identity 2-morphism)} $F(\operatorname{id}_f)=\operatorname{id}_{F(f)}$.
\item \label{twoisom} \textit{(Respects composition of 1-morphism up to a 2-isomorphism)} For every diagram $$ \xymatrix {{\bullet} \ar[r]^f \save[]+<0ex,2.5ex>*{X}\restore & {\bullet} \ar[r]^g \save[]+<0ex,2.5ex>*{Y}\restore & {\bullet} \save[]+<0ex,2.5ex>*{Z}\restore} $$ there exists a 2-isomorphism $\epsilon_{g,f}:F(g)\circ F(f) \to F(g\circ f)$ $$ \xymatrix{
& {\bullet} \ar[rd]^{F(g)} \save[]+<0ex,2.5ex>*{F(Y)}\restore
\ar @2[d]^{\epsilon_{g,f}} \\ {\bullet} \ar[ru]^{F(f)} \ar[rr]_{F(g\circ f)}
\save[]-<3ex,0ex>*{F(X)}\restore & & {\bullet} \save[]+<3ex,0ex>*{F(Z)}\restore} $$ \begin{enumerate} \item $\epsilon_{f,\operatorname{id}_X}=\epsilon_{\operatorname{id}_Y,f}=\operatorname{id}_{F(f)}$
\item $\epsilon$ \textit{ is associative}. The following diagram is commutative $$ \xymatrix {F(h)\circ F(g)\circ F(f) \ar@2[rr]^{\epsilon_{h,g} \times \operatorname{id}}
\ar@2[d]_{\operatorname{id} \times \epsilon_{g,f}} & & F(h\circ g)\circ F(f) \ar@2[d]^{\epsilon_{h\circ g,f}} \\ F(h)\circ F(g\circ f) \ar@2[rr]^{\epsilon_{h,g\circ f}} & & F(h\circ g\circ f)} $$ \end{enumerate}
\item \textit{(Respects vertical composition of 2-morphisms)} For every pair of 2-morphisms $\alpha:f \to f'$, $\beta:g \to g'$, we have $F(\beta\circ \alpha)=F(\beta)\circ F(\alpha)$.
\item \label{last} \textit{(Respects horizontal composition of 2-morphisms)} For every pair of 2-morphisms $\alpha:f \to f'$, $\beta:g \to g'$, the following diagram commutes $$ \xymatrix {F(g)\circ F(f) \ar@2[rr]^{F(\beta)\ast F(\alpha)}
\ar@2[d]_{\epsilon_{g,f}} & & F(g')\circ F(f') \ar@2[d]^{\epsilon_{g',f'}} \\ F(g\circ f) \ar@2[rr]^{F(\beta\ast\alpha)} & & F(g'\circ f')} $$ \end{enumerate} By a slight abuse of language, condition \ref{last} is usually written as $F(\beta)\ast F(\alpha)=F(\beta\ast \alpha)$. Note that strictly speaking this equality doesn't make sense, because the sources (and the targets) don't coincide, but if we chose once and for all the 2-isomorphisms $\epsilon$ of condition \ref{twoisom}, then there is a unique way of making sense of this equality.
\begin{remark} \label{B2} \textup{ In the applications to stacks, the isomorphism $\epsilon_{g,f}$ of item \ref{twoisom} is canonically defined, and by abuse of language we will say that $F(g)\circ F(f)= F(g\circ f)$, instead of saying that they are isomorphic.} \end{remark}
Given a 1-category $C$ (a usual category), we can define a 2-category: we just have to make the set $\operatorname{Hom}(X,Y)$ into a category, and we do this just by defining the unit morphisms for each element.
On the other hand, given a 2-category $\mathfrak{C}$ there are two ways of defining a 1-category. We have to make each category $\operatorname{Hom}(X,Y)$ into a set. The naive way is just to take the set of objects of $\operatorname{Hom}(X,Y)$, and then we obtain what is called the underlying category of $\mathfrak{C}$ (see \cite{Hak}). This has the problem that a 2-functor $F:\mathfrak{C} \to \mathfrak{C'}$ is not in general a functor of the underlying categories (because in item \ref{twoisom} we only require the composition of 1-morphisms to be respected up to 2-isomorphism).
The best way of constructing a 1-category from a 2-category is to define the set of morphisms between the objects $X$ and $Y$ as the set of isomorphism classes of objects of $\operatorname{Hom}(X,Y)$: two objects $f$ and $g$ of $\operatorname{Hom}(X,Y)$ are isomorphic if there exists a 2-isomorphism $\alpha:f \Rightarrow g$ between them. We call the category obtained in this way the 1-category associated to $\mathfrak{C}$. Note that a 2-functor between 2-categories then becomes a functor between the associated 1-categories.
\textbf{Acknowledgments.} This article is based on a series of lectures that I gave in February 1999 in the Geometric Langlands programme seminar of the Tata Institute of Fundamental Research. First of all, I would like to thank N. Nitsure for proposing me to give these lectures. Most of my understanding on stacks comes from conversations with N. Nitsure and C. Sorger.
I would also like to thank T.R. Ramadas for encouraging me to write these notes, and the participants in the seminar in TIFR for their active participation, interest, questions and comments. In ICTP (Trieste) I gave two informal talks in August 1999 on this subject, and the comments of the participants, specially L. Brambila-Paz and Y.I. Holla, helped to remove mistakes and improve the original notes.
This work was supported by a postdoctoral fellowship of Ministerio de Educaci\'on y Cultura (Spain).
\end{document} |
\begin{document}
\title{Long-distance distribution of genuine energy-time entanglement} \date{\today}
\author{A.~Cuevas}\thanks{These authors contributed equally to this work} \author{G.~Carvacho}\thanks{These authors contributed equally to this work} \affiliation{Departamento de F\'isica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{MSI-Nucleus for Advanced Optics, Universidad de Concepci\'on, Concepci\'on, Chile}
\author{G.~Saavedra}\thanks{These authors contributed equally to this work} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{MSI-Nucleus for Advanced Optics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{Departamento de Ingenier\'ia El\'ectrica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile}
\author{J.~Cari\~ne} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{Departamento de Ingenier\'ia El\'ectrica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile}
\author{W.~A.~T.~Nogueira} \affiliation{Departamento de F\'isica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{MSI-Nucleus for Advanced Optics, Universidad de Concepci\'on, Concepci\'on, Chile}
\author{M.~Figueroa} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{Departamento de Ingenier\'ia El\'ectrica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile}
\author{A.~Cabello} \affiliation{Departamento de F\'isica Aplicada II, Universidad de Sevilla, E-41012, Sevilla, Spain.}
\author{P.~Mataloni} \affiliation{Dipartimento de Fisica, Sapienza Universit\`a di Roma, Piazzale Aldo Moro 5, Roma I-00185, Italy.} \affiliation{Istituto Nazionale di Ottica (INO-CNR), Largo E. Fermi 6, I-50125 Firenze, Italy}
\author{G.~Lima} \affiliation{Departamento de F\'isica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{MSI-Nucleus for Advanced Optics, Universidad de Concepci\'on, Concepci\'on, Chile}
\author{G.~B.~Xavier} \email{gxavier@udec.cl} \affiliation{Center for Optics and Photonics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{MSI-Nucleus for Advanced Optics, Universidad de Concepci\'on, Concepci\'on, Chile} \affiliation{Departamento de Ingenier\'ia El\'ectrica, Universidad de Concepci\'on, 160-C Concepci\'on, Chile}
\begin{abstract} Any practical realization of entanglement-based quantum communication must be intrinsically secure and able to span long distances avoiding the need of a straight line between the communicating parties. The violation of Bell's inequality offers a method for the certification of quantum links without knowing the inner workings of the devices. Energy-time entanglement quantum communication satisfies all these requirements. However, currently there is a fundamental obstacle with the standard configuration adopted: an intrinsic geometrical loophole that can be exploited to break the security of the communication, in addition to other loopholes. Here we show the first experimental Bell violation with energy-time entanglement distributed over 1 km of optical fibers that is free of this geometrical loophole. This is achieved by adopting a new experimental design, and by using an actively stabilized fiber-based long interferometer. Our results represent an important step towards long-distance secure quantum communication in optical fibers. \end{abstract}
\maketitle
A fundamental application of quantum mechanics is in the development of communication systems with intrinsic unbreakable security \cite{GisinRMP}. Although quantum key distribution (QKD) is theoretically unconditionally secure, it has been experimentally demonstrated that QKD with realistic devices is prone to hacking \cite{Lydersen_2010}. A definitive solution to these practical attacks depends on the experimental violation of a Bell inequality, allowing communication with security guaranteed by the impossibility of signaling at superluminal speeds \cite{Bell,Ekert91,Kent05,Acin07}. The security can only be guaranteed if a loophole-free Bell test is performed, a major experimental task that still has not been demonstrated. With single-photons, the locality and the detection loopholes have been individually closed in separate experiments \cite{Zeilinger98, Zeilinger13, Kwiat13}. Recent experimental demonstrations of Einstein-Podolsky-Rosen (EPR) steering free of the detection loophole, also constitute an important step towards secure communications \cite{Smith2012, Bennet2012, Wittmann2012}. Progress has also been made regarding more subtle issues, such as the coincidence-time loophole \cite{Larsson2004,Kwiat13,ZeilingerReply,Larsson2013}.
Secure communication requires distributing quantum entanglement over long distances. The most common method to do it in optical fibers is based on Franson's configuration \cite{Franson89}. In the Franson scheme, each of two simultaneously emitted photons is injected into an unbalanced interferometer designed such that the uncertainty in the time of emission makes indistinguishable the two alternative paths each photon can take. Many experiments have been performed using this configuration \cite{Rarity_4km_1994, Gisin_10km_1998, Brendel99, Gisin_QKD_2000, Gisin_50km_2004, Salart2008, Salart2008b}, due to energy-time's innate robustness to decoherence in optical fibers. The problem of this method is that Franson's configuration has an intrinsic geometrical loophole and, therefore, cannot rule out all possible local explanations for the apparent violation of the Bell Clauser-Horne-Shimony-Holt (CHSH) inequality \cite{CHSH69, Aerts_PRL_1999, Cabello_genuine_2009}. Current experiments based on Franson's scheme only rule out local models making additional assumptions \cite{Franson99, Franson09, Larsson10}. On the practical side, this loophole can be exploited by eavesdroppers to break the security of the communication (see, e.g., the Trojan horse attack to Franson-based quantum cryptography in Ref. \cite{Larsson02}). Some solutions to this problem have been proposed. One consists of keeping Franson's configuration but replacing passive by active switchers \cite{Brendel99}. However, this solution has never been implemented in an experiment. Another solution consists of replacing Franson's interferometers by unbalanced cross-linked interferometers wrapped around the photon-pair source in a ``hug'' configuration introduced in \cite{Cabello_genuine_2009}. It has been recently demonstrated in table-top experiments \cite{Glima_genuine_2010,Vallone_genuine_2011}. 
Going to larger distances was thought to be unfeasible unless costly stabilization systems used for large gravitational wave detectors were applied \cite{Cabello_genuine_2009}.
Here we report the first experimental violation of the Bell CHSH inequality with genuine energy-time entanglement (i.e., free of the geometrical loophole) distributed through more than 1 km of optical fibers. The energy-time entangled photon-pair source is placed close to Alice, with one of the photons propagating through a short bulk optics unbalanced Mach-Zehnder (UMZ) interferometer. The other photon is transmitted through the second UMZ interferometer of the hug configuration, which is composed of 1-km long fiber optical arms. We show that a Bell violation can be obtained with a home-made active phase stabilization system, demonstrating that the hug configuration is indeed practical for long-distance entanglement distribution. The results presented here have major implications for secure quantum communication between parties that are not in the same straight line, opening up a path towards a loophole-free Bell test with energy-time entanglement. We stress that even if the detection and locality loopholes are simultaneously closed in a single experiment, energy-time entanglement will still remain unusable as a resource for device-independent quantum communication if the geometrical loophole is not addressed.
\begin{figure*}\label{fig1}
\end{figure*}
\section*{Results} \subsubsection*{Experimental setup}
The extensively employed Franson configuration is shown schematically in Fig. 1(a). It consists of two UMZ interferometers with the two arms in each one defined as the short (S) and long (L) paths respectively. In order to avoid single-photon interference, the long-short path difference is made to be much larger than the single-photon coherence length. The issue lies with the fact that the coincident detection events are post-selected for the verification of the Bell violation, and since this post-selection is local, it gives rise to hidden variable models \cite{Aerts_PRL_1999, Cabello_genuine_2009}. In the hug configuration, shown in Fig. 1(b), the unbalanced interferometers are formed shortly after each output mode of the source using beamsplitters, with the difference that the arms are crossed around the source. This geometrical property assures that the short-long or long-short events can only be routed to either Alice or Bob detectors, effectively removing the need of local post-selections.
The experimental setup used is depicted in Fig. 1(c). Degenerate 806 nm photon pairs with orthogonal polarizations are produced from spontaneous parametric down-conversion in a bulk non-linear periodically poled potassium titanyl phosphate (KTP) 20 mm long crystal. The crystal is pumped by a single-longitudinal mode continuous wave (CW) 403 nm laser with a coherence length greater than 20 m, and with 2.2 mW of optical power. A long coherence pump laser is necessary to create a fundamental uncertainty in the emission time of the down-converted photons, and thus set up as indistinguishable, the possible paths they can take in both (equally) unbalanced interferometers \cite{Cabello_genuine_2009}. When this condition is satisfied, the Bell state $\ket{\Phi^+}=\frac{1}{\sqrt{2}}(\ket{SS}+\ket{LL})$ is generated, where $S$ and $L$ indicate the short and long arms, respectively. The pump laser is weakly focused in the crystal in order to obtain a high collection efficiency in the optical fibers \cite{Ljunggren_PRA_fibers}. The emitted photons are deterministically split by a polarizing beam splitter (PBS), with each output mode sent to a beamsplitter (BS). The two cross-linked interferometers required in the hug configuration are built on these four modes connecting the source to Alice and Bob, $S_{\text{i}}$ and $L_{\text{i}}$, with $i=\text{A,B}$. The two modes $S_\text{{A}}$ and $L_\text{{A}}$ form a short-distance UMZ interferometer. Mode $S_\text{{A}}$ is reflected on a piezo-electric mounted mirror responsible for generating the phase shift $\phi_{\text{a}}$ employed by Alice. The final beamsplitter is a single-mode fiber-based component to optimize the overlap of the transverse spatial modes. The long-short path length difference is 2 m. This was chosen such that the time difference is larger than the coincidence temporal window (4 ns). The length of the short arm is approximately 1 m.
The other two modes, $S_{\text{B}}$ and $L_{\text{B}}$ connect the source to Bob through a long UMZ fiber-based interferometer. Its arms are built using 1-km long telecom spooled single-mode fibers. One spool at each arm. The use of telecom fibers is to demonstrate that the distribution of genuine energy-time entanglement is compatible with the already installed optical-based world-wide network. Since multi-mode propagation occurs in the telecom fibers at 806 nm, the arms are connected to a 780 nm single-mode fiber-optical beamsplitter at Bob's station, performing transverse spatial mode filtering \cite{Jennewein_APL_2010}. This interferometer presents the same 2 m difference between the long and short arms, as in Alice's case. For the active phase stabilization a piezo-electric fiber stretcher (FS) with 40 m of wound fiber is used in the $S_B$ arm of Bob's interferometer. Therefore an equal amount of fiber length must be added to the $L_B$ arm for balancing purposes. Thus, the total arm length in this interferometer is 1.04 km. With the exception of the 1 km fiber spools, all other fiber-optic components are single-mode at 780 nm and above. The polarization is adjusted in both arms of the long interferometer with polarization controllers to avoid which-way path information.
\begin{figure}
\caption{Net coincident counts versus the delay line position. A two-photon interference pattern is observed in case (a) with the stabilization system active, while (b) shows a second measurement over the same range of the delay line with stabilization turned off. The error bars show the standard deviation assuming a Poissonian distribution for the photon statistics.}
\label{fig2}
\end{figure}
\subsubsection*{Active phase stabilization}
A major experimental challenge in this implementation is the compensation of random phase fluctuations caused by the environment in long interferometers. This problem is successfully solved here for the first time by adapting to the hug configuration some ideas developed for different purposes \cite{guix_OL}, providing an affordable alternative to the costly stabilization systems suggested in Ref. \cite{Cabello_genuine_2009}. The solution is achieved by injecting a second laser into the system to provide real-time feedback information for the field programmable gate array (FPGA) based control electronics. As mentioned above, a piezo-electric FS composed of 40 m of wound fiber is used for the active phase stabilization of the long fiber-based interferometer. This device allows for a fiber expansion of up to 5 mm at the maximum driving voltage. This is essential as it allows several wavelengths of phase drift to be compensated in a long interferometer. The FS is driven by the control electronics in response to the environmentally-induced phase drifts imposed on the feedback optical signal when propagating in the interferometer. The control system enables phase-locking of the interferometer by using a custom designed closed-loop proportional-integral-derivative (PID) control algorithm. Additional details are provided in Methods section. A bulk-optics delay line, with a movement range of 150 mm, is used to set $L_{\text{A}}-S_{\text{A}} = L_{\text{B}}-S_{\text{B}}$ (see also Methods for additional details). To demonstrate the importance of the active stabilization in the experiment, we show in Fig. 2(a) the net coincidence counts/s between detectors $D_{\text{A1}}$ and $D_{\text{B1}}$ as a function of the delay line position around the indistinguishability point. The stabilization system was kept active maintaining $\phi_{\text{b}}$ constant while the delay line was moving. 
In Alice's side, the piezo-mounted mirror was driven during the measurement with a slowly varying voltage ramp, therefore modulating $\phi_{\text{a}}$. This is to ensure that two-photon interference fringes are observed in the indistinguishability region. The two-photon interference pattern in Fig. 2(a) is clearly observed. The crucial point is that two-photon interference can not be observed without active phase stabilization due to the rapidly and randomly varying phase drift in a long interferometer. This is shown in Fig. 2(b), where the scan in the same delay is performed with the control system turned off and all other settings kept the same as in Fig. 2(a).
\begin{figure}\label{fig3}
\end{figure}
\subsubsection*{Long-distance Bell inequality violation}
The violation of the Bell CHSH inequality was performed with the delay line set at the center point of the two-photon interference pattern, the active phase control is kept active and the piezo-mounted mirror once again slowly modulated (this time with a $\sim$ 30 s period). The Bell CHSH inequality is defined through the expression $S = E(\phi_{\text{a}},\phi_{\text{b}}) + E(\phi^{\prime}_{\text{a}},\phi_{\text{b}}) + E(\phi_{\text{a}},\phi^{\prime}_{\text{b}}) - E(\phi^{\prime}_{\text{a}},\phi^{\prime}_{\text{b}})$, where $E(\phi_{\text{a}}, \phi_{\text{b}}) = P_{11}(\phi_{\text{a}}, \phi_{\text{b}}) + P_{22}(\phi_{\text{a}}, \phi_{\text{b}}) - P_{12}(\phi_{\text{a}}, \phi_{\text{b}}) - P_{21}(\phi_{\text{a}}, \phi_{\text{b}})$, with $P_{\text{ij}}(\phi_{\text{a}}, \phi_{\text{b}})$ corresponding to the probability of a coincident detection at Alice and Bob's detectors $i$ and $j$ respectively, while the relative phases $\phi_{\text{a}}$ and $\phi_{\text{b}}$ are applied to Alice and Bob's interferometers. For the maximum violation of the Bell CHSH inequality the phase settings are $\phi_{\text{a}} = \pi/4$, $\phi_{\text{b}} = 0$, $\phi_{\text{a}}^{\prime} = -\pi/4$ and $\phi_{\text{b}}^{\prime} = \pi/2$ \cite{CHSH69, Glima_genuine_2010}. The phase control system is used to switch between Bob's two phase settings, 0 and $\pi/2$. The eight measured curves across all output combinations are displayed in Fig. 3(a)-(d). The measured average raw visibility is (84.36 $\pm$ 0.47)\%. When the accidental coincidences are subtracted, the net visibility rises to (95.12 $\pm$ 0.20)\%. The recorded values of the probability correlation functions ($E$) for the case with no accidental subtraction are shown in Fig. 3(e). The corresponding violation of the Bell CHSH inequality in this case yields $S = 2.39 \pm 0.12$, surpassing the classical limit by 3.25 standard deviations. 
Our experimental results are comparable to previous long-distance Bell experiments using energy-time entanglement based on the Franson configuration \cite{Gisin_10km_1998}.
\section*{Discussion}
Future quantum communication systems must work over long distances, avoid the need of a straight line between the communicating parties and fit within existing communication infrastructures. In addition, security must be based on physical principles rather than on unproven assumptions. For these reasons, energy-time entanglement-based quantum communication is, in principle, the ideal solution. However, the fact that standard setups for creating energy-time entanglement are intrinsically insecure even assuming perfect detection efficiency constitutes a fundamental hurdle. Here we have demonstrated for the first time that genuine energy-time entanglement can be distributed over long distances, thus removing one fundamental obstacle for practical and secure quantum communication with optical fibers. Note that even though polarization entanglement has also been distributed through optical fibers \cite{Sauge2007, Hubel2007}, energy-time entanglement has the advantage of having an innate robustness to decoherence in optical fibers.
In commercial networks, remote nodes are connected with optical cables composed of many optical fibers. For an experiment, based on the hug configuration and with spatial separation between Alice and Bob, two fibers of the same cable can be used. In this case, as the phase drift acts almost equally on both fibers, it is very likely that the scheme adopted here for the stabilization of the fiber-based long interferometer can also be used. Further investigations are required in this direction. Nevertheless, our work is a first step towards practical long distance secure quantum communication based on energy-time entanglement, as it fixes the previous security issues with all other demonstrations using Franson's configuration.
\subsection*{Methods}
\subsubsection*{Stabilization system}
A long-coherence single-longitudinal mode CW laser operating at 852 nm is used as a feedback optical signal. The feedback optical signal is combined with the pump beam on a dichroic mirror before the crystal. The FS is a commercial off-the-shelf device consisting of a 40 m long optical fiber spooled around a piezo-electric element. The feedback optical signal is detected by an amplified p-i-n photodetector in one of the outputs of the long interferometer, after being split by a dichroic mirror. In order to avoid unwanted noise generated from the control laser, three extra optical filters are employed: one bandpass 850 nm optical filter (10 nm of full width at half-maximum -- FWHM), placed at the output of the 852 nm laser, and two bandpass 806 nm filters in series (5 nm of FWHM) are inserted before each single-photon detector. For the sake of clarity these are not shown in Fig. 1(c).
The phase setting $\phi_{\text{b}}$ applied by Bob was controlled using the set-point in the FPGA electronics. It reads the optical intensity of the feedback signal, which gives information on the current relative phase between the interferometer arms. Furthermore it calculates the derivative of the signal, to remove ambiguity arising from the phase information. With both the intensity of the signal and the derivative, the control is able to fix any constant relative phase difference between the interferometer arms. The total bandwidth of the control system including the optical components (stretcher and detector) is of approximately 5 kHz.
\subsubsection*{Indistinguishability between the two-photon paths}
To generate the indistinguishability between the $\ket{SS}$ and $\ket{LL}$ paths, a bulk-optics delay line, with a movement range of 150 mm, is used. Since it is experimentally challenging to properly balance two long paths within the two-photon coherence length ($\approx 1$ mm in our case), a previous adjustment of the 1.04 km arms was performed in an external interferometer with a Fabry-Perot (FP) laser source for a coarse adjustment. Then a broadband light source (a light emitting diode) was used replacing the FP laser to set the arm lengths to be equal within less than 1 mm. The arms were then installed in the setup, and the extra 2 m of optical fiber added in the long arm.
\begin{references}
\bibitem{GisinRMP} Gisin, N., Ribordy, G., Tittel, W. \& Zbinden, H. Quantum cryptography. \textit{Rev. Mod. Phys.} \textbf{74}, 145--195 (2002).
\bibitem{Lydersen_2010} Lydersen, L., Wiechers, C., Wittmann, C., Elser, D., Skaar, J. \& Makarov, V. Hacking commercial quantum cryptography systems by tailored bright illumination. \textit{Nat. Photon.} \textbf{4}, 686--689 (2010).
\bibitem{Bell} Bell, J. S. Speakable and Unspeakable in Quantum Mechanics: Collected Papers on Quantum Philosophy (Cambridge Univ. Press, 2004).
\bibitem{Ekert91}
Ekert, A. K.
Quantum cryptography based on Bell's theorem.
\textit{Phys. Rev. Lett.} \textbf{67}, 661--663 (1991).
\bibitem{Kent05}
Barrett, J., Hardy, L. \& Kent, A.
No signaling and quantum key distribution.
\textit{Phys. Rev. Lett.} \textbf{95}, 010503 (2005).
\bibitem{Acin07}
Ac\'{\i}n, A., Brunner, N., Gisin, N., Massar, S., Pironio, S. \& Scarani, V.
Device-independent security of quantum cryptography against collective attacks.
\textit{Phys. Rev. Lett.} \textbf{98}, 230501 (2007).
\bibitem{Zeilinger98}
Weihs, G., Jennewein T., Simon, C., Weinfurter, H. \& Zeilinger, A. Violation of Bell's inequality under strict Einstein locality conditions. \textit{Phys. Rev. Lett.} \textbf{81}, 5039 (1998).
\bibitem{Zeilinger13}
Giustina, M., \textit{et al}. Bell violation using entangled photons without the fair sampling assumption. \textit{Nature} \textbf{497}, 227--230 (2013).
\bibitem{Kwiat13}
Christensen, B. G., \textit{et al}. Detection-loophole-free test of quantum nonlocality and applications, \textit{Phys. Rev. Lett.} \textbf{111}, 130406 (2013).
\bibitem{Smith2012}
Smith, D. H., \textit{et al}. Conclusive quantum steering with superconducting transition edge sensors. \textit{Nat. Commun.} \textbf{3}, 625 (2012).
\bibitem{Bennet2012}
Bennet, A. J., \textit{et al}. Arbitrarily loss-tolerant Einstein-Podolsky-Rosen steering allowing a demonstration over 1 km of optical fiber with no detection loophole. \textit{Phys. Rev. X} \textbf{2}, 031003 (2012).
\bibitem{Wittmann2012}
Wittmann, B., \textit{et al}. Loophole-free Einstein-Podolsky-Rosen experiment via quantum steering. \textit{New J. Phys.} \textbf{14} 053030 (2012).
\bibitem{Larsson2004}
Larsson, J.-\AA{}. \& Gill, R. D. Bell's inequality and the coincidence-time loophole. \textit{Europhys. Lett.} \textbf{67}, 707 (2004).
\bibitem{ZeilingerReply} Kofler, J., Ramelow, S., Giustina, M. \& Zeilinger, A. On Bell violation using entangled photons without the fair-sampling assumption. Preprint at http://arxiv.org/abs/1307.6475 (2013).
\bibitem{Larsson2013} Larsson, J.-\AA{}., \textit{et al}. Bell violation with entangled photons, free of the coincidence-time loophole. Preprint at http://arxiv.org/abs/1309.0712 (2013).
\bibitem{Franson89}
Franson, J. D.
Bell inequality for position and time.
\textit{Phys. Rev. Lett.} \textbf{62}, 2205--2208 (1989).
\bibitem{Rarity_4km_1994}
Tapster, P. R., Rarity, J. G. \& Owens, P. C. M.
Violation of Bell's inequality over 4 km of optical fiber.
\textit{Phys. Rev. Lett.} \textbf{73}, 1923--1926 (1994).
\bibitem{Gisin_10km_1998}
Tittel, W., Brendel, J., Zbinden, H. \& Gisin, N.
Violation of Bell inequalities by photons more than 10 km apart.
\textit{Phys. Rev. Lett.} \textbf{81}, 3563--3566 (1998).
\bibitem{Brendel99}
Brendel, J., Gisin, N., Tittel, W. \& Zbinden H.
Pulsed energy-time entangled twin-photon source for quantum communication.
\textit{Phys. Rev. Lett.} \textbf{82}, 2594--2597 (1999).
\bibitem{Gisin_QKD_2000}
Tittel, W., Brendel, J., Zbinden, H. \& Gisin, N.
Quantum Cryptography Using Entangled Photons in Energy-Time Bell States.
\textit{Phys. Rev. Lett.} \textbf{84}, 4737--4740 (2000).
\bibitem{Gisin_50km_2004}
Marcikic, I., de Riedmatten, H., Tittel, W., Zbinden, H., Legr\'{e}, M. \& Gisin, N.
Distribution of time-bin entangled qubits over 50 km of optical fiber.
\textit{Phys. Rev. Lett.} \textbf{93}, 180502 (2004).
\bibitem{Salart2008}
Salart, D., Baas, A., van Houwelingen, J. A. W., Gisin, N. \& Zbinden, H.
Spacelike separation in a Bell test assuming gravitationally induced collapses.
\textit{Phys. Rev. Lett.} \textbf{100}, 220404 (2008).
\bibitem{Salart2008b}
Salart, D., Baas, A., Branciard, C., Gisin, N. \& Zbinden, H.
Testing the speed of `spooky action at a distance'.
\textit{Nature} \textbf{454}, 861--864 (2008).
\bibitem{Aerts_PRL_1999}
Aerts, S., Kwiat, P., Larsson, J.-\AA{} \& \.{Z}ukowski, M.
Two-photon Franson-type experiments and local realism.
\textit{Phys. Rev. Lett.} \textbf{83}, 2872--2875 (1999).
\bibitem{Cabello_genuine_2009}
Cabello, A., Rossi, A., Vallone, G., de Martini, F. \& Mataloni, P.
Proposed Bell experiment with genuine energy-time entanglement.
\textit{Phys. Rev. Lett.} \textbf{102}, 040401 (2009).
\bibitem{CHSH69}
Clauser, J. F., Horne, M. A., Shimony, A. \& Holt, R. A.
Proposed experiment to test local hidden-variable theories.
\textit{Phys. Rev. Lett.} \textbf{23}, 880--884 (1969).
\bibitem{Franson99}
Franson, J. D.
Inconsistency of local realistic descriptions of two-photon interferometer experiments.
\textit{Phys. Rev. A} \textbf{61}, 012105 (1999).
\bibitem{Franson09}
Franson, J. D.
Nonclassical nature of dispersion cancellation and nonlocal interferometry.
\textit{Phys. Rev. A} \textbf{80}, 032119 (2009).
\bibitem{Larsson10}
Larsson, J.-\AA{}.
Energy-time entanglement, elements of reality, and local realism, in
\textit{ Quantum Theory: Reconsiderations of Foundations - 5}
(American Institute of Physics Proceedings, Vol. 1232), 115--127 (2010).
\bibitem{Larsson02}
Larsson, J.-\AA{}.
A practical Trojan horse for Bell-inequality-based quantum cryptography.
Quant. Inf. Comp. \textbf{2}, 434--442 (2002).
\bibitem{Glima_genuine_2010}
Lima, G., Vallone, G., Chiuri, A., Cabello, A. \& Mataloni, P.
Experimental Bell-inequality violation without the postselection loophole.
\textit{Phys. Rev. A} \textbf{81}, 040401(R) (2010).
\bibitem{Vallone_genuine_2011} Vallone, G., Gianani, I., Inostroza, E. B., Saavedra, C., Lima, G., Cabello, A. \& Mataloni, P. Testing Hardy's nonlocality proof with genuine energy-time entanglement. \textit{Phys. Rev. A} \textbf{83}, 042105 (2011).
\bibitem{Ljunggren_PRA_fibers} Ljunggren, D. \& Tengner, M. Optimal focusing for maximal collection of entangled narrow-band photon pairs into single-mode fibers. \textit{Phys. Rev. A} \textbf{72}, 062301 (2005).
\bibitem{Jennewein_APL_2010} Meyer-Scott, E., H\"{u}bel, H., Fedrizzi, A., Erven, C., Weihs, G. \& Jennewein, T. Quantum entanglement distribution with 810 nm photons through telecom fibers. \textit{Appl. Phys. Lett.} \textbf{97}, 031117 (2010).
\bibitem{guix_OL} Xavier, G. B. \& von der Weid, J. P. Stable single-photon interference in a 1 km fiber-optic Mach-Zehnder interferometer with continuous phase adjustment. \textit{Opt. Lett.} \textbf{36}, 1764--1766 (2011).
\bibitem{Sauge2007}
Sauge S., \textit{et al}. Narrowband polarization-entangled photon pairs distributed over a WDM link for qubit networks. \textit{Opt. Express} \textbf{15}, 6926--6933 (2007).
\bibitem{Hubel2007}
H\"{u}bel H., \textit{et al}. High-fidelity transmission of polarization encoded qubits from an entangled source over 100 km of fiber. \textit{Opt. Express} \textbf{15}, 7853--7862 (2007).
\end{references}
\subsection*{Acknowledgments}
The authors thank M. Barbieri for valuable discussions. This work was supported by the grants FONDECYT 11110115 and 1120067, CONICYT PFB08-024 and Milenio P10-030-F. A. Cuevas, G. C. and J. C. acknowledge the financial support of CONICYT, while M. F. acknowledges support of FONDECYT 1121010. A. Cabello was also supported by Project No.\ FIS2011-29400 (MINECO, Spain).
\subsection*{Authors' Contributions}
A. Cuevas, G. C., G. S. and J. C., with assistance from W. N., M. F., P. M., G. L. and G. X., performed the experiment and analyzed the data. A. Cabello, P. M., G. L. and G. X. conceived and designed the experiment and wrote the paper. All authors agree to the contents of the paper.
\subsection*{Additional information}
Correspondence and requests for materials should be addressed to G. X. The authors declare no competing financial interests.
\end{document} |
\begin{document}
\title{
Additive Kernels for Gaussian Process Modeling
}
\begin{center} \Large N. Durrande\footnotemark[1]\footnotemark[3], D. Ginsbourger\footnotemark[2], O. Roustant \footnotemark[1]\\ January 12, 2010 \end{center}
\footnotetext[1]{CROCUS - Ecole Nationale Sup\'erieure des Mines de St-Etienne
\\ \phantom{ha ha} 29 rue Ponchardier - 42023 St Etienne, France} \footnotetext[2]{Institute of Mathematical Statistics and Actuarial Science, University of Berne,
\\ \phantom{ha ha} Alpeneggstrasse 22 - 3012 Bern, Switzerland,} \footnotetext[3]{Corresponding author: \textit{durrande@emse.fr}}
\begin{abstract} Gaussian Process (GP) models are often used as mathematical approximations of computationally expensive experiments. Provided that its kernel is suitably chosen and that enough data is available to obtain a reasonable fit of the simulator, a GP model can beneficially be used for tasks such as prediction, optimization, or Monte-Carlo-based quantification of uncertainty. However, the former conditions become unrealistic when using classical GPs as the dimension of input increases. One popular alternative is then to turn to Generalized Additive Models (GAMs), relying on the assumption that the simulator's response can approximately be decomposed as a sum of univariate functions. If such an approach has been successfully applied in approximation, it is nevertheless not completely compatible with the GP framework and its versatile applications. The ambition of the present work is to give an insight into the use of GPs for additive models by integrating additivity within the kernel, and proposing a parsimonious numerical method for data-driven parameter estimation.
The first part of this article deals with the kernels naturally associated to additive processes and the properties of the GP models based on such kernels.
The second part is dedicated to a numerical procedure based on relaxation for additive kernel parameter estimation.
Finally, the efficiency of the proposed method is illustrated and compared to other approaches on Sobol's g-function. \end{abstract}
\paragraph{keyword} Kriging, Computer Experiment, Additive Models, GAM, Maximum Likelihood Estimation, Relaxed Optimization, Sensitivity Analysis
\title{
Additive Kernels for Gaussian Process Modeling
}
\section{Introduction}
The study of numerical simulators often deals with calculation intensive computer codes. This cost implies that the number of evaluations of the numerical simulator is limited and thus many methods such as uncertainty propagation, sensitivity analysis, or global optimization are unaffordable. A well known approach to circumvent time limitations is to replace the numerical simulator by a mathematical approximation called metamodel (or response surface or surrogate model) based on the responses of the simulator for a limited number of inputs called the Design of Experiments (DoE). There is a large number of metamodels types and among the most popular we can cite regression, splines, neural networks... In this article, we focus on a particular type of metamodel: the Kriging method, more recently referred to as Gaussian Process modeling \cite{Rasmussen2006}. Originally presented in spatial statistics \cite{Cressie1993} as an optimal Linear Unbiased Predictor (LUP) of random processes, Kriging has become very popular in machine learning, where its interpretation is usually restricted to the convenient framework of Gaussian Processes (GP). Beyond the LUP ---which then elegantly coincides with a conditional expectation---, the latter GP interpretation allows indeed the explicit derivation of conditional probability distributions for the response values at any point or set of points in the input space.
The classical Kriging method faces two issues when the number of dimensions $d$ of the input space $D\subset \mathbb{R}^{d}$ becomes high. Since this method is based on neighborhoods, it requires an increasing number of points in the DoE to cover the domain $D$. The second issue is that the number of anisotropic kernel parameters to be estimated increases with $d$ so that the estimation becomes particularly difficult for high dimensional input spaces \cite{K-T.Fang2006,OHagan2006}. An approach to get around the first issue is to consider specific features lowering complexity such as the family of Additive Models. In this case, the response can approximately be decomposed as a sum of univariate functions: \begin{equation} f(\mathbf{x}) = \mu + \sum_{i=1}^{d}{f_i(x_i)}, \label{eq:AM} \end{equation} where $\mu \in \mathbb{R}$ and the $f_i$'s may be non-linear. Since their introduction by Stone in 1985 \cite{stone1985additive}, many methods have been proposed for the estimation of additive models. We can cite the method of marginal integration \cite{newey1994kernel} and a very popular method described by Hastie and Tibshirani in \cite{Buja1989,Hastie1990}: the GAM backfitting algorithm. However, those methods do not consider the probabilistic framework of GP modeling and do not usually provide additional information such as the prediction variance. Combining the high-dimensional advantages of GAMs with the versatility of GPs is the main goal of the present work. For the study of functions that contain an additive part plus a limited number of interactions, an extension of the present work can be found in the recent paper of T. Muehlenstaedt~\cite{Muhl}.
The first part of this paper focuses on the case of additive Gaussian Processes, their associated kernels and the properties of associated additive kriging models. The second part deals with a Relaxed Likelihood Maximization (RLM) procedure for the estimation of kernel parameters for Additive Kriging models. Finally, the proposed algorithm is compared with existing methods on a well known test function: Sobol's g-function \cite{Saltelli2000}. It is shown within the latter example that Additive Kriging with RLM outperforms standard Kriging and produces similar performance to GAM. Due to its approximation performance and its built-in probabilistic framework both demonstrated later in this article, the proposed Additive Kriging model appears as a serious and promising challenger among additive models.
\section{Towards Additive Kriging}
\subsection{Additive random processes}
Let us first introduce the mathematical construction of an additive GP. A function $f:D \subset \mathbb{R}^d \longrightarrow \mathbb{R}$ is additive when it can be written $f(\mathbf{x}) = \sum_{i=1}^{d}{f_i(x_i)}$, where $x_i$ is the $i$-th component of the $d$-dimensional input vector $\mathbf{x}$ and the $f_i$'s are arbitrary univariate functions.
Let us first consider two independent real-valued first order stationary processes $Z_1$ and $Z_2$ defined over the same probability space $(\Omega, \mathcal{F},P)$ and indexed by $\mathbb{R}$, so that their trajectories $Z_i(.;\omega): x \in \mathbb{R} \longrightarrow Z_i(x;\omega)$ are univariate real-valued functions.
Let $K_i:\mathbb{R}\times\mathbb{R} \longrightarrow \mathbb{R}$ be their respective covariance kernels and $\mu_1, \mu_2 \in \mathbb{R}$ their means.
Then, the process $Z:=Z_{1}+Z_{2}$ defined over $(\Omega, \mathcal{F},P)$ and indexed by $\mathbb{R}^{2}$, and so that \begin{equation} \forall \omega \in \Omega \ \forall \mathbf{x} \in \mathbb{R}^{2} \ Z(\mathbf{x};\omega)=Z_{1}(x_1;\omega)+Z_{2}(x_2;\omega), \label{eq_sum_proc} \end{equation} \noindent has mean $\mu = \mu_1 + \mu_2$ and kernel $K(\mathbf{x},\mathbf{y})=K_1(x_1,y_1)+K_2(x_2,y_2)$. Following equation~\ref{eq_sum_proc}, the latter sum process clearly has additive paths.
In this document, we call additive any kernel of the form $K:(\mathbf{x},\mathbf{y})\in \mathbb{R}^d \times \mathbb{R}^d \longrightarrow K(\mathbf{x},\mathbf{y})=\sum_{i=1}^{d}{K_i(x_i,y_i)}$ where the $K_i$'s are semi-positive definite (s.p.d.) symmetric kernels over $\mathbb{R}\times\mathbb{R}$. Although not commonly encountered in practice, it is well known that such a combination of s.p.d. kernels is also a s.p.d. kernel in the direct sum space \cite{Rasmussen2006}. Moreover, one can show that the paths of any random process with additive kernel are additive in a certain sense:
\begin{prop} \label{addproc} Any (square integrable) random process $Z_{\mathbf{x}}$ possessing an additive kernel is additive up to a modification. In essence, it means that there exists a process $A_{\mathbf{x}}$ whose paths are all additive, and such that $\forall \mathbf{x} \in X,\ \mathbb{P}(Z_{\mathbf{x}}=A_{\mathbf{x}})=1$. \end{prop} The proof of this property is given in the appendix for $d=2$. For $d=n$ the proof follows the same pattern but the notations are more cumbersome.
Note that the class of additive processes is not actually limited to processes with additive kernels. For example, let us consider $Z_1$ and $Z_2$ two correlated Gaussian processes on $(\Omega, \mathcal{F},P)$ such that the couple $(Z_1,Z_2)$ is Gaussian. Then $Z_{1}(x_1)+Z_{2}(x_2)$ is also a Gaussian process with additive paths but its kernel is not additive. However, in the next section, the term additive process will always refer to GP with additive kernels.
\subsection{Invertibility of covariance matrices} In practice, the covariance matrix $\mathrm{K}$ of the observations of an additive process $Z$ at a design of experiments $X=(\mathbf{x}^{(1)}\ \dots\ \mathbf{x}^{(n)})^T$ may not be invertible even if there is no redundant point in $X$. Indeed, the additivity of $Z$ may introduce linear relationships (that hold almost surely) between the observed values of $Z$ and lead to the non-invertibility of $\mathrm{K}$. Figure~\ref{fig:planprob} shows two examples of designs leading to a linear relationship between the observations. For the left panel, the additivity of $Z$ implies that $Z(\mathbf{x}^{(4)}) = Z(\mathbf{x}^{(2)})+Z(\mathbf{x}^{(3)})-Z(\mathbf{x}^{(1)})$ and thus the fourth column of the covariance matrix is a linear combination of the three other columns: $K(\mathbf{x}^{(i)},\mathbf{x}^{(4)})=K(\mathbf{x}^{(i)},\mathbf{x}^{(2)})+K(\mathbf{x}^{(i)},\mathbf{x}^{(3)})-K(\mathbf{x}^{(i)},\mathbf{x}^{(1)})$ and the associated covariance matrix is not invertible.
\begin{figure}
\caption{2-dimensional examples of DoE which lead to non-invertible covariance matrix in the case of random processes with additive kernels.}
\label{fig:planprob}
\end{figure}
A first approach is to remove some points in order to avoid any linear combination, which is furthermore in accordance with the aim of parsimonious evaluations for costly simulators. Algebraic methods may be used for determining the combination of points leading to a linear relationship between the values of the random process but this procedure is out of the scope of this paper.
\subsection{Additive Kriging}
Let $z : D \rightarrow \mathbb{R}$ be the function of interest (a numerical simulator for example), where $D \subset \mathbb{R}^d$. The responses of $z$ at the DoE $\mathcal{X}$ are noted $\mathbf{Z}=(z(\mathbf{x}^{(1)}) \ ... \ z(\mathbf{x}^{(n)}))^T$. Simple kriging relies on the hypothesis that $z$ is one path of a centered random process $Z$ with kernel $K$. The expression of the best predictor (also called kriging mean) and of the prediction variance are: \begin{equation} \begin{split} m(\mathbf{x}) & = k(\mathbf{x})^T \mathrm{K} ^{-1}\mathbf{Z} \\ v(\mathbf{x}) & = K(\mathbf{x},\mathbf{x}) - k(\mathbf{x})^T \mathrm{K} ^{-1}k(\mathbf{x}) \end{split} \label{eq:BP} \end{equation} where $k(\mathbf{x})=(K(\mathbf{x},\mathbf{x}^{(1)})\ \dots\ K(\mathbf{x},\mathbf{x}^{(n)}))^T$ and $\mathrm{K}$ is the covariance matrix of general term $\mathrm{K}_{i,j}=K(\mathbf{x}^{(i)},\mathbf{x}^{(j)})$. Note that these equations respectively correspond to the conditional expectation and variance in the case of a GP with known kernel. In practice, the structure of $K$ is assumed to be known (e.g. power-exponential or Mat\'ern families) but its parameters are unknown. A common way to estimate them is to maximize the likelihood of the kernel parameters given the observations $\mathbf{Z}$ \cite{Ginsbourger2009,Rasmussen2006}.
\noindent Equations \ref{eq:BP} are valid for any s.p.d kernel, thus they can be applied with additive kernels. In this case, the additivity of the kernel implies the additivity of the kriging mean: for example in dimension 2, for $K(\mathbf{x},\mathbf{y})=K_1(x_1,y_1)+K_2(x_2,y_2)$ we have \begin{equation} \begin{split} m(\mathbf{x}) & = (k_1(x_1)+k_2(x_2))^T(\mathrm{K_1+K_2})^{-1}\mathbf{Z}\\ & = k_1(x_1)^T(\mathrm{K_1+K_2})^{-1}\mathbf{Z} + k_2(x_2)^T(\mathrm{K_1+K_2})^{-1}\mathbf{Z}\\ & = m_1(x_1) + m_2(x_2). \end{split} \end{equation}
\noindent Another interesting property concerns the variance: $v$ can be null at points that do not belong to the DoE. Let us consider a two dimensional example where the DoE is composed of the 3 points represented on the left panel of figure~\ref{fig:planprob}: $\mathcal{X}=\{\mathbf{x}^{(1)}\ \mathbf{x}^{(2)} \ \mathbf{x}^{(3)}\}$. Direct calculations presented in Appendix B show that the prediction variance at the point $\mathbf{x}^{(4)}$ is equal to 0. This particularity follows from the fact that the value of the additive process is known almost surely at the point $\mathbf{x}^{(4)}$ based on the observations at $\mathcal{X}$. In the next section, we illustrate the potential of Additive Kriging on an example and propose an algorithm for parameter estimation.
\subsection{Illustration and further consideration on a 2D example} We present here a first basic example of an additive kriging model. We consider $D=[0,1]^2$, and a set of 5 points in $D$ where the values of the observations $\mathbf{F}$ are arbitrarily chosen. Figure~\ref{fig:ex1dim2} shows the obtained kriging model. We can see on this figure the properties we mentioned above: the kriging mean is an additive function and the prediction variance can be null for points that do not belong to the DoE.
\begin{figure}
\caption{Approximation of the function $f$ based on five observations (black dots). The left panel represents the best predictor and the right panel the prediction variance. The kernel used is the additive Gaussian kernel with parameters $\sigma = (1\ 1)$ and $\theta = (0.6\ 0.6)$.}
\label{fig:ex1dim2}
\end{figure}
The effect of any variable can be isolated and represented so that the metamodel can be split into a sum of univariate sub-models. Moreover, we can get confidence intervals for each univariate model. As the expression of the first univariate model is \begin{equation} m_1(x_1)=k_1(x_1)^T(\mathrm{K_1}+\mathrm{K_2})^{-1}\mathbf{F} \end{equation} the effect of the direction 2 can be seen as an observation noise. We thus get an expression for the prediction variance of the first main effect \begin{equation} v_1(x_1)=K_1(x_1,x_1)-k_1(x_1)^T(\mathrm{K_1}+\mathrm{K_2})^{-1}k_1(x_1). \end{equation}
\begin{figure}
\caption{Univariate models of the 2-dimensional example. The left panel plots $m_1$ and the 95\% confidence intervals $c_1(x_1)=m_1(x_1) \pm 2\sqrt{v_1(x_1)}$. The right panel shows the sub-model of the centered univariate effects $m_1^*$ and $c_1^*(x_1)=m_1^*(x_1) \pm 2\sqrt{v_1^*(x_1)}$.}
\label{fig:ex1dim2bis}
\end{figure} The left panel of figure~\ref{fig:ex1dim2bis} shows the obtained sub-model for the first direction. The interest of such a graphic is limited since a 2-dimensional function can be plotted but this decomposition becomes useful to get an insight into the effect of a variable when $d>2$. However, we can see that the confidence intervals are wide. This is because the sub-models are defined up to a constant. In order to get rid of the effect of such a translation, an option is to approximate $Z_i(x_i)-\int{Z_i(s_i)\mathrm{d} s_i}$ conditionally to the observations: \begin{equation} \begin{split}
m_i^*(x_i)&=\mathrm{E} \left[ \left.Z_i(x_i)-\int{Z_i(s_i)\mathrm{d} s_i} \right| Z(X)=Y \right] \\
v_i^*(x_i)&=\mathrm{var} \left[ \left.Z_i(x_i)-\int{Z_i(s_i)\mathrm{d} s_i} \right| Z(X)=Y \right] \end{split} \end{equation} The expression of $m_i^*(x_i)$ is straightforward whereas $v_i^*(x_i)$ requires more calculations which are given in Appendix C. \begin{equation} \begin{split} m_i^*(x_i)&=m_i(x_i)-\int{m_i(s_i)\mathrm{d} s_i} \\ v_i^*(x_i)&= v_i(x_i) - 2 \int{K_i(x_i,s_i)\mathrm{d} s_i} + 2\int{k_i(x_i)^T K^{-1} k_i(s_i) \mathrm{d} s_i} \\ & \hspace{2cm} + \iint{K_i(s_i,t_i)\mathrm{d} s_i \mathrm{d} t_i} - \iint{k_i(t_i)^T K^{-1} k_i(s_i) \mathrm{d} s_i \mathrm{d} t_i} \end{split} \end{equation} The benefits of using $m_i^*$ and $v_i^*$ and then to define the sub-models up to a constant can be seen on the right panel of figure~\ref{fig:ex1dim2bis}. At the end, the probabilistic framework gives an insight on the error of the metamodel but also of each sub-model.
\section{Parameter estimation}
\subsection{Maximum likelihood estimation (MLE)} MLE is a standard way to estimate covariance parameters and it is covered in detail in the literature \cite{Rasmussen2006,Santner2003}. Let $Y$ be a centered additive process and $\psi_i=\{ \sigma_i^2, \theta_i \} \text{ with } i \in \{1,\dots,d\}$ the parameters of the univariate kernels. According to the MLE methodology, the best values $\psi_i^*$ for the parameters $\psi_i$ are the values maximizing the likelihood of the observations $\mathrm{Y}$: \begin{equation} \mathcal{L}(\psi_1,\dots,\psi_d) := \frac{1}{{(2\pi)}^{n/2} \mathrm{det(K(\psi))}^{1/2}} \mathrm{exp} \left( - \frac12 \mathrm{Y}^T \mathrm{K}(\psi)^{-1} \mathrm{Y} \right) \label{eq:L} \end{equation} where $\mathrm{K}(\psi) = \mathrm{K}_1(\psi_1) + \dots +\mathrm{K}_d(\psi_d)$ is the covariance matrix depending on the parameters $\psi_i$. The latter maximization problem is equivalent to the usually preferred minimization of \begin{equation} l(\psi_1,\dots,\psi_d) := \mathrm{log}(\mathrm{det}(\mathrm{K}(\psi))) + \mathrm{Y}^T \mathrm{K}(\psi)^{-1} \mathrm{Y} \label{eq:RLL} \end{equation} Obtaining the optimal parameters $\psi_i^*$ relies on the successful use of a non-convex global optimization routine. This can be severely hindered for large values of $d$ since the search space of kernel parameters becomes high dimensional. One way to cope with this issue is to separate the variables and split the optimization into several low-dimensional subproblems.
\subsection{The Relaxed Likelihood Maximization algorithm} The aim of the Relaxed Likelihood Maximization (RLM) algorithm is to treat separately the optimization in each direction. In this way, RLM can be seen as a cyclic relaxation optimization procedure \cite{Minoux1986} with initial values of the parameters $\sigma^2_i$ set to zero. As we will see, the main originality here is to consider a kriging model with an observation noise variance $\tau^2$ that fluctuates during the optimization. This parameter accounts for the metamodel error (if the function is not additive for example) but also for the inaccuracy of the intermediate values of $\sigma_i$ and $\theta_i$.
\noindent The first step of the algorithm is to estimate the parameters of the kernel $K_1$. The simplification of the method is to consider that all the variations of $Y$ in the other directions can be summed up as a white noise. Under this hypothesis, $l$ depends on $\psi_1$ and $\tau$: \begin{equation} l(\psi_1,\tau) = \mathrm{log}(\mathrm{det}(\mathrm{K}_1(\psi_1)+\tau^2 I_d)) + \mathbf{Y}^T (\mathrm{K}_1(\psi_1)+\tau^2 I_d)^{-1} \mathbf{Y} \label{eq:KAM-E1} \end{equation}
\noindent Then, the couple $\{\psi_1^*,\tau^{*} \}$ that maximizes $l(\psi_1,\tau)$ can be obtained by numerical optimization.
\noindent The second step of the algorithm consists in estimating $\psi_2$, with $\psi_1$ fixed to $\psi_1^*$:
\begin{equation} \begin{split} \{ \psi_2^*, \tau^{*} \} = & \underset{\psi_2, \tau}{\mathrm{argmax}}(l(\psi_1^*,\psi_2,\tau)) \text{, with} \\ l(\psi_1^*,\psi_2,\tau) = & \mathrm{log}(\mathrm{det}(\mathrm{K}_1(\psi_1^*)+\mathrm{K}_2(\psi_2)+\tau^2 I_d)) + \\
& \quad \mathbf{Y}^T (\mathrm{K}_1(\psi_1^*)+\mathrm{K}_2(\psi_2)+\tau^2 I_d)^{-1} \mathbf{Y} \end{split} \label{eq:KAM-E2} \end{equation}
This operation can be repeated for all the directions until the estimation of $\psi_d$.
\noindent However, even if all the parameters $\psi_i$ have been estimated, it is fruitful to re-estimate them such that the estimation of the parameter $\psi_i$ can benefit from the values $\psi_j^*$ for $j>i$. Thus, the algorithm is composed of a cycle of estimations that treat each direction one after the other:
\noindent \textbf{RLM Algorithm :}\\
1. Initialize the values $\sigma_i^{(0)}=0$ for $\ i \in \{1,\dots,d \}$ \\
2. For $k$ from 1 to number of iteration do \\
3. \quad For $l$ from 1 to $d$ do \\
4. \qquad $ \{ \psi_l^{(k)},\tau^{(k)} \}= \underset{\psi_l,\tau}{\mathrm{argmin}}(l_c(\psi_1^{(k)},\dots,\psi_{l-1}^{(k)},\psi_{l},\psi_{l+1}^{(k-1)},\dots,\psi_{d}^{(k-1)},\tau))$ \\
5. \quad End For \\
6. End For
\noindent $\tau$ is a parameter tuning the fidelity of the metamodel since for $\tau=0$ the kriging mean interpolates the data. In practice, this parameter is decreasing at almost each new estimation. Depending on the observations and on the DoE, $\tau$ converges either to a constant or to zero (cf. the g-function example and figure~\ref{fig:dim4pep}). When zero is not reached, $\tau^2$ should correspond to the part of the variance that cannot be explained by the additive model. Thus, the comparison between $\tau^2$ and the $\sigma_i^2$ allows us to quantify the degree of additivity of the objective function according to the metamodel.
\noindent This procedure of estimation is not meant to be applied for kernels that are not additive. The method developed by Welch for tensor product kernels in \cite{WELCH1992} has similarities since it corresponds to a sequential estimation of the parameters. One interesting feature of Welch's algorithm is to choose at each step the best search direction for the parameters. The RLM algorithm could easily be adapted in a similar way to improve the quality of the results but the corresponding adapted version would be much more time consuming.
\section{Comparison between the optimization methods} The aim of this section is to compare the RLM algorithm to the Usual Likelihood Maximization (ULM). The test functions that are considered are paths of an additive GP $Y$ with Gaussian additive kernel $K$. For this example, the parameters of $K$ are fixed to $\sigma_i=1$, $\theta_i=0.2$ for $i \in 1 \dots d$ but those values are supposed to be unknown.
\noindent Here, $2 \times d+1$ parameters have to be estimated: $d$ for the variances, $d$ for the range and 1 for the noise variance $\tau^2$. For ULM, they are estimated simultaneously, whereas the RLM is a 3-dimensional optimization at each step. In both cases, we use the \verb?L-BFGS-B? method of the function \verb?optim? with the \verb?R? software. To judge the effectiveness of the algorithms, we compare here the best value found for the log-likelihood $l$ to the computational budget (the number of calls to $l$) required for the optimization. As the \verb?optim? function returns the number $nc$ of calls to $l$ and the best value $bv$ at the end of each optimization, we obtain for the MLE on one path of $Y$ one value of $nc$ and $bv$ for ULM and $nb\_iteration \times d$ values of $nc$ and $bv$ for RLM since there is one optimization at each step of each iteration.
\noindent The panel (a) of figure~\ref{fig:COMP} presents the results for the two optimizations on a path of a GP for $d=5$. On this example, we can see that ULM needs 1500 calls to the log-likelihood before convergence whereas RLM requires many more calls before convergence. However, the results of the two methods are similar for 1500 calls but the result of RLM after 5000 calls is substantially improved. In order to get more robust results we simulate 20 paths of $Y$ and we observe the global distribution of the variables $nc$ and $bv$. Furthermore, we study the evolution of the algorithm performances when the dimension increases choosing various values for the parameter $d$ from 3 to 18 with a Latin Hypercube (LH) Design with maximin criteria~\cite{Santner2003} containing $10\times d$ points. We observe on the panels (b), (c) and (d) of figure~\ref{fig:COMP} that optimization with the RLM requires more calls to the function $l$, but this method appears to be more efficient and robust than ULM. Those results are stressed by figure~\ref{fig:COM2} where the final best values of RLM and ULM are compared. This figure also shows that the advantage of using RLM becomes greater as $d$ gets larger.
\begin{figure}\label{fig:COMP}
\end{figure}
\begin{figure}
\caption{Comparison of the final value for the log-likelihood with the two optimization algorithms for 20 paths of $Y$.}
\label{fig:COM2}
\end{figure}
\section{Application to the g-function of Sobol} In order to illustrate the methodology and to compare it to existing algorithms, an analytical test case is considered. The function to approximate is the g-function of Sobol defined over $[0,1]^d$ by \begin{equation}
g(\mathbf{x})= \prod_{k=1}^d \frac{|4x_k-2|+a_k}{1+a_k} \text{ with } a_k > 0 \label{eq:Gsob} \end{equation} This popular function in the literature \cite{Saltelli2000} is obviously not additive. However, depending on the coefficients $a_k$, $g$ can be very close to an additive function. As a rule, the g-function is all the more additive as the $a_k$ are large. One main advantage for our study is that the Sobol sensitivity indices can be obtained analytically so we can quantify the degree of additivity of the test function. For $i=1,\dots,d$ the index $S_i$ associated with the variable $x_i$ is \begin{equation} S_i = \frac{\frac{1}{3(1+a_i)^2}}{\left[\prod_{k=1}^{d}\left(1+\frac{1}{3(1+a_k)^2}\right)\right]-1}. \label{eq:SI} \end{equation}
\noindent Here we limit ourselves to the case $d=4$ and following \cite{Marrel2008} we choose $a_k = k$ for $k\in\{1,...,4\}$. For this combination of parameters, the sum of the first order Sobol indices is 0.95 so the g-function is almost additive. The considered DoE are LH maximin designs based on 40 points. To assess the quality of the obtained metamodels, the predictivity coefficient $Q_2$ is computed on a test sample of $n=1000$ points uniformly distributed over $[0,1]^4$. Its expression is: \begin{equation} Q_2(\mathbf{y},\hat{\mathbf{y}}) = 1 - \frac{\sum_{i=1}^{n}(y_i-\hat{y}_i)^2}{\sum_{i=1}^{n}(y_i-\bar{\mathbf{y}})^2} \label{eq:Q2} \end{equation} where $\mathbf{y}$ is the vector of the values at the test points, $\hat{\mathbf{y}}$ is the vector of predicted values and $\bar{\mathbf{y}}$ is the mean of $\mathbf{y}$.
\noindent We run on this example 5 iterations of the RLM algorithm with a Mat\'ern 3/2 kernel. The evolution of the estimated observation noise $\tau^2$ is represented on figure~\ref{fig:dim4pep}. On this figure, it appears that the observation noise is decreasing as the estimation of the parameters is improved. Here, the convergence of the algorithm is reached at iteration 4. The overall quality of the constructed metamodel is high since $Q_2=0.91$ and the final value for $\tau^2$ is 0.01.
\begin{figure}
\caption{Evolution of the observation noise on the 4-dimensional example}
\label{fig:dim4pep}
\end{figure}
\noindent As previously the expression of the univariate sub-metamodels is \begin{equation} m_i(x_i)=k_i(x_i)^T(\mathrm{K_1}+\mathrm{K_2}+\mathrm{K_3}+\mathrm{K_4})^{-1}\mathbf{Y} \end{equation} The univariate functions obtained are presented on figure~\ref{fig:dim4dir}. The confidence intervals are not represented here in order to enhance the readability of the graphics and the represented values are centered to ensure that the observations and the univariate functions are comparable.
\begin{figure}
\caption{$1$-dimensional projections of the observations (bullets) on the g-function example for $d=4$. The univariate models (solid lines) obtained after 5 iterations of RLM are very close to the analytical main effects (dashed lines).}
\label{fig:dim4dir}
\end{figure}
\noindent As the value of $Q_2$ is likely to fluctuate with the DoE and the optimization performances, we compare here the proposed RLM algorithm with other methods for 20 different LHS. The other methods used for the test are (a) additive kriging model with ULM, (b) kriging with usual tensor-product kernel, (c) the GAM algorithm. The results for classical kriging and GAM are obtained with the DiceKriging\footnote{As for RLM and ULM, DiceKriging also uses the BFGS algorithm for the likelihood maximization} \cite{Roustant2009} and the GAM packages for \verb?R? available on the CRAN~\cite{RR}. As the values of the $a_k$ are the same as in \cite{Marrel2008} where Marrel et al. present a specific algorithm for sequential parameter estimation in non-additive kriging models, the results of this paper are also presented as method (d). The mean and the standard deviation of the obtained $Q_2$ are gathered in table~\ref{tab:results}. \begin{table}[h!] \centering
\begin{tabular}{|lc|cc|} \hline Algorithm & kernel & $\mathrm{mean}(Q_2)$ & $\mathrm{sd}(Q_2)$ \\ \hline RLM & Additive Mat\'ern 3/2 & 0.90 & 0.016 \\ ULM & Additive Mat\'ern 3/2 & 0.88 & 0.037 \\ Standard Kriging & Mat\'ern 3/2 & 0.82 & 0.042 \\ GAM & (smoothing splines) & 0.90 & 0.021 \\ Marrel & power-exponential & 0.86 & 0.07 \\ \hline \end{tabular} \caption{$Q_2$ predictivity coefficients at a 1000-point test sample for the various methods.} \label{tab:results} \end{table}
\section{Concluding remarks} The proposed methodology seems to be a good challenger for additive modeling. On the example of the GP paths, the RLM appears to be more efficient than usual likelihood maximization and well suited for high dimensional modeling. On the second example, additive models benefit from the important additive component of the g-function and outperform non-additive models even if the function is not purely additive. The predictivity of the RLM is equivalent to that of GAM but its robustness is higher for this example.
\noindent One main difference between RLM and GAM backfitting is that RLM takes into account the estimated parameters into the covariance structure whereas GAM subtracts from the observation the predicted value for all the sub-models obtained in the other directions.
\noindent We would like to stress how the proposed metamodels take advantage of additivity, while benefiting from GP features. For the first point we can cite the complexity reduction and the interpretability. For the second, the main asset is that probabilistic metamodels provide the prediction variance. This justifies the fact of modeling an additive function on $\mathbb{R}^d$ instead of building $d$ metamodels over $\mathbb{R}$ since the prediction variance is not additive. We can also note the opportunity to choose a kernel adapted to the function to approximate.
\noindent In the end, the proposed methodology is fully compatible with Kriging-based methods and its versatile applications. For example, one can choose a well suited kernel for the function to approximate or use additive kriging for high-dimensional optimization strategies relying on the expected improvement criterion.
\appendix
\section*{Appendix A: Proof of proposition \ref{addproc} for $d=2$} Let $Z$ be a random process indexed by $\mathbb{R}^2$ with kernel $K(\mathbf{x},\mathbf{y})= K_1(x_1,y_1) + K_2(x_2,y_2)$, and $Z_T$ the random process defined by $Z_T(x_1,x_2)=Z(x_1,0)+Z(0,x_2)-Z(0,0)$. By construction, the paths of $Z_T$ are additive functions. In order to show the additivity of the paths of $Z$, we will show that $\forall x \in \mathbb{R}^2$, $\mathrm{P}(Z(\mathbf{x})=Z_T(\mathbf{x}))=1$. For the sake of simplicity, the three terms of $\mathrm{var}[Z(\mathbf{x})-Z_T(\mathbf{x})]=\mathrm{var}[Z(\mathbf{x})]+\mathrm{var}[Z_T(\mathbf{x})]-2 \mathrm{cov}[Z(\mathbf{x}),Z_T(\mathbf{x})]$ are studied separately: \begin{equation*} \mathrm{var}[Z(\mathbf{x})]=K(\mathbf{x},\mathbf{x}) \end{equation*}
\begin{equation*} \begin{split} \mathrm{var}[Z_T(\mathbf{x})] & = \mathrm{var}[Z(x_1,0)+Z(0,x_2)-Z(0,0)] \\ & = \mathrm{var}[Z(x_1,0)] + \mathrm{var}[Z(0,x_2)] + 2 \mathrm{cov}[Z(x_1,0),Z(0,x_2)] \\ & \qquad + \mathrm{var}[Z(0,0)] - 2 \mathrm{cov}[Z(x_1,0),Z(0,0)] - 2 \mathrm{cov}[Z(0,x_2),Z(0,0)] \\ & = K_1(x_1,x_1) + K_2(0,0) + K_1(0,0) + K_2(x_2,x_2) + K(0,0) \\ & \qquad + 2 \left( K_1(x_1,0) + K_2(0,x_2)\right) - 2 \left( K_1(x_1,0) + K_2(0,0) \right) \\ & \qquad - 2 \left( K_1(0,0) + K_2(x_2,0) \right) \\ & = K_1(x_1,x_1) + K_2(x_2,x_2) = K(\mathbf{x},\mathbf{x}) \end{split} \end{equation*}
\begin{equation*} \begin{split} \mathrm{cov}[Z(\mathbf{x}),Z_T(\mathbf{x})] & = \mathrm{cov}[Z(x_1,x_2),Z(x_1,0)+Z(0,x_2)-Z(0,0)] \\ & = K_1(x_1,x_1) + K_2(x_2,0) + K_1(x_1,0) + K_2(x_2,x_2) \\ & \qquad - K_1(x_1,0) - K_2(x_2,0) \\ & = K_1(x_1,x_1) + K_2(x_2,x_2) = K(\mathbf{x},\mathbf{x}) \end{split} \end{equation*}
Those three equations implies that $\mathrm{var}[Z(\mathbf{x})-Z_T(\mathbf{x})]=0$, $\forall{\mathbf{x}} \in \mathbb{R}^2$. Thus, $\mathrm{P}(Z(\mathbf{x})=Z_T(\mathbf{x}))=1$ and there exists a modification of $Z$ with additive paths.
\section*{Appendix B: Calculation of the prediction variance} Let us consider a DoE composed of the 3 points $\{\mathbf{x}^{(1)}\ \mathbf{x}^{(2)}\ \mathbf{x}^{(3)}\}$ represented on the left panel of figure~\ref{fig:planprob}. We want here to show that although $\mathbf{x}^{(4)}$ does not belong to the DoE we have $v(\mathbf{x}^{(4)})=0$. \begin{eqnarray*} v(\mathbf{x}^{(4)}) & = & K(\mathbf{x}^{(4)},\mathbf{x}^{(4)}) - k(\mathbf{x}^{(4)})^T \mathrm{K}^{-1} k(\mathbf{x}^{(4)}) \\ & = & K(\mathbf{x}^{(4)},\mathbf{x}^{(4)}) - (k(\mathbf{x}^{(2)})+k(\mathbf{x}^{(3)})-k(\mathbf{x}^{(1)}))^T \mathrm{K}^{-1} k(\mathbf{x}^{(4)}) \\ & = & K_1(\mathbf{x}^{(4)}_1,\mathbf{x}^{(4)}_1) + K_2(\mathbf{x}^{(4)}_2,\mathbf{x}^{(4)}_2) - \\ & & \quad(-1\ \ 1\ \ 1) \begin{pmatrix} K_1(\mathbf{x}^{(1)}_1,\mathbf{x}^{(4)}_1)+K_2(\mathbf{x}^{(1)}_2,\mathbf{x}^{(4)}_2) \\ K_1(\mathbf{x}^{(2)}_1,\mathbf{x}^{(4)}_1)+K_2(\mathbf{x}^{(2)}_2,\mathbf{x}^{(4)}_2) \\ K_1(\mathbf{x}^{(3)}_1,\mathbf{x}^{(4)}_1)+K_2(\mathbf{x}^{(3)}_2,\mathbf{x}^{(4)}_2) \end{pmatrix}
\\ & = & K_1(\mathbf{x}^{(2)}_1,\mathbf{x}^{(2)}_1) + K_2(\mathbf{x}^{(3)}_2,\mathbf{x}^{(3)}_2) - K_1(\mathbf{x}^{(2)}_1,\mathbf{x}^{(2)}_1) - K_2(\mathbf{x}^{(3)}_2,\mathbf{x}^{(3)}_2) \\ & = & 0 \end{eqnarray*}
\section*{Appendix C: Calculation of $v_i^*$} We want here to calculate the variance of $Z_i(x_i)-\int{Z_i(s_i)\mathrm{d} s_i}$ conditionally to the observations $Y$. \begin{equation*} \begin{split}
v_i^*(x_i)&=\mathrm{var} \left[ \left.Z_i(x_i)-\int{Z_i(s_i)\mathrm{d} s_i} \right| Z(X)=Y \right] \\
&=\mathrm{var} \left[ \left.Z_i(x_i) \right| Z(X)=Y \right] - 2\mathrm{cov} \left[ \left.Z_i(x_i),\int{Z_i(s_i)\mathrm{d} s_i} \right| Z(X)=Y \right] \\
& \hspace{5cm} + \mathrm{var} \left[ \left.\int{Z_i(s_i)\mathrm{d} s_i} \right| Z(X)=Y \right] \\ &= v_i(x_i) - 2 \left( \int{K_i(x_i,s_i)\mathrm{d} s_i} - \int{k_i(x_i)^T K^{-1} k_i(s_i) \mathrm{d} s_i} \right) \\ & \hspace{2cm} + \iint{K_i(s_i,t_i)\mathrm{d} s_i \mathrm{d} t_i} - \iint{k_i(t_i)^T K^{-1} k_i(s_i) \mathrm{d} s_i \mathrm{d} t_i} \end{split} \end{equation*}
\end{document} |
\begin{document}
\comment[Comment on `A scattering quantum circuit for measuring Bell's time inequality']{Comment on `A scattering quantum circuit for measuring Bell's time inequality: a nuclear magnetic resonance demonstration using maximally mixed states'}
\author{G~C~Knee$^1$, E~M~Gauger$^{2,1}$, G~A~D~Briggs$^1$, S~C~Benjamin$^{1,2}$}
\ead{george.knee@materials.ox.ac.uk} \address{$^1$ Department of Materials, University of Oxford, Parks Road, Oxford, OX1 3PH} \address{ $^2$ Centre for Quantum Technologies, National University of Singapore, 3 Science Drive 2, Singapore 117543}
\date{\today}
\begin{abstract} A recent paper by Souza, Oliveira and Sarthour (SOS) reports the experimental violation of a Leggett-Garg (LG) inequality (sometimes referred to as a temporal Bell inequality). The inequality tests for quantum mechanical superposition: if the inequality is violated, the dynamics cannot be explained by a large class of classical theories under the heading of macrorealism. Experimental tests of the LG inequality are beset by the difficulty of carrying out the necessary so-called `non-invasive' measurements (which for the macrorealist will extract information from a system of interest without disturbing it). SOS argue that they nevertheless achieve this difficult goal by putting the system in a maximally mixed state. The system then allegedly undergoes no perturbation during their experiment. Unfortunately the method is ultimately unconvincing to a skeptical macrorealist, and so the conclusions drawn by SOS are unjustified. \end{abstract} \submitto{\NJP} \maketitle
\section{Introduction}
As Souza, Oliveira and Sarthour (SOS) summarise \cite{SouzaOliveiraSarthour2011}, the Leggett-Garg (LG) test \cite{LeggettGarg1985} involves measuring two-time correlators $C_{k,m}:=\langle \mathcal{O}(t_k)\mathcal{O}(t_m)\rangle$ which quantify the average degree to which the observable $\mathcal{O}$ correlates with itself between time $t_k$ and time $t_m$. One may ensure the observable is dichotomic by defining $\mathcal{O}:=2|\psi_0\rangle\langle\psi_0|-\mathcal{I}$ for an initial state $|\psi_0\rangle$. Since this observable has eigenvalues $\pm1$, $C_{k,m}$ is easily obtained by measuring $\mathcal{O}$, waiting, and measuring $\mathcal{O}$ again. The outcomes of the measurement are then multiplied, and their product is averaged over many runs of the experiment. If several of these correlators are computed, one can construct e.g. \begin{equation} K=C_{1,2}+C_{2,3}-C_{1,3}. \end{equation} If the correlators are measured on many identical copies of the system, the assumptions of macrorealism and non-invasive measurability lead to a bound on this quantity: one can show that \begin{equation} K\leq1. \label{LGI} \end{equation} LG knew that this inequality can be violated by a quantum system if the three correlators are determined in separate experiments: the reason for this being that all measurements on quantum systems are subject to a trade-off between information gain and disturbance. A fully projective measurement of a two level quantum system can provide the maximum 1 classical bit of information, but also threatens the maximally disturbing effect of updating the quantum state of the system onto an eigenstate of the measurement observable, which may be far from the original quantum state. Any future evolution of the state proceeds in general from this post-measurement eigenstate and \emph{not} from the pre-measurement state, as it would have done if no measurement were performed. This effect is at the heart of the LG test. 
\section{Zero knowledge does not imply zero disturbance}
LG realized the importance of motivating the non-invasive measurability assumption. In contrast to a Bell inequality test, where one can arrange the measurements involved in the experiment at space-like intervals, this is impossible for the LG test. In the former case, the special theory of relativity provides a very strong reason to doubt that each measurement could have any influence on the other (due to the finite upper bound on the speed of a signal propagating between the two space-time locations concerned). In the latter case one cannot spatially separate the measurements, since they are applied to the same physical system. It is not obvious how to arrange the measurements so that a skeptical onlooker will not claim that they have disturbed the system, catastrophically corrupting the experimental data. Unless the assumption is convincingly motivated, the derivation of (\ref{LGI}) has little basis.
It is well established that the initial state of the system of interest is not relevant to the LG test; this was pointed out in the original paper \cite{LeggettGarg1985}. SOS are thus quite justified in preparing their system in the maximally mixed state for the purpose of testing the LG inequality. They are not, however, justified in claiming that this implies that all and any measurements made on this state are non-invasive in the sense that LG intended. The interpretation of a mixed state is clear for both quantum physics and classical physics, as it expresses incomplete knowledge about the state of a system. It is true that in quantum physics there is perhaps a richer interpretation of a mixed state: it is a probability distribution on the Hilbert space. There are a multitude of convex decompositions of a mixed state into pure quantum states. For classical physics it is a probability distribution on the classical state space. In either case the maximally mixed state represents zero information about the two-level system being investigated. In SOS's proposed quantum circuit, the state of the system remains a maximally mixed state throughout. This means that at all times there is zero information available about the state, so that the subjective description of the state will remain constant, although one suspects that the objective, physical state of affairs may be changing. In fact this is the case, if one computes the evolution of for example $|\psi_0\rangle=|0\rangle$ or $|1\rangle$ individually \emph{one finds that these states are indeed perturbed}, and moreover that they are perturbed in equal but opposite ways. We pose the question: how can our ignorance of the identity of the state (according to macrorealism it is either $|0\rangle$ or $|1\rangle$) mitigate the invasiveness of measurements?
To make this point more concrete, consider the following scenario. Alice flips a coin but is blindfolded. She ascribes the maximally mixed state to the coin, as there is an equal probability of it showing heads or tails. Now, while remaining blindfolded, Alice turns the coin over, effectively mapping heads into tails or tails into heads, depending on the physical state of the coin. This interaction with the coin is clearly potentially invasive (the coin may now behave differently to the case where no interaction had taken place), but still the state of the coin is the maximally mixed state. There is a very strong analogy between arguing in this scenario that the interaction is non-invasive, and SOS's argument that their circuit contains non-invasive measurements. This is our chief objection to SOS's approach.
The issue may be resolved in the way that LG suggest. One makes measurements \emph{which are convincingly non-invasive to a macrorealist}. Whether they are invasive according to quantum theory is irrelevant. The most convincing protocol known to us when viewed from a macrorealist viewpoint is the ideal-negative result measurement scheme espoused by LG and in particular Leggett \cite{LeggettGarg1985,Leggett1988}. These measurements directly exploit a macrorealist's belief that the system is in one state or the other at all times, and effectively measure the system without ever interacting with it. An alternative approach would be to experimentally determine an operational notion of measurement invasiveness through a series of control experiments: This approach is suggested by Wilde and Mizel~\cite{WildeMizel2011}. In both cases the quantum mechanical measurement induced disturbance is what gives rise to violation of the LG inequality. Other approaches do not require this disturbance and include for example taking on additional assumptions such as stationarity~\cite{HuelgaMarshallSantos1995}, or using a weak measurement scheme~\cite{GogginAlmeidaBarbieri2011,Palacios-LMalletNguyen2010}, which reduces the interaction strength between the system and the measuring device. These approaches may not require quantum mechanical back-action for a violation, but this is in contrast to Leggett and Garg's original proposal, and may therefore have different implications for the plausibility of macrorealist theories.
Can the experiment of SOS be adapted to include, for example, ideal negative result measurements? Possibly, but in order to be convincing the problems we describe in the following sections will have to be addressed.
\section{Detector efficiency insufficiency} As discussed in~\cite{KneeSimmonsGauger2012}, ideal negative result measurements can be carried out in a spin ensemble setting with a probe qubit; but this probe qubit must be initialized with high confidence. A nuclear spin at room temperature may appear to be well prepared in the pseudo-pure approximation, but is almost completely corrupted when one takes the whole ensemble into account. The decomposition of the state $\rho$ of the nuclear spin ensemble (which serves as SOS's experimental system) into a pure part $\rho_{pp}$ and a maximally mixed part $\mathcal{I}/2$ is given by \begin{equation} \rho=\epsilon\rho_{pp}+(1-\epsilon)\mathcal{I}/2. \end{equation} SOS claim ``Since $(1-\epsilon)\mathcal{I}/2$ is not observed, the probe qubit in such a mixed state produces the same result as would be observed if the probe qubit were in a pure state and the detection efficiency of the measurement apparatus were $\epsilon$.'' At thermal equilibrium $\epsilon=(1-\alpha)/(1+\alpha)$ with $\alpha=\textrm{exp}(-\mu_NB/kT)$. For typical values of temperature $T$ and magnetic field strength $B$, we find that $\epsilon< 10^{-7}$ (here $\mu_N$ is the magnetic moment of the probe nucleus and $k$ is Boltzmann's constant). Assuming the quoted assertion is correct, and $\epsilon$ can be interpreted as a detector efficiency, it is rather low~\cite{GargMermin1987}. Experiments with low efficiency detection can only be convincing if a `fair sampling hypothesis' is justified.
\section{The sampling is unfair} The fair sampling hypothesis can be stated as follows: If one only measures a fraction of the systems which one has prepared, the gathered statistics faithfully represent the entire ensemble. This may be warranted, for example, in the case of an experiment with photon loss: typically there is no reason to suspect that unobserved photons would have given different results to observed photons, had they indeed been detected. In nuclear magnetic resonance (NMR) the situation is different and the fair sampling assumption is patently false: It is generally accepted that the unobserved component of the nuclear spin ensemble behaves very differently to the measurable part. It is unobservable precisely because it generates a zero net magnetic field; the field from each spin is cancelled out by other spins in the `identity' component. If the unobserved spins behaved in the same way as the observed spins, they would become observable - giving a contradiction. When authoring a previous paper~\cite{SouzaMagalhaesTeles2008} (their Ref. 23) SOS and coauthors claim to have \emph{simulated} the violation of a Bell inequality with a room temperature NMR experiment - precisely because of the failure of the fair sampling hypothesis. In contrast, in the work under consideration here, despite the experimental system being the same, SOS do not regard their experiment as a simulation.
One way of overcoming these difficulties is to construct the total density matrix of a highly polarised spin ensemble for analysis with the LG inequality. An experimental violation was found using this method in Ref~\cite{KneeSimmonsGauger2012}. \section{Pitfalls of quantum circuits} We would like to point out another reason why performing `ideal negative result measurements' on NMR systems can be tricky. Controlled-NOT (or CNOT) gates, which flip the state of an ancillary system whenever the control system is in a particular state, can be used along with postselection to implement these special measurements. In the quantum circuit paradigm, they can be built by composing several other gates, some of which may be unconditional on the control system. \begin{figure}
\caption{ A circuit that might be suggested to test the LG inequality in a single experiment but which will fail to violate the bound on macrorealism despite the existence of coherent superpositions. Since the observable $\mathcal{O}=\sigma_z$, the controlled rotation is a controlled phase gate. At each of the instants $t_1,t_2,t_3$ two phase gates map information about $\mathcal{O}$ onto three ancillary qubits, and in the intervening times the system qubit is evolved according to $U(\theta)=\cos\theta\mathcal{I}+i\sin\theta\sigma_x$. Each of the ancillary qubits is measured to determine (after ensemble averaging) $C_{1,2}, C_{2,3},C_{1,3}$ respectively. The inset shows that the LG inequality will be obeyed for any value of $\theta$ (which is a function of $\tau:=t_3-t_2=t_2-t_1$).}
\label{fig1}
\end{figure}
Finally we note the final sentence of the paper: ``\ldots we would like to mention that the scattering quantum circuit presented here can be easily adapted to measure the three correlation functions simultaneously using more ancillary qubits''. We suspect that this may indeed be possible, but that a violation of the LG inequality is impossible in this case (see Figure \ref{fig1}). In the test as LG originally outlined it, it is necessary to measure each correlator in a separate run and not `simultaneously' (we take simultaneously here to mean `in a single run'). This is because there is no single evolution of a two-level system which is compatible with a violation of the LG inequality, whether the evolution be thought of as a classical two level system flipping from one of its states to the other, \emph{or} as a quantum system evolving under the continuous time evolution of the Schr\"odinger equation and the discontinuous back action of projective measurements. \section*{References}
\end{document} |
\begin{document}
\title[Koszul complexes and spectra] {Koszul complexes and spectra of projective hypersurfaces with isolated singularities} \dedicatory{To the memory of Egbert Brieskorn} \author{Alexandru Dimca} \address{Univ. Nice Sophia Antipolis, CNRS, LJAD, UMR 7351, 06100 Nice, France.} \email{dimca@unice.fr} \author{Morihiko Saito} \address{RIMS Kyoto University, Kyoto 606-8502 Japan} \email{msaito@kurims.kyoto-u.ac.jp} \begin{abstract} For a projective hypersurface with isolated singularities, we generalize some well-known results in the nonsingular case due to Griffiths, Scherk, Steenbrink, Varchenko, and others. They showed, for instance, a relation between the mixed Hodge structure on the vanishing cohomology and the Gauss-Manin system filtered by shifted Brieskorn lattices of a defining homogeneous polynomial by using the V-filtration of Kashiwara and Malgrange. Numerically this implied an identity between the Steenbrink spectrum and the Poincar\'e polynomial of the Milnor algebra. In our case, however, we have to replace these with the pole order spectrum and the alternating sum of the Poincar\'e series of certain subquotients of the Koszul cohomologies respectively, and then study the pole order spectral sequence which does not necessarily degenerate at $E_2$. This non-degeneration is closely related with the torsion of the Brieskorn module which vanished in the classical case. \end{abstract} \maketitle \centerline{\bf Introduction} \par
\noindent Let $f$ be a homogeneous polynomial in the graded ${\mathbf C}$-algebra $R:={\mathbf C}[x_1,\dots,x_n]$ where $\deg x_i=1$ and $n\geqslant 2$. Set $d=\deg f$. Consider the shifted Koszul complex $${}^s\!K^{\ssb}_f:=K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}[n]\quad\hbox{with}\quad K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}=(\Omega^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d}f\wedge).$$ Here $\Omega^j:=\Gamma({\mathbf C}^n,\Omega_{{\mathbf C}^n}^j)$ with $\Omega_{{\mathbf C}^n}^j$ algebraic so that the $\Omega^j$ are finite free graded $R$-modules, and the degree of $\Omega^j$ in ${}^s\!K^{\ssb}_f$ is shifted so that $${}^s\!K^j_f=\Omega^{j+n}(jd)\,\,\,\hbox{(i.e.}\,\,\,{}^s\!K^j_{f,k}=\Omega^{j+n}_{jd+k})\,\,\,\hbox{for}\,\,\,j\in{\mathbf Z}.$$ In general the shift of degree by $p$ of a graded module $M$ will be denoted by $M(p)$, where the latter is defined by $M(p)_k=M_{k+p}$. Since the dualizing complex for complexes of $R$-modules is given by $\Omega^n[n]$, we have the self-duality $${\mathbf D}({}^s\!K^{\ssb}_f):={\mathbf R}{\rm Hom}_R({}^s\!K^{\ssb}_f,\Omega^n[n])={}^s\!K^{\ssb}_f(nd).$$ \par
In this paper we assume $$\dim{\rm Sing}\,f^{-1}(0)\leqslant 1. \leqno(A)$$ It is well-known, and is easy to show (see e.g. Remark~(1.9)(iv) below) that this implies $$H^j({}^s\!K^{\ssb}_f)=0\quad\hbox{if}\,\,\,j\ne -1,0.$$ Define $$M:=H^0({}^s\!K^{\ssb}_f),\quad N:=H^{-1}({}^s\!K^{\ssb}_f).$$ Let ${\mathbf m}=(x_1,\dots,x_n)\subset R$, the maximal graded ideal. Set $$M':=H_{{\mathbf m}}^0M,\quad M'':=M/M'.$$ These are finitely generated graded $R$-modules having the decompositions $M=\bigoplus_k M_k$, etc. In the isolated singularity case we have $M''=N=0$, and $M=M'$. Generalizing a well-known assertion in the isolated singularity case, one may conjecture that the canonical morphism from $M'$ to the graded quotient of the pole order filtration on the Gauss-Manin system is injective, see Proposition~(3.5) below for a partial evidence. This is closely related with Question~2 and Remark~(5.9) below, see also \cite{Ba1}, \cite{Ba2}, etc. \par
Let $y:=\h{$\sum$}_{i=1}^n\,c_ix_i$ with $c_i\in{\mathbf C}$ sufficiently general so that $\{y=0\}\subset{\mathbf C}^n$ is transversal to any irreducible component of ${\rm Sing}\,f^{-1}(0)$. Then $M'$ is the $y$-torsion subgroup of $M$, and $M''$, $N$ are finitely generated free graded ${\mathbf C}[y]$-modules of rank $\tau_Z$, where $\tau_Z$ is the total Tjurina number as in (0.4) below. Note that there is a shift of the grading on $N$ by $d$ between this paper and \cite{DiSt1}, \cite{DiSt2}. \par
Define the (higher) dual graded $R$-modules by $$D_i(M):={\rm Ext}_R^{n-i}(M,\Omega^n)\quad(i\in{\mathbf Z}),$$ and similarly for $D_i(N)$, etc. From the above self-duality of the Koszul complex ${}^s\!K^{\ssb}_f$, we can deduce the following duality (which is known to the specialists at least by forgetting the grading, see \cite{Pe}, \cite{vStWa} and also \cite{EyMe}, \cite{Se}, etc.): \par
\noindent {\bf Theorem~1.} {\it There are canonical isomorphisms of graded $R$-modules $$\aligned D_0(M')=D_0(M)&=M'(nd),\\ D_1(M'')=D_1(M)&=N(nd),\\ D_1(N)&=M''(nd),\endaligned \leqno(0.1)$$ and $D_i(M)$, $D_i(M')$, $D_i(M'')$, $D_i(N)$ vanish for other $i$.} \par
This generalizes a well-known assertion in the isolated singularity case where $M''=N=0$. Theorem~1 implies that $M'$, $M''$ and $N$ are Cohen-Macaulay graded $R$-modules of dimension $0$, $1$ and $1$ respectively (but $M$ itself is not Cohen-Macaulay). Moreover $M'$ is graded self-dual, and $M''$ and $N$ are graded dual of each other, up to a shift of grading. \par
For $k\in{\mathbf Z}$, set $$\mu_k=\dim M_k,\quad\mu'_k=\dim M'_k,\quad\mu''_k=\dim M''_k,\quad \nu_k=\dim N_k.$$ Let $g:=\h{$\sum$}_{i=1}^nx_i^d$, and $\gamma_k:=\dim(H^nK_g^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k=\dim\bigl(\Omega^n/\,\h{$\sum$}_{i=1}^n\, x_i^{d-1}\Omega^n\bigr){}_k$, so that $$\h{$\sum$}_k\,\gamma_k\,t^k=(t^d-t)^n/(t-1)^n. \leqno(0.2)$$ (Here $g$ can be any homogeneous polynomial of degree $d$ with an isolated singular point.) It is known (see \cite{Di2}, \cite{DiSt1}, \cite{DiSt2}) that $$\mu_k=\mu'_k+\mu''_k=\nu_k+\gamma_k\quad(k\in{\mathbf Z}), \leqno(0.3)$$ since the Euler characteristic of a bounded complex is independent of its differential if the components of the complex are finite dimensional. \par
By the first assertion of (0.1) together with (1.1.4) for $i=1$ and by (0.2), we get the following symmetries: \par
\noindent {\bf Corollary~1.}\quad $\mu'_k=\mu'_{nd-k},\quad\gamma_k=\gamma_{nd-k}\quad(k\in{\mathbf Z})$. \par
Let $Z:=\{f=0\}\subset Y:={\mathbf P}^{n-1}$, and $\Sigma:={\rm Sing}\,Z$. The total Tjurina number $\tau_Z$ is defined by $$\tau_Z:=\h{$\sum$}_{z\in\Sigma}\,\tau_z\quad\hbox{with}\quad\tau_z:=\dim_{{\mathbf C}}{\mathcal O}_{Y,z}/(h_z,{\partial} h_z), \leqno(0.4)$$ where $h_z$ is a local defining equation of $Z$ at $z$, and ${\partial} h_z$ is the Jacobian ideal of $h_z$ generated by its partial derivatives. By Theorem~1, $M''$ and $N$ are Cohen-Macaulay, and are dual of each other up to a shift of grading. Combining this with the graded local duality (1.1.4) for $i=1$ (see \cite{BrHe}, \cite{Ei}, etc.) together with (1.9.3) below, we get the following. \par
\noindent {\bf Corollary~2.}\quad $\mu''_k+\nu_{nd-k}=\tau_Z\quad(k\in{\mathbf Z})$. \par
Here the calculation of the local cohomology in the local duality is not so trivial (see Remark~(1.7) below), and we can also use an exact sequence as in \cite[Prop.~2.1]{Sl} (see also \cite[Prop.~2.1.5]{Gro} and \cite{SaSl}, etc.). Note that Corollary~2 can also be deduced from Thm.~3.1 in \cite{Di2}, see Remark~(1.9)(i) below. By Corollaries~1 and 2 together with (0.3), we get the following. \par
\noindent {\bf Corollary~3.}\quad $\mu'_k=\mu_k+\mu_{nd-k}-\gamma_k-\tau_Z,\quad \mu''_k=\tau_Z-\mu_{nd-k}+\gamma_k\quad(k\in{\mathbf Z})$. \par
This means that $\mu'_k$ and $\mu''_k$ are essentially determined by $\mu_k$ and $\mu_{nd-k}$. Note that $\{\mu''_k\}$ and $\{\nu_k\}$ are weakly increasing sequences of non-negative integers. It is shown that $\{\mu'_k\}$ is log-concave in a certain case, see \cite{Sti}. Assuming ${\rm Sing}\,Z\ne\emptyset$, we have $\mu''_k=\nu_k=\tau_Z>0$ for $k\gg 0$, hence $M'',N$ are nonzero, although $M'$ may vanish, see Remark~(1.9)(iii) below. By Corollary~2 and (0.3) we get the following. \par
\noindent {\bf Corollary~4.}\quad$\gamma_k-\mu'_k=\mu''_k+\mu''_{nd-k}-\tau_Z\quad(k\in{\mathbf Z})$. \par
Here a fundamental question seems to be the following. \par
\noindent {\bf Question~1.} Are both sides of the above equality non-negative? \par
\noindent This seems to be closely related to the subject treated in \cite{ChDi}, \cite{Di2}, \cite{DiSt1}, \cite{DiSt2}, etc. We have a positive answer to Question~1 if $n=3$ and $\Sigma$ is a complete intersection in ${\mathbf P}^2$ (see \cite{Sti}) or if $f$ has type (I), where $f$ is called type (I) if the following condition is satisfied (and type (II) otherwise): $$\mu''_k=\tau_Z\,\,\,\hbox{for}\,\,\,k\geqslant nd/2,\quad\hbox{i.e.}\quad \nu_k=0\,\,\,\hbox{for}\,\,\,k\leqslant nd/2. \leqno(0.5)$$ By the definition of $N$, the last condition in (0.5) cannot hold if there is a nontrivial relation of very low degree between the partial derivatives of $f$, e.g. in case $f$ is a polynomial of $n-1$ variables (or close to it), see Remark~(2.9) below. However, it holds in relatively simple cases, including the nodal hypersurface case by \cite[Thm.~2.1]{DiSt2}, see Remark~(2.10) below. \par
In the type (I) case, we get the $\mu'_k$ by restricting to $k\leqslant nd/2$ (where $\mu'_k+\mu''_k=\mu_k=\gamma_k$ holds) if we know the $\mu''_k$. This can be done for instance in the following case. \par
\noindent {\bf Proposition~1.} {\it Assume $Z$ has only ordinary double points $z_1,\dots,z_{\tau_Z}$, and moreover the $z_i$ correspond to linearly independent vectors in ${\mathbf C}^n$ so that $\tau_Z=r\leqslant n$. Then $$\aligned\mu''_k&=\begin{cases}\,0&(\,k<n\,),\\ \,1&(\,k=n\,),\\ \tau_Z&(\,k>n\,),\end{cases}\quad\quad \nu_k=\begin{cases}\,0&(\,k<n(d-1)\,),\\ \tau_Z-1&(\,k=n(d-1)\,),\\ \tau_Z&(\,k>n(d-1)\,),\end{cases}\\ \mu'_k&=\begin{cases}\,0&(\,k\notin(n,n(d-1))\,),\\ \gamma_k-\tau_Z&(\,k\in(n,n(d-1))\,),\end{cases}\endaligned$$ where $(n,n(d-1))\subset{\mathbf R}$ denotes an open interval.} \par
This follows from Lemma~(2.1) below together with Corollary~2 and (0.3). It can also be deduced from the results in \cite{Di2}, and seems to be closely related with \cite[Thm.~2]{DiSaWo}. The situation becomes, however, rather complicated if the number of singular points is large, see \cite{ChDi}, \cite{Di2}, \cite{DiSt1}, \cite{DiSt2}. \par
Let ${\rm Sp}(f)=\sum_{\alpha}n_{f,\alpha}\,t^{\alpha}\in{\mathbf Q}[t^{1/d}]$ be the {\it Steenbrink spectrum} of $f$ (see \cite{St2}, \cite{St3}) which is normalized as in \cite{St2}. To study the relation with the Koszul cohomologies $M$, $N$ by generalizing the well-known assertion in the isolated singularity case where $M''=N=0$ and $M=M'$ (see \cite{St1} and also \cite{Gri}, \cite{SkSt}, \cite{Va}, etc.), we have to introduce the {\it pole order spectrum} ${\rm Sp}_P(f)$ by replacing the Hodge filtration $F$ with the pole order filtration $P$ in \cite{Di1}, \cite{Di3}, \cite{DiSa2}, \cite{DiSt1}. There are certain shifts of the exponents coming from the difference between $F$ and $P$. Here we have the inclusion $F\subset P$ in general, and the equality holds in certain cases (see \cite{Di3}). We can calculate these spectra explicitly in the case $n=2$, see Propositions~(3.3) and (3.4). The relation between the two spectra is, however, quite nontrivial in general (see for instance Example~(3.7) below). \par
The reason for which we introduce ${\rm Sp}_P(f)$ is that it is related with the Poincar\'e series of $M$, $N$ as follows: The differential of the de Rham complex $(\Omega^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})$ induces a morphism of graded ${\mathbf C}$-vector spaces of degree $-d:$ $${\rm d}^{(1)}:N\to M,$$ i.e. preserving the degree up to the shift by $-d$. Let $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ denote the Brieskorn module \cite{Bri} (in a generalized sense) which is a graded ${\mathbf C}$-module endowed with actions of $t$, $\dd_t^{-1}$, and $t{\partial}_t$, see (4.2) below. Let $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ be its $t$-torsion (or equivalently, $\dd_t^{-1}$-torsion) subspace. It has the kernel filtration $K_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ defined by $$K_i(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}:={\rm Ker}\,t^i\subset(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}\quad(i\geqslant 0). \leqno(0.6)$$ The main theorem of this paper is as follows: \par
\noindent {\bf Theorem~2.} {\it There are inductively defined morphisms of graded ${\mathbf C}$-vector spaces of degree $-rd:$ $${\rm d}^{(r)}:N^{(r)}\to M^{(r)}\quad(r\geqslant 2),$$ such that $N^{(r)}$, $M^{(r)}$ are the kernel and the cokernel of ${\rm d}^{(r-1)}$ respectively, and are independent of $r\gg 0$ $($that is, ${\rm d}^{(r)}=0$ for $r\gg 0)$, and we have $${\rm Sp}_P(f)=S(M^{(r)})(t^{1/d})-S(N^{(r)})(t^{1/d})\quad(r\gg 0), \leqno(0.7)$$ where $S(M^{(r)})(t)$, $S(N^{(r)})(t)$ denote the Poincar\'e series of $M^{(r)}$, $N^{(r)}$ for $r\geqslant 2$. \par
Moreover, there are canonical isomorphisms $${\rm Im}\,{\rm d}^{(r)}={\rm Gr}^K_{r-1}({\rm Coker}\,t)\quad(r\geqslant 2), \leqno(0.8)$$ where $K_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is the kernel filtration on $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$, and the right-hand side of $(0.8)$ is a subquotient of $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$. In particular, ${\rm d}^{(r)}$ vanishes for any $r\geqslant 2$ {\rm (}that is, $M^{(r)}=M^{(2)}$, $N^{(r)}=N^{(2)}$ for any $r\geqslant 2)$ if and only if $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is torsion-free.} \par
Note that ${\rm Ker}\,t^i$ in (0.6) and ${\rm Coker}\,t$ in (0.8) can be replaced respectively with ${\rm Ker}\,{\partial}_t^{-i}$ and ${\rm Coker}\,\dd_t^{-1}$ by using (4.2.2) below. For the proof of Theorem~2 we use the spectral sequence associated with the pole order filtration on the algebraic microlocal Gauss-Manin complex (see (4.4.4) below), and the morphisms ${\rm d}^{(r)}$ are induced by the differentials ${\rm d}_r$ of the spectral sequence. (We can also use the usual Gauss-Manin complex instead of the microlocal one.) The last two equivalent conditions in Theorem~2 are further equivalent to the $E_2$-degeneration of the (microlocal) pole order spectral sequence, see Corollary~(4.7) below (and also \cite{vSt}). Moreover $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ is finite dimensional if and only if $Z$ is analytic-locally defined by a weighted homogeneous polynomial at any singular point, see Theorems~(5.2) and (5.3) below. (In fact, the if part in the analytic local setting was shown in the second author's master thesis, see e.g. \cite[Thm.~3.2]{BaSa} and also \cite{vSt}.)
Here Theorem~(5.3) gives rather precise information about the kernel of ${\rm d}^{(1)}$. This is a refinement of \cite[Thm.~2.4(ii)]{DiSt1}, and is used in an essential way in \cite{DiSa3}. Theorem~(5.3) implies a sharp estimate for $\max\{k\,|\,\nu_k=0\}$ when $n=3$, see Corollary~(5.5) below. This assertion is used in an essential way in \cite{DiSe}, and is generalized to the case $n>3$ in \cite[Theorem~9]{DiSa3} (see \cite{Di4} for another approach to the case $n>3$). \par
In case $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}=0$, we can determine the pole order spectrum if we can calculate the morphism ${\rm d}^{(1)}:N\to M$, although the latter is not so easy in general unless the last conditions in Theorem~(5.3) are satisfied (see also Remark~(5.9) below). Note that the pole order spectral sequence was studied in \cite{vSt} from a slightly different view point in the (non-graded) analytic local case. \par
For the moment there are no examples such that the singularities of $Z$ are weighted homogeneous and $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}\ne0$. We have the following. \par
\noindent {\bf Question~2.} Assume all the singularities of $Z$ are weighted homogeneous. Then, is $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ torsion-free so that the pole order spectral sequence degenerates at $E_2$ and the equality (0.7) holds with $r=2$? \par
We have a positive answer in certain cases; for instance, if $n=2$ or $1$ is not an eigenvalue of $T_z^d$ for any $z\in{\rm Sing}\,Z$ where $T_z$ is the monodromy of a local defining polynomial $h_z$ of $(Z,z)$, see Corollary~(5.4) below for a more general condition. However, the problem is quite nontrivial in general. In the above second case, Theorem~(5.3) actually implies the injectivity of ${\rm d}^{(1)}:N\to M$ (which is a morphism of degree $-d$), and we get the following. \par
\noindent {\bf Theorem~3.} {\it If $(Z,z)$ is weighted homogeneous and $1$ is not an eigenvalue of $T_z^d$ for any $z\in{\rm Sing}\,Z$, then $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is torsion-free and we have} $${\rm Sp}_P(f)=S(M)(t^{1/d})-S(N)(t^{1/d})\,t^{-1}.$$ \par
Here the second condition is satisfied if $1$ is not an eigenvalue of $T_z$ and moreover the order of $T_z$ is prime to $d$ for any $z\in{\rm Sing}\,Z$. Note that the second assumption can be replaced with $H^{n-2}(f^{-1}(1),{\mathbf C})=0$ if Question~2 is positively solved in this case (see Remark~(5.9) below for a picture in the optimal case). \par
The first author is partially supported by Institut Universitaire de France. The second author is partially supported by Kakenhi 24540039. \par
In Section~1 we prove Theorem~1 after reviewing graded local duality for the convenience of the reader. In Section~2 we explain some methods to calculate the Koszul cohomologies in certain cases. In Section~3 we recall some basics from the theory of spectra, and prove Propositions~(3.3), (3.4), and (3.5). In Section~4 we prove Theorem~2 after reviewing some facts from Gauss-Manin systems and Brieskorn modules. In Section~5 we calculate ${\rm d}^{(1)}$ in certain cases, and prove Theorems~(5.2) and (5.3). \par
\par
\vbox{\centerline{\bf 1. Graded local cohomology and graded duality} \par
\noindent In this section we prove Theorem~1 after reviewing graded local duality for the convenience of the reader.} \par
\noindent {\bf 1.1.~Graded local duality.} Let $R={\mathbf C}[x_1,\dots,x_n]$, and ${\mathbf m}=(x_1,\dots,x_n)\subset R$. Set $$\Omega^k=\Gamma({\mathbf C}^n,\Omega_{{\mathbf C}^n}^k)\quad(k\in{\mathbf Z}). \leqno(1.1.1)$$ Here $\Omega_{{\mathbf C}^n}^k$ is algebraic, and $\Omega^k$ is a finite free graded $R$-module with $\deg x_i=\deg dx_i=1$. \par
For a bounded complex of finitely generated graded $R$-modules $M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, define $$\aligned&D_i(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}):={\rm Ext}_R^{n-i}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},\Omega^n)= H^{-i}\bigl({\mathbf D}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\bigr)\\ &\hbox{with}\quad{\mathbf D}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}):={\mathbf R}{\rm Hom}_R(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},\Omega^n[n]),\endaligned \leqno(1.1.2)$$ where ${\mathbf D}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})$ can be defined by taking a graded free resolution $P^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$. \par
For a finitely generated graded $R$-module $M$, set $$H_{{\mathbf m}}^0M:=\{a\in M\mid {\mathbf m}^ka=0\,\,\,\hbox{for}\,\,\,k\gg 0\}. \leqno(1.1.3)$$ Let $H^i_{{\mathbf m}}M$ be the cohomological right derived functors $(i\in{\mathbf N})$. These are defined by taking a graded injective resolution of $M$. We can calculate them by taking a graded free resolution of $M$ as is explained in textbooks of commutative algebra, see e.g. \cite{BrHe}, \cite{Ei}. Indeed, $H^i_{{\mathbf m}}R=0$ for $i\ne n$, and $$H^n_{{\mathbf m}}R={\mathbf C}[x_1^{-1},\dots,x_n^{-1}]\hbox{$\frac{1}{x_1\dots x_n}$},$$ where the right-hand side is identified with a quotient of the graded localization of $R$ by $x_1\cdots x_n$. We then get the {\it graded local duality} for finitely generated graded $R$-modules $M$: $$D_i(M)_k={\rm Hom}_{{\mathbf C}}((H^i_{{\mathbf m}}M)_{-k},{\mathbf C})\quad(k\in{\mathbf Z},\,i\geqslant 0), \leqno(1.1.4)$$ see loc.~cit. (Indeed, this can be reduced to the case $M=R$ by the above argument.) \par
\noindent {\bf Remarks~1.2.} (i) The functors $H^i_{{\mathbf m}}$ and $D_i$ are compatible with the corresponding functors for non-graded $R$-modules under the forgetful functor, and moreover, the latter functors are compatible with the corresponding sheaf-theoretic functors as is well-known in textbooks of algebraic geometry, see e.g. \cite{Ha}. However, the information of the grading is lost by passing to the corresponding sheaf unless we use a sheaf with ${\mathbf C}^*$-action. \par
(ii) If $M$ is a finitely generated graded $R$-module, then it is well-known that $$D_i(M)=0\quad\hbox{for}\,\,\,i<0. \leqno(1.2.1)$$ \par
\noindent {\bf 1.3.~Spectral sequences.} For a bounded complex of finitely generated graded $R$-modules $M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, we have a spectral sequence $$'\!E_2^{p,q}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})=D_{-p}(H^{-q}M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\Longrightarrow D_{-p-q}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}). \leqno(1.3.1)$$ This can be defined for instance by taking graded free resolutions of $H^iM^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ and ${\rm Im}\,d^{\,i}$ for $i\in{\mathbf Z}$, and then extending these to a graded free resolution of $M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ by using the short exact sequences $$0\to{\rm Im}\,d^{\,i-1}\to{\rm Ker}\,d^{\,i}\to H^iM^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to 0,\quad 0\to{\rm Ker}\,d^{\,i}\to M^i\to{\rm Im}\,d^{\,i}\to 0,$$ as is explained in classical books about spectral sequences. We can also construct (1.3.1) by using the filtration $\tau_{\leqslant -q}$ on $M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ as in \cite{De}. \par
Applying (1.3.1) to ${\mathbf D}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})$ and using ${\mathbf D}({\mathbf D}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}))=M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, we get $$''\!E_2^{p,q}(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})=D_{-p}(D_q(M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}))\Longrightarrow H^{p+q}M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}. \leqno(1.3.2)$$ \par
\noindent {\bf Lemma~1.4.} {\it Let ${\mathcal S}\hskip-.5pt h(M)$ denote the coherent sheaf on $X:={\mathbf C}^n$ corresponding to a finitely generated graded $R$-module $M$. Then we have the following equivalence.} $$\aligned H_{{\mathbf m}}^0M=M&\iff{\rm supp}\,{\mathcal S}\hskip-.5pt h(M)\subset\{0\}\\ &\iff\hbox{$M$ is finite dimensional over ${\mathbf C}$},\\ &\iff D_i(M)=0\,\,\,\hbox{for any}\,\,\,i\ne 0.\endaligned \leqno(1.4.1)$$ \par
\noindent {\it Proof.} This is almost trivial except possibly for the last equivalence. It can be shown by restricting to a sufficiently general point of the support of ${\mathcal S}\hskip-.5pt h(M)$ in case the support has positive dimension. Here we use the fact that the dual ${\mathbf D}({\mathcal S}\hskip-.5pt h(M))$ is compatible with the direct image under a closed embedding, and this follows from Grothendieck duality for closed embeddings as is well-known, see e.g. \cite{Ha}. This finishes the proof of Lemma~(1.4). \par
The following is well-known, see \cite{BrHe}, \cite{Ei}, etc. We note here a short proof for the convenience of the reader. \par
\noindent {\bf Proposition~1.5.} {\it Let $M$ be a finitely generated $R$-module. Set $m:=\dim{\rm supp}\,{\mathcal S}\hskip-.5pt h(M)$. Then} $$D_i(M)=0\,\,\,\hbox{for}\,\,\,i>m. \leqno(1.5.1)$$ \par
\noindent {\it Proof.} There is a complete intersection $Z$ of dimension $m$ in $X={\rm Spec}\,R$ such that $M$ is annihilated by the ideal $I_Z$ of $Z$, i.e. $M$ is an $R_Z$-module with $R_Z:=R/I_Z$, and $I_Z$ is generated by a regular sequence $(g_i)_{i\in[1,n-m]}$ of $R$ with $g_iM=0$. (Here $M$ is not assumed graded.) Set $$\omega_Z={\rm Ext}_R^{n-m}(R_Z,\Omega^n).$$ This is called the canonical (or dualizing) module of $Z$. We then get $$D_i(M)={\rm Ext}_{R_Z}^{-i}(M,\omega_Z[m]), \leqno(1.5.2)$$ by Grothendieck duality for the closed embedding $i_Z:Z\hookrightarrow X$, see e.g. \cite{Ha}, etc. In fact, taking an injective resolution $G$ of $\Omega^n[n]$, one can show (1.5.2) by using the canonical isomorphism $${\rm Hom}_{R_Z}(M,{\rm Hom}_R(R_Z,G))={\rm Hom}_R(M,G).$$ Since the right-hand side of (1.5.2) vanishes for $i>m$, the assertion follows. \par
\noindent {\bf Corollary~1.6.} {\it Let $M$ be a finitely generated graded $R$-module with $\dim{\rm supp}\,{\mathcal S}\hskip-.5pt h(M)=1$. Then we have a short exact sequence $$0\to D_0(D_0(M))\to M\to D_1(D_1(M))\to 0, \leqno(1.6.1)$$ together with} $$D_0(D_1(M))=0,\quad D_1(D_0(M))=0. \leqno(1.6.2)$$ \par
\noindent {\it Proof.} By Proposition~(1.5) we get $$''\!E_2^{p,q}(M)=0\quad\hbox{if}\quad(p,q)\notin[-1,0]\times[0,1].$$ So the spectral sequence (1.3.2) degenerates at $E_2$ in this case, and the assertion follows. \par
\noindent {\bf Remark~1.7.} Let $M$ be a graded $R$-module of dimension 1, i.e. $C:={\rm supp}\,{\mathcal S}\hskip-.5pt h(M)$ is one-dimensional. Let $I_M\subset R$ be the annihilator of $M$. Set $\RR:=R/I_M$. Let $y\in R$ be a general element of degree 1 whose restriction to any irreducible component of $C$ is nonzero. Set $R':={\mathbf C}[y]\subset R$. Let $\overline{\mm}$, ${\mathbf m}'$ be the maximal graded ideals of $\RR$, $R'$. Let $H^i_{(R,{\mathbf m})}M$ denote $H^i_{{\mathbf m}}M$, and similarly for $H^i_{(\RR,\overline{\mm})}M$, etc. (to avoid any confusion). There are canonical morphisms $$(R,{\mathbf m})\to(\RR,\overline{\mm})\leftarrow(R',{\mathbf m}'),$$ and they imply canonical morphisms $$H^i_{(R,{\mathbf m})}M\leftarrow H^i_{(\RR,\overline{\mm})}M\to H^i_{(R',{\mathbf m}')}M. \leqno(1.7.1)$$ Indeed, any graded injective resolution of $M$ over $\RR$ can be viewed as a quasi-isomorphism over $R$ or $R'$, and we can further take its graded injective resolution over $R$ or $R'$, which induces the above morphisms. \par
These morphisms are isomorphisms since they are isomorphisms by forgetting the grading as is well-known. (Note that the morphisms ${\rm Spec}\,R\leftarrow{\rm Spec}\,\RR\to {\rm Spec}\,R'$ are proper. Here it is also possible to use the graded local duality together with Grothendieck duality.) Using the long exact sequence associated with the local cohomology and the localization, we can show $$H^1_{(R',{\mathbf m}')}M=M[y^{-1}]/M. \leqno(1.7.2)$$ So we get the following canonical isomorphism (as graded $R'$-modules): $$H^1_{{\mathbf m}}M=M[y^{-1}]/M. \leqno(1.7.3)$$ This also follows from an exact sequence in \cite[Prop.~2.1]{Sl} (see also \cite[Prop.~2.1.5]{Gro} and \cite{SaSl}, etc.) \par
\noindent {\bf 1.8.~Proof of Theorem~1.} As is explained in the introduction, we have the self-duality $${\mathbf D}({}^s\!K^{\ssb}_f)={}^s\!K^{\ssb}_f(nd),$$ which implies the isomorphisms of graded $R$-modules $$D_i({}^s\!K^{\ssb}_f)=H^{-i}({}^s\!K^{\ssb}_f)(nd). \leqno(1.8.1)$$ Consider the spectral sequence (1.3.1) for $M^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}={}^s\!K^{\ssb}_f$. By Proposition~(1.5) applied to $M$, $N$, this degenerates at $E_2$. Combining this with (1.8.1), we thus get $$D_1(M)=N(nd),\quad D_0(N)=0, \leqno(1.8.2)$$ together with a short exact sequence $$0\to D_0(M)\to M(nd)\to D_1(N)\to 0. \leqno(1.8.3)$$ By (1.6.2) in Corollary~(1.6) and Proposition~(1.5) applied to $M$, $N$, the proof of Theorem~1 is then reduced to showing that (1.8.3) is naturally identified, up to the shift of grading by $nd$, with $$0\to M'\to M\to M''\to 0. \leqno(1.8.4)$$ For this, it is enough to show $$H^0_{{\mathbf m}}D_0(M)=D_0(M),\quad H^0_{{\mathbf m}}D_1(N)=0. \leqno(1.8.5)$$ However, the first equality is equivalent to the vanishing of $D_i(D_0(M))$ for $i\ne 0$ by Lemma~(1.4), and follows from (1.6.2) in Corollary~(1.6) together with Proposition~(1.5) applied to $D_0(M)$. The second equality follows for instance from the local duality (1.1.4) for $i=0$ together with (1.6.2) in Corollary~(1.6) applied to $N$. Thus (1.8.5) is proved. This finishes the proof of Theorem~1. \par
\noindent {\bf Remarks~1.9.} (i) Corollary~2 can also be deduced from \cite[Thm.~3.1]{Di2}. Indeed, by the argument in Section~2 in loc.~cit., we can deduce $${\rm def}_{k-n}\Sigma_f=\tau_Z -\mu_k'', \leqno(1.9.1)$$ where ${\rm def}_{k-n}\Sigma_f$ is as in loc.~cit. Moreover, Thm.~3.1 in loc.~cit.\ gives $${\rm def}_{k-n}\Sigma_f=\mu_{nd-k}-\gamma_{nd-k}=\nu_{nd-k}. \leqno(1.9.2)$$ So Corollary~2 follows. \par
(ii) It is well-known that $$\dim_{{\mathbf C}}M''_k=\dim_{{\mathbf C}}M_k=\tau_Z\quad\hbox{if}\,\,\,k\gg 0. \leqno(1.9.3)$$ Indeed, the first equality of (1.9.3) is trivial, and it is enough to show the last equality. Changing the coordinates, we may assume $x_n=y$, where $y$ is as in the introduction. On $\{x_n\ne 0\}\subset{\mathbf C}^n$, we have the coordinates $x'_1,\dots,x'_n$ defined by $x'_j=x_j/x_n$ for $j\ne n$, and $x'_n=x_n$. Using these, we have $f(x)=x_n^dh(x')$, where $x'=(x'_1,\dots,x'_{n-1})$. This implies that the restriction of ${\mathcal S}\hskip-.5pt h(M)$ to the generic point of an irreducible component of the support of $M$ corresponding to $z\in Z$ has rank $\tau_z$ in the notation of the introduction. So (1.9.3) follows. \par
(iii) Assume $\dim{\rm Sing}\,f^{-1}(0)=1$, i.e. $\Sigma={\rm Sing}\,Z\ne\emptyset$. Let $({\partial} f)\subset R$ denote the Jacobian ideal of $f$ (generated by the partial derivatives ${\partial} f/{\partial} x_i$ of $f$). Then the Jacobian ring $R/({\partial} f)$ (which is isomorphic to $M$ as a graded $R$-module up to a shift of grading) is a Cohen-Macaulay ring if and only if $M'=0$. Indeed, these are both equivalent to the condition that $M$ is a Cohen-Macaulay $R$-module (since $\tau_Z\ne 0$ and hence $M''\ne 0$). Here Grothendieck duality for closed embeddings is used to show the equivalence with the condition that $R/({\partial} f)$ is a Cohen-Macaulay ring. Note that $M'$ might vanish in general, for instance if $f$ is as in Example~(2.7) below or even in case $f=xyz$. \par
(iv) Assume $\bigcap_{i=1}^mg_i^{-1}(0) \subset{\mathbf C}^n$ has codimension $\geqslant r$, where $g_i\in R\,\,\,(i\in[1,m])$. Then there is a regular sequence $(h_j)_{j\in[1,r]}$ of $R$ with $h_j\in V:=\sum_{i=1}^m{\mathbf C}\,g_i$ by increasing induction on $r$ or $m$. This implies the vanishing of the cohomology of the Koszul complex: $$H^kK^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}(R;g_1,\dots,g_m)=0\quad(k<r),$$ by using the $m$-ple complex structure of the Koszul complex as is well-known (see Remark~(v) below). In fact, we can replace the basis $(g_i)$ of the vector space $V$ so that a different expression of the Koszul complex can be obtained. (However, it is not always possible to choose $h_j$ so that $\sum_{i=1}^mRg_i=\sum_{j=1}^rRh_j$ even if $\bigcap_{i=1}^mg_i^{-1}(0)$ has pure codimension $r$ unless $(g_i)$ is already a regular sequence, i.e. $r=m$.) \par
(v) For $g_i\in R\,\,(i\in[1,m])$, the Koszul complex $K^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}(R;g_1,\dots,g_m)$ can be identified with the associated single complex of the $m$-ple complex whose $(j_1,\dots,j_m)$-component is $R$ for $(j_1,\dots,j_m)\in[0,1]^m$, and $0$ otherwise, where its $i$-th differential ${\rm d}_i$ is defined by the multiplication by $g_i$ on $R$. \par
(vi) Theorem~1 holds with ${\rm d}f$ in the definition of the Koszul complex replaced by a 1-form $\omega=\sum_{i=1}^n g_idx_i$ if the $g_i$ are homogeneous polynomials of degree $d-1$ such that $\bigcap_ig_i^{-1}(0)\subset{\mathbf C}^n$ is at most 1-dimensional. See \cite{Pe}, \cite{vStWa} for the (non-graded) analytic local case. \par
\par
\vbox{\centerline{\bf 2. Calculation of the Koszul cohomologies} \par
\noindent In this section we explain some methods to calculate the Koszul cohomologies in certain cases.} \par
\noindent {\bf Lemma~2.1.} {\it Let $r$ be the dimension of the vector subspace of ${\mathbf C}^n$ generated by the one-dimensional vector subspaces corresponding to the singular points of $Z$. Then} $$\mu''_n=1,\quad\mu''_{n+1}\geqslant r.$$ \par
\noindent {\it Proof.} Let $\Sigma'$ be a subset of $\Sigma\,(={\rm Sing}\,Z)$ corresponding to $r$ linearly independent vectors of ${\mathbf C}^n$. Let $I_{\Sigma'}$ be the (reduced) graded ideal of $R$ corresponding to $\Sigma'$. There is a canonical surjection $$M\to\M:=\Omega^n/I_{\Sigma'}\,\Omega^n. \leqno(2.1.1)$$ The target is a free graded ${\mathbf C}[y]$-module of rank $r$, where $y$ is as in the introduction, and it has free homogeneous generators $w_i\,(i\in[1,r])$ with $\deg w_1=n$ and $\deg w_i=n+1$ for $i>1$. So the surjection (2.1.1) factors through $M''$, and the assertion follows. \par
\noindent {\bf Proposition~2.2.} {\it Let $f=f_1+f_2$ with $f_1\in{\mathbf C}[x_1,\dots,x_{n_1}]$, $f_2\in{\mathbf C}[x_{n_1+1},\dots,x_n]$ where $1<n_1<n-1$. Assume the dimensions of the singular loci of $f_1^{-1}(0)\subset{\mathbf C}^{n_1}$ and $f_2^{-1}(0)\subset{\mathbf C}^{n-n_1}$ are respectively $1$ and $0$. Then there are isomorphisms of graded $R$-modules $$M'=M'_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)},\quad M''=M''_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)},\quad N=N_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)},$$ and, setting $S(\mu):=\h{$\sum$}_k\,\mu_k\,t^k\in{\mathbf Z}[[t]]$, etc., we have the equalities $$S(\mu')=S(\mu'_{(1)})\,S(\mu'_{(2)}),\quad S(\mu'')=S(\mu''_{(1)})\,S(\mu'_{(2)}),\quad S(\nu)=S(\nu_{(1)})\,S(\mu'_{(2)}),$$ where $M'_{(i)}$, $M''_{(i)}$, $N_{(i)}$, and $\mu'_{(i),k}$, $\mu''_{(i),k}$, $\nu_{(i),k}\,\,(k\in{\mathbf Z})$ are defined for $f_i\,\,\,(i=1,2)$.} \par
\noindent {\it Proof.} Using the $n$-ple complex structure of the Koszul complex as in Remark~(1.9)(v), we get the canonical isomorphism $${}^s\!K^{\ssb}_f={}^s\!K^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_1}\otimes_{{\mathbf C}}{}^s\!K^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2},$$ where ${}^s\!K^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_1}$ is defined by using the subring ${\mathbf C}[x_1,\dots,x_{n_1}]$, and similarly for ${}^s\!K^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2}$. Since $f_2^{-1}(0)$ has an isolated singularity, $K^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2}$ is naturally quasi-isomorphic to $M'_{(2)}$. We get hence $$M=M_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)},\quad N=N_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)}.$$ Moreover, the freeness of $M''_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)}$ over ${\mathbf C}[y]$ can be shown by using an appropriate filtration of $M'_{(2)}$, where $y$ is as in the introduction. These imply that the following two short exact sequences are identified with each other: $$\aligned 0\to M'\to&M\to M''\to 0,\\ 0\to M'_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)}\to M_{(1)}&\h{$\otimes$}_{{\mathbf C}}M'_{(2)}\to M''_{(1)}\h{$\otimes$}_{{\mathbf C}}M'_{(2)}\to 0.\endaligned$$ So the assertion follows. \par
For the proof of Proposition~(2.6) below, we need the following lemma. Essentially this may be viewed as a special case of Prop.~13 in \cite{ChDi}, see Remark~(2.5) below. We note here a short proof of the lemma using Corollaries~1 and 2 and (0.3) for the convenience of the reader. \par
\noindent {\bf Lemma~2.3.} {\it Assume $n=2$. Let $r$ be the number of the irreducible components of $f^{-1}(0)\subset{\mathbf C}^2$. Then $\tau_Z=d-r$, and we have for $k\in{\mathbf Z}$
$$\aligned\mu'_k&=\max(r-1-|d-k|,0),\\ \mu''_k&=(k-1)_{[0,\tau_Z]},\\ \nu_k&=(k-d-r+1)_{[0,\tau_Z]},\endaligned \leqno(2.3.1)$$ where $x_{[\alpha,\beta]}$ for $x,\alpha,\beta\in{\mathbf Z}$ with $\alpha<\beta$ is defined by} $$x_{[\alpha,\beta]}=\begin{cases}\alpha&\hbox{if}\,\,\,x\leqslant\alpha,\\ x&\hbox{if}\,\,\,\alpha\leqslant x\leqslant\beta,\\ \beta&\hbox{if}\,\,\,\beta\leqslant x.\end{cases}$$ \par
\noindent {\it Proof.} We have the decomposition $$f=\h{$\prod$}_{i=1}^r\,g_i^{m_i},$$ with $\deg g_i=1$ and $m_i\geqslant 1$. For $z\in{\mathbf P}^1$ corresponding to $g_i^{-1}(0)\subset{\mathbf C}^2$, we have $$\tau_z=m_i-1,\quad\hbox{and hence}\quad\tau_Z=d-r.$$ Setting $$f':=\h{$\prod$}_{i=1}^r\,g_i^{m_i-1},$$ we get $$M''=\Omega^2/f'\,\Omega^2.$$ Indeed, the right-hand side is a quotient graded $R$-module of $M$, and is a free graded ${\mathbf C}[y]$-module of rank $\tau_Z$. Since $\deg f'=\tau_Z$, this implies $$\mu''_k=(k-1)_{[0,\tau_Z]}.$$ Using Corollary~2, we then get $$\nu_k=d-r-(2d-k-1)_{[0,\tau_Z]}=(k-d-r+1)_{[0,\tau_Z]}.$$ Here note that $$\nu_k=0\quad\hbox{if}\,\,\,k\leqslant d.$$ For $n=2$ and $k\leqslant d$, we have $$\gamma_k=\max(k-1,0).$$ By (0.3) we then get for $k\leqslant d$ $$\mu'_k=\gamma_k-\mu''_k=\max(k-1-\tau_Z,0).$$ The formula for $k\geqslant d$ follows by using the symmetry in Corollary~1. This finishes the proof of Lemma~(2.3). \par
By an easy calculation we see that Lemma~(2.3) is equivalent to the following. \par
\noindent {\bf Corollary~2.4.} {\it With the notation and the assumption of Lemma~$(2.3)$, we have $$\aligned S(\mu')&=S(1,r-1)\,S(d-r+1,d-1),\\ S(\mu'')&=S(1,\infty)\,S(1,d-r),\\ S(\nu)&=S(d+r-2,\infty)\,S(1,d-r),\endaligned \leqno(2.4.1)$$ where $S(\mu')$ is as in Proposition~$(2.2)$, and $S(a,b)$ for $a\in{\mathbf N}$, $b\in{\mathbf N}\cup\{\infty\}$ is defined by} $$S(a,b):=\h{$\sum$}_{k=a}^b\,t^k\in{\mathbf Z}[[t]]\,\,\,\hbox{if}\,\,\,a\leqslant b, \,\,\,\hbox{and}\,\,\,0\,\,\,\hbox{otherwise}. \leqno(2.4.2)$$ \par
\noindent {\bf Remark~2.5.} With the notation and the assumption of Corollary~(2.4), the following is shown in \cite[Example~14~(i)]{Di2} as a corollary of Prop.~13 (loc.~cit.) $$S(\mu)=t^2(1-2t^{d-1}+t^{d+r-2})/(1-t)^2. \leqno(2.5.1)$$ By Corollaries~2 and 3 together with (0.3), this is essentially equivalent to the equalities in (2.4.1). In fact, it seems rather easy to deduce (2.5.1) from (2.4.1). For the converse some calculation seems to be needed. (The details are left to the reader.) \par
In case $n_1=2$, we can calculate $\mu'_{(1),k}$, $\mu''_{(1),k}$, $\nu_{(1),k}$ for $f_1$ by Lemma~(2.3), and get the following. \par
\noindent {\bf Proposition~2.6.} {\it Assume $f=f_1+f_2$ as in Proposition~$(2.2)$ with $n_1=2$. Let $r$ be the number of the irreducible components of $f_1^{-1}(0)\subset{\mathbf C}^2$. Then, under the assumption of Proposition~$(2.2)$, we have $$\aligned S(\mu')&=S(1,r-1)\,S(d-r+1,d-1)\,S(1,d-1)^{n-2},\\ S(\mu'')&=S(1,\infty)\,S(1,d-r)\,S(1,d-1)^{n-2},\\ S(\nu)&=S(d+r-2,\infty)\,S(1,d-r)\,S(1,d-1)^{n-2},\endaligned$$ where $S(a,b)$ is as in $(2.4.2)$.} \par
\noindent {\it Proof.} The assertion follows from Corollary~(2.4) and Proposition~(2.2), since $S(\mu')$ in the isolated singularity case is invariant by $\mu$-constant deformation, and is given by (0.2). \par
\noindent {\bf Example~2.7.} Let $f$ be as in Theorem~1, and assume further $$f\in{\mathbf C}[x_1,\dots,x_{n-1}]\subset{\mathbf C}[x_1,\dots,x_n].$$ Then $f$ has an isolated singularity at the origin of ${\mathbf C}^{n-1}$. Set $$\gamma'_j:=\dim_{{\mathbf C}}\bigl(\Omega^{\prime\,n-1}/{\rm d}f\wedge\Omega^{\prime\,n-2} \bigr){}_j\quad\hbox{with}\quad\Omega^{\prime\,k}:=\Gamma({\mathbf C}^{n-1},\Omega_{{\mathbf C}^{n-1}}^k).$$ We have the symmetry $$\gamma'_j=\gamma'_{(n-1)d-j}. \leqno(2.7.1)$$ In this case, we have $M'=0$, and $$\mu_k=\mu''_k=\h{$\sum$}_{j\leqslant k-1}\,\gamma'_j,\quad \nu_k=\h{$\sum$}_{j\leqslant k-d}\,\gamma'_j=\h{$\sum$}_{j\geqslant nd-k}\,\gamma'_j, \leqno(2.7.2)$$ where the last equality follows from the symmetry (2.7.1), and Corollary~2 is verified directly in this case. \par
Equivalently, $\mu''_k=\mu_k$ and $\nu_k$ are given as follows: $$\aligned S(\mu)&=S(1,\infty)\,S(1,d-1)^{n-1},\\ S(\nu)&=S(d,\infty)\,S(1,d-1)^{n-1},\endaligned \leqno(2.7.3)$$ where $S(\mu)$, etc.\ are as in Proposition~(2.2), and the order of the coordinates is changed. \par
\noindent {\bf Example~2.8.} Assume $n,d\geqslant 3$. Let $$f=x_1^ax_2^{d-a}+\h{$\sum$}_{i=3}^n\,x_i^d\quad\hbox{with}\,\,\,0<a<d. \leqno(2.8.1)$$ We can apply Proposition~(2.6) to this example. More precisely, the calculation of $\mu'_k$, $\mu''_k$ and $\nu_k$ are reduced to the case $n=2$ by Proposition~(2.2), where $n_1=2$ and $$f_1=x_1^ax_2^{d-a},\quad f_2=\h{$\sum$}_{i=3}^n\,x_i^d.$$ The calculation for $f_1$ follows from Lemma~(2.3) or Corollary~(2.4) where $r=2$. For instance, we get in the notation of Proposition~(2.2) $$\mu'_{(1),k}=\begin{cases}1&\hbox{if}\,\,\,k=d,\\ 0&\hbox{if}\,\,\,k\ne d,\end{cases}$$ and hence $$\mu'_k=\mu'_{(2),k+d}=\gamma''_{k+d}\quad(k\in{\mathbf Z}),$$ where $\gamma''_k$ is as in (0.2) with $n$ replaced by $n-2$. By Proposition~(2.6), we have $$\aligned S(\mu')&=t^d\,S(1,d-1)^{n-2},\\ S(\mu'')&=S(1,\infty)\,S(1,d-2)\,S(1,d-1)^{n-2},\\ S(\nu)&=S(d,\infty)\,S(1,d-2)\,S(1,d-1)^{n-2},\endaligned \leqno(2.8.2)$$ where $S(\mu')$, etc.\ are as in Proposition~(2.2). \par
\noindent {\bf Remark~2.9.} If there is a nontrivial relation of degree $k\leqslant d-2$ among the partial derivatives $f_i:={\partial} f/{\partial} x_i$, i.e. if there are homogeneous polynomials $g_i$ of degree $k\leqslant d-2$ with $\h{$\sum$}_i\,g_if_i=0$ and $g_i\ne0$ for some $i$, then we have $$\nu_{d+n+k-1}\ne 0, \leqno(2.9.1)$$ and hence $$\hbox{Condition~(0.5) does not hold if $(n-2)(d-2)\geqslant 2(k+1)$.} \leqno(2.9.2)$$ Indeed, (2.9.1) follows from the definition $N:=H^{-1}({}^s\!K^{\ssb}_f)$ since $\deg f_i=d-1$. \par
This applies to $f$ in Example~(2.7) with $k=0$ since $f_n=0$, and to $f$ in Example~(2.8) with $k=1$ since $$(d-a)x_1\,f_1=ax_2\,f_2.$$ \par
\noindent {\bf Remark~2.10.} Condition~(0.5) holds in the nodal hypersurface case by \cite[Thm.~2.1]{DiSt2}. Indeed, it is shown there that $$\hbox{$\nu_k=0\,$ if $\,k\leqslant(n_1+1)d\,$ with $\,n\,$ even or $\,k\leqslant(n_1+1)d-1\,$ with $\,n\,$ odd,} \leqno(2.10.1)$$ where $n_1:=[(n-1)/2]$. (There is a difference in the grading on $N$ by $d$ between this paper and loc.~cit., and $n$ in this paper is $n+1$ in loc.~cit.) \par
\par
\vbox{\centerline{\bf 3. Spectrum} \par
\noindent In this section we recall some basics from the theory of spectra, and prove Propositions~(3.3), (3.4), and (3.5).} \par
\noindent {\bf 3.1.~Hodge and pole order filtrations.} Let $f$ be a homogeneous polynomial of $n$ variables with degree $d$. It is well-known that there is a ${\mathbf C}$-local system $L_k$ ($k\in[1,d]$) of rank 1 on $U:=Y\setminus Z$ such that $$H^j(U,L_k)=H^j(f^{-1}(1),{\mathbf C})_{\lambda}\quad\bigl(\lambda=\exp(-2\pi ik/d), \,k\in[1,d]\bigr), \leqno(3.1.1)$$ where $H^j(f^{-1}(1),{\mathbf C})_{\lambda}$ is the $\lambda$-eigenspace of the cohomology for the semisimple part of the monodromy, see e.g.\ \cite{Di1}, etc. (Note that monodromy in our paper means the one as a local system, see also \cite[Section 1.3]{BuSa}, etc.) Let ${\mathcal L}_k$ be the meromorphic extension of $L_k\h{$\otimes$}_{{\mathbf C}}{\mathcal O}_U$. This is a regular holonomic ${\mathcal D}_Y$-module, and $$H^j\bigl(Y,\Omega_Y^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}({\mathcal L}_k)\bigr)=H^j(f^{-1}(1),{\mathbf C})_{\lambda}\quad\bigl(\lambda=\exp(-2\pi ik/d), \,k\in[1,d]\bigr), \leqno(3.1.2)$$ where $\Omega_Y^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}({\mathcal L}_k)$ denotes the de Rham complex of ${\mathcal L}_k$. We have the Hodge and pole order filtrations $F_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ and $P_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ on ${\mathcal L}_k$ such that $$F_i\subset P_i, \leqno(3.1.3)$$ where the equality holds outside the singular points of $Z$, and $$P_i{\mathcal L}_k=\begin{cases}{\mathcal O}_Y(id+k)&\hbox{if}\,\,\,i\geqslant 0,\\ \,0&\hbox{if}\,\,\,i<0,\end{cases}$$ see e.g.\ \cite[Section 4.8]{Sa4}. (Note that $F$ comes from the Hodge filtration of a mixed Hodge module.) Set $F^i=F_{-i}$, $P^i=P_{-i}$. 
They induce the Hodge and pole order filtrations on $\Omega_Y^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}({\mathcal L}_k)$ such that the $j$-th components of $F^i\,\Omega_Y^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}({\mathcal L}_k)$, $P^i\,\Omega_Y^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}({\mathcal L}_k)$ are respectively given by $$\Omega_Y^j\h{$\otimes$}_{{\mathcal O}_Y}F^{i-j}{\mathcal L}_k,\quad \Omega_Y^j\h{$\otimes$}_{{\mathcal O}_Y}P^{i-j}{\mathcal L}_k.$$ By the isomorphism (3.1.2) they further induce the Hodge and pole order filtrations on the Milnor cohomology $H^j(f^{-1}(1),{\mathbf C})$. Here $F$ coincides with the Hodge filtration of the canonical mixed Hodge structure. By using the Bott vanishing theorem, $H^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\bigl(Y,P^i\,\Omega_Y^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}({\mathcal L}_k)\bigr)$ can be calculated by the complex whose $j$-th component is $$\Gamma(Y,\Omega_Y^j\h{$\otimes$}_{{\mathcal O}_Y}P^{i-j}{\mathcal L}_k)= \begin{cases}\Gamma\bigl(Y,\Omega_Y^j((j-i)d+k)\bigr)&\hbox{if}\,\,\,j\geqslant i,\\ 0&\hbox{if}\,\,\,j<i.\end{cases}$$ But it does not give a strict filtration, and it is not necessarily easy to calculate it. \par
Note that the pole order filtration coincides with the one defined by using the Gauss-Manin system, see (4.4.7) and (4.5.7) below. \par
\noindent {\bf 3.2.~Spectrum.} For $f$ as in (3.1), the spectrum ${\rm Sp}(f)=\h{$\sum$}_{\alpha\in{\mathbf Q}}\,n_{f,\alpha}\,t^{\alpha}$ is defined by $$\aligned n_{f,\alpha}:=\h{$\sum$}_j\,(-1)^{j-n+1}\dim{\rm Gr}^p_F \widetilde{H}^j(f^{-1}(1),{\mathbf C})_{\lambda}\\ \hbox{with}\quad p=\lfloor n-\alpha\rfloor,\,\,\lambda=\exp(-2\pi i\alpha),\endaligned \leqno(3.2.1)$$ (see \cite{St2}, \cite{St3}). Here $\widetilde{H}^j(f^{-1}(1),{\mathbf C})$ is the reduced cohomology, and we have by definition $$\lfloor\alpha\rfloor:=\max\{\,i\in{\mathbf Z}\mid i\leqslant\alpha\,\},\quad \lceil\alpha\rceil:=\min\{\,i\in{\mathbf Z}\mid i\geqslant\alpha\,\}\quad(\alpha\in{\mathbf R}). \leqno(3.2.2)$$ The {\it pole order spectrum} ${\rm Sp}_P(f)$ is defined by replacing $F$ with $P$. \par
For $j\in{\mathbf N}$, we define ${\rm Sp}^j(f)=\h{$\sum$}_{\alpha\in{\mathbf Q}}\,n^j_{f,\alpha}\,t^{\alpha}$ by $$\aligned n^j_{f,\alpha}:=\dim{\rm Gr}^p_F\widetilde{H}^{n-1-j}(f^{-1}(1),{\mathbf C})_{\lambda}\\ \hbox{with}\quad p=\lfloor n-\alpha\rfloor,\,\,\lambda=\exp(-2\pi i\alpha),\endaligned \leqno(3.2.3)$$ so that $${\rm Sp}(f)=\h{$\sum$}_j\,(-1)^j\,{\rm Sp}^j(f).$$ Similarly ${\rm Sp}^j_P(f)=\h{$\sum$}_{\alpha\in{\mathbf Q}}\,n^{P,j}_{f,\alpha}\,t^{\alpha}$ is defined by replacing $F$ with $P$. \par
Set $Z:=\{f=0\}\subset Y:={\mathbf P}^{n-1}$. Let $\pi:(\widetilde{Y},\widetilde{Z})\to(Y,Z)$ be an embedding resolution, and $E_i$ be the irreducible components of $\widetilde{Z}$ with $m_i$ the multiplicity of $\widetilde{Z}$ at the generic point of $E_i$. Let $\alpha=k/d+q\in(0,n)$ with $k\in[1,d]$, $q\in[0,n-1]$. We have by \cite[1.4.3]{BuSa} $$n^j_{f,\alpha}=\dim H^{q-j}\bigl(\widetilde{Y},\Omega_{\widetilde{Y}}^{n-1-q}(\log\widetilde{Z})\h{$\otimes$} _{{\mathcal O}}\,{\mathcal O}_{\widetilde{Y}}(-\ell\,\widetilde{H}+\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor)E_i\bigr), \leqno(3.2.4)$$ where $\ell:=d-k$, and $\widetilde{H}$ is the pull-back of a sufficiently general hyperplane $H$ of $Y$. \par
In a special case we get the following. \par
\noindent {\bf Proposition~3.3.} {\it Assume $n=2$. Set $e:={\rm GCD}(m_i)$ with $m_i$ the multiplicities of the irreducible factors of $f$. Then, for $\alpha=k/d+q\in(0,2)$ with $k\in[1,d]$, $q=0,1$, we have $$n^j_{f,\alpha}=\begin{cases}r-1+k-\h{$\sum$}_i\,\lceil km_i/d\rceil&\hbox{if}\,\,\,j=0,\,q=0,\\ \max\bigl(-k-1+\h{$\sum$}_i\,\lceil km_i/d\rceil,\,0\,\bigr)&\hbox{if}\,\,\,j=0,\,q=1,\\ 1&\hbox{if}\,\,\,j=1,\,q=1,\,ke/d\in{\mathbf Z},\\ 0&\hbox{otherwise},\end{cases} \leqno(3.3.1)$$ where $\lceil\alpha\rceil$ is as in $(3.2.2)$.} \par
\noindent {\it Proof.} We have $\Omega_{{\mathbf P}^1}^1(\log Z)={\mathcal O}_{{\mathbf P}^1}(r-2)$ with $\widetilde{Y}=Y={\mathbf P}^1$, $\widetilde{Z}=Z$, $\widetilde{H}=H$. Hence (3.2.4) in this case becomes $$n^j_{f,\alpha}=\begin{cases}\dim H^0\bigl({\mathbf P}^1,\Omega_{{\mathbf P}^1}^1(\log Z) (-\ell+\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor)\bigr)&\hbox{if}\,\,\,j=0,\,q=0,\\ \dim H^1\bigl({\mathbf P}^1,{\mathcal O}_{{\mathbf P}^1}(-\ell+\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor)\bigr)&\hbox{if}\,\,\,j=0,\,q=1,\\ \dim H^0\bigl({\mathbf P}^1,{\mathcal O}_{{\mathbf P}^1}(-\ell+\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor)\bigr)&\hbox{if}\,\,\,j=1,\,q=1,\\ 0&\hbox{otherwise},\end{cases}$$ and then $$n^j_{f,\alpha}=\begin{cases}r-1-\ell+\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor&\hbox{if}\,\,\,j=0,\,q=0,\\ \max\bigl(\ell-1-\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor,\,0\,\bigr)&\hbox{if}\,\,\,j=0,\,q=1,\\ \max\bigl(-\ell+1+\h{$\sum$}_i\,\lfloor\ell\,m_i/d\rfloor,\,0\,\bigr)&\hbox{if}\,\,\,j=1,\,q=1,\\ 0&\hbox{otherwise}.\end{cases} \leqno(3.3.2)$$ Since $\h{$\sum$}_i\,m_i=d$ and $e={\rm GCD}(m_i)$, we have $$\ell>\h{$\sum$}_i\,\lfloor\ell m_i/d\rfloor\iff\ell m_i/d\notin{\mathbf Z}\,\,(\exists\,i)\iff\ell e/d\notin{\mathbf Z}.$$ So (3.3.1) follows (since $\ell=d-k$). This finishes the proof of Proposition~(3.3). \par
We note here an application of Theorem~2, Theorem~(5.3) and Corollary~(5.4) below. (This will not be used in their proofs.) \par
\noindent {\bf Proposition~3.4.} {\it Assume $n=2$. Then ${\rm Sp}_P(f)={\rm Sp}_P^0(f)-{\rm Sp}_P^1(f)$ is given by $${\rm Sp}^j_P(f)=\begin{cases}\h{$\sum$}_k\,(\mu_k-\nu_{k+d})\,t^{k/d}+\bigl(\,t^{1/e}+\cdots+t^{(e-1)/e}\,\bigr)&\hbox{if}\,\,\,j=0,\\ t\,(\,t^{1/e}+\cdots+t^{(e-1)/e}\,)&\hbox{if}\,\,\,j=1,\end{cases} \leqno(3.4.1)$$ with $\mu_k$, $\nu_k$ explicitly expressed in Lemma~$(2.3)$, and $e={\rm GCD}(m_i)$ as in Proposition~$(3.3)$.} \par
\noindent {\it Proof.} The pole order spectral sequence degenerates at $E_2$ by Corollary~(5.4) below. So the assertion is shown in the case $e=1$, since the last condition implies that $\nu_k^{(2)}=0$. In the general case it is well-known that $$\widetilde{H}^0(f^{-1}(1),{\mathbf C})_{\lambda}=\begin{cases}{\mathbf C}&\hbox{if}\,\,\,\lambda^e=1\,\,\,\hbox{with}\,\,\,\lambda\ne 1,\\ \,0&\hbox{otherwise}\end{cases}. \leqno(3.4.2)$$ By using Theorem~(5.3) and Lemma~(2.3), this implies $$N_{k+d}^{(2)}=\begin{cases}{\mathbf C}&\hbox{if}\,\,\,k=i\,(d/e)\,\,\hbox{with}\,\,\,i\in\{1,\dots,e-1\},\\ \,0&\hbox{otherwise},\end{cases}\leqno(3.4.3)$$ where $N^{(2)}\subset N$ is the kernel of ${\rm d}^{(1)}$. This gives also the information of the coimage of ${\rm d}^{(1)}$ which is a morphism of degree $-d$. So the correction terms for ${\rm Sp}_P^0(f)$ and ${\rm Sp}_P^1(f)$ coming from the non-vanishing of ${\rm d}^{(1)}$ are given respectively by $$t^{1/e}+\cdots+t^{(e-1)/e}\quad\hbox{and}\quad t\,(\,t^{1/e}+\cdots+t^{(e-1)/e}\,).$$ So (3.4.1) follows. This finishes the proof of Proposition~(3.4). \par
\noindent {\bf Proposition~3.5.} {\it Assume $f=f_1+f_2$ as in Proposition~$(2.2)$ with $n_1=2$. Then, under the assumption of Proposition~$(2.2)$, we have $$\mu'_k\leqslant n^0_{f,k/d},\quad\mu'_k\leqslant n^{P,0}_{f,k/d}\quad(k\in{\mathbf Z}), \leqno(3.5.1)$$ where $n^0_{f,k/d}$, $n^{P,0}_{f,k/d}$ are as in $(3.2)$.} \par
\noindent {\it Proof.} The Thom-Sebastiani type theorem holds for ${\rm Sp}^0(f)$, ${\rm Sp}^0_P(f)$ under the assumption of Proposition~(2.2), see (4.9) below. So the assertion is reduced to the case $f=f_1$ with $n=2$. The assertion for ${\rm Sp}^0_P(f)$ then follows from Proposition~(3.4) and Lemma~(2.3), where we may assume $r\geqslant 2$ since $\mu'_k=0$ otherwise. By using Lemma~(2.3) and Proposition~(3.3) (more precisely, (3.3.2) for $q=0$ and (3.3.1) for $q=1$), the assertion for ${\rm Sp}^0(f)$ is reduced to the following trivial inequalities $$\aligned r-1-(d-k)\leqslant r-1-\ell+\h{$\sum$}_{i=1}^r\,\lfloor\ell\,m_i/d\rfloor&\quad(\ell\in[0,d-1],\,q=0),\\ r-1+d-(k+d)\leqslant -k-1+\h{$\sum$}_{i=1}^r\,\lceil km_i/d\rceil&\quad(k\in[1,d-1],\,q=1),\endaligned$$ where $\ell=d-k$. (Note that $k$ in Lemma~(2.3) is $k+d$ in the case $q=1$.) This finishes the proof of Proposition~(3.5). \par
\noindent {\bf Remarks~3.6.} (i) If $f$ has an isolated singularity, the equality holds in (3.5.1), and $S(\mu')$ (with $t$ replaced by $t^{1/d}$) coincides with the spectrum ${\rm Sp}(f)$, see \cite{St1} and also \cite{Gri}, \cite{SkSt}, \cite{Va}, etc. It would be interesting if (3.5.1) holds in a more general case. \par
(ii) Let $f$ be as in (3.1). Assume $Z\subset{\mathbf P}^{n-1}$ has only isolated singularities. Let $\alpha'_f$ be the minimum of the exponents of the spectrum for all the singularities of $Z$ (see also Corollary~(5.5) below). Then the multiplicity $n_{f,\alpha}$ of the spectrum ${\rm Sp}(f)$ for $\alpha=p/d<\min(\alpha'_f,1)$ can be given by $$n_{f,p/d}=\binom{p-1}{n-1}\quad(p/d<\min(\alpha'_f,1)).$$ This follows from a formula for multiplier ideals \cite[Prop.~1]{Sa4} together with \cite{Bu} (see also a remark before \cite[Cor.~1]{Sa4}). This equality holds also for the pole order spectrum since $\mu_p$ is at most the right-hand side of the equality and $F\subset P$ (and $\nu_p=0$ for $p<d$). \par
\noindent {\bf Example~3.7.} Let $f=(x^m+y^m)\,x^my^m$ ($m\geqslant 2$), where $d=3m$, $r=m+2$, $\tau_Z=2m-2$. For $\alpha=k/3m+q$ with $k\in[1,3m]$, $q=0,1$, we have by Proposition~(3.3) $$n_{f,\alpha}=\begin{cases}k+1-2\lceil k/3\rceil&\hbox{if}\,\,\,\alpha\in(0,1],\,\,\,q=0,\\ m-k-1+2\lceil k/3\rceil&\hbox{if}\,\,\,\alpha\in(1,2),\,\,\,q=1.\end{cases} \leqno(3.7.1)$$ In fact, $m_i=1$ for $i\in[1,m]$, and $m_i=m$ for $i=m+1,m+2$. Here $e=1$ in the notation of Proposition~(3.3), and hence ${\rm Sp}^1(f)=0$, ${\rm Sp}(f)={\rm Sp}^0(f)$ (similarly for ${\rm Sp}_P(f)$). \par
On the other hand, Lemma~(2.3) and Proposition~(3.4) imply that $$n^P_{f,k/3m}=\mu^{(2)}_k=\mu'_k+\mu''_k-\nu_{k+3m}=\begin{cases}0&(\,k\leqslant 1\,),\\ k-1&(\,1\leqslant k\leqslant m+1\,),\\ m&(\,m+1\leqslant k\leqslant 3m-1\,),\\ 4m+1-k\hbox{ }&(\,3m\leqslant k\leqslant 4m+1\,),\\ 0&(\,4m+1\leqslant k\,).\end{cases} \leqno(3.7.2)$$ (Note that $\mu^{(2)}_k+\mu^{(2)}_{k+3m}=m$ ($k\in[1,3m-1])$ and $\mu^{(2)}_{3m}=m+1$.) In fact, we have by Lemma~(2.3) $$\aligned\mu'_k&=\begin{cases}0&(\,k\leqslant 2m-1\,),\\ k-2m+1\hbox{ }&(\,2m-1\leqslant k\leqslant 3m\,),\\ 4m+1-k&(\,3m\leqslant k\leqslant 4m+1\,),\\ 0&(\,4m+1\leqslant k\,),\end{cases}\\ \mu''_k&=\begin{cases}0&(\,k\leqslant 1\,),\\ k-1&(\,1\leqslant k\leqslant 2m-1\,),\\ 2m-2\quad\quad\!\hbox{ }&(\,2m-1\leqslant k\,),\end{cases}\\ \nu_{k+3m}&=\begin{cases}0&(\,k\leqslant m+1\,),\\ k-m-1\,\,\,\hbox{ }&(\,m+1\leqslant k\leqslant 3m-1\,),\\ 2m-2&(\,3m-1\leqslant k\,).\end{cases}\endaligned$$ These formulas show that the relation between the Steenbrink spectrum ${\rm Sp}(f)$ and the pole order spectrum ${\rm Sp}_P(f)$ is rather complicated even for $n=2$ in general. \par
\par
\vbox{\centerline{\bf 4. Gauss-Manin systems and Brieskorn modules} \par
\noindent In this section we prove Theorem~2 after recalling some facts from Gauss-Manin systems and Brieskorn modules.} \par
\noindent {\bf 4.1.~Graded Gauss-Manin complexes.} Let $f$ be a homogeneous polynomial in $R$ with degree $d$. In the notation of (1.1), the graded Gauss-Manin complex $C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ associated with $f$ is defined by $$C_f^j:=\Omega^j[{\partial}_t]\quad(j\in{\mathbf Z}),$$ where ${\partial}_t$ has degree $-d$. This means that $$\Omega^j\,{\partial}_t^p=\Omega^j(pd),$$ where $(pd)$ denotes the shift of the grading as in the introduction. Its differential ${\rm d}$ is defined by $${\rm d}(\omega\,{\partial}_t^p)=({\rm d}\omega)\,{\partial}_t^p-({\rm d}f\wedge\omega)\, {\partial}_t^{p+1}\quad\hbox{for}\,\,\,\omega\in\Omega^k. \leqno(4.1.1)$$ where ${\rm d}\omega$ denotes the differential of the de Rham complex. It has a structure of a complex of ${\mathbf C}[t]\langle{\partial}_t\rangle$-modules defined by $$t(\omega\,{\partial}_t^p)=(f\omega)\,{\partial}_t^p-p\,\omega\,{\partial}_t^{p-1},\quad{\partial}_t (\omega\,{\partial}_t^p)=\omega\,{\partial}_t^{p+1}\quad\hbox{for}\,\,\,\omega\in\Omega^j. \leqno(4.1.2)$$ The Gauss-Manin systems are defined by the cohomology groups $H^jC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\,\,(j\in{\mathbf Z})$. These are regular holonomic graded ${\mathbf C}[t]\langle{\partial}_t\rangle$-modules. By the same argument as in \cite{BaSa}, we have $$\hbox{The action of ${\partial}_t$ on $H^jC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is bijective for $j\ne 1$.} \leqno(4.1.3)$$ \par
\noindent {\bf 4.2.~Brieskorn modules.} Let $(A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})$ be a graded subcomplex of the de Rham complex $(\Omega^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})$ defined by $$A_f^j:={\rm Ker}({\rm d}f\wedge:\Omega^j\to\Omega^{j+1}(d)).$$ The Brieskorn modules are graded ${\mathbf C}[t]\langle{\partial}_tt\rangle$-modules defined by its cohomology groups $$H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\quad(j\in{\mathbf Z}).$$ The actions of $t$, $\dd_t^{-1}$, ${\partial}_tt$ are respectively defined by the multiplication by $f$, $$\aligned\dd_t^{-1}[\omega]=[{\rm d}f\wedge\xi]\quad\hbox{with}\quad{\rm d}\xi=\omega,\\ {\partial}_tt\,[\omega]=[{\rm d}\eta]\quad\hbox{with}\quad{\rm d}f\wedge\eta=f\omega,\endaligned$$ where $[\omega]$ denotes the cohomology class, see \cite{Bri}, \cite{BaSa}, etc. (In case $j=1$, we have to choose a good $\xi$ for the action of $\dd_t^{-1}$, see \cite{BaSa}.) Moreover, we have $${\partial}_tt\,[\omega]=(k/d)[\omega]\quad\hbox{for}\quad[\omega]\in(H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k, \leqno(4.2.1)$$ where $(H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k$ denotes the degree $k$ part. (This follows from the definition by using the contraction with the Euler vector field $\xi:=\h{$\sum$}_i\,x_i\,{\partial}/{\partial} x_i$.) This implies $$t\,[\omega]=(k/d)\,\dd_t^{-1}[\omega]\quad\hbox{for}\quad[\omega]\in(H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k. \leqno(4.2.2)$$ Since $(H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k=0$ for $k\leqslant 0$, this implies that ${\rm Coker}\,t$ in Theorem~2 can be replaced with ${\rm Coker}\,\dd_t^{-1}$. \par
There is a natural inclusion $$A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\hookrightarrow C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}.$$ This is compatible with the actions of $t$, $\dd_t^{-1}$, ${\partial}_tt$ on the cohomology by definition. So (4.2.1) holds also for $\omega\in(H^jC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_j$, since the image of $H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ generates $H^jC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ over ${\mathbf C}[{\partial}_t]$. The last assertion is well-known in the analytic case, see e.g.\ \cite{BaSa}, and is reduced to this case by using the scalar extensions $$R\hookrightarrow{\mathbf C}\{x_1,\dots,x_n\},\quad {\mathbf C}[t]\hookrightarrow{\mathbf C}\{t\}.$$ \par
For $j\in{\mathbf Z}$, we then get $$H^{j+1}(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k=\begin{cases}H^j(f^{-1}(1),{\mathbf C})_{\lambda}& \hbox{if}\,\,\,k/d\notin{\mathbf Z}_{\leqslant 0},\\ \widetilde{H}^j(f^{-1}(1),{\mathbf C})_{\lambda}& \hbox{if}\,\,\,k/d\notin{\mathbf Z}_{\geqslant 1},\end{cases} \leqno(4.2.3)$$ in the notation of (3.2), where $\lambda=\exp(-2\pi ik/d)$, see also \cite{Di1}. \par
We have moreover $${\rm Ker}(H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^jC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})=(H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}, \leqno(4.2.4)$$ where the last term denotes the $t$-torsion subspace of $H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, which coincides with the $\dd_t^{-1}$-torsion, and is annihilated by ${\partial}_t^{-p}$ for $p\gg 0$, see \cite{BaSa}. \par
\noindent {\bf 4.3.~Relation with the Koszul cohomologies.} Set $$A_f^{\prime\,j}:={\rm d}f\wedge\Omega^{j-1}\buildrel\iota\over\hookrightarrow A_f^j\quad(j\in{\mathbf Z}). \leqno(4.3.1)$$ Using the short exact sequence of complexes $$0\to(A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})\to(\Omega^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})\to (A_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})[1]\to 0,$$ we get isomorphisms $${\partial}:H^jA_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\buildrel\sim\over\longrightarrow H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\quad(j\ne 1), \leqno(4.3.2)$$ together with a short exact sequence $$0\to{\mathbf C}\to H^1A_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^1A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to 0.$$ By (4.3.1) and (4.3.2), we get an action of $\dd_t^{-1}$ on $H^jA_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, $H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ defined respectively by $$\dd_t^{-1}:={\partial}^{-1}\,\raise.15ex\h{${\scriptstyle\circ}$}\, H^j\iota,\quad\dd_t^{-1}:=H^j\iota\,\raise.15ex\h{${\scriptstyle\circ}$}\,{\partial}^{-1} \quad(j\ne 1).$$ \par
Let $$(\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d}):=(A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}/A_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d}). \leqno(4.3.3)$$ The relation with the shifted Koszul complex $({}^s\!K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d}f\wedge)$ in the introduction is given by $$\K_f^{j+n}=H^j({}^s\!K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})(-jd)\quad(j\in[-n,0]).$$ By the short exact sequence of complexes $$0\to(A_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})\buildrel\iota\over\to(A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})\to(\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}},{\rm d})\to 0,$$ we get a long exact sequence $$\to H^{j-1}\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^jA_f^{\prime\,\raise.15ex\h{${\scriptscriptstyle\bullet}$}} \buildrel{\iota_j}\over\to H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^j\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to, \leqno(4.3.4)$$ where the middle morphism $\iota_j:=H^j\iota$ can be identified by (4.3.2) with $$\dd_t^{-1}:H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\,\,\,\hbox{if}\,\,\,j>1.$$ In particular we get for $j=n$ $$H^n\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}={\rm Coker}(\dd_t^{-1}:H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}). \leqno(4.3.5)$$ \par
By the above argument, the $\dd_t^{-1}$-torsion of $H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ contributes to $H^{j-1}\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, and we get in particular $$\hbox{$H^cA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is torsion-free if $c$ is the codimension of ${\rm Sing}\,f^{-1}(0)\subset{\mathbf C}^n$.} \leqno(4.3.6)$$ Note that $c=n-1$ under the assumption of the introduction. By Theorems~(5.2) and (5.3) below, the $\dd_t^{-1}$-torsion of $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is finite dimensional if and only if all the singularities of $Z$ are weighted homogeneous. \par
\noindent {\bf 4.4.~Filtrations $P'$ and $G$.} There are two filtrations $P'$, $G$ on $C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ defined by $$\aligned P'_p\,C_f^k&:=\h{$\bigoplus$}_{i\leqslant k+p}\,\Omega^k\,{\partial}_t^i,\\ G_p\,C_f^k&:=\bigl(\h{$\bigoplus$}_{i<p}\,\Omega^k\,{\partial}_t^i\bigr)\oplus A_f^p\,{\partial}_t^p. \endaligned \leqno(4.4.1)$$ These are exhaustive increasing filtrations. Set $P^{\prime\,p}=P'_{-p}$, $G^p=G_{-p}$. By definition, we have $${\rm Gr}^p_{P'}C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}=\sigma_{\geqslant p}\bigl(K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}((n-p)d)\bigr), \leqno(4.4.2)$$ see \cite{De} for the truncation $\sigma_{\geqslant p}$. Let ${\rm Dec}\,P'$ be as in loc.~cit. Then we have $$G={\rm Dec}\,P'. \leqno(4.4.3)$$ Since the differential of $C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ respects the grading, we have the pole order spectral sequence in the category of graded ${\mathbf C}$-vector spaces $$_{P'}E_1^{p,j-p}=H^j{\rm Gr}^p_{P'}C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\Longrightarrow H^jC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}, \leqno(4.4.4)$$ with $$_{P'}E_1^{p,j-p}=\begin{cases}\,0&\hbox{if}\,\,\,j<p,\\ A_f^p&\hbox{if}\,\,\,j=p,\\ H^jK_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}((n-p)d)&\hbox{if}\,\,\,j>p,\end{cases} \leqno(4.4.5)$$ $$_{P'}E_2^{p,j-p}=\begin{cases}\,0&\hbox{if}\,\,\,j<p,\\ H^pA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}&\hbox{if}\,\,\,j=p,\\ H^j\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}((j-p)d)&\hbox{if}\,\,\,j>p,\end{cases} \leqno(4.4.6)$$ where $\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is as in (4.3.3). \par
Note that the degeneration at $E_2$ of the pole order spectral sequence is equivalent to the strictness of ${\rm Dec}\,P'$ by \cite{De}, and the latter condition is equivalent to the torsion-freeness of the $H^jA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ by using (4.2.4) and (4.4.3). The obtained equivalence seems to be known to the specialists (see e.g.\ \cite{vSt}), and the above argument may simplify some argument in loc.~cit. \par
By the isomorphism (4.2.3) for $k\in[1,d]$, the filtration $P'$ on the left-hand side of (4.2.3) induces a filtration $P'$ on the right-hand side. This corresponds to the filtration $P$ by the isomorphism (3.1.2) up to the shift of the filtration by 1, and we get the isomorphisms $$P^{\prime\,p+1}H^{j+1}(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k\cong P^pH^j(f^{-1}(1),{\mathbf C})_{\lambda}\quad\bigl(\lambda=\exp(-2\pi ik/d),\,k\in[1,d]\bigr), \leqno(4.4.7)$$ see \cite[Ch.~6, Thm.~2.9]{Di1} (and also \cite[Section 1.8]{DiSa2} in case $j=n-1$). By (3.1.3), we have the inclusions $$F^p\subset P^p\quad\hbox{on}\,\,\,\,H^j(f^{-1}(1),{\mathbf C})_{\lambda}, \leqno(4.4.8)$$ Here it is possible to show (4.4.8) by calculating the direct image of $({\mathcal O}_X,F)$ by $f$ as a filtered ${\mathcal D}$-module underlying a mixed Hodge module, see \cite{Sa1}, \cite{Sa2}, where a compactification of $f$ must be used. (The shift of the filtration by 1 comes from the direct image of ${\mathcal O}_X$ as a {\it left} ${\mathcal D}$-module by the graph embedding of $f$.) \par
The inclusion (4.4.8) implies some relation between the spectrum and the Poincar\'e series of the Koszul cohomologies via the spectral sequence (4.4.4), and the difference between $F^p$ and $P^p$ implies also their difference in certain cases, see also \cite{Di1}, \cite{Di3}, \cite{DiSt1}, etc. \par
\noindent {\bf 4.5.~Algebraic microlocal Gauss-Manin complexes.} For a homogeneous polynomial $f$, let $\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f$ be the algebraic microlocal Gauss-Manin complex (i.e. $\widetilde{C}_f^j=\Omega^j[{\partial}_t,\dd_t^{-1}]$). The algebraic microlocal Gauss-Manin systems $H^j\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f$ are free graded ${\mathbf C}[{\partial}_t,\dd_t^{-1}]$-modules of finite type. Replacing $C^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f$ with $\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f$ in (4.4.1) and (4.4.4), we have the filtrations $P'$, $G$ on $\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f$ together with the microlocal pole order spectral sequence $$_{P'}\widetilde{E}_1^{p,j-p}=H^j{\rm Gr}^p_{P'}\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\Longrightarrow H^j\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}, \leqno(4.5.1)$$ where (4.4.3) holds again (i.e. $G={\rm Dec}\,P'$), and the last equalities of (4.4.5) and (4.4.6) hold for any $j,p\in{\mathbf Z}$, i.e. $${}_{P'}\widetilde{E}_r^{p,j-p}=\begin{cases}H^jK_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}((n-p)d)&\hbox{if}\,\,\,r=1,\\ H^j\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}((j-p)d)&\hbox{if}\,\,\,r=2.\end{cases} \leqno(4.5.2)$$ Moreover the last equality of (4.2.3) holds for any $k$, i.e. $$H^{j+1}(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k=\widetilde{H}^j(f^{-1}(1),{\mathbf C})_{\lambda}\quad\hbox{with}\quad\lambda=\exp(-2\pi ik/d), \leqno(4.5.3)$$ (Note that the Gauss-Manin complex $C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ can be defined also as the single complex associated with the double complex having two differentials ${\rm d}$ and ${\rm d}f\wedge$, see \cite{Di1}, \cite{Di3}, etc.) \par
Let $P',G$ denote also the induced filtrations on $H^j(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})$, $H^j(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})$. There is a canonical inclusion $$C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\hookrightarrow\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}.$$ Set $$\omega_0:={\rm d}f\in H^1(G_0C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\,(=H^1A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}).$$ By the same argument as in \cite{BaSa}, it generates a free ${\mathbf C}[t]$-module for $p\in{\mathbf N}\cup\{\infty\}$ $${\mathbf C}[t]\omega_0\subset H^1(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}),$$ where $G_{\infty}C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}:=C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$. Set $$\widetilde{H}^j(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})=\begin{cases}H^j(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})&\hbox{if}\,\,\,j\ne 1,\\ H^j(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})/{\mathbf C}[t]\omega_0&\hbox{if}\,\,\,j=1.\end{cases}$$ Then the above inclusion induces the canonical isomorphisms $$\widetilde{H}^j(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\buildrel\sim\over\longrightarrow H^j(G_p\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\quad(p\in{\mathbf N}\cup\{\infty\},\,\,j\in{\mathbf Z}). \leqno(4.5.4)$$ In fact, the assertion for $p=\infty$ follows from the same argument as in loc.~cit. This implies the assertion for $p\in{\mathbf N}$ by using the canonical morphism of long exact sequences $$\begin{CD}@>>>\widetilde{H}^j(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})@>>>\widetilde{H}^j(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})@>>> H^j(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}/G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})@>>>\\
@. @VVV @VVV @|\\ @>>>H^j(G_p\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})@>>>H^j(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})@>>> H^j(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}/G_p\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})@>>>\end{CD}$$ \par
From the canonical isomorphisms (4.5.4), we can deduce $$G_p\widetilde{H}^j(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\buildrel\sim\over\longrightarrow G_pH^j(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})={\partial}_t^pG_0H^j(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})\quad(p\in{\mathbf N},\,\,j\in{\mathbf Z}). \leqno(4.5.5)$$ This implies $${\partial}_t:{\rm Gr}^G_p\widetilde{H}^j(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k\buildrel\sim\over\longrightarrow{\rm Gr}^G_{p+1}\widetilde{H}^j(C_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{k-d}\quad(p\in{\mathbf N},\,\,j,k\in{\mathbf Z}). \leqno(4.5.6)$$ Note that these hold with $G$ replaced by $P'$ by (4.4.3). We then get by (4.4.7) $$P^{\prime\,p+1}H^{j+1}(\widetilde{C}_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_k\cong P^p\widetilde{H}^j(f^{-1}(1),{\mathbf C})_{\lambda}\quad\bigl(\lambda=\exp(-2\pi ik/d),\,k\in[1,d]\bigr), \leqno(4.5.7)$$ \par
\noindent {\bf Proposition~4.6.} {\it With the notation of $(4.4)$ and $(4.5)$, there are canonical isomorphisms for $r\geqslant 2$ $$\aligned{\rm Im}({\rm d}_r:{}_{P'}E_r^{\,p-r,n-p+r-1}\to{}_{P'}E_r^{\,p,n-p})&=\begin{cases}\,0&\hbox{if}\,\,\,p>n,\\{\rm Gr}^K_{r-1}(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}&\hbox{if}\,\,\,p=n,\\ {\rm Gr}^K_{r-1}({\rm Coker}\,\dd_t^{-1})((n-p)d)&\hbox{if}\,\,\,p<n,\end{cases}\\ {\rm Im}({\rm d}_r:{}_{P'}\widetilde{E}_r^{\,p-r,n-p+r-1}\to{}_{P'}\widetilde{E}_r^{\,p,n-p})&={\rm Gr}^K_{r-1}({\rm Coker}\,\dd_t^{-1})((n-p)d),\endaligned$$ where $K_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is the kernel filtration, and ${\rm Coker}\,\dd_t^{-1}$ is a quotient of $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ as in Theorem~$2$.} \par
\noindent {\it Proof.} We first show the assertion for the microlocal pole order spectral sequence, i.e. for the second isomorphism. Since ${}_{P'}\widetilde{E}_r^{p,j-p}=0$ for $j>n$, the images of the differentials $${\rm d}_r:{}_{P'}\widetilde{E}_r^{\,p-r,n-p+r-1}\to{}_{P'}\widetilde{E}_r^{\,p,n-p}\,\,\,(r\geqslant 2)$$ correspond to an increasing sequence of subspaces (with $p$ fixed): $$\widetilde{I}_r^{\,p,n-p}\subset{}_{P'}\widetilde{E}_2^{\,p,n-p}=({\rm Coker}\,\dd_t^{-1})((n-p)d)\,\,\,(r\geqslant 2), \leqno(4.6.1)$$ such that $${\rm Im}\bigl({\rm d}_r:{}_{P'}\widetilde{E}_r^{\,p-r,n-p+r-1}\to{}_{P'}\widetilde{E}_r^{\,p,n-p}\bigr)=\widetilde{I}_r^{\,p,n-p}/\widetilde{I}_{r-1}^{\,p,n-p}\,\,\,(r\geqslant 2),$$ with $\widetilde{I}_1^{\,p,n-p}:=0$. Here ${\rm Coker}\,\dd_t^{-1}$ is a quotient of $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ (and not $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor})$, and (4.3.5) is used for the last isomorphism of (4.6.1). \par
By the construction of the spectral sequence (see e.g. \cite{De}), we have $$\widetilde{I}_r^{\,p,n-p}=K_{r-1}({\rm Coker}\,\dd_t^{-1})((n-p)d), \leqno(4.6.2)$$ where $K_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is the kernel filtration defined just before Theorem~2. (More precisely, $K_{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ defines a non-exhaustive filtration of $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$, and its union is $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$.) In fact, the left-hand side is given by the classes of $\omega\in\Omega^n$ such that there are $$\eta_i\in\Omega^n\,\,(i\in[0,r-1])$$ satisfying $$d\eta_0=\omega,\quad d\eta_{i+1}=df\wedge\eta_i\,(i\in[0,r-2]),\quad df\wedge\eta_{r-1}=0.$$ However, this condition is equivalent to the condition that the class of $\omega$ in the Brieskorn module is contained in $K_{r-1}(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$. (Note that $[df\wedge\eta_{r-2}]$ gives ${\partial}_t^{1-r}[\omega]$ and vanishes in $H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$.) So the second isomorphism follows. \par
The argument is essentially the same for the first isomorphism by replacing (4.6.2) with $$I_r^{\,p,n-p}=\begin{cases}\,0&\hbox{if}\,\,\,p>n,\\ K_{r-1}(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}&\hbox{if}\,\,\,p=n,\\ K_{r-1}({\rm Coker}\,\dd_t^{-1})((n-p)d)&\hbox{if}\,\,\,p<n.\end{cases}$$ This finishes the proof of Proposition~(4.6). \par
As a corollary of Proposition~(4.6), we get the following. \par
\noindent {\bf Corollary~4.7.} {\it The following three conditions are equivalent to each other$\,:$ \par
\noindent $(a)$ The pole order spectral sequence $(4.4.4)$ degenerates at $E_2$. \par
\noindent $(b)$ The algebraic microlocal pole order spectral sequence $(4.5.1)$ degenerates at $E_2$. \par
\noindent $(c)$ The torsion subgroup $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ vanishes.} \par
\noindent {\bf 4.8.~Proof of Theorem~2.} By (4.5.7) the assertion follows from the second isomorphism in Proposition~(4.6) by choosing any $p\in{\mathbf Z}$, where the obtained isomorphism is independent of the choice of $p$ by using the bijectivity of the action of ${\partial}_t$. (It is also possible to use the first isomorphism in Proposition~(4.6) by choosing some $p<n$ although the independence of the choice of $p$ is less obvious unless the relation with the algebraic microlocal pole order spectral sequence is used.) This finishes the proof of Theorem~2. \par
\noindent {\bf 4.9.~Thom-Sebastiani type theorem for $P'$.} Let $f,f_1,f_2$ be as in Proposition~(2.2). In the notation of (4.5), we have a canonical isomorphism $$(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f,P')=(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_1},P')\otimes_{{\mathbf C}[{\partial}_t,\,{\partial}_t^{-1}]}(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2},P').$$ Assume $f_2$ has an isolated singularity at the origin as in Proposition~(2.2). Then $$H^jGr^{P'}_k\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2}=0\quad(j\ne n_2,\,\,k\in{\mathbf Z}).$$ Hence $(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2},P')$ is strict, and we get a filtered quasi-isomorphism $$(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2},P')\buildrel\sim\over\longrightarrow H^{n_2}(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2},P')[-n_2].$$ This implies a filtered quasi-isomorphism $$(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f,P')\buildrel\sim\over\longrightarrow(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_1},P')\otimes_{{\mathbf C}[{\partial}_t,\,{\partial}_t^{-1}]} H^{n_2}(\widetilde{C}^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_{f_2},P')[-n_2], \leqno(4.9.1)$$ which is compatible with the action of $t$. More precisely, the action of $t$ on the left-hand side corresponds to $t\otimes id+id\otimes t$ on the right-hand side (since $f=f_1+f_2$). \par
Combining (4.9.1) with (4.5.7), we get the Thom-Sebastiani type theorem for the pole order spectrum: $${\rm Sp}_P(f)={\rm Sp}_P(f_1)\,{\rm Sp}_P(f_2),\quad{\rm Sp}_P^j(f)={\rm Sp}_P^j(f_1)\,{\rm Sp}_P^0(f_2)\quad(j\in{\mathbf N}), \leqno(4.9.2)$$ assuming that $f_2$ has an isolated singularity as above so that ${\rm Sp}_P(f_2)={\rm Sp}_P^0(f_2)$, see \cite{SkSt} for the case where $f_1$ has also an isolated singularity. Note that the Thom-Sebastiani type theorem holds for the Steenbrink spectrum by \cite{Sa5}. \par
\noindent {\bf Remarks~4.10.} (i) With the notation and the assumption of (4.9), the pole order spectral sequences degenerate at $E_2$ for $f$ if and only if they do for $f_1$. This follows from (4.9.1) together with Corollary~(4.7). \par
(ii) The equivalence between the $E_2$-degeneration of the pole order spectral sequence (4.4.4) and the vanishing of $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ was shown in \cite{vSt} in the (non-graded) analytic local case. \par
(iii) Assuming $\dim{\rm Sing}\,f^{-1}(0)=1$, we have by (4.3.4) the following exact sequence: $$\aligned 0\to\widetilde{H}^{n-1}A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}(-d)&\,{\buildrel{\dd_t^{-1}}\over\longrightarrow}\,\widetilde{H}^{n-1}A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^{n-1}\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\\ \to H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}(-d)&\,{\buildrel{\dd_t^{-1}}\over\longrightarrow}\,H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to H^n\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}\to 0,\endaligned$$ where $\widetilde{H}^{n-1}A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ is defined by $H^{n-1}A_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}$ if $n\ne2$, and by its quotient by ${\mathbf C}[t]\omega_0$ if $n=2$. (For $\omega_0$, see the definition of $\widetilde{H}^j(G_pC_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})$ in (4.5).) This exact sequence has sufficient information about the torsion subgroup $(H^n\K_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ to give another proof of Theorem~2. \par
(iv) By forgetting the grading, Proposition~(4.6) and Corollary~(4.7) can be extended to the analytic local case where $f$ is a germ of a holomorphic function on a complex manifold with $\dim{\rm Sing}\,f^{-1}(0)=1$. \par
The following will be used in the proof of Theorem~(5.2) below. \par
\noindent {\bf 4.11.~Multiplicity of the minimal exponent.} Let $g$ be a germ of a holomorphic function on a complex manifold $(Y,0)$ having an isolated singularity. We have the direct image ${\mathcal B}_g:={\mathcal O}_{Y,0}[{\partial}_t]$ of ${\mathcal O}_{Y,0}$ as a left ${\mathcal D}_{Y,0}$-module by the graph embedding of $g$. (Note that it is an analytic ${\mathcal D}$-module.) It has the Hodge filtration $F$ by the order of ${\partial}_t$ and the filtration $V$ of Kashiwara \cite{Ka} and Malgrange \cite{Ma}. \par
Consider ${\rm Gr}_V^{\alpha}({\mathcal B}_g,F)$ for $\alpha<1$. These underlie mixed Hodge modules supported at $0$, and are the direct images of filtered vector spaces by the inclusion $\{0\}\hookrightarrow Y$ as filtered ${\mathcal D}$-modules. (This is shown by using \cite[Lemma 3.2.6]{Sa1} applied to any function vanishing at $0$.) So we get $$\hbox{The ${\rm Gr}^F_p{\rm Gr}_V^{\alpha}{\mathcal B}_g$ are annihilated by ${\mathfrak m}_{Y,0}\subset{\mathcal O}_{Y,0}$ for $\alpha<1$,} \leqno(4.11.1)$$ where ${\mathfrak m}_{Y,0}\subset{\mathcal O}_{Y,0}$ is the maximal ideal. \par
Let $\widetilde{\mathcal B}_g:={\mathcal O}_{Y,0}[{\partial}_t,\dd_t^{-1}]$ be the algebraic microlocalization of ${\mathcal B}_g$. By \cite[Sections 2.1-2]{Sa2}, it has the Hodge filtration $F$ by the order of ${\partial}_t$ and also the filtration $V$ such that $$\aligned{\partial}_t:F_pV^{\alpha}\widetilde{\mathcal B}_g&\buildrel\sim\over\longrightarrow F_{p+1}V^{\alpha-1}\widetilde{\mathcal B}_g\quad(\forall\,p,\alpha).\\({\rm Gr}_V^{\alpha}{\mathcal B}_g,F)&\buildrel\sim\over\longrightarrow({\rm Gr}_V^{\alpha}\widetilde{\mathcal B}_g,F)\quad(\alpha<1),\endaligned$$ Then (4.11.1) implies $$\hbox{The ${\rm Gr}^F_p{\rm Gr}_V^{\alpha}\widetilde{\mathcal B}_g$ are annihilated by ${\mathfrak m}_{Y,0}\subset{\mathcal O}_{Y,0}$ for any $\alpha$.} \leqno(4.11.2)$$ \par
Consider the (relative) de Rham complexes $${\mathcal C}_g:={\rm DR}_Y({\mathcal B}_g),\quad\widetilde{\mathcal C}_g:={\rm DR}_Y(\widetilde{\mathcal B}_g).$$ Up to a shift of complexes, these are the Koszul complexes associated with the action of ${\partial}_{y_i}$ on ${\mathcal B}_g$ and $\widetilde{\mathcal B}_g$ where the $y_i$ are local coordinates of $Y$. It has the filtrations $F$ and $V$ induced by those on ${\mathcal B}_g$ and $\widetilde{\mathcal B}_g$. Here $V$ is stable by the action of ${\partial}_{y_i}$, but we need a shift for $F$ depending on the degree of the complexes ${\mathcal C}_g$, $\widetilde{\mathcal C}_g$. By the above argument we have $$H^j{\rm Gr}_p^F{\rm Gr}_V^{\alpha}\widetilde{\mathcal C}_g=H^j{\rm Gr}_V^{\alpha}\widetilde{\mathcal C}_g=H^j{\rm Gr}_p^F\widetilde{\mathcal C}_g=0\quad(j\ne 0), \leqno(4.11.3)$$ where we also use the fact that ${\rm Gr}_p^F\widetilde{\mathcal C}_g$ is the Koszul complex for the regular sequence $\{{\partial} g/{\partial} y_j\}$. These imply the vanishing of $H^jF_p{\rm Gr}_V^{\alpha}\widetilde{\mathcal C}_g$, etc.\ for $j\ne 0$, and we get $$\hbox{$(\widetilde{\mathcal C}_g;F,V)$ is strict,} \leqno(4.11.4)$$ by showing the exactness of some commutative diagram appearing in the definition of strict complex \cite{Sa1}. \par
It is known that the filtration $V$ on ${\mathcal C}_g$ is strict, and induces the filtration $V$ of Kashiwara and Malgrange on the Gauss-Manin system $H^0{\mathcal C}_g$ (by using the arguments in the proof of \cite[Prop.~3.4.8]{Sa1}). This assertion holds by replacing ${\mathcal C}_g$ with $\widetilde{\mathcal C}_g$, since ${\mathcal C}_g/V^{\alpha}{\mathcal C}_g=\widetilde{\mathcal C}_g/V^{\alpha}\widetilde{\mathcal C}_g$ for $\alpha\leqslant 1$ and $H^0{\mathcal C}_g=H^0\widetilde{\mathcal C}_g$ (see e.g. \cite{BaSa}). Here we also get the canonical isomorphism $$(H^0{\mathcal C}_g,V)=(H^0\widetilde{\mathcal C}_g,V). \leqno(4.11.5)$$ \par
Consider now $({\rm Gr}^F_0\widetilde{\mathcal C}_g,V)$. This is a complex of filtered ${\mathcal O}_{Y,0}$-modules, and is strict. By the above argument we get the canonical isomorphism of filtered ${\mathcal O}_{Y,0}$-modules $$H^0({\rm Gr}^F_0\widetilde{\mathcal C}_g,V)=({\mathcal O}_{Y,0}/({\partial} g),V). \leqno(4.11.6)$$ Combining this with (4.11.2), (4.11.4) and using ${\rm Gr}_V^{\alpha}{\rm Gr}^F_p\widetilde{\mathcal C}_g={\rm Gr}^F_p{\rm Gr}_V^{\alpha}\widetilde{\mathcal C}_g$, we get $$\hbox{The ${\rm Gr}_V^{\alpha}({\mathcal O}_{Y,0}/({\partial} g))$ are annihilated by ${\mathfrak m}_{Y,0}\subset{\mathcal O}_{Y,0}$ for any $\alpha$.} \leqno(4.11.7)$$ In particular, the multiplicity of the minimal exponent is 1. \par
\par
\vbox{\centerline{\bf 5. Calculation of ${\rm d}^{(1)}$.} \par
\noindent In this section we calculate ${\rm d}^{(1)}$ in certain cases, and prove Theorems~(5.2) and (5.3).} \par
\noindent {\bf 5.1.~Relation with the isolated singularities in ${\mathbf P}^{n-1}$.} Let $\rho:\widetilde{X}\to X$ be the blow-up of the origin of $X:={\mathbf C}^n$. Let $y=\sum_ic_ix_i$ be as in the introduction (i.e. $(c_i)\in{\mathbf C}^n$ are sufficiently general). We may assume that $$y=x_n,$$ replacing the coordinates $x_1,\dots,x_n$ of $X={\mathbf C}^n$. Let $\widetilde{X}'$ be the complement of the proper transform of $\{x_n=0\}$. It has the coordinates $\widetilde{x}_1,\dots,\widetilde{x}_n$ such that $$\rho^*x_i=\begin{cases}\widetilde{x}_i\,\widetilde{x}_n&\hbox{if}\,\,\,i\ne n,\\ \widetilde{x}_n&\hbox{if}\,\,\,i=n.\end{cases}$$ Set $n':=n-1$. Define the complex $\spKf$ with $R$ and $f$ respectively replaced with
$${\mathbf C}[\widetilde{x}_1,\dots,\widetilde{x}_{n'}][\widetilde{x}_n,\widetilde{x}_n^{\,-1}]\quad\hbox{and}\quad f':=\rho^*f|_{\widetilde{X}'}=\widetilde{x}_n^d\,h(\widetilde{x}_1,\dots,\widetilde{x}_{n'}),$$ where $h(\widetilde{x}_1,\dots,\widetilde{x}_{n'}):=f(\widetilde{x}_1,\dots,\widetilde{x}_{n'},1)$, and the grading is given only by the degree of $\widetilde{x}_n$. This is compatible with ${}^s\!K^{\ssb}_f$ via $\rho^*$. We have the canonical graded morphism $$H^j({}^s\!K^{\ssb}_f)\to H^j(\spKf),$$ in a compatible way with the differential ${\rm d}$. This morphism induces injective morphisms $$N\hookrightarrow H^{-1}(\spKf),\quad M''\hookrightarrow H^0(\spKf), \leqno(5.1.1)$$ where the image of $M'$ in $H^0(\spKf)$ vanishes. We have the inclusion $$N^{(2)}_{p+d}\subset{\rm Ker}\bigl({\rm d}:H^{-1}(\spKf)\to H^0(\spKf)\bigr)\cap N_{p+d}, \leqno(5.1.2)$$ under the first injection of (5.1.1), and the equality holds if $M'_p=0$. \par
Let $Y'$ be the complement of $\{\widetilde{x}_n=0\}$ in $Y:={\mathbf P}^{n'}$. Then $$\widetilde{X}'=Y'\times{\mathbf C},$$ where $\widetilde{x}_1,\dots,\widetilde{x}_{n'}$ and $\widetilde{x}_n$ are respectively coordinates of $Y'$ and ${\mathbf C}$. Moreover $\spKf$ is quasi-isomorphic to the mapping cone of $${\partial} f'/{\partial} \widetilde{x}_n=d\,\widetilde{x}_n^{\,d-1}h:(\Omega_{Y'}^{n'}/{\rm d}h\wedge\Omega_{Y'}^{n'-1})[\widetilde{x}_n,\widetilde{x}_n^{\,-1}]\to (\Omega_{Y'}^{n'}/{\rm d}h\wedge\Omega_{Y'}^{n'-1})[\widetilde{x}_n,\widetilde{x}_n^{\,-1}],$$ where $\Omega_{Y'}^j$ is identified with the group of global sections. \par
Let $\{z_k\}$ be the singular points of the morphism $h:Y'\to{\mathbf C}$. These are all isolated singular points. (In fact, they are the union of the singular points of $\{h=c\}$ for $c\in{\mathbf C}$. But $\{h=c\}$ is the intersection of $\{f=c\}$ and $\{x_n=1\}$ in ${\mathbf C}^n$, and the intersection of its closure in ${\mathbf P}^n$ with the boundary ${\mathbf P}^{n'}={\mathbf P}^n\setminus{\mathbf C}^n$ is the intersection of $\{f=0\}$ and $\{x_n=0\}$ in ${\mathbf P}^{n'}$, which is smooth by hypothesis. So the assertion follows.) \par
Since the support of the ${\mathbf C}[\widetilde{x}_1,\dots,\widetilde{x}_{n'}]$-module $\Omega_{Y'}^{n'}/{\rm d}h\wedge\Omega_{Y'}^{n'-1}$ is $\{z_k\}$, we have the canonical isomorphism $$\Omega_{Y'}^{n'}/{\rm d}h\wedge\Omega_{Y'}^{n'-1}=\h{$\bigoplus$}_k\,\Omega_{h_k}^{n'}\quad \hbox{with}\quad\Omega_{h_k}^{n'}:=\Omega_{Y'_{\rm an},z_k}^{n'}/{\rm d}h_k\wedge\Omega_{Y'_{\rm an},z_k}^{n'-1},$$ where $Y'_{\rm an}$ is the associated analytic space, and $h_k$ is the germ of an analytic function defined by $h$ at $z_k$. \par
Let $z_k$ ($k\leqslant r_0$) be the singular points contained in $\{h=0\}$. These are the singular points of $Z:=f^{-1}(0)\subset{\mathbf P}^{n'}$ since $x_n$ is sufficiently general. Since $h_k$ is invertible for $k>r_0$, we have $$H^n(\spKf)=\h{$\bigoplus$}_{k\leqslant r_0}\,(\Omega_{h_k}^{n'}/h_k\,\Omega_{h_k}^{n'})\wedge{\mathbf C}[\widetilde{x}_n,\widetilde{x}_n^{\,-1}]\,{\rm d} \widetilde{x}_n, \leqno(5.1.3)$$ and there is a similar formula for $H^{n-1}(\spKf)$ (with ${\rm d} \widetilde{x}_n$ on the right-hand side deleted and $\wedge$ replaced by $\otimes$). So the $z_k$ for $k>r_0$ may be forgotten from now on. \par
Note that, via (5.1.1) and (5.1.3), we have for $p\gg 0$ $$M''\supset(\Omega_{h_k}^{n'}/h_k\,\Omega_{h_k}^{n'})\wedge{\mathbf C}[\widetilde{x}_n]\,\widetilde{x}_n^{\,p}\,{\rm d} \widetilde{x}_n. \leqno(5.1.4)$$ \par
Take an element of pure degree $p$ of $${\rm Ker}\bigl(h_k:\Omega_{h_k}^{n'}[\widetilde{x}_n,\widetilde{x}_n^{\,-1}]\to\Omega_{h_k}^{n'}[\widetilde{x}_n,\widetilde{x}_n^{\,-1}]\bigr)\quad(k\leqslant r_0).$$ It is represented by $\psi:=\frac{1}{d}\,\widetilde{x}_n^{\,p}\xi$ where $\xi\in\Omega_{Y'_{\rm an},z_k}^{n'}$ satisfies $$h_k\,\xi={\rm d}h_k\wedge\eta\quad\hbox{with}\quad\eta\in\Omega_{Y'_{\rm an},z_k}^{n'-1}. \leqno(5.1.5)$$ The corresponding element of $H^{n'}(\spKf)$ is represented by $$\psi':=\hbox{$\frac{1}{d}$}\,\widetilde{x}_n^{\,p}\xi+\widetilde{x}_n^{\,p-1}{\rm d} \widetilde{x}_n\wedge\eta.$$ Its image in $H^n(\spKf)$ by the differential ${\rm d}$ is given by $${\rm d}[\psi']=\pm\bigl[\bigl(\hbox{$\frac{p}{d}$}\,\xi-{\rm d}\eta\bigr)\wedge \widetilde{x}_n^{\,p-1}{\rm d} \widetilde{x}_n\bigr], \leqno(5.1.6)$$ and we have by (5.1.5) $$[d\eta]={\partial}_tt\,[\xi]\quad\hbox{in}\,\,\,H''_{h_k}. \leqno(5.1.7)$$ \par
Let $V$ be the $V$-filtration of Kashiwara \cite{Ka} and Malgrange \cite{Ma} on the Gauss-Manin system ${\mathcal G}_{h_k}$ indexed by ${\mathbf Q}$, see e.g.\ \cite{SkSt}. (It is closely related with the theory of asymptotic Hodge structure \cite{Va}.) We denote also by $V$ the induced filtration on the Brieskorn module $H''_{h_k}$ and also on $\Omega_{h_k}^{n'}$ via the canonical inclusion and the surjection $${\mathcal G}_{h_k}\supset H''_{h_k}\to\Omega_{h_k}^{n'},$$ see \cite{Bri} for the latter. In this paper we index $V$ so that ${\partial}_tt-\alpha$ is nilpotent on ${\rm Gr}_V^{\alpha}{\mathcal G}_{h_k}$. \par
Let $\{\alpha_{h_k,j}\}$ be the exponents of $h_k$ counted with multiplicity; more precisely $$\#\{j:\alpha_{h_k,j}=\alpha\}=\dim{\rm Gr}^{\alpha}_V\Omega^{n'}_{h_k}\quad\hbox{and}\quad{\rm Sp}_{h_k}(t)=\h{$\sum$}_j\,t^{\,\alpha_{h_k,j}}. \leqno(5.1.8)$$ Here we may assume the $\alpha_{h_k,j}$ are weakly increasing (i.e. $\alpha_{h_k,j}\leqslant\alpha_{h_k,j+1}$) for each $k$. We have the symmetry $\{\alpha_{h_k,j}\}_j=\{n-\alpha_{h_k,j}\}_j$ (counted with multiplicity) by \cite{St2}. \par
\noindent {\bf Theorem~5.2.} {\it With the notation of $(5.1)$, assume $h_k$ is non-quasihomogeneous $($i.e. $h_k\notin({\partial} h_k))$ for some $k\leqslant r_0$. Then the kernel and cokernel of $d^{(1)}:N\to M$ $($i.e. $N^{(2)}$ and $M^{(2)}$ in Theorem~$2)$ and $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ are all infinite dimensional over ${\mathbf C}$.} \par
\noindent {\it Proof.} Since the minimal exponent $\alpha_{h_k,1}$ has multiplicity 1 (see (4.11)), we have $$V^{>\alpha_{h_k,1}}\,\Omega_{h_k}^{n'}={\mathfrak m}_{Y,z_k}\Omega_{h_k}^{n'}\supset{\rm Ker}(h_k:\Omega_{h_k}^{n'}\to\Omega_{h_k}^{n'}).$$ Combined with (5.1.7), this implies for $\xi$ as in (5.1.5) $$\bigl[\hbox{$\frac{p}{d}$}\,\xi-{\rm d}\eta\bigr]\in V^{>\alpha_{h_k,1}\,}\Omega_{h_k}^{n'}. \leqno(5.2.1)$$ So the infinite dimensionality of $M^{(2)}$ follows from (5.1.4), (5.1.6) and (5.2.1). It implies the assertion for $N^{(2)}$ since the morphisms $${\rm d}^{(1)}:N_{p+d}\to M_p$$ are morphisms of finite dimensional vector spaces of the same dimension for $p\gg 0$. The assertion for the torsion part $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ then follows from Theorem~2. This finishes the proof of Theorem~(5.2). \par
\noindent {\bf Theorem~5.3.} {\it With the notation of $(5.1)$, assume the $h_k$ are quasihomogeneous $($i.e. $h_k\in({\partial} h_k))$ for any $k\leqslant r_0$. Then the kernel and cokernel of $d^{(1)}:N\to M$ $($i.e. $N^{(2)}$ and $M^{(2)}$ in Theorem~$2)$ and $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ are finite dimensional over ${\mathbf C}$. More precisely we have
$$\nu^{(2)}_{p+d}:=\dim N^{(2)}_{p+d}\leqslant\#\bigl\{(k,j)\,\big|\,\,\alpha_{h_k,j}=\hbox{$\frac{p}{d}$}\,\,(k\leqslant r_0)\,\bigr\}, \leqno(5.3.1)$$ and the equality holds in the case where $\mu'_p=0$ and either $\nu_{p+d}=\tau_Z$ or all the singularities of $Z$ are ordinary double points.} \par
\noindent {\it Proof.} By Theorem~2, it is enough to show the inequality (5.3.1) together with the equality in the special case as above. Take any $k\leqslant r_0$. In the notation of (5.1) there is a local analytic coordinate system $(y_1,\cdots,y_{n'})$ of $Y'$ around $z_k$ together with positive rational numbers $w_1,\dots,w_{n'}$ such that $h_k$ is a linear combination of monomials $\prod_iy_i^{m_i}$ with $\sum_iw_im_i=1$ (see \cite{SaK}). Then $$v(h_k)=h_k\quad\hbox{with}\quad v:=\h{$\sum$}_i\,w_i\,y_i\,{\partial}_{y_i}.$$ We will denote the contraction of ${\rm d} y_1\wedge\dots\wedge{\rm d} y_{n'}$ and $v$ by $\zeta$. \par
Take a monomial basis $\{\xi_j\}$ of $\Omega_{h_k}^{n'}$, where monomial means that $$\xi_j=\hbox{$\prod$}_i\,y_i^{m_{j,i}}\,{\rm d} y_1\wedge\dots\wedge{\rm d} y_{n'}\quad\hbox{with}\quad m_{j,i}\in{\mathbf N}.$$ Set $$\eta_j:=\hbox{$\prod$}_i\,y_i^{m_{j,i}}\,\zeta,\quad w(\xi_j):=\h{$\sum$}_i\,w_i(m_{j,i}+1).$$ Then $${\rm d}h_k\wedge\eta_j=h_k\,\xi_j,\quad{\rm d}\eta_j=w(\xi_j)\,\xi_j.$$ So we get $${\partial}_tt\,[\xi_j]=w(\xi_j)\,[\xi_j]\quad\hbox{in}\,\,\,H''_{h_k}.$$ In particular $$w(\xi_j)=\alpha_{h_k,j},$$ by changing the ordering of the $\xi_j$ if necessary. The inequality (5.3.1) then follows from (5.1.6) and (5.1.7) together with the inclusion (5.1.2). In case the assumption after (5.3.1) is satisfied, we have the equality by using the remark after (5.1.2) together with the fact that $\alpha_{h_k,j}=n'/2$ if $z_k$ is an ordinary double point of $Z$. This finishes the proof of Theorem~(5.3). \par
\noindent {\bf Corollary~5.4.} {\it With the hypothesis of Theorem~$(5.3)$, assume $n=2$ or more generally
$$\max\bigl\{\,\alpha_{h_k,j}\,\,\big|\,\,d\alpha_{h_k,j}\in{\mathbf N}\,\,\,(k\leqslant r_0)\,\bigr\}<1+\hbox{$\frac{n}{d}$}, \leqno(5.4.1)$$ $($for instance, $d\alpha_{h_k,j}\notin{\mathbf N}$ for any $j$ and $k\leqslant r_0)$. Then the pole order spectral sequences $(4.4.4)$ and $(4.5.1)$ degenerate at $E_2$, and $(H^nA^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}}_f)_{\rm tor}=0$.} \par
\noindent {\it Proof.} This follows from Theorem~(5.3) together with Corollary~(4.7) and Theorem~2 since ${\rm d}^{(r)}$ is a graded morphism of degree $-rd$. \par
\noindent {\bf Corollary~5.5.} {\it With the first hypothesis of Theorem~$(5.3)$, assume $n=3$. Let $\alpha'_f:=\min\{\alpha_{h_k,j}\}$ in the notation of $(5.1.8)$. Then} $$\nu_{p+d}=0\quad\hbox{for}\quad p<d\alpha'_f. \leqno(5.5.1)$$ \par
\noindent {\it Proof.} Note first that $\alpha'_f\leqslant 1$ since $\dim Z=1$. Assume $\nu_{p+d}\ne 0$ with $p<d\alpha'_f$. Then the image of ${\rm d}^{(1)}:N_{p+d}\to M_p$ is nonzero by Theorem~(5.3). We get hence by Theorem~2 $$n'_{f,p/d}<\mu_p\leqslant\binom{p-1}{n-1},$$ where $n'_{f,p/d}$ is the coefficient of the pole order spectrum ${\rm Sp}_P(f)$ (i.e. ${\rm Sp}_P(f)=\h{$\sum$}_{\alpha}\,n'_{f,\alpha}t^{\alpha}$). However, this contradicts Remark~(3.6)(ii). So Corollary~(5.5) follows. \par
\noindent {\bf Remarks~5.6.} (i) In Theorem~(5.3), the inequality (5.3.1) holds with the left-hand side replaced by the dimension of the kernel of the composition $$N_{p+d}\buildrel{{\rm d}^{(1)}}\over\longrightarrow M_p\to M''_p.$$ In fact, (5.1.1) implies that (5.1.2) holds with $N_{p+d}^{(2)}$ replaced by this kernel. \par
(ii) Corollary~(5.4) seems to be closely related with the short exact sequence in \cite[Thm.~1]{DiSa1}. \par
(iii) If all the singularities of $Z$ are nodes, then $\alpha'_f=1$, and the estimation obtained by Corollary~(5.5) coincides with the one in \cite[Thm.~4.1]{DiSt1}, which is known to be sharp. It is also sharp for instance if the singularities are $A_1$ or $D_4$ (e.g. $f=(x^2-y^2)(x^2-z^2)(y^2-z^2)$). \par
(iv) The proof of the finiteness of $(H^nA_f^{\raise.15ex\h{${\scriptscriptstyle\bullet}$}})_{\rm tor}$ can be reduced to the analytic local case by considering the formal completion where the direct sum is replaced with the infinite direct product and the convergent power series factors through the formal completion. \par
(v) The argument in (5.1) can be extended to the analytic local case if there is a projective morphism of complex manifolds $\rho:\widetilde{X}\to X$ such that the restriction of $\rho$ over $X\setminus\{0\}$ is an isomorphism and the following two conditions are satisfied: \par
\noindent $(a)$ The proper transform of each irreducible component of ${\rm Sing}\,f$ transversally intersects $\rho^{-1}(0)_{\rm red}$ at a smooth point $z_k$, and $z_k\ne z_{k'}$ for $k\ne k'$, \par
\noindent $(b)$ We have $\rho^*f=\widetilde{x}_n^{\,a_k}h_k(\widetilde{x}_1,\dots,\widetilde{x}_{n-1})$ around each $z'_k$ where $\widetilde{x}_1,\dots,\widetilde{x}_n$ are local coordinates of $\widetilde{X}$ around $z_k$, $h_k$ is a germ of a holomorphic function of $n-1$ variables, and $a_k\in{\mathbf N}$. \par
Note that condition~$(b)$ implies that $\rho^{-1}(0)_{\rm red}$ is locally defined by $\{\widetilde{x}_n=0\}$. Let $y$ be a sufficiently general linear combination of local coordinates of $(X,0)$. Here we assume that $\rho$ factors through the blow-up along $0\in X$, and moreover the intersection of the exceptional divisor $E_0$ of the blow-up along $0$ with the proper transform of each irreducible component of ${\rm Sing}\,f$ is not contained in the hyperplane of $E_0$ defined by $h$ (by replacing $h$ if necessary). Then $\rho^*h=u_k\widetilde{x}_n^{b_k}$ with $b_k\in{\mathbf N}$ and $u_k$ an invertible function. Here we may assume $\rho^*h=\widetilde{x}_n^{b_k}$ by replacing $\widetilde{x}_n$, but the equality in condition~$(b)$ is replaced by $\rho^*f=u'_k\widetilde{x}_n^{\,a_k}h_k(\widetilde{x}_1,\dots,\widetilde{x}_{n-1})$ where $u'_k$ is an invertible function. Then condition~$(b)$ can be replaced with \par
\noindent $(b)'$ The restriction of $\rho^*y$ to the proper transform $\widetilde{D}$ of $f^{-1}(0)$ gives an analytically trivial deformation on a neighborhood of each $z'_k$ by replacing $\rho^*y:\widetilde{D}\to{\mathbf C}$ with the normalization of the base change by an appropriate ramified covering of ${\mathbf C}$ if necessary. \par
Under these assumptions, Theorem~(5.2) can be extended to the analytic local case where $h_k$ is as in condition~$(b)$ above. However, it does not seem easy to generalize Theorem~(5.3) unless $f$ admits a ${\mathbf C}^*$-action (or the arguments related to the grading are completely ignored). \par
(vi) If $n=3$ and $Z$ has only ordinary double points as singularities, then the coefficients $n_{f,\alpha}$ of the Steenbrink spectrum for $\alpha\notin{\mathbf Z}$ are the same as that of a central hyperplane arrangement in ${\mathbf C}^3$ having only ordinary double points in ${\mathbf P}^2$. (Note that its formula can be found in \cite{BuSa}.) In fact, the vanishing cycle sheaf $\varphi_{f,\ne 1}{\mathbf Q}_X$ is supported at the origin so that we have the symmetry of the coefficients $n_{f,\alpha}$ for $\alpha\notin{\mathbf Z}$. Moreover $n_{f,\alpha}$ for $\alpha<1$ can be obtained by Remark~(3.6)(ii), and $n_{f,\alpha}$ for $\alpha\in(1,2)$ can be calculated from the $n_{f,\alpha}$ for $\alpha\notin(1,2)$ by using the relation with the Euler characteristic of the complement of $Z\subset{\mathbf P}^2$. (The latter follows from (3.1.2).) Note also that the $n_{f,\alpha}$ for $\alpha\in{\mathbf Z}$ can be obtained from the Hodge numbers of the complement of $Z\subset{\mathbf P}^{n-1}$. \par
\noindent {\bf Examples~5.7.} We first give some examples where the assumptions of Corollary~(5.4) and the last conditions of Theorem~(5.3) are all satisfied, and moreover Remark~(5.6)(vi) can also be applied. These are also examples of type (I) singularities (i.e. (0.5) is satisfied). \par
\noindent (i) $f=xyz\,\,\,$(three $A_1$ singularities in ${\mathbf P}^2$) $\,\,n=d=3.$ $$\begin{array}{cccccccccccccccc} k\, &1 &2 &3 &4 &5 &6 &7 &8 &9 &\cdots \\ \gamma_k & & &1 &3 &3 &1\\ \mu'_k \\ \mu''_k & & &1 &3 &3 &3 &3 &3 &3 &\cdots\\ \mu_k & & &1 &3 &3 &3 &3 &3 &3 &\cdots\\ \nu_k & & & & & &2 &3 &3 &3 &\cdots\\ \mu^{\scriptscriptstyle(2)}_k & & &1\\ \nu^{\scriptscriptstyle(2)}_k & & & & & &2\\ {\rm Sp}_P & & &1 & & &-2\\ {\rm Sp} & & &1 & & &-2\\ \end{array}$$ \par
\noindent (ii) $f=x^2y^2+x^2z^2+y^2z^2\,\,\,$(three $A_1$ singularities in ${\mathbf P}^2$) $\,n=3,\,d=4.$ $$\begin{array}{cccccccccccccccccc} k\, &1 &2 &3 &4 &5 &6 &7 &8 &9 &10 &11 &12 &\cdots&\raise-3mm\hbox{ }\\ \gamma_k & & &1 &3 &6 &7 &6 &3 &1\\ \mu'_k & & & & &3 &4 &3\\ \mu''_k & & &1 &3 &3 &3 &3 &3 &3 &3 &3 &3 &\cdots\\ \mu_k & & &1 &3 &6 &7 &6 &3 &3 &3 &3 &3 &\cdots\\ \nu_k & & & & & & & & &2 &3 &3 &3 &\cdots \\ \mu^{\scriptscriptstyle(2)}_k & & &1 &3 &4 &4 &3\\ \nu^{\scriptscriptstyle(2)}_k \\ {\rm Sp}_P & & &1 &3 &4 &4 &3 &0 &0\\ {\rm Sp} & & &1 &3 &3 &4 &3 &0 &1\\ \end{array}$$ \par
\noindent (iii) $f=xyz(x+y+z)\,\,\,$(six $A_1$ singularities in ${\mathbf P}^2$) $\,n=3,\,d=4.$ $$\begin{array}{cccccccccccccccccc} k\, &1 &2 &3 &4 &5 &6 &7 &8 &9 &10 &11 &12 &\cdots&\raise-3mm\hbox{ }\\ \gamma_k & & &1 &3 &6 &7 &6 &3 &1\\ \mu'_k & & & & & &1 & & &\\ \mu''_k & & &1 &3 &6 &6 &6 &6 &6 &6 &6 &6 &\cdots\\ \mu_k & & &1 &3 &6 &7 &6 &6 &6 &6 &6 &6 &\cdots\\ \nu_k & & & & & & & &3 &5 &6 &6 &6 &\cdots \\ \mu^{\scriptscriptstyle(2)}_k & & &1 &3 &1 &1\\ \nu^{\scriptscriptstyle(2)}_k & & & & & & & &3\\ {\rm Sp}_P & & &1 &3 &1 &1 &0 &-3 &0\\ {\rm Sp} & & &1 &3 &0 &1 &0 &-3 &1\\ \end{array}$$ Here we have $\mu''_4=3$ by Lemma~(2.1), but the proof of $\mu''_5=6$ is not so trivial. In fact, if $\mu''_5<6$, then we have $\nu_{\,7}\ne 0$ by Corollary~2. However, this contradicts Corollary~(5.5). \par
\noindent {\bf Examples~5.8.} (i) $f=x^2y^2+z^4\,\,\,$(two $A_3$ singularities in ${\mathbf P}^2$) $\,n=3,\,d=4.$ \par
\noindent The calculation of this example does not immediately follow from Corollary~(5.4) since the last conditions of Theorem~(5.3) are not satisfied and Remark~(5.6)(vi) does not apply to this example. This example can be calculated by using the Thom-Sebastiani type theorems in (2.2) and (4.9). \par
$$\begin{array}{cccccccccccccccccc} k\, &1 &2 &3 &4 &5 &6 &7 &8 &9 &10 &11 &12 &\cdots&\raise-3mm\hbox{ }\\ \gamma_k & & &1 &3 &6 &7 &6 &3 &1\\ \mu'_k & & & & &1 &1 &1\\ \mu''_k & & &1 &3 &5 &6 &6 &6 &6 &6 &6 &6 &\cdots\\ \mu_k & & &1 &3 &6 &7 &7 &6 &6 &6 &6 &6 &\cdots\\ \nu_k & & & & & & &1 &3 &5 &6 &6 &6 &\cdots \\ \mu^{\scriptscriptstyle(2)}_k & & &1 &1 &2 &1 &1\\ \nu^{\scriptscriptstyle(2)}_k & & & & & & &1 &1 &1\\\ {\rm Sp}_P & & &1 &1 &2 &1 &0 &-1 &-1\\ {\rm Sp} & & &1 &1 &2 &1 &0 &-1 &-1\\ \end{array}$$ We note the calculation in the case $f=x^2y^2$ for the convenience of the reader. \par
\noindent (ii) $f=x^2y^2\,\,\,$(two $A_1$ singularities in ${\mathbf P}^1$) $\,\,n=2,\,d=4.$ $$\begin{array}{cccccccccccccccc} k\, &1 &2 &3 &4 &5 &6 &7 &8 &\cdots \\ \gamma_k & &1 &2 &3 &2 &1\\ \mu'_k & & & &1\\ \mu''_k & &1 &2 &2 &2 &2 &2 &2 &\cdots\\ \mu_k & &1 &2 &3 &2 &2 &2 &2 &\cdots\\ \nu_k & & & & & &1 &2 &2 &\cdots\\ \mu^{\scriptscriptstyle(2)}_k & &1 & &1\\ \nu^{\scriptscriptstyle(2)}_k & & & & & &1\\ {\rm Sp}_P & &1 &0 &1 &0 &-1\\ {\rm Sp} & &1 &0 &1 &0 &-1\\ \end{array}$$ \par
\noindent {\bf Remark~5.9.} We have the $V$-filtration of Kashiwara and Malgrange on $N_p$, $M''_p$ by using the injections in (5.1.1). Assume all the singularities of $Z$ are weighted homogeneous. It seems that the following holds in many examples: $$\dim{\rm Gr}_V^{\alpha}N_{p+d}=\begin{cases}\nu_{p+d}^{(2)}=\nu_{p+d}^{(\infty)}=n^1_{f,\alpha+1}&\hbox{if}\,\,\,p/d=\alpha,\\ 0&\hbox{if}\,\,\,p/d<\alpha,\end{cases} \leqno(5.9.1)$$ where $n^j_{f,\alpha}$ is as in (3.2.3), and $\nu_{p+d}^{(\infty)}:=\nu_{p+d}^{(r)}$ ($r\gg 0$). Note that (5.9.1) would imply the $E_2$-degeneration of the pole order spectral sequence in Question~2. \par
As for $M''_p$, (5.9.1) seems to correspond by duality to the following: $$\dim{\rm Gr}_V^{\alpha}M''_p=\begin{cases}n_{Z,\alpha}-n^1_{f,\alpha}&\hbox{if}\,\,\,p/d=\alpha,\\ n_{Z,\alpha}&\hbox{if}\,\,\,p/d>\alpha,\end{cases} \leqno(5.9.2)$$ where $n_{Z,\alpha}:=\h{$\sum$}_{k\leqslant r_0}\,n_{h_k,\alpha}$ with $n_{h_k,\alpha}$ defined for the isolated singularities $\{h_k=0\}$ ($k\leqslant r_0$) as in (3.2.1). In fact, we have the symmetries $$n_{Z,\alpha}=n_{Z,n-1-\alpha},\quad n^1_{f,\alpha}=n^1_{f,n-\alpha}\quad(\alpha\in{\mathbf Q}),$$ and it is expected that the duality isomorphism in Theorem~1 is compatible with the filtration $V$ on $N_p$, $M''_p$ in an appropriate sense so that we have the equality $$\dim{\rm Gr}_V^{\alpha}N_p+\dim{\rm Gr}_V^{n-1-\alpha}M''_{nd-p}=n_{Z,\alpha}\quad(\alpha\in{\mathbf Q},\,p\in{\mathbf Z}), \leqno(5.9.3)$$ giving a refinement of Corollary~2. Note that (5.9.2) for $\alpha=p/d$ is closely related with \cite{Kl}. \par
If the above formulas hold, these would imply a refinement of Corollary~5.5 (and also its generalization to the case $n>3$ in \cite[Theorem~9]{DiSa3}). However, it is quite nontrivial whether (5.9.2) holds, for instance, even for $p/d>\alpha$, since this is closely related to the independence of the $V$-filtrations associated to various singular points of $Z$. In the case where the Newton boundaries of $f$ are non-degenerate, the formula for $M''_p$ with $\alpha\leqslant 1$ seems to follow from the theories of multiplier ideals and microlocal $V$-filtrations.
\end{document} |
\begin{document}
\title{Search of clustered marked states with lackadaisical quantum walks}
\begin{abstract} The nature of quantum walks in the presence of multiple marked states has been studied by Nahimovs and Rivosh \cite{10.1007/978-3-662-49192-8_31}. They have shown that if the marked states are arranged in a $\sqrt{k} \times \sqrt{k}$ cluster in a $\sqrt{N} \times \sqrt{N}$ grid, then to find a single marked state among the multiple ones, the quantum walk requires $\Omega(\sqrt{N} - \sqrt{k})$ time. In this paper, we show that using a lackadaisical quantum walk with the weight of the self-loop as $\frac{4}{N(k + \lfloor{\frac{\sqrt{{k}}}{2}}\rfloor)}$, where $k$ is odd, the probability of finding a marked state increases by $\sim 0.2$. Furthermore, we show that instead of applying the quantum walk $\mathcal{O}(k)$ times to find all the marked states, a classical search in the vicinity of the marked state found after the first implementation of the quantum walk can find all the marked states in $\mathcal{O}(\sqrt{k})$ time on average. \end{abstract}
\section{Introduction}
Grover's algorithm showed that quantum computers can provide quadratic speedup for searching a marked location in an unsorted database \cite{Grover:1996:FQM:237814.237866}. While a classical algorithm requires $\mathcal{O}(N)$ time, Grover's algorithm can search the database in $\mathcal{O}(\sqrt{N})$ time. However, Benioff showed that if the $N$ data points are arranged in a $\sqrt{N} \times \sqrt{N}$ grid, then the quantum speedup is lost \cite{benioff2000space}. Since then, research has been carried out to design faster algorithms to search an unsorted database arranged in a two- or higher-dimensional grid. Ambainis et al.\ proposed an algorithm based on quantum random walk which can detect a marked state with probability $\mathcal{O}(\frac{1}{\log N})$ in $\mathcal{O}(\sqrt{N\log N})$ time \cite{Ambainis:2005:CMQ:1070432.1070590}. To increase the probability, amplitude amplification is necessary, which has a time complexity of $\mathcal{O}(\sqrt{\log N})$. This gives an overall running time of the algorithm of $\mathcal{O}(\sqrt{N}\log N)$. Childs and Goldstone matched this runtime with a continuous time quantum walk \cite{PhysRevA.70.042312}. Ambainis et al.\ subsequently proposed an algorithm which does not require amplitude amplification, and hence can perform the search in $\mathcal{O}(\sqrt{N\log N})$ time \cite{10.1007/978-3-642-35656-8_7}. Further research has been performed to study quantum walk algorithms in other graph structures \cite{PhysRevA.67.052307, PhysRevLett.114.110503}, but in this paper we shall stick to the two-dimensional grid.\\
Most of the quantum walk based search algorithms consider one or two marked locations. In \cite{10.1007/978-3-662-49192-8_31}, Nahimovs and Rivosh considered searching multiple marked states in a $\sqrt{N} \times \sqrt{N}$ grid. They showed that if $k$ marked states are grouped in a $\sqrt{k} \times \sqrt{k}$ block, then the algorithm of \cite{Ambainis:2005:CMQ:1070432.1070590} can perform the search in $\Omega(\sqrt{N} - \sqrt{k})$ time. Whereas if the $k$ marked locations are distributed uniformly over the grid, then the algorithm requires $\mathcal{O}(\sqrt{\frac{N}{k}\log\frac{N}{k}})$ time. In \cite{978-3-319-29817-7}, they also showed that when $k$ is even, quantum walk exceptionally fails to find any of the marked locations. Wong proposed lackadaisical quantum walk \cite{Wong2018}, where each location on the grid contains $l$ self-loops. These loops give some probability to the walker to remain at its location, hence making the walk lazy. In \cite{wang2017adjustable}, the authors have used adjustable self-loops and have shown that when the weight of the self-loop is $\frac{4}{N}$, the probability of finding a single marked state in a $\sqrt{N} \times \sqrt{N}$ grid increases, as compared to normal quantum walk (not lackadaisical).\\
In this paper we extend the model of \cite{wang2017adjustable} to multiple marked states arranged in a $\sqrt{k} \times \sqrt{k}$ cluster, where $k$ is odd. We show by simulation that adjusting the weight of the self-loop as $\frac{4}{N(k + \lfloor{\frac{\sqrt{{k}}}{2}}\rfloor)}$, the probability of finding the states increases by $\sim 0.2$ for all values of $N$. In Table~\ref{tab:summary} we provide an overview of the time complexity and success probability of both classical and quantum random walk algorithms to detect single and multiple marked states. We show by simulation that using weight of self loop as $\frac{4}{N(k + \lfloor{\frac{\sqrt{{k}}}{2}}\rfloor)}$, the number of steps required is less than that of quantum walk with no self loop.
\begin{table}[h!] \caption{Time complexity \& success probability of finding single and multiple marked states with existing algorithms}
\centering
\begin{tabular}{ |c|cc|cc| }
\hline
& \multicolumn{2}{|c}{Single Marked State}&\multicolumn{2}{|c|}{Clustered Marked States}\\ \hline
& Success Probability & Steps & Success Probability & Steps \\ \hline Random Walk & $\mathcal{O}(\frac{1}{N})$ & $\mathcal{O}(N\log N)$ & $\mathcal{O}(\frac{k}{N})$ & $\mathcal{O}(\frac{N}{k}\log\frac{N}{k})$ \\ Quantum Walk & $\mathcal{O}(\frac{1}{\log N})$ & $\mathcal{O}(\sqrt{N\log N})$ & $\mathcal{O}(\frac{1}{\log(N/k)})$ & $\Omega(\sqrt{N} - \sqrt{k})$ \\
\hline \end{tabular} \label{tab:summary} \end{table}
\section{Discrete time quantum walk} A quantum random walk consists of a position Hilbert space $H_p$ and a coin Hilbert space $H_c$. A quantum state consists of these two degrees of freedom, $\ket{c} \otimes \ket{v}$, where $\ket{c} \in H_c$ and $\ket{v} \in H_p$. A step in quantum walk is a unitary evolution $U = S.(C \otimes I)$ where $S$ is the shift operator and $C$ is the coin operator, which acts only on the coin Hilbert space $H_c$. If we consider a $\sqrt{N} \times \sqrt{N}$ grid, then the quantum walk starts in a superposition \begin{equation}
\ket{\psi(0)} = \frac{1}{\sqrt{4N}}(\sum_{i=1}^{4}\ket{i} \otimes \sum_{x,y=1}^{\sqrt{N}}\ket{x,y}) \end{equation}
where each location $(x,y)$ corresponds to a quantum register $\ket{x,y}$ with $x,y \in \{1, 2, \hdots, \sqrt{N}\}$ and the coin register $\ket{i}$ with $i \in \{\leftarrow, \rightarrow, \uparrow, \downarrow\}$. The most often used transformation on the coin register is the Grover's Diffusion Transformation $D$ \begin{center}
$D = \frac{1}{2}\begin{pmatrix}
-1 & 1 & 1 & 1\\
1 & -1 & 1 & 1\\
1 & 1 & -1 & 1\\
1 & 1 & 1 & -1
\end{pmatrix}$ \end{center}
The Diffusion Operator can also be written as $D = 2\ket{s_D}\bra{s_D} - I_4$, where $\ket{s_D} = \frac{1}{\sqrt{4}}\sum_{i=1}^{4}\ket{i}$.\\
The transformation creates a superposition of the coin states $\ket{i}$, which in turn governs the shift operation. In this paper, we apply the Flip-Flop Shift transformation S proposed in \cite{Ambainis:2005:CMQ:1070432.1070590}
\begin{center}
$\ket{i,j,\uparrow} = \ket{i,j-1,\downarrow}$\\
$\ket{i,j,\downarrow} = \ket{i,j+1,\uparrow}$\\
$\ket{i,j,\leftarrow} = \ket{i-1,j,\rightarrow}$\\
$\ket{i,j,\rightarrow} = \ket{i+1,j,\leftarrow}$ \end{center}
It is easy to see that $\ket{\psi(0)}$ is a $+1$ eigenstate of the operator $U = S.(D \otimes I)$. A perturbation is created in the quantum state by applying the coin operator $-I$ instead of $D$ for marked locations. A general quantum walk algorithm applies this unitary operation (appropriately for the marked and the unmarked states) $t$ times to create the state $\ket{\psi(t)}$ such that $\braket{\psi(t)|\psi(0)}$ becomes close to 0. Measurement of the state $\ket{\psi(t)}$ is expected to give the marked location with high probability.
\subsection{Lackadaisical quantum walk}
In lackadaisical quantum walk, the coin degree of freedom is five-dimensional, i.e. $i \in \{\leftarrow, \rightarrow, \uparrow, \downarrow, . \}$. The flip-flop transformation conditioned on the $\ket{.}$ coin state is
\begin{center}
$S(\ket{i,j} \otimes \ket{.}) = \ket{i,j} \otimes \ket{.}$ \end{center}
If $l$ self-loops are allowed, then the Coin operator will be $D = 2\ket{s_D}\bra{s_D} - I_5$, where
\begin{center}
$\ket{s_D} = \frac{1}{\sqrt{4+l}}(\ket{\uparrow} + \ket{\downarrow} + \ket{\leftarrow} + \ket{\rightarrow} + \sqrt{l}\ket{.})$ \end{center}
In \cite{Wong2018}, Wong showed that using lackadaisical quantum walk with $l = \frac{4}{N}$, the success probability of finding a marked state in an $\sqrt{N} \times \sqrt{N}$ grid becomes close to 1 in $\mathcal{O}(\sqrt{NlogN})$ time.
\section{Clustered marked state over 2-D grid} In \cite{10.1007/978-3-662-49192-8_31}, the authors have considered multiple marked locations. In accordance with the paper, we consider the walk is taking place on a $\sqrt{N} \times \sqrt{N}$ grid and there are $k$ marked locations arranged in a $\sqrt{k} \times \sqrt{k}$ cluster (shown in Fig.~\ref{fig:cluster}).
\begin{figure}
\caption{Grouped placement of $k$ marked locations \cite{10.1007/978-3-662-49192-8_31}}
\label{fig:cluster}
\end{figure}
In \cite{10.1007/978-3-662-49192-8_31}, the authors have used quantum walk where the weight on the self loop is $0$, or in other words, is not lackadaisical. In \cite{wang2017adjustable}, the authors have shown that using a weight of $\frac{4}{N}$ for the self loop, the probability to find a single marked state increases with respect to non-lackadaisical walk. However, for multiple marked states, this weight provides a probability poorer than non-lackadaisical walk.\\
Changing the weight of the self loop to be $\frac{1}{4N}$, the total probability becomes close to $0.85$ in $\mathcal{O}(\sqrt{N})$ steps for $\sqrt{N} > 12$. The probability of finding each marked state is \emph{total probability/$k$}. The total probability exceeds the probability in non-lackadaisical walk by $\sim 0.2$. However, if the weight on the self loop is a particular function of both the size of the grid ($N$) and the number of marked states ($k$), then the probability is shown to be even higher.\\
We set the probability of the self loop as $\frac{4}{N(k + \lfloor{\frac{\sqrt{{k}}}{2}}\rfloor)}$ for odd values of $k$. Our simulations show that using this weight, the probability is close to $0.8$ for $\sqrt{N} \le 14$ and exceeds $0.95$ for larger values of $N$. In Figure~\ref{fig:test1} and \ref{fig:test2}, we represent the results of our simulations graphically. In each Figure, we have shown three cases - (i) weight of self loop is 0 (blue color) \cite{10.1007/978-3-662-49192-8_31}, (ii) weight of self loop is $\frac{4}{N}$ (red color) \cite{wang2017adjustable} and (iii) weight of self loop is $\frac{4}{N(k + \lfloor{\frac{\sqrt{{k}}}{2}}\rfloor)}$ (proposed weight, green color). We show both the highest total probability and the number of steps required to attain this probability. We show the results of simulations for $k = 9$, i.e. 9 marked states arranged in a $3 \times 3$ cluster. The grid size, in the graphical representations, is varied from $\sqrt{N} = 8$ to $30$. For brevity, the plots show the probability to find the marked state for even values of $\sqrt{N}$ only. The odd values are exactly similar.
\begin{figure}
\caption{Probability and number of steps for $8 \le \sqrt{N} \le 30$}
\label{fig:test1}
\end{figure}
\section{Local search in vicinity}
The original paper by Nahimovs and Rivosh \cite{10.1007/978-3-662-49192-8_31} considered finding only one marked state among the multiple ones. In this section, we consider the scenario where it is required to find all the marked states. This may be necessary for scenarios like tracking an object in a picture. An obvious choice is to run the quantum walk algorithm multiple times to find all the marked states. Since the probability of finding each of the marked states is equal, and there are $k$ marked states, the algorithm needs to be run $\mathcal{O}(k)$ times. However, in the current scenario, it is more costly to apply the quantum walk $\mathcal{O}(k)$ times than to apply classical search a similar number of times. We show next that applying classical search in the vicinity of the marked state requires $\mathcal{O}(\sqrt{k})$ time to find all the marked states.\\
It is important to consider that the starting point is a marked state of some coordinate $(l,m)$ where $l$ and $m$ are unknown. Since the structure of the marked states is always a grid, and the number of marked states is also known a~priori, one can search in the vicinity of the $(l,m)$ point to find its relative location with respect to the boundary of the grid of marked states. Thus by knowing the values of $l$ and $m$, the position of all the marked states can be obtained in $\mathcal{O}(1)$ time.
\begin{theorem} Starting from a marked state, chosen uniformly from a $\sqrt{k} \times \sqrt{k}$ cluster, the average time to find all the marked states in that cluster, using classical search, is $\mathcal{O}(\sqrt{k})$. \end{theorem}
\textbf{Proof} Let us consider that the lower left marked state in the $\sqrt{k} \times \sqrt{k}$ cluster has a coordinate $(0,0)$. The search algorithm starts from some point $(l,m)$ on the grid, where $l$ and $m$ are not known a priori. Let us assume that the classical search algorithm always moves once towards the left and then down from the starting point $(l,m)$ until it hits the boundary. To determine the lower boundary, it has to move $l+1$ steps and to determine the left boundary, it has to move $m+1$ steps. The extra step in both cases is required to determine that the boundary has indeed been reached.\\
Consider the leftward walk of the search algorithm. In the best case, when the starting point is on the left-most boundary, a single step is sufficient. Whereas, if the starting point is on the right-most boundary, then $\sqrt{k}+1$ steps are necessary. Since, each location on the grid is equally likely to start with, and for the horizontal walk, there are $\sqrt{k}$ possible starting points, the expected time requirement is
\begin{equation*}
\frac{1 + 2 + \dots + (\sqrt{k}+1)}{\sqrt{k}} = \frac{(\sqrt{k}+1)(\sqrt{k}+2)}{2\sqrt{k}} = \mathcal{O}(\sqrt{k}) \end{equation*}
A similar approach is required for the downward movement from the final position, which, by symmetry, will also require $\mathcal{O}(\sqrt{k})$ time. Hence, the total time requirement to search for the relative values of $l$ and $m$ with respect to the grid of marked states is $\mathcal{O}(\sqrt{k})$.\\
From the above theorem, it is evident that applying quantum walk once, and then searching using the classical algorithm, can find all the marked states in $\mathcal{O}(\sqrt{k})$ time, whereas using only quantum walk would have required $\mathcal{O}(k)$ time.
\section{Conclusion}
In this paper we have studied the application of quantum random walk on a $\sqrt{N} \times \sqrt{N}$ grid for all odd values of $k \ge 1$ marked states, where the marked states are arranged in a $\sqrt{k} \times \sqrt{k}$ cluster. Our simulations show that using lackadaisical quantum walk, where the weight of the self loop is $\frac{4}{N(k + \lfloor{\frac{\sqrt{{k}}}{2}}\rfloor)}$, the probability of finding a marked state becomes close to 1 in time less than that of quantum walk with no self loop. Classical search in the vicinity can find all the marked states in $\mathcal{O}(\sqrt{k})$ time on average. Hence, instead of using quantum walk for $\mathcal{O}(k)$ times, one can use classical search after a single step of quantum walk and still obtain the speedup. The future scope of this work is to prove this bound mathematically, and to find the optimum weight for which the highest probability is attained.
\end{document}
\begin{document}
\footskip30pt
\title{On the triangulated category of framed motives $\text{DFr}_{-}^{eff}(k)$}
\author{Ivan Panin} \address{St. Petersburg Branch of V. A. Steklov Mathematical Institute, Fontanka 27, 191023 St. Petersburg, Russia} \email{paniniv@gmail.com}
\thanks{ }
\keywords{Motivic homotopy theory, framed correspondences, spectral categories}
\subjclass[2010]{14F42, 19E08, 55U35}
\begin{abstract} The category of framed correspondences $Fr_*(k)$ was invented by Voevodsky \cite[Section 2]{Voe2} in order to give another framework for $\text{SH}(k)$ more amenable to explicit calculations. Based on \cite{Voe2} and \cite{GP4} Garkusha and the author introduced in \cite[Section 2]{GP5} a triangulated category of framed bispectra $\text{SH}_{nis}^{fr}(k)$. It is shown in \cite[Section 2]{GP5} that $\text{SH}_{nis}^{fr}(k)$ recover classical Morel--Voevodsky triangulated categories of bispectra $\text{SH}(k)$.
For any infinite perfect field $k$ a triangulated category of $\mathbb {F}\text{r}$-motives $\text{D}\mathbb {F}\text{r}_{-}^{eff}(k)$ is constructed in the style of Voevodsky's construction of the category $\text{DM}_-^{eff}(k)$. In our approach the Voevodsky category of Nisnevich sheaves with transfers is replaced with the category of $\mathbb {F}\text{r}$-modules. To each smooth $k$-variety $X$ the $\mathbb {F}\text{r}$-motive $\text{M}_{\mathbb {F}\text{r}}(X)$ is associated in the category $\text{D}\mathbb {F}\text{r}_{-}^{eff}(k)$.
We identify the triangulated category $\text{D}\mathbb {F}\text{r}_{-}^{eff}(k)$ with the full triangulated subcategory $\text{SH}^{eff}_{-}(k)$ of the classical Morel--Voevodsky triangulated category $\text{SH}^{eff}(k)$ of effective motivic bispectra \cite{Jar2}. Moreover, the triangulated category $\text{D}\mathbb {F}\text{r}_{-}^{eff}(k)$ is naturally {\it symmetric monoidal}. Particularly, $\text{M}_{\mathbb {F}\text{r}}(X)\otimes_{\mathbb {F}\text{r}} \text{M}_{\mathbb {F}\text{r}}(Y)=\text{M}_{\mathbb {F}\text{r}}(X\times Y)$. The mentioned identification of the triangulated categories respects the symmetric monoidal structures on both sides.
We work with the derived category $\text{D}\mathbb {F}\text{r}_-(k)$ of bounded below $\mathbb {F}\text{r}$-modules rather than with the homotopy category $\text{SH}_{nis}(k)$ of bispectra as in \cite[Section 2]{GP5}. \end{abstract} \maketitle
\thispagestyle{empty} \pagestyle{plain}
\newdir{ >}{{}*!/-6pt/@{>}}
\section{Introduction}
The Voevodsky triangulated category of motives $\text{DM}_-^{eff}(k)$~\cite{Voe1} provides a natural framework to study motivic cohomology. In this paper a new short approach to constructing the part $\text{SH}^{eff}_{-}(k)$ of the classical triangulated category $\text{SH}(k)$ is presented providing the base field is infinite and perfect.
We work in the framework of strict $V$-spectral categories introduced in \cite[Definition~\ref{vsp}]{GP2} The main new feature of our spectral category $\mathbb {F}\text{r}$ is that {\it it is symmetric monoidal}. It is also connective and Nisnevich excisive in the sense of~\cite{GP}. Each $\pi_0(\mathbb {F}\text{r})$-presheaf $\mathcal F$ of Abelian groups is automatically a radditive framed presheaf of Abelian groups in the sense of \cite{Voe2}. By \cite[Lemma 2.15]{GP3} such an $\mathcal F$ is a $\mathbb ZF_*(k)$-presheaf of Abelian groups in the sense of \cite[2.13]{GP3}. By \cite[Lemma 4.5]{Voe2} and \cite[Lemma 2.15]{GP3} its associated Nisnevich sheaf $\mathcal F_{nis}$ is canonically a $\mathbb ZF_*(k)$-presheaf of Abelian groups. If $\mathcal F$ is homotopy invariant and stable in the sense of \cite{Voe2} (see also \cite[Def. 2.13, 2.14]{GP3}), then by \cite[Thm. 1.1]{GP3} the framed Nisnevich sheaf $\mathcal F_{nis}$ is strictly homotopy invariant and stable.
The main symmetric monoidal strict $V$-spectral category $\mathbb {F}\text{r}$ is constructed in Section \ref{The_Category}. It is strict over infinite perfect fields. Denote by $\text{D}\mathbb {F}\text{r}_-(k)$ the full triangulated subcategory of $\text{SH}^{nis}(\mathbb {F}\text{r})$ of bounded below $\mathbb {F}\text{r}$-modules. We also denote by $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ the full triangulated subcategory of $\text{D}\mathbb {F}\text{r}_-(k)$ of those $\mathbb {F}\text{r}$-modules $M$ such that each $\mathbb ZF_*(k)$-presheaf
$\pi_i(M)|_{\mathbb ZF_*(k)}$ is {\it homotopy invariant and stable} in the sense of \cite[Def. 2.13, 2.14]{GP3}.
We call $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ {\it the triangulated category of $\mathbb {F}\text{r}$-motives}. The category $\text{D}\mathbb {F}\text{r}_{-}^{eff}(k)$ is naturally symmetric monoidal. For each $X\in Sm/k$ the $\mathbb {F}\text{r}$-module
$$C_*(\mathbb {F}\text{r}(X)):=|d\mapsto\underline{\Hom}(\Delta^d,\mathbb {F}\text{r}(X))|$$ belongs to $\text{D}\mathbb {F}\text{r}_{-}^{eff}(k)$ and is called {\it the} $\mathbb {F}\text{r}$-{\it motive of} $X$; \ $\text{M}_{\mathbb {F}\text{r}}(X)\otimes_{\mathbb {F}\text{r}} \text{M}_{\mathbb {F}\text{r}}(Y)=\text{M}_{\mathbb {F}\text{r}}(X\times Y)$.
The latter triangulated category {\it is identified} with the full triangulated subcategory $\text{SH}^{eff}_{-}(k)$ of the classical Morel--Voevodsky triangulated category $SH^{eff}(k)$ of effective motivic bispectra ({\it this is the main result of the preprint}). See Theorem \ref{VeryMain}.
The mentioned identification respects {\it the symmetric monoidal structures} on both sides.
It can be shown that the identification triangulated functor as in Theorem \ref{VeryMain} $$\mathbb M_{\text{SH}}: \text{D}\mathbb {F}\text{r}_-^{eff}(k)\to SH^{eff}_-(k)$$ takes the $\mathbb {F}\text{r}$-motive $\text{M}_{\mathbb {F}\text{r}}(X)$ of $X$ to the symmetric bispectrum $\Sigma_{\mathbb G_m}\Sigma_{S^1}(X_+)$.
Sections 2 and 3 contain the materials of \cite[Sections 2 and 3]{GP2} adapted to the symmetric monoidal spectral category $\mathbb {F}\text{r}$, which is defined in Section 4. In Section 4 the language of triangulated categories is used as opposed to the model categories language. This allows us to state all constructions and results in a very explicit form. The main result here is Theorem \ref{neploho}. However it seems that this language does not allow one to prove Theorem 6.2 (the main result of this preprint).
Also this language does not allow to state and prove the following true result: there is a triangulated equivalence of the triangulated categories $$\text{SH}^{mot}(\mathbb {F}\text{r})\to \text{SH}^{mot}(k).$$
Triangulated subcategories $\text{SH}^{nis}(\mathbb {F}\text{r})$, $\text{D}\mathbb {F}\text{r}_-(k)$ and $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ are defined in Section 5. The main result of the preprint (Theorem \ref{VeryMain}) is stated in Section 6. Its proof is postponed to the next preprint.
Throughout the paper we denote by $Sm/k$ the category of smooth separated schemes of finite type over the base field $k$. The base field $k$ is supposed to be infinite and perfect. The paper \cite{DP} shows that there is no restriction on the characteristic of $k$. \\ {\bf Acknowledgements}. The author is very grateful to G.Garkusha for his deep interest in the topic of this preprint. I am very grateful also to my mother in law K.Shahbazian for her very stimulating interest to the present work on all its stages.
\section{Preliminaries}
We work in the framework of spectral categories and modules over them in the sense of Schwede--Shipley~\cite{SS}. We start with preparations.
We follow \cite[Definition 2.1.1, Remark 2.1.5]{HSS}. A symmetric sequence of objects in a category $\mathcal C$ is a functor $\Sigma \to \mathcal C$, and the category of symmetric sequences of objects in $\mathcal C$ is the functor category $\mathcal C^{\Sigma}$. The category $\Sigma$ is a skeleton of the category of finite sets and isomorphisms. Hence every symmetric sequence has an extension, which is unique up to isomorphism, to a functor on the category of all finite sets and isomorphisms. We will use both view points (often the second one).
Recall that symmetric spectra have two sorts of homotopy groups which we shall refer to as {\it naive\/} and {\it true homotopy groups\/} respectively following terminology of~\cite{Sch}. Precisely, the $k$th naive homotopy group of a symmetric spectrum $X$ is defined as the colimit
$$\hat\pi_k(X)=\colim_n\pi_{k+n}X_n.$$ Denote by $\gamma X$ a stably fibrant model of $X$ in $Sp^\Sigma$. The $k$-th true homotopy group of $X$ is given by
$$\pi_kX=\hat\pi_k(\gamma X),$$ the naive homotopy groups of the symmetric spectrum $\gamma X$.
Naive and true homotopy groups of $X$ can considerably be different in general (see, e.g.,~\cite{HSS,Sch}). The true homotopy groups detect stable equivalences, and are thus more important than the naive homotopy groups. There is an important class of {\it semistable\/} symmetric spectra within which $\hat\pi_*$-isomorphisms coincide with $\pi_*$-isomorphisms. Recall that a symmetric spectrum is semistable if some (hence any) stably fibrant replacement is a $\pi_*$-isomorphism. Suspension spectra, Eilenberg--Mac Lane spectra, $\Omega$-spectra or $\Omega$-spectra from some point $X_n$ on are examples of semistable symmetric spectra (see~\cite{Sch}).
Semistability is preserved under suspension, loop, wedges and shift.
A symmetric spectrum $X$ is {\it $n$-connected\/} if the true homotopy groups of $X$ are trivial for $k\leqslant n$. The spectrum $X$ is {\it connective\/} if it is $(-1)$-connected, i.e., its true homotopy groups vanish in negative dimensions. $X$ is {\it bounded below\/} if $\pi_i(X)=0$ for $i\ll 0$.
\begin{defs}\label{basic}{\rm (1) Following~\cite{SS} a {\it spectral category\/} is a category $\mathcal O$ which is enriched over the category $Sp^\Sigma$ of symmetric spectra (with respect to smash product, i.e., the monoidal closed structure of \cite[2.2.10]{HSS}). In other words, for every pair of objects $o,o'\in\mathcal O$ there is a morphism symmetric spectrum $\mathcal O(o,o')$, for every object $o$ of $\mathcal O$ there is a map from the sphere spectrum $S$ to $\mathcal O(o,o)$ (the ``identity element" of $o$), and for each triple of objects there is an associative and unital composition map of symmetric spectra $\mathcal O(o',o'')\wedge\mathcal O(o,o') \to\mathcal O(o,o'')$. An $\mathcal O$-module $M$ is a contravariant spectral functor to the category $Sp^\Sigma$ of symmetric spectra, i.e., a symmetric spectrum $M(o)$ for each object of $\mathcal O$ together with coherently associative and unital maps of symmetric spectra $M(o)\wedge\mathcal O(o',o)\to M(o')$ for pairs of objects $o,o'\in\mathcal O$. A morphism of $\mathcal O$-modules $M\to N$ consists of maps of symmetric spectra $M(o)\to N(o)$ strictly compatible with the action of $\mathcal O$. The category of $\mathcal O$-modules will be denoted by $\Mod\mathcal O$.
(2) A {\it spectral functor\/} or a {\it spectral homomorphism\/} $F$ from a spectral category $\mathcal O$ to a spectral category $\mathcal O'$ is an assignment from $\Ob\mathcal O$ to $\Ob\mathcal O'$ together with morphisms $\mathcal O(a,b)\to\mathcal O'(F(a),F(b))$ in $Sp^\Sigma$ which preserve composition and identities.
(3) The {\it monoidal product\/} $\mathcal O\wedge\mathcal O'$ of two spectral categories $\mathcal O$ and $\mathcal O'$ is the spectral category where $\Ob(\mathcal O\wedge\mathcal O'):=\Ob\mathcal O\times\Ob\mathcal O'$ and $\mathcal O\wedge\mathcal O'((a,x),(b,y)):= \mathcal O(a,b)\wedge\mathcal O'(x,y)$.
(3') A monoidal spectral category consists of a spectral category $\mathcal O$ equipped with a spectral functor $\diamond: \mathcal O \wedge \mathcal O \to \mathcal O$, a unit $u \in Ob \mathcal O$, a $Sp^{\Sigma}$-natural associativity isomorphism and two $Sp^{\Sigma}$-natural unit isomorphisms. Symmetric monoidal spectral categories are defined similarly.
(4) A spectral category $\mathcal O$ is said to be {\it connective\/} if for any objects $a,b$ of $\mathcal O$ the spectrum $\mathcal O(a,b)$ is connective.
(5) By a ringoid over $Sm/k$ we mean a preadditive category $\mathcal R$ whose objects are those of $Sm/k$ together with a functor
$$\rho:Sm/k\to\mathcal R,$$ which is identity on objects. Every such ringoid gives rise to a spectral category $\mathcal O_{\mathcal R}$ whose objects are those of $Sm/k$ and the morphisms spectrum $\mathcal O_{\mathcal R}(X,Y)$, $X,Y\in Sm/k$, is the Eilenberg--Mac~Lane spectrum $H\mathcal R(X,Y)$ associated with the abelian group $\mathcal R(X,Y)$. Given a map of schemes $\alpha$, its image $\rho(\alpha)$ will also be denoted by $\alpha$, dropping $\rho$ from notation.
(6) By a spectral category over $Sm/k$ we mean a spectral category $\mathcal O$ whose objects are those of $Sm/k$ together with a spectral functor
$$\sigma:\mathcal O_{naive}\to\mathcal O,$$ which is identity on objects. Here $\mathcal O_{naive}$ stands for the spectral category whose morphism spectra are defined as
$$\mathcal O_{naive}(X,Y)_p=\Hom_{Sm/k}(X,Y)_+\wedge S^p$$ for all $p\geqslant 0$ and $X,Y\in Sm/k$.
It is straightforward to verify that the category of $\mathcal O_{naive}$-modules can be regarded as the category of presheaves $Pre^\Sigma(Sm/k)$ of symmetric spectra on $Sm/k$. This is used in the sequel without further comment.
}\end{defs}
Let $\mathcal O$ be a spectral category and let $\Mod\mathcal O$ be the category of $\mathcal O$-modules. Recall that the projective stable model structure on $\Mod\mathcal O$ is defined as follows (see~\cite{SS}). The weak equivalences are the objectwise stable weak equivalences and fibrations are the objectwise stable projective fibrations. The stable projective cofibrations are defined by the left lifting property with respect to all stable projective acyclic fibrations.
Recall that the Nisnevich topology is generated by elementary distinguished squares, i.e. pullback squares
\begin{equation}\label{squareQ}
\xymatrix{\ar@{}[dr] |{\textrm{$Q$}}U'\ar[r]\ar[d]&X'\ar[d]^\varphi\\
U\ar[r]_\psi&X}
\end{equation} where $\varphi$ is \'etale, $\psi$ is an open embedding and $\varphi^{-1}(X\setminus U)\to(X\setminus U)$ is an isomorphism of schemes (with the reduced structure). Let $\mathcal Q$ denote the set of elementary distinguished squares in $Sm/k$ and let $\mathcal O$ be a spectral category over $Sm/k$. By $\mathcal Q_{\mathcal O}$ denote the set of squares
\begin{equation}\label{squareOQ}
\xymatrix{\ar@{}[dr] |{\textrm{$\mathcal O Q$}}\mathcal O(-,U')\ar[r]\ar[d]&\mathcal O(-,X')\ar[d]^\varphi\\
\mathcal O(-,U)\ar[r]_\psi&\mathcal O(-,X)}
\end{equation} which are obtained from the squares in $\mathcal Q$ by taking $X\in Sm/k$ to $\mathcal O(-,X)$. The arrow $\mathcal O(-,U')\to\mathcal O(-,X')$ can be factored as a cofibration $\mathcal O(-,U')\rightarrowtail Cyl$ followed by a simplicial homotopy equivalence $Cyl\to\mathcal O(-,X')$. There is a canonical morphism $A_{\mathcal O Q}:=\mathcal O(-,U)\bigsqcup_{\mathcal O(-,U')} Cyl\to\mathcal O(-,X)$.
\begin{defs}[see~\cite{GP}]{\rm I. The {\it Nisnevich local model structure\/} on $\Mod\mathcal O$ is the Bousfield localization of the stable projective model structure with respect to the family of projective cofibrations
\begin{equation*}\label{no}
\mathcal N_{\mathcal O}=\{\cyl(A_{\mathcal O Q}\to\mathcal O(-,X))\}_{\mathcal Q_{\mathcal O}}.
\end{equation*} The homotopy category for the Nisnevich local model structure will be denoted by $SH^{\nis}_{S^1}\mathcal O$. In particular, if $\mathcal O=\mathcal O_{naive}$ then we have the Nisnevich local model structure on $Pre^\Sigma(Sm/k)=\Mod\mathcal O_{naive}$ and we shall write $SH^{\nis}_{S^1}(k)$ to denote $SH^{\nis}_{S^1}\mathcal O_{naive}$.
II. The {\it motivic model structure\/} on $\Mod\mathcal O$ is the Bousfield localization of the Nisnevich local model structure with respect to the family of projective cofibrations
\begin{equation*}\label{ao}
\mathcal A_{\mathcal O}=\{\cyl(\mathcal O(-,X\times\mathbb A^1)\to\mathcal O(-,X))\}_{X\in Sm/k}.
\end{equation*} The homotopy category for the motivic model structure will be denoted by $SH^{\mot}_{S^1}\mathcal O$. In particular, if $\mathcal O=\mathcal O_{naive}$ then we have the motivic model structure on $Pre^\Sigma(Sm/k)=\Mod\mathcal O_{naive}$ and we shall write $SH^{\mot}_{S^1}(k)$ to denote $SH^{\mot}_{S^1}\mathcal O_{naive}$.
}\end{defs}
\begin{defs}[see~\cite{GP}]\label{Nis_and_Mot_exc}{\rm I. We say that $\mathcal O$ is {\it Nisnevich excisive\/} if for every elementary distinguished square $Q$
\begin{equation*}
\xymatrix{\ar@{}[dr] |{\textrm{$Q$}}U'\ar[r]\ar[d]&X'\ar[d]^\varphi\\
U\ar[r]_\psi&X}
\end{equation*} the square $\mathcal O Q$~\eqref{squareOQ} is homotopy pushout in the Nisnevich local model structure on $Pre^\Sigma(Sm/k)$.
II. $\mathcal O$ is {\it motivically excisive\/} if:
\begin{itemize} \item[(A)] for every elementary distinguished square $Q$ the square $\mathcal O Q$~\eqref{squareOQ} is homotopy pushout in the motivic model structure on $Pre^\Sigma(Sm/k)$ and
\item[(B)] for every $X\in Sm/k$ the natural map
$$\mathcal O(-,X\times\mathbb A^1)\to\mathcal O(-,X)$$ is a weak equivalence in the motivic model structure on $Pre^\Sigma(Sm/k)$. \end{itemize}
}\end{defs}
Recall that a sheaf $\mathcal F$ of abelian groups in the Nisnevich topology on $Sm/k$ is {\it strictly $\mathbb A^1$-invariant\/} if for any $X\in Sm/k$, the canonical morphism
$$H^*_{\nis}(X,\mathcal F)\to H^*_{\nis}(X\times\mathbb A^1,\mathcal F)$$ is an isomorphism.
\begin{defs}\label{vsp}{\rm Let $(\mathcal O, \diamond, pt)$ be a {\it symmetric monoidal} spectral category over $Sm/k$ together with the
structure spectral functor $\sigma:\mathcal O_{naive}\to\mathcal O$ and an additive functor $\mathbb ZF_*(k)\xrightarrow{\varepsilon}\pi_0\mathcal O$. We say that $((\mathcal O,\diamond,pt),\sigma,\varepsilon)$ is a {\it symmetric monoidal $V$-spectral category\/} if
\begin{enumerate} \item $\mathcal O$ is connective and Nisnevich excisive; \item the structure map $\rho: Sm/k \to \pi_0\mathcal O$ induced by $\sigma$ equals $\varepsilon \circ in$, where $in: Sm/k \to \mathbb ZF_*(k)$ is the graphic functor. \end{enumerate} } \end{defs}
\begin{rem}\label{additivity} {\rm Since $\mathcal O$ is connective and Nisnevich excisive, for each $\mathcal O$-module $M$ and each integer $i$ the presheaf
$\pi_i(M)|_{Sm/k}$ is {\it radditive} (the restriction is taken via the $\rho$). That is $\pi_i(M)(\emptyset)=0$ and $\pi_i(M)(X_1\sqcup X_2)=\pi_i(M)(X_1)\times \pi_i(M)(X_2)$. Particularly, the functor $\pi_i(M)|_{\mathbb ZF_*(k)}$ is additive. So, $\pi_i(M)|_{\mathbb ZF_*(k)}$ is a {\it presheaf of Abelian groups on} $\mathbb ZF_*(k)$ in the sense of \cite[Def. 2.13]{GP3} (the restriction is taken via the $\varepsilon$). } \end{rem}
We note that if $(\mathcal O, \diamond, pt)$ is a symmetric monoidal spectral category over $Sm/k$, then for every $\mathcal O$-module $M$ and any smooth scheme $U$, the presheaf of symmetric spectra
$$\underline{\Hom}(U,M):=M(-\times U)$$ is an $\mathcal O$-module. Moreover, $M(-\times U)$ is functorial in $U$.
\begin{lem}\label{pepe} Every symmetric monoidal $V$-spectral category $\mathcal O$ is motivically excisive. \end{lem}
\begin{proof} Every symmetric monoidal $V$-spectral category is, by definition, Nisnevich excisive. Since there is an action of smooth schemes on $\mathcal O$, the fact that $\mathcal O$ is motivically excisive is proved similar to~\cite[5.8]{GP}. \end{proof}
\begin{defs}\label{SHnis}{\rm Let $((\mathcal O,\diamond,pt),\sigma,\varepsilon)$ be a symmetric monoidal $V$-spectral category. Since it is both Nisnevich and motivically excisive, it follows from~\cite[5.13]{GP} that the pair of natural adjoint fuctors
$$\xymatrix{{\Psi_*}:Pre^\Sigma(Sm/k)\ar@<0.5ex>[r]&\Mod\mathcal O:{\Psi^*}\ar@<0.5ex>[l]}$$ induces a Quillen pair for the Nisnevich local projective (respectively motivic) model structures on $Pre^\Sigma(Sm/k)$ and $\Mod\mathcal O$. In particular, one has adjoint functors between triangulated categories
\begin{equation}\label{adjoint}
{\Psi_*}: \text{SH}^{nis}(\mathcal O_{naive})\rightleftarrows \text{SH}^{nis}(\mathcal O):{\Psi^*}\quad\textrm{ and }
\quad {\Psi_*}:\text{SH}^{mot}(\mathcal O_{naive})\rightleftarrows \text{SH}^{mot}(\mathcal O):{\Psi^*}.
\end{equation} } \end{defs}
\section{The triangulated category $D\mathcal O_-^{eff}(k)$}\label{dominus}
In this section we work with a symmetric monoidal $V$-spectral category $((\mathcal O,\diamond,pt),\sigma,\varepsilon)$ in the sense of Definition \ref{vsp}. We work in this section with the category $\text{SH}^{nis}(\mathcal O)$ as in Definition \ref{SHnis}.
Let $M$ be an $\mathcal O$-module. By Remark \ref{additivity} its $\pi_0\mathcal O$-presheaves $\pi_i(M)$ restricted via the $\varepsilon$ to the additive category $\mathbb ZF_*(k)$ are $\mathbb ZF_*(k)$-{\it presheaves of Abelian groups} in the sense of \cite[Def. 2.13]{GP3}. Thus, by \cite[Lemma 4.5]{Voe2} and \cite[Cor. 2.17]{GP3} the associated Nisnevich sheaf $\pi^{nis}_i(M)$ is canonically a $\mathbb ZF_*(k)$-presheaf of Abelian groups (possibly it is not a $\pi_0\mathcal O$-presheaf).
We shall often work with simplicial $\mathcal O$-modules
$M[\bullet]$. The {\it realization\/} of $M[\bullet]$ is the $\mathcal O$-module $|M|$ defined as the coend
$$|M|=\Delta[\bullet]_+\wedge_{\Delta} M[\bullet]$$ of the functor $\Delta[\bullet]_+\wedge M[\bullet]:\Delta\times\Delta^{{\textrm{\rm op}}}\to\Mod\mathcal O$. Here $\Delta[n]$ is the standard simplicial $n$-simplex.
Recall that the simplicial ring $k[\Delta]$ is defined as
$$k[\Delta]_n=k[x_0,\ldots,x_n]/(x_0+\cdots+x_n-1).$$ By $\Delta^{\cdot}$ we denote the cosimplicial affine scheme $\spec(k[\Delta])$. Given an $\mathcal O$-module $M$, we set
$$C_*(M):=|\underline{\Hom}(\Delta^{\cdot},M)|.$$ Note that $C_*(M)$ is an $\mathcal O$-module and is functorial in $M$. {\bf Our} $C_*(M)$ {\bf is different of} $C_*(M)$ {\bf used in} \cite[Sect. 3]{GP2}.
\begin{defs}[Definition 3.3 in \cite{GP2}] \label{boundedOmod}{\rm The $\mathcal O$-motive $M_{\mathcal O}(X)$ of a smooth algebraic variety $X\in Sm/k$ is the $\mathcal O$-module $C_*(\mathcal O(-,X))$. We say that an $\mathcal O$-module $M$ is {\it bounded below\/} if for $i\ll 0$ the Nisnevich sheaf $\pi_i^{\nis}(M)$ is zero. $M$ is {\it $n$-connected\/} if $\pi_i^{\nis}(M)$ are trivial for $i\leqslant n$. $M$ is {\it connective\/} is it is $(-1)$-connected, i.e., $\pi_i^{\nis}(M)$ vanish in negative dimensions. } \end{defs}
\begin{defs}[\cite{GP2}]\label{DOminus}{\rm Denote by $\Mod_{-}\mathcal O$ the full subcategory of $\Mod\mathcal O$ of bounded below $\mathcal O$-modules. \\ Denote by $D\mathcal O_-(k)$ the full triangulated subcategory of $SH^{nis}(\mathcal O)$ of bounded below $\mathcal O$-modules. We also denote by $D\mathcal O_-^{eff}(k)$ the full triangulated subcategory of $D\mathcal O_-(k)$ of those $\mathcal O$-modules $M$ such that each $\pi_0\mathcal O$-presheaf $\pi_i(M)$ regarded via the functor $\varepsilon$ as a $\mathbb ZF_*(k)$-presheaf of Abelian groups is {\it homotopy invariant and stable} in the sense of \cite[Def. 2.13, 2.14]{GP3}.
The category $D\mathcal O_-^{eff}(k)$ is an analog of Voevodsky's triangulated category $DM_-^{eff}(k)$. } \end{defs}
\begin{lem}[Corollary 3.4 in \cite{GP2}] \label{porto}{\rm If an $\mathcal O$-module $M$ is bounded below (respectively $n$-connected) then so is $C_*(M)$. In particular, the $\mathcal O$-motive $M_{\mathcal O}(X)$ of any smooth algebraic variety $X\in Sm/k$ is connective. } \end{lem}
\begin{rem}\label{1st_endo_funct}{\rm By Lemma \ref{porto} the assignment $M\mapsto C_*(M)$ is a functor $C_*: \Mod_{-}\mathcal O\to \Mod_{-}\mathcal O$. } \end{rem}
\begin{lem}[Compare with Lemma 3.5 in \cite{GP2}] \label{spain}{\rm The functor $C_*: \Mod_{-}\mathcal O\to \Mod_{-}\mathcal O$ respects local equivalences and induces a triangulated endofunctor
$$C_*: D\mathcal O_-(k)\to D\mathcal O_-(k)$$ } \end{lem}
\begin{thm}[Compare with Theorem 3.5 in \cite{GP2}] \label{neploho}{\rm Let $(\mathcal O,\diamond, pt)$ be a symmetric monoidal $V$-spectral category. Consider the full triangulated subcategory $\mathcal T$ of $SH^{nis}(\mathcal O)$ generated by the compact objects $\cone(\mathcal O(-,X\times\mathbb A^1)\to\mathcal O(-,X)),\ X\in Sm/k.$ Then the triangulated endofunctor
$$C_*:D\mathcal O_-(k)\to D\mathcal O_-(k)$$ as in Lemma \ref{spain} lands in $D\mathcal O_-^{eff}(k)$. The kernel of $C_*$ is $\mathcal T_-:=\mathcal T\cap D\mathcal O_-(k)$. Moreover, $C_*$ is left adjoint to the inclusion functor
$$i:D\mathcal O_-^{eff}(k)\to D\mathcal O_-(k)$$ and $D\mathcal O_-^{eff}(k)$ is triangle equivalent to the quotient category $D\mathcal O_-(k)/\mathcal T_-$ \ . } \end{thm}
\section{The main symmetric monoidal strict $V$-spectral category}\label{The_Category} We construct in this section our main symmetric monoidal strict $V$-spectral category $(\mathbb {F}\text{r},\diamond, pt)$.
First construct a spectral category $\mathbb {F}\text{r}$. Its objects are those of $Sm/k$. To each pair $Y,X\in Sm/k$ we assign a symmetric spectrum $\mathbb {F}\text{r}(Y,X)$. The latter is described as follows. Its terms are the functors $A \mapsto \mathbb {F}\text{r}(Y,X)_A=Fr_A(Y,X\otimes S^A)$ (here $A$ runs over the category of finite sets and their isomorphisms). The structure maps are defined by the obvious compositions $$\varepsilon_{A,B}: Fr_A(Y,X\otimes S^A)\wedge S^B\to Fr_{A}(Y,X\otimes S^{A\sqcup B})\hookrightarrow Fr_{A\sqcup B}(Y,X\otimes S^{A\sqcup B}).$$ For each triple $Z,Y,X\in Sm/k$ there is an obvious symmetric spectra morphism $$\circ_{Z,Y,X}: \mathbb {F}\text{r}(Y,X)\wedge \mathbb {F}\text{r}(Z,Y)\to \mathbb {F}\text{r}(Z,X) \ \ \text{(the composition law)}.$$ It is uniquely determined by simplicial set morphisms $\mathbb {F}\text{r}(Y,X)_A\wedge \mathbb {F}\text{r}(Z,Y)_B\to \mathbb {F}\text{r}(Z,X)_{A\sqcup B}$ which on $n$-simplices are given by the set maps $$Fr_A(Y,X\otimes (S^A)_n)\wedge Fr_B(Z,Y\otimes (S^B)_n)\to Fr_A(Y\otimes (S^B)_n,X\otimes (S^A)_n\otimes (S^B)_n)\wedge Fr_B(Z,Y\otimes (S^B)_n)\to$$ $$\to Fr_{A\sqcup B}(Z,X\otimes (S^{A\sqcup B})_n).$$ In details, the set map is given by $$(\alpha,\beta)\mapsto (\alpha\otimes id_{(S^B)_n},\beta)\mapsto (\alpha\otimes id_{(S^B)_n})\circ \beta.$$ For each $X\in Sm/k$ the identity morphism $id_X$ gives rise to the symmetric spectra morphism $u_X: \mathbb S\to \mathbb {F}\text{r}(X,X)$. We formed a spectral category $\mathbb {F}\text{r}$ and a spectral functor $\sigma: \mathcal O_{naive}\to \mathbb {F}\text{r}$, which is identity on objects. The pair $(\mathbb {F}\text{r},\sigma)$ is a spectral category over $Sm/k$ in the sense of Definition \ref{basic}(6).
Equip now the spectral category $\mathbb {F}\text{r}$ with a spectral functor $\diamond: \mathbb {F}\text{r}\wedge \mathbb {F}\text{r}\to \mathbb {F}\text{r}$ (taking $(X_1,X_2)$ to $X_1\times X_2$),
a unit $u\in \mathbb {F}\text{r}$, a $Sp^{\Sigma}$-natural associativity isomorphism $a$ and two $Sp^{\Sigma}$-natural unit isomorphisms $u_l$, $u_r$ and a twist isomorphism $tw: \mathbb {F}\text{r}\wedge \mathbb {F}\text{r}\to \mathbb {F}\text{r}\wedge \mathbb {F}\text{r}$ and a spectral functor isomorphism $\Phi: \diamond\to \diamond\circ tw$ such that the data $$(\mathbb {F}\text{r}, \diamond, tw, \Phi, u, a, u_l, u_r)$$ form a symmetric monoidal spectral category.
First construct the spectral functor $\diamond$. On objects it takes an object $(X_1,X_2)\in Sm/k\times Sm/k$ to $X_1\times X_2 \in Sm/k$. To construct $\diamond$ on morphisms it sufficient to construct certain symmetric spectra morphisms $$\diamond_{(V,Y),(U,X)}: \mathbb {F}\text{r}(V,U)\wedge \mathbb {F}\text{r}(Y,X)\xrightarrow{} \mathbb {F}\text{r}(V\times Y,U\times X)$$ and check that they satisfy the expected properties. To construct the morphism $\diamond_{(V,Y),(U,X)}$ it is sufficient to construct simplicial set morphisms $$\boxtimes_{(V,Y),(U,X),A,B}: \mathbb {F}\text{r}(V,U)_A\wedge \mathbb {F}\text{r}(Y,X)_B \xrightarrow{} \mathbb {F}\text{r}(V\times Y,U\times X)_{A\sqcup B}$$ subjecting the known properties. The latter are given on $n$-simplices by the exterior product maps $$\boxtimes_{(V,Y),(U,X),A,B,\ n}: Fr_A(V,U\otimes (S^A)_n)\wedge Fr_B(Y,X\otimes (S^B)_n) \to Fr_{A\sqcup B}(V\times Y,(U\times X) \otimes (S^{A\sqcup B})_n).$$ We constructed the spectral functor $\diamond$.
Second we take the point $pt:=Spec (k)$ as the unit of the spectral category $\mathbb {F}\text{r}$ and we skip constructions of desired $a$, $u_l$, $u_r$ (they are obvious).
Third we construct the twist spectral categories isomorphism $tw: \mathbb {F}\text{r}\wedge \mathbb {F}\text{r}\to \mathbb {F}\text{r}\wedge \mathbb {F}\text{r}$. On objects it takes $(X_1,X_2)$ to $(X_2,X_1)$. On morphisms it is determined by certain symmetric spectra isomorphisms $$tw_{(V,Y),(U,X)}: \mathbb {F}\text{r}(V,U)\wedge \mathbb {F}\text{r}(Y,X)\xrightarrow{} \mathbb {F}\text{r}(Y,X)\wedge \mathbb {F}\text{r}(V,U).$$ In turn the $tw_{(V,Y),(U,X)}$ is determined by the family of simplicial set isomorphisms (switching factors) $$tw^C_{A,B}: \mathbb {F}\text{r}(V,U)_A\wedge \mathbb {F}\text{r}(Y,X)_B \to \mathbb {F}\text{r}(Y,X)_B\wedge \mathbb {F}\text{r}(V,U)_A.$$ Here for each finite set $C$ the ordered pairs $(A,B)$ run over all subsets $A\subseteq C$, $B\subseteq C$ such that $A\cup B=C$ and $A\cap B=\emptyset$.
Finally we construct the desired spectral functor isomorphism $\Phi: \diamond\to \diamond\circ tw$. It is the assignment $(V,Y)\mapsto \Phi(V,Y)=[\tau_{V,Y}: V\times Y \to Y\times V]$. Here the switching factors isomorphism $\tau_{V,Y}$ is regarded as a point in $Fr_0(V\times Y, Y\times V)$. So, it is regarded as a symmetric spectra morphism $\mathbb S\xrightarrow{\Phi(V,Y)} \mathbb {F}\text{r}(V\times Y, Y\times V)$. It is easy to check that $\Phi$ is indeed a spectral functor isomorphism.
We leave it to the reader to check that the data $(\mathbb {F}\text{r}, \diamond, tw, \Phi, u, a, u_l, u_r)$ form a symmetric monoidal spectral category.
\section{Properties of the main spectral category} Let $((\mathbb {F}\text{r},\diamond,pt), \sigma:\mathcal O_{naive}\to \mathbb {F}\text{r})$ be the {\it symmetric monoidal} spectral category over $Sm/k$ as in Section \ref{The_Category}. \begin{lem}\label{epsilon}{\rm There is an additive functor $\mathbb ZF_*(k)\xrightarrow{\varepsilon}\pi_0(\mathbb {F}\text{r})$ such that the data $((\mathcal O, \diamond, pt),\sigma,\varepsilon)$ is a symmetric monoidal $V$-spectral category in the sense of Definition \ref{vsp}. } \end{lem} Applying now Lemma \ref{pepe} we get the following \begin{cor}\label{Nis_and_Mot_exc_true}{\rm The symmetric monoidal spectral category $(\mathbb {F}\text{r}, \diamond, pt, tw, \Phi, u, a, u_l, u_r)$ as in Section \ref{The_Category} is Nisnevich and Motivically excisive in the sense of \cite{GP} (see Definition \ref{Nis_and_Mot_exc}). } \end{cor}
The following definition is just Definition \ref{boundedOmod} adapted to the category $\Mod \mathbb {F}\text{r}$. \begin{defs}\label{boundedFrmod}{\rm The $\mathbb {F}\text{r}$-motive $M_{\mathbb {F}\text{r}}(X)$ of a smooth algebraic variety $X\in Sm/k$ is the $\mathbb {F}\text{r}$-module $C_*(\mathbb {F}\text{r}(-,X))$. We say that an $\mathbb {F}\text{r}$-module $M$ is {\it bounded below\/} if for $i\ll 0$ the Nisnevich sheaf $\pi_i^{\nis}(M)$ is zero. $M$ is {\it $n$-connected\/} if $\pi_i^{\nis}(M)$ are trivial for $i\leqslant n$. $M$ is {\it connective\/} if it is $(-1)$-connected, i.e., $\pi_i^{\nis}(M)$ vanish in negative dimensions.
}\end{defs}
\begin{defs}{\rm Denote by $\Mod \mathbb {F}\text{r}_{-}$ the full subcategory of bounded below $\mathbb {F}\text{r}$-modules. \\ Denote by $\text{D}\mathbb {F}\text{r}_-(k)$ the full triangulated subcategory of $SH^{nis}(\mathbb {F}\text{r})$ of bounded below $\mathbb {F}\text{r}$-modules. We also denote by $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ the full triangulated subcategory of $\text{D}\mathbb {F}\text{r}_-(k)$ of those $\mathbb {F}\text{r}$-modules $M$ such that each $\mathbb ZF_*(k)$-presheaf
$\pi_i(M)|_{\mathbb ZF_*(k)}$ is {\it homotopy invariant and stable} in the sense of \cite[Def. 2.13, 2.14]{GP3}. } \end{defs} In certain sense $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ is an analog of Voevodsky's triangulated category $DM_-^{eff}(k)$ \cite{Voe1}.
\begin{defs}\label{prekrasno}{\rm The triangulated category $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ is called {\it the triangulated category of effective $\mathbb {F}\text{r}$-motives. } } \end{defs}
One can prove the following \begin{thm}\label{DFr_eff_and_SH_eff}{\rm There is a natural triangulated equivalence between the triangulated categories $\text{D}\mathbb {F}\text{r}_-^{eff}(k)$ and the Voevodsky category $SH^{eff}_-(k)$. } \end{thm} A sketch of a proof of this result will be presented in the next section.
\section{Triangulated equivalences $SH^{eff}(k)\rightleftarrows \text{D}\mathbb {F}\text{r}_-^{eff}(k)$ } We construct in this section triangulated equivalences (quasi-inverse to each other) $$\mathbb M^{\mathbb {F}\text{r}}_{\text{eff}}: SH^{eff}_-(k)\rightleftarrows \text{D}\mathbb {F}\text{r}_-^{eff}(k): \mathbb M_{\text{SH}}^{\text{eff}}.$$ To construct these functors we need some preliminaries. Let $\mathbb G^{\wedge 1}_m\in \Delta^{op}(Fr_0(k))$ be as in \cite[Notation 8.1]{GP4}. Let $\mathbb G_m^{\wedge n}$ be the $n$th monoidal power of $\mathbb G_m^{\wedge 1}$, as in \cite[Notation 8.1]{GP4}. The category $Pre^{\Sigma}_{S^1, \mathbb G^{\wedge 1}_m}(Sm/k)$ of presheaves of symmetric bispectra can be regarded as the category of symmetric $\mathbb G^{\wedge 1}_m$-spectra in the category $\Mod\mathcal O_{naive}$ of presheaves of symmetric spectra (see Definition \ref{basic}).
Similarly we can (and will) consider a category of symmetric $\mathbb G^{\wedge 1}_m$-spectra in the category $\Mod\mathbb {F}\text{r}$. It follows from~\cite[5.13]{GP} that there is a pair of natural adjoint functors
$$\xymatrix{{\Phi_*}: Pre^{\Sigma}_{S^1, \mathbb G^{\wedge 1}_m}(Sm/k)=Sp_{\mathbb G^{\wedge 1}_m}(\Mod\mathcal O_{naive})\ar@<0.5ex>[r]& Sp_{\mathbb G^{\wedge 1}_m}(\Mod\mathbb {F}\text{r}):{\Phi^*}\ar@<0.5ex>[l]}$$ There is another pair of adjoint functors $$\xymatrix{{\Sigma^{\infty}_{\mathbb {F}\text{r}(\mathbb G^{\wedge 1}_m)}}: \Mod\mathbb {F}\text{r} \ar@<0.5ex>[r]& Sp_{\mathbb G^{\wedge 1}_m}(\Mod\mathbb {F}\text{r}): \Omega^{\infty}_{\mathbb {F}\text{r}(\mathbb G^{\wedge 1}_m)}\ar@<0.5ex>[l]}$$ Here $\mathbb {F}\text{r}(\mathbb G^{\wedge 1}_m)$ stands for the $\mathbb {F}\text{r}$-module represented by the simplicial scheme $\mathbb G^{\wedge 1}_m$.
For each $\mathbb {F}\text{r}$-module $M$ consider the $\mathbb {F}\text{r}$-module
$C_*(M):=|\underline{\Hom}(\Delta^{\cdot},M)|$ as in Section \ref{dominus}.
By Lemma \ref{spain} and Theorem \ref{neploho} the endo-functor $C_*: \Mod\mathbb {F}\text{r}_{-}\to \Mod\mathbb {F}\text{r}_{-}$ induces a triangulated functor $C_*: \text{D}\mathbb {F}\text{r}_-(k) \to \text{D}\mathbb {F}\text{r}_-^{eff}(k)$. By Theorem \ref{neploho} the pair of triangulated functors \begin{equation}\label{Sigma_Omega_2}
C_*: \text{D}\mathbb {F}\text{r}_-(k) \rightleftarrows \text{D}\mathbb {F}\text{r}_-^{eff}(k):i
\end{equation} is a pair of adjoint triangulated functors (here $i$ is the inclusion functor).\\ Let $\mathbb {F}\text{r}(n)=\text{M}_{\mathbb {F}\text{r}}(\mathbb G^{\wedge n}_m)$ be the $\mathbb {F}\text{r}$-motive of $\mathbb G^{\wedge n}_m$. For each cofibrant object $E$ in the projective model structure on $\Mod \mathbb {F}\text{r}$ put $E(n)=E\otimes^{\mathbb {F}\text{r}}\mathbb {F}\text{r}(n)$. It is a cofibrant object in the projective model structure on $\Mod \mathbb {F}\text{r}$. Clearly, $\Sigma^{\infty}_{\mathbb {F}\text{r}(1)}(E):=(E, E(1), E(2), \dots)$ is naturally an object of $Sp_{\mathbb G^{\wedge 1}_m}(\Mod\mathbb {F}\text{r})$. \begin{defs}{\rm Let $E\mapsto E^c$ be the cofibrant replacement in the projective model structure on $Pre^{\Sigma}_{S^1, \mathbb G^{\wedge 1}_m}(Sm/k)$. Put\\ $\mathbb M^{\mathbb {F}\text{r}}(E)=(C_* \circ \Omega^{\infty}_{\mathbb {F}\text{r}(1)} \circ \Phi_*)(E^c)= \Omega^{\infty}_{\mathbb G^{\wedge 1}_m}C_*\mathbb {F}\text{r}(E^c) \in \Mod \mathbb {F}\text{r}$. \\ Let $\mathcal E\mapsto \mathcal E^c$ be the cofibrant replacement in the projective model structure on $\Mod \mathbb {F}\text{r}$. Put\\ $\mathbb M_{\text{SH}}(\mathcal E)=\Phi^*(\Sigma^{\infty}_{\mathbb {F}\text{r}(1)}(\mathcal E^c))\in Pre^{\Sigma}_{S^1, \mathbb G^{\wedge 1}_m}(Sm/k)$. Thus,\\ $\mathbb M_{\text{SH}}(\mathcal E)=\text{the object} \ \Sigma^{\infty}_{\mathbb {F}\text{r}(1)}(\mathcal E^c) \ \text{of} \ Sp_{\mathbb G^{\wedge 1}_m}(\Mod\mathbb {F}\text{r}) \ \text{regarded as an object in} \ Pre^{\Sigma}_{S^1, \mathbb G^{\wedge 1}_m}(Sm/k)$. } \end{defs} A proof of the following result is postponed to the next preprint. It can be given in the spirit of the proofs as in \cite[Section 2]{GP5}.
\begin{thm}\label{VeryMain}{\rm $\bullet$ The functor $\mathbb M_{\text{SH}}$ induces a triangulated equivalence \\ $\mathbb M^{eff}_{\text{SH}}: \text{D}\mathbb {F}\text{r}_-^{eff}(k)\to SH^{eff}_-(k)$ \\ between these triangulated categories; \\ $\bullet$ A triangulated functor $\mathbb M^{\mathbb {F}\text{r}}_{eff} : SH^{eff}_-(k)\to \text{D}\mathbb {F}\text{r}_-^{eff}(k)$ \\ quasi-inverse to $\mathbb M^{eff}_{\text{SH}}$ is induced by the functor \\ $\mathbb M^{\mathbb {F}\text{r}}: Pre^{\Sigma}_{S^1, \mathbb G^{\wedge 1}_m}(Sm/k)\to \Mod \mathbb {F}\text{r}$. } \end{thm}
\end{document}
\begin{document}
\def\spacingset#1{\renewcommand{\baselinestretch} {#1}\small\normalsize} \spacingset{1}
\if11 {
\title{\bf Bivariate Hierarchical Bayesian Model for Combining Summary Measures and their Uncertainties from Multiple Sources}
\author{Yujing Yao$^{1}$, R.\ Todd Ogden$^{1}$, Chubing Zeng$^{2}$, and Qixuan Chen$^{1}$
\hspace{.2cm}\\
$^{1}$Department of Biostatistics, Mailman School of Public Health, \\
Columbia University, New York, NY\\
$^{2}$Division of Biostatistics, Department of Preventive Medicine, \\
University of Southern California, Los Angeles, CA}
\maketitle } \fi
\if01 {
\begin{center}
{\LARGE\bf Bivariate Hierarchical Bayesian Model for Combining Summary Measures from Multiple Sources} \end{center}
} \fi
\begin{abstract}
It is often of interest to combine available estimates of a similar quantity from multiple data sources. When the corresponding variances of each estimate are also available, a model should take into account the \textit{uncertainty of the estimates} themselves as well as the \textit{uncertainty in the estimation of variances}.
In addition, if there exists a strong association between estimates and their variances, the correlation between these two quantities should also be considered. In this paper, we propose a bivariate hierarchical Bayesian model that jointly models the estimates and their estimated variances assuming a correlation between these two measures.
We conduct simulations to explore the performance of the proposed bivariate Bayesian model and compare it to other commonly used methods under different correlation scenarios. The proposed bivariate Bayesian model has a wide range of applications. We illustrate its application in three very different areas: PET brain imaging studies, meta-analysis, and small area estimation. \end{abstract}
\noindent {\it Keywords:} Brain imaging; correlation between measures and uncertainty; estimation of uncertainty; meta-analysis; small area estimation.
\spacingset{1}
\section{Introduction} \label{s:intro}
In many areas of health and science research, it is common that multiple studies are conducted to address a similar question, or that similar measurements are available from data collected in multiple geographic areas. Any estimate from a single study or geographic area may be affected by small sample size, missing data, or measurement error. In such a situation, inference can often be improved by combining estimates from the different studies or areas. To generalize treatment we use the term ``source'' to refer to each data origin from which an estimate is obtained. This general idea of combining estimates from multiple sources has wide applications. For example, meta-analysis involves combining the results of multiple independent studies \citep{glass1976primary, borenstein2011introduction}. Small area estimation in survey sampling involves the estimation of parameters in small areas by combining estimates of all areas to improve precision \citep{ghosh1994small,pfeffermann2002small,rao2015small}.
When data are comparable across sources, a natural approach to combine the estimates is simply to calculate their average. When data from the various sources have rather different characteristics, however, we can use instead a weighted average. The weights could be determined based on estimates of precision, sample sizes of sources, and other factors \citep{cochran1954combination, borenstein2011introduction}. However, if there is also uncertainty in the factors needed to compute the weights, it can lead to increased uncertainty of the resulting estimate. In such a case, weight trimming could be applied to reduce large weights to a maximum value, reducing variability but increasing bias \citep{potter1988survey, potter1990study}.
Hierarchical models can be used as an alternative approach to combine estimates from multiple sources while accounting for heterogeneity between sources, e.g., variation in estimates \citep{browne2006comparison,goldstein2011multilevel}. In meta-analysis, a normal-normal hierarchical model is often used to combine estimates from individual studies to obtain a joint estimate, based on the assumption that the studies may be estimating distinct, but related effects \citep{dumouchel1994hierarchical,sutton2001bayesian,higgins2002quantifying,higgins2009re}. Similarly in small area estimation, the Fay-Herriot area-level model is a widely used hierarchical model for estimating parameters in small areas of a large survey by combining the unstable direct survey estimates of all small areas \citep{fay1979estimates,wang2003mean, rao2015small}. Both models consider the setting with a single outcome measure and assume the estimated variance of the summary measure to be a fixed quantity.
More recently, hierarchical models have been further developed in both meta-analysis and small area estimation for broader applications. In meta-analysis, \cite{reitsma2005bivariate} extended the normal-normal model to a bivariate hierarchical model exclusively for sensitivity and specificity in the area of diagnostic studies, assuming a bivariate normal distribution for logit sensitivity and logit specificity. Similar to the normal-normal hierarchical model, the variance estimates of sensitivity and specificity were assumed to be fixed values. To allow sparse data in the number of true positives, true negatives, false positives, or false negatives in a study, \cite{chu2006bivariate} further developed a bivariate hierarchical model by assuming binomial distributions for the number of true positives and the number of true negatives and a hierarchical bivariate normal distribution for logit sensitivity and logit specificity \citep{paul2010bayesian, guo2017bayesian}. On the other hand, in small area estimation, the Fay-Herriot model was extended by taking into consideration the variability in the estimated sampling variance. Specifically, \cite{you2006small} assumed the estimated variance has a chi-squared distribution. \cite{maiti2014prediction} and \cite{sugasawa2017bayesian} modified the model by further assuming an underlying inverse-Gamma distribution of the true variance.
Despite these new developments, the existing hierarchical models for summary measures still have some limitations. Both the univariate and bivariate hierarchical models in meta-analysis assume the observed variability of the summary measure to be fixed values \citep{reitsma2005bivariate, chu2006bivariate, fay1979estimates}. Although the uncertainty in the estimated variance is modeled in the modified Fay-Herriot models, independence was assumed between the estimated summary measure and its corresponding estimated variance \citep{wang2003mean,rao2015small,you2006small,maiti2014prediction,sugasawa2017bayesian}. However, the variance of the summary measure in a study is usually unknown and must be estimated, and thus the uncertainty in the variance estimate should also be modeled if it is not negligible. Further, there often exists a strong association between the observed summary measure and its corresponding variance estimate in a study, e.g., when the summary measure is a log odds ratio or a log rate. Neglecting the dependence between summary measures and their variances could result in a poor estimation of population and source-specific parameters. Motivated by this, we propose a bivariate hierarchical Bayesian model that jointly models a summary measure and its variance while allowing correlation between these two quantities both in their estimates and in the underlying parameters. This bivariate hierarchical model has wide applications in combining summary measures and their uncertainties from multiple sources, including but not limited to meta-analysis and small area estimation.
\section{Methods} \label{s:model}
We consider the situation in which we have estimates of similar measures from $n$ separate data sources. Let $y_{i}$ and $s_i$ denote the direct estimate of a summary measure and its uncertainty, respectively, from the $i^{th}$ source, and $\theta_i$ and $\sigma_i$ denote the corresponding source-specific true value of the parameters, $i=1,\ldots, n$. Our initial goal is to obtain estimates of population averages denoted by $\mu$ across all $n$ sources and then also to obtain refined estimates of each $\theta_i$. Building on this simplest case, we will then generalize the model to allow for the estimation of regression coefficients in a linear regression framework.
We first review the univariate hierarchical Bayesian model that has been widely used in meta-analysis and small area estimation in Section \ref{s:ubm}, and then present the new bivariate hierarchical Bayesian model in Section \ref{s:bbm}, in which we account for not only the uncertainty in estimating $s_i$ but also for the correlations between $y_i$ and $s_i$ and between $\theta_i$ and $\sigma_i$.
\subsection{A review of univariate hierarchical Bayesian model}\label{s:ubm}
In a typical univariate hierarchical Bayesian model (UBM) for combining summary measures, $y_i$ is assumed to be normally distributed centered at the source-specific true value $\theta_i$ with variance $\sigma_i^2$. The source-specific values (the $\theta_i$'s) are then assumed to be normally distributed with a common mean $\mu$ and a common variance $\tau^2$. Specifically,
\begin{equation}\label{m:ubmnoCov}
\begin{aligned}
&f(y_i|\theta_i, \sigma_i) \sim N(\theta_i, \sigma_i^2) \\
&f(\theta_i|\mu, \tau) \sim N(\mu, \tau^2),
\,\, i=1,\ldots, n.
\end{aligned}
\end{equation}
By assuming a uniform prior for $\mu$, when $\tau^2$ and $\sigma_i^2$'s are known, the posterior distribution of $\mu$ can be obtained as:
\begin{equation}
\label{postdist:mu}
f(\mu|\bm{y}, \bm{\sigma}, \tau) \sim N \left(\frac {\sum_{i=1}^n \omega_i y_i}{\sum_{i=1}^n \omega_i}, \frac{1}{\sum_{i=1}^n \omega_i} \right), \text{~with~}\omega_i = \frac{1}{\sigma_i^2+\tau^2},
\end{equation}
where $\bm{y}$ is the $n\times 1$ vector $(y_1, \cdots, y_n)^T$ and $\bm{\sigma}$ is the $n\times 1$ vector $(\sigma_1, \dots, \sigma_n)^T$.
That is, the common mean $\mu$ can be estimated using the posterior mean
\begin{equation}\label{e:ubmnoCov}
\hat{\mu} = \frac {\sum_{i=1}^n \omega_i y_i}{\sum_{i=1}^n \omega_i}.
\end{equation}
In contrast to the typical weighted average estimator of $\mu$ that assigns weight $1/\sigma_i^2$ to the observation $y_i$, the inclusion of $\tau^2$ in the expression for $\omega_i$ reduces the chance of having extreme weights with very small $\sigma_i^2$, especially when $\tau^2$ is large relative to $\sigma_i^2$.
When source-specific covariates $\bm{x_i}^T=(x_{i1}, \cdots, x_{ip})$ are available, Model (\ref{m:ubmnoCov}) can be extended to incorporate these into a regression model, with $\bm{\beta} = (\beta_1, \cdots, \beta_p)^T$ being a $p \times 1$ vector of coefficients:
\begin{equation}\label{m:ubmwCov}
\begin{aligned}
&f(y_i|\theta_i, \sigma_i) \sim N(\theta_i, \sigma_i^2) \\
&f(\theta_i|\bm{\beta}, \tau) \sim N(\bm{x_i}^T \bm{\beta}, \tau^2), \,\, i=1, \ldots, n.
\end{aligned}
\end{equation}
If the $\sigma_i^2$'s and $\tau^2$ are known, with a uniform prior for ${\bm{\beta}}$, we can derive the posterior distribution of ${\bm{\beta}}$:
\begin{equation}
f(\bm{\beta}|\bm{y}, \bm{X}, \bm{\sigma}, \tau) \sim N \left(\left(\sum_{i=1}^n \omega_i \bm{x}_i \bm{x}_i^T\right)^{-1} \left(\sum_{i=1}^n \omega_i \bm{x}_i y_i\right), \left(\sum_{i=1}^n \omega_i \bm{x}_i \bm{x}_i^T\right)^{-1} \right)
\end{equation}
where $\bm{X}$ is the $n\times p$ matrix of $(\bm{x}_1, \cdots, \bm{x}_n)^T$. We can then estimate $\bm{\beta}$ using the posterior mean of ${\bm{\beta}}$:
\begin{equation}\label{e:ubmwCov}
\hat{\bm{\beta}} = \left(\sum_{i=1}^n \omega_i \bm{x}_i \bm{x}_i^T\right)^{-1} \left(\sum_{i=1}^n \omega_i \bm{x}_i y_i\right).
\end{equation}
The UBM is equivalent to the normal-normal hierarchical model in meta-analysis \citep{dumouchel1994hierarchical} and the Fay-Herriot model in small area estimation \citep{fay1979estimates} (where interest also lies in estimating source-specific means $\theta_i, i=1, \ldots, n$).
The posterior distribution for $\theta_i$ can be written as:
\begin{equation}
f(\theta_i | y_i, \bm{x}_i, \bm{\beta}, \sigma_i, \tau) \sim N\left(\gamma_i y_i + (1-\gamma_i) \bm{x}_i^T \bm{\beta}, \,\,\, \sigma_i^2\gamma_i\right), \text{~where~}\gamma_i = \frac{\tau^2}{\tau^2 +\sigma_i^2 },
\end{equation}
and the estimate of $\theta_i$ can be obtained as the posterior mean with $\bm{\beta}$ substituted by its estimator in (\ref{e:ubmwCov}):
\begin{equation}\label{e:ubmtheta}
\hat{\theta}_i = \gamma_i y_i + (1-\gamma_i) \bm{x}_i^T \hat{\bm{\beta}}, \,\, i=1,\ldots, n.
\end{equation}
\subsection{Bivariate hierarchical Bayesian model} \label{s:bbm}
We consider a bivariate hierarchical Bayesian model (BBM)
to take into account both the estimate and its variance, as well as the correlation between the two quantities.
In the simplest scenario without source-specific covariates, the model is
\begin{equation}
\label{m:bbmnoCov}
\begin{aligned}
&\left. \left( \begin{array}{c} y_i \\ \log s_i \\ \end{array} \right) \right\vert \theta_i, \sigma_i, \rho_1, \sigma_{s_i} \sim N_2 \left( \left( \begin{array}{c} \theta_i \\ \log \sigma_i \\ \end{array} \right),
\left( \begin{array}{cc} \sigma_i^2 & \rho_1\sigma_i\sigma_{s_i} \\\rho_1\sigma_i\sigma_{s_i} & \sigma_{s_i}^2 \\ \end{array} \right) \right),
\\
&\left. \left( \begin{array}{c} \theta_i \\ \log \sigma_i \\ \end{array} \right) \right\vert \mu_{\theta}, \mu_{\sigma}, r_{\theta}, \rho_2, r_{\sigma} \sim N_2
\left( \left( \begin{array}{c} \mu_\theta \\\mu_\sigma \\ \end{array} \right),
\left( \begin{array}{cc} r_\theta^2 & \rho_2 r_{\theta}r_{\sigma} \\\rho_2 r_{\theta}r_{\sigma} & r_\sigma^2 \\ \end{array} \right) \right), \,\, i=1, \ldots, n.
\end{aligned}
\end{equation}
This differs from existing methods in the following aspects:
(1) We incorporate the uncertainties in estimating both $y_i$ and $\log s_i$ into the model using the variance $\sigma_i$ and $\sigma_{s_i}$.
(2) We model $y_i$ and $\log s_i$ as bivariate normal random variables and model $\theta_i$ and $\log \sigma_i$ also as bivariate normal in the second level, while existing hierarchical Bayesian models in small area estimation specify a different underlying distribution for $s_i^2$, e.g., Chi-squared distribution, inverse Gamma distribution.
(3) Given the bivariate normal setting, we further introduce parameters $\rho_1$ and $\rho_2$ to allow correlations between $y_i$ and $\log s_i$ and between $\theta_i$ and $\log \sigma_i$, respectively.
The conditional distribution for $y_i$ given $\log s_i$ and the conditional distribution of $\theta_i$ given $\log \sigma_i$ are
\begin{equation}
\label{m:bbmcond}
\begin{aligned}
& y_i \vert \log s_i, \theta_i, \sigma_i, \rho_1, \sigma_{s_i}
\sim N \left( \theta_i + \rho_1 \frac{\sigma_i}{\sigma_{s_i}}(\log s_i - \log \sigma_i) , \sigma_i^2 (1-\rho_1^2) \right), \\
& \theta_i \vert \log \sigma_i, \mu_{\theta}, \mu_{\sigma}, r_{\theta}, \rho_2, r_{\sigma} \sim N \left( \mu_{\theta} + \rho_2 \frac{r_{\theta}}{r_{\sigma}}(\log \sigma_i - \mu_{\sigma}), r_{\theta}^2 (1-\rho_2^2) \right), \,\, i=1, \ldots, n.
\end{aligned}
\end{equation}
With a flat prior on $\mu_{\theta}$, if all other parameters are known, the posterior distribution of $\mu_{\theta}$ can be obtained as:
\begin{equation}
\label{postdist:mu_theta}
f(\mu_{\theta}|\bm{y}, \log \bm{s}, \bm{\sigma}, \rho_1, \bm{\sigma_s}, \mu_\sigma, r_\theta, \rho_2, r_\sigma)
\sim N \left(\frac {\sum_{i=1}^n \xi_i \Tilde{y}_i}{\sum_{i=1}^n \xi_i}, \frac{1}{\sum_{i=1}^n \xi_i} \right)
\end{equation}
where $\xi_i = \frac{1}{\sigma_i^2 (1-\rho_1^2) + r_{\theta}^2 (1-\rho_2^2)}$, $\Tilde{y}_i = \left (y_i - \rho_2 \frac{r_{\theta}}{r_{\sigma}} (\log \sigma_i - \mu_{\sigma})- \rho_1 \frac{\sigma_i}{\sigma_{s_i}} (\log s_i - \log \sigma_i) \right )$, $\log \bm{s}$ is the $n\times 1$ vector $(\log s_1, \cdots, \log s_n)^T$, and $\bm{\sigma_s}$ is the $n\times 1$ vector $(\sigma_{s_1}, \dots, \sigma_{s_n})^T$.
The population mean can be estimated using the posterior mean
\begin{equation}\label{e:bbmnoCov}
\Tilde{\mu}_\theta = \frac {\sum_{i=1}^n \xi_i \Tilde{y}_i}{\sum_{i=1}^n \xi_i}.
\end{equation}
When $\rho_1 = \rho_2 = 0$, the posterior mean $\Tilde{\mu}_\theta$ in (\ref{e:bbmnoCov}) reduces to $\hat{\mu}$ in (\ref{e:ubmnoCov}) obtained from the UBM. If either of the correlations $\rho_1$ and $\rho_2$ is non-zero, $\hat{\mu}$ is biased.
When source-specific covariates $\bm{x}_i, i=1, \cdots, n$ are available, we replace the constant means $\mu_{\theta}$ and $\mu_{\sigma}$ in (\ref{m:bbmnoCov}) with regressions on the covariates:
\begin{equation}\label{m:bbmwCov}
\begin{aligned}
&\left. \left( \begin{array}{c} y_i \\ \log s_i \\ \end{array} \right) \right\vert \theta_i, \sigma_i, \rho_1, \sigma_{s_i} \sim N_2 \left( \left( \begin{array}{c} \theta_i \\ \log \sigma_i \\ \end{array} \right),
\left( \begin{array}{cc} \sigma_i^2 & \rho_1\sigma_i\sigma_{s_i} \\\rho_1\sigma_i\sigma_{s_i} & \sigma_{s_i}^2 \\ \end{array} \right) \right), \\
&\left. \left( \begin{array}{c} \theta_i \\ \log \sigma_i \\ \end{array} \right) \right\vert \bm{\beta}_\theta, \bm{\beta}_\sigma, r_\theta, \rho_2, r_\sigma \sim N_2
\left( \left( \begin{array}{c} \bm{x}_i^T \bm{\beta}_\theta \\ \bm{x}_i^T \bm{\beta}_\sigma \\ \end{array} \right),
\left( \begin{array}{cc} r_\theta^2 & \rho_2 r_{\theta}r_{\sigma} \\\rho_2 r_{\theta}r_{\sigma} & r_\sigma^2 \\ \end{array} \right) \right), \,\, i=1,\ldots, n
\end{aligned}
\end{equation}
where $\bm{\beta}_\theta$ and $\bm{\beta}_\sigma$ are regression coefficients associated with $\bm{x}_i$ in predicting $\theta_i$ and $\log \sigma_i$, respectively. The covariates can also be different in the models for $\theta_i$ and $\log \sigma_i$.
Similar to model (\ref{m:bbmnoCov}), with a uniform prior for $\bm{\beta}_\theta$, if all other parameters are known, we can derive the posterior distribution of $\bm{\beta}_\theta$:
\begin{equation}
\begin{aligned}
& f(\bm{\beta}_\theta|\bm{y}, \bm{X}, \log \bm{s}, \bm{\sigma}, \rho_1, \bm{\sigma_s}, \bm{\beta}_\sigma, r_\theta, \rho_2, r_\sigma) \\
& \sim N \left(\left(\sum_{i=1}^n \xi_i \bm{x}_i \bm{x}_i^T\right)^{-1} \left(\sum_{i=1}^n \xi_i \bm{x}_i \breve{y}_i\right), \left(\sum_{i=1}^n \xi_i \bm{x}_i \bm{x}_i^T\right)^{-1} \right)
\end{aligned}
\end{equation}
where $\breve{y}_i = \left (y_i - \rho_2 \frac{r_{\theta}}{r_{\sigma}} (\log \sigma_i - \bm{x}_i^T \bm{\beta}_\sigma )- \rho_1 \frac{\sigma_i}{\sigma_{s_i}} (\log s_i - \log \sigma_i) \right )$.
We can then estimate the regression coefficient $\bm{\beta}_\theta$ using the posterior mean:
\begin{equation}\label{e:bbmwCov}
\Tilde{\bm{\beta}}_\theta = \left(\sum_{i=1}^n \xi_i \bm{x}_i \bm{x}_i^T\right)^{-1} \left(\sum_{i=1}^n \xi_i \bm{x}_i \breve{y}_i \right).
\end{equation}
Further, the posterior distribution of source-specific mean, $\theta_i$, can be obtained as:
\begin{equation}
\begin{aligned}
& f(\theta_i | y_i, \bm{x}_i, \log s_i, \bm{\beta}_\theta, \sigma_i, \rho_1, \sigma_{s_i}, \bm{\beta}_\sigma, r_\theta, \rho_2, r_\sigma) \\
& \sim N\left(\zeta_i (y_i-\rho_1 \frac{\sigma_i}{\sigma_{s_i}} (\log s_i - \log \sigma_i) ) +
(1-\zeta_i) ( \bm{x}_i^T {\bm{\beta}}_\theta + \rho_2 \frac{r_{\theta}}{r_{\sigma}} (\log \sigma_i - \bm{x}_i^T \bm{\beta}_\sigma ) ), \,\,\, \zeta_i \sigma_i^2(1-\rho_1^2) \right)
\end{aligned}
\end{equation}
where $\zeta_i = \frac{r_\theta^2(1-\rho_2^2)}{r_\theta^2(1-\rho_2^2) +\sigma_i^2(1-\rho_1^2) }$.
We estimate $\theta_i$ using the posterior mean with $\bm{\beta}$ substituted by its estimator in (\ref{e:bbmwCov}):
\begin{equation}\label{e:bbmtheta}
\Tilde{\theta}_i =
\zeta_i \left (y_i-\rho_1 \frac{\sigma_i}{\sigma_{s_i}} (\log s_i - \log \sigma_i)\right ) +
(1-\zeta_i) \left ( \bm{x}_i^T \Tilde{\bm{\beta}}_\theta + \rho_2 \frac{r_{\theta}}{r_{\sigma}} (\log \sigma_i - \bm{x}_i^T \bm{\beta}_\sigma ) \right ), \,\, i=1,\ldots, n.
\end{equation}
When $\rho_1= \rho_2 = 0$, the source-specific posterior mean $\Tilde{\theta}_i$ in (\ref{e:bbmtheta}) reduces to $\hat{\theta}_i$ in (\ref{e:ubmtheta}) obtained from the UBM. If $\rho_1$ and $\rho_2$ are ignored when either of them is not equal to 0, the estimator $\hat{\theta}_i$ in (\ref{e:ubmtheta}) from the UBM can be biased even if we have good estimates for $\tau^2$ and $\sigma_i^2, i=1, \cdots, n$.
In some applications, the normality assumption may not be appropriate, e.g., when source-level estimates are proportions \citep{fabrizi2016hierarchical,sugasawa2018hierarchical}. In such a case, we consider applying some transformation $g(\cdot)$ to the outcome, and the variance of the transformed outcome can be approximated by $g^\prime(y_i)^2\sigma_i^{2}$.
\subsection{Bayesian computation}
To obtain full posterior inference for the above models, we adopt a Bayesian approach, specifying independent prior distributions for all non-intermediate parameters.
To ease the computation, we apply Cholesky parameterization for the covariance matrix in the second level of the multivariate normal distribution. Specifically, the covariance matrix is decomposed as
\begin{align*}
\left( \begin{array}{cc} r_\theta^2 & \rho_2 r_{\theta}r_{\sigma} \\\rho_2 r_{\theta}r_{\sigma} & r_\sigma^2 \\ \end{array} \right) =
\left( \begin{array}{cc} r_\theta & 0 \\0 & r_\sigma \\ \end{array} \right) LL^T \left( \begin{array}{cc} r_\theta & 0 \\0 & r_\sigma \\ \end{array} \right),
\end{align*}
with the Cholesky factor of the correlation matrix $L = \left( \begin{array}{cc} 1 & 0 \\ \rho_2 & \sqrt{1-\rho_2^2} \\ \end{array} \right)$. Then we place an LKJ prior distribution on the Cholesky factor rather than placing a non-informative prior on $\rho_2$ \citep{gelman2006data}.
Not all these parameters have a closed-form expression for their posterior distributions, and thus Gibbs sampling can be challenging. We use Stan's NUTS-Hamiltonian Monte Carlo (HMC) sampler, via RStan \citep{carpenter2017stan}, for the Bayesian computation of BBM. HMC is a Markov chain Monte Carlo (MCMC) method that uses the derivatives of the density function being sampled to generate efficient transitions spanning the posterior. It can properly explore high-dimensional target distributions, and is faster and more scalable \citep{neal2011mcmc,hoffman2014no, betancourt2015hamiltonian}.
To check for sampling behavior and model convergence, we consider trace plots, the effective sample size, and the Gelman-Rubin diagnostic statistic $\hat{R}$ \citep{gelman1992inference}.
An R-package \texttt{bmsSum} for \textbf{B}ayesian \textbf{M}odel with \textbf{S}tan for combining \textbf{Sum}mary measures using UBM and BBM with or without covariates is available on GitHub.
\section{Simulation}\label{s:simu}
\subsection{Simulation Design} \label{s:simuset}
We used synthetic data and compared BBM with UBM and other existing methods to assess how well they each estimate
\begin{enumerate}
\item the population mean, i.e., $\mu$ in model (\ref{m:ubmnoCov}) and $\mu_{\theta}$ in model (\ref{m:bbmnoCov}) (section \ref{s:mu}),
\item the regression coefficient, i.e., $\bm{\beta}$ in model (\ref{m:ubmwCov}) and $\bm{\beta}_{\theta}$ in model (\ref{m:bbmwCov}) (section \ref{s:beta}),
\item the source-specific means, i.e., $\theta_i, i=1, \cdots, n$ in model (\ref{m:ubmnoCov}) and (\ref{m:bbmnoCov}) (section \ref{s:theta}).
\end{enumerate}
We considered four scenarios with different combinations of $\rho_1$ and $\rho_2$, where $\rho_1$ is the correlation between $y_i$ and $\log s_i$ and $\rho_2$ is the correlation between $\theta_i$ and $\log \sigma_i$ (see models in Section \ref{s:bbm}). Those scenarios are: (1) $\rho_1=\rho_2=0$; (2) $\rho_1 \neq 0, \rho_2=0$; (3) $\rho_1=0, \rho_2 \neq 0$; (4) $\rho_1=\rho_2 \neq 0$.
In each simulation, we generated a random sample of $n=50$ observations. We used Rstan to fit both UBM and BBM with independent improper uniform priors for $\mu$, $\mu_{\theta}$ and $\mu_{\sigma}$; $\texttt{Normal}(0, 10^6)$ prior for $\beta_{\theta,j}$, and $\beta_{\sigma, j}$, $j=1, \ldots, p$; $\texttt{Half-Cauchy}(2.5)$ prior for $\tau$, $r_\theta$, $r_\sigma$, and $\sigma_{s_i}$; $\texttt{LKJCorr}(4)$ prior for the Cholesky factor $L$; and $\texttt{Unif}(-1,1)$ prior for $\rho_1$. To obtain the posterior distributions of parameters of interest,
we ran three chains with 5000 iterations, 2000 warm-up and a lag of 10 in each chain, which generated 900 draws for each model parameter.
Point estimates are the means of the posterior distributions, and the 95\% credible interval (CI) was constructed by equally splitting the tail areas of the posterior distributions.
We generated $500$ datasets for each scenario. For each simulation setting and for each estimator, we calculated empirical bias, mean squared error (MSE), and coverage rate of the corresponding intervals.
\subsection{Estimation of population mean} \label{s:mu}
To address the first question, we generated
$(\theta_i$, $\log \sigma_i)^T$, $i =1, \cdots, n$ from a bivariate normal distribution with mean vector $(\mu_\theta = 10, \mu_\sigma =2)^T$ and variance components $r_\theta =3, r_\sigma=1$, and $\sigma_{s_i} = 1$. The correlations, $\rho_1$ and $\rho_2$, can take values $0, 0.3, 0.5$ and $0.7$. To account for the situation when data sources are more homogeneous, we also generated data with smaller $\text{log}\sigma_i$ by using $\mu_{\sigma}=0.2$ and $r_{\sigma}=0.1$.
We compared the BBM estimator to the UBM estimator as well as the following three estimators from commonly used (non-Bayesian) methods, including
\begin{enumerate}
\item (``raw''): $\frac{1}{n}\sum_{i=1}^n y_i$;
\item (``weighted'') estimation with weights $1/s_i^2$: $\sum_{i=1}^n \frac{1}{s_i^2} y_i/\sum_{i=1}^n \frac{1}{s_i^2}$;
\item (``trimmed'') weighted estimation with weights $\omega_i$ trimmed to 3 times the mean $1/s_i^2$: \\
$\sum_{i=1}^n \omega_i y_i/\sum_{i=1}^n \omega_i$, where $\omega_i = 1/s_i^2$ if $1/s_i^2 \le \frac{3}{n}\sum_{i=1}^n 1/s_i^2$ and $\omega_i = \frac{3}{n}\sum_{i=1}^n 1/s_i^2$ if $1/s_i^2 > \frac{3}{n}\sum_{i=1}^n 1/s_i^2$ \citep{chen2017approaches}.
\end{enumerate}
For ``raw'' and ``weighted'' estimators, 95\% confidence intervals (CIs) were based on the normality assumption. For the ``trimmed'' estimator, 95\% CIs were based on bootstrap samples.
\begin{figure}
\caption{Comparison of BBM and UBM to the three non-Bayesian methods for estimating the population overall mean with $\rho_1$ and $\rho_2$ taking different values. The violin plot presents the distribution of estimates and the number above the violin plot shows the 95\% CI coverage rate based on $500$ simulations.}
\label{f-1}
\end{figure}
Results for the setting with more heterogeneity in the summary measures, $y_i$, across data sources (i.e., $\mu_{\sigma}=2$ and $r_{\sigma}=1$) are shown in Figure \ref{f-1} and Supplementary Table S1. When $\rho_1=\rho_2=0$, all methods provide unbiased estimates of the overall mean. However, the raw estimator and weighted estimator yield larger variation than the other three estimators; the smallest variation is observed for the BBM estimator. All methods yield a coverage rate close to 95\%, except for the weighted estimator. The UBM works well in this scenario but not as well as the BBM, since the UBM uses $s_i$ in place of the true $\sigma_i$. The BBM works well without over-fitting the data even though there are no correlations between measures and uncertainties of the measures in this scenario.
When at least one of $\rho_1$ or $\rho_2$ is nonzero, the BBM method outperforms the other estimators since it takes both types of correlations into consideration. Similar to the $\rho_1=\rho_2=0$ scenario, the raw estimator provides unbiased estimate with close to the nominal level coverage rate but displays rather large variation. The two weighted estimators and the UBM do not perform well in general. When $\rho_1 \neq 0$ and $\rho_2 = 0$, the two weighted methods, and UBM yield biased estimation with CIs below the nominal level coverage rate, and the bias and under-coverage becomes more severe for larger values of $\rho_1$. When $\rho_1=0$ and $\rho_2 \neq 0$, the two weighted estimators still perform poorly but the UBM performs reasonably well. Finally, when both $\rho_1$ and $\rho_2$ are nonzero, the two weighted estimators and the UBM perform even worse with very large bias and very poor coverage. It's interesting to note that when $\rho_2 \neq 0$ as compared to the situation in which $\rho_1 \neq 0$, the UBM estimator yields less bias. The weighted estimator with trimmed weights can reduce variation in the estimate compared to the weighted estimator without weight trimming, but this step can also introduce bias and may lead to worse interval coverage.
When data sources are less heterogeneous with small variations between $y_i$ ($\mu_{\sigma}=0.2$ and $r_{\sigma}=0.1$), all methods perform better than the setting with more variations between $y_i$. BBM still performs better than UBM and the weighted estimators with or without trimming, especially when $\rho_1 \neq$ 0. The raw estimator performs similarly to BBM now with estimates centered at the true population mean, small variations in the estimates, and coverage rate close to 0.95 (see Supplementary Figure S1).
\subsection{Estimation of regression coefficient} \label{s:beta}
To address the second question, we generated the design matrix $\bm{X}$ of the regression model (\ref{m:bbmwCov}) with 3 columns, including a vector of 1s for the intercept, $\bm{x}_1 \sim \texttt{Normal}(0,1)$, and $\bm{x}_2 \sim \texttt{Bernoulli}(0.2)$. We set the regression coefficients $\beta_{\theta} = (5,3,1)^T$ and $\beta_{\sigma} = (1,1,0)^T$, with the same variance-covariance matrix and correlation settings as in Section \ref{s:mu}. We compared the BBM estimator to the UBM estimator as well as to estimators obtained from three non-Bayesian approaches, including (unweighted) linear regression (LR), weighted linear regression with weights $1/s_i^2$ (WLR), and weighted linear regression with trimmed weights (TWLR) as defined above.
\begin{table}[!ht] \caption{Comparison of bias, MSE, and coverage rate of 95\% CIs of the two Bayesian model-based estimators and the three non-Bayesian estimators in estimating the slope associated with the continuous predictor $\bm{x}_1$ under different combinations of $\rho_1$ and $\rho_2$.} \label{t-2} \begin{center} \scalebox{0.8}{
\begin{tabular}{c|c|ccc|ccc|ccc} \hline \textbf{Correlation} & & \multicolumn{3}{c}{\textbf{$\rho=0.3$}} & \multicolumn{3}{c}{\textbf{$\rho=0.5$}} & \multicolumn{3}{c}{\textbf{$\rho=0.7$}} \\\hline Scenario & $\beta_{\theta,1}=3$ & Bias & MSE & Coverage & Bias & MSE & Coverage & Bias & MSE & Coverage \\\hline $\rho_1=0, \rho_2 = 0$ & LR & 0.02 & 13.83 & 0.89 & 0.02 & 13.83 & 0.89 & 0.02 & 13.83 & 0.89 \\
& WLR & 0.02 & 5.14 & 0.41 & 0.02 & 5.14 & 0.41 & 0.02 & 5.14 & 0.41 \\
& TWLR & 0.03 & 2.85 & 0.54 & 0.03 & 2.85 & 0.54 & 0.03 & 2.85 & 0.54 \\
& UBM & 0.01 & 1.51 & 0.91 & 0.01 & 1.51 & 0.91 & 0.01 & 1.51 & 0.91 \\
& BBM & -0.02 & 0.74 & 0.94 & -0.02 & 0.74 & 0.94 & -0.02 & 0.74 & 0.94 \\\hline $\rho_1 = \rho , \rho_2 = 0$ & LR & 0.02 & 13.83 & 0.89 & 0.01 & 12.57 & 0.88 & 0.11 & 11.31 & 0.88 \\
& WLR & -0.29 & 5.06 & 0.42 & -0.52 & 5.12 & 0.42 & -0.67 & 5.46 & 0.4 \\
& TWLR & -0.3 & 2.9 & 0.53 & -0.57 & 3.17 & 0.52 & -0.77 & 3.5 & 0.51 \\
& UBM & -0.69 & 1.96 & 0.85 & -1.19 & 3.31 & 0.76 & -1.64 & 4.83 & 0.65 \\
& BBM & 0.03 & 0.72 & 0.95 & 0.06 & 0.71 & 0.95 & 0.06 & 0.68 & 0.95 \\\hline $\rho_1 = 0, \rho_2 = \rho $ & LR & 0.18 & 13.13 & 0.88 & 0.06 & 13.5 & 0.86 & 0.2 & 12.34 & 0.87 \\
& WLR & -0.1 & 5.21 & 0.45 & -0.2 & 5.22 & 0.39 & -0.41 & 5.03 & 0.38 \\
& TWLR & -0.14 & 2.98 & 0.52 & -0.28 & 2.98 & 0.53 & -0.49 & 3.06 & 0.48 \\
& UBM & -0.17 & 1.59 & 0.9 & -0.24 & 1.67 & 0.89 & -0.32 & 1.91 & 0.86 \\
& BBM & -0.11 & 0.67 & 0.94 & -0.16 & 0.68 & 0.95 & -0.24 & 0.67 & 0.96 \\\hline $\rho_1 = \rho_2 = \rho $ & LR & -0.01 & 12.37 & 0.89 & 0.15 & 13.14 & 0.88 & 0.21 & 12.95 & 0.87 \\
& WLR & -0.34 & 5.16 & 0.44 & -0.75 & 5.43 & 0.36 & -1.15 & 5.28 & 0.32 \\
& TWLR & -0.46 & 2.87 & 0.54 & -0.89 & 3.4 & 0.43 & -1.33 & 4.23 & 0.32 \\
& UBM & -0.88 & 2.2 & 0.82 & -1.47 & 4.15 & 0.64 & -2.03 & 6.47 & 0.39 \\
& BBM & -0.06 & 0.67 & 0.96 & -0.09 & 0.62 & 0.96 & -0.13 & 0.56 & 0.95 \\\hline \end{tabular}} \end{center} \end{table}
Simulation results are provided in Table \ref{t-2} for the slope of $X_1$ and in Supplementary Table S2 for the intercept and slope of $X_2$. The findings are similar to the estimation of population mean. The LR estimates are slightly less biased but have much larger MSE and coverage rates that are below the nominal level for all scenarios. The BBM performs best, yielding the smallest bias and MSE with the coverage rate close to the nominal level. The UBM performs poorly with large bias, large MSE, and below nominal level coverage rate when $\rho_1 > 0$, and its performance deteriorates for larger values of $\rho_1$. In contrast, the UBM performs reasonably well when $\rho_1 = 0$ even if $\rho_2 > 0$. The weighted estimator with trimmed weights is more efficient than the weighted estimator without weight trimming, and both are also more efficient than the LR estimator but less efficient than the two Bayesian model-based estimators. The 95\% CIs of the two weighted estimators yield very poor coverage rates.
\subsection{Estimation of source-specific means} \label{s:theta}
In some applications like small area estimation, interest lies in estimating $\theta_i$, the mean of $Y$ from the $i^{th}$ data source. In such a situation we are interested in how the estimators from UBM and BBM improve over the ``raw'' estimate $y_i$. We considered the scenario in which there is correlation between a measurement and its uncertainty. As was done previously, we generated $(\theta_i, \log \sigma_i)^T, i =1, \cdots, n$ from a bivariate normal distribution with mean vector $(\mu_\theta = 10, \mu_\sigma =2)^T$, variance components $r_\theta =3, r_\sigma=1, \sigma_{s_i} = 1$, and set $\rho_1 = \rho_2 = 0.7$. One simulation was performed with a random sample of $n=20$ observations, and we compared the point estimates of $\theta_i$ and 95\% CIs using the posterior distributions of parameters in BBM and UBM to the observed data $y_i$ and the true $\theta_i$.
\begin{figure}
\caption{The plot of $\theta_i$ (true value), $y_i$ (observed value) with 95\% CI (top, solid line, some truncated at (-50, 50)), and comparison to the point estimates and 95\% CIs of BBM (middle, dashed line) and UBM (bottom, dotted line) for a simulated dataset with $n=20$. Results are sorted by the descending absolute distance between $\theta_i$ and $y_i$. Numbers on the right column indicate the sorting order $i=1, \cdots, 20$.}
\label{f-2-2}
\end{figure}
Figure \ref{f-2-2} shows the estimates and 95\% CIs for estimating the source-specific mean $\theta_i$ using UBM, BBM and the raw estimate $y_i$, sorted by the absolute distance between $y_i$ and $\theta_i$. When $y_i$ is close to $\theta_i$ and the CI for $y_i$ is narrow (e.g., in the case $i = 17$), the estimates of UBM and BBM are similar but UBM yields a shorter 95\% CI. This indicates that when the direct estimate $y_i$ is already a good estimate of $\theta_i$, the simpler UBM tends to have better estimation performance than BBM. When $y_i$ is close to $\theta_i$ but $y_i$ has a wide 95\% CI (e.g., in the case $i = 11$), estimates of UBM and BBM are still close but BBM improves efficiency and thus yields a shorter CI. When $y_i$ is farther away from $\theta_i$, BBM performs much better than UBM. Specifically, the BBM estimate tends to be closer to $\theta_i$ than that of UBM; when the CI of $y_i$ is narrow and does not cover $\theta_i$ (e.g., in the case $i = 1$), BBM yields a wider CI that contains $\theta_i$ more frequently than UBM; when the CI of $y_i$ is wide and covers $\theta_i$ (e.g., in the case $i = 4$), BBM improves efficiency and yields a shorter CI while still containing $\theta_i$.
\section{Real data studies}\label{s:appl}
We illustrated the application of BBM using three very different data examples, including PET brain imaging, meta-analysis, and small area estimation. In the applications, we assessed model fitting of BBM using the Bayesian posterior predictive $p$-value \citep{rubin1984bayesianly, gelman1996posterior,gelman2013bayesian}:
\begin{align*}
p = Pr \left (T(\bm{Z}_{rep},\bm{\Psi}) \geq T(\bm{Z}_{obs},\bm{\Psi} ) | \bm{Z}_{obs} \right ),
\end{align*}
where $\bm{Z} = (\bm{y}, \log \bm{s})$. Note that $T(\cdot)$ is a test statistic that depends on data $\bm{Z}$ and parameters denoted using $\bm{\Psi}$. $\bm{Z}_{obs}$ denotes observed data and $\bm{Z}_{rep}$ denotes replicated data drawn from the posterior predictive distributions. If a model fits the data well, $T(\bm{Z}_{obs},\bm{\Psi})$ will be close to the center of the density plot of $T(\bm{Z}_{rep},\bm{\Psi})$. In other words, the posterior predictive $p$-value will be close to 0.5. Extreme $p$-values (near 0 or 1) suggest poor fit. Naturally, the choice of test statistic $T(\cdot)$ varies according to the application at hand \citep{crespi2009bayesian}. We used a test statistic that measures the discrepancy between the observed data $y_{obs,i}$ and the fitted distribution of $y_i$ given $\log s_i$ across all data sources $i=1, \cdots, n$:
$$
T(\bm{Z}_{obs},\bm{\Psi}) = \sum_{i=1}^n \frac{(y_{obs,i}-E(y_i|\log s_{obs,i}, \bm{\Psi}))^2}{Var (y_i|\log s_{obs,i}, \bm{\Psi})},
$$
where $E(y_i|\log s_{obs,i}, \bm{\Psi}) = \theta_i + \rho_1 \frac{\sigma_i}{\sigma_{s_i}}(\log s_i-\log\sigma_i)$ and $Var (y_i|\log s_{obs,i}, \bm{\Psi}) = \sigma_i^2(1-\rho_1^2)$ according to Model (\ref{m:bbmcond}).
\subsection{Application to PET brain imaging data}\label{s:pet}
In the study of the human brain, positron emission tomography (PET) allows \emph{in vivo} measurement of the density of a protein of interest through modeling the kinetics of the concentration of a radioactive ligand over time \citep{morris2004kinetic,carson2005tracer}. This is typically done separately for each subject, but interest generally lies in the population average. To estimate this average, subject-level estimates may be weighted according to estimates of precision, which are also calculated at the subject level. These can depend on both biological factors as well as the amount of injected dose, the presence of imaging artifacts, the noise level in measurements of blood samples necessary for quantification, etc.
However, the resulting weighted estimate of a population mean can be unstable since the estimates of precision themselves are typically not very precise, and any underestimate of variance (arising purely due to chance) can result in extremely large weights. Since PET imaging is invasive, expensive, and labor intensive, sample sizes in PET studies are typically small, and so it is vitally important to combine all measures into a population estimate as efficiently as possible.
In our data set, 82 subjects, including 51 depressed subjects and 31 normal controls, were each scanned as part of a study examining the density of the serotonin transporter throughout the brain.
The subject-level estimate of the binding potential (a measure of the density of the transporters) is calculated based on the acquired sequence of PET images, along with measurements taken on blood samples drawn during the scan. Using a bootstrap algorithm \citep{ogden2005estimation}, it is possible to obtain an estimate of the variance of the estimated binding potential for each subject. Primary interest lies in investigating the population average of the binding potential and whether it differs on average between patients and control subjects.
\begin{figure}
% NOTE(review): panel labels (B)-(D) reordered to agree with the in-text references Figure (B)/(C)/(D) in this section -- confirm against the actual figure file.
\caption{Application to PET brain imaging data: (A) Scatter plot of log-transformed uncertainty (y-axis) versus individual-level binding potential (x-axis); (B) Comparison of estimates and 95\% CIs for binding potential of \texttt{amygdala} region population average $\mu$ using different methods; (C) Estimation of regression coefficient associated with diagnosis group; (D) Comparison of impact of each data point (measured using standardized weights) to the estimation of the overall binding potential mean in \texttt{amygdala} region using the weighted, trimmed, UBM and BBM methods. The grey dashed line at $y=1/82$ represents the setting of equal contribution from all data points.}
\label{f-3}
\end{figure}
We illustrated the methods described in section \ref{s:simuset} by applying them to the PET imaging data, focusing on the amygdala region.
The data suggest a positive correlation between the individual binding potential estimates and the corresponding log-transformed variance estimates (Figure~\ref{f-3}(A)). The estimated population average of the binding potential based on BBM is 112.2 (95\% CI: 105.3, 119.6), which is close to the result of unweighted method (Figure~\ref{f-3}(B)). The two weighted estimators lead to smaller estimates of the population average with wider 95\% CIs. By trimming extreme weights, the trimmed method provides a narrower CI. The UBM falls between the estimates of the weighted methods and the BBM with shorter CI than the weighted estimates. Figure~\ref{f-3}(C) shows the estimate of the regression coefficient associated with diagnosis group after adjusting for age and gender using corresponding regression models. The patterns are similar to the results for the population average estimation without any covariates, and BBM still provides the shortest 95\% credible interval among all the methods. The BBM shows an estimate of -8.9 (95\% CI: -24.5,5.6) for the coefficient associated with diagnosis group and suggests that the binding potential of amygdala region was not different between the two diagnosis groups. The conclusion is consistent with LR, UBM, but different from WLR and TWLR. The corresponding numerical results are in Supplementary Table S3. The Bayesian posterior predictive $p$-value is 0.47 and 0.49 in the BBM model without and with covariates, respectively, suggesting proper fit of the BBM models to the data (Supplementary Figure S2).
To further investigate the differences of UBM, BBM compared to existing weighted methods in the analysis for the PET brain imaging data, we visualize the contribution of each data point in estimating the overall mean. Figure~\ref{f-3}(D) shows the standardized weights (denoted by $\lambda_i$, such that $\sum_{i=1}^n \lambda_i =1$) of each data point using the different methods. Specifically, $\lambda_i=w_i/\sum_{i=1}^n w_i, i=1, \cdots, n$, with $w_i$ being $\frac{1}{s_i^2}$ for the weighted estimator, the trimmed weights for the trimmed method, $\frac{1}{\sigma_i^2+\tau^2}$ in Formula (2) for UBM, and $ \frac{1}{\sigma_i^2(1-\rho_1^2)+r_\theta^2(1-\rho_2^2)}$ in Formula (11) for BBM.
The parameters $\sigma_i^2$, $\tau^2$, $r_\theta^2$, $\rho_1$ and $\rho_2$ in weights of UBM or BBM are estimated by their posterior means.
The BBM in Model (\ref{m:bbmnoCov}) accounts for the correlation between $y_i$ and $s_i$ ($\Tilde{\rho_1} = 0.85$, 95\% CI: 0.50, 0.99) and the correlation between $\theta_i$ and $\sigma_i$ ($\Tilde{\rho_2} = 0.24$, 95\% CI: -0.44, 0.76) and results in standardized weights close to $1/82$ for all subjects. Consequently, the BBM estimate is similar to the unweighted one. However, there are some significant fluctuations in $\lambda_i$ among the other three estimators. The weighted estimator involves assigning very large $\lambda_i$ values to the subjects with low estimated binding potential, explaining why weighting yielded smaller estimates than the other methods. The large variation in $\lambda_i$ of weighted methods is due to the large variation in $s_i$ values (range: from a low of 4.44 up through 99.52). To a lesser extent, this pattern is also seen with the trimmed estimator, though the trimming greatly reduces the range of $\lambda_i$ values. With UBM, the variation of weights is decreased even more since $\tau$ is incorporated in the weight and $\tau$ was estimated to be $25.73$ (95\% CI: 20.11, 32.84). The variation in $\lambda_i$ is further reduced in the BBM due to the large estimated value of $\rho_1$.
\subsection{Application to meta-analysis}\label{s:meta}
Next, we considered application of these procedures in the context of a meta-analysis. Here, our data set contains 22 independent trials investigating the effect of selective decontamination of the digestive tract on the risk of respiratory tract infection. In all trials, patients in intensive care units were randomized to receive treatment by a combination of non-absorbable antibiotics or to receive no treatment \citep{smith1995bayesian, turner2000multilevel}. All trials reported estimates and variances for log-odds ratios (log-ORs) of respiratory tract infection between the treatment and placebo groups.
\begin{figure}
\caption{Application to meta-analysis: the overall estimates and 95\% CIs for log-odds ratio of respiratory tract infection between treatment and placebo using the inverse-variance weighted, UBM and BBM methods, and comparison of impact of each trial to the overall estimate using standardized weights for each method. }
\label{f-11}
\end{figure}
The data indicates a strong negative correlation between the summary measures and their corresponding variances (Supplementary Figure S3). We applied the BBM to the log-ORs and compared the results to the UBM and the inverse-variance weighted estimator. The BBM shows that the risk of respiratory tract infection for the treatment group is largely reduced compared to the placebo group (OR: $\exp(-1.66)=0.19$, 95\% CI: $0.12, 0.30$). The weighted estimator (OR: $0.39$, 95\% CI: $0.26, 0.50$) and UBM (OR: $0.27$, 95\% CI: $0.17, 0.39$) estimated a smaller effect than the BBM.
Figure~\ref{f-11} shows the standardized weights of each trial data with each method for their contribution to the overall mean estimate along with the point estimates and 95\% CIs for overall mean. The BBM weights are similar across all trials; while larger inverse of variance weights are associated with trials with larger log-OR estimates (closer to zero) due to the negative association between $y_i$ and $s_i$, leading to an overall log-OR point estimate that is closer to zero and thus smaller effect estimate. The values of UBM weights fall between BBM weights and inverse of variance weights, resulting in an estimate that is smaller than the BBM but larger than the weighted estimator. The Bayesian posterior predictive $p$-value is 0.49 in the BBM model, suggesting proper fit of the BBM model to the data (Supplementary Figure S4).
\subsection{Application to traffic safety data, small area estimation}\label{s:fars}
Finally, we illustrated these various estimation approaches by applying them to data from a small area estimation study.
The Fatality Analysis Reporting System (FARS) was conducted by the National Highway Traffic Safety Administration in the United States to provide an overall measure of highway safety \citep{national2016fatality}. FARS contains data on a census of fatal vehicle crashes within the 50 states and the District of Columbia. In this application, we used 34,247 records across the 50 states and the District of Columbia from FARS 2017 to estimate the average numbers of vehicles involved in each crash in each state. This is a small area estimation problem for which states are the geographic areas of interest.
For state $i$, we can use the sample mean ($y_i$) and standard error ($s_i$) as estimates, but these can be unstable due to the sparse available data in some states. Alternatively, the Fay-Herriot model (UBM) and our proposed BBM can be applied to improve the estimation. In both models, we considered state-level covariates including resident population size, number of vehicles registered, whether the state has a law legalizing marijuana use, and geographical region.
\begin{figure}
\caption{Application to traffic safety data: comparison of the estimated state mean number of vehicles involved in a crash using FARS 2017 data: (A) raw estimator $y_i$ (B) the UBM estimator of $\theta_i$ (C) the BBM estimator of $\theta_i$.}
\label{f-4}
\end{figure}
Figure \ref{f-4} shows maps of estimates of the mean numbers of vehicles involved in crashes for the 50 states and District of Columbia using the raw estimator $y_i$ in (A), the UBM estimator of $\theta_i$ in (B), and the BBM estimator of $\theta_i$ in (C). Supplementary Table S4 presents the point estimates of the three methods and their corresponding 95\% CIs. The raw estimates are very different from the UBM and BBM estimates in some states, while the UBM and BBM estimates are similar in most states. When they differ from the raw estimates, the BBM estimate tends to fall between the raw estimate and the UBM estimate. Compared to the raw estimate, UBM and BBM also yield shorter 95\% CIs. For example, in South Dakota, the mean is estimated to be 1.43 (95\% CI: 1.33, 1.53) by using the raw estimate; 1.49 (95\% CI: 1.41, 1.54) using UBM, and 1.47 (95\% CI: 1.41, 1.53) using BBM. Overall, the average numbers of vehicles involved in each crash in each state fall in the range of 1.3 to 1.7, with the states of California, Florida, Georgia, Michigan, Texas and Pennsylvania reporting the highest numbers.
To better understand the differences between UBM and BBM in this application, we further examined whether $y_i$ and $\log s_i$ are correlated and whether it is appropriate to replace $\sigma_i$ with $s_i$. Supplementary Figure S5 shows the scatter plot of $\log s_i$ and $y_i$ overlaid with a loess curve, which suggests some negative correlation between the raw measure and its (log-transformed) standard error but the association is not as strong as was observed in the imaging and meta-analysis applications. After regressing on the covariates, $\rho_1$ is estimated to be 0.09 (95\% CI: -0.70, 0.80) and $\rho_2$ is estimated to be 0.03 (95\% CI: -0.65, 0.62). Both estimates are close to zero. Therefore the correlation between the measures and their uncertainty estimates is relatively weak after adjusting for the covariates. Supplementary Figure S6 shows the posterior distribution of $\sigma_i$ and compares it to $s_i$ in each state. In most states, $s_i$ falls within the 95\% CI of $\sigma_i$.
As expected, when $\rho_1$ is small and each $s_i$ is reasonably close to its corresponding $\sigma_i$, the UBM and BBM estimates are similar. Finally, we checked the model fit of BBM by calculating the Bayesian posterior predictive $p$-value. A $p$-value of 0.42 suggests a reasonable fit of the model (Supplementary Figure S7).
\section{Discussion} \label{s:discuss}
In this paper, we propose a bivariate hierarchical Bayesian model (BBM) for combining estimates from multiple sources. This method not only models measures and measures of their uncertainty jointly, but also takes the correlation between these two quantities into consideration.
The simulation studies show that the BBM can provide estimates on overall mean, regression coefficients, and refined source-specific means that are less biased and more efficient with the coverage rate of 95\% CI closer to the nominal level, compared to univariate hierarchical Bayesian model (UBM) and other alternative approaches, especially if the correlation between measure and its uncertainty is not negligible. The advantage becomes more pronounced as the values of first level ($\rho_1$, observation-level) correlation and second level ($\rho_2$, population-level) correlation increase. It is interesting to note that the UBM performs poorly when $\rho_1 \neq 0$ but its performance is less sensitive to the value of $\rho_2$. Moreover, as the heterogeneity in the summary measures between data sources increases, the improvement of BBM over the alternative methods becomes more noticeable. When the variation in the summary measures between sources is small, BBM and the raw estimate perform similarly, but both still outperform the other methods. However, the raw estimator has the largest variation in estimation when the summary measures vary greatly across data sources.
Our applications showed that BBM can be applied to very different data examples, with summary measures, such as mean, log odds ratio, and log rate ratio etc., and with applications in meta-analysis, small area estimation, and any other settings that combine estimates from multiple sources. We assume a bivariate normal distribution for the summary measure and its log-transformed variance estimate given the data source specific true parameter values. Transformation can be applied to the summary measure if normality assumption is not reasonable. Residuals can also be checked for the bivariate normal assumption using Q-Q plot and contour plot \citep{korkmaz2014mvn}.
Our bivariate hierarchical model for combining summary measures and their uncertainties from multiple sources is different from the bivariate meta-analysis model for sensitivity and specificity in diagnostic studies \citep{reitsma2005bivariate,chu2006bivariate}. In the bivariate meta-analysis model, two summary measures (logit sensitivity and logit specificity) are modeled jointly as a bivariate normal distribution while assuming the corresponding variance measures as fixed quantities. In contrast, our bivariate hierarchical model only considers one summary measure but assumes that the summary measure and its corresponding log-transformed standard error follow a bivariate normal distribution. For modeling two correlated summary measures, the proposed bivariate model can be extended to a multivariate model by assuming that the two summary measures and their log-transformed variance estimates follow a multivariate normal distribution with a $4\times4$ variance-covariance matrix that allows different correlations between the two summary measures and each summary measure and their corresponding variance estimate.
The BBM is more computationally intensive than the UBM. The computation of the BBM is also more complex than the bivariate meta-analysis model, because the true variance parameter $\sigma_i$ for data source $i$ appears in both the mean for $\text{log}s_i$ and the residual variance for $y_i$ given $\theta_i$. To further improve computational efficiency of BBM, rather than assigning a prior distribution for $\sigma_{s_i}$, the residual variance of $\text{log}s_i$, we could take an empirical Bayes approach by setting $\sigma_{s_i}$ to be an informative fixed value, such as the empirical estimate of standard deviation (SD) of $\log s_i$. Our numerical studies show that this can greatly reduce computation time without introducing notable bias. Therefore, in standard practice, we would recommend the BBM method with $\sigma_{s_i}$ replaced with the estimated SD of $\log s_i$.
In the applications, the estimated credible intervals obtained from BBM for the correlations are relatively wide even though the descriptive statistics shows a strong correlation. Our simulation study also shows that the credible interval gets wider as the absolute value of the correlations gets smaller. Despite the wide credible intervals for the correlation estimates, the BBM performs much better in estimating population and source-specific means than the alternative methods. Therefore, when correlations are considered as nuisance parameters in a study, BBM can provide a satisfactory result for the parameters of interest.
\end{document} |
\begin{document}
\title[]{Root groupoid and related Lie superalgebras} \author{M.~Gorelik} \address{Weizmann Institute of Science} \email{maria.gorelik@gmail.com} \author{V.~Hinich} \address{University of Haifa} \email{vhinich@gmail.com} \author{V. Serganova} \address{UC Berkeley} \email{serganov@math.berkeley.edu}
\begin{abstract}
We introduce a notion of a root groupoid as a replacement of the notion of Weyl group for (Kac-Moody) Lie superalgebras. The objects of the root groupoid classify certain root data, the arrows are defined by generators and relations. As an abstract groupoid the root groupoid has many connected components and we show that to some of them one can associate an interesting family of Lie superalgebras which we call root superalgebras. We classify root superalgebras satisfying some additional assumptions.
To each root groupoid component we associate a graph (called skeleton) generalizing the Cayley graph of the Weyl group. We establish the
Coxeter property of the skeleton generalizing in this way the fact that the Weyl group of a Kac-Moody Lie algebra is Coxeter. \end{abstract} \maketitle \section{Introduction}
\subsection{Generalities} \subsubsection{}
In this paper we present an attempt to generalize the notion of Weyl group to Lie superalgebras. For a semisimple Lie algebra, the Weyl group parametrizes Borel subalgebras containing a fixed torus. This cannot be directly extended to Lie superalgebras since there are essentially different choices of Borel subalgebras. In order to describe all Borel subalgebras, the notion of odd (or isotropic) reflection was introduced many years ago, \cite{S2},~\cite{P},~\cite{DP}. An odd reflection cannot be naturally extended to an automorphism of the Lie superalgebra. For many years a strong feeling persisted among the experts that one should extend the Weyl group to a ``Weyl groupoid''. One attempt was made in \cite{S3}. A somewhat reminiscent construction of a groupoid was suggested by I.~Heckenberger and collaborators~\cite{H}, \cite{HY}, see also~\cite{AA}. More recently another notion named Weyl groupoid was introduced by Sergeev and Veselov~\cite{SV} for finite-dimensional superalgebras in order to describe the character ring. In~\ref{ss:comment} we comment on these definitions.
The notion of root groupoid presented in this paper is close to the notion defined in~\cite{S3}.
\subsubsection{}The connection between semisimple Lie algebras and root systems can be described from two opposite perspectives. One can start with a Lie algebra, choose a Cartan subalgebra and study the geometry of the set of roots. On the other hand, one can start with a Cartan matrix and construct a Lie algebra by generators and relations. The second approach was vastly extended to construct a very important family of infinite-dimensional Lie algebras by Kac, Moody, Borcherds and others. Our approach follows the same pattern for construction of Lie superalgebras from combinatorial data. \subsubsection{} Another idea that motivated our work is the observation that the classical Serre relation can be interpreted as reflected Chevalley relations. This led us to the notion of root algebra which, roughly speaking, respects the symmetries determined by a root groupoid. In many cases there is a unique root algebra which can be defined by Chevalley relations reflected in all possible ways. Sometimes there is a number of root algebras defined by a given root datum. The description of all root algebras is an open question --- we don't know the answer even for Lie algebras. For finite dimensional and affine Lie superalgebras all Serre's relations were described in \cite{Y}, see also~\cite{Zh}. One can see from this description that Serre's relations may involve more than two generators.
\subsection{Root groupoid}
In \cite{Kbook}, 1.1, Kac defines a realization of a Cartan matrix $A=(a_{xy})$, $x,y\in X$, as a triple $(\mathfrak{h}, a,b)$ such that $a=\{a(x)\in\mathfrak{h}\}$, $b=\{b(x)\in\mathfrak{h}^*\}$ and $\langle a(x),b(y)\rangle=a_{xy}$. Adapting this definition to Lie superalgebras, we add the parity function $p:X\to\mathbb{Z}_2$ on the index set $X$ and make a quadruple $v=(\mathfrak{h},a,b,p)$ an object of {\sl the root groupoid $\mathcal{R}$} --- the main object of our study. Every quadruple $v$ defines a Cartan matrix by the formula above. The pair $(A,p)$ is called in this paper {\sl Cartan datum}. There are three kinds of generators in the set of arrows in $\mathcal{R}$. Two of them are quite dull: one (a homothety) rescales $a(x)$, another (tautological arrow) is defined by an isomorphism $\theta:\mathfrak{h}\to\mathfrak{h}'$; the third kind are {\sl reflexions} that retain the same vector space $\mathfrak{h}$ but change the collections $a(x)$ and $b(x)$ by usual (even or odd) reflection formulas. Each generator $f:v\to v'$ defines a linear transformation $\mathfrak{h}_v\to\mathfrak{h}_{v'}$ (it is identity for homotheties and reflexions, and $\theta$ for the tautological arrow defined by $\theta$); two compositions of generators leading from $v$ to $v'$ are equivalent if they define the same linear transformation. The root groupoid $\mathcal{R}$ has a lot of components, some of them, most probably, useless. However, some connected components (we call them admissible) lead to interesting Lie superalgebras. It is worth mentioning that Cartan matrices $A_v$ are different even inside one component: one type of reflexions, {\sl isotropic reflexions}, modify Cartan matrices in a certain way (see the formulas in \ref{sss:cartanmatrix-change}).
\subsection{Root algebras}
For each $v\in\mathcal{R}$ one defines a (huge) Lie superalgebra $\widetilde\mathfrak{g}(v)$ (we call it half-baked Lie superalgebra) basically in the same way as did our predecessors V.~Kac and R.~Moody, see~\ref{sss:half} in this paper. For an arrow $\gamma:v\to v'$ in $\mathcal{R}$ the isomorphism $\mathfrak{h}(\gamma):\mathfrak{h}(v)\to \mathfrak{h}(v')$ does not extend to a homomorphism of the half-baked algebras. We define a root algebra supported on a component $\mathcal{R}_0$ of $\mathcal{R}$ as a collection of quotients $\mathfrak{g}(v)$ of $\widetilde\mathfrak{g}(v)$ such that for any $\gamma:v\to v'$ the isomorphism $\mathfrak{h}(\gamma)$ extends to an isomorphism $\mathfrak{g}(v)\to\mathfrak{g}(v')$.
A component $\mathcal{R}_0$ of $\mathcal{R}$ is called admissible if it admits a root algebra. Admissibility can be expressed in terms of weak symmetricity of the Cartan matrices at $\mathcal{R}_0$, see Theorem~\ref{thm:admissible=wsym}.
For an admissible component $\mathcal{R}_0$ there always exists an initial and a final object in the category of root algebras. The initial root algebra $\mathfrak{g}^\mathtt{U}$ is called {\sl universal}. The final root algebra $\mathfrak{g}^\mathtt{C}$ is called {\sl contragredient}. Note that $\mathfrak{g}^\mathtt{C}$ in the admissible case is defined as the quotient of $\widetilde\mathfrak{g}(v)$ by the maximal ideal having zero intersection with $\mathfrak{h}$. The universal root algebra $\mathfrak{g}^\mathtt{U}$ is obtained by imposing on $\widetilde\mathfrak{g}(v)$ reflected Chevalley relations --- so generalizing the classical Serre relations.
{Note that these were two different approaches of the founding fathers of Kac-Moody Lie algebras: Victor Kac~\cite{Kbook} factored the half-baked algebra by the maximal ideal having zero intersection with the Cartan subalgebra, whereas Robert Moody~\cite{M} imposed on it the Serre relations.}
\subsection{Groups associated to the root groupoid}
The only algebraic invariant of a connected groupoid is the automorphism group of its object. The group $\operatorname{Aut}_\mathcal{R}(v)$ is one of a plethora of groups we assign to an admissible component $\mathcal{R}_0$. It acts (up to a torus) on any root Lie algebra and on the set of its roots. For the component corresponding to a semisimple Lie algebra, $\operatorname{Aut}_\mathcal{R}(v)$ coincides with the Weyl group. In the case of conventional Kac-Moody Lie algebras $\operatorname{Aut}_\mathcal{R}(v)$ is the product of the Weyl group and a certain group of ``irrelevant'' automorphisms. The group of irrelevant automorphisms $K(v)$ is very easy to describe. This is a subgroup of automorphisms $\theta\in\mathrm{GL}(\mathfrak{h}(v))$ preserving all $b(x)\in\mathfrak{h}^*$ as well as
all $a(x)$ up to constant. It is a unipotent abelian group in the case of Kac-Moody algebras. The equality $\operatorname{Aut}_\mathcal{R}(v)=W\times K$ does not hold already for $\mathfrak{gl}(1|1)$, see~\ref{sss:aut-gl11}.
\subsubsection{Skeleton} We will now present a combinatorial description of the quotient group $\operatorname{Aut}_\mathcal{R}(v)/K(v)$. Let $\mathtt{Sk}\subset\mathcal{R}$ (skeleton) be the subgroupoid whose arrows are the compositions of reflexions. Denote by $\mathtt{Sk}(v)$ the connected component of $v\in\mathtt{Sk}$. This is a contractible groupoid; it makes sense to study it as a marked graph, whose edges are reflexions marked by the elements of the index set $X$. We denote by $\mathtt{Sk}^D(v)$ the subset of vertices in $\mathtt{Sk}(v)$ having a Cartan datum $D$-equivalent to $A_v$, see~\ref{dfn:Deq}. The set $\mathtt{Sk}^D(v)$ has a group structure and Proposition~\ref{prp:structure-Aut}(3) claims that there is an isomorphism $\operatorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v)$.
\subsubsection{Weyl group}
For a vertex $v$ in an admissible $\mathcal{R}_0$ we define Weyl group $W(v)$ (up to isomorphism, it depends on the component only) as a certain subgroup of $\mathrm{GL}(\mathfrak{h})$ generated by reflections (more precisely, by the reflections with respect to anisotropic roots, see~\ref{ss:weyl}). The Weyl group $W(v)$ is a normal subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
\subsubsection{Spine}
We define the spine $\mathtt{Sp}$ as the subgroupoid of $\mathtt{Sk}$ whose arrows are generated by isotropic reflexions only. For instance, if there are no isotropic reflexions (for example, if $p(x)=0$ for all $x$) then $\mathtt{Sp}$ has no arrows. The connected component of $v$ in $\mathtt{Sp}$ is denoted $\mathtt{Sp}(v)$ and the intersection $\mathtt{Sp}^D(v)=\mathtt{Sp}(v)\cap\mathtt{Sk}^D(v)$ is a subgroup. Proposition~\ref{prp:structure-Aut} claims that $\operatorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v)$ is a semidirect product $W(v)\rtimes\mathtt{Sp}^D(v)$. In particular, if $\mathtt{Sp}^D$ is trivial, this gives $\operatorname{Aut}(v)=W\times K$.
\subsection{Coxeter properties}
A fundamental property of Kac-Moody Lie algebras is that their Weyl groups are Coxeter groups. We generalize this result to the Weyl groups appearing in any admissible component. Similarly to the classical result, the length of an element $w\in W$ can be expressed as the number of positive anisotropic roots that become negative under $w$, see Corollary~\ref{crl:W-len}.
An analog of Coxeter property holds also for the skeleton $\mathtt{Sk}(v)$. The length of the shortest path from $v$ to $v'$ in $\mathtt{Sk}(v)$ can also be expressed as the number of real positive roots that become negative, see~\ref{crl:Sk-len}.
The Coxeter property of a group provides a presentation of the group in terms of generators and relations, with relations defined by ``pairwise interaction'' of the generators. It turns out that a similar presentation exists for the skeleton. In Section~\ref{sec:coxeter2} we define the notion of Coxeter graph that generalizes that of Coxeter group, and prove that the skeleton $\mathtt{Sk}(v)$ satisfies this property.
\subsection{Fully reflectable components}
Admissible Cartan matrices are not in general required to allow reflexions $r_x$ for all $x\in X$. We call a component $\mathcal{R}_0$ fully reflectable if all reflexions are allowed at all vertices of $\mathcal{R}_0$. This means that $\mathtt{Sk}(v)$ is a regular graph of degree $|X|$. In Section~\ref{sec:trichotomy} we divide all admissible indecomposable fully reflectable components into three types: finite, affine and indefinite. This trichotomy extends the similar trichotomy for Kac-Moody Lie algebras. There is a full classification of those types that contain an isotropic root; it has been done by C.~Hoyt and V. Serganova, see~\cite{Hoyt}, \cite{S3}. Curiously, there are only two new indefinite series having an isotropic root; they are called $Q^\pm(m,n,k)$.
\subsection{On the (lack of) uniqueness of a root Lie superalgebra} We have already mentioned that, for an admissible component $\mathcal{R}_0$ there is an initial $\mathfrak{g}^\mathtt{U}$ and a final $\mathfrak{g}^\mathtt{C}$ root algebra supported at $\mathcal{R}_0$. The natural map $\mathfrak{g}^\mathtt{U}\to\mathfrak{g}^\mathtt{C}$ is surjective and all root algebras are factors lying in between. In Sections~\ref{sect:sym} and \ref{sect:aff} we study the gap between $\mathfrak{g}^\mathtt{U}$ and $\mathfrak{g}^\mathtt{C}$ in the fully reflectable case. The result of these sections can be summarized as follows.
\begin{Thm} Let $\mathcal{R}_0$ be an admissible indecomposable fully reflectable component. Then
$\mathfrak{g}^\mathtt{U}=\mathfrak{g}^\mathtt{C}$ except for the cases $\mathfrak{g}^\mathtt{C}=\mathfrak{g}\mathfrak{l}(1|1)$, $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(i)}$, $(i=1,2,4)$, $\mathfrak{s}\mathfrak q(n)^{(2)}$ and the case when $\mathcal{R}_0$ is indefinite and nonsymmetrizable. \end{Thm} The similar result for symmetrizable Kac-Moody Lie algebras was proven by Gabber-Kac,
\cite{GabberKac}. Their proof was adapted to our symmetrizable case in Section~\ref{sect:sym}. In the case when $\mathfrak{g}^\mathtt{C}=\mathfrak{g}\mathfrak{l}(1|1)$ the algebra $\mathfrak{g}^\mathtt{U}$ has dimension $(4|2)$
and the algebras $\mathfrak{g}^\mathtt{U}$ and $\mathfrak{g}^\mathtt{C}$ are the only two root algebras in this component, see~\ref{rank1}. Note that the explicit realization of $\mathfrak{g}^\mathtt{C}$ for $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(i)}$, $(i=1,2,4)$ and $\mathfrak{s}\mathfrak q(n)^{(2)}$ is given in~\cite{S3}.
The results for nonsymmetrizable affine algebras, $S(2,1,b)$ and $\mathfrak{s}\mathfrak q(n)^{(2)}$ are new.
We also prove that if $\mathfrak{g}^{KM}\neq\mathfrak{gl}(1|1)$ then any algebra $\mathfrak{g}$ sandwiched between $\mathfrak{g}^{\mathtt{C}}$ and $\mathfrak{g}^{\mathtt{U}}$ is a root algebra.
\subsection{Examples of calculation of $\operatorname{Aut}(v)$}
In the last Section~\ref{sec:app} we compute the group $\operatorname{Aut}_\mathcal{R}(v)$ for two classes of connected components.
The first one is the case of a ``star-shaped'' spine. It includes the algebras
$\mathfrak{s}\mathfrak q(3)^{(2)}$, $B(1|1)^{(1)}$, $D(2|1,a)$, $D(2|1,a)^{(1)}$, $Q^{\pm}(m,n,k)$. Here one has $\operatorname{Aut}_\mathcal{R}(v)=W\times K$ as in this case $\mathtt{Sp}^D(v)$ is trivial. For the same reason $\operatorname{Aut}_\mathcal{R}(v)=W$ for all finite dimensional Lie superalgebras except for
the case of $\mathfrak{gl}(n|n)$; the latter is considered in~\ref{sss:glmn}. The second class is the class of components whose skeleton identifies with that of
$\mathfrak{sl}_n^{(1)}$. This includes the root data for $\mathfrak{sl}(k|l)^{(1)}$, $\mathfrak{s}\mathfrak q(n)^{(2)}$
and $S(2|1,b)$. In these cases the Weyl group $W(\mathfrak{sl}_n^{(1)})$ acts simply transitively on the skeleta $\mathtt{Sk}(v)$. This allows one to realize the Weyl group and $\mathtt{Sk}^D(v)=\operatorname{Aut}_\mathcal{R}(v)/K(v)$ as subgroups of $W(\mathfrak{sl}_n^{(1)})$.
\subsection{Borcherds-Kac-Moody algebras} {R.~Borcherds in \cite{Bo} introduced a generalization of Kac-Moody algebras, where the Cartan matrix is real symmetric and satisfies additional conditions. The proof of Gabber-Kac is valid for this class (see~\cite{Kbook}, 11.13) and gives $\mathfrak{g}^{\mathtt{C}}=\mathfrak{g}^\mathtt{U}$ if the Cartan matrix is symmetrizable and satisfies the conditions
(C1')--(C3') in~\cite{Kbook}, 11.13. Borcherds-Kac-Moody (BKM) superalgebras were studied by M.~Wakimoto in~\cite{W}. Note that any Kac-Moody algebra is a BKM algebra, but many Kac-Moody superalgebras (including $\mathfrak{gl}(m|n)$ for $m,n>2$) are not BKM superalgebras.}
\subsection{Comment on the groupoids studied in~\cite{HY,AA}} \label{ss:comment}
\VH{In~\cite{HY} the authors assign a groupoid (called Coxeter groupoid) to a collection of vectors in a vector space $\mathfrak{h}^*$ endowed with a nondegenerate symmetric bilinear form.} The objects of the ``Coxeter groupoid'' appearing in the definition in~\cite{HY} correspond to different choices of (accessible) Borel subalgebras \VH{of a Kac-Moody superalgebra given by a symmetrizable Cartan datum}; thus, they correspond to the vertices of what we call a skeleton component. The arrows are generated by reflections with respect to all simple roots. Our result claiming coxeterity of the skeleton (Theorem~\ref{thm:skeleton-coxeter}) means that the groupoid defined in~\cite{HY} is contractible. For instance, it assigns to a semisimple Lie algebra the Cayley graph of its Weyl group (which is contractible when considered as a groupoid). In order to get for a semisimple Lie algebra the classifying groupoid of the Weyl group instead of the contractible Cayley graph, one has to identify the vertices having the same Cartan matrix as is done in~\cite{AA}.
In the present paper we do something similar to~\cite{AA}. However, from a categorical point of view it is better to replace factoring the set of objects of a groupoid by an equivalence relation with adding isomorphisms connecting the objects that we are willing to declare equivalent. This is precisely what we do. In our root groupoid we have generators for the arrows of three different types: apart from reflexions, we have tautological arrows and homotheties that connect vertices with D-equivalent Cartan matrices. In absence of isotropic reflexions (for instance for Kac-Moody algebras) the Weyl group coincides with the automorphism group of an object of the corresponding component of a root groupoid. However, this does not hold in general, see~\ref{prp:structure-Aut} and Section~\ref{sec:app}.
\section{Setup}
\subsection{Groupoid of root data} \label{ss:RDG} For a complex vector space $\mathfrak{h}$ and a set $X$, a map $a:X\to\mathfrak{h}$ will be called injective if the induced map $\operatorname{Span}_\mathbb{C}(X)\to \mathfrak{h}$ is an injective map of vector spaces.
Once and forever we fix a finite set $X$. The cardinality of $X$ will be called {\sl the rank} of root data and of Lie superalgebras connected to them.
\subsubsection{} We now define {\sl the root groupoid} $\mathcal{R}$.
The objects of $\mathcal{R}$ (the root data) are the quadruples $(\mathfrak{h}, a:X\to \mathfrak{h}, b:X\to \mathfrak{h}^*, p:X\to\mathbb{Z}_2)$ where $\mathfrak{h}$ is a finite dimensional vector space over $\mathbb{C}$
such that $a,b$ are injective.
We will define the arrows of $\mathcal{R}$ by generators and relations.
We have generating arrows of three types: \begin{itemize} \item[1.] a reflexion~\footnote{In this paper we follow the idea of K.~Chukovsky~\cite{Krokodil} to use synonyms for different (although related) objects. In {\sl loc. cit} these are Hyppopotamus and Behemoth that are synonymous in Russian. In this paper we will later introduce {\sl reflections} generating the Weyl group, that will be related to, but different from the reflexions defined now.}
$r_x:(\mathfrak{h},a,b,p) \to (\mathfrak{h},a',b',p')$ defined by a source $(\mathfrak{h},a,b,p)$ and {\sl a reflectable element} $x\in X$, see~\ref{ss:reflexions} for the explicit formulas; \item[2.] a tautological arrow $t_\theta:(\mathfrak{h},a,b,p)\to (\mathfrak{h}',a',b',p)$ determined by $\theta:\mathfrak{h}\stackrel{\sim}{\to} \mathfrak{h}'$. Here $a':=\theta\circ a$, $b'=((\theta^*)^{-1}) \circ b$. \item[3.] a homothety $h_\lambda:(\mathfrak{h},a,b,p)\to (\mathfrak{h},a',b,p)$ determined by $\lambda:X\to\mathbb{C}^*$, with $a'(x)=\lambda(x)a(x)$. \end{itemize}
This collection of objects and arrows (=quiver) generates a free category denoted (temporarily) $\mathcal{F}$. The groupoid $\mathcal{R}$ will be defined as the one with the same objects as $\mathcal{F}$, and whose arrows are equivalence classes of the arrows above. The equivalence relation is defined below.
First of all, we define a functor $\mathfrak{h}:\mathcal{F}\to\mathtt{Vect}$ to the category of vector spaces carrying $(\mathfrak{h},a,b,p)$ to $\mathfrak{h}$, carrying the reflexions and the homotheties to the identities, and tautological arrows to the respective isomorphisms of the underlying vector spaces.
\subsubsection{} \label{sss:eq} The equivalence relation on each Hom-set of $\mathcal{F}$ is defined as follows: two compositions of arrows $(\mathfrak{h},a,b,p)\to(\mathfrak{h}',a',b',p')$ are equivalent if they induce the same isomorphism $\mathfrak{h}\to\mathfrak{h}'$.
\subsection{Formulas for the reflexions} \label{ss:reflexions} Any root datum $(\mathfrak{h},a,b,p)$ determines a Cartan matrix $A(a,b)=(a_{xy})_{x,y\in X}$ given by the formula $$a_{xy}:=\langle a(x),b(y)\rangle.$$
\begin{dfn} An element $x\in X$ is called {\sl reflectable} at $v=(\mathfrak{h},a,b,p)$ if the following conditions hold. \begin{itemize} \item[1.] If $a_{xx}=0$ then $p(x)=1$; \item[2.] If $a_{xx}\ne 0$ and $p(x)=0$ then $\frac{2a_{xy}}{a_{xx}} \in\mathbb{Z}_{\leq 0}$ for all $y\ne x$. \item[3.] If $a_{xx}\ne 0$ and $p(x)=1$ then $\frac{a_{xy}}{a_{xx}} \in\mathbb{Z}_{\leq 0}$ for all $y\ne x$. \end{itemize} \end{dfn} \subsubsection{} \label{sss:reflexionformulas} Let $x\in X$ be reflectable at $v=(\mathfrak{h},a,b,p)$. The reflexion $r_x:v\to v'=(\mathfrak{h},a',b',p')$ is defined as follows. \begin{itemize} \item[(anisotropic)] If $a_{xx}\not=0$, then $p':=p$ and $$a'(y):=a(y)- 2\frac{a_{yx}}{a_{xx}}a(x),\ \ \ b'(y):=b(y)- 2\frac{a_{xy}}{a_{xx}}b(x).$$ \item[(isotropic)] If $a_{xx}=0$ then $p(x)=1$ and $$(a'(y),b'(y),p'(y)):=\left\{\begin{array}{l} (-a(x),-b(x),p(x)) \ \ \ \ \ \text{ if } x=y,\\ (a(y),b(y),p(y)) \ \ \ \ \ \ \ \ \ \text{ if } x\not=y,\ \ a_{xy}=0,\\ (a(y)+\frac{a_{yx}}{a_{xy}}a(x),
b(y)+b(x), 1+p(y))
\ \ \text{ if } a_{xy}\not=0.\end{array} \right.$$ \end{itemize}
\begin{dfn} The pair $(A=\{a_{xy}\}, p)$ will be called {\sl Cartan datum} for $v$. \end{dfn}
Note that the reflectability of $x\in X$, as well as the formulas for the reflexion $r_x$ depend only on the Cartan datum.
\subsubsection{} \label{sss:cartanmatrix-change} Let us indicate what happens to a Cartan matrix under a reflexion. Anisotropic reflexions preserve the Cartan matrix. If $r_x:v\to v'$ is an isotropic reflexion ($a_{xx}=0$), the Cartan matrix $(a'_{yz})$ is given by the following formulas $$\begin{array}{ll} a'_{xy}=-a_{yx}, \\ a'_{yx}=-a_{xy},\\ a'_{yy}=\left\{\begin{array}{ll} a_{yy}+2a_{yx}&\text{ if } a_{xy}\ne 0\\ a_{yy}&\text{ if } a_{xy}=0.\end{array} \right. \\ a'_{yz}=\left\{\begin{array}{ll} a_{yz} \ \ \ \ \ & \text{ if }a_{xz}=0, x,y\not=z,\\ a_{yz}+a_{yx} &\text{ if } a_{xz}\not=0, a_{xy}=0, x,y\not=z\\ a_{yz}+a_{yx}(1+\frac{a_{xz}}{a_{xy}}) & \text{ if } a_{xz}\not=0, a_{xy}\not=0.\end{array} \right. \end{array}$$
\begin{PRP} The category $\mathcal{R}$ is a groupoid. \end{PRP} \begin{proof}
It is enough to verify that each generating arrow in $\mathcal{F}$ has invertible image in $\mathcal{R}$. First of all, in our category the composition of two tautological arrows is tautological, defined by the composition of the corresponding isomorphisms $\mathfrak{h}\stackrel{\sim}{\to}\mathfrak{h}'\stackrel{\sim}{\to}\mathfrak{h}''$. Similarly, the composition of two homotheties is a homothety. This implies that tautological arrows and homotheties are invertible. Invertibility of reflexions follows from the formulas: one has $r_x^2=\mathrm{id}$ for all $x$ (this is an explicit calculation). \end{proof}
Note the following observation. \begin{lem} \label{lem:sym-stable} All reflexions preserve the symmetricity of a Cartan matrix. \end{lem} \begin{proof} Anisotropic reflexion does not change the Cartan matrix. Isotropic reflexions do change, but the resulting Cartan matrix remains symmetric if the original matrix was symmetric. This results from a direct calculation. \end{proof}
\begin{dfn} A connected component $\mathcal{R}_0$ of $\mathcal{R}$ is called symmetrizable if there exists $v\in\mathcal{R}_0$ having a symmetric Cartan matrix. \end{dfn}
Note that $\mathcal{R}_0$ is symmetrizable if all Cartan matrices of $v'\in\mathcal{R}_0$ are symmetrizable in the sense of Kac~\cite{Kbook}.
\subsection{Properties} \subsubsection{} \label{sss:comcom} One has obviously $t_\theta\circ t_{\theta'}=t_{\theta\circ\theta'}$ and $h_\lambda\circ h_{\lambda'}=h_{\lambda\lambda'}$. The morphisms $r_x$, $t_\theta$ and $h_\lambda$ commute with each other.
\
The root groupoid $\mathcal{R}$ consists of connected components some of which will lead to interesting Lie superalgebras.
We present below properties of a component $\mathcal{R}_0$ of $\mathcal{R}$ that will be relevant to Lie theory.
This is weak symmetricity.
\begin{dfn} \label{dfn:quasisym} $ $ \begin{itemize} \item[1.] A root datum is {\sl locally weakly symmetric} if
$a_{xy}=0$ implies $a_{yx}=0$ for any reflectable $x$.
\item[2.] A root datum is weakly symmetric if all root data in its connected component are locally weakly symmetric. \end{itemize} \end{dfn}
\begin{rem} Let $v\in\mathcal{R}$. The group of automorphisms $\operatorname{Aut}_\mathcal{R}(v)$ acts on $\mathfrak{h}(v)$. This action is faithful by definition of the equivalence relation on the Hom sets of $\mathcal{F}$, see \ref{sss:eq}. \end{rem}
\begin{rem} The root groupoid $\mathcal{R}$ is an object of ``mixed'' nature. It is a groupoid, but its objects and Hom sets carry an extra information (markings $a,b,p$, generators $r_x,t_\theta,h_\lambda$ for arrows). This is why we cannot easily replace $\mathcal{R}$ with any equivalent groupoid (for instance, leaving only one object for each connected component).
Nevertheless, we can safely assume that $\mathfrak{h}$ is the same vector space at all objects of a given connected component $\mathcal{R}_0$, allowing however the tautological arrows $t_\theta$ for automorphisms $\theta:\mathfrak{h}\to\mathfrak{h}$. \end{rem}
\begin{rem} Tautological arrows and anisotropic reflexions (those with $a_{xx}\not=0$) preserve the Cartan datum. Homotheties also preserve local weak symmetricity. Isotropic reflexions usually do not satisfy this property. For this reason admissible root data with isotropic reflexions can be classified under the assumption that all elements $x\in X$ are reflectable at every vertex, \cite{Hoyt}. \end{rem}
\begin{dfn} \label{dfn:Deq} Two Cartan data, $(A,p)$ and $(A',p')$, will be called $D$-equivalent if $p=p'$ and there exists an invertible diagonal matrix $D$ such that $A'=DA$. \end{dfn} Obviously, homotheties carry a Cartan datum to a $D$-equivalent one.
\begin{rem} In studying a connected component $\mathcal{R}_0$ of $\mathcal{R}$ it is often important to describe Cartan data $(A(v),p)$ at all vertices $v\in\mathcal{R}_0$, up to $D$-equivalence. Since only isotropic reflections change the Cartan data, it is sufficient to perform only sequences of isotropic reflections, see~\ref{sss:spine}. \end{rem}
\subsection{Examples: reflectability}
\subsubsection{} \label{ex:nonreflectable} We present an example of a reflexion $r_x:v\to v'$ such that all $y\in X$ are reflectable at $v$ but some are not reflectable at $v'$.
Take the root datum $v$ with $X=\{x,y\}$, the Cartan matrix $\begin{pmatrix} 0 &-s\\-s &1\end{pmatrix}$, $s\in\mathbb{N}$, $p(x)=p(y)=1$. Then $x$ and $y$ are reflectable at $v$. For the reflexion $r_x:v\to v'$ the reflected Cartan matrix is $\begin{pmatrix} 0& s\\s &1-2s\end{pmatrix}$ and $p'(x)=1$, $p'(y)=0$. Thus $y$ is reflectable at $v'$ only if $\frac{2s}{2s-1}\in\mathbb{N}$ that is for $s=0,1$.
\subsection{Examples: calculation of $\operatorname{Aut}_\mathcal{R}(v)$}
\subsubsection{Semisimple case} \label{sss:ss}
Let $v=(\mathfrak{h},a,b,p)$ represent a root system of a finite dimensional semisimple Lie algebra. This means that $p(x)=0$, $a:X\to\mathfrak{h}$ is a set of simple coroots and $b:X\to\mathfrak{h}^*$ is the set of simple roots. Both $a$ and $b$ give bases in $\mathfrak{h}$ and $\mathfrak{h}^*$. Let us calculate the group of automorphisms of $(\mathfrak{h},a,b,0)$. Any reflexion $r_x:(\mathfrak{h},a,b,0)\to(\mathfrak{h},a',b',0)$ gives rise to an automorphism $s_x:v\to v$, $s_x=t_{s_{b(x)}}\circ r_x$ where the automorphism $s_{b(x)}:\mathfrak{h}\to\mathfrak{h}$ of $\mathfrak{h}$ is the standard reflection on $\mathfrak{h}$ with respect to $b(x)\in\mathfrak{h}^*$. Note that $s_x:v\to v$ induces precisely the automorphism $s_{b(x)}:\mathfrak{h}\to\mathfrak{h}$, so that the assignment $s_{b(x)}\mapsto s_x$ is compatible with the action of the Weyl group $W$ and of $\operatorname{Aut}_\mathcal{R}(v)$ on $\mathfrak{h}$. Since the actions are faithful, this defines an injective group homomorphism $$ i:W\to\operatorname{Aut}_\mathcal{R}(v). $$ We claim that it is bijective. In fact, any automorphism $\eta:v\to v$ in $\mathcal{R}$ is a composition of reflexions $r_x$, tautological arrows and homotheties. Since reflexions, tautological arrows and homotheties commute, one can, using~\ref{sss:comcom}, present \begin{equation} \label{eq:ss-deco} \eta=h_\lambda\circ t_\theta\circ i(w), \end{equation} for a certain $w\in W$. It remains to verify that if $h_\lambda\circ t_\theta\in\operatorname{Aut}_\mathcal{R}(v)$, then it is identity. Since $t_\theta$ does not change the Cartan matrix, $h_\lambda=\mathrm{id}$. Since any automorphism of $v$ carries $a(x)$ and $b(x)$ to themselves, and $a(x)$ generate $\mathfrak{h}$, $\theta=\mathrm{id}$.
\subsubsection{The case of Kac-Moody algebras} \label{sss:KMexample} In the case when $(\mathfrak{h},a,b,0)$ has the Cartan matrix satisfying the conditions of~\cite{Kbook}, 1.1, the calculation of ~\ref{sss:ss} works almost as well.
Let $W$ denote the Weyl group and let $\widetilde W=\operatorname{Aut}_\mathcal{R}(v)$. We have a group homomorphism $i:W\to\widetilde W$ defined exactly as in the semisimple case. Precisely as in the semisimple case we have a decomposition~(\ref{eq:ss-deco}) of an automorphism $\eta\in\widetilde W$ and deduce that $h_\lambda=\mathrm{id}$ as the Cartan matrix has no zero rows. Denote $$
K=\{\theta:\mathfrak{h}\to\mathfrak{h}|\theta(a(x))=a(x),\theta^*(b(x))=b(x),\ x\in X\}. $$ Any $\theta\in K$ commutes with $s_{b(x)}:\mathfrak{h}\to\mathfrak{h}$. This implies that $\widetilde W=W\times K$.
Let us show $K$ is a commutative unipotent group.
Denote $A\subset\mathfrak{h}$ and $B\subset\mathfrak{h}^*$ the subspaces spanned by the images of $a$ and $b$. One has $\dim A=\dim B=|X|$ and $\dim\mathfrak{h}=2|X|-r$ where $r$ is the rank of the Cartan matrix. This is equivalent to saying that the orthogonal complement $B^\perp\subset\mathfrak{h}$ of $B$ lies in $A$. If $\theta$ is an automorphism of the triple $(\mathfrak{h},a,b)$, $\theta-1$ vanishes on $A$ and has image in $B^\perp$.
This means that $(\theta-1)^2=0$. Moreover, any two such automorphisms commute. The dimension of $K$ is $(|X|-r)^2$.
\subsubsection{Root datum for $\mathfrak{g}\mathfrak{l}(1|1)$} \label{sss:aut-gl11}
We assume $\dim(\mathfrak{h})=2$, $X=\{x\}$, $a=a(x)\in\mathfrak{h}$, $b=b(x)\in\mathfrak{h}^*$ so that $a\ne 0, b\ne 0$ but $\langle b,a\rangle=0$. The only isotropic reflexion carries the quadruple $v=(\mathfrak{h},a,b,p=1)$ to $v'=(\mathfrak{h},-a,-b,1)$. The tautological arrow $t_{-1}:v'\to v$ is defined by $-1:\mathfrak{h}\to\mathfrak{h}$. The composition $t_{-1}\circ r_x$ is an automorphism of $v$ of order $2$. It is easy to see that $\operatorname{Aut}(v)=\mathbb{Z}_2\times K$ where $\mathbb{Z}_2$ is generated by the automorphism described above and
$K=\{\theta:\mathfrak{h}\to\mathfrak{h}|\ \theta(a)\in\mathbb{C}^*a,\theta^*(b)=b\}$.
For more examples see~\ref{sss:gl12} and Section~\ref{sec:app}.
\section{Root Lie superalgebras} \label{sec:root}
In this section we define root Lie superalgebras corresponding to certain (admissible) connected components of the groupoid $\mathcal{R}$ of root data.
\subsection{Half-baked Lie superalgebra} \subsubsection{} \label{sss:half} Let $v=(\mathfrak{h},a,b,p)\in\mathcal{R}$. We assign to $v$ a Lie superalgebra $\widetilde\mathfrak{g}(v)$ generated by $\mathfrak{h}=\mathfrak{h}(v)$, $\tilde e_x,\tilde f_x,\ x\in X$, with the parity given by $p(\mathfrak{h})=0,\ p(\tilde e_x)=p(\tilde f_x)=p(x)$, subject to the relations \begin{itemize} \item[1.] $[\mathfrak{h},\mathfrak{h}]=0$, \item[2.] $[h,\tilde{e}_x]=\langle b(x), h\rangle \tilde{e}_x, [h,\tilde{f}_x]=-\langle b(x), h\rangle \tilde{f}_x$ \item[3.] $[\tilde e_x,\tilde f_y]=0$ for $y\ne x$ \item[4.] $[\tilde e_x,\tilde f_x]=a(x)$ \end{itemize} for each $x\in X$.
We call $\widetilde\mathfrak{g}(v)$ {\sl the half-baked Lie superalgebra} defined by the root datum $v\in\mathcal{R}$.
\subsubsection{} \label{sss:properties} The following properties of $\widetilde\mathfrak{g}:=\widetilde\mathfrak{g}(v)$ are proven in Thm.~1.2 of~\cite{Kbook}\footnote{It is assumed in~\cite{Kbook} that $\widetilde\mathfrak{g}$ is a Lie algebra. The proof, however, works verbatim for superalgebras.}.
\begin{itemize} \item[1.] The algebra $\mathfrak{h}$ acts diagonally on $\widetilde\mathfrak{g}$. We denote by $\widetilde\mathfrak{g}_\mu$ the weight space of weight $\mu$, so that $\widetilde\mathfrak{g}=\oplus_{\mu\in\operatorname{Span}_\mathbb{Z}(b)}\widetilde\mathfrak{g}_\mu$, where $\operatorname{Span}_\mathbb{Z}(b)$ denotes the abelian subgroup of $\mathfrak{h}^*$ generated by $b(x), x\in X$. \item[2.]There is a standard triangular decomposition $$ \widetilde\mathfrak{g}=\widetilde\mathfrak{n}^+\oplus\mathfrak{h}\oplus\widetilde\mathfrak{n}^-, $$ where $\widetilde\mathfrak{n}^+$ is freely generated by $\tilde e_x$, $x\in X$ and $\widetilde\mathfrak{n}^-$ is freely generated by $\tilde f_x$. \item[3.] For each $x\not=y$ one has $\widetilde\mathfrak{g}_{jb(x)+b(y)}=0$ for $j\not\in\mathbb{Z}_{\geq 0}$ and $\widetilde\mathfrak{g}_{jb(x)+b(y)}$ is spanned by $(\mathrm{ad} \tilde{e}_x)^j\tilde{e}_y$. \end{itemize}
The following theorem is very similar to \cite{Kbook}, Thm. 2.2.
\begin{prp} \label{prp:likekac22} Let $v\in\mathcal{R}$ have a symmetric Cartan matrix $(a_{xy})$.
Let $(\cdot|\cdot)$ be a nondegenerate symmetric form on $\mathfrak{h}$ satisfying the condition \begin{itemize}
\item[] $(a(x)|h)=\langle b(x),h\rangle$ for any $x\in X$, $h\in\mathfrak{h}$\footnote{Such a form exists since the Cartan matrix is symmetric.}. \end{itemize}
Then there exists a unique extension of $(\cdot|\cdot)$ to an invariant symmetric bilinear form on $\widetilde\mathfrak{g}=\widetilde\mathfrak{g}(v)$. This extension enjoys the following properties. \begin{itemize}
\item[1.] $(\tilde e_x|\tilde f_y)=\delta_{xy}$.
\item[2.] $(\widetilde\mathfrak{g}_\alpha|\widetilde\mathfrak{g}_\beta)=0$ unless $\alpha+\beta=0$.
\item[3.] $[z,t]=(z|t)\nu(\alpha)$ for $z\in\widetilde\mathfrak{g}_\alpha$, $t\in\widetilde\mathfrak{g}_{-\alpha}$, where $\nu:\mathfrak{h}^*\to\mathfrak{h}$ is the isomorphism defined by the original nondegenerate form. \end{itemize} \end{prp} \qed
\subsubsection{} \label{sss:automorphism}
The algebra $\widetilde\mathfrak{g}(v)$ admits a standard {\sl superinvolution} $\theta$, that is an automorphism whose square is $\mathrm{id}$ on the even part and $-\mathrm{id}$ on the odd part of $\widetilde\mathfrak{g}(v)$. We will define the superinvolution $\theta$ by the following formulas. \begin{itemize}
\item $\theta|_\mathfrak{h}=-\mathrm{id}$. \item $\theta(\tilde e_x)=\tilde f_x$. \item $\theta(\tilde f_x)=(-1)^{p(x)}\tilde e_x$. \end{itemize}
\subsubsection{Example: rank one} \label{sss:rank1-wt}
Let $X=\{x\}$. The Cartan matrix is a $1\times 1$ matrix $(a_{xx})$.
If $a_{xx}\not=0$ and $p(x)=0$, we have $\widetilde\mathfrak{g}=\mathfrak{sl}_2$;
if $a_{xx}\not=0$ and $p(x)=1$, we have $\widetilde\mathfrak{g}=\mathfrak{osp}(1|2)$.
If $a_{xx}=0$ and $p(x)=0$, $\widetilde\mathfrak{g}$ is the $(4|0)$-dimensional algebra $a(x),d, e_x,f_x$, with $\mathfrak{h}=\operatorname{Span}(a(x),d)$~\footnote{this is the smallest possible $\mathfrak{h}$. The general case can be treated using~\ref{sss:decomposable}.},
$a(x)=[e_x,f_x]$ central and $[d,e_x]=e_x$, $[d,f_x]=-f_x$.
In the remaining case,
$p(x)=1$ and $a_{xx}=0$. The algebra $\widetilde\mathfrak{g}$ has dimension $(4|2)$ with a basis $$a(x),d, e_x,f_x,e_x^2,f_x^2,$$ ($e_x$ and $f_x$ odd) with $\mathfrak{h}=\operatorname{Span}(a(x),d)$, $a(x)=[e_x,f_x]$ central and $[d,e_x]=e_x$, $[d,f_x]=-f_x$.
\subsubsection{} \label{sss:properties-2}
The space $[\widetilde\mathfrak{g}_{jb(x)+b(y)},\widetilde\mathfrak{g}_{-jb(x)-b(y)}]$ lies in $\mathfrak{h}$ for any $j\geq 0$ and is at most one-dimensional. We wish to describe, under certain assumptions, the greatest value of $j$ for which it is nonzero.
Assume that $x\ne y\in X$ and that $x$ is reflectable at $v$.
Let $r_x:v\to v'=(\mathfrak{h},a',b',p')$ be the corresponding reflexion in $\mathcal{R}$. Choose $j_0$ such that $b(y)+j_0b(x)=b'(y)$, that is $j_0=-2\frac{a_{xy}}{a_{xx}}$ for $a_{xx}\not=0$, $j_0=1$ for $a_{xx}=0$, $a_{xy}\ne 0$, and $j_0=0$ for $a_{xx}=0=a_{xy}$.
\begin{lem} \label{lem:rk2-ideal} Assume that $X=\{x,y\}$ and $x$ is reflectable at $v=(\mathfrak{h},a,b,p)$. Let $j_0$ be defined as above. Define the ideal $I$ of $\widetilde\mathfrak{g}=\widetilde\mathfrak{g}(v)$ generated by the elements \begin{equation} \label{eq:rk2-ideal} E:=(\mathrm{ad}\tilde e_x)^{j_0+1}\tilde e_y,\ F:=(\mathrm{ad}\tilde f_x)^{j_0+1}\tilde f_y. \end{equation} Then \begin{itemize} \item[(a)] If $a_{xx}=0$ then the ideal $I'$ generated by $\tilde e_x^2$, $\tilde f_x^2$ satisfies $I'\cap\mathfrak{h}=0$. \item[(b)] If $a_{xx}=0$, $a_{xy}\ne 0$ then $I\subset I'$ and $I=I'$ iff $a_{yx}\ne 0$. \item[(c)] $I\cap\mathfrak{h}\ne 0$ if and only if $a_{xx}\ne 0, a_{yx}\ne 0$ and $a_{xy}=0$. \end{itemize} \end{lem} \begin{proof} (a) Let $a_{xx}=0$. Then $p(x)=1$ and \begin{equation}\label{eff} [\tilde e_x,\tilde f_x^{2}]=0.\end{equation} Since $[\tilde e_y,\tilde f_x^{2}]=0$ we obtain $[\widetilde\mathfrak{n}^+, \tilde f_x^2]=0$; similarly, $[\widetilde\mathfrak{n}^-, \tilde e_x^2]=0$. This gives $I'\cap\mathfrak{h}=0$ and establishes (a).
(b) Take $a_{xx}=0$, $a_{xy}\not=0$. Then $j_0=1$ so $$ F=(\mathrm{ad}\tilde f_x)^{2}\tilde f_y=(\mathrm{ad}\tilde f_x^{2})\tilde f_y,\ \ \ E=(\mathrm{ad}\tilde e_x^{2})\tilde e_y $$ In particular, $I\subset I'$ and $$[\tilde e_y, F]=\pm [\tilde f_x^{2}, a(y)]=\pm 2 a_{yx}\tilde f_x^2.$$ This gives $I=I'$ if $a_{yx}\not=0$. Consider the case $a_{yx}=0$. By above, $[\tilde e_y, F]=0$. By~(\ref{eff}) we have $[\tilde e_x, F]=0$. Hence $[\widetilde\mathfrak{n}^+, F]=0$ and so $F\not\in I'$. This completes the proof of (b).
(c) By (a), (b) it follows that $I\cap \mathfrak{h}=0$ if $a_{xx}=0$, $a_{xy}\not=0$. Therefore we may assume that $a_{xy}=a_{yx}=0$ or $a_{xx}\not=0$. It is enough to verify that $[\tilde e_z,F]=[\tilde f_z,E]=0$ for $z=x,y$. These formulas are similar so we will check only the formula $[\tilde e_z,F]=0$.
If $a_{xy}=a_{yx}=0$, then $j_0=0$ and $$[\tilde e_x, F]=[\tilde e_x, [\tilde f_x,\tilde f_y]]=[[\tilde e_x, \tilde f_x],\tilde f_y]=[a(x),\tilde f_y]=-a_{xy}\tilde f_y=0$$ as well as $[\tilde e_y, F]=\pm a_{yx}\tilde f_x=0$ as required.
Consider the case when $a_{xx}, a_{xy}, a_{yx}\not=0$. Then $j_0=-2\frac{a_{xy}}{a_{xx}}$. Recall that $\tilde f_x,\tilde e_x$
generate $\mathfrak{sl}_2$ if $p(x)=0$ and $\mathfrak{osp}(1|2)$ if $p(x)=1$. Since $[\tilde e_x,\tilde f_y]=0$, a direct computation implies $$(\mathrm{ad} \tilde e_x)(\mathrm{ad}\tilde f_x)^{j_0+1}\tilde f_y=0.$$ On the other hand, $[\tilde e_y,\tilde f_x]=0$ implies $$[\tilde e_y, F]=\pm (\mathrm{ad}\tilde f_x)^{j_0+1} a(y)=\pm a_{yx}(\mathrm{ad}\tilde f_x)^{j_0} \tilde f_x=0$$ since $[\tilde f_x,\tilde f_x]=0$ for $p(x)=0$ and $[\tilde f_x,[\tilde f_x,\tilde f_x]]=0$ if $p(x)=1$ (in the case $a_{xx}\not=0$, $p(x)=1$ the condition that $x$ is reflectable at $v$ implies that $j_0$ is even, in particular, $j_0\geq 2$). Hence $[\tilde e_y, F]=[\tilde e_x, F]=0$ as required.
Finally, if $a_{xx}\ne 0$, $a_{xy}=0$, $a_{yx}\ne 0$, then $b'(y)=b(y)$ and $a'(y)=a(y)-2\frac{a_{yx}}{a_{xx}}a(x)$. Furthermore, $E=[\tilde{e}_x,\tilde{e}_y]$, so that $$ [\tilde f_x,[\tilde{f}_y,E]]=\pm[\tilde f_x,[\tilde{e}_x,a(y)]]=\pm a_{yx}a(x)\ne 0. $$
\end{proof}
\begin{prp} \label{prp:bracket} Assume that $x\not=y\in X$ and $x$ is reflectable. We also assume that if $a_{xx}\ne 0$ and $a_{xy}=0$ then $a_{yx}=0$. \begin{itemize} \item[1.]The bracket $[\widetilde\mathfrak{g}_{jb(x)+b(y)},\widetilde\mathfrak{g}_{-jb(x)-b(y)}]$ is zero for $j>j_0$. \item[2.]$[\widetilde\mathfrak{g}_{b'(y)},\widetilde\mathfrak{g}_{-b'(y)}]$ is spanned by $a'(y)$. \end{itemize} \end{prp} \begin{proof} The claim immediately reduces to the case $X=\{x,y\}$. Denote by $I$ the ideal of $\widetilde\mathfrak{g}$ generated by the elements $$ E:=(\mathrm{ad}\tilde e_x)^{j_0+1}\tilde e_y,\ F:=(\mathrm{ad}\tilde f_x)^{j_0+1}\tilde f_y. $$ By Lemma~\ref{lem:rk2-ideal} we have $I\cap\mathfrak{h}=0$. The homomorphism $\widetilde\mathfrak{g}\to\mathfrak{g}=\widetilde\mathfrak{g}/I$ is identity on $\mathfrak{h}$, so both claims of the proposition would follow from the similar claims for $\mathfrak{g}$. Since the first claim of the proposition tautologically holds for $\mathfrak{g}$, we have proven it also for $\widetilde\mathfrak{g}$.
To prove the second claim for $\mathfrak{g}$, we will study the isotropic and the anisotropic cases separately.
{\sl The case $a_{xx}\ne 0$.}
The rank one subalgebra defined by $\{x\}\in X$ contains a copy of $\mathfrak{sl}_2$.
$\mathfrak{g}$ is integrable as an $\mathfrak{sl}_2$-module as it is generated by the elements on which $\tilde e_x,\tilde f_x$ act locally nilpotently,
see~\cite{Kbook}, Lemma 3.4. Therefore, the automorphism $\sigma:\mathfrak{g}\to\mathfrak{g}$ given by the formula \begin{equation} \label{eq:sigma:gtog} \sigma=\exp(\tilde f_x)\circ\exp(-\tilde e_x)\circ\exp(\tilde f_x), \end{equation} is defined. Its restriction to $\mathfrak{h}$ is given by the standard formula $ \sigma(h)=h-\frac{2}{a_{xx}}\langle h,b(x)\rangle a(x), $ so $\sigma(\mathfrak{g}_\mu)=\mathfrak{g}_{\sigma(\mu)}$, where the action of $\sigma$ on $\mathfrak{h}^*$ is induced by its action on $\mathfrak{h}$. The latter implies the second claim of the proposition for the algebra $\mathfrak{g}$.
{\sl The case $a_{xx}=0$. } If $a_{xy}=0$, the second claim is immediate. In the case $a_{xy}\ne 0$ a direct calculation shows that $$ [[\tilde e_x,\tilde e_y],[\tilde f_x,\tilde f_y]]= (-1)^{p(y)}a_{xy}(a(y)+\frac{a_{yx}}{a_{xy}}a(x)). $$
\end{proof}
\subsection{Coordinate systems and root algebras} \begin{dfn} Let $v\in\mathcal{R}$. A $v$-coordinate system on a Lie superalgebra $\mathfrak{g}$ is a surjective homomorphism $\widetilde\mathfrak{g}(v)\to\mathfrak{g}$ whose kernel has zero intersection with $\mathfrak{h}(v)$. \end{dfn}
In other words, a $v$-coordinate system on $\mathfrak{g}$ consists of an injective map of Lie superalgebras $\mathfrak{h}\to\mathfrak{g}$ ($\mathfrak{h}$ being even and commutative), and a collection of generators $e_x,f_x$ such that the relations 1--4 of~\ref{sss:half} hold.
Here is our main definition.
\begin{dfn} Let $\mathcal{R}_0\subset\mathcal{R}$ be a connected component. A root Lie superalgebra $\mathfrak{g}$ supported on $\mathcal{R}_0$ is a collection of Lie superalgebras $\mathfrak{g}(v),\ v\in\mathcal{R}_0$, endowed with $v$-coordinate systems so that for any $\alpha:v\to v'$ in $\mathcal{R}_0$ there exists an isomorphism $a:\mathfrak{g}(v)\to\mathfrak{g}(v')$ extending the isomorphism $\mathfrak{h}(\alpha):\mathfrak{h}(v)\to\mathfrak{h}(v')$. \end{dfn}
Let $\mathfrak{g}$ be a root Lie superalgebra at $\mathcal{R}_0$. There is a weight space decomposition $$ \mathfrak{g}(v)=\mathfrak{h}(v)\oplus\bigoplus_{\mu\in\Delta(v)}\mathfrak{g}(v)_\mu $$ with $\Delta(v)\subset\operatorname{Span}_\mathbb{Z}(b)$. The elements of $\Delta(v)$ are called {\sl the roots} of $\mathfrak{g}$ (at $v$). The elements $b(x),\ x\in X$, are {\sl the simple roots} at $v$. Any $\alpha:v\to v'$ carries the root decomposition at $v$ to that at $v'$.
\begin{dfn} A component $\mathcal{R}_0$ of $\mathcal{R}$ is called {\sl admissible} if it admits a root Lie superalgebra. \end{dfn}
\subsubsection{} Let $v\in\mathcal{R}$. The half-baked algebra $\widetilde\mathfrak{g}(v)$ has a triangular decomposition. This implies the existence of the maximal ideal $\mathfrak{r}(v)$ having zero intersection with $\mathfrak{h}(v)$. If $\mathcal{R}_0$ is admissible, then the collection of $\mathfrak{g}^\mathtt{C}(v)=\widetilde\mathfrak{g}(v)/\mathfrak{r}(v)$ is a root Lie superalgebra supported at $\mathcal{R}_0$. In fact, given a root algebra $\mathfrak{g}$ with $\mathfrak{g}(v)=\widetilde\mathfrak{g}(v)/I(v)$, the quotient ideal
$\bar\mathfrak{r}(v)=\mathfrak{r}(v)/I(v)$ is the maximal ideal in $\mathfrak{g}(v)$ having zero intersection with $\mathfrak{h}(v)$. Obviously, any isomorphism $a:\mathfrak{g}(v)\to\mathfrak{g}(v')$ over $\alpha:v\to v'$ in $\mathcal{R}$ carries $\bar\mathfrak{r}(v)$ to $\bar\mathfrak{r}(v')$, and therefore induces an isomorphism $\mathfrak{g}^\mathtt{C}(v)\to\mathfrak{g}^\mathtt{C}(v')$.
We call the collection $\mathfrak{g}^\mathtt{C}=\{\mathfrak{g}^\mathtt{C}(v)\}_{v\in\mathcal{R}_0}$ the {\sl contragredient} Lie superalgebra supported at an admissible component $\mathcal{R}_0$. In other words, the contragredient Lie superalgebra $\mathfrak{g}^\mathtt{C}$ is the terminal object in the category of root Lie superalgebras supported at an admissible component $\mathcal{R}_0$.
The superinvolution $\theta$ of $\widetilde\mathfrak{g}$ defined in \ref{sss:automorphism} induces an automorphism of $\mathfrak{g}^\mathtt{C}$.
\subsubsection{Rank one} \label{rank1} The Lie algebra $\mathfrak{sl}_2$ plays a prominent role in Lie theory. A similar role in our setup will be played by root algebras of rank 1. Let us describe them all.
Let $X=\{x\}$. In this case $\widetilde{\mathfrak{g}}(v)$ is described in \ref{sss:rank1-wt}. It is a root algebra.
If $a_{xx}\not=0$ or $p(x)=0$, then $\mathfrak{g}^\mathtt{C}=\widetilde\mathfrak{g}$.
If $a_{xx}=0$ and $p(x)=1$, the maximal ideal $\mathfrak{r}$ of $\widetilde\mathfrak{g}$ having zero intersection with $\mathfrak{h}$ is spanned by
$e_x^2,f_x^2$ and $\mathfrak{g}^\mathtt{C}=\widetilde\mathfrak{g}/\mathfrak{r}\cong \mathfrak{gl}(1|1)$. The algebras $\widetilde\mathfrak{g}$ and $\mathfrak{g}^\mathtt{C}$ are exactly two root algebras in this case as only these two allow an automorphism lifting $\gamma=t_{-1}\circ r_x$, see~\ref{sss:aut-gl11}.
\subsubsection{Decomposable root datum} \label{sss:decomposable} Let $X=X_1\sqcup X_2$ and let $v_i=(\mathfrak{h}_i, a_i:X_i\to\mathfrak{h}_i,b_i:X_i\to\mathfrak{h}_i^*,p_i:X_i\to\mathbb{Z}_2)$,
$i=1,2$, be two root data of ranks $|X_1|$ and $|X_2|$ respectively.
We define their sum $v=v_1+v_2$ in an obvious way, as the root datum with $\mathfrak{h}=\mathfrak{h}_1\oplus\mathfrak{h}_2$ and $a:X\to\mathfrak{h}$, $b:X\to\mathfrak{h}^*$ and $p:X\to\mathbb{Z}_2$ defined by the conditions $$
a_{|X_i}=s_i(a_i), b_{|X_i}=s_i^*(b_i),\ p|_{X_i}=p_i, $$ where $s_i:\mathfrak{h}_i\to\mathfrak{h}$ and $s_i^*:\mathfrak{h}_i^*\to\mathfrak{h}^*$ are the obvious embeddings.
We will denote by $\mathcal{R}(X),\ \mathcal{R}(X_1)$ and $\mathcal{R}(X_2)$ the groupoids of root data for the sets $X,X_1$ and $X_2$. The component $\mathcal{R}_0$ of $\mathcal{R}(X)$ containing $v=v_1+v_2$ is obviously a direct product $\mathcal{R}'_0\times\mathcal{R}''_0$ of the corresponding components of $\mathcal{R}(X_1)$ and $\mathcal{R}(X_2)$. If $\mathfrak{g}_1$ and $\mathfrak{g}_2$ are root algebras supported on the components $\mathcal{R}'_0$ and $\mathcal{R}''_0$ respectively, the product $\mathfrak{g}=\mathfrak{g}_1\times\mathfrak{g}_2$ is a root algebra supported on $\mathcal{R}_0$. In particular, $\mathfrak{g}^\mathtt{C}_1\times\mathfrak{g}^\mathtt{C}_2$ is the contragredient root algebra for $\mathcal{R}_0$. Theorem~\ref{thm:admissible=wsym} implies that if $\mathcal{R}_0$ is admissible, then both $\mathcal{R}'_0$ and $\mathcal{R}''_0$ are admissible. It is not true in general that any root algebra supported on $\mathcal{R}_0$ is a product.
Here is the best we can say.
\begin{prp} \label{prp:deco} Let $X=X_1\sqcup X_2$, $v=v_1+v_2$ be defined as above, with $v\in\mathcal{R}_0$, $v_1\in\mathcal{R}_0'$ and $v_2\in\mathcal{R}_0''$. Assume that all $x\in X_1$ are reflectable at all $v'\in\mathcal{R}'_0$. Then any root algebra supported on $\mathcal{R}_0$ uniquely decomposes as a product of a root algebra supported on $\mathcal{R}_0'$ and a root algebra supported on $\mathcal{R}_0''$. \end{prp} \begin{proof} The algebra $\mathfrak{g}=\mathfrak{g}(v)$ is generated by $\mathfrak{h}$, $e_x,f_x,e_y,f_y$ where $x\in X_1$ and $y\in X_2$. We have to verify that $[e_x,e_y]=0=[f_x,f_y]$ for $x\in X_1$ and $y\in X_2$. The reflexion $r_x:v\to v'$ with respect to $x\in X_1$ carries, up to scalars, $e_x$ to $f'_x$ and $f_x$ to $e'_x$, retaining $e_y$ and $f_y$. Since $[e'_x,f'_y]=0=[e'_y,f'_x]$, we deduce $[e_x,e_y]=0=[f_x,f_y]$. \end{proof}
We can apply the sum of root data operation to an empty root datum $\emptyset_V$ corresponding to $X=\emptyset$ and uniquely defined by a vector space $V$. For $v=(\mathfrak{h},a,b,p)$ the sum $\emptyset_V+v$ has form $(\mathfrak{h}\oplus V,a,b,p)$ and any root algebra based on it is the direct product of a root algebra based on $v$ with the commutative algebra $V$.
The following result is a corollary of~\ref{rank1}.
\begin{crl}\label{corgalpha} Let $\mathcal{R}_0$ be an admissible component of $\mathcal{R}$ and $\mathfrak{g}:=\mathfrak{g}(v)$ be a root algebra. Fix $x\in X$ and set $\alpha:=b(x)$. We denote by $\mathfrak{g}\langle\alpha\rangle$ the subalgebra of $\mathfrak{g}$ generated by $\mathfrak{g}_{\alpha}$ and $\mathfrak{g}_{-\alpha}$. \begin{enumerate} \item If $a_{xx}\not=0$ and $p(x)=0$, one has $\mathfrak{g}\langle\alpha\rangle=\mathfrak{sl}_2$ and $\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1\}$. \item If $a_{xx}\not=0$ and $p(x)=1$,
one has $\mathfrak{g}\langle\alpha\rangle=\mathfrak{osp}(1|2)$ and $\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1,\pm 2\}$. \item If $a_{xx}=0$ and $p(x)=0$ then $\mathfrak{g}\langle\alpha\rangle$ is the Heisenberg algebra and $\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1\}$. \item If $p(x)=1$, $a_{xx}=0$ and $a_{xy}, a_{yx}\not=0$ for some $y$
then $\mathfrak{g}\langle\alpha\rangle\cong \mathfrak{sl}(1|1)$ and $\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1\}$. \item If $p(x)=1$, $a_{xx}=0$ and $a_{xy}=a_{yx}=0$ for all $y$
then $\mathfrak{g}\langle\alpha\rangle$ is the $(4|2)$-dimensional algebra described in~\ref{sss:rank1-wt}. \end{enumerate} \end{crl} \begin{proof} Clearly, $\mathfrak{g}\langle\alpha\rangle$ is a quotient of the algebra $[\widetilde\mathfrak{g},\widetilde\mathfrak{g}]$ where $\widetilde\mathfrak{g}$ is the corresponding algebra listed in~\ref{rank1}; this gives (1), (2), (3) and shows that in (4) it is enough to verify $\mathfrak{g}_{2b(x)}=0$. This follows from Lemma~\ref{lem:rk2-ideal}(b).
\end{proof}
Note that Corollary~\ref{corgalpha} implies that $x\in X$ is reflectable at $v$ iff for $\alpha=b(x)$ the algebra $\mathfrak{g}\langle \alpha\rangle$ is not the Heisenberg algebra and $e_\alpha$ acts on $\mathfrak{g}$ locally nilpotently.
\subsection{Admissibility is just a weak symmetricity}
In this subsection we prove the following result. \begin{thm} \label{thm:admissible=wsym} A connected component $\mathcal{R}_0$ of $\mathcal{R}$ is admissible iff it is weakly symmetric. \end{thm} \begin{proof}
1. Let $\mathcal{R}_0$ be a weakly symmetric component of $\mathcal{R}$. We claim that the collection of $\mathfrak{g}^\mathtt{C}(v)=\widetilde\mathfrak{g}(v)/\mathfrak{r}(v)$ forms a root Lie superalgebra. Let $r_x:v'\to v$ be a reflexion. Denote $\widetilde\mathfrak{g}'=\widetilde\mathfrak{g}(v')$, $\mathfrak{g}=\mathfrak{g}^\mathtt{C}(v)$. Let us show that there exists a homomorphism $\rho:\widetilde\mathfrak{g}'\to\mathfrak{g}$ identical on $\mathfrak{h}$. The half-baked Lie superalgebra $\widetilde\mathfrak{g}(v')$ is generated by $\mathfrak{h}$, $\tilde e'_y$ and $\tilde f'_y$, $y\in X$. In order to construct $\rho$, we have to find $\rho(\tilde e'_y)$, $\rho(\tilde f'_y)$, and verify the (very few) relations.
The weight of $\tilde e'_y$ is $b'(y)$, so we have to look for $\rho(\tilde e'_y)$ in $\mathfrak{g}^\mathtt{C}_{b'(y)}$. We know that $\widetilde\mathfrak{g}_{b'(y)}$ is one-dimensional. By Proposition~\ref{prp:bracket} (2), the ideal generated by $\widetilde\mathfrak{g}_{b'(y)}$ contains $a'(y)\in\mathfrak{h}$, so $\mathfrak{r}(v)$ does not contain it. Therefore, $\mathfrak{g}_{b'(y)}$ is also one-dimensional. We will define arbitrarily $0\ne\rho(\tilde e'_y)\in\mathfrak{g}_{b'(y)}$ and choose $\rho(\tilde f'_y)\in\mathfrak{g}_{-b'(y)}$ so that $[\rho(\tilde e'_y),\rho(\tilde f'_y)]=a'(y)$. The latter is also possible by~Proposition~\ref{prp:bracket}(2). It remains to verify that $[\rho(\tilde e'_y),\rho(\tilde f'_z)]=0$ for $y\ne z$.
(a) $y\ne x,\ z\ne x$. In this case the bracket should have weight $b'(y)-b'(z)=b(y)-b(z)+cb(x)$ for some $c\in\mathbb{Z}$. This is not a weight of $\widetilde\mathfrak{g}$, so the bracket should vanish.
(b) $z=x\ne y$. In this case the bracket should have weight $b'(y)-b'(x)=b(y)+j_0b(x)+b(x)$ where $j_0$ is defined as in~\ref{sss:properties-2}. According to Lemma~\ref{lem:rk2-ideal}(c) the ideal generated by this weight space has no intersection with $\mathfrak{h}$, so this is not a weight of $\mathfrak{g}$ and the bracket vanishes.
Therefore, we have constructed a homomorphism $\rho:\widetilde\mathfrak{g}'\to\mathfrak{g}$ for each reflexion $r_x:v'\to v$. It is identity on $\mathfrak{h}$, so it induces a homomorphism $\mathfrak{g}'\to\mathfrak{g}$. Any reflexion has order two, so there is also a homomorphism $\mathfrak{g}\to\mathfrak{g}'$ in the opposite direction. Their composition preserves weight spaces, so it is invertible.
2. Assume now that $\mathcal{R}_0$ is an admissible component. We will deduce that it is necessarily weakly symmetric. Assume that there exists $v\in\mathcal{R}_0$, a $v$-reflectable element $x\in X$ and another $y\in X$ such that $a_{xy}=0$. Let $\mathfrak{g}$ be a root algebra.
Look at the $x$-reflexion $r_x:v\to v'$. Since $$b'(x)=-b(x),\ \ b'(y)=b(y)$$ one has $\tilde{\mathfrak{g}}'_{b(x)+b(y)}=0$ so $\mathfrak{g}_{b(x)+b(y)}=0$. Therefore $[e_x,e_y]=0$. One has $$a_{yx} e_x=[a(y),e_x]=[[e_y,f_y],e_x]=0$$ so $a_{yx}=0$ as required. \end{proof}
\subsection{Admissible components in rank two} \label{ss:ranktwo}
In this subsection we show that any locally weakly symmetric root datum of rank two belongs to an admissible component (that is, a local weak symmetricity implies a weak symmetricity).
\subsubsection{Fully reflectable} A component $\mathcal{R}_0$ of $\mathcal{R}$ is called {\sl fully reflectable} if all $x\in X$ are reflectable at all $v\in\mathcal{R}_0$. Classification of fully reflectable root data is available for all ranks. Fully reflectable admissible root data without isotropic real roots can be easily classified as all Cartan matrices in the component are $D$-equivalent. The classification of fully reflectable admissible root data with isotropic real roots was obtained in~\cite{Hoyt}.
\subsubsection{Symmetrizable}
The case $a_{xy}=a_{yx}=0$, as well as the case where both $a_{xy}\ne 0$ and $a_{yx}\ne 0$, is symmetrizable, and therefore symmetrizable at all vertices by Lemma~\ref{lem:sym-stable}.
\subsubsection{Weakly symmetric but not symmetrizable}
This is possible only if $\mathcal{R}_0$ contains an object $v$ having nonreflectable $y\in X$. Thus, the Cartan matrix should have form
$$ A=\begin{pmatrix} a_{xx} & a_{xy}\\ 0 & a_{yy}\end{pmatrix}, $$ with $a_{xy}\ne 0$. Since $y$ is nonreflectable, $a_{yy}=0$ and $p(y)=0$.
(a) Let $a_{xx}=0$ so $p(x)=1$ since $x$ is reflectable. Then $$ A=\begin{pmatrix} 0 & a_{xy}\\ 0 & 0\end{pmatrix}, $$ that, after the reflexion, will become $$ A'=\begin{pmatrix} 0 & -a_{xy}\\ 0 & 0\end{pmatrix} $$ which is $D$-equivalent to $A$.
(b) $a_{xx}\ne 0$. In this case the Cartan matrix is not changed and therefore the component is weakly symmetric.
\subsection{The canonical extension of $\mathcal{R}_0$}
\subsubsection{} Let $\mathcal{G},\mathcal{H}$ be groupoids. A functor $f:\mathcal{G}\to\mathcal{H}$ is called a {\sl fibration} if for any $g\in\mathcal{G}$ and $\beta:f(g)\to h$ in $\mathcal{H}$ there exists $\alpha:g\to g'$ in $\mathcal{G}$ such that $f(\alpha)=\beta$.
Given a fibration $f:\mathcal{G}\to\mathcal{H}$ and $h\in\mathcal{H}$, the fiber of $f$ at $h$, $\mathcal{G}_h$, is defined as follows. \begin{itemize}
\item $\operatorname{Ob}(\mathcal{G}_h)=\{g\in\mathcal{G}| f(g)=h\}$.
\item $\mathrm{Hom}_{\mathcal{G}_h}(g,g')=\{\alpha:g\to g'|f(\alpha)=\mathrm{id}_h\}$.\footnote{For a general $f$ the fiber $\mathcal{G}_h$ defined as above may change if one replaces $\mathcal{G}$ with an equivalent groupoid. A more invariant notion of fiber has as objects the pairs $(g,\alpha:f(g)\to h)$.} \end{itemize}
\subsubsection{} \label{sss:wtR}
Let $\mathcal{R}_0$ be an admissible component of the root groupoid and let $\mathfrak{g}$ be a root algebra on $\mathcal{R}_0$. Define the groupoid of symmetries of $\mathfrak{g}$, $\mathcal{G}_0$, together with a fibration $\pi:\mathcal{G}_0\to\mathcal{R}_0$, as follows. The groupoids $\mathcal{G}_0$ and $\mathcal{R}_0$ have the same objects. For $\alpha:v\to v'\in\mathcal{R}_0$, we define $\mathrm{Hom}^\alpha_{\mathcal{G}_0}(v,v')$, the set of arrows $v\to v'$ in $\mathcal{G}_0$ lying over $\alpha$, as the set of isomorphisms $\mathfrak{g}(v)\to\mathfrak{g}(v')$ extending the isomorphism $\mathfrak{h}(\alpha)$.
The fiber of $\pi$ at $v\in\mathcal{R}_0$ consists of automorphisms of $\mathfrak{g}(v)$ that are identity on $\mathfrak{h}(v)$. Any such automorphism $a$ preserves the weight spaces, and so it is uniquely given by a collection of $\lambda_x\in\mathbb{C}^*$, $\mu_x$ so that $a(e_x)=\lambda_x e_x$, $a(f_x)=\mu_xf_x$. Since $[e_x,f_x]=a(x)\ne 0$, one necessarily has $\mu_x=\lambda_x^{-1}$.
Therefore, the fiber of $\pi$ at $v$ identifies with the classifying groupoid~\footnote{Recall that the classifying groupoid of a group $G$ is the groupoid having a single object with the group of automorphisms $G$.} of the torus $(\mathbb{C}^*)^X$.
\subsubsection{Canonicity of $\mathcal{G}_0$}
Let $\mathfrak{g}$ be a root algebra on $\mathcal{R}_0$. For any $v$ the algebra $\mathfrak{g}(v)$ has a maximal ideal $\mathfrak{r}(v)$ having no intersection with $\mathfrak{h}(v)$.
Thus, $\mathfrak{g}(v)/\mathfrak{r}(v)=\mathfrak{g}^\mathtt{C}(v)$ for all $v$. Let $\alpha:v\to v'$ be an arrow in $\mathcal{R}$. Any isomorphism $\mathfrak{g}(v)\to\mathfrak{g}(v')$ extending $\mathfrak{h}(\alpha)$ induces an isomorphism $\mathfrak{g}^\mathtt{C}(v)\to\mathfrak{g}^\mathtt{C}(v')$. This leads to a functor $\mathcal{G}_0\to\mathcal{G}_0^\mathtt{C}$ over $\mathcal{R}_0$, where $\mathcal{G}_0^\mathtt{C}$ denotes (temporarily) the groupoid extension of $\mathcal{R}_0$ constructed as in \ref{sss:wtR} with the root algebra $\mathfrak{g}^\mathtt{C}$. It is an equivalence as it induces an equivalence of fibers at any $v\in\mathcal{R}_0$.
\subsection{Universal root algebra}
\subsubsection{} In this subsection we will prove the existence of an initial object in the category of root algebras associated to an admissible component $\mathcal{R}_0$ of $\mathcal{R}$.
Let $\mathfrak{g}$ be a root Lie superalgebra for the component $\mathcal{R}_0$. Fix $v\in\mathcal{R}_0$. The $v$-coordinate system for $\mathfrak{g}$ is a Lie superalgebra epimorphism $\widetilde\mathfrak{g}(v)\to\mathfrak{g}(v)$. Let $\mathfrak{k}(v)$ be its kernel.
Choose an arrow $\alpha:v'\to v$ in $\mathcal{R}$ presentable as a composition of reflexions. We denote $\mathfrak{g}'=\mathfrak{g}(v')$ and $\mathfrak{g}=\mathfrak{g}(v)$. The existence of isomorphism $\mathfrak{g}'\to\mathfrak{g}$ lifting $\alpha$ proves that $\mathfrak{g}_{b'(x)-b'(y)}=0$ for $y\ne x$, so that $\mathfrak{k}(v)\supset\mathfrak{s}(v)$ where $\mathfrak{s}(v)$ is the ideal of $\widetilde\mathfrak{g}(v)$ generated by $\sum\widetilde\mathfrak{g}_{b'(x)-b'(y)}(v)$, the sum being taken over all $\alpha:v'\to v$ presentable as compositions of reflexions.
Let us verify that the collection $\mathfrak{g}^\mathtt{U}=\{\mathfrak{g}^\mathtt{U}(v)=\widetilde\mathfrak{g}(v)/\mathfrak{s}(v),\ v\in\mathcal{R}_0\}$ is a root Lie superalgebra. Note that $\mathfrak{s}(v)\subset \mathfrak{k}(v)$, so one has an obvious surjective homomorphism $q:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}(v)$.
We have to define, for each arrow $\alpha:v\to v'$ in $\mathcal{R}$, an isomorphism $\tilde\alpha:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$ extending $\mathfrak{h}(\alpha):\mathfrak{h}\to\mathfrak{h}'$. This is enough to verify separately for reflexions, homotheties and tautological arrows. In the case when $\alpha$ is a tautological arrow or a homothety, it extends to an isomorphism $\tilde\alpha:\widetilde\mathfrak{g}(v)\to\widetilde\mathfrak{g}(v')$. Since the homotheties and the tautological arrows commute with the reflexions, $\tilde\alpha$ carries $\mathfrak{s}(v)$ to $\mathfrak{s}(v')$, and this induces an isomorphism $\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$. It remains to define, for each reflexion $r_x:v\to v'$ in $\mathcal{R}$, an isomorphism $\rho=\tilde r_x:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$ extending $\mathrm{id}_\mathfrak{h}$.
The algebra $\mathfrak{g}^\mathtt{U}(v)$ is generated over $\mathfrak{h}$ by the elements $e_y$ of weight $b(y)$, $f_z$ of weight $-b(z)$, subject to relations listed in \ref{sss:half} and factored out by $\mathfrak{s}(v)$. Thus, in order to construct $\rho$, we have to choose $\rho(e_y)\in\mathfrak{g}^\mathtt{U}_{b(y)}(v')$, $\rho(f_z)\in\mathfrak{g}^\mathtt{U}_{-b(z)}(v')$, so that $\rho$ vanishes at all the relations.
The weight spaces $\mathfrak{g}^\mathtt{U}_{b(y)}(v')$ and $\mathfrak{g}^\mathtt{U}_{-b(y)}(v')$ are one-dimensional by property 3 of \ref{sss:properties} as the map $q:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}(v)$ is surjective and the weight spaces $\mathfrak{g}_{b(y)}(v')$ and $\mathfrak{g}_{-b(y)}(v')$ are one-dimensional. We will define arbitrarily $0\ne\rho(e_y)\in\mathfrak{g}^\mathtt{U}_{b(y)}(v')$ and choose $\rho(f_y)\in\mathfrak{g}^\mathtt{U}(v')$ so that $[\rho(e_y),\rho(f_y)]=a(y)$. The latter is possible by~Proposition~\ref{prp:bracket}(2). The rest of the relations say that, for any composition of reflexions $\gamma:v''\to v$ with $v''=(\mathfrak{h},a'',b'',p'')$, the weight space $\mathfrak{g}^\mathtt{U}_{b''(y)-b''(z)}(v)$ vanishes for all $y\ne z$. Now $\rho$ defined as above yields a homomorphism as
$\mathfrak{g}^\mathtt{U}_{b''(y)-b''(z)}(v')=0$ by definition of $\mathfrak{s}(v')$. Thus, we have constructed an algebra homomorphism $\rho:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$.
Any reflexion has order two, so there is also a homomorphism in the opposite direction. Their composition preserves weight spaces, so it is invertible.
This proves that the collection of algebras $\mathfrak{g}^\mathtt{U}=\{\widetilde\mathfrak{g}(v)/\mathfrak{s}(v)\}$ is the initial object in the category of root algebras based on $\mathcal{R}_0$.
\begin{dfn} \label{dfn:universalroot} The root algebra $\mathfrak{g}^\mathtt{U}=\{\widetilde\mathfrak{g}(v)/\mathfrak{s}(v)\}$ defined as above is called {\sl the universal root Lie superalgebra} defined by the component $\mathcal{R}_0$.\footnote{It was J.~Bernstein who once pointed out that factoring out by the maximal ideal having no intersection with the Cartan may be unjustified. The present work is to a large extent an outcome of his remark.} \end{dfn} The superinvolution $\theta$ of $\widetilde\mathfrak{g}$ defined in~\ref{sss:automorphism} induces an automorphism of the universal root algebra.
\subsubsection{Serre relations}
The classical Serre relations $$ (\mathrm{ad} e_x)^{-a_{xy}+1}(e_y)=0,\ (\mathrm{ad} f_x)^{-a_{xy}+1}(f_y)=0, $$ for $x,y\in X$ such that $a_{xx}\ne 0$ are among the most obvious relations defining the universal Lie superalgebra. They correspond to the summand $\widetilde\mathfrak{g}_{\pm(b'(x)-b'(y))}$ of $\mathfrak{s}(v)$ defined by the reflexion $r_x:v'\to v$. The ideal $\mathfrak{s}(v)$, however, is usually not generated by the classical Serre relations.
\subsubsection{} \label{sss:invariantideals} Let $\mathfrak{g}^\mathtt{U}=\{\mathfrak{g}^\mathtt{U}(v)\}$ denote the universal root algebra and let $\mathfrak{g}=\{\mathfrak{g}(v)=\mathfrak{g}^\mathtt{U}(v)/I(v)\}$ be a root algebra.
Any automorphism $\eta\in\operatorname{Aut}_\mathcal{R}(v)$ lifts to an automorphism of $\mathfrak{g}^\mathtt{U}(v)$ preserving $I(v)$.
The converse of this fact also holds; one has the following easy result. \begin{Lem} Let $\mathfrak{g}^\mathtt{U}$ be the universal root algebra at a component $\mathcal{R}_0$, $v\in\mathcal{R}_0$. Any $\operatorname{Aut}_\mathcal{R}(v)$-invariant ideal $J(v)$ of $\mathfrak{g}^\mathtt{U}(v)$ such that $J(v)\cap\mathfrak{h}=0$ defines a canonical root algebra $\mathfrak{g}$ whose $v$-component is $\mathfrak{g}(v)=\mathfrak{g}^\mathtt{U}(v)/J(v)$. \end{Lem} \begin{proof} For any $v'\in\mathcal{R}_0$ choose an isomorphism $\tilde\gamma:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$ and set $J(v')=\tilde\gamma(J(v))$. By invariance of $J(v)$ the ideal $J(v')$ is independent of the choice of $\tilde\gamma$. \end{proof}
\begin{rem} The lemma above implies that a root Lie superalgebra is canonically determined by any its component $\mathfrak{g}(v)=\widetilde\mathfrak{g}(v)/I(v)$. An ideal $I(v)\subset\widetilde\mathfrak{g}(v)$ defines a root superalgebra iff it contains $\mathfrak{s}(v)$ and its image in $\mathfrak{g}^\mathtt{U}(v)$ is $\operatorname{Aut}_\mathcal{R}(v)$-invariant. \end{rem}
\subsection{A side remark: groupoid extensions} \label{ss:side}
The groupoid extension $\pi:\mathcal{G}_0\to\mathcal{R}_0$ has fibers isomorphic to classifying spaces of a torus. This very special type of extension admits a description in terms of gerbes.
For $v\in\mathcal{R}_0$ and $\gamma:v\to v$ in $\mathcal{R}_0$ choose a lifting $\tilde\gamma:v\to v$ in $\mathcal{G}_0$. This defines an automorphism of the fiber $(\mathcal{G}_0)_v$ given by the formula $\alpha\mapsto \tilde\gamma\circ\alpha\circ\tilde\gamma^{-1}$. The result is independent of the choice of $\tilde\gamma$ as tori are abelian groups.
The above described action can be encoded into a groupoid extension $p:\mathcal{T}\to\mathcal{R}_0$ that is a group over $\mathcal{R}_0$: one has a multiplication $$m:\mathcal{T}\times_{\mathcal{R}_0}\mathcal{T}\to\mathcal{T}$$ corresponding to the fiberwise multiplication. Finally, $\pi:\mathcal{G}_0\to\mathcal{R}_0$ is a $\mathcal{T}$-torsor: there is an action $$\mathcal{T}\times_{\mathcal{R}_0}\mathcal{G}_0\to\mathcal{G}_0.$$
In more classical terms, we are talking about presenting an abelian group extension as a torsor over a split abelian group extension that is a semidirect product of the base and the fiber.
The group $p:\mathcal{T}\to\mathcal{R}_0$ is easy to describe. The groupoid $\mathcal{R}_0$ comes with the functor $\mathfrak{h}:\mathcal{R}_0\to\mathtt{Vect}$.
We define a functor $T:\mathcal{R}_0\to\mathtt{Gp}$ into the category of groups assigning to $v$ the factor group $T(v)=\mathfrak{h}(v)/K(v)$ where
$$K(v)=\{h\in\mathfrak{h}|b(x)(h)\in 2\pi i\mathbb{Z}\textrm{ for all }x\in X\}.$$
The functor $T$ gives rise to a groupoid extension $p:\mathcal{T}\to\mathcal{R}_0$ with $\operatorname{Ob}(\mathcal{T})=\operatorname{Ob}(\mathcal{R}_0)$ and $\mathrm{Hom}_\mathcal{T}(v',v)=\mathrm{Hom}_{\mathcal{R}_0}(v',v)\times T(v)$.
The action $\mathcal{T}\times_{\mathcal{R}_0}\mathcal{G}_0\to\mathcal{G}_0$ is defined as follows. Let $\mathfrak{g}=\{\mathfrak{g}(v)\}$ be a root algebra based on $\mathcal{R}_0$. To $(\alpha,\tau)\in\mathrm{Hom}_{\mathcal{R}}(v',v)\times T(v)$ and $\tilde\alpha:\mathfrak{g}(v')\to\mathfrak{g}(v)$, we assign $\tau\circ\tilde\alpha$ where $\tau:\mathfrak{g}(v)\to\mathfrak{g}(v)$ is given by rescaling.
Note that the torsor $\mathcal{G}_0$ is nontrivial as, for instance, for $\mathfrak{g}=\mathfrak{sl}_2$ the groupoid extension $\pi:\mathcal{G}_0\to\mathcal{R}_0$ is the projection $N(T)\to W$ of the normalizer of the torus to the Weyl group that is not split.
\section{Weyl group}
Throughout this section we assume that $\mathcal{R}_0$ is an admissible component of $\mathcal{R}$.
\subsection{Real roots} For $v\in\mathcal{R}_0$ we denote $$Q(v)=\operatorname{Span}_\mathbb{Z}\{b(x)\}_{x\in X}\subset \mathfrak{h}^*(v).$$
The parity function $p:X\to\mathbb{Z}_2$ extends to a group homomorphism $p:Q(v)\to\mathbb{Z}_2$ that we denote by the same letter $p$.
\begin{lem} \begin{itemize} \item[1.] For any $\gamma:v\to v'$ the isomorphisms $\mathfrak{h}(v)\to\mathfrak{h}(v')$ and $\mathfrak{h}^*(v)\to\mathfrak{h}^*(v')$ induce isomorphisms $\operatorname{Span}_\mathbb{C}\{a(x)\}_{x\in X}\to\operatorname{Span}_\mathbb{C}\{a'(x)\}_{x\in X}$ and $Q(v)\to Q(v')$. \item[2.] The isomorphisms $Q(v)\to Q(v')$ are compatible with the parity $p$. \end{itemize} \end{lem} \begin{proof} The claim directly follows from the formulas for reflexions. \end{proof}
\begin{dfn} An element $\alpha\in Q(v)$ is called a real root if there exists $\gamma:v'\to v$ and $x\in X$ so that $\gamma(b'(x))=\alpha$. \end{dfn}
\subsubsection{} \label{sss:realinall} The collection of real roots in $\mathfrak{h}^*(v)$ is denoted by $\Delta^\mathit{re}(v)$. By~\ref{corgalpha}, for any root algebra $\mathfrak{g}$, $\Delta^\mathit{re}(v)\subset\Delta(v)$ and all real root spaces of $\mathfrak{g}$ are one-dimensional. Real roots coming as described above from $\gamma:v\to v'$ form a subset $\Sigma_{\gamma}(v)$. We write $\Sigma(v)=\Sigma_\mathrm{id}(v)$ for the set of simple roots at $v$.
Clearly \begin{equation} \label{eq:rroots-bigunion} \Delta^\mathit{re}(v)=\bigcup_{\gamma:v\to v'}\Sigma_\gamma(v), \end{equation} but the union is not disjoint. Any $\alpha:v\to v'$ sends bijectively $\Delta^\mathit{re}(v)$ to $\Delta^\mathit{re}(v')$ and $\Sigma_{\gamma\circ\alpha}(v)$ to $\Sigma_\gamma(v')$.
\subsection{Isotropic, anisotropic and nonreflectable real roots}
\begin{dfn} \begin{itemize} \item[1.] A simple root $b(x)\in\mathfrak{h}^*(v)$ is called isotropic if $x$ is reflectable at $v$ and $\langle a(x),b(x)\rangle=0$. {\sl One has always $p(x)=1$ for an isotropic root $b(x)$.} \item[2.]A simple root $b(x)\in\mathfrak{h}^*(v)$ is called anisotropic if $x$ is reflectable at $v$ and $\langle a(x),b(x)\rangle\ne 0$. \item[3.]For an anisotropic simple root $\alpha=b(x)$ we define $\alpha^\vee=\frac{2a(x)}{a_{xx}}\in\mathfrak{h}(v)$. \end{itemize} \end{dfn}
We are going to extend these definitions to real roots. Since a real root at $v$ is defined by a path $\gamma:v\to v'$ and a simple root at $v'$, the extension is possible if two simple roots at $v'$ and $v''$ defining the same real root are of the same type.
\begin{prp} \label{prp:rroots-class} Let $\alpha\in\Sigma_{\gamma_1}(v)\cap\Sigma_{\gamma_2}(v)$ so that $\alpha=\gamma_1^*(b_1(x_1))=\gamma_2^*(b_2(x_2))$ for $\gamma_i:v\to v_i$. Then one of the following options holds. \begin{itemize} \item[1.] Both $b_i(x_i)\in\mathfrak{h}^*(v_i)$ are isotropic roots. \item[2.] Both $b_i(x_i)\in\mathfrak{h}^*(v_i)$ are anisotropic roots and $(\gamma_2\circ\gamma_1^{-1})^*(b_1(x_1)^\vee)=b_2(x_2)^\vee$. \item[3.] $x_1$ is nonreflectable at $v_1$ and $x_2$ is nonreflectable at $v_2$. \end{itemize} \end{prp} \begin{proof} We can assume, without loss of generality, that $\gamma_1=\mathrm{id}_v$ and $\gamma_2=\gamma:v\to v'$. Then $\alpha=b(x)=\gamma^*(b'(y))$.
Let $\mathfrak{g}$ be a root algebra and let $\alpha=b(x)$ for $v\in\mathcal{R}_0$ so that $x$ is $v$-reflectable. Then $\mathfrak{g}\langle\alpha\rangle$ is not the Heisenberg algebra and $e_x$ acts locally nilpotently on $\mathfrak{g}$. If, for $\gamma:v\to v'$, $\alpha=\gamma^*(b'(y))$, $e'_y$ acts locally nilpotently on $\mathfrak{g}(v')$, and, since $\mathfrak{g}\langle\alpha\rangle$ is not the Heisenberg algebra, this implies that $y$ is reflectable at $v'$. Let now $x$ be reflectable at $v$ and $y$ reflectable at $v'$. Then Corollary~\ref{corgalpha} describes possible options for $\mathfrak{g}\langle\alpha\rangle$. This implies the claim. \end{proof}
\subsubsection{}
Proposition~\ref{prp:rroots-class} allows one to extend the classification of simple roots to all real roots.
One has a decomposition \begin{equation} \label{eq:reunion} \Delta^\mathit{re}(v)=\Delta_\mathit{iso}(v)\sqcup\Delta_\mathit{an}(v) \sqcup\Delta_\mathit{nr}(v), \end{equation} where \begin{itemize} \item[] $\Delta_\mathit{iso}(v)$ is the set of isotropic real roots that are reflectable simple roots at some $v'\in\mathcal{R}_0$. \item[] $\Delta_\mathit{an}(v)$ is the set of anisotropic real roots that are reflectable simple roots at some $v'\in\mathcal{R}_0$. Any anisotropic real root $\alpha\in\Delta_\mathit{an}(v)$ defines a coroot $\alpha^\vee\in\mathfrak{h}(v)$. \item[] $\Delta_\mathit{nr}(v)$ is the set of non-reflectable real roots, i.e.\ those $\alpha$ such that for any $v'\in\mathcal{R}_0$ and $x\in X$ with $\alpha=b'(x)$, $x$ is non-reflectable at $v'$. \end{itemize}
{\begin{Rem}In our definition isotropic roots are necessarily real. In another tradition, a root of a symmetrizable Lie superalgebra is called isotropic if it has length zero. For the real roots both notions of isotropicity coincide. \end{Rem}}
For $\alpha\in\Delta_\mathit{an}(v)$ the pair $(\alpha,\alpha^\vee)$ defines a reflection $s_\alpha$ acting both on $\mathfrak{h}(v)$ and on $\mathfrak{h}^*(v)$ by the usual formulas \begin{equation} \label{eq:salpha} s_\alpha(\beta)=\beta-\langle \beta,\alpha^\vee\rangle\alpha,\ s_\alpha(h)=h-\langle \alpha,h\rangle\alpha^\vee. \end{equation}
\begin{crl} \label{crl:deltare-w} \begin{itemize} \item[1.] The set of real roots $\Delta^\mathit{re}(v)\subset\mathfrak{h}^*(v)$ is $\operatorname{Aut}_\mathcal{R}(v)$-invariant. \item[2.] For $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$ and $\alpha\in\Delta_\mathit{an}(v)$ one has \begin{equation} s_{\gamma(\alpha)}=\gamma s_\alpha \gamma^{-1}. \end{equation} \end{itemize} \end{crl} \begin{proof} The first claim is a direct consequence of formula (\ref{eq:rroots-bigunion}) and \ref{prp:rroots-class}. The second claim directly follows from the formulas for $s_\alpha$. \end{proof}
\subsubsection{Skeleton} \label{sss:skeleton}
We define $\mathtt{Sk}\subset\mathcal{R}$ as the subgroupoid having the same objects as $\mathcal{R}$; an arrow $\gamma:v\to v'$ is in $\mathtt{Sk}$ if it can be presented as a composition of reflexions. This is {\sl the skeleton groupoid}.
We denote by $\mathtt{Sk}(v)$ the connected component of the skeleton containing $v$. Note that, by definition, any arrow in $\mathtt{Sk}(v)$ induces the identity map of $\mathfrak{h}(v)$, so any two arrows with the same ends coincide. Therefore, $\mathtt{Sk}(v)$ is a contractible groupoid. Note that any arrow $\gamma:v\to v'$ in $\mathcal{R}$ can be decomposed $\gamma=\gamma''\circ\gamma'$ where $\gamma'$ is in $\mathtt{Sk}$ and $\gamma''$ is a composition of a homothety and a tautological arrow.
\begin{rem} \label{rem:uniqueness} As we prove later in~\ref{crl:unique-in-sk}, this decomposition is unique. \end{rem} \subsubsection{} If $\beta:v\to v'$ is a homothety or a tautological arrow, $\beta(\Sigma(v'))=\Sigma(v)$. Therefore, for $\gamma=\gamma''\circ\gamma'$ as above, $\Sigma_\gamma(v)=\Sigma_{\gamma''}(v)$. Since $\mathtt{Sk}(v)$ is contractible, it makes sense to denote $\Sigma_{v'}(v)=\Sigma_\gamma(v)$ for $\gamma:v\to v'$ in $\mathtt{Sk}(v)$.
Thus, we have
\begin{equation} \Delta^\mathit{re}(v)=\bigcup_{v'\in\mathtt{Sk}(v)}\Sigma_{v'}(v) \end{equation} (the union still does not have to be disjoint).
\subsubsection{Spine} \label{sss:spine}
We denote by $\mathtt{Sp}$ the subgroupoid of $\mathtt{Sk}$ spanned by the isotropic reflections only. The component of $\mathtt{Sp}$ containing $v$ is denoted by $\mathtt{Sp}(v)$. It is obviously contractible. Cartan data of $\mathtt{Sp}(v)$ describe all possible Cartan data for the component $\mathcal{R}_0$ of $\mathcal{R}$ containing $v$, up to $D$-equivalence.
\subsection{Weyl group and its actions} \label{ss:weyl}
In this subsection we define the Weyl group assigned to a component $\mathcal{R}_0$. By definition, the Weyl group identifies with a subgroup of $\mathrm{GL}(\mathfrak{h}(v))$, for every $v$. Any arrow $\gamma:v\to v'$ defines an isomorphism of the Weyl groups at $v$ and at $v'$.
We also define an action of $W(v)$ on $\mathtt{Sk}(v)$.\footnote{The objects of $\mathtt{Sk}(v)$ classify the (attainable) Borel subalgebras containing a given Cartan subalgebra $\mathfrak{h}(v)$.}
\begin{dfn} \label{dfn:weylgroup} The Weyl group $W=W(v)$ (at $v\in\mathcal{R}$) is the group of automorphisms of $\mathfrak{h}(v)$ generated by the reflections with respect to anisotropic real roots. \end{dfn}
\subsubsection{Embedding $i:W(v)\to\operatorname{Aut}_\mathcal{R}(v)$} \label{sss:weyltoaut}
The representation of $\operatorname{Aut}_\mathcal{R}(v)$ in $\mathfrak{h}=\mathfrak{h}(v)$ is faithful by definition of $\mathcal{R}$. Let us show that $W(v)$ is a subgroup of the image of $\operatorname{Aut}_\mathcal{R}(v)$ in $\mathrm{GL}(\mathfrak{h}(v))$. Let $\alpha=b'(x)$ be an anisotropic root. Without loss of generality we can assume that there is an arrow $\gamma:v\to v'$ in $\mathtt{Sk}(v)$. Then the composition
$$\gamma^{-1}\circ t_{s_\alpha}\circ r_x\circ\gamma:v\to v$$ induces the reflection $s_\alpha$ on $\mathfrak{h}$. This proves that generators of $W(v)$ are in the image of the embedding $\operatorname{Aut}_\mathcal{R}(v)\to\mathrm{GL}(\mathfrak{h}(v))$, so that the Weyl group identifies with a subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
It is clear that any arrow $\gamma:v\to v'$ intertwines the canonical embeddings $W(v)\to\operatorname{Aut}_\mathcal{R}(v)$ and $W(v')\to\operatorname{Aut}_\mathcal{R}(v')$.
Note that $\operatorname{Aut}_\mathcal{R}(v)$ acts on $W(v)$ so that the embedding $i$ commutes with this action. This means that $W(v)$ is a normal subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
\begin{lem} \label{lem:step1} Let $r_x:v\to v'=(\mathfrak{h},a',b',p)$ be an anisotropic reflexion, $\alpha=b(x)\in\mathfrak{h}^*$. Then $s_\alpha(a(y))=a'(y)$ and $s_\alpha(b(y))=b'(y)$
for all $y\in X$. \end{lem} \begin{proof} Immediate from the formulas~\ref{sss:reflexionformulas} and~(\ref{eq:salpha}). \end{proof}
\begin{lem} \label{lem:step2} Let $r_x: v\to v'=(\mathfrak{h},a',b',p')$ and $r_x:w=(\mathfrak{h},a_w,b_w,p_w)\to w'=(\mathfrak{h},a'_w,b'_w,p'_w)$ be reflexions. Let $\alpha\in\Delta^\mathit{re}$ satisfy the conditions \begin{equation} \label{eq:v-to-w}
s_\alpha(a(y))=a_w(y),\ s_\alpha(b(y))=b_w(y),\ p(y)=p_w(y),\ y\in X. \end{equation} Then \begin{equation} \label{eq:v'-to-w'}
s_\alpha(a'(y))=a'_w(y),\ s_\alpha(b'(y))=b'_w(y),\ p'(y)=p'_w(y),\ y\in X. \end{equation} \end{lem} \begin{proof} The automorphism $s_\alpha$ carries the basis $\{b(y)\}$ of $Q(v)$ to the basis $\{b_w(y)\}$ of $Q(w)$. The Cartan matrices at $v$ and $w$ coincide and the formulas defining $r_x$ are the same.
\end{proof}
\begin{rem} Note that if (\ref{eq:v-to-w}) holds then $x$ is reflectable at $v$ if and only if it is reflectable at $w$. This is so as the Cartan matrices of $v$ and of $w$ coincide. \end{rem}
\begin{prp} \label{prp:WSigma} Let $w\in W(v)$, $v'=(\mathfrak{h},a',b',p')\in\mathtt{Sk}(v)$. Then there exists a unique $v''=(\mathfrak{h},a'',b'',p')\in\mathtt{Sk}(v)$ such that \begin{equation} \label{eq:W-action}
w(a'(y))=a''(y),\ w(b'(y))=b''(y),\ y\in X. \end{equation} \end{prp} The proposition defines an action of the Weyl group $W$ on $\mathtt{Sk}(v)$. \begin{proof} The uniqueness claim is obvious. For the existence, it is sufficient to verify the claim for $w=s_\alpha$. We can assume that $\alpha=b(x)$ is a simple root at $v$ and let $r_x:v\to u$ be the reflexion. If $v'=v$ then $v''=u$ satisfies the requirements by Lemma~\ref{lem:step1}. Otherwise, choose an isomorphism $\phi:v\to v'$, present it as a composition $\phi=\phi_n\circ\ldots\circ\phi_1$, where each $\phi_i$ is a reflexion. We define an arrow $\psi:u\to v''$ as the composition $\psi=\psi_n\circ\ldots\circ\psi_1$ where $\psi_i=r_y$ if $\phi_i=r_y$~\footnote{Note that $\psi_i$ and $\phi_i$ are {\sl namesakes}: they have the same name but are applied to different objects of the groupoid.}. Note that the composition $\psi$ necessarily makes sense. Now a consecutive application of Lemma~\ref{lem:step2} yields the result. \end{proof}
\begin{rem} \label{rem:explicit} The proof provides us with an explicit formula: Let $\alpha=b_v(x)$. Then $v''=s_\alpha(v')$ is the target of the composition $\psi\circ r_x\circ\phi^{-1}:v'\to v''$, see the picture below. \end{rem}
\begin{equation} \label{eq:pic-salpha} \xymatrix{ &\overset{v}{\bullet}\ar_{r_x}^{\alpha=b_v(x)}[dd]\ar^{\phi_1=r_{y_1}}[rr]&&\bullet &\dots&\bullet\ar^{\phi_n=r_{y_n}}[rr]&&\overset{v'}{\bullet}\ar@/^1pc/ @[red]^{\color{red}{v''=s_\alpha(v')}}[dd] \\ &&&&&&\\ &\overset{u}{\bullet}\ar^{\psi_1=r_{y_1}}@{-->}[rr]&&\bullet &\dots&\bullet\ar^{\psi_n=r_{y_n}}@{-->}[rr]&&\overset{v''}{\bullet} } \end{equation}
The embedding $i:W(v)\to\operatorname{Aut}_\mathcal{R}(v)$ can be easily expressed in terms of the action of $W$ on $\mathtt{Sk}(v)$.
\begin{crl} \label{crl:w} For any $w\in W(v)$ let $\gamma_w:v\to w(v)$ be the arrow in $\mathtt{Sk}(v)$. Then $$ i(w)=t_w\circ\gamma_w. $$ \end{crl} \begin{proof} The composition $t_w\circ\gamma_w$ is an endomorphism of $v$. The automorphism $i(w)$ is uniquely defined by its action on $\mathfrak{h}$. The composition $t_w\circ\gamma_w$ provides the same action. \end{proof}
We will show later (see~\ref{crl:Wfree}) that the action of the Weyl group $W(v)$ on $\mathtt{Sk}(v)$ is free. It is not transitive in general. Here is what we can say about the orbits of the action.
\begin{lem} \label{lem:decomposition0} For every $v,\ v'\in\mathtt{Sk}(v)$ there exists $w\in W(v)$ and a sequence of isotropic reflexions $$ v\stackrel{r_{x_1}}{\to}\ldots\stackrel{r_{x_k}}{\to}v'' $$ such that $v'=w(v'')$. In other words, there exists $w\in W(v)$ and $v''\in\mathtt{Sp}(v)$ so that $v'=w(v'')$. \end{lem} \begin{proof} Choose a presentation of $\phi:v\to v'$ as a composition $\phi=\phi_n\circ\ldots\circ\phi_1$ of reflexions. If $i$ is the first index for which $\phi_i$ is an anisotropic reflexion, we can, as in the proof of Proposition~\ref{prp:WSigma}, erase it, replacing reflexions $\phi_j$, $j>i$ with their namesakes $\psi_j$, so that the target of the composition $$ \psi_n\circ\ldots\circ\psi_{i+1}\circ\phi_{i-1}\circ\ldots\circ\psi_1: v\to v'' $$ satisfies the property $s_\alpha(v'')=v'$, for an anisotropic root $\alpha$ defined by $\phi_i$. Continuing parsing the decomposition of $\phi$ in this way, we end up with the required decomposition. \end{proof}
\subsubsection{Principal reflections}
In the case $p(x)=0$ for all $x$ and for all $v\in\mathcal{R}_0$, the Weyl group $W$ is known to be generated by simple reflections $s_{b(x)}, x\in X$ for a fixed vertex $v\in\mathcal{R}_0$. This is not true in general, as, for instance, there may exist $v\in\mathcal{R}_0$ for which all $a_{xx}=0$.
Here is what can be said in general.
\begin{dfn} Fix $v\in\mathcal{R}_0$. A root $\alpha\in\Delta_\mathit{an}(v)$, is called $v$-principal if there exists $v'\in\mathtt{Sp}(v)$ and an element $x\in X$ such that $\alpha=b'(x)$. A reflection $s_\alpha$ with respect to a $v$-principal root is called a $v$-principal reflection. \end{dfn}
One has \begin{prp}\label{prp:generators} The Weyl group $W(v)$ is generated by $v$-principal reflections. \end{prp} \begin{proof} Let $\alpha\in\Sigma_\gamma(v)$ be anisotropic where $\gamma:v\to v'=(\mathfrak{h},a',b',p')$ is a composition of reflexions and $\alpha=b'(x)$. We will prove the claim by induction on the length of the presentation of $\gamma$ as a composition of reflexions.
If the sequence consists of isotropic reflexions only, $\alpha$ is principal and there is nothing to prove. Otherwise there is an anisotropic reflexion in the sequence. We denote below by $\phi'$ a composition of isotropic reflexions and by $r_y$ the first anisotropic reflexion. $$ v\stackrel{\phi'}{\to}v_1\stackrel{r_y}{\to} v_2\stackrel{\phi}{\to}v'. $$ Let $v_1=(\mathfrak{h},a_1,b_1,p_1)$ and $\beta=b_1(y)$. By Proposition~\ref{prp:WSigma}, $s_\beta$ carries $v'$ to a vertex $v''$ obtained as the target of a composition of reflexions $\psi:v_1\to v''$ having the same indices as the components of $\phi:v_2\to v'$. We denote $v''=(\mathfrak{h},a'',b'',p'')$ and we get $b'(x)=s_\beta(b''(x))$. Therefore, $s_\alpha=s_{b'(x)}=s_{s_\beta(b''(x))}=s_\beta s_{b''(x)}s_\beta$, the last equality by~\ref{crl:deltare-w}. Now $s_\beta$ is principal and $v''$ has a shorter sequence of reflexions connecting it to $v$. \end{proof}
\begin{rem} \label{rem:aniso-w} The proof of \ref{prp:generators} implies that any root $\alpha\in\Delta_\mathit{an}(v)$ is $W$-conjugate to a principal root. \end{rem}
\subsection{Modules over a root algebra} \begin{dfn} Let $\mathfrak{g}:=\mathfrak{g}(v)$ be a root Lie superalgebra supported at $\mathcal{R}_0$. A weight $\mathfrak{g}$-module $M$ is, by definition, a $\mathfrak{g}(v)$-module $M$ whose restriction
to $\mathfrak{h}$ is semisimple.
\end{dfn}
For a weight $\mathfrak{g}$-module $M$ we denote by $\Omega(M)$ the set of weights of $M$.
We will now define integrable $\mathfrak{g}$-modules. \begin{dfn} Let $\mathfrak{g}=\mathfrak{g}(v)$ be a root Lie superalgebra.
We say that a weight $\mathfrak{g}$-module $M$ is {\sl integrable}
if $\mathfrak{g}_\alpha$ acts locally nilpotently on $M$ for each anisotropic $\alpha\in\Delta^\mathit{re}$. \end{dfn}
Note that the adjoint representation of any root Lie superalgebra is integrable.
Let $\mathfrak{g}$ be a root Lie superalgebra and let $M$ be an integrable $\mathfrak{g}$-module. Corollary~\ref{corgalpha} implies that $\Omega(M)$ is $W$-invariant. Moreover, the multiplicities of the weights $\mu$ and $w(\mu)$ coincide.
The adjoint representation of any root Lie superalgebra $\mathfrak{g}$ is integrable. In particular, the set of roots $\Delta(\mathfrak{g})$ of any root algebra is $W$-invariant.
\section{Coxeter structures} \label{sec:coxeter}
\subsection{Introduction} A Coxeter structure on a group $G$ is a set of elements $s_i\in G$ such that $(G,\{s_i\})$ is a Coxeter group. A Coxeter structure on a group provides its combinatorial description.
In this section we prove that the Weyl group of any admissible component $\mathcal{R}_0$ has a Coxeter structure. A somewhat similar combinatorial description can be given to the components of the root groupoid.
\subsubsection{} \label{sss:notation-coxeter}
Fix an indecomposable admissible component $\mathcal{R}_0$ and $v\in\mathcal{R}_0$. In what follows we use the notation of \ref{sss:skeleton}, suppressing the parameter $v$ from the notation. Thus, we will
write $\mathfrak{h}$ for $\mathfrak{h}(v)$, $\Sigma$ for $\Sigma(v)$, and, for $v'\in\mathtt{Sk}(v)$, $\Sigma_{v'}$ for $\Sigma_{v'}(v)$. Recall that $\Sigma=\{b(x)\}_{x\in X}$ and $Q=\operatorname{Span}_\mathbb{Z}(\Sigma_{v'})$ is independent of $v'$.
We set $$ Q^+_{v'}:=\mathbb{Z}_{\geq 0}\Sigma_{v'}\subset Q, \ \ \ Q^+:=Q^+_v.$$
\subsection{Coxeter structure of the Weyl group} Fix a vertex $v\in\mathcal{R}_0$. Let $\alpha_1,\dots,\alpha_m$ be the set of $v$-principal roots and $s_i$ be the reflection $s_{\alpha_i}$. The Weyl group $W$ is generated by $s_i$. We say that $w=s_{i_1}\ldots s_{i_l}$ is a reduced decomposition if it has a minimal length. In this case we say that $\ell(w)=l$ is the length of $w$.
Let $$C:=\bigcap_{v'\in\mathtt{Sp}(v)}Q^+_{v'}.$$ \begin{lem} Let $\alpha$ be an anisotropic real root.
\begin{enumerate}
\item There is $w\in W$ such that $w(\alpha)$ is $v$-principal.
\item If $\alpha\in Q^+_{v'}$ for some $v'\in\mathtt{Sp}(v)$ then $\alpha\in C$.
\item Either $\alpha\in C$ or $\alpha\in -C$.
\end{enumerate}
\end{lem}
\begin{proof} For (1) see~\ref{rem:aniso-w}.
To prove (2) we notice that
$(Q^+_{v'}\setminus Q^+_v)\cap\Delta^\mathit{re}$ consists of isotropic roots.
Now let us show (3). By (1) and (2) it suffices to check that if $\alpha\in C$ and $s_i$ is a principal reflection then $s_i(\alpha)\in C$ or
$s_i(\alpha)\in -C$. Indeed, let $v'$ be a vertex such that $\alpha_i\in \Sigma_{v'}$. Then $s_i(\alpha)\in Q^+_{v'}$ unless
$\alpha=-\alpha_i$.
In the latter case $\alpha\in -C$.
\end{proof}
Claim (2) of the lemma above means that
$$Q^+_{v'}\cap\Delta_\mathit{an}=C\cap\Delta_\mathit{an}.$$ This is the set of positive anisotropic roots (with respect to any $v'\in\mathtt{Sp}(v)$).
\begin{lem}\label{lem:reduced} Let $w=s_{i_1}\dots s_{i_t}$ and let
$\alpha_i$ be a principal root such that $w(\alpha_i)\in -C$. Then there exists $j$
such that $ws_i=s_{i_1}\dots \hat{s}_{i_j}\dots s_{i_t}$.
\end{lem}
\begin{proof} Define $\beta_k:=s_{i_{k+1}}\dots s_{i_t}(\alpha_i)$
for $k=0,\ldots,t-1$ and $\beta_t:=\alpha_i$. Since $\beta_t\in C$ and $\beta_0\in-C$ there is $j$ such that $\beta_j\in C$ and $\beta_{j-1}\in -C$. Hence $\beta_{j}=\alpha_{i_j}$. We get $\alpha_{i_j}=u(\alpha_i)$ for $u:=s_{i_{j+1}}\dots s_{i_t}$. Using the formula $us_{\alpha}u^{-1}=s_{u\alpha}$, see~\ref{crl:deltare-w}, we obtain
$$ws_i=s_{i_1}\dots s_{i_{j-1}}(us_iu^{-1})us_i=s_{i_1}\dots \hat{s}_{i_j}\dots s_{i_t}.$$
\end{proof}
\begin{crl}\label{crl: exchange} If $w=s_{i_1}\dots s_{i_l}$ is a reduced decomposition and $\alpha_i$ is a principal root then
\begin{enumerate}
\item $\ell(ws_i)<\ell(w)=l$ if and only if $w(\alpha_i)\in -C$.
\item $w(\alpha_{i_l})\in -C$.
\item If $\ell(ws_i)<\ell(w)$ then for some $j$
$$s_{i_j}\dots s_{i_l}=s_{i_{j+1}}\dots s_{i_l}s_i.$$
\end{enumerate}
\end{crl}
\begin{proof} See \cite{Kbook}, Lemma 3.11.
\end{proof}
\begin{crl} $W$ is a Coxeter group generated by $s_1,\ldots,s_m$.
In the Coxeter relations $(s_is_j)^m=1$ the possible values of
$m$ are $2,3,4,6$ or $\infty$.
\end{crl}
\begin{proof} See~\cite{B}, Th\'eor\`eme 6.1, \S 1, Ch.~4.
If $\alpha$ and $\beta$ are principal roots so that
$s_1=s_\alpha$ and $s_2=s_\beta$, it is easy to see that the
union $W'(\alpha)\cup W'(\beta)$, where $W'$ is the subgroup
of $W$ generated by $s_1$ and $s_2$, is a classical root system of rank $2$. This implies that $m=2,3,4,6$ or $\infty$.
\end{proof}
\begin{crl} If $w(\alpha_i)\in C$ for all $i$ then $w=1$.
\end{crl}
\begin{proof} Follows from~\ref{crl: exchange} (2).
\end{proof}
\begin{crl}
\label{crl:Wfree}
Let $v'=w(v)\in\mathtt{Sk}(v)$. If $Q^+_v=Q^+_{v'}$ then $w=1$. In particular, the action of $W$ on $\mathtt{Sk}(v)$ is free.
\end{crl}
\begin{proof}
If $Q^+_v=Q^+_{v'}$ then $w(Q^+_v)=Q^+_{v}$ and hence $w(\alpha)\in C$ for all anisotropic $\alpha\in Q^+_v$.
\end{proof}
\subsubsection{} We denote by $\Delta^+_\mathit{re}(v)$ the set of real roots positive at $v$. We set $\Delta^+_\mathit{an}(v)=\Delta_\mathit{an}\cap\Delta^+_\mathit{re}(v)$. \begin{crl} \label{crl:W-len}
Let $v'=w(v)\in\mathtt{Sk}(v)$. Then $\ell(w)$ is the cardinality
of the set $\Delta^+_\mathit{an}(v)-\Delta^+_\mathit{an}(v')$. \end{crl} \begin{proof} Let $w=s_{i_1}\ldots s_{i_l}$ be a reduced decomposition. Set $\beta_j=s_{i_1}\ldots s_{i_{j-1}}(\alpha_{i_j})$. Then $v'=s_{\beta_l}\ldots s_{\beta_1}(v)$ and $\Delta^+_\mathit{an}(v)-\Delta^+_\mathit{an}(v')=\{\beta_1,\ldots,\beta_l\}$. \end{proof}
{
\begin{crl}
\label{crl:Spiff}
For $v'\in \mathtt{Sk}(v)$ there exists a unique $v''\in\mathtt{Sp}(v)$ and $w\in W$
such that $v'=w(v'')$. The cardinality
of the set $\Delta^+_\mathit{an}(v)-\Delta^+_\mathit{an}(v')$ is equal to $\ell(w)$.
\end{crl}
\begin{proof}
The existence of $v'',w$ follows from
Lemma~\ref{lem:decomposition0}. An isotropic reflexion does not change the set $\Delta^+_{\mathit{an}}$, so
$\Delta^+_{\mathit{an}}(v'')=\Delta^+_{\mathit{an}}(v)$ and the required formula for
$\ell(w)$ follows from~\ref{crl:W-len}.
For the uniqueness assume that $v'=w_1(v_1)=w_2(v_2)$
for $v_1,v_2\in \mathtt{Sp}(v)$. Then $w_1^{-1}w_2(v_2)=v_1$
and $\Delta^+_{\mathit{an}}(v_1)=\Delta^+_{\mathit{an}}(v_2)$, so $\ell(w_1^{-1}w_2)=0$.
Thus $w_1=w_2$ and $v_1=v_2$ as required. \end{proof}
}
\subsection{Skeleton as a graph}
\label{skeleton_property} A structure similar to the Coxeter structure on the Weyl group exists also on admissible components of the root groupoid. We fix $v_0\in\mathcal{R}$ and study a combinatorial structure of $\mathtt{Sk}(v_0)$. Note that, from the algebraic point of view, $\mathtt{Sk}(v_0)$ is a contractible groupoid, so it may be seen as something lacking any interest. However, its arrows are compositions of reflexions, and remembering these reflexions makes a lot of sense. In this subsection we present a description of the shortest path length in this graph, similar to the one given in~\ref{crl:W-len}. {It has a nice application to the description of the group $\operatorname{Aut}_\mathcal{R}(v)$ in \ref{ss:autv}.} In Section~\ref{sec:coxeter2} we study a Coxeter property of $\mathtt{Sk}(v)$.
\subsubsection{} We look at the skeleton $\mathtt{Sk}(v_0)$ as the graph where the reflexions connect the vertices. Thus, the reflexions are the edges of our graph. We color the edges by elements of $\mathfrak{h}^*=\mathfrak{h}(v_0)^*$: a reflexion $v\xrightarrow{r_{x}}v'$ is colored by the real root $\alpha=-b(x)=b'(x)$. Note that $\Delta^+_\mathit{re}(v')$ is obtained from $\Delta^+_\mathit{re}(v)$ by replacing the (existing) root $-\alpha$ with $\alpha$.
For a path $$v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_{t-1}}}v_{t-1}\xrightarrow{r_{x_{t}}}v_{t}=v'$$ colored by the sequence $(\alpha_1,\ldots,\alpha_t)$ we have \begin{equation}\label{Delta+rev'} \Delta^+_{\mathit{re}}(v')=\bigl(\Delta^+_{\mathit{re}}(v_0)\cup\{\alpha_i\}_{i=1}^t\bigr) \setminus \{-\alpha_i\}_{i=1}^t.\end{equation} In particular, if a path is colored by the sequence $(\alpha_1,\ldots,\alpha_t)$ with $\alpha_t=\alpha_1$, then there exists $i$ such that $\alpha_i=-\alpha_1$.
We will start with an obvious remark. \subsubsection{Remark} \label{axyyx=0} Let $v\xrightarrow{r_{x}}v'$ be a reflexion. If $a_{xy}=a_{yx}=0$ and $x\ne y$, then $a'(y)=a(y)$, $b'(y)=b(y)$ and the $y$th rows (and the $y$th columns) of the Cartan matrices $A_v,A_{v'}$ are equal.
\begin{lem} \label{lem:independent} Given a path $v_0\stackrel{r_x}{\to} v_1\stackrel{r_y}{\to} v_2$ colored by $(\alpha,\beta)$, $\alpha\ne-\beta$, the following conditions are equivalent. \begin{itemize} \item[(1)] $\alpha-\beta\not\in\Delta^\mathtt{C}$ (the set of roots of $\mathfrak{g}^\mathtt{C}$). \item[(2)] There exists a path $v_0\stackrel{r_y}{\to} v_3\stackrel{r_x}{\to} v_2$ colored by $(\beta,\alpha)$. \end{itemize} \end{lem} \begin{proof} If (2) is fulfilled, both $\alpha$ and $\beta$ are simple roots at $v_2$, so their difference is not a root. Let us prove that (1) implies (2). We have $\alpha, -\beta\in\Sigma_{v_1}$, $-\alpha\in\Sigma_{v_0}$ and $\beta\in\Sigma_{v_2}$. We will denote by $A^i=(a^i_{xy})$ the Cartan matrix at $v_i$ and we will write $a_i(z)$ and $b_i(z)$ instead of $a_{v_i}(z)$ and $b_{v_i}(z)$.
If $\beta$ is anisotropic, $\langle\alpha,\beta^\vee\rangle=0$ as otherwise both $s_{-\beta}(\alpha)=\alpha-\langle\alpha,\beta^\vee\rangle\beta$ and $\alpha$ are roots, which would imply that $\alpha-\beta$ is also a root. This implies that $a^1_{xy}=0$. If $\beta$ is isotropic, we still have $a^1_{xy}=0$ as otherwise $r_y$ would carry $\alpha=b_1(x)$ to $\alpha-\beta$ that is not a root. Thus, by admissibility, $a^1_{yx}=0$. Using Remark~\ref{axyyx=0}, we deduce $-\beta\in\Sigma_{v_0}$ and $\alpha\in\Sigma_{v_2}$ so that
$$b_0(x)=-\alpha, b_0(y)=-\beta$$ $$b_1(x)=\alpha, b_1(y)=-\beta\textrm{ and } a_1(y)=a_0(y)$$ $$b_2(x)=\alpha,\ b_2(y)=\beta\textrm{ and } a_2(x)=a_1(x).$$
We will show that $x$ is reflectable at $v_2$, $y$ is reflectable at $v_3$ and $r_y\circ r_x$ carries $v_2$ to $v_0$. This will give the square in $\mathtt{Sp}(v)$ shown in the picture. $$ \xymatrix{ & & v_1\ar^{r_y}_\beta[rd] & \\ & v_0\ar^{r_x}_\alpha[ru]&& v_2\ar^{r_x}_{-\alpha}[ld]\\ && v_3\ar^{r_y}_{-\beta}[lu] & } $$ Reversing the lower reflexions, we get the required result.
Note that reflectability of $x\in X$ at $v$ is determined by the $x$-th row of the Cartan matrix at $v$. By~\ref{axyyx=0} the $x$-row of $A^2$ is equal to the $x$-row of $A^1$, so $x$ is reflectable at $v_2$. Since $b_1(x)=b_2(x)$, $a_1(x)=a_2(x)$ and the $x$th row (resp., $x$th column) of $A^2$ is equal to the $x$th row (resp., $x$th column) of $A^1$ we have $$b_0(z)-b_1(z)=b_3(z)-b_2(z),\ \ \ a_0(z)-a_1(z)=a_3(z)-a_2(z).$$
Once more, by~\ref{axyyx=0} applied to $r_x:v_2\to v_3$, the $y$th row of $A^3$ is equal to the $y$th row of $A^2$, so $y$ is reflectable at $v_3$. It remains to show that $r_y$ carries $v_3$ to $v_0$. Since $b_2(y)=b_3(y)$, $a_2(y)=a_3(y)$ and the $y$th row (resp., $y$th column) of $A^3$ is equal to the $y$th row (resp., $y$th column) of $A^2$, we have $$b_1(z)-b_2(z)=b'_0(z)-b_3(z),\ \ \ a_1(z)-a_2(z)=a'_0(z)-a_3(z).$$ Therefore, $b'_0(z)=b_0(z)$ and $a'_0(z)=a_0(z)$. Hence $v'_0=v_0$.
\end{proof}
\begin{lem} \label{lem:pre-exchange} Let $$v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_s}}v_s$$ be a path in $\mathtt{Sp}(v)$ colored by a sequence $(\alpha_1,\ldots,\alpha_s)$ with the property $\alpha_i\not=-\alpha_j$ for $i\not=j$. Assume that $\alpha=b_{v_0}(x)=b_{v_s}(y)$ is isotropic. Then $\alpha-\alpha_i\not\in\Delta^{\mathtt{C}}$, $x=y$ and $b_{v_i}(x)=\alpha$, $a_{v_i}(x)=a_{v_0}(x)$ for all $i$. \end{lem} \begin{proof} Set $\beta:=\alpha-\alpha_1$. Let us show that $\beta$ is not a root. Assume the contrary. Then $\beta$ is even and $\frac{\beta}{2}$ is not a root. Since {the set of even positive roots $\beta$ such that $\frac{\beta}{2}$ is not a root }
is preserved by isotropic reflections, $\beta=\alpha-\alpha_1\in \Delta^+_{v_s}$. Therefore $\alpha_1\in -\Delta^+_{v_s}$. Since $\alpha_1\in \Delta^+_{v_1}$, there should exist $1<i\leq s$ such that $\alpha_i=-\alpha_1$, a contradiction. Since $\beta\not\in\Delta^\mathtt{C}$, we have $b_{v_1}(x)=b_{v_0}(x)=\alpha$ and $a_{v_1}(x)=a_{v_0}(x)$.
Now the assertion follows by induction in $s$. \end{proof}
The following result describes an exchange property for a sequence of isotropic reflections.
\begin{prp} \label{prp:spine-com} Let $$v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_d}}v_d\xrightarrow{r_{x_{d+1}}}v_{d+1}$$ be a path in $\mathtt{Sp}(v)$ colored by a sequence $(\alpha_1,\ldots,\alpha_{d+1})$ with the property $\alpha_{d+1}=-\alpha_1$ and $\alpha_i\not=-\alpha_j$ for $1\leq i<j\leq d$. Then $x_{d+1}=x_1$ and there is a sequence of isotropic reflections $$v_0\xrightarrow{r_{x_2}}v'_2\xrightarrow{r_{x_3}}\dots \xrightarrow{r_{x_{d-1}}}v'_{d-1}\xrightarrow{r_{x_d}}v_{d+1}$$ colored by the sequence $(\alpha_2,\ldots,\alpha_d)$. \end{prp} \begin{proof} We apply Lemma~\ref{lem:pre-exchange} to the sequence of reflexions $v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_d}}v_d$ and the root $\alpha:=\alpha_1$. We deduce that $\alpha_1-\alpha_2\not\in\Delta^\mathtt{C}$. This implies that, by Lemma~\ref{lem:independent}, one can replace the sequence $v_0\to v_1\to v_2$ with $v_0\to v_2'\to v_2$ and then a simple induction gives the required result. \end{proof}
\begin{rem} Lemma \ref{lem:pre-exchange} implies that for $v,v'$ in $\mathtt{Sp}(v_0)$ we have $$b_v(x)=b_{v'}(y)\in \Delta_{\mathit{iso}}\ \ \Longrightarrow\ \ x=y, a_v(x)=a_{v'}(y).$$ In \ref{ss:s21b} below
we will see that $b_v(x)=b_{v'}(y)\in \Delta_{\mathit{an}}$ implies neither $x=y$ nor $a_v(x)=a_{v'}(y)$.
\end{rem}
\begin{crl} \label{crl:unique-in-sk} Let $v'\in\mathtt{Sk}(v)$ satisfy $\Delta^+_\mathit{re}(v')=\Delta^+_\mathit{re}(v)$. Then $v'=v$. In particular, if a homothety can be presented as a composition of reflexions, it is the identity. \end{crl} \begin{proof} By Lemma~\ref{lem:decomposition0} there exist $v''\in\mathtt{Sp}(v)$ and $w\in W$ such that $v'=w(v'')$. The sets of positive anisotropic roots at $v$ and at $v''$ coincide as none of them can become negative under an isotropic reflection. Therefore, $w=1$ by~\ref{crl:W-len}. This implies that $v'\in\mathtt{Sp}(v)$. Let $$v=v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_d}}v_d=v'$$ be a sequence of isotropic reflections colored by a sequence $(\alpha_1,\ldots,\alpha_d)$. Since $\Delta^+_\mathit{re}(v')=\Delta^+_\mathit{re}(v)$, the formula~(\ref{Delta+rev'}) implies $\alpha_i=-\alpha_j$ for some $i,j$. Then by~\ref{prp:spine-com} the sequence of isotropic reflections can be shortened. \end{proof}
\begin{dfn} For two vertices $v,v'\in\mathtt{Sk}(v_0)$ the distance $d(v,v')$ is defined to be the minimal number of reflexions in the decomposition of the arrow
$v\to v'$. \end{dfn}
\begin{crl} \label{crl:Sk-len} For $v,v'\in\mathtt{Sk}(v_0)$ the distance $d(v,v')$ is the cardinality of $\Delta^+_\mathit{re}(v)-\Delta^+_\mathit{re}(v')$. \end{crl} \begin{proof} If the difference $\Delta^+_\mathit{re}(v)-\Delta^+_\mathit{re}(v')$ is nonempty, it has an element that is a simple root $\alpha$ at $v$ that can be replaced with $-\alpha$ by a reflection. Continuing this, we can get, after the required number of steps, a vertex $v''$ having the same $\Delta^+_\mathit{re}(v'')$ as $\Delta^+_\mathit{re}(v')$. Then by~\ref{crl:unique-in-sk} $v''=v'$. \end{proof}
Note the following description of non-reflectable roots.
\begin{crl}\label{crl:rereflectable} $\Delta_\mathit{nr}=\Delta^\mathit{re}\setminus(-\Delta^\mathit{re})$. \end{crl} \begin{proof} Obviously, if $\alpha$ is isotropic or anisotropic, $-\alpha\in\Delta^\mathit{re}$. Let us assume that $-\alpha\in\Delta^\mathit{re}$, $\alpha\in\Sigma_v$ and $-\alpha\in\Sigma_{v'}$. By formula (\ref{Delta+rev'}) any path connecting $v$ with $v'$ contains an edge where $\alpha$ becomes negative. This proves reflectability of $\alpha$. \end{proof}
\subsubsection{Weyl vector} \label{sss:weylvector} Choose $\rho_v\in\mathfrak{h}^*$ such that \begin{equation} \label{eq:rho-v} 2\langle\rho_v, a_v(x)\rangle =\langle b_v(x), a_v(x)\rangle \end{equation} for all $x\in X$. For each $v'\in \mathtt{Sk}(v)$ we define $$\rho_{v'}:=\rho_v+\sum_{\alpha\in \Delta^+_{\mathit{an}}(v')-\Delta^+_{\mathit{an}}(v)}\alpha -\sum_{\alpha\in \Delta^+_{\mathit{iso}}(v')-\Delta^+_{\mathit{iso}}(v)}\alpha. $$
Note that the formula~(\ref{eq:rho-v}) holds for all $v'\in\mathtt{Sk}(v)$.
\begin{Rem} If $\rho_v=\rho_{v_0}$ and $v\in \mathtt{Sp}(v_0)$, then $v=v_0$. \end{Rem} The collection of $\rho_{v'}$, $v'\in\mathtt{Sk}(v)$, is called the Weyl vector. The choice of $\rho_v$ is not unique. Weyl vectors play an important role in Lie theory.
\subsection{Structure of $\operatorname{Aut}_\mathcal{R}(v)$ } \label{ss:autv}
The action of $W(v)$ on $\mathtt{Sk}(v)$ extends to an action of $\operatorname{Aut}_\mathcal{R}(v)$.
\begin{prp} \label{prp:Aut-action-skeleton} There is a unique action of $\operatorname{Aut}_\mathcal{R}(v)$ on $\mathtt{Sk}(v)$ such that for any $u\in\mathtt{Sk}(v)$ and $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$, $ b_{\gamma(u)}(x)=\gamma(b_u(x)). $ \end{prp} \begin{proof} Uniqueness follows from \ref{crl:unique-in-sk}. It is therefore sufficient to verify that for each $u\in\mathtt{Sk}(v)$ and $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$ there is $u'\in\mathtt{Sk}(v)$ satisfying the property $b_{u'}(x)=\gamma(b_u(x))$. We proceed as follows. We present $\gamma=\gamma''\circ\gamma'$ where $\gamma':v\to v'$ is a composition of reflexions and $\gamma''$ is a composition of a homothety with a tautological arrow. Choose a path $$ v=v_0\stackrel{r_{x_1}}{\to}\ldots\stackrel{r_{x_k}}{\to}v_k=u $$ of reflexions connecting $v$ with $u$. Since the Cartan data at $v$ and at $v'$ are $D$-equivalent, there is a namesake path $$ v'=v'_0\stackrel{r_{x_1}}{\to}\ldots\stackrel{r_{x_k}}{\to}v'_k=u' $$ defining $u'\in\mathtt{Sk}(v)$. One obviously has $b_{u'}(x)=\gamma(b_u(x))$ which proves the claim. \end{proof}
\begin{crl} \label{crl:Aut-action-roots} The action of $\operatorname{Aut}_\mathcal{R}(v)$ on $\mathfrak{h}^*$ preserves $\Delta^\mathit{re}$, as well as $\Delta_\mathit{iso}$, $\Delta_\mathit{an}$, $\Delta_\mathit{nr}$. \end{crl} \begin{proof} The first claim follows from the formula $b_{\gamma(u)}(x)=\gamma(b_u(x))$. The rest follows from the fact that $u$ and $u'=\gamma(u)$ have $D$-equivalent Cartan data. \end{proof}
The group $\operatorname{Aut}_\mathcal{R}(v)$ has a trivial part which we now describe. \begin{dfn} An automorphism $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$ is called irrelevant if it can be presented as a composition of a homothety and a tautological arrow. \end{dfn} The group of irrelevant automorphisms identifies with \begin{equation}
K(v)=\{\theta:\mathfrak{h}\to\mathfrak{h}\mid \forall x\in X\ \ \theta(a(x))\in\mathbb{C}^*a(x),\ \theta^*(b(x))=b(x)\}. \end{equation}
\begin{lem} \label{lem:Kisnormal} $K(v)$ is a normal subgroup of $\operatorname{Aut}_\mathcal{R}(v)$. \end{lem} \begin{proof} $K(v)$ is the kernel of the action of $\operatorname{Aut}_\mathcal{R}(v)$ on $\Delta^\mathit{re}$. \end{proof}
We can easily describe the image of $\operatorname{Aut}_\mathcal{R}(v)$ in the automorphisms of $\mathtt{Sk}(v)$. The description of the action presented above implies that the automorphism of $\mathtt{Sk}(v)$ defined by $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$ is uniquely determined by the target $v'$ of $\gamma':v\to v'$ where $\gamma'$ is the composition of reflexions appearing in the decomposition of $\gamma$. The vertex $v'\in\mathtt{Sk}(v)$ has the Cartan datum $D$-equivalent to that of $v$. This identifies the image of $\operatorname{Aut}_\mathcal{R}(v)$ with the set of the vertices on $\mathtt{Sk}(v)$ satisfying this property.
\subsubsection{} We denote by $\mathtt{Sk}^D(v)$ the subset of (the vertices of) $\mathtt{Sk}(v)$ consisting of the vertices whose Cartan data are $D$-equivalent to that of $v$. The set $\mathtt{Sk}^D(v)$ is endowed with the group structure induced from the group structure on $\operatorname{Aut}_\mathcal{R}(v)$. It is combinatorially described using ``namesake path'' construction described in the proof of Proposition~\ref{prp:Aut-action-skeleton}. By construction we have an isomorphism \begin{equation} \label{eq:skd} \operatorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v). \end{equation}
The composition $W(v)\stackrel{i}{\to}\operatorname{Aut}_\mathcal{R}(v)\to\mathtt{Sk}^D(v)$ is injective as $W(v)\cap K(v)$ is trivial by~\ref{rem:uniqueness}.
\subsubsection{} The group $\mathtt{Sk}^D(v)$ has a subgroup $\mathtt{Sp}^D(v)$ defined as the subset of $\mathtt{Sk}^D(v)$ belonging to $\mathtt{Sp}(v)$. The following proposition summarizes what we know about the structure of the automorphism group.
\begin{prp} \label{prp:structure-Aut} \begin{itemize} \item[1.] $W(v)\subset\operatorname{Aut}_\mathcal{R}(v)$ is a normal subgroup. \item[2.] $K(v)\subset \operatorname{Aut}_\mathcal{R}(v)$ is a normal subgroup. \item[3.] There is a canonical isomorphism $\operatorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v)$. \item[4.] $\mathtt{Sk}^D(v)=W(v)\rtimes\mathtt{Sp}^D(v)$. \end{itemize} \end{prp}
\begin{proof}Only Claim 4 needs proof. The intersection $W(v)\cap\mathtt{Sp}^D(v)$ is trivial. Indeed, let $v'=w(v)\in\mathtt{Sp}^D(v)$. Any isotropic reflexion preserves the set of positive anisotropic roots, so $\Delta^+_\mathit{an}(v)=\Delta^+_\mathit{an}(v')$. Thus, $w=1$ by~\ref{crl:W-len}.
Every automorphism $\phi:v\to v$ decomposes as $$ v\stackrel{\psi}{\to}v'\stackrel{\eta}{\to}v $$ where $\psi$ is a composition of reflexions and $\eta$ is a composition of a homothety with a tautological arrow. By~\ref{lem:decomposition0} $\psi$ decomposes as $v\stackrel{\rho}{\to}v''\stackrel{\gamma_w}{\to}v'$ where $\rho$ denotes a composition of isotropic reflexions and $\gamma_w$ is the unique arrow in $\mathtt{Sk}(v)$ connecting $v''$ with $v'=w(v'')$. The Cartan data of $v'=w(v'')$ and $v''$ are $D$-equivalent (actually, the same), so $\mathtt{Sk}^D(v)$ is generated by $W$ and $\mathtt{Sp}^D$. \end{proof}
\begin{crl} \label{crl:invariantideals} Let $\mathfrak{g}^\mathtt{U}$ be the universal root algebra at a component $\mathcal{R}_0$, $v\in\mathcal{R}_0$. An ideal $J(v)$ of $\mathfrak{g}^\mathtt{U}(v)$ such that $J(v)\cap\mathfrak{h}=0$ defines a root algebra $\mathfrak{g}$ having the $v$-component $\mathfrak{g}(v)=\mathfrak{g}^\mathtt{U}(v)/J(v)$ if and only if it is invariant with respect to $\mathtt{Sp}^D(v)$. In particular, if $\mathcal{R}_0$ has no isotropic reflexions, any ideal of $\mathfrak{g}^\mathtt{U}$ having zero intersection with $\mathfrak{h}$ defines a root algebra. \end{crl} \begin{proof} By~\ref{sss:invariantideals} one has to verify that $J(v)$ is invariant with respect to any $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$. We will verify that any ideal is invariant with respect to the action of $W(v)$ and of $K(v)$. The Weyl group is generated by reflections that are inner automorphisms by formula (\ref{eq:sigma:gtog}). So, the Weyl group preserves all ideals. Any $\gamma\in K(v)$ preserves the weights, so it preserves the weight spaces. Thus, it multiplies by a constant each $\mathfrak{g}^\mathtt{U}_\alpha$ where $\alpha$ is a simple root or its opposite. Since any root $\beta$
of $\mathfrak{g}^\mathtt{U}$ is either a sum of simple roots or a sum of their opposites,
$\gamma$ acts on each $\mathfrak{g}^\mathtt{U}_\beta$ by multiplication by a constant. Since any ideal of $\mathfrak{g}^\mathtt{U}(v)$ is a sum of its weight subspaces, any $\gamma\in K(v)$ preserves it. Proposition~\ref{prp:structure-Aut} now implies the claim. \end{proof}
We will see in~\ref{rootalg} that for all admissible fully reflectable indecomposable components
$\mathcal{R}_0$, except for $\mathfrak{gl}(1|1)$, any ideal $J(v)$ of $\mathfrak{g}^\mathtt{U}(v)$ having zero intersection with $\mathfrak{h}$ is automatically $\mathtt{Sp}^D(v)$-invariant and therefore gives rise to a root algebra.
\begin{crl} \label{crl:all-different} Assume that no Cartan data at different vertices of $\mathtt{Sp}(v)$ are $D$-equivalent. Then $\operatorname{Aut}_\mathcal{R}(v)$ is the direct product of the Weyl group $W$ and the subgroup $K$ of irrelevant automorphisms. If, moreover, the Cartan matrix $A(a,b)$ at $v$ has no zero rows and
$\dim\mathfrak{h}=2|X|-\operatorname{rk} A(a,b)$ is minimal possible, $K(v)$ is a commutative unipotent group. \end{crl} \begin{proof}
Under the assumption, $\mathtt{Sp}^D(v)$ is trivial and so $\mathtt{Sk}^D(v)=W(v)$. Since $W(v)$ is a normal subgroup of $\operatorname{Aut}_\mathcal{R}(v)$, one has a direct decomposition $\operatorname{Aut}_\mathcal{R}(v)=W(v)\times K(v)$. \end{proof}
\begin{prp} \label{prp:finsuper} Let $\mathcal{R}_0$ be an admissible component with finite dimensional
$\mathfrak{g}^\mathtt{C}\ne\mathfrak{gl}(n|n)$. Then $\operatorname{Aut}_\mathcal{R}(v)=W(v)$. \end{prp} \begin{proof} By~\cite{S1} the conditions of Corollary~\ref{crl:all-different} are fulfilled. The rest follows from triviality of the group $K$. \end{proof}
Note that for $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(n|n)$ one has $\operatorname{Aut}_\mathcal{R}(v)=W(v)\rtimes \mathbb{Z}_2$, see~\ref{sss:glmn}.
\subsubsection{Example} \label{sss:gl12}
Look at the root datum containing the root algebra $\mathfrak{gl}(1|2)$. Here $X=\{1,2\}$, $\mathfrak{h}=\operatorname{Span}\{e,h_1,h_2\}$ and $\mathfrak{h}^*=\operatorname{Span}\{\epsilon,\delta_1,\delta_2\}$ (the dual basis), the spine $\mathtt{Sp}(v_0)$ has three vertices \begin{itemize} \item[$v_0$:] $a(1)=-e-h_1$, $a(2)=h_1-h_2$, $b(1)=\epsilon-\delta_1$, $b(2)=\delta_1-\delta_2$, $p(1)=1$, $p(2)=0$; \item[$v_1$:] $a(1)=e+h_1$, $a(2)=-e-h_2$, $b(1)=\delta_1-\epsilon$, $b(2)=\epsilon-\delta_2$, $p(1)=p(2)=1$; \item[$v_2$:] $a(1)=h_1-h_2$, $a(2)=e+h_2$, $b(1)=\delta_1-\delta_2$, $b(2)=\delta_2-\epsilon$, $p(1)=0$, $p(2)=1$. \end{itemize}
The Weyl group $W(v_0)$ has two elements, with the nonunit interchanging $\delta_1$ with $\delta_2$. The group $\operatorname{Aut}_\mathcal{R}(v_0)$ coincides with $W(v_0)$ by~\ref{prp:finsuper}.
\section{The Coxeter property of the skeleton} \label{sec:coxeter2}
In this section we define Coxeter graphs and prove that the skeleton $\mathtt{Sk}(v)$ satisfies this property. The notion of Coxeter graph generalizes that of a Coxeter group. The Cayley graph of a group $G$ with respect to a set of generators $S=\{s_i\}$ is Coxeter iff $(G,S)$ is a Coxeter group. There are, however, Coxeter graphs that do not come from Coxeter groups. It is an interesting question to describe all finite Coxeter graphs.
\subsection{Coxeter graphs}
Let $X$ be a finite set, $G$ a graph with the set of vertices $V$ and the set of edges $E$, endowed with a marking $r:E\to X$. We assume that $G$ is connected and that the edges having a common end have different markings. We denote by $r_x:v\to v'$ the edge connecting $v$ and $v'$ marked with $x$. By the assumption, for a chosen $v$ such an edge is unique, if it exists. Note that $r_x$ comes with a choice of direction for the edge connecting $v$ and $v'$.
A path $\phi:v\to v'$ consists of a sequence of arrows $$ v=v_0\stackrel{r_{x_1}}{\to}\dots\stackrel{r_{x_n}}{\to}v_n=v'. $$ We denote $\ell(\phi)=n$ the length of $\phi$.
The path $\phi^{-1}:v'\to v$ is obtained from $\phi$ by changing the direction of all arrows.
\begin{dfn} A Coxeter loop $\phi:v\to v$ is one of the following. \begin{itemize} \item[1.] $\phi=r_x^2$ (These are called the trivial loops.) \item[2.] $\phi=(r_y\circ r_x)^m$. (These are called the loops of length $2m$). \end{itemize} \end{dfn}
\begin{dfn} Let $\phi,\psi:v\to v'$ be a pair of paths. If the concatenation $\psi^{-1}\circ\phi$ is a Coxeter loop, we will say that one has an elementary Coxeter modification $\phi\Rightarrow\psi$. \end{dfn}
\begin{dfn} A Coxeter modification from $\phi$ to $\psi$ is a presentation $\phi=\phi_1\circ\phi_2\circ\phi_3$, $\psi=\psi_1\circ\psi_2\circ\psi_3$ such that $\phi_1=\psi_1$, $\phi_3=\psi_3$ and one has an elementary Coxeter modification $\phi_2\Rightarrow\psi_2$.
\end{dfn}
\begin{dfn} A marked graph $(X,G,r)$ is called Coxeter if any pair of paths from $v$ to $v'$ can be connected by a sequence of Coxeter modifications. \end{dfn}
\subsubsection{} As an example, take a group $\Gamma$ generated by a set $S$ of elements with $s^2=1$. Let $G$ be the corresponding Cayley graph, where the vertices are $g\in\Gamma$, $X=S$, and $g$ and $h$ are connected by the edge marked by $s$ if $g=hs$. Then $\Gamma$ is a Coxeter group iff $G$ is a Coxeter graph.
Let $v\in\mathcal{R}$. We look at the skeleton $\mathtt{Sk}(v)$ as a marked graph, with the reflection $r_x$ marked with $x\in X$. Conversely, one has the following easy result.
\begin{prp} Let $(X,G,r)$ be a Coxeter graph such that for any $v\in V$ and $x\in X$ there exists an edge $r_x:v\to v'$. Then $(X,G,r)$ is the Cayley graph of a Coxeter group if and only if for any pair $x,y\in X$ the length $2m_{xy}$ of the $(x,y)$ loop $\phi=(r_y\circ r_x)^{m_{xy}}:v\to v$ is independent of $v$. \end{prp} \begin{proof} The necessity of the condition is clear. Define $\Gamma$ as the Coxeter group generated by $s_x,\ x\in X$ subject to the relations $(s_xs_y)^{m_{xy}}=1$. The isomorphism of $(X,G,r)$ with the Cayley graph of $\Gamma$ is defined by an arbitrary choice of a vertex $v\in V$ and the assignment of $s_x$ to $r_x$. Coxeterity of the graph implies that any two paths $v\to v'$ in $G$ define the same image in $\Gamma$. \end{proof}
Here is our main result.
\begin{thm} \label{thm:skeleton-coxeter} \begin{itemize} \item[1.]$\mathtt{Sk}(v)$ is a Coxeter graph. \item[2.] Nontrivial Coxeter loops may have length $2m$ where $m=2,3,4$ or $6$. \end{itemize} \end{thm}
The proof of the theorem is based on a presentation of the skeleton $\mathtt{Sk}(v)$ as the $1$-skeleton of a convex polyhedron. In the following subsection we present basic facts about convex polyhedra. In \ref{ss:proof-skeleton-coxeter} we construct a polyhedron having $\mathtt{Sk}(v)$ as its $1$-skeleton. This easily implies Theorem~\ref{thm:skeleton-coxeter}.
\begin{rem} Note that in the case when $\mathcal{R}_0$ is fully reflectable and all reflexions are anisotropic the skeleton $\mathtt{Sk}(v)$ is isomorphic to the
Cayley graph of the Weyl group.
\end{rem} \subsection{Convex polyhedra: generalities}
\subsubsection{Polytopes} Recall that a polytope $P$ in a real finite dimensional affine space $E$ is defined as the convex hull of a finite set of points. The dimension of $P$ is, by definition, the dimension of the affine span of $P$.
A polytope $P$ of dimension $n$ has a stratification $P=P_0\sqcup\ldots\sqcup P_n$, where $P_n$ is the interior of $P$ in its affine span and $P_k$ for $k<n$ consists of points $v$ for which the intersection of all supporting hyperplanes at $v$ has dimension $k$. Thus, $P_0$ is the set of vertices of $P$ and $P$ is the convex hull of $P_0$.
\subsubsection{Polyhedra}
In this paper we use a slightly generalized notion of a convex polyhedron. We collect all necessary material here.
\begin{Dfn} A polyhedron $\mathcal{P}$ in $E$ is a closed convex set such that any $v\in\mathcal{P}$ has a neighborhood isomorphic to a neighborhood of a point of a polytope.~\footnote{The isomorphism is meant to be given by an affine transformation. } \end{Dfn}
The dimension of a polyhedron is the dimension of its affine span. The stratification of points of a convex polytope extends to a stratification of a polyhedron: one has $\mathcal{P}=\mathcal{P}_0\sqcup\dots\sqcup\mathcal{P}_n$ where $\mathcal{P}_n$ is the interior of $\mathcal{P}$ in its affine span and $\mathcal{P}_k$ consists of the points for which the intersection of all supporting hyperplanes has dimension $k$. In particular, $\mathcal{P}_0$ is the set of vertices of $\mathcal{P}$. This is a discrete subset of $E$, not necessarily finite. Moreover, $\mathcal{P}$ is in general not a convex hull of $\mathcal{P}_0$.
For any $v\in\mathcal{P}_{n-1}$ there is a unique supporting hyperplane at $v$. Its intersection with $\mathcal{P}$ is a face of dimension $n-1$. Each of them is a polyhedron of dimension $n-1$ and their union is $\partial\mathcal{P}$.
The following notation is used below. A linear hyperplane $H\subset V$ and $v\in E$ define an affine hyperplane $v+H$. The complement $V\setminus H$ consists of two components; their closures are the halfspaces defined by $H$ and denoted by $H^+$ and $H^-$. In the same manner $v+H^+$ denotes the affine halfspace.
Note that $\mathcal{P}$ coincides with the intersection of the affine halfspaces $v+H^+$ defined by the faces of $\mathcal{P}$ of maximal dimension.
\begin{dfn} Let $A$ be the set of supporting hyperplanes $v_\alpha+H_\alpha$ of $\mathcal{P}$ and let $v+H_\alpha^+$ be the affine halfspaces containing $\mathcal{P}$. The cone of $\mathcal{P}$, $C(\mathcal{P})$ is defined as the intersection $\cap_{\alpha\in A}H^+_\alpha.$ \end{dfn}
Obviously, if $A_0\subset A$ satisfies the condition $\mathcal{P}=\cap_{\alpha\in A_0}(v_\alpha+H^+_\alpha)$ then $C(\mathcal{P})=\cap_{\alpha\in A_0} H^+_\alpha$. In particular, $C(\mathcal{P})$ is the intersection of the linear halfspaces $H_\alpha$ defined by the $(n-1)$-faces of $\mathcal{P}$.
Note that by definition $C(\mathcal{P})$ is a convex cone in $V$ and $\mathcal{P}$ is invariant under the action of $C(\mathcal{P})$: for $\xi\in\mathcal{P}$ and $\eta\in C(\mathcal{P})$ one has $\xi+\eta\in\mathcal{P}$.
\begin{lem} \begin{itemize} \item[1.] If $C(\mathcal{P})\ne\{0\}$ then $\partial\mathcal{P}$ is contractible. \item[2.] $C(\mathcal{P})=\{0\}$ iff $\mathcal{P}$ is compact. \item[3.] $\mathcal{P}$ is compact iff it is a polytope. \end{itemize} \end{lem} \begin{proof} Choose an interior point $\zeta\in\mathcal{P}$ and define the projection from $\partial\mathcal{P}$ to the unit sphere $S$ with the center at $\zeta$ by the formula $$\phi(\xi):=(\zeta+\mathbb R^+(\xi-\zeta))\cap S.$$ Since $\mathcal{P}$ is convex, $\phi$ is injective. From the definition of $\mathcal{P}$ we see that $\xi\in S$ is not in the image of $\phi$ iff $\xi\in\zeta- C(\mathcal{P})$. Set $U=(\zeta- C(\mathcal{P}))\cap S$. The restriction of $\phi$ to any $(n-1)$-face is a stereographic projection. Since any point of $\mathcal{P}$ has a neighborhood isomorphic to a neighborhood of a point of a polytope,
the map $\phi$ is an open embedding and so it defines a homeomorphism of $\partial\mathcal{P}$ with $S\setminus U$. If $C(\mathcal{P})\ne\{0\}$, $U$ is a nonempty convex subset of $S$, so $S\setminus U$ is contractible. This proves Claim 1.
To prove Claim 2, note that $C(\mathcal{P})$ acts on $\mathcal{P}$: if $c\in C(\mathcal{P})$ and $p\in\mathcal{P}$ then $p+c\in\mathcal{P}$. Therefore, if $C(\mathcal{P})\ne\{0\}$, $\mathcal{P}$ cannot be compact. On the contrary, if $C(\mathcal{P})=\{0\}$, $\partial\mathcal{P}$ is homeomorphic to a sphere, so it is compact. $\mathcal{P}$ is the convex hull of its boundary, so it is also compact.
Finally, if $\mathcal{P}$ is compact then it is a convex hull of its boundary that is a finite union of compact polyhedra of smaller dimension. This implies that $\mathcal{P}$ is the convex hull of the set of its vertices. \end{proof}
The only result we need in our study of Coxeter property of the skeleton is the following. \begin{crl} \label{crl:h1} For any polyhedron $\mathcal{P}$ {of dimension $>2$} one has $H^1(\partial\mathcal{P})=0$. \end{crl} \qed
\subsection{A polyhedron defined by $\mathtt{Sk}(v)$} \label{ss:proof-skeleton-coxeter}
Let $\mathcal{R}_0$ be an admissible component of a root groupoid, $n=|X|$ and $\mathtt{Sk}(v)$ a skeleton. Let $Q_{\mathbb R}:=Q\otimes_{\mathbb Z}\mathbb R$ and for any vertex $u$ of $\mathtt{Sk}(v)$ set $Q^+_{u,\mathbb R}:=\sum_{\alpha\in\Sigma_u}\mathbb R^+\alpha$. \begin{lem} \label{lem:lambda} There exists an injective map $\lambda:\mathtt{Sk}(v)\to Q$, $u\mapsto \lambda_u$ such that
$$\lambda_u-\lambda_{u'}=\sum_{\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(u')}\alpha.$$
\end{lem}
\begin{proof} Choose $\lambda_v=0$, and set $$\lambda_u:=\sum_{\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(v)}\alpha.$$
Here we use Corollary 5.3.7 and Corollary 5.3.9 of the main text to check injectivity of $\lambda$.
\end{proof} We define \begin{equation} \label{eq:thepolyhedron} \mathcal{P}=\bigcap_{u\in\mathtt{Sk}(v)}(\lambda_u- Q^+_{u,\mathbb R}) \end{equation} and \begin{equation} \label{eq:q++} Q^{++}_\mathbb{R}=\bigcap_{u\in\mathtt{Sk}(v)}Q^+_{u,\mathbb R}. \end{equation}
\begin{prp} \label{prp:ppolyhedron} $\mathcal{P}$ is a polyhedron in $Q_\mathbb{R}$ and $C(\mathcal{P})=-Q^{++}_\mathbb{R}$. \end{prp} \begin{proof} Set $\lambda_v=0$. Let $f$ be the linear function on $Q_\mathbb{R}$ such that $f(b_v(x))=1$ for all $x\in X$. Denote $$H_N:=\{\xi\in Q_{\mathbb R}\mid f(\xi)= N\},\ H_N^+:=\{\xi\in Q_{\mathbb R}\mid f(\xi)\geq N\}, $$ $$\mathcal{P}_N:=\mathcal{P}\cap H_N^+,\ \ \mathtt{Sk}_N(v):=\{u\in\mathtt{Sk}(v)\mid f(\lambda_u)\geq N\},\ \ \mathcal{Q}_N:=H^+_N\cap\bigcap_{u\in \mathtt{Sk}_N(v)}(\lambda_u- Q^+_{u,\mathbb R}). $$
The following claims are obvious:
\begin{enumerate}
\item $\mathtt{Sk}_N(v)$ is finite (the vertices are in $-Q^+(v)$).
\item $\mathcal{P}=\bigcup_{N<0}\mathcal{P}_{N}$,
\item $\mathcal{P}_N\subset\mathcal{Q}_N$,
\item $\mathcal{Q}_N$ is a convex polytope (compact, bounded by finitely many hyperplanes).
\end{enumerate} We intend to show that $\mathcal{P}_N=\mathcal{Q}_N$ and that the vertices of the polytope $\mathcal{P}_N$ belonging to $H^+_N\setminus H_N$ are precisely
$\{\lambda_u|f(\lambda_u)>N\}$. This implies that $\mathcal{P}$ is a polyhedron. In fact, for $\mu\in\mathcal{P}$ choose $N$ so that $f(\mu)>N$. Then $\mu\in\mathcal{P}_N=\mathcal{Q}_N$, so $\mu$ has a neighborhood that is a neighborhood in a polytope.
Note that all $\lambda_u$ are vertices of $\mathcal{P}$ since there is a hyperplane in $Q_\mathbb{R}$ intersecting $\mathcal{P}$ at one point $\lambda_u$. For the same reason all $\lambda_u$ satisfying $f(\lambda_u)>N$ are vertices of $\mathcal{Q}_N$. In order to show that $\mathcal{Q}_N=\mathcal{P}_N$, it is sufficient to verify that any vertex $\mu$ of $\mathcal{Q}_N$ belongs to $\mathcal{P}$. The 1-skeleton of $\mathcal{Q}_N$ is connected, so it is enough to verify that any edge of $\mathcal{Q}_N$ connecting $\lambda_u$ with another vertex $\mu$ belongs to $\mathcal{P}$. We know all edges of $\mathcal{Q}_N$ in a neighborhood of $\lambda_u$: they are just $b_u(x)$, $x\in X$. If $x$ is reflectable at $u$, there is an arrow $r_x:u\to u'$, and $\mu$ lies on the segment connecting $\lambda_u$ with $\lambda_{u'}$. If $b_u(x)$ is non-reflectable, $b_u(x)\in Q^{++}_\mathbb{R}$, so $\lambda_u-\mathbb{R}^+ b_u(x)$ is the infinite edge of $\mathcal{P}$ containing $\mu$.
The minus sign in the formula for $C(\mathcal{P})$ is due to the minus sign in the formula (\ref{eq:thepolyhedron}). \end{proof}
\begin{lem} Let $\mathcal{P}$ be bounded. Then $\mathcal{R}_0$ is fully reflectable and $\mathtt{Sk}(v)$ is finite. \end{lem} \begin{proof} $\mathtt{Sk}(v)$ embeds into the intersection of $\mathcal{P}$ with a lattice; therefore, it is finite. If $x\in X$ is not reflectable at $u\in\mathtt{Sk}(v)$, the root $b_u(x)$ belongs to $Q^+_u$, and, therefore, to all $Q^+_{u'},\ u'\in\mathtt{Sk}(v)$. This contradicts the condition $Q^{++}_\mathbb{R}=\{0\}$. \end{proof}
\
We will now be able to describe the faces of $\mathcal{P}$.
Let $Y\subset X$, $|Y|=k$ and $u\in \mathtt{Sk}(v)$. Let $H_Y(u)$ be the affine $k$-plane passing through $\lambda_u$ and spanned by $b_u(y), y\in Y$.
Set $F_Y(u):=\mathcal{P}\cap H_Y(u)$. By definition $F_{\emptyset}(u)=\lambda_u$.
\begin{lem}\label{lem:faces} \begin{itemize} \item[1.] Any $k$-dimensional face of $\mathcal{P}$ is of the form $F_Y(u)$ for a certain $u\in\mathtt{Sk}(v)$ and a $k$-element set $Y\subset X$. \item[2.] One has $$F_Y(u)=\bigcap_{u'\in \mathtt{Sk}_Y(u)}(\lambda_{u'}-\sum_{y\in Y}\mathbb R^+b_{u'}(y)), $$ where $\mathtt{Sk}_Y(u)$ denotes the connected component of $u\in\mathtt{Sk}(v)$ in the subgraph spanned by the arrows $r_y$ for $y\in Y$. \end{itemize} \end{lem}
\begin{proof} By the proof of
\ref{prp:ppolyhedron}, the boundary $\partial\mathcal{P}$ of $\mathcal{P}$ lies in the union of the
hyperplanes $H_Y(u)$ for all $(n-1)$-element subsets $Y$ of $X$. It is clear that $\lambda_{u'}\in F_Y(u)$ if and only if $\lambda_{u'}-\lambda_u\in -\sum_{y\in Y}\mathbb R^+b_{u}(y)$. Note that
$\lambda_{u}-\lambda_{u'}=
\sum_{\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(u')}\alpha$, so
each of $\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(u')$ lies in the non-negative span of $b_u(y)$ for $y\in Y$. Consider
the arrow $u\xrightarrow{\gamma}u'$. Write it as $\gamma=r_{x_s}\ldots r_{x_1}$ so that $s$ is minimal possible. Let us show that all $x_i\in Y$.
Let $\gamma_i=r_{x_i}\dots r_{x_1}$, $\gamma_i:u\to u_i$ and $\beta_i=b_{u_{i-1}}(x_i)$. Choose minimal $i$ such that $x_i\notin Y$. Then $\beta_i\equiv b_u(x_i)\mod \sum_{y\in Y}\mathbb R b_u(y)$ --- a contradiction.
That proves (2).
Now for $k=n-1$ the statement (1) follows since (2) implies that $F_Y(u)$ has codimension $1$. For general $k$ it follows by induction in codimension.
\end{proof}
\begin{crl}
\label{crl:identification} The map $\lambda$ as in Lemma~\ref{lem:lambda} establishes a one-to-one correspondence between $\mathtt{Sk}(v)$ and the set of vertices of $\mathcal{P}$. Moreover, $\mathtt{Sk}(v)$ identifies with the $1$-skeleton of $\mathcal{P}$ so that the reflexions $r_x:u\to u'$ in $\mathtt{Sk}(v)$ identify with the edges connecting $\lambda_u$ with $\lambda_{u'}$.
\end{crl}
\begin{crl} \label{lem:twodim} The two-dimensional face $F_Y(u)$ of $\mathcal{P}$ defined by a two-element subset $Y$ of $X$ is bounded iff $\mathtt{Sk}_Y(u)$ is the finite skeleton of a rank 2 fully reflectable component. In this case $\mathtt{Sk}_Y(u)$ is isomorphic to the Cayley graph of the dihedral group $D_m$ where $m=2,3,4$ or $6$. A non-compact face $F_Y(u)$ has a non-compact contractible boundary.~\footnote{The set of vertices in it is linearly ordered. If there is a smallest (or greatest) vertex, it has a non-compact edge corresponding to a non-reflectable root.}
\end{crl}
\begin{proof} The claim immediately follows from Lemma ~\ref{lem:faces}. The allowable values for $m$ result from a well-known classification of rank $2$ fully reflectable components with finite skeleton, see, for example, \cite{S3}.
\end{proof} \subsubsection{Proof of Theorem~\ref{thm:skeleton-coxeter}} By~\ref{crl:identification} $\mathtt{Sk}(v)$ identifies with the $1$-skeleton of the polyhedron $\mathcal{P}$. By~\ref{crl:h1} any pair of paths leading from $u$ to $u'$ in $\mathtt{Sk}(v)$ is connected by relations defined by compact $2$-faces. Finally, by~\ref{lem:twodim}, compact $2$-faces give rise to Coxeter relations with $m=2,3,4,6$.
\section{A trichotomy for admissible fully reflectable components} \label{sec:trichotomy} \subsection{Overview} From now on we will consider only indecomposable admissible fully reflectable components.
In this section we define three types of such components: finite, affine and indefinite. We investigate the structure of the sets of roots of corresponding root algebras. Expectedly, the trichotomy for admissible components is closely connected to the trichotomy for the types of Cartan matrices defined by Kac in~\cite{Kbook}, Theorem 4.3. \subsubsection{} We keep the notation of~\ref{sss:notation-coxeter}. Fix an indecomposable admissible fully reflectable component $\mathcal{R}_0$ and $v\in\mathcal{R}_0$. Let $\mathfrak{g}$ be a root Lie superalgebra supported at $\mathcal{R}_0$.
We denote by $\Delta=\Delta(\mathfrak{g})$ the set of roots of $\mathfrak{g}$ and by $\mathfrak{r}$ the kernel of the canonical map $\mathfrak{g}\to\mathfrak{g}^\mathtt{C}$. Recall that $\mathfrak{r}$ is the maximal ideal of $\mathfrak{g}$ having zero intersection with $\mathfrak{h}$.
In this section we will deduce certain information about the ideal $\mathfrak{r}$ for different types of components, see~\ref{crlfin}, \ref{corfindim}. In particular, we will be able to deduce, for certain types of components, that they admit a unique root Lie superalgebra $\mathfrak{g}^\mathtt{C}$.
\subsection{Roots} Recall that $\Sigma_{v'}=\{b_{v'}(x)\}_{x\in X}$ and $Q^+_{v'}:=\mathbb{Z}_{\geq 0}\Sigma_{v'}\subset Q, \ Q^+:=Q^+_v.$ We have $\Delta\subset (-Q^+\cup Q^+)$. Recall~\ref{sss:realinall} that $$ \Delta^\mathit{re}=\bigcup_{v'\in\mathtt{Sk}(v)} \Sigma_{v'}\subset\Delta $$ and the root spaces $\mathfrak{g}_{\alpha}$, $\alpha\in\Delta^\mathit{re}$, are one-dimensional, in particular, are purely even or purely odd. This yields a decomposition of the family of real roots into even and odd part
$$\Delta^\mathit{re}=\Delta^{\mathit{re},0}\sqcup\Delta^{\mathit{re},1}. $$ For anisotropic $\alpha\in\Delta^\mathit{re}$ the elements $\alpha^\vee\in\mathfrak{g}\langle\alpha\rangle\cap\mathfrak{h}$ are defined so that $\langle\alpha,\alpha^\vee\rangle=2$.
We define
$$
\Delta^\mathit{im}=\{\alpha\in \Delta|\ \mathbb{Q}\alpha\cap\Delta^\mathit{re}=\emptyset\}. $$
For each $v'\in\mathtt{Sk}(v)$ we have the triangular decompositions $$\Delta=\Delta^+_{v'}\sqcup (-\Delta^+_{v'}),\ \ \text{ where }\Delta^+_{v'}:=\Delta\cap Q^+_{v'}.$$
\begin{prp}\label{crlDeltare} \begin{enumerate} \item For $v'\stackrel{r_x}{\to}v''$ with $x\in X$, let $\alpha=b_{v'}(x)$. One has $$\Delta^+_{v''}=\left\{ \begin{array}{ll} \{-\alpha\}\cup \Delta^+_{v'} \setminus\{\alpha\} \ & \text{ if } 2\alpha\not\in\Delta\\ \{-\alpha,-2\alpha\}\cup \Delta^+_{v'} \setminus\{\alpha,2\alpha\}\ & \text{ if } 2\alpha\in\Delta.\\ \end{array}\right.$$ \item For any $v'$ one has $\Delta^\mathit{im}\cap \Delta^+_{v'}=\Delta^\mathit{im}\cap \Delta^+_{v}$. \item
$\Omega(\mathfrak{r})\subset \Delta^\mathit{im}$, except for the rank one algebra $\widetilde\mathfrak{g}=\mathfrak{g}^\mathtt{U}$ with $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$, see~\ref{rank1}. \item If $\mathcal{R}_0$ has rank greater than one, then $$\Delta=\Delta^\mathit{re}\cup\Delta^\mathit{im}\cup
\{2\alpha|\ \alpha\in\Delta^{\mathit{re},1}\ \text{ is anisotropic}\}.$$ \end{enumerate} \end{prp} \begin{proof} Claim (1) is standard and (2) follows from (1). Claims (3) and (4) follow from~\ref{corgalpha}. \end{proof}
\subsection{Types of $\mathcal{R}_0$} \subsubsection{The case of Kac--Moody Lie algebras} In~\cite{Kbook}, Thm. 4.3 Kac-Moody Lie algebras are divided into three types according to the corresponding type of Cartan matrices as follows. Let $V:=\mathbb{R}\otimes_\mathbb{Z} Q$; for $v\in V$ we set $v>0$ (resp., $v\geq 0$) if $v=\sum_{\alpha\in\Sigma} k_{\alpha}\alpha$ with $k_{\alpha}> 0$ (resp., $k_{\alpha}\geq 0$) for each $\alpha\in\Sigma$.
View an indecomposable Cartan matrix $A$ as a linear operator on $V$. It is given by the formula $$ A(v)=\sum_iv(\alpha_i^\vee)\alpha_i,\ v\in V. $$
By~\cite{Kbook}, Thm.4.3, $A$ satisfies exactly one of the following conditions \begin{itemize} \item $\exists v>0$ such that $Av>0$ (type (FIN)). \item $\exists v>0$ such that $Av=0$ (type (AFF)). \item $\exists v>0$ such that $Av<0$ (type (IND)). \end{itemize}
Moreover, one has \begin{itemize} \item (FIN) $Au\geq 0$ implies $u>0$ or $u=0$. \item (AFF) $Au\geq 0$ implies $u\in\mathbb{R}v$. \item(IND) $Au\geq 0$ with $u\geq 0$ implies $u=0$. \end{itemize}
It is proven there that the Kac-Moody Lie algebras of type (FIN) are all simple finite-dimensional Lie algebras, the Kac-Moody Lie algebras of type (AFF) have finite growth: they are always symmetrizable and can be obtained as (twisted) affinizations of simple finite-dimensional Lie algebras. The Kac-Moody algebras of indefinite type have infinite growth.
We present below a version of this trichotomy in terms of connected components of root groupoids. The component is required to be indecomposable and fully reflectable. Note that both conditions hold in the context of \cite{Kbook}, Thm. 4.3.
\subsubsection{} Let $\mathcal{R}_0$ be a component of the root groupoid with a fixed vertex $v$ and indecomposable $A(v)$. Set $$Q^{++}:=\displaystyle\bigcap_{v'\in\mathtt{Sk}(v)} Q^+_{v'}.$$ {Obviously, $Q^{++}=Q^{++}_\mathbb{R}\cap Q$.} Note that the sets $\Delta^\mathit{re}$ and $Q^{++}$ depend on the component $\mathtt{Sk}(v)$ only. One has $Q^{++}\cap \mathbb{Q}\alpha=0$ for each $\alpha\in\Delta^\mathit{re}$.
In the definition below we introduce three classes of components analogous to the classes (FIN), (AFF), (IND) of Cartan matrices defined in \cite{Kbook}, Thm. 4.3.
\begin{dfn} \label{dfn:types} We say that $\mathcal{R}_0$ {\em is of type } \begin{itemize} \item[(Fin)] if $Q^{++}=\{0\}$. \item[(Aff)] if $Q^{++}=\mathbb{Z}_{\geq 0}\delta$ for some $\delta\not=0$. \item[(Ind)] if $\mathcal{R}_0$ is not of type (Fin) or (Aff). \end{itemize} \end{dfn}
\subsubsection{Purely anisotropic case}\label{Deltareisoempty} Assume that all simple roots $b(x)$ at $v$ are anisotropic. Then
the Cartan matrices $A(v')$ are the same at all $v'\in\mathcal{R}_0$. Lemma~\ref{WorbitQ+} below shows that in this case the classes (Fin), (Aff) and (Ind) coincide with (FIN), (AFF) and (IND). Indeed, in this case $Q^{++}=\bigcap_{w\in W}w(Q^+)$ is the union of $W$-orbits belonging to $Q^+$. \begin{lem}\label{WorbitQ+}$ $ \begin{itemize} \item[1.] In the case {\rm(FIN)} the unique $W$-orbit lying in $Q^+$ is $\{0\}$. \item[2.] In the case {\rm(AFF)} all $W$-orbits lying in $Q^+$ are of the form $\{j\delta\}$ for $j\in\mathbb{Z}_{\geq 0}$ for some $\delta\ne 0$. \item[3.] In the case {\rm(IND)} the unique finite $W$-orbit lying in $Q^+$ is $\{0\}$; $Q^+$ contains an infinite $W$-orbit. \end{itemize}
\end{lem} \begin{proof} Notice that $Au\geq 0$ (resp., $Au=0$) for $u\in V\subset \mathfrak{h}^*$ means $u(\alpha^{\vee})\geq 0$ (resp., $u(\alpha^{\vee})=0$) for each $\alpha\in\Sigma$.
For $\nu=\sum_{\alpha\in\Sigma}k_{\alpha}\alpha\in Q^+$ set $\operatorname{ht} \nu:=\sum_{\alpha\in\Sigma}k_{\alpha}$.
Let $\nu\in Q^+$ be such that $W\nu\subset Q^+$
and $\operatorname{ht} \nu$ is minimal in its orbit. Viewing $\nu$ as an element of $V$ we have $\nu\geq 0$ and
$\operatorname{ht} r_{\alpha}\nu\geq \operatorname{ht}\nu$ for each $\alpha\in\Sigma$. Then $\nu(\alpha^\vee)\leq 0$ for all $\alpha\in\Sigma$ and therefore $A\nu\leq 0$. Hence $\nu=0$ in type {\rm(FIN)} and $\nu$ is proportional to $\delta$ in type {\rm(AFF)}.
In the remaining type {\rm(IND)}, assume $W\nu\subset Q^+$ is finite and $\operatorname{ht} \nu$ is maximal. Then $\nu(\alpha^\vee)\geq 0$ for all $\alpha$ and, therefore, $A\nu\geq 0$. Hence $\nu=0$. By the assumption there exists $v>0$ such that $Av<0$. Then $Wv\subset Q^+$ by~\cite{Kbook}, Lemma 5.3 and, by above, this is an infinite orbit. \end{proof}
\subsubsection{Purely anisotropic components of finite and affine types} \label{sss:aniso-fin-aff} If $p(x)=0$ for each $x$, then $\mathfrak{g}^\mathtt{C}$ is a Kac-Moody Lie algebra. In this case $\mathfrak{g}^\mathtt{C}$ is finite-dimensional if and only if the Cartan matrix $A$ is of type {\rm(FIN)} and a (twisted) affine Lie algebra if $A$ is of type {\rm(AFF)}.
If we do not require all generators to be even, we have an extra requirement saying that the $x$-row of $A$ consists of even entries if $p(x)=1$. Therefore, to every anisotropic component one can associate a Kac-Moody Lie algebra by changing the parity of all generators to $0$. As we showed in the previous subsection, this operation does not change the type of the corresponding components. We call all contragredient Lie superalgebras obtained in this way from a Kac-Moody Lie algebra $\mathfrak{g}$ {\sl the cousins of } $\mathfrak{g}$.
The Cartan matrices of types (FIN) and (AFF) are well-known. Let us describe the cases when such a matrix has a row with even entries.
In the type (FIN) the only such case is the type $B_n$ and it has exactly one row with even entries. The Kac-Moody Lie algebra with Cartan matrix $B_n$ is $\mathfrak{so}(2n+1)$ and its cousin is a finite-dimensional
simple Lie superalgebra $\mathfrak{osp}(1|2n)$.
The affine Kac-Moody Lie algebras whose Cartan matrices have at least one row with even entries are $\mathfrak{so}(2n+1)^{(1)}$, $\mathfrak{sl}(2n+1)^{(2)}$ and
$\mathfrak{so}(2n+2)^{(2)}$. The cousin of $\mathfrak{so}(2n+1)^{(1)}$ is $\mathfrak{sl}(1|2n)^{(2)}$, the cousin of $\mathfrak{sl}(2n+1)^{(2)}$ is
$\mathfrak{osp}(1|2n)^{(1)}$, and $\mathfrak{so}(2n+2)^{(2)}$ has two cousins $\mathfrak{osp}(2|2n)^{(2)}$ and $\mathfrak{sl}(1|2n+1)^{(4)}$, see ~\cite{vdL} for construction of (twisted) affine superalgebras.
\subsection{Components of type {\rm (Fin)}} Most of the root Lie superalgebras of finite type have isotropic roots. \begin{lem}\label{crlfin}
Assume that $\mathcal{R}_0$ is of type {\rm(Fin)}. Then
\begin{enumerate} \item $\Delta^\mathit{im}=\emptyset$.
\item $\mathfrak{g}=\mathfrak{g}^\mathtt{C}$ except for the case $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$ (see~\ref{rank1}). \item $\mathfrak{g}$ is finite-dimensional. \end{enumerate} \end{lem} \begin{proof} (1) follows from \ref{crlDeltare}(4), (2) and (3) from \ref{crlDeltare} (5).
\end{proof}
\begin{crl}\label{corfindim} If $\dim\mathfrak{g}<\infty$ then $\mathcal{R}_0$ is of type {\rm(Fin)}. \end{crl} \begin{proof} It suffices to check that $\mathtt{Sk}(v)$ contains $v'$ with $\Sigma_{v'}=-\Sigma$ which is equivalent to
$\Delta^+_{v'}(\mathfrak{g}^\mathtt{C})=-\Delta^+_{v}(\mathfrak{g}^\mathtt{C})$. Since $\dim\mathfrak{g}<\infty$, $\Delta(\mathfrak{g}^\mathtt{C})$ is finite. For each $v'\in\mathtt{Sk}(v)$ let $k(v')$ be the cardinality of $\Delta^+_{v'}(\mathfrak{g}^\mathtt{C})\cap \Delta^+_{v}(\mathfrak{g}^\mathtt{C})$. If $k(v')\not=0$, then $\Delta^+_{v'}(\mathfrak{g}^\mathtt{C})$ does not lie in $-\Delta^+_{v}(\mathfrak{g}^\mathtt{C})$, so there exists $\alpha\in\Sigma_{v'}$ with $\alpha\in \Delta^+_{v}(\mathfrak{g}^\mathtt{C})$. By~\ref{crlDeltare} (2), there is a reflexion $v'\to v''$ that replaces $\alpha$ (and, possibly, $2\alpha$) in $\Delta^+_{v'}$ with $-\alpha$ (and, possibly, $-2\alpha$). This means that $k(v'')$ is equal to $k(v')-1$ or to $k(v')-2$. Hence $k(v')=0$ for some $v'\in\mathtt{Sk}(v)$. \end{proof}
\subsubsection{} The results of C.~Hoyt~\cite{Hoyt}, see~\ref{sss:hoytclass} below, together with \ref{sss:aniso-fin-aff}, imply that $\mathfrak{g}^{\mathtt{C}}$ of finite type
are: $\mathfrak{gl}(1|1)$ and all basic classical Lie superalgebras (except
that the simple algebra $\mathfrak{psl}(n|n)$ should be replaced with
$\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(n|n)$). In all cases except
$\mathfrak{gl}(1|1)$ we have $\mathfrak{g}^{\mathtt{C}}=\mathfrak{g}^\mathtt{U}$ by \ref{crlDeltare}(3).
\subsection{Components of type {\rm(Aff)}} \begin{lem}\label{crlaff}
Let $\mathcal{R}_0$ be of type {\rm(Aff)}. Then
\begin{enumerate} \item $\Omega(\mathfrak{r})\subset\Delta^\mathit{im}\subset\mathbb{Z}\delta\setminus\{0\}$. \item $\mathfrak{r}$ lies in the center of $[\mathfrak{g},\mathfrak{g}]$. \item If $\langle\delta,a(x)\rangle\not=0$ for some $x\in X$ then $\mathfrak{g}=\mathfrak{g}^{\mathtt{C}}$. \end{enumerate} \end{lem} \begin{proof} Using~\ref{crlDeltare} we get (1) and $\Omega(\mathfrak{r})\subset\Delta^{im}\subset\mathbb{Z}\delta\setminus\{0\}$.
Since $\mathfrak{g}=[\mathfrak{g},\mathfrak{g}]+\mathfrak{h}$, $\mathfrak{r}$ lies in $[\mathfrak{g},\mathfrak{g}]$ and $[\mathfrak{g},\mathfrak{g}]$ is generated by $\mathfrak{g}_{\pm\alpha}$ for $\alpha\in\Sigma$. Since $j\delta\pm\alpha\not\in\mathbb{Z}\delta$, $[\mathfrak{g}_{\pm\alpha},\mathfrak{r}]=0$. This gives $[[\mathfrak{g},\mathfrak{g}],\mathfrak{r}]=0$ and establishes (2). For (3) assume that $\mathfrak{r}\not=0$. Then $\mathfrak{r}\cap\mathfrak{g}_{j\delta}\not=0$ for some $j\not=0$. Hence
$\mathfrak{g}_{j\delta}$ has a non-zero intersection with the center of $[\mathfrak{g},\mathfrak{g}]$. Since $a(x)\in [\mathfrak{g},\mathfrak{g}]$ for each $x\in X$ this gives $\langle\delta,a(x)\rangle=0$. \end{proof}
\subsubsection{Hoyt's classification} \label{sss:hoytclass} Indecomposable contragredient Lie superalgebras with at least one simple isotropic root were classified in~\cite{Hoyt}. In this subsection we review the results of C.~Hoyt's classification that will be used in the following sections. Exactly one of the following options holds in this case: \begin{enumerate} \item $\dim \mathfrak{g}^\mathtt{C}<\infty$. \item $\dim\mathfrak{g}^\mathtt{C}=\infty$ and $\Delta^{im}=\mathbb Z\delta$, $\Delta\subset\mathbb Z\delta+\Delta'$
for some finite set $\Delta'\subset \mathfrak{h}^*$ and some $\delta\in\Delta^+$ \footnote{$\Delta^+_v$ depends on $v$; however, since $\delta$ is imaginary, it is positive or negative regardless of the choice of $v\in\mathcal{R}_0$.}. In this case all symmetrizable contragredient Lie superalgebras are twisted affinizations of simple
finite-dimensional Lie superalgebras. They also appear in Van de Leur classification of symmetrizable Kac-Moody superalgebras of finite growth.
In addition, there is one-parameter contragredient superalgebra $S(2,1;a)$ and the twisted affinization $\mathfrak q(n)^{(2)}$ of the strange superalgebra
$\mathfrak{psq}(n)$ for $n\geq 3$. By direct inspection one can check that there exists $m\in\mathbb Z$ such that if $\alpha\in\Delta$ then
$\alpha\pm m\delta\in \Delta$. \item The algebra $\mathfrak{g}^\mathtt{C}=Q^{\pm}(m,n,t)$ with $\dim(\mathfrak{h})=3$ where
$m,n,t$ are negative integers, not all equal to $-1$, with
non-symmetrizable and nondegenerate Cartan matrices. There are three
linearly independent principal roots, therefore the Weyl group has no non-zero fixed vectors in $\mathfrak{h}^*$.
Hence $Q^{\pm}(m,n,t)$ are of type {\rm(Ind)}. Little is known about Lie superalgebras of this type. \end{enumerate} \subsubsection{}
Let $\mathcal{R}_0$ be a component of $\mathcal{R}$ of type (2) in Hoyt's classification~\ref{sss:hoytclass}. We will prove that it is of type {\rm(Aff)}.
\begin{lem}\label{lem_aff-iso} Let $F:=Q_\mathbb{R}^*$ and $\gamma\in F$ satisfy $\langle\gamma,\delta\rangle=1$ and $\langle\gamma,\beta\rangle\neq 0$ for any $\beta\in\Delta$. Then there exists $v\in\mathcal{R}_{0}$
such that $\langle\gamma,\alpha\rangle>0$ for any $\alpha\in\Sigma_v$. \end{lem} \begin{proof} Choose a vertex $u\in\mathcal{R}_0$. Let
$$T_u(\gamma)=\{\beta\in\Delta^+_u\mid \langle\gamma,\beta\rangle<0\}.$$
We claim that $T_u(\gamma)$ is finite. Indeed, since $\delta\in \Delta^+_u$ we have $\alpha+M\delta\in \Delta^+_u$ for sufficiently large $M$ and all $\alpha\in\Delta'$ while $\alpha-M\delta\notin \Delta^+_u$. On the other hand, if
we choose $$M>\max\{\langle\gamma,\alpha\rangle\mid\alpha\in\Delta'\},$$
then $\langle\gamma,\alpha+s\delta\rangle>0$ for all $s>M$. Thus,
$$T_u(\gamma)\subset \{\alpha+s\delta\mid \alpha\in\Delta', -M\leq s\leq M\}$$
and hence $T_u(\gamma)$ is finite.
Suppose that $u$ does not satisfy the conditions of the lemma. Then there is $x\in X$ such that $ \langle\gamma,b(x)\rangle<0$. Consider
$u\stackrel{r_x}{\longrightarrow}u'$. By Corollary ~\ref{crlDeltare}(2) we get
$T_{u'}(\gamma)=T_u(\gamma)\setminus\{b(x)\}$ or $T_u(\gamma)\setminus\{b(x),2 b(x)\}$ if $2b(x)$ is a root. In either case $|T_{u'}(\gamma)|<|T_u(\gamma)|$. Repeating the argument several times, we end up with a vertex $v$ such that
$T_v(\gamma)=\emptyset$.
\end{proof}
\begin{crl}\label{cor1_aff-iso} If $\mathcal{R}_0$ is of type (2), then $Q^{++}=\mathbb Z_{\geq 0}\delta$ and hence $\mathcal{R}_0$ is of type {\rm(Aff)}.
\end{crl}
\begin{proof} Let $$F_1:=\{\gamma\in F\mid \langle\gamma,\delta\rangle=1\},\quad S_\gamma^+=\{\nu\in Q\mid \langle\gamma,\nu\rangle\geq 0\}.$$
Then by Lemma~\ref{lem_aff-iso}
$$Q^{++}=\bigcap_{\gamma\in F_1}S_\gamma^+=\mathbb Z_{\geq 0}\delta.$$
\end{proof}
\subsection{} Combining the results of~\cite{Hoyt} with~\ref{Deltareisoempty} we obtain the following result.
\begin{prp} Let $\mathcal{R}_0$ be an indecomposable fully reflectable component. \begin{enumerate} \item The following conditions are equivalent: \begin{itemize} \item $\mathcal{R}_0$ is of type {\rm(Fin)}; \item $W$ is finite; \item
$\dim\mathfrak{g}<\infty$; \item $\dim\mathfrak{g}^\mathtt{C}<\infty$. \end{itemize} \item The following conditions are equivalent: \begin{itemize} \item $\mathcal{R}_0$ is of type {\rm(Aff)}; \item $W$ is infinite and $\mathfrak{h}^*$ contains a non-zero trivial $W$-orbit. \end{itemize} \item The following conditions are equivalent: \begin{itemize} \item $\mathcal{R}_0$ is of type {\rm(Ind)}; \item $\mathfrak{g}$ has an infinite Gelfand-Kirillov dimension. \end{itemize} \end{enumerate} \end{prp}
\begin{rem}
Cartan matrices of components of type (Fin) are usually nondegenerate. The only exception is $\mathfrak{gl}(n|n)$. Cartan matrices of type (Aff) are always degenerate, usually of corank one. The only exception is $\mathfrak{sl}(n|n)^{(1)}$ where corank is two. \end{rem}
\section{Symmetrizable root data} \label{sect:sym} \label{sectKacThm}
We retain the notation of Section~\ref{sec:trichotomy}. We continue to assume that all $x\in X$ are reflectable at all $v\in\mathcal{R}_0$. In this section we prove, following a method of Gabber-Kac~\cite{GabberKac}, that if $\mathcal{R}_0$ has a symmetric Cartan matrix (and, therefore,
all Cartan matrices associated to $\mathcal{R}_0$
are symmetrizable) then $\mathfrak{g}^\mathtt{C}$ is the only root algebra, except for the cases $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$ and $(\rho|\delta)=0$ where $(-|-)$ is the nondegenerate symmetric bilinear form on $\mathfrak{h}^*$ introduced in~\ref{prp:likekac22} and $\rho$ is as in~\ref{sss:weylvector}.
Fix $v\in\mathcal{R}_0$, an admissible component of $\mathcal{R}$. We keep the notation of Section~\ref{sec:root} for the half-baked algebra $\widetilde\mathfrak{g}=\widetilde\mathfrak{n}^-\oplus\mathfrak{h}\oplus\widetilde\mathfrak{n}^+$, a root algebra $\mathfrak{g}$ and the contragredient algebra $\mathfrak{g}^\mathtt{C}=\widetilde\mathfrak{g}/\mathfrak{r}$. We set $\widetilde{\mathfrak{b}}:=\widetilde{\mathfrak{n}}^+ +\mathfrak{h}$, its image $\mathfrak{b}$ in $\mathfrak{g}$ and $\mathfrak{r}^{\pm}:=\mathfrak{r}\cap\tilde{\mathfrak{n}}^{\pm}$. Note that $\mathfrak{r}^{\pm}$ are ideals of $\widetilde{\mathfrak{g}}$.
\subsection{Verma modules} Let $\widetilde{M}(\lambda)$ (resp., $M(\lambda)$, $M^\mathtt{C}(\lambda)$) denote a Verma module of highest weight $\lambda$ over $\widetilde{\mathfrak{g}}$ (resp., $\mathfrak{g}$, $\mathfrak{g}^\mathtt{C}$). Since $\Omega(\widetilde{M}(\lambda)) \subset\lambda-Q^+$, the module
$\widetilde{M}(\lambda)$ admits a unique maximal proper submodule $\widetilde{M}'(\lambda)$.
The Verma modules $\widetilde{M}(\lambda)$, $M(\lambda)$, $M^\mathtt{C}(\lambda)$ admit unique simple quotients.
\begin{lem}\label{lemMM} One has $$M(\lambda)=\mathcal{U}(\mathfrak{g})\otimes_{\mathcal{U}(\widetilde{\mathfrak{g}})} \widetilde{M}(\lambda).$$ \end{lem} \qed
\subsection{Embedding of $\mathfrak{r}^-/[\mathfrak{r}^-,\mathfrak{r}^-]$}
The composition $$\mathfrak{r}^-\hookrightarrow \widetilde{\mathfrak{g}}/\widetilde{\mathfrak{b}}\hookrightarrow \mathcal{U}(\widetilde{\mathfrak{g}})/\mathcal{U}(\widetilde{\mathfrak{g}})\widetilde{\mathfrak{b}}=\widetilde{M}(0)$$ has the image in $\widetilde{M}'(0)=\bigoplus_{\alpha\in\Sigma}\widetilde{M}(-\alpha)$. We denote by \begin{equation} \label{eq:phifromrf-} \phi:\mathfrak{r}^-\to \bigoplus_{\alpha\in\Sigma}M^\mathtt{C}(-\alpha) \end{equation} the composition of this with the projection $$ \bigoplus_{\alpha\in\Sigma}\widetilde{M}(-\alpha)\to \bigoplus_{\alpha\in\Sigma}M^\mathtt{C}(-\alpha). $$ \begin{prp} \label{propK911} The map $\phi$ defined above is a map of $\widetilde\mathfrak{g}$-modules with kernel $[\mathfrak{r}^-,\mathfrak{r}^-]$. \end{prp} \begin{proof} This result is the main part of the proof of Proposition 9.11 of~\cite{Kbook}.
\end{proof}
\subsubsection{Example} If $\mathfrak{g}^\mathtt{C}=\mathfrak{sl}_2\times\mathfrak{sl}_2$ with $\Sigma=\{\alpha_1,\alpha_2\}$, the image of $\phi$ in $M^\mathtt{C}(-\alpha_i)$ is equal to $M^\mathtt{C}(-\alpha_1-\alpha_2)$.
\
Recall that $\mathfrak{g}^\mathtt{U}$ denotes the universal root algebra. \begin{crl}\label{crlanotherideal} Assume that $\bigoplus_{\alpha\in\Sigma} M^\mathtt{C}(-\alpha)$ has no nonzero integrable subquotients. Then $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^\mathtt{C}$. \end{crl}
\begin{proof} Let $\mathfrak{s}=\mathrm{Ker}(\widetilde\mathfrak{g}\to\mathfrak{g}^\mathtt{U})$. Set $\mathfrak{s}^-:=\widetilde{\mathfrak{n}}^-\cap \mathfrak{s}$. Obviously, $\mathfrak{s}\subset\mathfrak{r}$ so $\mathfrak{s}^-\subset\mathfrak{r}^-$.
Assume that $\mathfrak{r}^-/\mathfrak{s}^-\ne 0$. This Lie superalgebra is a semisimple $\mathfrak{h}$-module with the weights belonging to $-Q^+\setminus\{0\}$. This implies that it does not coincide with its commutator, that is, that $\mathfrak{r}^-/(\mathfrak{s}^-+[\mathfrak{r}^-,\mathfrak{r}^-])\ne 0$. Since the adjoint representation of $\mathfrak{g}$ is integrable, $\mathfrak{r}^-/(\mathfrak{s}^-+[\mathfrak{r}^-,\mathfrak{r}^-])$ is a nonzero integrable $\mathfrak{g}$-module. Using \ref{propK911} we get a nonzero
integrable subquotient in $\bigoplus_{\alpha\in\Sigma} M^\mathtt{C}(-\alpha)$ which contradicts the conditions. Thus, $\mathfrak{s}^-=\mathfrak{r}^-$, so automatically $\mathfrak{s}^+=\mathfrak{r}^+$ as the automorphisms $\theta$, see~\ref{sss:automorphism}, defined on $\widetilde\mathfrak{g}$, $\mathfrak{g}^\mathtt{U}$ and $\mathfrak{g}^\mathtt{C}$, identifies $\mathfrak{s}^+$ with $\mathfrak{s}^-$ and $\mathfrak{r}^+$ with $\mathfrak{r}^-$. \end{proof}
\subsection{Main result} In this subsection we assume that the Cartan matrix for $v$ is symmetric, i.e. $$\forall x,y\in X\ \ \ \ \langle b(x),a(y)\rangle=\langle b(y),a(x)\rangle.$$ Note that by \ref{lem:sym-stable} all Cartan matrices at $v'\in\mathcal{R}_0$ are symmetrizable.
By Proposition~\ref{prp:likekac22} \ $\widetilde{\mathfrak{g}}$ admits an invariant
bilinear form such that the restriction of this form on $\mathfrak{h}$ is non-degenerate and $(a(x)|h)=\langle b(x),h\rangle$ for each $h\in\mathfrak{h}$.
\subsubsection{}
Let us show that $\mathfrak{r}$ coincides with the kernel of this form. Indeed, since the kernel is an ideal and the restriction of $(-|-)$ on $\mathfrak{h}$ is
non-degenerate, the kernel lies in $\mathfrak{r}$. Since $(\widetilde{\mathfrak{g}}_{\alpha}|\widetilde{\mathfrak{g}}_{\beta})=0$
for $\alpha+\beta\not=0$, one has $(\mathfrak{h}|\mathfrak{r})=0$. Thus
$$\mathfrak{r}^{\perp}:=\{g\in\widetilde{\mathfrak{g}}|\ (g|\mathfrak{r})=0\}$$ is an ideal containing $\mathfrak{h}$, so $\mathfrak{r}^{\perp}=\widetilde{\mathfrak{g}}$, that is
$\mathfrak{r}$ lies in the kernel of $(-|-)$~\footnote{For symmetrizable Kac-Moody algebras this was earlier noted in~\cite{SchV}.}. Thus, the algebra $\mathfrak{g}^\mathtt{C}$ inherits a non-degenerate invariant bilinear form having the properties listed in \ref{prp:likekac22}.
\begin{thm}{} \label{thm:symmetric-g-gkm}
Let $\mathcal{R}_0$ be symmetrizable and let $\mathfrak{g}$ be a root Lie superalgebra. Then $\mathfrak{g}=\mathfrak{g}^\mathtt{C}$, except for the cases $\mathfrak{gl}(1|1)$ and {\rm(Aff)} with
$(\rho|\delta)=0$. \end{thm} \begin{proof}
The symmetric nondegenerate bilinear form on $\mathfrak{g}^\mathtt{C}$ allows one to define a {\em Casimir operator}, see~\cite{Kbook}, 2.5. This operator acts on $M^\mathtt{C}(\lambda)$ by $(\lambda|\lambda+2\rho)\cdot\mathrm{id}$. This implies \begin{equation} \label{Casimir} [M^\mathtt{C}(\lambda):L^\mathtt{C}(\mu)]\not=0\Longrightarrow
(\lambda|\lambda+2\rho)=(\mu|\mu+2\rho). \end{equation}
Assume that $\mathfrak{r}\not=\mathfrak{s}$. By~\ref{crlanotherideal}, for some $\alpha\in\Sigma$ there is a non-zero homomorphism $$\mathfrak{r}^-\to M^\mathtt{C}(-\alpha).$$ Hence $M^\mathtt{C}(-\alpha)$ admits an integrable subquotient $L^\mathtt{C}(\mu)$ for some $\mu$. Since $M^\mathtt{C}(-\alpha)$ is a subquotient of $M^\mathtt{C}(0)$, the formula (\ref{Casimir}) gives \begin{equation} \label{rhonu}
(\mu|\mu+2\rho)=0. \end{equation}
If $\mathcal{R}_0$ is of type (Fin) and not $\mathfrak{gl}(1|1)$ then $\mathfrak{r}=\mathfrak{s}$ by~\ref{crlfin} (1).
Let us consider the case when $\mathcal{R}_0$ is of type (Aff). By~\ref{crlaff} (1), $\mu=j\delta$ for some $j\in\mathbb{Z}_{>0}$ and $\delta(h)=0$ for each $h\in\mathfrak{h}\cap [\mathfrak{g},\mathfrak{g}]$. Therefore
$(\delta|\alpha)=0$ for each $\alpha\in\Sigma$. This gives
$(\delta|\delta)=0$. Using~(\ref{rhonu}), we get
$h^{\vee}_v=2(\rho|\delta)=0$.
It remains to consider the component $\mathcal{R}_0$ of type (Ind). By~\cite{Hoyt}, the algebras $Q^{\pm}(m,n,t)$ are not symmetrizable. The rest of indefinite types satisfy $\Delta_\mathit{iso}=\emptyset$. Then $a_{xx}\not=0$ for each $x\in X$ and $a_{xy}=a_{yx}$. It is easy to see that we can choose $v\in\mathcal{R}_0$ in such a way that $a_{xx}\in\mathbb{Z}_{>0}$. Then the integrability gives
$(\mu|\alpha)\geq 0$ for each $\alpha\in \Sigma$. Since $-\mu\in Q^+$ and $\mu\not=0$, we obtain
$(\mu|\rho)<0, (\mu|\mu)<0$, a contradiction to (\ref{rhonu}). \end{proof}
\section{The affine case} \label{sect:aff} \subsection{}In this section we prove the following result.
\begin{thm} \label{thm:UKM-ns}
Let $\mathcal{R}_0$ be an indecomposable component of type
(Aff). If $\mathcal{R}_0$ is of type $A(n-1|n-1)^{(1)}$ (resp., $A(2n-1|2n-1)^{(2)}$,
$A(2n|2n)^{(4)}$), then
$\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(1)}$ (resp., $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(2n|2n)^{(2)}$,
$\mathfrak{sl}(2n+1|2n+1)^{(4)}$).
If $\mathcal{R}_0$ is of type $\mathfrak q(n)^{(2)}$ then $\mathfrak{g}^\mathtt{U}=\mathfrak{sq}(n)^{(2)}$. In the rest of the cases $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^{\mathtt{C}}$. \end{thm}
Let us first notice that for $S(2,1;a)$ Lemma~\ref{crlaff} (3) and \ref{sss:s21b} imply $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^{\mathtt{C}}$. In all other cases we define for any root algebra $\mathfrak{g}$ its subfactor
$\bar{\mathfrak{g}}:=[\mathfrak{g},\mathfrak{g}]/Z(\mathfrak{g})$.
Then $\bar{\mathfrak{g}}^{\mathtt{C}}=[\mathfrak{g}^\mathtt{C},\mathfrak{g}^\mathtt{C}]/Z(\mathfrak{g}^\mathtt{C})$ is isomorphic to the twisted loop algebra $\mathcal{L}(\mathfrak{s})^{\sigma}$ for some simple superalgebra $\mathfrak{s}$ and an automorphism $\sigma$ of finite order $m$. In particular, $\bar\mathfrak{g}^\mathtt{C}$ is perfect. The superalgebra $\mathfrak{s}$ is basic classical, exceptional or $\mathfrak{p}\mathfrak{s}\mathfrak q_n$. Its even part $\mathfrak{s}_{\bar 0}$, therefore, is a reductive Lie algebra.
Let $\mathfrak{h}'$ be the even part of the Cartan subalgebra of $\mathfrak{s}$. One can choose $\sigma$ so that $\sigma(\mathfrak{h}')=\mathfrak{h}'$.
Furthermore, if $k\delta$ is an even root and $\varepsilon=e^{\frac{2\pi i}{m}}$ then $$\bar{\mathfrak{g}}^\mathtt{C}_{k\delta}=\{h\otimes t^k\mid h\in\mathfrak{h}',\sigma(h)=\varepsilon^k h\}.$$
The cohomology group $H^i(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)$ has a natural structure of $\mathfrak{h}$-module. We write $H^i(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_\mu$ for the cohomology group of weight $\mu$ with respect to $\mathfrak{h}$-action.
\begin{lem}\label{lem:extension} For every $k\neq 0$
$$\dim\mathfrak{g}^{\mathtt{U}}_{k\delta}-\dim\mathfrak{g}^{\mathtt{C}}_{k\delta}=\dim H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{k\delta}.$$
\end{lem}
\begin{proof} Let $\hat\mathfrak{g}$ be the graded central extension of $\bar{\mathfrak{g}}^{\mathtt{C}}$ given by the exact sequence
$$0\to \bigoplus_{k\neq 0}H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)^*_{k\delta}\to \hat\mathfrak{g}\to \bar{\mathfrak{g}}^{\mathtt{C}}\to 0.$$
Take the pullback
$$0\to \bigoplus_{k\neq 0}H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)^*_{k\delta}\to \hat\mathfrak{g}'\to [{\mathfrak{g}}^{\mathtt{C}},{\mathfrak{g}}^{\mathtt{C}}]\to 0,$$
and then extend to the exact sequence
$$0\to \bigoplus_{k\neq 0}H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)^*_{k\delta}\to \mathfrak{g}\to {\mathfrak{g}}^{\mathtt{C}}\to 0$$
using the semidirect product decomposition
$\mathfrak{g}^{\mathtt{C}}=\mathfrak{t}\ltimes [{\mathfrak{g}}^{\mathtt{C}},{\mathfrak{g}}^{\mathtt{C}}]$ where $\mathfrak{t}\subset\mathfrak{h}$ is a suitable abelian subalgebra.
We claim that $\mathfrak{g}$ is a root algebra. Indeed, we just have to check the relations \ref{sss:half} at every vertex $v\in\mathcal{R}_0$. The only
non-trivial relation is $[\tilde e_x,\tilde f_y]=0$ for $x\neq y$. This is equivalent to $b(x)-b(y)\neq k\delta$ and the latter follows
from $k\delta\in Q^+(v)$ for positive $k$ and $k\delta\in -Q^+(v)$ for negative $k$.
Finally, let us prove that $\mathfrak{g}=\mathfrak{g}^{\mathtt{U}}$. Indeed, by~\ref{crlaff} the kernel $\mathfrak{k}$ of the map $\mathfrak{g}^{\mathtt{U}}\to\mathfrak{g}$ lies in the center of $[\mathfrak{g}^{\mathtt{U}},\mathfrak{g}^{\mathtt{U}}]$
and is a direct sum $\bigoplus_{k\neq 0}\mathfrak{k}_{k\delta}$. Therefore $\mathfrak{g}^{\mathtt{U}}=\mathfrak{g}$.
\end{proof}
\subsubsection{} Let $\delta$ have degree $d$ in the standard grading of $\mathcal{L}(\mathfrak{s})^{\sigma}$. The base change $H^2(\mathfrak{s},\mathbb{C})\to H^2(\mathcal{L}(\mathfrak{s}),\mathbb{C}[t,t^{-1}])$ composed with the linear map $\mathbb{C}[t,t^{-1}]\to\mathbb{C}$ carrying $\sum c_it^i$ to $c_{kd}$, yields a homomorphism \begin{equation} \label{eq:h2map} H^2(\mathfrak{s},\mathbb{C})\to H^2(\mathcal{L}(\mathfrak{s})^\sigma,\mathbb{C}). \end{equation} It is given on $2$-cocycles by the formula \begin{equation} \label{eq:2cocycles}
\tilde c(x\otimes t^a,y\otimes t^b)=\delta_{kd,a+b}c(x,y). \end{equation}
Let $\bar\mathfrak{g}^{\mathtt{C}}=\mathcal{L}(\mathfrak{s})^\sigma$ and $\mathfrak{h}$ be a Cartan subalgebra of $\mathfrak{g}^{\mathtt{C}}$. Set $\mathfrak{h}^{\circ}:=\ker\delta$. Then $\mathfrak{h}^\circ$ acts on $\mathfrak{s}$ and therefore on $H^2(\mathfrak{s},\mathbb{C})$. We denote by $H^2(\mathfrak{s},\mathbb{C})^{\circ}$ the
$\mathfrak{h}^{\circ}$-invariant subspace.\footnote{In most cases $\mathfrak{h}^\circ=(\mathfrak{h}')^\sigma$ and $H^2(\mathfrak{s},\mathbb{C})^{\circ}=H^2(\mathfrak{s},\mathbb{C})$. The only case $\mathfrak{h}^\circ\neq(\mathfrak{h}')^\sigma$ is when the Cartan matrix of $\mathfrak{g}^{\mathtt{C}}$ has corank $2$ and that happens for $\mathfrak{s}=\mathfrak{psl}(n|n)$, $n\geq 2$ and $\sigma=\mathrm{id}$.} The automorphism $\sigma$ acts on $H^2(\mathfrak{s},\mathbb C)^{\circ}$ and induces a $\mathbb Z/m\mathbb Z$-grading.
\begin{lem}\label{lem:redfindim} If $k\delta$ is an even root and $kd\equiv p\mod m$ then the homomorphism (\ref{eq:h2map}) induces an isomorphism
$H^2(\mathfrak{s},\mathbb C)^{\circ}_{p}\simeq H^2(\mathcal{L}(\mathfrak{s})^{\sigma},\mathbb C)_{k\delta}$.
\end{lem}
\begin{proof} The correspondence between the weight spaces follows from formula (\ref{eq:2cocycles}). Injectivity of the map is straightforward. To prove surjectivity it suffices to show that every class in
$H^2(\mathcal{L}(\mathfrak{s})^{\sigma},\mathbb C)_{k\delta}$ is represented by a cocycle $\varphi$ such that \begin{equation} \label{eq:goodcocycle}
\varphi(x\otimes t^{a-m},y\otimes t^{b+m})=\varphi(x\otimes t^{a},y\otimes t^{b}) \end{equation}
for all $a,b\in\mathbb{Z}$ and $x,y\in\mathfrak{s}$.
The Lie algebra $\mathfrak{s}'=[\mathfrak{s}_{\bar 0},\mathfrak{s}_{\bar 0}]$ is semisimple.
The corresponding twisted affine Lie algebra $\hat\mathfrak{s}'$ is symmetrizable and, therefore, $(\hat\mathfrak{s}')^\mathtt{U}=(\hat\mathfrak{s}')^\mathtt{C}$.
By Lemma~\ref{lem:extension} $H^2(\mathcal{L}(\mathfrak{s}')^\sigma,\mathbb C)_{k\delta}=0$. On the other hand $\mathcal{L}(\mathfrak{s})_{\bar 0}^\sigma=\mathcal{L}(\mathfrak{s}')^\sigma\oplus \mathfrak{a}$ for some abelian Lie algebra
$\mathfrak{a}$. Thus, we can choose $\varphi$ so that
$\varphi(\mathcal{L}(\mathfrak{s}')^\sigma,\mathcal{L}(\mathfrak{s})^\sigma_{\bar 0})=0$.
Since $k\delta$ is an even root, $\varphi$ is an even cocycle, so
$\varphi(\mathcal{L}(\mathfrak{s}')^\sigma,\mathcal{L}(\mathfrak{s})^\sigma)=0$.
In particular, for every $h\in(\mathfrak{h}'\cap\mathfrak{s}')^\sigma$ we have $\varphi(h\otimes t^m,\mathcal{L}(\mathfrak{s})^\sigma)=0$. Let $\alpha$ be a non-zero weight of
$\mathfrak{s}$ with respect to
$(\mathfrak{h}'\cap\mathfrak{s}')^\sigma$ and let $x\in \mathfrak{s}_\alpha$, $y\in \mathfrak{s}_{-\alpha}$; we can choose
$h$ so that $\alpha(h)\ne 0$. Then the cocycle condition
$$d\varphi(x\otimes t^{a-m},y\otimes t^{b},h\otimes t^m)=0$$
implies (\ref{eq:goodcocycle}) for $x\in \mathfrak{s}_\alpha, y\in \mathfrak{s}_{-\alpha}$. Since the $\mathfrak{s}_{\alpha}$ for all nonzero weights $\alpha$ generate $\mathfrak{s}$
and $\varphi(x\otimes t^a,y\otimes t^b)=0$ for $x\in\mathfrak{s}_\alpha$
and $y\in\mathfrak{s}_\beta$ with $\alpha+\beta\ne 0$,
one proves the desired identity for all $x,y$ using linearity and the cocycle condition.
\end{proof}
Lemma~\ref{lem:redfindim} implies Theorem~\ref{thm:UKM-ns} in all
cases when $\delta$ is an even root. If $\mathfrak{s}$ is neither $\mathfrak{psl}(n|n)$ nor $\mathfrak{p}\mathfrak{s}\mathfrak q(n)$, then $H^2(\mathfrak{s},\mathbb C)=0$ and hence $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^\mathtt{C}$.
If $\mathfrak{s}=\mathfrak{psl}(n|n)$ or $\mathfrak{p}\mathfrak{s}\mathfrak q(n)$,
$H^2(\mathfrak{s},\mathbb C)^{\circ}=\mathbb C$, see, for instance, \cite{S4}. This gives the cases
$\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(1)}$ and $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(2n|2n)^{(2)}$.
The only cases left are $\mathfrak{g}^\mathtt{C}=\mathfrak{p}\mathfrak{sl}(2n+1|2n+1)^{(4)}$ and $\mathfrak{p}\mathfrak{s}\mathfrak q(n)^{(2)}$ where $\delta$ is an odd root.
For these remaining cases the theorem will follow from the lemma below.
\begin{lem}\label{lem:oddextensions} If $\mathcal{R}_0$ is of type $A(2n|2n)^{(4)}$ or $\mathfrak q(n)^{(2)}$ then
$H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{k\delta}=0$ for any odd $k$.
\end{lem}
\begin{proof} First let us deal with $A(2n|2n)^{(4)}$. In this case $\mathfrak{s}=\mathfrak{psl}(2n+1|2n+1)$, $m=4$ and we can choose $\sigma$ so that
$\mathfrak{s}^{\sigma}=\mathfrak{so}(2n+1)\oplus\mathfrak{so}(2n+1)$. We will establish
an isomorphism $H^2(\mathfrak{s},\mathbb C)^\circ_{p}\simeq H^2(\mathcal{L}(\mathfrak{s})^{\sigma},\mathbb C)_{k\delta}$ for odd $k$. As in the proof of
Lemma~\ref{lem:redfindim}, it suffices to check that we can choose a cocycle $\varphi$ satisfying (\ref{eq:goodcocycle}). This in
turn would follow from the condition $\varphi(h\otimes t^4,\mathcal{L}(\mathfrak{s})^\sigma)=0$ for all $h\in(\mathfrak{h}')^{\sigma}$. Using the root
description,~\cite{vdL}, we see that $\alpha$ and
$-\alpha+k\delta$ are both real roots of $\bar{\mathfrak{g}}^{\mathtt{C}}$ only for the short anisotropic $\alpha$. Thus, if $x\in\bar{\mathfrak{g}}^{\mathtt{C}}_\beta$ for
some long anisotropic root $\beta$ then $\varphi(x,\bar{\mathfrak{g}}^{\mathtt{C}})=0$. On the other hand, every $h\otimes t^4$ can be obtained as a linear
combination of $[x,y]$, $x\in\bar{\mathfrak{g}}^{\mathtt{C}}_{\beta}$ and $y\in\bar{\mathfrak{g}}^{\mathtt{C}}_{-\beta+4\delta}$ for some long anisotropic roots $\beta$. Therefore
$\varphi(h\otimes t^4,\mathcal{L}(\mathfrak{s})^\sigma)=0$ for all $h\in(\mathfrak{h}')^{\sigma}$. The statement of the lemma now follows from
$H^2(\mathfrak{s})_{\bar 1}=0$.
In the case of $\mathfrak q(n)^{(2)}$ we have a grading $\bar{\mathfrak{g}}^{\mathtt{C}}=\bigoplus\bar{\mathfrak{g}}^{\mathtt{C}}_i$ induced by the standard grading on Laurent polynomials, with
$\bar{\mathfrak{g}}^{\mathtt{C}}_0=\mathfrak{sl}(n)$. For every $i$ the term $\bar{\mathfrak{g}}^{\mathtt{C}}_i$ is the adjoint $\bar{\mathfrak{g}}^{\mathtt{C}}_0$-module. The parity of
$\bar{\mathfrak{g}}^{\mathtt{C}}_i$ equals the parity of $i$. Let $s=2k+1$. To compute $H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{s\delta}$ we consider the first layer
of the Hochschild--Serre spectral sequence (see, for instance, \cite{F}, Sect.~5) with respect to the subalgebra $\bar{\mathfrak{g}}^{\mathtt{C}}_0$:
$$H^2(\bar{\mathfrak{g}}^{\mathtt{C}}_0,\mathbb C)\oplus H^1(\bar{\mathfrak{g}}^{\mathtt{C}}_0,(\bar{\mathfrak{g}}^{\mathtt{C}}_s)^*)\oplus
H^0(\bar{\mathfrak{g}}_0^{\mathtt{C}},\oplus_{a+b=s} (\bar{\mathfrak{g}}^{\mathtt{C}}_a\otimes\bar{\mathfrak{g}}^{\mathtt{C}}_b)^*).$$
Since $H^2(\bar{\mathfrak{g}}^{\mathtt{C}}_0,\mathbb C)=0$, $H^1(\bar{\mathfrak{g}}^{\mathtt{C}}_0,(\bar{\mathfrak{g}}^{\mathtt{C}}_s)^*)=0$ and
$H^0(\bar{\mathfrak{g}}_0^{\mathtt{C}},(\bar{\mathfrak{g}}^{\mathtt{C}}_a\otimes\bar{\mathfrak{g}}^{\mathtt{C}}_b)^*)=\mathbb C$ we obtain that every cocycle
$c\in H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{s\delta}$ can be written in the form
$$c(x\otimes t^a,y\otimes t^b)=\gamma(a,b)\operatorname{tr} (xy),\ \gamma:\mathbb{Z}\times\mathbb{Z}\to\mathbb{C}.$$
Furthermore $\gamma$ has the following properties
\begin{itemize}
\item weight condition: $\gamma(a,b)=0$ unless $a+b=s$;
\item skew-symmetry: $\gamma (a,b)=-\gamma(b,a)$;
\item $\gamma(0,s)=0$;
\item cocycle condition: $\gamma(a,b+c)=\gamma(a+b,c)-\gamma(b,a+c)$.
\end{itemize}
The last condition follows by direct computation using the property of the trace $\operatorname{tr}(uvw)=\operatorname{tr}(vwu)$.
Without loss of generality assume that $s>0$. By the cocycle condition and
skew-symmetry
$$\gamma(p,s-p)=\gamma(p,s-p+1-1)=\gamma(s+1,-1)+\gamma(p-1,s-p+1).$$
By induction
$$\gamma(p,s-p)=p\gamma(s+1,-1)+\gamma(0,s)=p\gamma(s+1,-1).$$
Hence $0=\gamma(s,0)=s\gamma(s+1,-1)$
that implies $\gamma(s+1,-1)=0$. Therefore $\gamma\equiv 0$. Thus, $H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{s\delta}=0$.
\end{proof}
\section{Description of root algebras. Examples} \label{sec:app}
In Subsection \ref{rootalg} we describe root algebras in the indecomposable fully reflectable case. In the rest of this section we compute some of the groups $\operatorname{Aut}_\mathcal{R}(v)$.
In this section we identify admissible components of $\mathcal{R}$ by root Lie superalgebras supported on them.
\subsection{}\label{rootalg} By contrast with the case $\mathfrak{gl}(1|1)$, see~\ref{rank1}, we have the following
\begin{thm} Let $\mathcal{R}_0$ be an indecomposable admissible fully reflectable component of the root groupoid, not isomorphic to $\mathfrak{gl}(1|1)$.
Then any ideal of $\mathfrak{g}^\mathtt{U}$ having zero intersection with $\mathfrak{h}$
defines a root algebra. If $\mathcal{R}_0$ is of type (Aff) and $\mathfrak{g}^{\mathtt{U}}\neq \mathfrak{g}^{\mathtt{C}}$ then all such ideals are in natural bijection with subsets of $\mathbb Z\setminus 0$. \end{thm} \begin{proof} By~\ref{crl:invariantideals} we need to consider only components with isotropic reflexions. Furthermore, we are only interested in the case $\mathtt{Sp}^D(v)\neq \{1\}$
and $\mathfrak{g}^{\mathtt{C}}\neq \mathfrak{g}^{\mathtt{U}}$. By~\ref{crlfin} and~\ref{Qpm} (see below)
this leaves us with components of type (Aff) listed in~\ref{thm:UKM-ns}. Let $\mathfrak{g}$ be a root algebra and
$$J^{\mathtt{C}}:=\mathrm{Ker}(\mathfrak{g}^{\mathtt{U}}\to\mathfrak{g}^{\mathtt{C}}),\ \ \ J:=\mathrm{Ker}(\mathfrak{g}^{\mathtt{U}}\to\mathfrak{g}).$$
By~\ref{thm:UKM-ns} we have
$$J^{\mathtt{C}}=\bigoplus_{s\in\mathbb Z\setminus 0} J^{\mathtt{C}}_{s\delta},\ \ \ \dim J^{\mathtt{C}}_{s\delta}\leq 1.$$
It follows from the definition of $Q^{++}$ that it is $\operatorname{Aut}_{\mathcal{R}}(v)$-stable. Since $Q^{++}=\mathbb Z_{\geq 0}\delta$ and $ J^{\mathtt{C}}$ is
$\operatorname{Aut}_{\mathcal{R}}(v)$-stable we obtain that
$ J^{\mathtt{C}}_{s\delta}$ is $\operatorname{Aut}_{\mathcal{R}}(v)$-stable for any $s$. Therefore any graded subspace of $ J^{\mathtt{C}}$ is $\operatorname{Aut}_{\mathcal{R}}(v)$-stable. Moreover, by~\ref{crlaff} (2)
any graded subspace of $ J^{\mathtt{C}}$ is an ideal. Hence by~\ref{sss:invariantideals} the root algebras are in bijection with the graded subspaces of $J^{\mathtt{C}}$.
The last assertion follows from the description of $\mathfrak{g}^{\mathtt{U}}$ given in~\ref{thm:UKM-ns}.
\end{proof}
\begin{rem} Note that by the above theorem a root algebra may not admit a superinvolution $\theta$ defined in~\ref{sss:automorphism}.
\end{rem}
\subsection{Star-shaped spines} Here we calculate the automorphism groups in a few small examples.
\subsubsection{Example}\label{q32} The following root datum contains root algebra $q(3)^{(2)}$. Take $X=\{x_1,x_2,x_3\}$ and let $\mathfrak{h}=\mathfrak{h}(v)$ have dimension 4
with the Cartan matrix $$\begin{pmatrix} 0 & -1 & 1\\ -1 & 0 & 1\\ 1 & -1& 0 \end{pmatrix},\ \ \ p(x_i)=1\ \text{ for } i=1,2,3.$$ Then the graph $\mathtt{Sp}(v)$ is a star with $v$ at the center and three other vertices $v_i$ with $r_{x_i}:v\to v_i$ and the Cartan matrices $$v_1:\ \begin{pmatrix} 0 & -1 & 1\\ 1 & -2 & 1\\ -1 & -1& 2 \end{pmatrix} \ \ \ v_2:\ \begin{pmatrix} -2 & 1 & 1\\ 1 & 0 & -1\\ 1 & 1& -2 \end{pmatrix} \ \ \ v_3: \begin{pmatrix} 2 & -1 & -1\\ -1 & 2 & -1\\ -1 & 1& 0 \end{pmatrix}$$ with $p_{v_j}(x_i)=\delta_{ij}$. We have three principal reflections $s_{\alpha_k}$, where $$\alpha_k:=b(x_i)+b(x_j)=b_{v_i}(x_j)=b_{v_j}(x_i)$$ for $\{i,j,k\}=\{1,2,3\}$. The Weyl group is generated by these reflections (this group is isomorphic to the affine Weyl group $A_2^{(1)})$. The group $K(v)$ is the additive group $\mathbb{C}$. If we choose $\mathfrak{h}$ of dimension greater than $4$, the Weyl group will remain the same, but $K(v)$ will be different. Regardless of $\mathfrak{h}$, $\operatorname{Aut}_\mathcal{R}(v)= W(v)\times K(v)$ by~\ref{crl:all-different}.
\subsubsection{Example: $B(1|1)^{(1)}$, $D(2|1,a)$, $D(2|1,a)^{(1)}$, $Q^{\pm}(m,n,t)$}\label{Qpm} All these cases are similar to~\ref{q32}. We can (and will) choose a vertex $v$ such that $p(x)=1$ for all $x\in X$. We always have $a_{xy}\not=0$ if $x\not=y$. The graph $\mathtt{Sp}(v)$ is a star with the center at $v$. The other vertices are $v_x$ with the edges $r_{x}: v\to v_x$. If $a_{xx}=0$ then $p'(y)=0$ for each $y\not=x$. Hence $\mathtt{Sp}(v)$ consists of $v$ and all $v_x$ such that $a_{xx}=0$. Cartan data at all vertices of $\mathtt{Sp}(v)$ are not $D$-equivalent, so~\ref{crl:all-different} is applicable. This gives $\operatorname{Aut}(v)=W\times K$.
\subsection{$\mathfrak{sl}_n^{(1)}$, its relatives and friends}
There is a number of components of the root groupoid whose Cartan matrices satisfy common properties listed below in (\ref{aij01}) and whose automorphism groups allow a more or less uniform description. We call them ``relatives and friends of $\mathfrak{sl}_n^{(1)}$'' and they consist of the types
$\mathfrak{sl}(k|\ell)^{(1)}$ for $k,\ell$ such that $k+\ell=n$ and $\mathfrak q_n^{(2)}$.
We take $X=\{x_i\}_{i\in\mathbb{Z}_n}$. Let $v\in\mathcal{R}_0$ be a vertex with the Cartan matrix of the following form: \begin{equation}\label{aij01} \begin{array}{ll} a_{ij}=0\ \text{ for }j\not=i,i\pm 1;\\ a_{i,i\pm 1}\in \{\pm 1\},\ \ \ a_{i,i-1}+a_{ii}+a_{i,i+1}=0\\ p(x_i)=1\ \ \Longleftrightarrow\ \ a_{ii}=0. \end{array}\end{equation}
\subsubsection{}\label{goodmatrices} It is easy to check that \begin{itemize} \item If a Cartan matrix satisfies~(\ref{aij01}), then all $x_i$ are reflectable at $v$ and $\sum_i b_{v}(x_i)=\sum_i b_{v'}(x_i)$ for each reflexion $v\to v'$; \item all Cartan matrices in $\mathtt{Sk}(v)$ satisfy~(\ref{aij01}); \item two Cartan matrices $A,A'$ satisfying~(\ref{aij01}) are $D$-equivalent if and only if $p(x_i)=p'(x_i)$ for all $i$. \end{itemize}
\subsubsection{} \label{sss:iota} Let $\overline{\mathcal{R}}_0$ be the component of $\mathcal{R}$ corresponding to $\mathfrak{sl}_n^{(1)}$; we will use bar notation $\bar v$ etc. for the objects connected to $\overline\mathcal{R}_0$. Fix a linear isomorphism $\iota: Q_{\bar v}\stackrel{\sim}{\to} Q_{v}$ given by $\iota({b}_{\bar v}(x_i)):=b_v(x_i)$.
Let $v\to v'$ be a path in $\mathcal{R}_0$ and $\bar v\to \bar v'$ be its namesake in $\overline{\mathcal{R}}_0$. It is easy to see that $$b_v(x_i)=\iota({b}_{\overline{v}}(x_i)).$$ This provides a bijection between the sets of real roots $\Delta^\mathit{re}=\overline{\Delta}^\mathit{re}$. Note that all roots of $\overline\Delta^\mathit{re}$ are anisotropic. Since the set $\{b_v(x_i)\}_{i\in\mathbb{Z}_n}$ determines a vertex in $\mathtt{Sk}(v)$ by~\ref{crl:unique-in-sk}, this gives a bijection between $\mathtt{Sk}(v)$ and $\mathtt{Sk}(\bar v)$.
\subsubsection{} We identify $Q_{v}$ and $Q_{\bar v}$ via $\iota$.
By~\ref{crl:Wfree} the Weyl group $W(\mathfrak{sl}_n^{(1)})$ acts freely on $\mathtt{Sk}(\bar v)$. By~\ref{lem:decomposition0} this action is transitive. This gives a simply transitive action of $W(\mathfrak{sl}_n^{(1)})$ on $\mathtt{Sk}(v)$. Note that the Weyl group $W$ can be identified with a subgroup of $W(\mathfrak{sl}_n^{(1)})$ as it is generated by a part of the reflections belonging to $W(\mathfrak{sl}_n^{(1)})$.
Let us compute
$$\operatorname{Aut}(v)/K(v)=\mathtt{Sk}^D(v)=\{w\in W(\mathfrak{sl}_n^{(1)})|\ \ A_{w(v)}\ \text{ is $D$-equivalent to }A_v\}.$$
\subsubsection{Action of $W(\mathfrak{sl}_n^{(1)})$} By~\ref{goodmatrices}, the vector $$\delta:=\sum_{i=1}^n b_{v'}(x_i)$$ does not depend on the choice of $v'\in\mathtt{Sk}(v)$.
View $Q_{v}$ as a subset of $V=\operatorname{Span}_\mathbb{Z}(\varepsilon_1,\dots,\varepsilon_n,\delta)$ by setting $$b(x_i)=\varepsilon_i-\varepsilon_{i+1}\ \text{ for }i=1,\ldots,n-1; \ \ b(x_n)=\delta+\varepsilon_n-\varepsilon_1.$$ We can extend the parity function $p: Q_v\to \mathbb{Z}_2$ to $p: V\to \mathbb{Z}_2$ by setting $p(\varepsilon_1)=0$. Set
$$\bar Q:=\{\sum_{i=1}^n k_i\varepsilon_i|\ \sum_{i=1}^n k_i=0,\ k_i\in\mathbb{Z}\}.$$ (Note: $\bar Q$ is the lattice for the finite root system $A_{n-1}$.) By \cite{Kbook}, Thm. 6.5, $W(\mathfrak{sl}_n^{(1)})=S_n\ltimes \bar Q$ and this group acts on $V$ as follows: \begin{itemize} \item $S_n$ acts on $\{\varepsilon_i\}_{i=1}^n$ by permutations and stabilizes $\delta$; \item $\bar Q$ acts on $V$ by the formula $$\nu*\mu:=\mu-(\mu,\nu)\delta\ \ \text{ for }\nu\in \bar Q,\ \mu\in V$$ where the bilinear form on $V$ is given by $$(\varepsilon_i,\varepsilon_j)=\delta_{ij},\ \ \ (\varepsilon_i,\delta)=(\delta,\delta)=0.$$ \end{itemize} Note that $W(\mathfrak{sl}_n^{(1)})$ stabilizes $\delta$. By~\ref{goodmatrices}, $A_{w(v)}$ is $D$-equivalent to $A_v$ if and only if $p_v(x_i)=p_{w(v)}(x_i)$ for all $i$. Therefore, \begin{equation} \label{eq:skd=} w\in\mathtt{Sk}^D(v)\ \Longleftrightarrow\ \ p(w\varepsilon_i)-p(\varepsilon_i)\ \text{ is independent of $i$}. \end{equation} We will now compute the groups $\mathtt{Sk}^D(v)$ using the formula (\ref{eq:skd=}).
\subsubsection{Case $\mathfrak{sl}(k|\ell)^{(1)}$, $k,\ell\not=0$} We can choose $v$ in such a way that $p(x_i)=0$ for $i\not=k,n$ and $p(x_n)=p(x_k)=1$. Note that $p(\delta)=0$. Denote by $S_k\subset S_n$ (resp., $S_\ell\subset S_n$) the group of permutations of $\{\varepsilon_i\}_{i=1}^k$ (resp., of $\{\varepsilon_i\}_{i=k+1}^n$). In this case $p(w\varepsilon_i)=p(\varepsilon_i)$ for $w\in \bar Q$, so $\mathtt{Sk}^D(v)\supset \bar Q$.
One has
$$S_n\cap\mathtt{Sk}^D(v)=\{w\in S_n|\ p'(w(\varepsilon_i-\varepsilon_{i+1}))=p'(\varepsilon_i-\varepsilon_{i+1})\ \text{ for }i=1,\ldots,n-1\}.$$ If $k\ne\ell$ this gives $S_n\cap\mathtt{Sk}^D(v)=S_k\times S_{\ell}$. In the case $k=\ell$ we have \newline $S_n\cap\mathtt{Sk}^D(v)=(S_k\times S_k)\rtimes\mathbb{Z}_2$, where $\mathbb{Z}_2$ interchanges the two copies of $S_k$. Hence $$\mathtt{Sk}^D(v)=\left\{ \begin{array}{ll} (S_k\times S_{\ell})\ltimes \bar Q\ & \text{ if }k\not=\ell\\ ((S_k\times S_k)\rtimes\mathbb{Z}_2)\ltimes \bar Q\ & \text{ if }k=\ell.\end{array} \right.$$ Note that the Weyl group has the form $W=(S_k\times S_{\ell})\ltimes Q_0$ where $Q_0\subset\bar Q$ is the subgroup spanned by $\{\varepsilon_i-\varepsilon_{i+1}\}_{i=1}^{k-1}\coprod \{\varepsilon_i-\varepsilon_{i+1}\}_{i=k+1}^{n-1}$. Observe that $W$ has infinite index in $\mathtt{Sk}^D(v)$.
\begin{rem} \label{sss:glmn}
For $\mathcal{R}_0$ of type $A(k-1|\ell-1)$ a similar reasoning (replacing the index set $X=\{x_i\}_{i\in\mathbb{Z}_n}$ with the set $X=\{x_1,\ldots, x_n\}$)
shows that $S_{k+\ell}$ acts transitively on $\mathtt{Sk}(v)$ and that $$\mathtt{Sk}^D(v)=\left\{ \begin{array}{ll} S_k\times S_{\ell} & \text{ if }k\not=\ell\\ (S_k\times S_k)\rtimes\mathbb{Z}_2 & \text{ if }k=\ell.\end{array} \right.$$ Note that the Weyl group is in both cases $S_k\times S_{\ell}$. \end{rem}
If $k=\ell$ then $K(v)=\mathbb{C}$ and $\operatorname{Aut}(v)$ is a nontrivial semidirect product of $\mathbb{C}$ and $\mathtt{Sk}^D(v)$.
\subsubsection{Case $\mathfrak q_n^{(2)}$} Using~\cite{Kbook}, Thm. 6.5 and \cite{S3}, one gets $$W=S_n\ltimes 2\bar Q.$$ We will choose $v$ so that $p(x_i)=0$ for $i=1,\ldots,n-1$ and $p(x_n)=1$. Note that $p(\delta)=1$.
In this case $p(w\varepsilon_i)=p(\varepsilon_i)$ for $w\in S_n$, so $S_n\subset\mathtt{Sk}^D(v)$. Hence $$\mathtt{Sk}^D(v)=S_n\ltimes Q'$$ where $Q'=\bar Q\cap \mathtt{Sk}^D(v)$. Take $\nu\in\bar Q$. One has $$p(\nu*\varepsilon_i)-p(\varepsilon_i)\equiv (\nu,\varepsilon_i)\mod 2,$$ so
$$Q'=\{\sum_{i=1}^n k_i\varepsilon_i|\ \sum_{i=1}^n k_i=0,\ k_i\in\mathbb{Z}, k_i-k_j\equiv 0\mod 2\}.$$
If $n$ is odd this gives $Q'=2\bar Q$, so $\mathtt{Sk}^D(v)=W$ and $\operatorname{Aut}_\mathcal{R}(v)=W\times K$.
If $n$ is even, $2\bar Q$ has index $2$ in $Q'$. Thus $W$ has index two in $\mathtt{Sk}^D(v)$, so that $W\times K$ is an index 2 subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
\subsection{A deformation of $\mathfrak{sl}(2|1)^{(1)}$} \label{ss:s21b}
A very interesting relative of $\mathfrak{sl}(2|1)^{(1)}$
is the root Lie superalgebra $S(2|1,b)$ defined in~\cite{S3}. We will recall some of the results of~\cite{S3} below. Set $X:=\{x_1,x_2,x_0\}$ and fix $\mathfrak{h}$ with $\dim\mathfrak{h}=4$.
Let $\mathcal{R}(b)$, $b\ne 0$, be the component of $\mathcal{R}$ containing a vertex $v$ such that $p_v(x_1)=p_v(x_2)=1$, $p_v(x_0)=0$ and the Cartan matrix $A_v$ is equal to $$A(b):=\begin{pmatrix} 0 & b & 1-b\\ -b & 0 &1+b\\ -1 & -1 & 2 \end{pmatrix}$$ for $b\not=0$.
In studying skeleta of $\mathcal{R}(b)$ it is convenient to allow permutations of the elements of $X$. This leads to the action of $S_3$ on the components of $\mathcal{R}$ with the index set $X$ and, as we will see soon, carries components $\mathcal{R}(b)$ to components of the same type.
Permuting $x_1$ and $x_2$ in $A(b)$ we obtain
$A(-b)$, so $\mathcal{R}(b)$ is mapped to $\mathcal{R}(-b)$. In particular, each root algebra for $S(2|1;b)$ is isomorphic to a root algebra for $S(2|1; -b)$.
\begin{lem}\label{lemS21b} For any vertex $v\in\mathcal{R}(b)$ the Cartan matrix $A_v=(a^{(v)}_{xy})$ is of the form $\sigma(D A(b+i))$ where $i\in \mathbb{Z}$,
$D$ is an invertible diagonal matrix and $\sigma\in S_3$ is an even permutation. One has $p_v(x)=1$ if $a^{(v)}_{xx}=0$ and $p_v(x)=0$ otherwise. \end{lem} \begin{proof} It is enough to verify what happens to the Cartan datum under an isotropic reflexion $r_{x}:\ v\to v'$. Since permuting $x_1$ and $x_2$ in $A(b)$ yields
$A(-b)$, it is enough to verify the assertion for $x=x_1$. In this case we have $$A_{v'}=\begin{pmatrix} 0 & -b & -1+b\\ b & -2b & b\\ 1 & \frac{2-b}{b-1} & 0 \end{pmatrix}.$$ Taking the homothety $h_{\lambda}:v'\to v''$ with $\lambda=(-1,-b^{-1}, b-1)$ we get $$A_{v''}=\begin{pmatrix} 0 & b & 1-b\\ -1 & 2 & -1\\ b-1 & 2-b & 0 \end{pmatrix}.$$ Applying now the cyclic permutation carrying $x_2$ to $x_1$, we get the Cartan matrix $A(b-1)$. It is easy to see that going along the other isotropic reflection would produce in the same way the matrix $A(b+1)$. \end{proof}
\begin{crl}\label{crlS21b} \begin{itemize} \item[1.] $\mathcal{R}(b)$ is admissible if and only if $b\not\in\mathbb{Z}$; \item[2.] if $\mathcal{R}(b)$ is admissible, then for $i\in\mathbb{Z}$ each root algebra for
$S(2|1;\pm b\pm i)$ is isomorphic to a root algebra for $S(2|1;b)$. \end{itemize} \end{crl} \begin{proof} Note that $A(b)$ is locally weakly symmetric for $b\not=\pm 1$. Using Lemma~\ref{lemS21b} we obtain the assertions. \end{proof}
\subsubsection{}\label{S21bproperties} From now on we assume that $\mathcal{R}(b)$ is admissible, i.e., $b\not\in\mathbb{Z}$. Using Lemma~\ref{lemS21b} we obtain
\begin{enumerate} \item all $x$ are reflectable at each $v\in\mathcal{R}(b)$; \item for each reflexion $r_{x}: v\to v'$ we have $b_{v'}(y)=b_{v}(x)+b_{v}(y)$ if $y\not=x$; \item a real root is isotropic if and only if it is odd. \end{enumerate}
\subsubsection{} Let ${\mathcal{R}}_{\bar v}$ be the component of the root groupoid with $\dim\mathfrak{h}'=4$ and a vertex $\bar v$ such that $p_{\bar v}(x_1)=p_{\bar v}(x_2)=1$, $p_{\bar v}(x_0)=0$ and the Cartan matrix $$A_{\bar v}:=\begin{pmatrix} 0 & -1 & -1\\ -1 & 0 &-1\\ -1 & -1 & 2 \end{pmatrix}.$$
Then the component ${\mathcal{R}}_{\bar v}$ of $\bar v$ is of type $\mathfrak{sl}(2|1)^{(1)}$.
As in~\ref{sss:iota}, \ref{S21bproperties}(2) yields a linear isomorphism $\iota: Q_{\bar v}\to Q_{v}$ by setting $\iota(b_{\bar v}(x_i)):= b_v(x_i)$; by the same arguments, this gives a bijection between $\mathtt{Sk}(v)$ and $\mathtt{Sk}(\bar v)$ with $b_{v}(x_i)=\iota(b_{\bar v}(x_i))$.
Note that, contrary to~\ref{sss:iota}, $\iota$ preserves $p(x_i)$.
\subsubsection{} \label{sss:s21b} We have $$Q^{++}_v=\iota(Q^{++}_{\bar v})=\mathbb{N}\delta\ \text{ for } \delta:=\sum b_v(x_i).$$
Therefore, $S(2|1,b)$ is of type (Aff). Note that $\langle\delta,a_v(x_1)\rangle=1\not=0$, so by Corollary~\ref{crlfin}(3) $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^{\mathtt{C}}$.
\subsubsection{}
By~\ref{S21bproperties}(3) we see that $\iota:Q_{\bar v}\to Q_v$ establishes bijection of
real, isotropic and anisotropic roots for $\bar v$ and $v$. Moreover, the bijection between $\mathtt{Sk}(v)$ and $\mathtt{Sk}(\bar v)$ gives a bijection between the spines $\mathtt{Sp}(v)$ and $\mathtt{Sp}(\bar v)$. In particular, $\mathtt{Sp}(v)$ has two principal roots $\alpha:=b_{v}(x_0)$ and $b_v(x_1)+b_v(x_2)=\delta-\alpha$. Using~\ref{S21bproperties} we obtain $$W=\overline{W}\cong A_1^{(1)}$$ and for each $\nu\in Q_{\bar v}$ we have $w\iota(\nu)=\iota(w\nu)$.
\begin{prp} $\operatorname{Aut}(v)=W\times K$. \end{prp} \begin{proof} It is enough to check that all Cartan matrices in $\mathtt{Sp}(v)$ are not $D$-equivalent. Note that $\mathtt{Sp}(v)$
can be seen as the infinite graph $$\ldots\stackrel{r_{x_0}}{\to} v_{-1} \stackrel{r_{x_2}}{\to} v_0\stackrel{r_{x_1}}{\to} v_1 \stackrel{r_{x_0}}{\to} v_2\stackrel{r_{x_2}}{\to} v_3\stackrel{r_{x_1}}{\to} v_4 \stackrel{r_{x_0}}{\to}\ldots $$
Consider the equivalence relation on the set of $3\times 3$ matrices generated by the action of $A_3$ (the group of even permutations in $S_3$) and $B\sim DB$ for a diagonal invertible matrix $D$. Observe that $A(b)\not\sim A(b')$ if $b\not=b'$.
In the proof of Lemma~\ref{lemS21b} we showed that if $A_v\sim A(b)$, then for an isotropic reflexion $v\stackrel{r_{x}}{\to} v'$ we have $A_{v'}\sim A(b\pm 1)$. This implies that $A_{v_k}\sim A(b-k)$, so $A_{v_k}\not\sim A_{v_0}$ for any $k\not=0$. Hence the group $\mathtt{Sp}^D(v_0)$ is trivial, so $\operatorname{Aut}(v_0)=W\times K$. \end{proof}
\end{document}
\begin{document}
\title[Resolutions of local face modules]{Resolutions of local face modules, functoriality, and vanishing of local $h$-vectors}
\author{Matt Larson, Sam Payne, and Alan Stapledon}
\address{Stanford U. Department of Mathematics, 450 Jane Stanford Way, Stanford, CA 94305} \email{mwlarson@stanford.edu}
\address{UT Department of Mathematics, 2515 Speedway, RLM 8.100, Austin, TX 78712} \email{sampayne@utexas.edu}
\address{Sydney Mathematics Research Institute, L4.42, Quadrangle A14, University of Sydney, NSW 2006, Australia} \email{astapldn@gmail.com}
\begin{abstract} We study the local face modules of triangulations of simplices, i.e., the modules over face rings whose Hilbert functions are local $h$-vectors. In particular, we give resolutions of these modules by subcomplexes of Koszul complexes as well as functorial maps between modules induced by inclusions of faces. As applications, we prove a new monotonicity result for local $h$-vectors and new results on the structure of faces in triangulations with vanishing local $h$-vectors. \end{abstract}
\maketitle
\section{Introduction}
In this paper, we study the modules over face rings, introduced by Athanasiadis and Stanley, whose Hilbert functions are the relative local $h$-vectors of quasi-geometric homology triangulations of simplices, a broad class of formal subdivisions that includes all geometric triangulations and is natural from the point of view of combinatorial commutative algebra. See Section 2.1 for the precise definition and further references.
Fix an infinite field $k$. Let $\sigma \colon \Gamma \to 2^V$ be a quasi-geometric homology triangulation of a simplex, and let $E$ be a face of $\Gamma$. Say that a face $G \in \Gamma$ is \emph{interior} if $\sigma(G) = V$, and let $I$ be the ideal in the face ring $k[\lk_\Gamma(E)]$ generated by the faces that are interior relative to $E$, i.e., \[
I = (x^F : F \sqcup E \mbox{ is interior} ). \]
Let $d = |V|-|E|$, which is the Krull dimension of $k[\lk_\Gamma(E)]$, and let $\theta_1, \ldots, \theta_{d}$ be a special l.s.o.p., as in \cite{Stanley92, Athanasiadis12b}. See also \S\ref{ss:sr}, where we recall the definition and construction of special l.s.o.p.s.
\begin{definition} The \emph{local face module} $L(\Gamma,E)$ is the image of $I$ in $k[\lk_\Gamma(E)]/(\theta_1, \ldots, \theta_d)$. \end{definition}
\noindent Note that $L(\Gamma,E)$ is a finite dimensional graded $k$-vector space. The \emph{local $h$-vector} is its Hilbert function: \[ \ell(\Gamma, E) := (\ell_0, \ldots, \ell_{d}), \quad \quad \mbox{ where } \ \ell_i := \dim L(\Gamma,E)_i. \] The local face module $L(\Gamma,E)$ depends on the choice of a special l.s.o.p., but $\ell(\Gamma,E)$ is an invariant of the triangulation with the symmetry $\ell_i = \ell_{d- i}$. See \S\ref{sec:triangulations} for details and references. In the past few years, there has been significant research activity on the combinatorics of local $h$-vectors and relations to intersection homology \cite{Athanasiadis16, KatzStapledon16, Stapledon17, deCataldoMiglioriniMustata18}. Recent advances include a proof that every non-negative integer vector satisfying $\ell_0 = 0$ and $\ell_i = \ell_{d-i}$ is the local $h$-vector of a quasi-geometric triangulation for $E = \emptyset$ \cite{JKMS}, and a relative hard Lefschetz theorem that yields unimodality of local $h$-vectors for regular subdivisions in a more general setting (for regular nonsimplicial polyhedral subdivisions that are not necessarily rational) \cite{Karu19}.
Here, we investigate the local face modules $L(\Gamma, E)$ using methods of combinatorial commutative algebra. In particular, we describe natural combinatorial resolutions of these modules as well as natural maps of $k[\lk_\Gamma(E)]$-modules, $L(\Gamma,E) \to L(\Gamma, E')$, for $E \subset E'$. Our first theorem gives explicit generators for the kernel of the natural map $I \to k[\lk_\Gamma(E)]/(\theta_1, \ldots, \theta_d)$. Moreover, we extend this to an exact sequence of graded $k[\lk_\Gamma(E)]$-modules in which each term is a direct sum of degree-shifted monomial ideals.
Label the vertices of the simplex $V = \{ v_1, \ldots, v_n \}$. For a subset $U \subset V$, let $U^c := V \smallsetminus U$. After relabeling, we may assume that $\sigma(E)^c = \{v_1, \ldots, v_b\}$. Given $S \subset \{v_1, \ldots, v_d\}$, we define the ideal $I_S \subset k[\lk_\Gamma(E)]$ by \begin{equation*} I_S := ( x^F : \, \sigma(F \sqcup E)^c \subset S). \end{equation*} Note that $I_{S'} \subset I_{S}$ for $S' \subset S$, and $I_S$ depends only on $S \cap \{v_1, \ldots, v_b\}$. For instance, $I_{\emptyset} = I$ and $I_{S} = k[\lk_{\Gamma}(E)]$ if $\{v_1, \ldots, v_b \} \subset S$. By the definition of a special l.s.o.p. (Definition~\ref{def:special}), after reordering, we may assume \[ \supp(\theta_i) \subset \{ w \in \lk_\Gamma(E) : v_i \in \sigma(w) \}, \] for $1 \leq i \leq b$. As a consequence, for any $v_i \in S$, multiplication by $\theta_i$ induces a degree 1 map $\lambda_i \colon I_S \to I_{S \smallsetminus \{v_i\}}$.
\begin{theorem}\label{thm:resolution} There is an exact sequence of graded $k[\lk_{\Gamma}(E)]$-modules
$$0 \to k[\lk_{\Gamma}(E)][-d] \to \bigoplus_{ |S| = d - 1 } I_{S}[-(d-1)] \to \dotsb \to \bigoplus_{|S| = 1} I_{S}[-1] \to I \to L(\Gamma,E) \to 0,$$ where, for $S= \{v_{i_0}, \ldots, v_{i_k}\}$, with $i_0 < \cdots < i_k$, the differential restricted to $I_S$ is $\oplus_{j = 0}^k (-1)^j \lambda_{i_j}$. \end{theorem}
\begin{corollary}\label{cor:presentation} The kernel of the surjection $I \to L(\Gamma, E)$ is the ideal $J$ generated by
$$ \left \{ \theta_i \cdot x^{F} : F \sqcup E \mbox{ is interior } \right \} \cup \left \{ \theta_{j} \cdot x^G : \sigma(G \sqcup E) = \{v_j\}^c, \mbox{ for } 1 \leq j \leq b \right \}.$$ \end{corollary}
We also construct maps between local face modules, as follows. For faces $E \subset E'$ in $\Gamma$, let $\Star(E' \smallsetminus E)$ denote the closed star of $E' \smallsetminus E$ in $\lk_\Gamma(E)$. We have a natural inclusion of complexes $\lk_\Gamma(E') \subset \lk_\Gamma(E)$.
\begin{theorem}\label{thm:maps} Let $E \subset E'$ be faces of $\Gamma$, with $$d = n - \vert E \vert, \quad d' = n - \vert E' \vert, \quad
\mbox{ and } \quad b' = n - |\sigma(E')|.$$ Let $\{\theta_1, \dotsc, \theta_{d } \}$ be a special l.s.o.p. for $k[\lk_{\Gamma}(E)]$, and let
$\theta_i' := \theta_i|_{\Star(E' \smallsetminus E)}$. Then there is a unique homomorphism of graded $k$-algebras
\[ \phi\colon k[\lk_{\Gamma}(E)]/(\theta_1, \dotsc, \theta_d) \to k[\lk_{\Gamma}(E')]/(k[\lk_\Gamma(E')] \cap (\theta_1', \dotsc, \theta_{d}')) \]
whose kernel contains $\{[x^F] : F \not \in \Star(E' \smallsetminus E)\}$ and satisfies $\phi(x^F) = x^F$ for all $F \in \lk_{\Gamma}(E')$. Moreover, there is a special l.s.o.p. $\zeta_1, \ldots, \zeta_{d'}$ for $k[\lk_\Gamma(E')]$ such that $(\zeta_1, \ldots, \zeta_{d'}) = k[\lk_\Gamma(E')] \cap (\theta'_1, \ldots, \theta'_{d})$ and, up to reordering, we have $\theta_i|_{\lk_\Gamma(E')} = \zeta_i$, for $1 \leq i \leq b'$. With this choice of special l.s.o.p., $\phi(L(\Gamma,E)) \subset L(\Gamma,E')$. \end{theorem}
\begin{remark} Theorem~\ref{thm:maps} may be viewed as a functoriality statement for local face modules. Start by fixing the special l.s.o.p. $\theta_1, \ldots, \theta_d$. Then $L(\Gamma, E)$ is well-defined. For $E' \supset E$ the special l.s.o.p. $\zeta_1, \ldots, \zeta_{d'}$ depends on some choices, but the ideal that it generates does not, nor does the map $\phi \colon L(\Gamma,E) \to L(\Gamma,E')$. Moreover, for $E'' \supset E'$, one readily checks that the maps $\phi' \colon L(\Gamma, E') \to L(\Gamma, E'')$ and $\phi '' \colon L(\Gamma,E) \to L(\Gamma, E'')$ are independent of all choices and satisfy $\phi'' = \phi' \circ \phi$. Thus one obtains a functor from the poset of faces of $\Gamma$ that contain $E$ to graded vector spaces, given by $E' \mapsto L(\Gamma, E')$. \end{remark}
We now give two applications of the above theorems. The first is a monotonicity property for local $h$-vectors.
\begin{theorem}\label{thm:increase} Let $E \subset E'$ be faces of $\Gamma$ such that $\sigma(E) = \sigma(E')$. Then $\ell (\Gamma, E) \geq \ell(\Gamma, E')$. \end{theorem}
\noindent The inequality in Theorem~\ref{thm:increase} is term by term, i.e., $\dim L(\Gamma, E)_i \geq \dim L(\Gamma,E')_i$ for all $i$. The proof is by showing that the map $\phi \colon L(\Gamma, E) \to L(\Gamma,E')$ given by Theorem~\ref{thm:maps} is surjective.
Our second application of the above theorems is to a decades old problem posed by Stanley, who introduced and studied local $h$-vectors in the special case where $E = \emptyset$ and asked for a characterization of triangulations for which they vanish \cite[Problem~4.13]{Stanley92}. This problem remains open, and is of enduring interest \cite[Problem~2.12]{Athanasiadis16}. The extension to the case where $E$ is not empty is particularly relevant for applications to the monodromy conjecture \cite{Igusa78, DenefLoeser98, Stapledon17}. In \cite{LarsonPayneStapledon}, we prove a theorem on the structure of geometric triangulations with vanishing local $h$-vectors that is tailored to this purpose, and we use it to prove the monodromy conjectures for all singularities that are nondegenerate with respect to a simplicial Newton polyhedron. See Theorems~1.1.1, 1.4.3, and 4.1.3 in loc. cit.
Here, we apply Theorem~\ref{thm:resolution} to prove another theorem on the structure of faces in triangulations with vanishing local $h$-vectors. Let $F \in \lk_\Gamma(E)$ be a face such that $F\sqcup E$ is interior. Following terminology from the monodromy conjecture literature (see, e.g., \cite{LemahieuVanProeyen11}), we say that $F$ is a \emph{pyramid with apex $w \in F$} if $(F \sqcup E ) \smallsetminus w$ is not interior. Let $$\mathcal{A}_F := \{ w \in F : F \mbox{ is a pyramid with apex } w \}, \mbox{ \ and \ } V_w := \sigma( (F \sqcup E) \smallsetminus w)^c.$$
The elements of $V_w$ correspond to the \emph{base directions} of $F$, i.e., the facets of $2^V$ that contain the base of $F$, when viewed as a pyramid with apex $w$. We say $F$ is a $U$-pyramid if there is an apex $w \in \mathcal{A}_F$ such that $|V_w| = 1$. In other words, a $U$-pyramid is a pyramid with a unique base direction, for some choice of apex.
\begin{definition} Let $F \in \lk_{\Gamma}(E)$ be a face. An \emph{interior partition} of $F$ is a decomposition \[ F = F_1 \sqcup F_2 \sqcup \mathcal{A}_F \] such that $F_1 \sqcup \mathcal{A}_F \sqcup E$ and $F_2 \sqcup \mathcal{A}_F \sqcup E$ are both interior. \end{definition}
\begin{theorem}\label{thm:interiornonvanish}
Suppose $\ell(\Gamma,E) = 0$ and $F \in \lk_\Gamma(E)$ has an interior partition $F = F_1 \sqcup F_2 \sqcup \mathcal{A}_F$ such that $|F_1| \leq 2$. Then $F$ is a $U$-pyramid.
\end{theorem}
\noindent See Remark~\ref{r:specialcase} for a short proof in a special case that illustrates the naturality of the
$U$-pyramid condition. The method of proof breaks down when $|F_i| \geq 3$. See Example~\ref{example:nonrestriction}.
\begin{remark}
The analogous theorem in \cite{LarsonPayneStapledon} requires that the triangulation be geometric and that the interior partition satisfies the additional condition $\sigma(F_2 \sqcup E)^c = \bigcup_{w \in \mathcal{A}_F} V_w$. But then the hypothesis that $|F_1| \leq 2$ is dropped entirely. So, even for geometric triangulations, there are cases of Theorem~\ref{thm:interiornonvanish} that are not necessarily covered by \cite[Theorem~4.1.3]{LarsonPayneStapledon}. It should be interesting to look for a common generalization of these vanishing results, and to pursue further progress on Stanley's problem of characterizing triangulations with vanishing local $h$-vector more generally. \end{remark}
\begin{remark} To the best of our knowledge, all of the theorems stated in the introduction are new even for regular triangulations. The reader who prefers to do so may safely restrict attention to geometric or even regular triangulations. However, while the structure results for triangulations with vanishing local $h$-vectors in \cite{dMGPSS20} and \cite{LarsonPayneStapledon} rely on special properties of geometric triangulations, the proofs presented here work equally well for quasi-geometric homology triangulations, and we find it natural to work in this level of generality. \end{remark}
We conclude the introduction with an example illustrating the above theorems.
\begin{example} \label{ex:triforce} Let $\Gamma$ be the \emph{triforce} triangulation, which figures prominently in \cite{dMGPSS20} and in the adventures of hero protagonist Link in the video game series The Legend of Zelda.
\begin{center} \begin{tikzpicture}[scale=2] \draw (0.5,0.8) node[above] { $u$ } -- (1,0) node[right] { $v$ } -- (0,0) node[left] { $w$ } -- (0.5,0.8);
\draw (0.75,0.4) node[right] { $c$ } -- (0.5,0) node[below] { $a$ } -- (0.25,0.4) node[left] { $b$ } -- (0.75,0.4) ;
\draw (-.75,0.4) node {$\Gamma$ };
\end{tikzpicture} \end{center}
Let $x_a := x^{\{ a \}}$, $x_b := x^{ \{ b \} }$, $x_c := x^{\{ c \}}$, $x_u := x^{\{ u \}}$, $x_v := x^{\{ v \}}$, $x_w := x^{\{ w \}}$. Consider first $E = \emptyset$. The face ring is
\[ k[\lk_\Gamma(E)] = k[x_{a},x_{b},x_{c},x_{u},x_{v},x_{w}]/(x_{a}x_{u},x_{b}x_{v},x_{c}x_{w},x_{u}x_{v},x_{u}x_{w},x_{v}x_{w}), \]
and its ideal of interior faces is \[ I = (x_ax_b, x_ax_c, x_bx_c). \]
A special l.s.o.p. is of the form $\theta_1, \theta_2, \theta_3$, with \[ \supp(\theta_1) = \{ b,c,u\}, \quad \quad \supp(\theta_2) = \{ a,c,v\}, \quad \quad \supp(\theta_3) = \{ a,b,w\}, \] subject to the condition that the restrictions (of the corresponding affine linear functions) to the face $\{a, b, c\}$ are linearly independent. Our resolution of the local face module $L(\Gamma, E)$ also involves the monomial ideals
\[ \begin{array}{ccc} I_u = (x_a, x_bx_c), & I_v = (x_b, x_ax_c), & I_w = (x_c, x_ax_b), \\ I_{uv} = (x_a, x_b, x_w), & I_{uw} = (x_a, x_c, x_v), & I_{vw} = (x_b, x_c, x_u). \end{array} \] The resolution given by Theorem~\ref{thm:resolution} is then \[ 0 \to k[\lk_\Gamma(E)] \xrightarrow{\begin{bsmallmatrix} \theta_1 \\ -\theta_2 \\ \theta_3 \end{bsmallmatrix}} I_{vw} \oplus I_{uw} \oplus I_{uv} \xrightarrow{\begin{bsmallmatrix} 0 & -\theta_3 & -\theta_2\\ -\theta_3 & 0 & \theta_1\\ \theta_2 & \theta_1 & 0\\
\end{bsmallmatrix} } I_u \oplus I_v \oplus I_w \xrightarrow{\begin{bsmallmatrix} \theta_1 & \theta_2 & \theta_3 \end{bsmallmatrix}} I \to L(\Gamma, E) \to 0. \] In particular, we have $L(\Gamma, E) \cong I/J$, where
\[ ( \theta_1 \cdot x_a, \theta_2 \cdot x_b, \theta_3 \cdot x_c ) \subset J. \] Since $\theta_1$, $\theta_2$, and $\theta_3$ restrict to linearly independent functions on $\{a, b,c\}$, the elements $\{ \theta_1 \cdot x_a, \theta_2 \cdot x_b, \theta_3 \cdot x_c \}$
span the 3-dimensional subspace $\langle x_a x_b, x_a x_c, x_bx_c \rangle$
of $k[\lk_\Gamma(E)]$. Hence $I = J$ and $L(\Gamma, E) = 0$.
Next, consider $E' = \{c\}$. Then
\[ k[\lk_\Gamma(E')] = k[x_a, x_b, x_u, x_v] / (x_ax_u, x_bx_v, x_ux_v). \] A special l.s.o.p. is any l.s.o.p. of the form $\zeta_1, \zeta_2$, where $\supp(\zeta_1) \subset \{a, b\}$. The ideal of interior faces in this case is $I' = (x_a, x_b)$, and the resolution given by Theorem~\ref{thm:resolution} is \[ 0 \to k[\lk_\Gamma(E')] \xrightarrow{\begin{bsmallmatrix} -\zeta_2 \\ \zeta_1 \\ \end{bsmallmatrix}} k[\lk_\Gamma(E')] \oplus I' \xrightarrow{\begin{bsmallmatrix} \zeta_1 & \zeta_2 \end{bsmallmatrix}} I' \to L(\Gamma,E') \to 0. \] Note, in particular, that $L(\Gamma,E') \cong I'/J'$, where $J' = (\zeta_1, \zeta_2 x_a, \zeta_2 x_b)$. Thus one sees that $L(\Gamma,E')$ has dimension 1 in degree 1, i.e., $\ell(\Gamma, E') = (0,1,0)$.
Let us now consider Theorem~\ref{thm:maps} in this example. Let $\theta'_i$ denote the restriction of $\theta_i$ to $k[\Star(E' \smallsetminus E)]$. Note that $\zeta_1 := \theta'_3$ is supported on $\lk_\Gamma(E')$. Extend $\{ \zeta_1 \}$ to a basis for $k[\lk_\Gamma(E')] \cap (\theta'_1, \theta'_2, \theta'_3)$, e.g., by choosing $\zeta_2$ to be a linear combination of $\theta'_1$ and $\theta'_2$ in which the coefficient of $x_c$ vanishes. Then $\zeta_1, \zeta_2$ is a special l.s.o.p. for $k[\lk_\Gamma(E')]$, and the map $\phi$ in Theorem~\ref{thm:maps} is given as follows. First, we set \[ \phi(x_a) = x_a, \quad \phi(x_b) = x_b, \quad \phi(x_u) = x_u, \quad \phi(x_v) = x_v, \quad \phi(x_w) = 0. \] Then, writing $\theta_2 = \lambda_c x_c + \lambda_a x_a + \lambda_v x_v$, with all three coefficients nonzero, we set \[ \phi(x_c) = \frac{-1}{\lambda_c} (\lambda_ax_a + \lambda_v x_v). \]
Note that there is no subset of $\{ \theta_1, \theta_2, \theta_3 \}$ whose restrictions to $k[\lk_\Gamma(E')]$ form an l.s.o.p. This explains and motivates our two-step process for constructing the map: first restricting to $\Star(E' \smallsetminus E)$ and then intersecting with $k[\lk_\Gamma(E')]$ to produce the special l.s.o.p. that yields the functorial map $\phi \colon L(\Gamma,E) \to L(\Gamma,E')$.
Let us also describe how Theorems~\ref{thm:increase} and \ref{thm:interiornonvanish} manifest in this example. For Theorem~\ref{thm:interiornonvanish}, observe that the face $F = \{a, b\}$ in $\lk_\Gamma(E')$ has an interior partition $F = \{a\} \sqcup \{b\}$. The proof in this case shows that the classes of both $x_a$ and $x_b$ are nonzero in $L(\Gamma,E')$, for any choice of special l.s.o.p.
Finally, note that $L(\Gamma,E) = 0$ and $L(\Gamma, E') \neq 0$, so there is no surjective map of graded vector spaces $L(\Gamma,E) \to L(\Gamma,E')$. In this case, $\sigma(E) \neq \sigma(E')$. Thus, we see that the hypothesis $\sigma(E) = \sigma(E')$ cannot be dropped in Theorem~\ref{thm:increase}. \end{example}
\noindent \textbf{Acknowledgments.} We thank the referees for their helpful comments. The work of ML is supported by an NDSEG fellowship and the work of SP is supported in part by NSF DMS--2001502 and DMS--2053261.
\section{Preliminaries} \label{sec:sr}
We begin by recalling definitions and background results that will be used throughout, following \cite[Chapter~III]{Stanley96} and \cite{Athanasiadis16}. We work over a field $k$. In particular, all rings are commutative $k$-algebras and singular homology is computed with coefficients in $k$.
\subsection{Triangulations of simplices} \label{sec:triangulations} In this section only, for the purposes of providing context, we allow that the field $k$ may be finite, and the triangulation $\sigma \colon \Gamma \to 2^V$ is not necessarily quasi-geometric.
We recall the notion of a homology triangulation, following \cite{Athanasiadis12}. A $d$-dimensional simplicial complex $\Gamma$ with trivial reduced homology is a \emph{homology ball} of dimension $d$ if there is a subcomplex $\partial \Gamma \subset \Gamma$ such that \begin{itemize} \item $\partial \Gamma$ is a homology sphere of dimension $d -1$,
\item $\lk_\Gamma(F)$ is a homology sphere of dimension $d - |F|$ for $F \not \in \partial \Gamma$, and
\item $\lk_\Gamma(F)$ is a homology ball of dimension $d - |F|$ for all nonempty $F \in \partial \Gamma$. \end{itemize} The \emph{interior faces} of a homology ball $\Gamma$ are the faces not contained in $\partial \Gamma$. A \emph{homology triangulation} of the simplex $2^V$ is a finite simplicial complex $\Gamma$ and a map $\sigma\colon \Gamma \to 2^V$ such that for every non-empty $U \subset V$, \begin{itemize} \item the simplicial complex $\Gamma_U := \sigma^{-1}(2^U)$ is a homology ball of dimension $\vert U \vert - 1$. \item $\sigma^{-1}(U)$ is the set of interior faces of the homology ball $\sigma^{-1}(2^U)$. \end{itemize} \noindent Note that the Betti numbers of a simplicial complex, and hence the property of being a homology ball, depend only on the characteristic of the field $k$. Homology triangulations are a special case of the (strong) formal subdivisions of Eulerian posets considered in \cite[\S 7]{Stanley92} and \cite[\S 3]{KatzStapledon16}.
The \emph{carrier} of a face $F \in \Gamma$ is $\sigma(F)$. A homology triangulation $\sigma\colon \Gamma \to 2^V$ is \emph{quasi-geometric} if there is no face $F \in \Gamma$ and $U \subset V$ such that the dimension of $\Gamma_U$ is strictly smaller than the dimension of $F$ and the carrier of every vertex in $F$ is contained in $U$. A homology triangulation is \emph{geometric} if it can be realized in $\mathbb{R}^n$ as the subdivision of a geometric simplex into geometric simplices. Every geometric homology triangulation is quasi-geometric.
The local $h$-vector, which we have defined in the introduction as the Hilbert function of the local face module, can be expressed in terms of $h$-vectors of subcomplexes of links of faces in the homology balls $\Gamma_U$: \begin{equation} \label{eq:localh}
\ell(\Gamma, E) = \sum_{U \supset \sigma(E)} (-1)^{|V| - \vert U \vert} h(\lk_{\Gamma_U}(E)). \end{equation} Note that \eqref{eq:localh} makes sense even when $k$ is finite or $\sigma \colon \Gamma \to 2^V$ is not quasi-geometric, and should be taken as the definition of the local $h$-vector in this broader context.
\begin{theorem}[\cite{Stanley92, Athanasiadis12, KatzStapledon16}]\label{t:localproperties}
Let $\sigma \colon \Gamma \to 2^V$ be a homology triangulation, let $E$ be a face of $\Gamma$ and let $d = |V| - |E|$. Then the local $h$-vector $(\ell_0, \ldots, \ell_d)$ satisfies: \\ \begin{tabular}{lll} \quad \quad $\bullet$ & \emph{(symmetry)} & $\ell_i = \ell_{d-i};$ \\ \quad \quad $\bullet$ & \emph{(non-negativity)} & if $\Gamma$ is quasi-geometric, then $\ell_i \geq 0;$\\ \quad \quad $\bullet$ & \emph{(unimodality)} &
if $\Gamma$ is regular, then $\ell_0 \leq \ell_1 \leq \cdots \leq \ell_{\lfloor d/2 \rfloor}$. \end{tabular} \end{theorem}
\noindent Note that the proof of non-negativity for quasi-geometric triangulations, due to Stanley and Athanasiadis, is via the identification with the Hilbert function of the local face module. It suffices to consider the case where $k$ is infinite, since \eqref{eq:localh} is invariant under field extensions.
\subsection{Face rings and special l.s.o.p.s}\label{ss:sr}
Here, and for the remainder of the paper, the field $k$ is fixed and infinite, and all triangulations are quasi-geometric homology triangulations.
Given a finite simplicial complex $\Gamma$ with vertex set $V = \{v_1, \ldots, v_n\}$, let $k[\Gamma]$ denote the \emph{face ring}. In other words, for each subset $F \subset V$, let $x^F$ be the corresponding squarefree monomial in the polynomial ring $k[x_1, \ldots, x_n]$, i.e.,
$
x^F:= \prod_{v_i \in F} x_i.
$
Then the face ring is
\[
k[\Gamma] := k[x_1, \ldots, x_n] / (x^F : F \mbox{ is not a face in } \Gamma).
\]
Given a subcomplex $\Gamma'$ of $\Gamma$, we have a natural restriction map $k[\Gamma] \rightarrow k[\Gamma']$, taking $x^F$ to $x^F$ if $F \in \Gamma'$ and to 0 otherwise. Given $\theta \in k[\Gamma]$, let $\theta|_{\Gamma'}$ denote the image of $\theta$ in $k[\Gamma']$. In particular, each $F$ in $\Gamma$ may be viewed as a subcomplex, and we write $\theta|_F$ for the restriction of $\theta$ to this subcomplex.
Note that $k[\Gamma]$ is graded by degree. By definition, a linear system of parameters (l.s.o.p.) for a finitely generated graded $k$-algebra $R$ of Krull dimension $d$ is a sequence of elements $\theta_1, \ldots, \theta_d$ in $R_1$ such that $R/(\theta_1, \ldots, \theta_d)$ is a finite-dimensional $k$-vector space. If $\Gamma$ is a Cohen-Macaulay complex (i.e., if $k[\Gamma]$ is a Cohen-Macaulay ring) and $\theta_1, \ldots, \theta_d$ is an l.s.o.p.\ for $k[\Gamma]$, then $(\theta_1, \ldots, \theta_d)$ is a regular sequence and the $h$-polynomial of $\Gamma$ is the Hilbert series of $k[\Gamma]/(\theta_1, \ldots, \theta_d)$. Links of faces in triangulations of simplices are Cohen-Macaulay \cite{Reisner76}.
Suppose $\Gamma$ has dimension $d-1$, so $k[\Gamma]$ has Krull dimension $d$. Then a sequence of elements $\theta_1, \ldots, \theta_d$ in $k[\Gamma]_1$ is an l.s.o.p. for $k[\Gamma]$ if and only if the following condition is satisfied \cite[Lemma~2.4(a)]{Stanley96}:
\renewcommand{\labelitemi}{$(*)$} \begin{itemize}
\item For every face $F \in \Gamma$ (or equivalently, for every facet $F \in \Gamma$), the restrictions $\theta_1|_F, \ldots, \theta_d|_F$ span a vector space of dimension $|F|$. \end{itemize}
\noindent This characterization provides flexibility in constructing l.s.o.p.s in which the linear functions have specified support, where the \emph{support} of $\theta = \sum a_i x_i$ is $\supp(\theta) := \{ v_i : a_i \neq 0 \}$.
\begin{lemma} \label{lemma:lsopexistence} Let $S_1, \ldots, S_d$ be subsets of the vertices of $\Gamma$. Then there is an l.s.o.p. $\theta_1, \ldots, \theta_d$ for $k[\Gamma]$ such that $\supp (\theta_i) = S_i$ for $1 \leq i \leq d$ if and only if, for every face $F \in \Gamma$, \begin{equation} \label{eq:marriageinequality}
| \{ S_i : S_i \cap F \neq \emptyset \}| \geq |F|. \end{equation} \end{lemma}
\begin{proof} The argument is similar to that given by Stanley in \cite[Corollary~4.4]{Stanley92}. The necessity of \eqref{eq:marriageinequality} follows immediately from (*). We now prove its sufficiency. Suppose $S_1, \ldots, S_d$ are chosen such that \eqref{eq:marriageinequality} holds for every $F \in \Gamma$.
Let $N = |S_1| + \cdots + |S_d|$, and consider the space $k^N$ parametrizing tuples $(\theta_1, \ldots, \theta_d)$ with $\supp (\theta_i) \subset S_i$. Fix $F = \{v_1, \dotsc, v_k\} \in \Gamma$. Let $X_F \subset k^N$ parametrize the tuples
whose restrictions to $F$
span a vector space of dimension $|F|$. Note that $X_F$ is Zariski open. By Hall's Marriage Theorem, there is a permutation $\sigma \in \mathfrak{S}_d$ such that $v_i \in S_{\sigma(i)}$. If we set $\theta_{\sigma(i)} = x_i$ for $1 \le i \le k$, and $\theta_{\sigma(i)} = 0$ for $i > k$, then $\theta \in X_F$, and hence $X_F$ is nonempty. Also, the subset of $k^N$ where all coordinates are nonzero is Zariski open and nonempty. Since $k$ is infinite, the intersection of these nonempty Zariski open subsets of $k^N$ is nonempty, and hence there is an l.s.o.p. $\theta_1, \ldots, \theta_d$ with $\supp(\theta_i)= S_i$. \end{proof}
Let $\sigma \colon \Gamma \to 2^V$ be a quasi-geometric homology triangulation, and let $E \in \Gamma$ be a face.
\begin{definition}[\cite{Stanley92, Athanasiadis12b}] \label{def:special} A linear system of parameters $\theta_1, \dotsc, \theta_{d}$ for $k[\lk_{\Gamma}(E)]$ is \textit{special} if, for each vertex $v \in V$ with $v \not \in \sigma(E)$, there is an element $\theta_v$ of the l.s.o.p. such that $\supp(\theta_v)$ consists of vertices in $\lk_{\Gamma}(E)$ whose carrier contains $v$, and such that $\theta_v \not= \theta_{v'}$ for $v \not= v'$. \end{definition}
In other words, after reordering so that $\sigma(E)^c = \{v_1, \ldots, v_b\}$, an l.s.o.p. for $k[\lk_\Gamma(E)]$ is special if we can order it $\theta_1, \ldots, \theta_d$ such that \[ \supp(\theta_i) \subset \{ w \in \lk_\Gamma(E) : v_i \in \sigma(w)\}, \] for $1 \leq i \leq b$. The existence of special l.s.o.p.s is well-known to experts and the proof is similar to Stanley's argument in the case $E = \emptyset$. For completeness, we provide a short proof.
\begin{proposition} \label{prop:speciallsopexistence} Suppose $k$ is infinite. Let $\sigma\colon \Gamma \to 2^V$ be a quasi-geometric homology triangulation of a simplex, and let $E$ be a face of $\Gamma$. Then there is a special l.s.o.p. for $k[\lk_{\Gamma}(E)]$. \end{proposition} \begin{proof}
Let $V = \{v_1, \ldots, v_n\}$. After renumbering, we may assume that $\sigma(E)^c = \{v_1, \dotsc, v_b\}$. Fix $d = n - \vert E \vert$. Note that $b \leq d$. We define subsets $S_1, S_2, \dotsc, S_{d}$ of the vertices in $\lk_{\Gamma}(E)$, as follows. For $i \le b$, let $S_i$ be the set of vertices $w$ such that $v_i \in \sigma(w)$.
For $i > b$, let $S_i$ be the set of all vertices of $\lk_{\Gamma}(E)$. Because $\sigma$ is quasi-geometric, for each face $F$ of $\lk_{\Gamma}(E)$, the union of the sets $\sigma(w) \subset V$, as $w$ ranges over vertices of $E \sqcup F$, has size at least $|E| + |F|$. It follows that $|\{i \leq b : S_i \cap F \neq \emptyset \}| \geq |F| - (d-b)$. Since $S_j \cap F \neq \emptyset$ for $j > b$, we conclude that $|\{i : S_i \cap F \neq \emptyset \}| \geq |F|$. Hence, by Lemma~\ref{lemma:lsopexistence}, there is an l.s.o.p. $\theta_1, \ldots, \theta_d$ for $k[\lk_\Gamma(E)]$ with $\supp(\theta_i) = S_i$. \end{proof}
\section{A resolution of the local face module}
In this section, we prove Theorem~\ref{thm:resolution}, giving an explicit resolution of the local face module $L(\Gamma,E)$ by a subcomplex of the Koszul resolution of $k[\lk_{\Gamma}(E)]/(\theta_1, \dotsc, \theta_{d})$. We continue to use the notation established above. In particular, $\sigma\colon \Gamma \to 2^V$ is a quasi-geometric homology triangulation of the simplex with vertex set $V = \{v_1, \ldots, v_n\}$. We consider a face $E \in \Gamma$ with $d = n - |E|$ and $b = n - |\sigma(E)|$. After reordering, we assume $\sigma(E)^c = \{v_{1}, \dotsc, v_{b}\}$. For $S \subset \{v_1, \ldots, v_d\}$, we consider the ideal $I_S \subset k[\lk_\Gamma(E)]$ given by \begin{equation*} I_S := ( x^F : \, \sigma(F \sqcup E)^c \subset S). \end{equation*}
Let $\theta_1, \ldots \theta_d$ be a special l.s.o.p. for $k[\lk_\Gamma(E)]$. We may assume that \[ \supp(\theta_i) \subset \{ w \in \lk_\Gamma(E) : v_i \in \sigma(w) \}, \] for $1 \leq i \leq b$. For any $v_i \in S$, multiplication by $\theta_i$ gives a map $\lambda_i \colon I_{S} \to I_{S \smallsetminus \{v_i\}}$, and we consider the complex of graded $k[\lk_{\Gamma}(E)]$-modules
\begin{equation} \label{eq:resolution}
0 \to k[\lk_{\Gamma}(E)][-d] \to \bigoplus_{ |S| = d - 1 } I_{S}[-(d-1)] \to \dotsb \to \bigoplus_{|S| = 1} I_{S}[-1] \to I \to L(\Gamma,E) \to 0, \end{equation} in which
the differential restricted to $I_S$, for $S= \{v_{i_0}, \ldots, v_{i_k}\}$, with $i_0 < \cdots < i_k$, is $\oplus_{j = 0}^k (-1)^j \lambda_{i_j}$.
\begin{example} If $E$ is an interior face of $\Gamma$ then every l.s.o.p. is special, $I_S = k[\lk_{\Gamma}(E)]$ for all $S$, and \eqref{eq:resolution} is the Koszul resolution of $L(\Gamma,E) = k[\lk_{\Gamma}(E)]/(\theta_1,\ldots,\theta_{d})$. \end{example}
\begin{proof}[Proof of Theorem \ref{thm:resolution}] We must show \eqref{eq:resolution} is exact.
We begin by considering two complexes of $k[\lk_{\Gamma}(E)]$-modules
studied by Stanley and Athanasiadis. Recall that, for $U \subset V$, we write $\Gamma_U := \sigma^{-1}(2^U)$.
Say $U \supset \sigma(E)$ and $U \smallsetminus \sigma(E) = \{v_{i_0}, \ldots, v_{i_k}\}$, with $i_0 < \cdots < i_k$. For $0 \leq j \leq k$, let $\rho_j \colon k[\lk_{\Gamma_U}(E)] \to k[\lk_{\Gamma_{U \smallsetminus \{v_{i_j}\}}}(E)]$ be the restriction map. The first complex we consider is \small \begin{equation}\label{eq:modcomp} \begin{tikzcd}
k[\lk_{\Gamma}(E)] \arrow[r] & \bigoplus\limits_{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 1}} k[\lk_{\Gamma_U}(E)] \arrow[r] &\bigoplus\limits_{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 2}} k[\lk_{\Gamma_U}(E)] \arrow[r] &\cdots \arrow[r] & k[\lk_{\Gamma_{\sigma(E)}}(E)] \arrow[r] & 0, \end{tikzcd} \end{equation} \normalsize in which the differential restricted to $k[\lk_{\Gamma_U}(E)]$ is $\bigoplus_j (-1)^j \rho_j$. Next, we consider its quotient by $(\theta_1,\ldots,\theta_{d})$:
\begin{equation}\label{eq:quotientedcomplex} \begin{tikzcd}
\frac{k[\lk_{\Gamma}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n-1}}} \frac{k[\lk_{\Gamma_{U}}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n-2}}} \frac{k[\lk_{\Gamma_{U}}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] &\cdots \arrow[r] & \frac{k[\lk_{\Gamma_{\sigma(E)}}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] &0.
\end{tikzcd} \end{equation}
For any $U \subset V$, with $U \supset \sigma(E)$, let $S_U$ be defined as \[ S_U := (U \cap \{v_1, \ldots, v_b\}) \cup \{ v_{b+1}, \ldots, v_{d}\}. \]
Then $\dim k[\lk_{\Gamma_U}(E)] = |S_U|$ and it follows that the restriction of $\theta_i$ to $\lk_{\Gamma_U}(E)$ is nonzero if and only if $v_i \in S_U$. Furthermore, $\{ \theta_i|_{\lk_{\Gamma_U}(E)} : v_i \in S_U \}$ is a special l.s.o.p. for $k[\lk_{\Gamma_{U}}(E)]$. Stanley and Athanasiadis proved that both \eqref{eq:modcomp} and \eqref{eq:quotientedcomplex} are exact, and the kernel of the first arrow in \eqref{eq:quotientedcomplex} is $L(\Gamma,E)$. (We will recall the proofs below.) Using the additivity of Hilbert functions in exact sequences, they deduced that the Hilbert function of $L(\Gamma, E)$ satisfies \eqref{eq:localh} \cite{Stanley92, Athanasiadis12}.
With the goal of proving that \eqref{eq:resolution} is exact, we take Koszul resolutions of each term in \eqref{eq:quotientedcomplex} to build a double complex of $k[\lk_{\Gamma}(E)]$-modules. Since $k[\lk_{\Gamma_{U}}(E)]$ is Cohen-Macaulay, the special l.s.o.p. $\{ \theta_i|_{\lk_{\Gamma_U}(E)} : v_i \in S_U \}$ is a regular sequence. Hence the corresponding Koszul complex $K^{\bullet}_U$ \begin{center} \begin{tikzcd}[column sep = small] 0 \arrow[r] &k[\lk_{\Gamma_U}(E)]_{S_U} \arrow[r] &\bigoplus\limits_{\mathclap{\substack{S \subset S_U \\ \vert S \vert = \vert S_U \vert - 1}}} k[\lk_{\Gamma_U}(E)]_S \arrow[r]& \cdots \arrow[r] &\bigoplus\limits_{\mathclap{\substack{S\subset S_U\\ \vert S \vert = 1}}} k[\lk_{\Gamma_U}(E)]_S \arrow[r] &k[\lk_{\Gamma_U}(E)] \arrow[r]& \frac{k[\lk_{\Gamma_U}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] &0, \end{tikzcd} \end{center}
is exact. Here, for a graded module $M$ and a finite set $S$, we write $M_{S} := M[-|S|]$. Replacing each term in \eqref{eq:quotientedcomplex} with its corresponding Koszul resolution, gives a complex of complexes \begin{equation}\label{eq:koszul} \begin{tikzcd} K_{V}^{\bullet} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 1}}} K_U^{\bullet} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 2}}} K_U^{\bullet} \arrow[r] & \cdots \arrow[r] & K_{\sigma(E)}^{\bullet} \arrow[r] & 0, \end{tikzcd} \end{equation} which may be expanded as the commuting double complex shown in Figure~\ref{fig:doublecx}. \begin{figure}
\caption{The double complex obtained by taking the Koszul resolution of \eqref{eq:quotientedcomplex}.}
\label{fig:doublecx}
\end{figure} The columns of this complex are exact by construction. We claim that the rows are also exact, and prove this using ideas from \cite[Theorem~4.6]{Stanley92}. First, we show that all rows except for the top row are exact. Choose a subset $S$ of $\{v_1, \ldots, v_d\}$, and consider the piece of the complex indexed by $S$: \begin{equation}\label{eq:generalS} \begin{tikzcd}[column sep = small]
k[\lk_{\Gamma}(E)]_S \arrow[r] & \bigoplus\limits_{\substack{ S \subset S_U \\ \vert U \vert = n - 1}} k[\lk_{\Gamma_U}(E)]_S \arrow[r] &\bigoplus\limits_{\substack{S \subset S_U \\ \vert U \vert = n - 2}} k[\lk_{\Gamma_U}(E)]_S \arrow[r] &\cdots
\arrow[r] & 0. \end{tikzcd} \end{equation}
When $S = \emptyset$, we obtain (\ref{eq:modcomp}). Observe that the complex (\ref{eq:generalS}) is multigraded by $\mathbb{N}^m$, where $m$ is the number of vertices of $\lk_{\Gamma}(E)$. Explicitly, $\deg x_1^{\alpha_1} \dotsb x_m^{\alpha_m} = (\alpha_1, \dotsc, \alpha_m)$. Therefore it suffices to show exactness on graded pieces. Fix $\alpha = (\alpha_1, \dotsc, \alpha_m)$. By the definition of the face ring, every term of (\ref{eq:generalS}) will have $0$ in the graded piece corresponding to $\alpha$ unless the set of vertices with $\alpha_i \not= 0$ forms a face $F$, in which case the $\alpha$-graded part can be identified with the augmented cochain complex of a simplex, indexed by all $U$ that contain $\sigma(E) \cup \sigma(F) \cup S$, and hence is exact.
We now recall the proof that the top row of the double complex, \eqref{eq:quotientedcomplex}, is exact. \begin{center} \begin{tikzcd}
\frac{k[\lk_{\Gamma}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n-1}}} \frac{k[\lk_{\Gamma_{U}}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n-2}}} \frac{k[\lk_{\Gamma_{U}}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] &\cdots \arrow[r] & \frac{k[\lk_{\Gamma_{\sigma(E)}}(E)]}{(\theta_1, \dotsc, \theta_{d})} \arrow[r] &0 \end{tikzcd} \end{center} The proof involves showing that the quotient of (\ref{eq:modcomp}) by $(\theta_{d}, \dotsc, \theta_{d - (r-1)})$ is exact by induction on $r$. The case of $r = 0$ is the exactness of the second row.
Now assume that (\ref{eq:modcomp}) remains exact after quotienting by $(\theta_{d}, \dotsc, \theta_{d - (r-1)})$. Let $C^i$ denote the $i$th term of (\ref{eq:modcomp}) tensored with $k[\lk_{\Gamma}(E)]/(\theta_{d}, \dotsc, \theta_{d - (r - 1)})$. By the induction hypothesis, we have an exact sequence \begin{equation*} C^{\bullet}\colon \enskip C^0 \to C^1 \to \dotsb \to C^{b} \to 0. \end{equation*} Set $m = d - r$. Recall that
$\theta_i = 0 \in k[\lk_{\Gamma_{U}}(E)]$ if $v_i \notin S_U$, and that $\{ \theta_i|_{\lk_{\Gamma_U}(E)} : v_i \in S_U \}$ is a special l.s.o.p. for $k[\lk_{\Gamma_{U}}(E)]$. Also, for $\sigma(E) \subset U$, $v_m \notin S_U$ if and only if $v_m \notin U$. Hence, we have an exact sequence
\begin{equation} \label{eq:multbym} 0 \to B^{\bullet} \to C^{\bullet} \xrightarrow{\theta_{m}} C^{\bullet} \to C^{\bullet}/(\theta_{m}) \to 0, \end{equation} where \[ B^{i} = \bigoplus_{\substack{U \supset \sigma(E), \enskip \vert U \vert = n-i \\ v_m \not \in U}}k[\lk_{\Gamma_U}(E)]/(\theta_{d}, \dotsc, \theta_{m + 1}). \]
For example, when $m > b$, $v_m \in \sigma(E)$ and $B^{\bullet} = 0$. Up to signs and a degree shift, we can then identify $ B^{\bullet}$ with the complex (\ref{eq:modcomp}) for $\Gamma|_{\{v_m\}^c}$ quotiented by $(\theta_{d}, \dotsc, \theta_{m + 1})$. Then $ B^{\bullet}$ is exact by the induction hypothesis applied to $\Gamma|_{\{v_m\}^c}$. By breaking (\ref{eq:multbym}) up into two short exact sequences we see that $H^i(C^{\bullet}/(\theta_{m})) \cong H^{i + 2}(B^{\bullet}) = 0$ as desired.
Now that we know the exactness of (\ref{eq:koszul}), let \begin{equation*} \begin{split} A^{\bullet} &= \ker \Bigg( K_{V}^{\bullet} \to \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 1}}} K_U^{\bullet} \Bigg). \end{split}\end{equation*} Then, by construction, we have an exact sequence of complexes \begin{equation*} \begin{tikzcd} 0 \arrow[r] & A^{\bullet} \arrow[r] &K_{V}^{\bullet} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 1}}} K_U^{\bullet} \arrow[r] & \bigoplus\limits_{\mathclap{\substack{U \supset \sigma(E) \\ \vert U \vert = n - 2}}} K_U^{\bullet} \arrow[r] & \cdots \arrow[r] & K_{\sigma(E)}^{\bullet} \arrow[r] & 0. \end{tikzcd} \end{equation*} As above, we repeatedly apply the long exact sequence on cohomology to see that $A^{\bullet}$ is exact. We may then identify $A^{\bullet}$ with the exact sequence
$$0 \to k[\lk_{\Gamma}(E)][-n] \to \oplus_{ |S| = d - 1 } I_{S}[-(n-1)] \to \dotsb \to \oplus_{|S| = 1} I_{S}[-1] \to I \to A^0 \to 0.$$ Since $I$ surjects onto $A^0$ and $A^0 \subset k[\lk_{\Gamma}(E)]/(\theta_1, \dotsc, \theta_{d})$, we conclude that $A^0 = L(\Gamma,E)$, as required. \end{proof}
\begin{remark}\label{r:specialcase} Let $\sigma\colon \Gamma \to 2^V$ be a quasi-geometric homology triangulation of a simplex, and let $E$ be a face of $\Gamma$. Let $F \in \lk_\Gamma(E)$ such that $F \sqcup E$ is interior, and suppose that $F = \mathcal{A}_F$ is an interior partition of $F$, i.e., with $F_1 = F_2 = \emptyset$. Suppose that $F$ is not a $U$-pyramid. By Corollary \ref{cor:presentation}, $J$ is generated by elements of the form $\theta_i \cdot x^{F}$ for $F \sqcup E$ interior or $\theta_j \cdot x^G$ for some $G$ with $\sigma(G \sqcup E) = \{v_j\}^c$. Because $F$ is not a $U$-pyramid, no monomial appearing in any of these generators divides $x^F$, so $x^F$ is nonzero in $L(\Gamma, E)$. This proves Theorem~\ref{thm:interiornonvanish} in the special case when $F_1 = F_2 = \emptyset$. \end{remark}
\section{Functorial properties of local face modules}
In this section, we prove Theorem~\ref{thm:maps}, giving natural maps between local face modules. Consider a quasi-geometric homology triangulation $\sigma\colon \Gamma \to 2^V$, and let $E \subset E'$ be faces of $\Gamma$.
\begin{lemma}\label{lemma:lsop} Let $R$ be a graded $k$-algebra with $R_0 = k$. Let $\{ \theta_1, \dotsc, \theta_n \}$ be an l.s.o.p. for $R[x_1, \dotsc, x_m]$, where each $x_j$ has degree $1$. Then there is a unique graded $R$-algebra isomorphism \[ \phi \colon R[x_1, \dotsc, x_m]/(\theta_1, \dotsc, \theta_n) \rightarrow R/R \cap (\theta_1, \dotsc, \theta_n). \] Moreover, any $k$-basis for $R_1 \cap (\theta_1, \dotsc, \theta_n)$ is an l.s.o.p. for $R$ and generates $R \cap (\theta_1, \dotsc, \theta_n)$. \end{lemma}
\begin{proof} Consider the exact sequence of $k$-linear maps \[ 0 \rightarrow R_1 \rightarrow R[x_1, \dotsc, x_m]_1 \rightarrow (x_1,\dotsc, x_m)_1 \rightarrow 0, \] where the right hand map takes $r + \sum_i \alpha_i x_i$ to $\sum_i \alpha_i x_i$, for any $r \in R_1$ and $\alpha_i \in k$. This restricts to an exact sequence of $k$-linear maps \[ 0 \rightarrow R_1 \cap (\theta_1, \dotsc, \theta_n)_1 \rightarrow (\theta_1, \dotsc, \theta_n)_1 \rightarrow (x_1,\dotsc, x_m)_1 \rightarrow 0, \] where the surjectivity of the right-hand map follows from the fact that $\theta_1, \dotsc, \theta_n$ is an l.s.o.p. Hence, for $1 \le i \le m$, we can write $x_i = r_i + s_i$, for some $r_i \in R_1$ and $s_i \in (\theta_1, \dotsc, \theta_n)_1$. For any $R$-algebra map $\phi \colon R[x_1, \dotsc, x_m]/(\theta_1, \dotsc, \theta_n) \rightarrow R/R \cap (\theta_1, \dotsc, \theta_n),$ we must have that $\phi(x_i) = r_i$, so there is a unique such map. On the other hand, the $R$-algebra homomorphism defined by $\phi(x_i) = r_i$ is well-defined, since if $x_i = r_i' + s_i'$, for some $r_i' \in R_1$ and $s_i' \in (\theta_1, \dotsc, \theta_n)_1$, then $r_i - r_i' \in R_1 \cap (\theta_1, \dotsc, \theta_n)_1$. Note that the unique $R$-algebra homomorphism from $R/R \cap (\theta_1, \dotsc, \theta_n)$ to $R[x_1, \dotsc, x_m]/(\theta_1, \dotsc, \theta_n)$ is the inverse of $\phi$.
Since $\phi$ is an isomorphism and factors through $R/(R_1 \cap (\theta_1, \dotsc, \theta_n)_1)$, we conclude that the $R$-ideal $R \cap (\theta_1, \dotsc, \theta_n)$ is generated in degree $1$ and hence any $k$-basis for $R_1 \cap (\theta_1, \dotsc, \theta_n)$ is an l.s.o.p. for $R$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:maps}]
Note that $\Star(E' \smallsetminus E)$ is the join of $E' \smallsetminus E$ with $\lk_{\Gamma}(E')$. The face ring $k[\Star(E' \smallsetminus E)]$ is therefore a polynomial ring over $k[\lk_\Gamma(E')]$. Its Krull dimension is equal to $d = \dim k[\lk_\Gamma(E)]$, and hence the restrictions $\theta'_1, \ldots, \theta'_d$ form an l.s.o.p., where $\theta'_i := \theta_i|_{\Star(E' \smallsetminus E)}$. By Lemma~\ref{lemma:lsop}, there is a unique graded $k[\lk_\Gamma(E')]$-algebra homomorphism $k[\Star(E' \smallsetminus E)]/(\theta'_1, \ldots, \theta'_d) \to k[\lk_\Gamma(E')]/ (k[\lk_\Gamma(E')] \cap (\theta'_1, \ldots, \theta'_d))$, which lifts to the unique homomorphism $\phi$ in the statement of the theorem. It remains to construct a special l.s.o.p. for $k[\lk_\Gamma(E')]$ with the specified properties.
After reordering, we may assume that \[ \sigma(E)^c = \{ v_1, \ldots, v_b\}, \quad \supp(\theta_i) \subset \{ w : v_i \in \sigma(w) \}, \ \mbox{ for } 1 \leq i \leq b, \quad \mbox{ and } \sigma(E')^c = \{v_1, \ldots, v_{b'}\}. \]
Note, in particular, that $\theta'_i$ is supported on vertices in the link of $E'$, for $1 \leq i \leq b'$. By Lemma~\ref{lemma:lsop}, any $k$-basis for $k[\lk_\Gamma(E')] \cap (\theta'_1, \ldots, \theta'_d)$ is an l.s.o.p. for $k[\lk_\Gamma(E')]$. Set $\zeta_i = \theta_i|_{\lk_\Gamma(E')}$, for $1 \leq i \leq b'$, and note that $\{\zeta_1, \ldots, \zeta_{b'}\}$ is linearly independent. Extending this independent set to a basis produces a special l.s.o.p. for $k[\lk_\Gamma(E')]$. It remains to verify that $\phi(L(\Gamma, E)) \subset L(\Gamma, E')$. Let $F \in \lk_{\Gamma}(E)$ be a face with $F \sqcup E$ interior. If $F$ is not in $\Star(E' \smallsetminus E)$, then $\phi(x^F) = 0$. Otherwise, $F$ can be written uniquely as the join of possibly empty faces $F_1 \subset E' \smallsetminus E$ and $F_2 \in \lk_{\Gamma}(E')$. Then $F_2 \sqcup E'$ is interior, and $\phi(x^F) = \phi(x^{F_1})x^{F_2} \in (x^{F_2})$. Hence $\phi(x^F) \in L(\Gamma,E')$, as required. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:increase}] Let $E \subset E'$ be faces of a quasi-geometric homology triangulation $\Gamma$ of a simplex, and assume that $\sigma(E) = \sigma(E')$. It is enough to show that the induced map $\phi \colon L(\Gamma, E) \to L(\Gamma,E')$ given by Theorem~\ref{thm:maps} is surjective. Note that $L(\Gamma,E')$ is generated by the monomials $x^F$ such that $F \in \lk_\Gamma(E')$ and $F \sqcup E'$ is interior. If $F$ is such a face, then it is also in the link of $E$ and, since $\sigma(E) = \sigma(E')$, the face $(F \sqcup E) < (F \sqcup E')$ is also interior. Then $\phi(x^F) = x^F$, and the theorem follows. \end{proof}
\section{Restrictions of local face modules}
In this section, we use the resolution found in Theorem~\ref{thm:resolution} to show that the vanishing of a local face module $L(\Gamma, E)$ implies the vanishing of a \emph{restricted local face module} $L(\Gamma, \mathcal{A}_F \sqcup E)|_{F_1 \sqcup F_2},$ for certain interior partitions $F_1 \sqcup F_2 \sqcup \mathcal{A}_F$. We then develop algebraic arguments, inspired by ideas from \cite{dMGPSS20}, to show that $F$ being a $U$-pyramid is necessary for the vanishing of the restricted local face module when $\vert F_1 \vert \le 2$ and thus prove Theorem~\ref{thm:interiornonvanish}.
We use the notation introduced in the introduction. Let $\Delta$ be a subcomplex of $\lk_{\Gamma}(E)$. For any $k[\lk_{\Gamma}(E)]$-module $M$, the \emph{restriction} of $M$ to $\Delta$ is $M|_{\Delta} := M \otimes_{k[\lk_{\Gamma}(E)]} k[\Delta]$, where $k[\Delta]$ is a $k[\lk_{\Gamma}(E)]$-module via the restriction map. By the resolution of $L(\Gamma,E)$ in Theorem~\ref{thm:resolution} and the right exactness of tensoring with $k[\Delta]$, we have an exact sequence \begin{equation}\label{e:restrict}
\bigoplus_{|S| = 1} I_{S}|_\Delta[-1] \to I|_\Delta \to L(\Gamma,E)|_\Delta \to 0. \end{equation}
Recall from Corollary~\ref{cor:presentation} that $L(\Gamma, E) \cong I/J$, where $J$ is the ideal generated by $\{\theta_i x^{F} : F \sqcup E \mbox{ is interior}\}$ and $\{\theta_{j} x^G : \sigma(G \sqcup E) = \{v_j\}^c \}$. Hence, $L(\Gamma,E)|_\Delta \cong I|_\Delta/J|_\Delta$, where $I|_\Delta,J|_\Delta$ are the $k[\Delta]$-ideals \begin{equation}\label{eq:Ires}
I|_\Delta = (x^H : H \subset \Delta, \sigma(H \sqcup E) = V), \end{equation} \begin{equation}\label{eq:Jres}
J|_\Delta = (\theta_1|_\Delta,\ldots,\theta_{d}|_\Delta) \cdot I|_\Delta + (\theta_{j}|_{\Delta} x^G: \enskip G \subset \Delta, \enskip \sigma(G \sqcup E) = \{v_j\}^c). \end{equation} For example, if $F$ is a face of $\lk_{\Gamma}(E)$, then $k[F]$ is a polynomial ring with variables indexed by the vertices of $F$, and
$L(\Gamma, E)|_{F}$ is identified with a quotient of ideals in this polynomial ring.
\begin{lemma}\label{lem:restriction} Let $\sigma\colon \Gamma \to 2^V$ be a quasi-geometric homology triangulation of a simplex, and let $E$ be a face of $\Gamma$. Let $F \in \lk_{\Gamma}(E)$ be a face with $F \sqcup E$ interior. Assume that $F$ is not a $U$-pyramid.
Then there is a surjective graded $k[F]$-module homomorphism
$$L(\Gamma, E)|_{F} \to L(\Gamma, \mathcal{A}_F \sqcup E)|_{F \smallsetminus \mathcal{A}_F}[-\vert \mathcal{A}_F \vert],$$ where the second term is a $k[F]$-module via the restriction map $k[F] \to k[F \smallsetminus \mathcal{A}_F]$. \end{lemma}
\begin{proof}
If $\Delta$ is a subcomplex of $\lk_{\Gamma}(E)$ contained in the closed star of $\mathcal{A}_F$, then $x^{\mathcal{A}_F}$ is a non-zero divisor in $k[\Delta]$. In particular, $x^{\mathcal{A}_F}$ is a non-zero divisor in $k[F]$ (this is also clear since $k[F]$ is a polynomial ring). Note that every face of $F$ with carrier codimension at most $1$ contains $\mathcal{A}_F$. Thus $I|_F = x^{\mathcal{A}_F} \cdot M$ and $J|_F = x^{\mathcal{A}_F} \cdot N$, where $M$ and $N$ are the ideals in $k[F]$ \[ M = (x^H: \enskip H \subset F \smallsetminus \mathcal{A}_F, \enskip \sigma(H \sqcup \mathcal{A}_F \sqcup E) = V), \] \[
N = (\theta_1|_F,\ldots,\theta_{d}|_F) \cdot M + (\theta_{j}|_{F} x^G: \enskip G \subset F \smallsetminus \mathcal{A}_F, \enskip \sigma(G \sqcup \mathcal{A}_F \sqcup E) = \{v_j\}^c). \] Then we have surjective graded $k[F]$-module homomorphisms \[
I|_F/J|_F \rightarrow M /N [-|\mathcal{A}_F|] \rightarrow M|_{F \smallsetminus \mathcal{A}_F}/N|_{F \smallsetminus \mathcal{A}_F}[-|\mathcal{A}_F|], \]
where the first map is the isomorphism taking $x^{\mathcal{A}_F} x^H \mapsto x^H$ and the second map is restriction. Finally the right hand term can be identified with $L(\Gamma, \mathcal{A}_F \sqcup E)|_{F \smallsetminus \mathcal{A}_F}[-\vert \mathcal{A}_F \vert]$. \end{proof}
We will derive Theorem~\ref{thm:interiornonvanish} from the following more technical statement.
\begin{theorem}\label{thm:localizedcase}
Let $\sigma\colon \Gamma \to 2^V$ be a quasi-geometric homology triangulation, and let $E$ be a face. Let $F \in \lk_{\Gamma}(E)$ be a face with $F \sqcup E$ interior. Suppose $\mathcal{A}_F = \emptyset$ and $F$ admits an interior partition $F = F_1 \sqcup F_2$. Assume that $F$ has no faces $G$ with $G \sqcup E$ interior and $ \vert G \vert < \vert F_1 \vert$. If $\vert F_1 \vert \le 2$, then $L(\Gamma, E)|_{F}$ is non-zero in degree $\vert F_1 \vert$. \end{theorem}
\begin{example}\label{example:nonrestriction} The conclusion of Theorem~\ref{thm:localizedcase} can fail when $\vert F_1 \vert \ge 3$, even for $\mathcal{A}_F = E = \emptyset$. Consider a geometric triangulation $\sigma \colon \Gamma \to 2^V$, where $V = \{v_1, \ldots, v_6 \}$ with a face $F = \{w_1, \ldots, w_6\}$ such that \[ \begin{array}{lll} \sigma(w_1) = \{ v_1, v_3, v_6 \} & \sigma(w_2) = \{ v_1, v_4, v_5 \} & \sigma(w_3) = \{ v_2, v_3, v_5 \} \\ \sigma(w_4) = \{ v_2, v_4, v_6 \} & \sigma(w_5) = \{ v_3, v_4, v_5 \} & \sigma(w_6) = \{ v_3, v_5, v_6 \} \end{array} \]
Then $\mathcal{A}_F = \emptyset$, and $F$ admits an interior partition given by $F_1 = \{w_1, w_4, w_5\}$, $F_2 = \{w_2, w_3, w_6\}$. Then \eqref{e:restrict} gives generators and relations for $L(\Gamma, \emptyset)|_{F}$, and a linear algebra computation shows that $L(\Gamma, \emptyset)|_{F} = 0$. \end{example}
Before proceeding with the proof of Theorem~\ref{thm:localizedcase}, we show how Theorem~\ref{thm:interiornonvanish} follows from it.
\begin{proof}[Proof of Theorem~\ref{thm:interiornonvanish}] We may assume that $F = F_1' \sqcup F_2' \sqcup \mathcal{A}_F$ is an interior partition of $F$ with $\vert F_1' \vert$ minimal among all possible interior partitions of $F$. In particular, if $\vert F_1' \vert = 2$, then there is no vertex $v \in F \smallsetminus \mathcal{A}_F$ such that $\{v\} \sqcup \mathcal{A}_F \sqcup E$ is interior, as then $\{v\} \sqcup (F_1' \sqcup F_2' \smallsetminus \{v\}) \sqcup \mathcal{A}_F$ would be an interior partition. Hence there are no faces $G$ of $F \smallsetminus \mathcal{A}_F$ with $G \sqcup \mathcal{A}_F \sqcup E$ interior and with cardinality smaller than $\vert F_1' \vert$.
By Theorem \ref{thm:localizedcase}, $L(\Gamma, \mathcal{A}_F \sqcup E)|_{F_1' \sqcup F_2'}$ is non-zero in degree $\vert F_1' \vert$. Then, by Lemma~\ref{lem:restriction}, $L(\Gamma, E)$ is nonzero in degree $\vert F_1' \vert + \vert \mathcal{A}_F \vert$. \end{proof}
We now proceed with the proof of Theorem~\ref{thm:localizedcase}. We begin with a series of three lemmas. Inspired by the results of \cite{dMGPSS20} in the case $E = \emptyset$, we consider the \emph{internal edge graph} of a subcomplex $\Delta \subset \lk_{\Gamma}(E)$. This is the graph contained in the $1$-skeleton of $\lk_{\Gamma}(E)$ consisting of edges $e \subset \Delta$ with $e \sqcup E$ interior.
\begin{lemma}\label{lem:intedge}
Assume $\sigma(E)$ has codimension at least $2$. Let $\Delta$ be a subcomplex of $\lk_{\Gamma}(E)$, and assume $\Delta$ has no vertices $v$ with $\{v\} \sqcup E$ interior. If $L(\Gamma, E)|_{\Delta}$ is zero in degree $2$, then each connected component of the internal edge graph of $\Delta$ satisfies one of the following. \begin{enumerate} \item The component is a tree, and it has at most one vertex $v$ with $\{v\} \sqcup E$ having carrier codimension more than $1$. \item The component has a unique cycle, and the carrier codimension of $\{w\} \sqcup E$ is equal to $1$ for every vertex $w$ in the component. \end{enumerate} \end{lemma}
\begin{proof}
From \eqref{e:restrict}, we have the following exact sequence for the degree $2$ part of $L(\Gamma, E)|_{\Delta}$.
$$ \bigoplus_{\vert S \vert = 1} (I_S)_1\otimes_{k[\lk_{\Gamma}(E)]} k[\Delta] \to I_2\otimes_{k[\lk_{\Gamma}(E)]} k[\Delta] \to (L(\Gamma, E)|_{\Delta})_2 \to 0.$$
Because $(L(\Gamma, E)|_{\Delta})_2 = 0$, the first map in the above complex is surjective. As $\Delta$ has no vertices $v$ with $\{v\} \sqcup E$ interior, we see that \begin{equation}\label{eq:intedge} (x^{e}: e \subset \Delta, \enskip e \sqcup E \text{ is interior })_2 = (x^{\{v\}} \theta_{i}: v \subset \Delta, \enskip \sigma(\{v\} \sqcup E) = \{v_i\}^c)_2. \end{equation} Thus the number of edges $e$ with $e \sqcup E$ interior is less than or equal to the number of vertices $w$ with the carrier codimension of $\{w\} \sqcup E$ equal to $1$. If $\sigma(\{v\} \sqcup E) = \{v_i\}^c$ and $\theta_{i} = \sum_{w_j} a_{i,j}x^{\{w_j\}}$, then $$x^{\{v\}} \theta_{i} = \sum_{\{v, w_j\} \sqcup E \text{ interior }} a_{i,j}x^{\{v, w_j\}}.$$ In particular, both vector spaces in (\ref{eq:intedge}) naturally decompose into a direct sum of vector spaces indexed by the connected components of the internal edge graph. Therefore, in each connected component of the internal edge graph, the number of edges $e$ with $e \sqcup E$ interior is less than or equal to the number of vertices $v$ with $\{v\} \sqcup E$ of carrier codimension $1$. As the only connected graphs $(V, E)$ where $\vert E \vert \le \vert V \vert$ are either trees or contain a unique cycle, the result follows. \end{proof}
\begin{lemma}\label{lem:nofourcycle}
Assume $\sigma(E)$ has codimension at least $2$. Let $F \subset \lk_{\Gamma}(E)$ be a face. Assume $F$ has no vertices $v$ with $\{v\} \sqcup E$ interior. If $L(\Gamma, E)|_{F}$ is zero in degree $2$, then no component of the internal edge graph of $F$ contains a cycle of length $4$. \end{lemma} \begin{proof}
Suppose a component of the internal edge graph contains a $4$-cycle of vertices $F = \{t_1, t_2, u_1, u_2\}$. By Lemma~\ref{lem:intedge}, this is the unique cycle in this component and every vertex $w \in F$ has $\{w\} \sqcup E$ of carrier codimension $1$. Because $F$ is a face and there are no $3$-cycles in this component of the internal edge graph, we may assume that $\sigma(\{t_i\} \sqcup E) = \{v_1\}^c$ and $\sigma(\{u_i\} \sqcup E) = \{v_2\}^c$. Restricting to $F$ and using that $(L(\Gamma, E)|_{F})_2 = 0$, we have that $$(x^{\{ t_1, u_1 \}}, x^{ \{ u_1, t_2\}}, x^{\{t_2,u_2\}}, x^{\{u_2, t_1\}}) = (x^{\{t_1\}} \theta_{2}, x^{\{t_2\}} \theta_{2}, x^{\{u_1\}} \theta_{1}, x^{\{u_2\}} \theta_{1}).$$
The relation $\theta_{1} \theta_{2} - \theta_{2} \theta_{1} = 0$ expands into a relation between the generators of the right-hand side. But the left-hand side is $4$-dimensional, a contradiction. \end{proof}
\begin{lemma}\label{lem:cd1case} Assume $\sigma(E)$ has codimension $1$. Let $\Delta \subset \lk_{\Gamma}(E)$ be a subcomplex. Then
$$\dim (L(\Gamma, E)|_{\Delta})_1 \ge |\{v \in \Delta: \{v\} \sqcup E \text{ interior}\}| - 1.$$ \end{lemma} \begin{proof} By considering the degree $1$ part of \eqref{e:restrict}, as the codimension of $\sigma(E)$ is $1$, we get the following exact sequence. \begin{center} \begin{tikzcd}[column sep = large]
k \arrow[r] & \bigoplus\limits_{\mathclap{\substack{w \in \Delta \\ \{w\} \sqcup E \text{ interior }}}} k \cdot x^w \arrow[r] & (L(\Gamma, E)|_{\Delta})_1 \arrow[r] & 0, \end{tikzcd} \end{center} and the result follows. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:localizedcase}]
We must show that $L(\Gamma, E)|_{F}$ is non-zero in degree $|F_1|$.
Recall that $L(\Gamma, E)|_{F}$ is isomorphic to $I|_{F}/J|_{F}$, where $I|_{F}$ and $J|_{F}$ are described in \eqref{eq:Ires} and \eqref{eq:Jres} respectively. First we handle the cases when $\vert F_1 \vert \le 1$. If $F_1 = \emptyset$, then $E$ is interior and $x^{\emptyset} = 1$, but $J|_{F}$ is a proper ideal as it is generated by elements of positive degree, so $x^{F_1} \not \in J|_{F}$. If $F_1 = \{v\}$, then we assume that $E$ is not an interior face. Then $J|_{F}$ is generated by elements of degree at least $2$, so $x^{F_1} \not \in J|_{F}$.
Suppose $\vert F_1 \vert = 2$. We assume that there are no vertices $v$ with $\{v\} \sqcup E$ interior and $E$ is not interior. If $\sigma(E)$ has codimension $1$, then both $F_1$ and $F_2$ must have a vertex $v$ with $\{v\} \sqcup E$ interior. Then by Lemma~\ref{lem:cd1case}, we see that $\dim L(\Gamma, E)|_{F} \ge 1$. Hence we may assume that $\sigma(E)$ has codimension at least $2$.
Let $F_1 = \{u, t\}$ and assume that $L(\Gamma, E)|_{F}$ has no non-zero elements in degree $2$. Consider the connected component of the internal edge graph containing $F_1$. By Lemma~\ref{lem:intedge}, we may assume that $\sigma(\{u\} \sqcup E) = \{v_1\}^c$. Note that $v_1 \in \sigma(t)$. There is a vertex $t' \in F_2$ such that $v_1 \in \sigma(t')$, so $\{u, t'\} \sqcup E$ is interior. Therefore either $\{t\} \sqcup E$ or $\{t'\} \sqcup E$ has carrier codimension $1$.
If $\sigma(\{t\} \sqcup E) = \{v_2\}^c$, then there is a vertex $u' \in F_2$ such that $v_2 \in \sigma(u')$. First assume $u'$ and $t'$ are distinct. Since at least one of $\{u'\} \sqcup E$ and $\{t'\} \sqcup E$ has carrier codimension $1$, it follows that $\{ u', t'\} \sqcup E$ is interior. Then $\{u, t, u', t'\}$ forms a $4$-cycle, contradicting Lemma~\ref{lem:nofourcycle}.
If $u' = t'$, then the internal edge graph contains a cycle and hence every vertex $w$ in it (including $t$) has $\{w\} \sqcup E$ of carrier codimension $1$. As $F_2$ is interior and $\{u'\} \sqcup E$ has carrier codimension $1$, there is a vertex $w \in F_2$ such that $\{u', w\} \sqcup E$ is interior. But then either $\{u, w\} \sqcup E$ or $\{t, w\} \sqcup E$ is interior, contradicting the uniqueness of the cycle in Lemma~\ref{lem:intedge}.
If $\{t\} \sqcup E$ does not have carrier codimension $1$, then we may assume that $\sigma(\{t'\} \sqcup E) = \{v_2\}^c$. Choose a vertex $u' \in F_2$ with $v_2 \in \sigma(u')$. Then $\{t', u'\} \sqcup E$ is interior, so $\{u'\} \sqcup E$ has carrier codimension $1$. If $v_1 \in \sigma(u')$, then $\{u, u'\} \sqcup E$ is interior. If $v_1 \not \in \sigma(u')$, then $\{t, u'\} \sqcup E$ is interior. In either case, there is a cycle and a vertex $v$ with $\{v\} \sqcup E$ of carrier codimension more than $1$ in the internal edge graph, contradicting Lemma~\ref{lem:intedge}. \end{proof}
\begin{remark}\label{rem:newnonvanish} One can use the same overall strategy more generally to show that other combinatorial types of faces cannot appear in triangulations with vanishing local $h$-vectors. For instance, suppose $V = \{v_1, \ldots, v_6\}$ and $\sigma \colon \Gamma \to 2^V$ is a geometric triangulation with a facet $F = \{w_1, \ldots, w_6\}$ such that \[ \begin{array}{lll} \sigma(w_1) = \{ v_1\} & \sigma(w_2) = \{ v_2 \} & \sigma(w_3) = \{ v_3 \} \\ \sigma(w_4) = \{ v_1, v_4, v_5 \} & \sigma(w_5) = \{ v_2, v_4, v_6 \} & \sigma(w_6) = \{ v_3, v_5, v_6 \} \end{array} \]
Then the interior $2$-faces of $F$ are $\{w_1, w_5, w_6\}$, $\{w_2, w_4, w_6\}$, $\{w_3, w_4, w_5\}$, and $\{w_4, w_5, w_6\}$. But $F$ has no interior vertices or edges, and it has only three edges with carrier codimension one, namely $\{w_4, w_5\}, \{w_4, w_6\},$ and $\{w_5, w_6\}$. Thus $L(\Gamma, \emptyset)|_{F}$ is non-zero in degree three. Note that $F$ is not a pyramid and does not admit an interior partition. \end{remark}
\end{document} |
\begin{document}
\begin{abstract} The Bousfield-Kan (or unstable Adams) spectral sequence can be constructed for various homology theories such as Brown-Peterson homology theory BP, Johnson-Wilson theory $E(n)$, or Morava $E$-theory $E_n$. For nice spaces the $E_2$-term is given by Ext in a category of unstable comodules. We establish an unstable Morava change of rings isomorphism between \break $\text{Ext}_{\mathcal{U}_{\Gamma_B}}(B,M)$ and $\text{Ext}_{\mathcal{U}_{E_{n*}E_n}/I_{n}}(E_{n*}/I_{n},E_{n*}\otimes_{BP_*} M)$ where $(B,\Gamma_B)$ denotes the Hopf algebroid $(v_n^{-1}BP_*/I_{n}, v_n^{-1}BP_*BP/I_{n} )$. We show that the latter groups can be interpreted as $\text{Ext}$ in the category of continuous modules over the profinite monoid of endomorphisms of the Honda formal group law. By comparing this with the cohomology of the Morava stabilizer group we obtain an unstable Morava vanishing theorem when $p-1 \nmid n$. \end{abstract}
\title{An unstable change of rings for Morava E-theory}
\section{Introduction}
In \cite{BCM} it is shown that the unstable Adams spectral sequence, as formulated by Bousfield and Kan \cite{BK}, can be used with a generalized homology theory represented by a $p$-local ring spectrum $E$ satisfying certain hypotheses, and for certain spaces $X$. The main example is $E=BP$. In these cases the effectiveness of the spectral sequence is demonstrated by: 1) setting up the spectral sequence and proving convergence, 2) formulating a general framework for computing the $E_2$-term, and 3) computing the one and two line in the case where $E=BP$ and $X=S^{2n+1}$.
In \cite{BT} the present author and M. Bendersky showed that this framework can be extended to periodic homology theories such as the Johnson-Wilson spectra $E(n)$. However the approach to convergence in \cite{BT} is different from that in \cite{BCM}. In the latter the Curtis convergence theorem is used to obtain a general convergence theorem based on the existence of a Thom map \[E\ \to H\mathbf{Z}_{(p)}\] and a tower over $X$. This necessitates that $E$ be connective. Obviously this doesn't apply to periodic theories such as $E(n)$. In \cite{BT} we study a tower under $X$ and define the $E$-completion of $X$ to be the homotopy inverse limit of this tower. Convergence of the spectral sequence to the completion is guaranteed by, for example, a vanishing line in the $r$th term of the spectral sequence. For the example of $E(1)$ and $X = S^{2n+1}$ we compute the $E_2$-term, for $p$ odd, and obtain such a vanishing line.
It should be noted that the spectral sequence has been used to good effect in the work of Davis and Bendersky, in computing $v_1$-periodic homotopy groups of Lie Groups. It should also be noted that the construction of an $E$-completion given in \cite{BT} has been strongly generalized by Bousfield in \cite{BO9}. Also, the framework for the construction of the spectral sequence and the computation of the $E_2$-term in \cite{BCM} and \cite{BT} has been generalized by Bendersky and Hunton in \cite{BH} to the case of an arbitrary Landweber Exact ring spectrum $E$. This includes complete theories such as Morava $E$-theory.
In \cite{BH} the authors define an $E$-completion of $X$, and a corresponding Bousfield-Kan spectral sequence, for any space $X$ and any ring spectrum $E$, generalizing the construction of \cite{BT}. If one further supposes that $E$ is a Landweber exact spectrum then the authors show that one can define a category of unstable comodules over the Hopf algebroid $(E_*, E_*(E))$. This is accomplished by studying the primitives and indecomposables in the Hopf ring of $E$, extending the work of \cite{BCM}, \cite{BE5}. Letting $\mathcal{U}$ denote this category of unstable comodules they show, for example, that if $X$ is a space such that $E_*(X) \cong \Lambda(M)$, an exterior algebra on the $E_*$-module $M$ of primitives, where $M$ is a free $E_*$-module concentrated in odd degrees, then the $E_2$-term of the spectral sequence can be identified as \[E_2^{s,t}(X) \cong \ext^s_{\mathcal{U}}(E_*(S^t),M).\] This is Theorem 4.1 of \cite{BH}. In the literature $E_{*}(S^t)$ is often abbreviated to $E_{*}[t]$ and this bigraded Ext group is denoted by the shorthand \[ \ext^{s}_{\mathcal{U}}(E_{*}[t],M) \,\, \text{or even} \,\, \ext^{s,t}_{\mathcal{U}}(M).\]
There remains the problem of convergence and the problem of computing the $E_2$-term. In this paper, following work on the case of Morava $K$-theory in D. Mulcahey's thesis \cite{MUL}, we extend the definition of an unstable comodule to certain torsion Hopf algebroids, and establish bounds on the cohomological dimension of the unstable Ext groups. This involves an unstable version of the Morava change of rings theorem going from $v_{n}^{-1}BP/I_{n}$ to Morava $K$-theory, and then identification of the unstable cohomology as Ext groups in the category of continuous modules over $\End_n$, the profinite monoid of endomorphisms of $\Gamma_n$, the Honda formal group law, over $\mathbf{F}_{p^n}$. The multiplication in $\End_n$ is given by composition. The group of invertible endomorphisms is the well known Morava stabilizer group, and Morava theory tells us that the continuous cohomology of this group yields stable input into the chromatic machinery of stable homotopy theory. Unstable information is obtained by considering non-invertible endomorphisms of $\Gamma_n$ as well.
In the following theorem, the group on the left is Ext in the category of unstable comodules over the Hopf algebroid \[(B,\Gamma_{B}) = (B(n)_*,\Gamma_{B(n)_*}) = (v_{n}^{-1}BP_{*}/I_{n}, v_{n}^{-1}BP_*BP/I_{n}),\] and the group on the right is continuous Ext over the monoid $\End_n$, where $\Gal$ denotes the Galois group $\Gal(\mathbf{F}_{p^n}/\mathbf{F}_{p})$ and $E_{n*}$ is the coefficient ring of Morava $E$-theory. We will denote the unstable comodule which is the homology of the sphere by \[B[k] = B(n)_*(S^{k}) = v_n^{-1}BP_*(S^{k})/I_{n} .\] Let $M$ be an unstable $\Gamma_{B}$-comodule concentrated in odd dimensions.
\begin{theorem}\label{first-main-theorem} There is an isomorphism \[ \ext^{s}_{\mathcal{U}_{ \Gamma_{B} }}(B[t],M) \cong \ext^{s}_{\End_n}( (E_{n})_{1}[t] /I_{n} , (E_{n*}\otimes_{BP_*} M)_{1})^{\Gal}. \] \end{theorem}
In Section \ref{cohomological-dimension} we establish a relationship between the Ext groups over $\End_n$ and the cohomology of $S_n$, the Morava stabilizer group. Using the cohomological dimension of $S_n$ (see \cite{RA2}) we obtain an unstable Morava vanishing theorem.
\begin{theorem}\label{second-main-theorem} Let $\Gamma_B$ and $M$ be as in Theorem \ref{first-main-theorem}. Suppose $p-1 \nmid n$. Then \[ \ext^{s}_{\mathcal{U}_{\Gamma_{B}}}(B[t],M) = 0\quad \text{for}\quad s > n^2+1.\] \end{theorem}
\section{Unstable Comodules}\label{section-unstable-comodules}
We begin by recalling some notions from \cite{BCM}, \cite{BH} and \cite{BT}. Suppose that $E$ is a spectrum representing a Landweber exact homology theory with coefficient ring concentrated in even degrees. Let $\underline{E}_*$ denote the corresponding $\Omega$-spectrum. There are generators $\beta_i \in E_{2i}(CP^{\infty})$ and under the complex orientation for complex cobordism $CP^{\infty} \to \underline{MU}_2$ these map to classes in $E_{2i}(\underline{MU}_2)$. Localized at a prime $p$, denote the image of $\beta_{p^i}$ by $b_{(i)} \in E_{2p^i}(\underline{E}_2)$. Let $b_i \in E_{2p^i-2}(E)$ denote the image under stabilization. Following \cite{BCM}, \cite{BH}, and \cite{BOAR}, when $E= BP$, we replace the elements $b_i$ with $h_i = c(t_i)$ and replace $b_{(i)}$ with $h_{(i)}$, a canonical lift of $h_i$. For a finite sequence of integers $J=(j_1,j_2,\dots,j_n)$ define the {\em length} of $J$ to be $l(J) = j_1 + j_2 + \cdots + j_n$ and define \[ h^J = h_1^{j_1}h_{2}^{j_2}\cdots h_n^{j_n}.\]
\begin{definition}\label{original-definition} Let $(A,\Gamma)$ denote the Hopf algebroid $(E_*,E_*E)$ for a Landweber exact spectrum $E$. Let $M$ be a free, graded $A$-module. Define $U_{\Gamma}(M)$ to be the sub-$A$-module of $\Gamma\otimes_{A}M$ spanned by all elements of the form $h^J\otimes m$ where $2l(J) < |m|$. Secondly, define $V_{\Gamma}(M)$ to be the sub-$A$-module of $\Gamma\otimes_{A}M$ spanned by all elements of the form $h^J\otimes m$ where $2l(J) \le |m|$. \end{definition} We will sometimes drop the subscript $\Gamma$ from the notation if it will not cause confusion.
The following theorem was proved in \cite{BCM} for $E=BP$ and in \cite{BH} for an arbitrary Landweber exact theory. Here $M_s$ denotes a free $A$-module generated by one class $i_s$ in dimension $s$.
\begin{theorem}\label{suspension-isomorphism} In the Hopf ring for $E$ the suspension homomorphism restricted to the primitives \[\sigma_*: PE_*(\underline{E}_s) \to U(M_s)\] and the suspension homomorphism restricted to the indecomposables \[\sigma_*: QE_*(\underline{E}_s) \to V(M_s)\] are isomorphisms. \end{theorem}
The functors $U_{\Gamma}(-)$ and $V_{\Gamma}(-)$ extend to the category of arbitrary $A$-modules. \begin{definition}\label{extend-U}
Let $M$ be an $A$-module and let \[F_1 \to F_0 \to M \to 0\] be exact with $F_1$ and $F_0$ free over $A$. Define $U_{\Gamma}(M)$ by \[U_{\Gamma}(M) = \coker(U_{\Gamma}(F_1) \to U_{\Gamma}(F_0))\] and $V_{\Gamma}(M)$ by \[V_{\Gamma}(M) = \coker(V_{\Gamma}(F_1) \to V_{\Gamma}(F_0)).\] \end{definition}
It is shown in \cite{BCM}, \cite{BH} that $U$ and $V$ are each the functor of a comonad $(U,\Delta,\epsilon)$ and $(V,\Delta,\epsilon)$ on the category of $A$-modules. For now we will focus on the functor $U$ but in everything that follows in this section and the next there are analogous results for $V$. Keep in mind that if $M$ is concentrated in odd dimensions, then $U(M)$ and $V(M)$ are the same.
Using some work from Dustin Mulcahey's thesis \cite{MUL} we can extend the above to a more general situation. But first we will reconcile two differently defined but apparently similar notions of $U(M)$. We still suppose $E$ is a spectrum representing a Landweber exact homology theory and let $M$ denote a free $E_*$-module, and let $F$ be any $p$-local homology theory which is torsion free with coefficients concentrated in even dimensions. In Definition 2.9 of \cite{BH}, Bendersky and Hunton define $U_F(M)$ to be the sub-$F_*$-module of $F_*(E)\otimes_{E_*}M$ spanned by elements $h^{I}\otimes m$ where $2l(I) < |m|$. If we let $F= BP$ this gives a $BP_*$-module $U_{BP}(M)$. However, regarding $M$ as a $BP_*$-module, which will not typically be a free $BP_{*}$-module, we also have the $BP_*$-module $U_{\Gamma}(M)$, where $BP_*BP = \Gamma$, defined in \ref{extend-U} above. Note that $U_{BP}(M)$ is a $BP_*$-submodule of $BP_*(E)\otimes_{E_*}M = BP_*BP\otimes_{BP_*}E_*\otimes_{E_*}M = BP_*BP\otimes_{BP_*}M$ by definition, whereas $U_{\Gamma}(M)$ maps to $BP_*BP\otimes_{BP_*}M$, but not {\em a priori} injectively.
\begin{proposition}\label{reconciliation} Denote $(BP_*,BP_*BP)$ by $(A,\Gamma)$ and $(E_*,E_*E)$ by $(B,\Sigma)$ for a Landweber exact homology theory $E$. Let $M$ denote a free $E_*$-module. Then the map $U_{\Gamma}(M) \to \Gamma \otimes_{A}M$ which comes from Definitions \ref{original-definition} and \ref{extend-U} is an injection and $U_{BP}(M) \cong U_{\Gamma}(M)$. Furthermore $B\otimes_{A} U_{\Gamma}(M) \cong U_{\Sigma}(M)$. A similar result holds for $V$. \end{proposition}
\begin{proof} Because $E$ is Landweber exact, it is torsion free, and it follows by a simple argument that $U_{\Gamma}(M)$ is also torsion free. So to establish injectivity it suffices to tensor with the rationals: \break $Q\otimes U_{\Gamma}(M) \to Q\otimes \Gamma \otimes_{A}M$. This map is in fact an isomorphism because rationally the unstable condition is vacuous. The isomorphism $U_{BP}(M) \cong U_{\Gamma}(M)$ follows by comparing the two images in $BP_*BP\otimes_{BP_*}M$. The last statement follows immediately from Corollary 2.12 of \cite{BH}. \end{proof} It follows that $B\otimes_{A} U_{\Gamma}(M) \cong U_{\Sigma}(M)$ for an arbitrary $B$-module $M$, and similarly for $V$.
\begin{definition}\label{unstable-comodule-categories-Landweber-exact}
Let $\mathcal{U}_{\Sigma}$ denote the category of coalgebras over the comonad $U_{\Sigma}$ and similarly let $\mathcal{V}_{\Sigma}$ denote the category of coalgebras over the comonad $V_{\Sigma}$. We call an object in $\mathcal{U}_{\Sigma}$ (or in $\mathcal{V}_{\Sigma}$, depending on the context) an unstable $\Sigma$-comodule. \end{definition}
A Hopf algebroid $(B,\Sigma)$ is called {\em flat} if $\Sigma$ is flat as a left (and hence right) $B$-module. Flatness ensures that the category of $\Sigma$-comodules is an abelian category. We also want the category of unstable comodules to be abelian. This will follow from the exactness of the functor $U_{\Sigma}$ on the category of $B$-modules. The exactness of $U$ for $(BP_*,BP_*BP)$ has been asserted without proof in multiple places in the literature. A proof is given in \cite{BCR} but that proof only applies to the case of free modules.
The following proof is based on an idea of Martin Bendersky's and we are grateful to him for allowing it to be included here. In particular Bendersky suggested using the Boardman basis for the Hopf ring for $BP$ which is the most convenient for this purpose.
\begin{proposition}\label{exactness-of-U-Landweber-exact-case} Let $(A, \Gamma) = (BP_*,BP_*BP)$ and suppose $(B,\Sigma)$ is a Hopf algebroid associated to a Landweber exact homology theory. Then the functors $U_{\Sigma}$ and $V_{\Sigma}$ are exact on $B$-modules. \end{proposition}
\begin{proof}
We first give the proof for $BP$. We need to use the fact that the indecomposables and the primitives in the Hopf ring for $BP$ are free $BP_*$-modules. In \cite{RW1} Ravenel and Wilson compute $BP_*(\underline{BP}_*)$, the Hopf ring for $BP$, and show that the indecomposables and primitives are free {\em left} $BP_*$-modules. They write down a basis, which in turn gives a basis as a free $Z_{(p)}$-module. In spite of the fact that there is no conjugation in the Hopf ring corresponding to the conjugation $c$ in $BP_*BP$, Boardman proves in \cite{BOAR} that if one considers the right action of $BP_*$ instead, the indecomposables and primitives are free {\em right} $BP_*$-modules as well.
To start we restrict to the even spaces in the Hopf ring and the functor $V$. The proof can then be extended to the odd spaces and the functor $U$ by standard arguments. By \cite{RW1} the indecomposables $QBP_*(\underline{BP}_*)$ are generated as a left $BP_*$-module by monomials $h^{\circ J'}\circ[v^K]$, where $J' = (j_0,j_1,\dots)$ and $K =(k_1,k_2,\dots)$ are sequences of non-negative integers. If also $I=(i_1,i_2,\dots)$, the bidegree of a generator is given by
\[ v^Ih^{J'}[v^K] \in QBP_{|v^I| + |h^{J'}|+2l(J')}(\underline{BP}_{2l(J')-|v^K|})\] where $v^Ih^{J'}[v^K]$ stands for $v^{I}\circ h_0^{\circ j_0}\circ h_1^{\circ j_1} \circ \dots \circ [v^K]$. The isomorphism in \ref{suspension-isomorphism} is given by \[ v^Ih^{J'}[v^K] \xrightarrow{} v^Ih^{J}\otimes v^K\iota_{2m}\]
where $J = (j_1,j_2,\dots)$ is obtained from $J'$ by dropping $j_0$ and $2m = 2j_0 + 2l(J) - |v^K|$.
The Ravenel-Wilson basis involves a condition on $J'$ and $K$. For the Boardman basis consider monomials $v^Ih^{J'}$ and call such a monomial {\em Boardman allowable} if it is not divisible by any monomial of the form \[v_{d_0}v_{d_1}^{p}v_{d_2}^{p^2}\dots v_{d_l}^{p^l}h_{l}\] where $l \ge 0$ and $d_0 \le d_1 \le d_2 \le \dots \le d_l$. Then the main theorem of \cite{BOAR} is that the Boardman allowable monomials are a basis for the indecomposables as a right $BP_*$-module. (Note that while $QBP_*(\underline{BP}_{2m})$ is a left $BP_*$-submodule it is not a right $BP_*$-submodule because multiplying by an element $v\in BP_*$ on the right changes the index of the space in the $\Omega$-spectrum. So we have to consider the entire Hopf ring when we consider the right module structure.)
If we let $\mathcal{B}$ denote the free $Z_{(p)}$-module generated by the Boardman allowable monomials then Boardman's theorem implies that \[QBP_*(\underline{BP}_*) \cong \mathcal{B} \otimes_{Z_{(p)}} A\] as free $Z_{(p)}$-modules. (The bigrading on the Hopf ring is not the tensor product of gradings on the two factors.)
Now suppose $F$ is a free graded $BP_*$-module of rank one on a generator in dimension $2m$. Then by \ref{suspension-isomorphism} we identify $V(F)$ with the subgroup of $\mathcal{B} \otimes_{Z_{(p)}} A$ spanned by the monomials with second bidegree equal to $2m$. If $F$ is a free graded $BP_*$-module of arbitrary rank we identify $V(F)$ with a subgroup of a sum of copies of $\mathcal{B} \otimes_{Z_{(p)}} A $ by identifying each summand of $V(F)$ with a subgroup of a copy of $\mathcal{B} \otimes_{Z_{(p)}} A $. If $f:F_1\to F_0$ is a map of free $BP_*$-modules then this defines an evident map $\mathcal{B}\otimes f$ from a sum of copies of $\mathcal{B} \otimes_{Z_{(p)}} A$ to another sum of copies of $\mathcal{B} \otimes_{Z_{(p)}} A$, which restricts to $V(f)$. It follows that for any $BP_*$-module $M$, with a free presentation \[F_1 \to F_0 \to M \to 0\] the $Z_{(p)}$-module $V(M)$ is isomorphic to the subgroup of $\mathcal{B}\otimes_{Z_{(p)}} M$ of elements in the appropriate bidegree. It is immediate from this that $V$ is an exact functor.
For the arbitrary Landweber exact case suppose we have a short exact sequence of $B$-modules \[ 0 \to M' \to M \to M'' \to 0.\] Thinking of this as a SES of $A$-modules we have a SES \[ 0 \to U_{\Gamma}(M') \to U_{\Gamma}(M) \to U_{\Gamma}(M'') \to 0\] since $U_{\Gamma}$ is exact. These are unstable $\Gamma$-comodules, hence stable $\Gamma$-comodules. Since $B$ is Landweber exact tensoring with $B$ preserves exactness on the category of $\Gamma$-comodules, and the result follows from the sentence that follows Proposition \ref{reconciliation}.
\end{proof}
Now, following Mulcahey (\cite{MUL}) we can generalize the definition of unstable comodules to certain non-Landweber exact homology theories. For the time being $(A, \Gamma)$ still denotes $(BP_*,BP_*BP)$. Suppose $A\xrightarrow{f} B$ is a map of graded algebras. If we define \[{\Sigma = B \otimes_{A} \Gamma \otimes_{A} B}\] then $(B,\Sigma)$ becomes a Hopf algebroid and we have a map of Hopf algebroids $(A,\Gamma) \to (B, \Sigma)$. The example that was treated in \cite{MUL} was $A=BP_*$ and $B = K(n)_*$ but the following makes sense more generally.
\begin{definition}\label{mulcahey-definition} The endofunctor $U_{\Sigma}$ on $B$-mod, the category of $B$-modules, is defined by \[U_{\Sigma}(N) = B \otimes_A U_{\Gamma}(N).\]
Define a comultiplication by \[ \begin{diagram} \node{ U_{\Sigma}(N) = B\otimes_A U_{\Gamma}(N) } \arrow{e,t}{ B\otimes \Delta^{\Gamma} } \arrow{se,t}{\Delta^{\Sigma}} \node{ B\otimes_A U^{2}_{\Gamma}(N) } \arrow{s,r}{B\otimes U_{\Gamma}(f\otimes U_{\Gamma}(N))} \\ \node{} \node{ B\otimes_A U_{\Gamma}(B\otimes_A U_{\Gamma}(N)) }\\ \end{diagram} \] and a counit \[ U_{\Sigma}(N) = B\otimes_A U_{\Gamma}(N) \xrightarrow{B\otimes \epsilon^{\Gamma}} B\otimes_A N \xrightarrow{} N \] Make an analogous definition for $V_{\Sigma}$. \end{definition}
\begin{proposition}[See \cite{MUL}] The functors $U_{\Sigma}$ and $V_{\Sigma}$ are both comonads on the category of $B$-modules. \end{proposition} \begin{proof} The proof is a straightforward diagram chase.\end{proof}
By Proposition \ref{reconciliation} this generalizes the definition of $U$ and $V$ in the Landweber exact case.
\begin{definition}\label{unstable-comodule-categories} Still denoting $(A, \Gamma) = (BP_*,BP_*BP)$, let $\mathcal{H}$ denote the category whose objects are Hopf algebroids $(B,\Sigma)$ arising from a map of commutative graded algebras $A\to B$, with $\Sigma = B\otimes_{A}\Gamma \otimes_{A}B$ as above, and satisfying \begin{enumerate} \item $\Sigma$ is flat as a $B$-module \item $U_{\Sigma}$ is an exact functor \end{enumerate} The morphisms in $\mathcal{H}$ are Hopf algebroid maps $(B,\Sigma) \xrightarrow{j} (C,\Phi)$ under $(A,\Gamma)$, i.e. a commutative diagram of Hopf algebroids. \[ \begin{tikzcd} & (B,\Sigma) \arrow[dd,"j"] \\ (A,\Gamma) \arrow[ur] \arrow[dr] & \\
& (C,\Phi) \\ \end{tikzcd} \] Define $\mathcal{U}_{\Sigma}$ and $\mathcal{V}_{\Sigma}$ just as in Definition \ref{unstable-comodule-categories-Landweber-exact}.
\end{definition}
Thus an unstable $\Sigma$-comodule has a lifting:
\[ \begin{diagram} \node{M} \arrow{e,t}{} \arrow{se,b}{\psi_{M}} \node{\Sigma\otimes_{B}M} \\ \node{} \node{U_{\Sigma}(M)} \arrow{n}\\ \end{diagram} \]
Now we will let $(A,\Gamma)$ denote an arbitrary object in $\mathcal{H}$. The functor $U_{\Gamma}$ restricted to $\mathcal{U}_{\Gamma}$ is the functor of a monad $(U_{\Gamma},\mu,\eta)$, using the definitions $\mu = U_{\Gamma}\epsilon$ and $\eta=\psi$. The Ext groups in $\mathcal{U}_{\Gamma}$ are defined and computed as follows (see \cite{BCM}, \cite{BH} and \cite{BT}).
\begin{definition} Suppose $M$ is an unstable comodule. Analogous to the stable case, the monad $(U,\mu,\eta)$ gives maps \begin{align*}
& U^{i}\eta^{U}U^{n-i}: U^{n}(M)\to U^{n+1}(M),\,\, 0\le i \le n,\\
& U^{i}\mu^{U}U^{n-i}: U^{n+2}(M)\to U^{n+1}(M),\,\, 0\le i \le n, \end{align*} which define a cosimplicial object in $\mathcal{U}$ called the cobar resolution. For each $t \ge 0$ let $A[t]$ denote a free $A$-module of rank one on a generator with dimension $t$. Apply the functor $\hom_{\mathcal{U}}(A[t],\,\,)$ to get a cosimplicial abelian group and hence a chain complex called the cobar complex \[\label{cobar-complex} \hom_{\mathcal{U}}(A[t],U(M)) \xrightarrow{\partial} \hom_{\mathcal{U}}(A[t],U^{2}(M)) \xrightarrow{\partial} \hom_{\mathcal{U}}(A[t],U^3(M))\xrightarrow{\partial} \cdots\\ \] with \[\partial = \sum_{i=0}^{n} (-1)^i d^{i}: \hom_{\mathcal{U}}(A[t],U^{n}(M)) \to \hom_{\mathcal{U}}(A[t],U^{n+1}(M)).\] Here $d^{i} = \hom_{\mathcal{U}}(A[t],U^{i}\eta^{U}U^{n-i})$. By the adjunction \[\hom_{\mathcal{U}}(A[t],U(N)) = \hom_{A-\text{mod}}(A[t],N) = N_t\] the cobar complex becomes \[ M_t \xrightarrow{\partial} U(M)_t \xrightarrow{\partial} U^2(M)_t \xrightarrow{\partial} \dots \] The homology of this chain complex gives $\ext^{s,t}_{\mathcal{U}}(A,M)$. \end{definition}
In \cite{MR1} Miller and Ravenel consider a morphism of Hopf algebroids $(A,\Gamma) \to (B,\Sigma)$ and define a pair of adjoint functors on the comodule categories
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$ \Gamma\text{-comod} $}; \node (b) at (3,0) {$ \Sigma\text{-comod} $}; \path[->,font=\scriptsize,>=angle 90] ([yshift= 3pt]a.east) edge node[above] {$\pi_*$} ([yshift= 3pt]b.west); \path[<-,font=\scriptsize,>=angle 90] ([yshift= -3pt]a.east) edge node[below] {$\pi^*$} ([yshift= -3pt]b.west); \end{tikzpicture} \end{center}
\noindent defined by $\pi_*(M) = B \otimes_A M$ and $\pi^*(N) = (\Gamma \otimes_A B)\cotensor_{\Sigma} N$ for a $\Gamma$-comodule $M$ and a $\Sigma$-comodule $N$. This adjunction is discussed in detail in several places, for example \cite{HOV2} and \cite{MUL}. The functors $\pi_*$ and $\pi^*$ often define inverse equivalences of comodule categories. For example if $\Sigma = B\otimes_{A} \Gamma \otimes_{A} B$ and $A\to B$ is a faithfully flat extension of rings, then it is not difficult to see that this is the case.
Now suppose that $(A,\Gamma)\to (B,\Sigma)$ is a morphism in $\mathcal{H}$. Following Mulcahey's work in \cite{MUL} we define unstable analogs of $\pi_*$ and $\pi^*$.
\begin{definition}\label{unstable-adjoint-functors} Define functors \begin{center} \begin{tikzpicture} \node (a) at (0,0) {$ \mathcal{U}_{ \Gamma} $}; \node (b) at (3,0) {$ \mathcal{U}_{ \Sigma} $}; \path[->,font=\scriptsize,>=angle 90] ([yshift= 3pt]a.east) edge node[above] {$\alpha_*$} ([yshift= 3pt]b.west); \path[<-,font=\scriptsize,>=angle 90] ([yshift= -3pt]a.east) edge node[below] {$\alpha^*$} ([yshift= -3pt]b.west); \end{tikzpicture} \end{center} \noindent by $\alpha_*(M) = B \otimes_A M$ for an unstable $\Gamma$-comodule $M$, and for an unstable $\Sigma$-comodule $N$, define $\alpha^*(N)$ to be the equalizer
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$\alpha^*(N)$}; \node (b) at (2,0) {$ U_{\Gamma}(N) $}; \node (c) at (5,0) {$ U_{\Gamma}U_{\Sigma }(N)$}; \path[->,font=\scriptsize,>=angle 90] (a) edge (b) ([yshift= 3pt]b.east) edge node[above] {$ U_{\Gamma}(\psi_{N}) $} ([yshift= 3pt]c.west) ([yshift= -3pt]b.east) edge node[below] {$U_{\Gamma}(\beta)\circ \Delta_{\Gamma} $} ([yshift= -3pt]c.west); \end{tikzpicture} \end{center} \end{definition} \noindent where $\beta:U_{\Gamma}(N)\to U_{\Sigma}(N).$
\begin{proposition}[See \cite{MUL}] The functors $\alpha_*$ and $\alpha^{*}$ form an adjoint pair. \end{proposition} \begin{proof} This follows by considering the map \[ B\otimes_A U_{\Gamma}(M) \xrightarrow{} B\otimes_A U_{\Gamma}(B\otimes_A M)\] which is natural in the $A$-module $M$ and gives a morphism of comonads $U_{\Gamma}\to U_{\Sigma}$ which leads to the adjoint pair on comodule categories. See \cite{MUL} for the details.\end{proof}
As an example we describe some torsion unstable $BP_*BP$-comodules. These will not be used in this paper, but are included to illustrate the unstable condition of Definition \ref{original-definition}. Stably, for every $n$, $BP_*/I_{n}$ is a $BP_*BP$-comodule and $v_n$ is a comodule map mod $I_n$. This is because $I_n$ is an invariant ideal. Unstably there is a subtlety because the terms in $\eta_{R}(v_k) - v_k$ may not lie in $U(BP_*(S^m)/I_{n})$ if the dimension of the sphere is too small. For example, $\eta_{R}(v_1) = v_1 - ph_1$, however $\eta_R(v_1) - v_1$ isn't divisible by $p$ in $U(BP_*(S^1))$ since $h_1$ doesn't live on the circle. You need to be on the 3-sphere or higher for $v_1$ to be an unstable comodule map, which makes $BP_*(S^m)/I_2$ into an unstable comodule.
\begin{proposition}\label{benderskys-observation} Given $n$ and $p$, $BP_*(S^m)/I_{n}$ is an unstable comodule and \[BP_*(S^m)/I_{n} \xrightarrow{v_n} BP_*(S^m)/I_{n}\] is an unstable comodule map, as long as $m \ge 2(\dfrac{p^n-1}{p-1}) +1$. \end{proposition}
\begin{proof} The statement is true for $n=1$ by the example above. Let $n\ge 1$ and assume $m$ is as stated. Inductively $BP_*(S^m)/I_{n}$
is an unstable comodule because it is the cokernel of multiplication by $v_{n-1}$ on $BP_*(S^m)/I_{n-1}$. Consider $\eta_{R}(v_n) - v_n$ which is a polynomial in the $h$'s and $\eta_{R}(v)$'s. The largest length monomial in the $h$'s which could occur is $h_{1}^{\alpha}$ with $|h_{1}^{\alpha}| = |v_n|$, i.e. $2(p-1)\alpha = 2(p^n-1)$. Therefore with $m \ge 2(\dfrac{p^n-1}{p-1}) +1$ we have that $\eta_{R}(v_n) - v_{n} =0$ in $U(BP_*(S^m)/I_{n})$. \end{proof}
This result is not sharp. A stronger statement is possible but we will not pursue that here.
\section{Faithfully Flat Extensions}
The following theorem is an unstable version of a theorem due to Mike Hopkins, Mark Hovey, and Hal Sadofsky. See \cite{HOP}, \cite{HOV}, and \cite{HOVSA}. Hovey's paper \cite{HOV} has a detailed proof of the stable theorem in the form that we need, which is stated below as Theorem \ref{hovey's-theorem}. The proof in \cite{HOV} is based on a study of the category of quasi-coherent sheaves on a groupoid scheme. That theory has not yet been developed in an unstable setting but we don't need that for the present work. The author is very grateful to Mark Hovey for a detailed discussion of various aspects of Theorem \ref{hovey's-theorem} below.
\begin{theorem}\label{unstable-faithflat-equiv} Suppose $(A,\Gamma)\to (B,\Sigma)$ is a map of Hopf algebroids in $\mathcal{H}$. Assume there exists an algebra $C$ along with an algebra map $B \otimes_A \Gamma \xrightarrow{g} C$ such that the composite \[A \xrightarrow{1 \otimes \eta_R} B \otimes_A \Gamma \xrightarrow{g} C\] is a faithfully flat extension of $A$-modules. To be explicit the first map is the one that takes $a$ to $1\otimes \eta_R(a)$. Then $\alpha_*$ and $\alpha^*$ of \ref{unstable-adjoint-functors} are adjoint inverse equivalences of categories. \end{theorem}
The existence of the map $g$ satisfying the stated condition generalizes the condition of $A\to B$ being faithfully flat.
The stable theorem on which this is based says \begin{theorem}[Hopkins, Hovey, Hovey-Sadofsky]\label{hovey's-theorem} Let $(A,\Gamma)\to (B,\Sigma)$ be a map of flat Hopf algebroids such that $\Sigma = B \otimes_{A} \Gamma \otimes_{A} B$, and assume there exists an algebra $C$ along with an algebra map $B \otimes_A \Gamma \xrightarrow{g} C$ such that the composite \[A \xrightarrow{1 \otimes \eta_R} B \otimes_A \Gamma \xrightarrow{g} C\] is a faithfully flat extension of $A$-modules. Then \[\Gamma\text{-comod} \xrightarrow{\pi_*} \Sigma\text{-comod}\] is an equivalence of categories. \end{theorem}
This enables the following lemma. \begin{lemma}\label{hovey's-lemma} Let $(A,\Gamma) \to (B,\Sigma)$ satisfy the hypotheses of \ref{unstable-faithflat-equiv}. Recall that $\alpha_*:\mathcal{U}_{\Gamma} \to \mathcal{U}_{\Sigma}$ is given by $\alpha_*(M) = B\otimes_{A}M$. Let $f:M\to N$ be a morphism in $\mathcal{U}_{\Gamma}$. Then $\alpha_*(f)$ is an isomorphism if and only if $f$ is an isomorphism. Furthermore $\alpha_*$ is exact.
\end{lemma}
\begin{proof} By Theorem \ref{hovey's-theorem} the functor $\pi_*$ is exact since an equivalence of abelian categories is an exact functor. An unstable $\Gamma$-comodule map is a stable $\Gamma$-comodule map and a sequence in $\mathcal{U}_{\Gamma}$ is exact if and only if it's exact in $\Gamma\text{-comod}$, so $\alpha_*$ is exact on $\mathcal{U}_{\Gamma}$. A similar argument gives the first statement. \end{proof}
\begin{proof}[Proof of Theorem \ref{unstable-faithflat-equiv}] For $N \in \mathcal{U}_{\Sigma}$ consider the counit of the adjunction \[ \alpha_*\alpha^* N \xrightarrow{} N.\] By Lemma \ref{hovey's-lemma} $\alpha_*\alpha^* N = B\otimes_{A} \alpha^*(N)$ sits in an equalizer diagram
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$B\otimes_{A} \alpha^*(N)$}; \node (b) at (3,0) {$B\otimes_{A} U_{\Gamma}(N) $}; \node (c) at (7.5,0) {$B\otimes_{A} U_{\Gamma}U_{\Sigma }(N)$}; \path[->,font=\scriptsize,>=angle 90] (a) edge (b) ([yshift= 3pt]b.east) edge node[above] {$B\otimes_{A} U_{\Gamma}(\psi_{N}) $} ([yshift= 3pt]c.west) ([yshift= -3pt]b.east) edge node[below] {$B\otimes_{A} U_{\Gamma}(\beta)\circ \Delta_{\Gamma} $} ([yshift= -3pt]c.west); \end{tikzpicture} \end{center} \noindent which is the same thing as
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$B\otimes_{A} \alpha^*(N)$}; \node (b) at (2.5,0) {$ U_{\Sigma}(N) $}; \node (c) at (6,0) {$U_{\Sigma}U_{\Sigma }(N)$}; \path[->,font=\scriptsize,>=angle 90] (a) edge (b) ([yshift= 3pt]b.east) edge node[above] {$ U_{\Sigma}(\psi_{N}) $} ([yshift= 3pt]c.west) ([yshift= -3pt]b.east) edge node[below] {$ \Delta_{\Sigma} $} ([yshift= -3pt]c.west); \end{tikzpicture} \end{center} \noindent because $\Sigma = B\otimes_{A} \Gamma \otimes_{A} B$. It follows that $B\otimes_{A}\alpha^*N \cong N$.
For $M \in \mathcal{U}_{\Gamma}$ look at the unit of the adjunction \[M \xrightarrow{} \alpha^*\alpha_* M.\] The target sits in an equalizer diagram
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$ \alpha^*\alpha_* M$}; \node (b) at (2.5,0) {$ U_{\Gamma}(B\otimes_{A} M) $}; \node (c) at (6,0) {$U_{\Gamma}U_{\Sigma }(B\otimes_{A} M)$}; \path[->,font=\scriptsize,>=angle 90] (a) edge (b) ([yshift= 3pt]b.east) edge node[above] {$ $} ([yshift= 3pt]c.west) ([yshift= -3pt]b.east) edge node[below] {$ $} ([yshift= -3pt]c.west); \end{tikzpicture} \end{center} \noindent Tensor this with $B$
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$B\otimes_{A} \alpha^*\alpha_* M$}; \node (b) at (3.5,0) {$B\otimes_{A} U_{\Gamma}(B\otimes_{A} M) $}; \node (c) at (8,0) {$B\otimes_{A}U_{\Gamma}U_{\Sigma }(B\otimes_{A} M)$}; \path[->,font=\scriptsize,>=angle 90] (a) edge (b) ([yshift= 3pt]b.east) edge node[above] {$ $} ([yshift= 3pt]c.west) ([yshift= -3pt]b.east) edge node[below] {$ $} ([yshift= -3pt]c.west); \end{tikzpicture} \end{center} \noindent which gives
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$B\otimes_{A} \alpha^*\alpha_* M$}; \node (b) at (3,0) {$ U_{\Sigma}(B\otimes_{A} M) $}; \node (c) at (7,0) {$U_{\Sigma}U_{\Sigma }(B\otimes_{A} M)$}; \path[->,font=\scriptsize,>=angle 90] (a) edge (b) ([yshift= 3pt]b.east) edge node[above] {$ $} ([yshift= 3pt]c.west) ([yshift= -3pt]b.east) edge node[below] {$ $} ([yshift= -3pt]c.west); \end{tikzpicture} \end{center} \noindent So $B\otimes_{A} \alpha^*\alpha_*M \cong B\otimes_{A} M$. The unit of the adjunction is an unstable $\Gamma$-comodule map so Lemma \ref{hovey's-lemma} applies and we have ${M \xrightarrow{\cong} \alpha^*\alpha_*M}$. \end{proof}
This equivalence of categories induces a change of rings isomorphism of Ext groups. To be explicit we have \begin{theorem}\label{unstable-faithflat-cor}
Assume the hypotheses of \ref{unstable-faithflat-equiv}. Then for any unstable $\Gamma$-comodule $M$, there is an isomorphism \[ \ext^{s}_{\mathcal{U}_{\Gamma}}(A[t],M) \to \ext^{s}_{\mathcal{U}_{\Sigma}}(B[t],B\otimes_{A} M).\] \end{theorem}
First we make an observation.
\begin{lemma}\label{unstable-cotensor-lemma} For an unstable $\Sigma$-comodule $N$ we have $\alpha^*U_{\Sigma}(N) = U_{\Gamma}(N)$. \end{lemma}
\begin{proof} We have \begin{equation}\label{first-map}
U_{\Gamma}(N) \to \alpha^*U_{\Sigma}(N).
\end{equation}
Tensor with $B$ to get \[B\otimes_{A} U_{\Gamma}(N) \to B\otimes_{A} \alpha^*U_{\Sigma}(N)\] which is \[U_{\Sigma}(N) \xrightarrow{\cong} U_{\Sigma}(N).\] By Lemma \ref{hovey's-lemma} the map (\ref{first-map}) is an isomorphism. \end{proof}
\begin{proof}[Proof of \ref{unstable-faithflat-cor}] Let \[N \to U_{\Sigma}(N) \to U_{\Sigma}^{2}(N) \to U_{\Sigma}^{3}(N) \to \dots \] be the unstable cobar resolution for $N$. Apply $\alpha^*$ to get \begin{equation}\label{alpha-unstable-cobar} \alpha^{*}N \to \alpha^{*}U_{\Sigma}(N) \to \alpha^{*}U_{\Sigma}^{2}(N) \to \alpha^{*}U_{\Sigma}^{3}(N) \to \dots \end{equation} Since $\alpha^*$ is an equivalence of abelian categories it is exact so \ref{alpha-unstable-cobar} is exact.
By Lemma \ref{unstable-cotensor-lemma} $\alpha^{*}U_{\Sigma}(N) = U_{\Gamma}(N)$ so \ref{alpha-unstable-cobar} is a resolution of $\alpha^*N$ by models in the category of unstable $\Gamma$-comodules \[\alpha^{*}N \to U_{\Gamma}(N) \to U_{\Gamma}U_{\Sigma}^{}(N) \to U_{\Gamma}U_{\Sigma}^{2}(N) \to \dots ,\] and hence can be used to compute $\ext$ (see for example Theorem 2.3 of \cite{BCR}). Apply $\Hom_{\mathcal{U}_{\Gamma}}(A,\_)$ to get \[N \to U_{\Sigma}(N) \to U_{\Sigma}^{2}(N) \dots\] which is the $\Sigma$-cobar complex for $N$. This shows that \[ \ext_{\mathcal{U}_{\Gamma}}(A,\alpha^{*}N) \to \ext_{\mathcal{U}_{\Sigma}}(B,N)\] is an isomorphism. Apply this to the case $N=\alpha_*M$ to get the result. \end{proof}
\section{Morava $E$-theory}\label{Morava-$E$-theory}
This section is based on the work of Morava \cite{MOR}. We will closely follow the exposition of Devinatz \cite{DEV}. Let $W\mathbf{F}_{p^n}$ denote the Witt ring over $\mathbf{F}_{p^n}$, the complete local $p$-ring having $\mathbf{F}_{p^n}$ as its residue field. Let $\sigma$ denote the generator of the Galois group $\text{Gal} = \text{Gal}(\mathbf{F}_{p^n}/ \mathbf{F}_{p})$ which is cyclic of order $n$. Note that $\text{Gal}$ acts on $W\mathbf{F}_{p^n}$ by \[(\sum_{i} w_i p^i)^{\sigma} = \sum_{i} w_i^p p^i\] where the coefficients $w_i$ are multiplicative representatives.
Let $\Gamma_n$ be the height $n$ Honda formal group law over a field $k$ of characteristic $p$. The endomorphism ring of $\Gamma_n$ over $k=\mathbf{F}_{p^n}$, denoted $\End_n$, is known and is given by (see \cite{RA2}) \[\End_n = W\mathbf{F}_{p^n}\langle S \rangle /(S^n=p, Sw = w^{\sigma}S) .\] Here one can think of $S$ as a non-commuting indeterminate.
We will think of $\End_n$ as a topological monoid under multiplication. The submonoid consisting of invertible elements is the Morava stabilizer group $S_n = (\End_n)^{\times}$. Also, $\text{Gal}$ acts on $\End_n$ and hence on $S_n$, and we can form the semidirect product $G_n=S_n \rtimes \text{Gal}$, sometimes referred to as the extended stabilizer group (see for example \cite{DH04}).
Morava $E$-theory, also referred to as Lubin-Tate theory, is a Landweber exact homology theory represented by a spectrum denoted $E_n$ and corresponding to the Hopf algebroid
\[(E_{n*},E_{n*}E_{n}) = (E_{n*},E_{n*}\otimes_{BP_*} BP_*BP \otimes_{BP_*} E_{n*}) .\] The completion of this Hopf algebroid is \[(E_{n*},\text{Map}_c(G_n , Z_p)\hat{\otimes}_{Z_p} E_{n*})\] which we will talk about in the next section (Proposition 2.2 of \cite{DH04}). Here $\text{Map}_{c}$ refers to the set of continuous maps. The coefficient ring has the following description: \[E_{n*} = W\mathbf{F}_{p^n}[[u_1,\dots,u_{n-1}]][u,u^{-1}].\]
The ring $E_{n*}$ is graded by $|u_i| = 0$ and $|u| = -2$. There is a graded map of coefficients $BP_*\xrightarrow{\lambda} E_{n*}$ given by \begin{equation}\label{coefficient-map}\lambda(v_i) =
\begin{cases} u_iu^{1-p^i} & i<n \\ u^{1-p^n} & i = n \\ 0 & i> n.
\end{cases} \end{equation}
We have the Hopf algebroid associated to Morava $K$-theory $(K(n)_*, \Sigma(n))$, where $K(n)_* = \mathbf{F}_p[v_n,v_n^{-1}]$ and \[\Sigma(n) = K(n)_*\otimes_{BP_*} BP_*BP \otimes_{BP_*} K(n)_*\] (note that $\Sigma(n) \neq K(n)_*(K(n))$. See \cite{MR1}).
We consider the composite map of Hopf algebroids \begin{equation}\label{reduction} (B(n)_*, \Gamma_{B(n)_*}) \xrightarrow{} (K(n)_*, \Sigma(n) ) \xrightarrow{} ({E_{n}}_{*}/I_{n}, {E_{n}}_{*}E_{n}/I_{n})
\end{equation}
We wish to show these are all in the category $\mathcal{H}$.
First we need to establish a fact about the Hopf ring for $P(n)_* = BP_*/I_{n}$.
Again we are grateful for help from Martin Bendersky who suggested the use of the Boardman basis for $QBP_*(\underline{BP}_*)$ \cite{BOAR} and the use of Ravenel and Wilson's calculation of the Hopf ring for $P(n)$ in \cite{RW2}.
We start with the Hopf ring $BP_*(\underline{BP}_*)$ and tensor on the right and the left with $P(n)_*$. This gives an algebraically defined Hopf ring which corresponds to the factors described on page 3 of \cite{RW2} for which $a^I =1$. We further simplify by only considering the even dimensional spaces and the indecomposables. The result can be extended to the odd dimensional spaces and the primitives by standard arguments. Denote this object by $QP(n)^{*}_{*}$. Consider the Boardman basis for the indecomposables $QBP_*(\underline{BP}_*)$, which consists of monomials $v^Ih^{J'}$ and {\em excluding} all monomials of the form $v_{d_0}^{}v_{d_1}^p v_{d_2}^{p^2}\cdots v_{d_l}^{p^l} h_l$, $l\ge 0$, $d_{0} \le d_{1} \le \dots \le d_{l}$, and any monomial divisible by one of this form. By \cite{BOAR} this is a basis for $QBP_*(\underline{BP}_*)$ as a {\em right} $BP_*$-module.
\begin{theorem}\label{Boardman-basis-for-P(n)} The image of the Boardman basis in $QP(n)^{*}_{*}$ is a basis for $QP(n)^{*}_{*}$ as a free right $P(n)_*$-module. The monomials of the form $v^Ih^{J'}[v^K]$, where $v^I$ and $v^K$ are in $P(n)_*$ and $v^Ih^{J'}$ satisfies the Boardman condition above, give a basis for $QP(n)^{*}_{*}$ as an $\mathbf{F}_p$-vector space. \end{theorem}
\begin{proof} We will follow the exposition and results of Boardman \cite{BOAR}, particularly pages 10--12. Let $w_k$ denote $[v_k]$, and let $W_n$ be the set of monomials in $\{w_n, w_{n+1}, \dots \}$. Similarly $V_n$ will denote monomials in $\{v_n,v_{n+1}, \dots \}$ and $H_m$ will be monomials in $\{h_m,h_{m+1}, \dots\}$. Boardman defines the Poincar\'e series of a monomial $x\in BP_{2i}(\underline{BP}_{2n})$ by $P(x) = s^{i}t^{i-n}$. For example \[P(v_i)=s^{p^i-1}t^{p^i-1},\,\,\,P(h_j)=s^{p^j}t^{p^j-1},\,\,\,P(w_k)=t^{p^k-1},\,\,\,P(xy) = P(x)P(y)\] and for a family $S$, he defines $P(S)$ to be $\Sigma_{x\in S}P(x)$. For families $S$ and $T$, we have $P(ST)=P(S)P(T)$.
Boardman observes the formulas \[ P(V_k) = \Pi_{r=k}^{\infty} (1-P(v_r))^{-1};\quad P(H_m) = \Pi_{r=m}^{\infty} (1-P(h_r))^{-1};\quad\text{etc.} \] which are useful.
The family $A_{k,m}\subset V_kH_m$ is defined by excluding monomials of the form $v_{i_m}^{p^m}v_{i_{m+1}}^{p^{m+1}} \cdots v_{i_l}^{p^l} h_l$, $l \ge m$, $i_{m}\le i_{m+1} \le \dots \le i_{l}$, and any multiple of such. Boardman proves that the Poincar\'e series satisfy \begin{equation}\label{boardmans-formula}P(A_{k,m}) = P(V_k)P(H_m)P(H_{k+m})^{-1}.\end{equation}
Note that $A_{n,0}$ is the image of the Boardman basis under the map $BP \to P(n)$, i.e. the image of the Boardman basis by modding out by $I_n$ on the left. So $A_{n,0} \subset V_{n}H_{0}$ and excludes monomials of the form $v_{i_0}^{}v_{i_{1}}^{p} \cdots v_{i_l}^{p^l} h_l$. Thus $P(A_{n,0}) = P(V_n)P(H_0)P(H_n)^{-1}$. It follows that the Poincar\'e series for the free right $P(n)_*$-module on the image of the Boardman basis is given by $P(V_n)P(H_0)P(H_n)^{-1}P(W_n)$. We wish to compare this to the Ravenel-Wilson basis given in \cite{RW2}.
Define $R_{k,m} \subset H_kW_m$ by excluding monomials $h_{j_m}^{p^m}h_{j_{m+1}}^{p^{m+1}} \cdots h_{j_l}^{p^l} w_l$, $l\ge m$, $j_{m}\le j_{m+1} \le \dots \le j_{l}$. Note that $R_{0,n}$ is the Ravenel--Wilson basis of $n$-allowable monomials which exhibit $QP(n)^{*}_{*}$ as a free left $P(n)_*$-module.
We claim \begin{equation}\label{poincare-ravenel-wilson}P(R_{k,m}) = P(H_k)P(W_m)P(H_{k+m})^{-1}.\end{equation}
Granting this, the Poincar\'{e} series for $QP(n)^{*}_{*}$ is given by \[P(V_n)P(R_{0,n}) = P(V_n)P(H_0)P(H_n)^{-1}P(W_n).\] This is the same as the Poincar\'{e} series of the Boardman basis mod $I_n$. We know the Boardman basis spans $QP(n)^{*}_{*}$ because the map $BP\to P(n)$ is onto. Therefore the image of the Boardman basis is a basis for $QP(n)^{*}_{*}$ as a free right $P(n)_*$-module.
To prove the claim (\ref{poincare-ravenel-wilson}), we follow the very same argument given by Boardman in \cite{BOAR} to prove (\ref{boardmans-formula}). By the same process, decomposing $R_{k,m} \subset H_kW_m$ according to powers of $h_k$, we get a recurrence relation: \[P(R_{k,m}) = P(h_k)^{p^m}P(R_{k,m+1}) + \bigl(\Sigma_{r=0}^{p^m-1}P(h_k)^r\bigr)P(R_{k+1,m})\] and $P(R_{k,m})$ is the unique solution.
Normalizing, define \[f_{k,m} = P(R_{k,m})P(H_k)^{-1}P(W_m)^{-1}.\] We get \[f_{k,m} = P(h_k)^{p^m}f_{k,m+1}(1-P(w_m)) + \bigl(\Sigma_{r=0}^{p^m-1}P(h_k)^r\bigr)f_{k+1,m}(1-P(h_k))\] which becomes \[f_{k,m} = P(h_k)^{p^m}f_{k,m+1}(1-P(w_m)) + (1-P(h_k)^{p^m})f_{k+1,m}.\]
Now let $f_{k,m} = P(H_{k+m})^{-1}$. Substituting into the recurrence relation we get
\begin{multline*} P(H_{k+m})^{-1} = P(h_k)^{p^m}P(H_{k+m+1})^{-1}(1-P(w_m)) + (1-P(h_k)^{p^m})P(H_{k+m+1})^{-1}\\ \dfrac{P(H_{k+m})^{-1}}{P(H_{k+m+1})^{-1}} = P(h_k)^{p^m}(1-P(w_m)) + 1 - P(h_k)^{p^m} \\ 1-P(h_{k+m}) = 1 - P(h_k)^{p^m}P(w_m).
\end{multline*} This is true because $P(h_j) = s^{p^j}t^{p^j-1}$ and $P(w_j)=t^{p^j-1}$ by definition.
\end{proof}
\begin{proposition}\label{exactness-of-U-non-Landweber-exact-case}
The Hopf algebroids in \ref{reduction} are in the category $\mathcal{H}$.
\end{proposition}
\begin{proof} The flatness condition in Definition \ref{unstable-comodule-categories} is easy to check. The exactness condition is harder. Since all $K(n)_*$ and ${E_{n}}_{*}/I_{n}$-modules are free it is immediate that $U$ is exact in those cases. Now consider $(B,\Sigma) = (P(n)_*, BP_*BP/I_{n})$. The proof is essentially the same proof as in Proposition \ref{exactness-of-U-Landweber-exact-case}, using Theorem \ref{Boardman-basis-for-P(n)}, with the obvious modification of changing free $Z_{(p)}$-modules to $F_{p}$-vector spaces.
Finally, $B=B(n)_*$ is obtained from $P(n)_*$ by inverting $v_n$, and since direct limits preserve exactness the result follows for $B(n)_*$ as well.
\end{proof}
The following result is the first part of the proof of Theorem \ref{first-main-theorem}.
\begin{theorem}\label{first-part-of-first-main-theorem} Using the notation of Theorem \ref{first-main-theorem}, there is an isomorphism \[ \ext^{s}_{\mathcal{U}_{ \Gamma_{B} }}(B[t],M) \cong \ext^{s}_{\mathcal{U}_{ {E_{n}}_{*}E_{n}/I_{n} }}({E_{n}}_{*}/I_{n}[t],{E_{n}}_{*}/I_{n}\otimes_{B(n)_{*}} M ). \] \end{theorem}
\begin{proof} It is proved in \cite{HOVSA} using an observation of N. Strickland regarding a result of Lazard's (see Theorem 3.4 and the proof of Theorem 3.1 there) that the faithfully flat condition of Theorem \ref{unstable-faithflat-equiv} is satisfied for the first map in (\ref{reduction}). The second map \[K(n)_* \xrightarrow{} {E_{n}}_{*}/I_{n} \] is a faithfully flat extension, so again Theorem \ref{unstable-faithflat-equiv} applies.
\end{proof}
\noindent {\bf Remark}. In fact, this result can be generalized to an unstable version of Hovey-Sadofsky's change of rings theorem, since in \cite{HOVSA} they show that for $j \le n$ the map
\begin{equation} (B(j)_*, \Gamma_{B(j)_*}) \xrightarrow{} (v_j^{-1}{E({n})}_{*}/I_{j}, v_j^{-1}{E({n})}_{*}E({n})/I_{j}), \end{equation} satisfies the conditions of Theorem \ref{unstable-faithflat-equiv}, but we will not use that in this paper.
\section{More on Unstable Comodules}
Now we give the description of unstable comodules in Morava $E$-theory that we are after. Start by recalling from \cite{DEV} that there is a Hopf algebroid $(U,US)$ which is equivalent to $(BP_*,BP_*BP)$ and lies between $(BP_*,BP_*BP)$ and $(E_{n*}, E_{n*}E_n)$.
Let $FGL_{p}$ be the groupoid valued functor on graded $p$-local algebras which assigns to an algebra $A$ the groupoid $FGL_{p}(A)$ whose objects are $p$-typical formal group laws over $A$, and whose morphisms are strict isomorphisms (\cite{RA2}). Let ${FGL_{p}}_{*}$ be the groupoid valued functor on graded $p$-local algebras which assigns to an algebra $A$ the groupoid ${FGL_{p}}_{*}(A)$ whose objects are pairs $(F,a)$ where $F$ is a $p$-typical formal group law over $A$, $a\in A^{\times}$, and a morphism $f:(F,a) \to (G,b)$ is an isomorphism from $F$ to $G$ with $a = f'(0)b$. If $F$ is a $p$-typical formal group law over $A$, and $a$ is a unit in $A$, define a formal group law $F^{a}$ by $F^{a}(x,y) = a^{-1}F(ax,ay)$.
Define graded algebras, \[U=\mathbf{Z}_{(p)}[u_1,u_2,\dots][u,u^{-1}]\] and \[US = U[s_0^{\pm 1},s_1,s_2,\dots]\]
with $|u_i| = 0$, for $i\ge 1$, $|s_i| = 0$ for $i \ge 0$, and $|u|= -2$.
There is a Hopf algebroid structure on $(U,US)$. There are isomorphisms of groupoid schemes \begin{align*} \theta:(\spec BP_*,\spec BP_*BP) &\to FGL_{p}^{\text{opp}}\\ \theta_{*}:(\spec U,\spec US) &\to {FGL_{p}}_{*}^{\text{opp}} \end{align*} and a natural transformation \[(\spec U , \spec US ) \xrightarrow{\spec \lambda} (\spec BP_* , \spec BP_*BP ),\]
which sends $(F,a)$ to the formal group law $F^{a^{-1}}$. This natural transformation is represented by the graded Hopf algebroid map
\[(BP_*, BP_*BP) \xrightarrow{\lambda} (U,US)\]
given by \begin{align*} \lambda(v_i) &= u_i u^{-(p^i-1)} \\ \lambda(t_i) &= s_i u^{-(p^i-1)} s_0^{-p^i}\ \end{align*} See \cite{DEV} for more details and proofs of these assertions.
The map ${\lambda}$ is a faithfully flat extension of coefficient rings, hence by Hopkins' theorem induces an equivalence of comodule categories. We want to identify the unstable comodule category. Unstably it is preferable to use the generators for $BP_*BP$ given by $h_i = c(t_i)$ where $c$ is the canonical antiautomorphism. In $US$ define $c_i= c(s_i)$ and note that \[c_0 = c(s_0) = s_0^{-1}\] and \begin{equation*} \eta_{R}(u) = c(\eta_{L}(u)) = s_0u = c_0^{-1} u. \end{equation*}
Morava $E$-theory is obtained from $(U,US)$ by killing off $u_i$ for $i>n$, setting $u_n=1$, completing with respect to the ideal $I_n=(p,u_1,u_2,\dots, u_{n-1})$, and tensoring with the Witt ring $W\mathbf{F}_{p^n}$. We have \[(E_{n*},E_{n*}E_n) = (E_{n*},E_{n*}[c_0^{\pm 1},c_1,\dots]{\otimes}_{U} E_{n*}).\] If we reduce modulo $I_n$ then \begin{multline*}
(E_{n*}/I_{n},E_{n*}E_n/I_{n}) \\
= (\mathbf{F}_{p^n}[u,u^{-1}], \mathbf{F}_{p^n}[u,u^{-1}][c_0^{\pm 1},c_1,\dots] {\otimes}_{U} E_{n*} ). \end{multline*}
Applying the canonical anti-automorphism $c$ to the map $\lambda$ we get
\begin{align*}
\lambda(h_i) &= c_i (s_0u)^{-(p^i-1)}c(s_0)^{-p^i} \\
&= c_i s_0^{-(p^i-1)}u^{-(p^i-1)}s_0^{p^i} \\
&= c_i u^{-(p^i-1)} s_0 \\
&= c_i u^{-(p^i-1)} c_0^{-1} \end{align*}
Let $K=(k_1,k_2,\dots)$ be a finite sequence of non-negative integers and denote $h_{1}^{k_1}h_{2}^{k_2}\dots$ by $h^K$ and similarly $c_{1}^{k_1}c_{2}^{k_2}\dots$ by $c^K$. Also denote \[|K|=k_{1}(p-1)+k_{2}(p^2-1) + \dots\] and \[l(K) = k_1 + k_2 + \dots.\] Then we have
\[\lambda(h^K) = c^K u^{-|K|}c_0^{-l(K)}.\]
If $M$ is a $(BP_*,BP_*BP)$-comodule with coaction \[M \xrightarrow{\psi} BP_*BP \otimes_{BP_*} M\] then for each $x\in M$ we have \[ \psi(x) = \sum_K v_{K}h^{K} \otimes m_{K}\] where the sum is indexed over sequences $K$. The coefficient $v_{K}$ is just some element in $BP_*$. For each term in the sum we make the following calculation. Assume $|m_{K}|$ is even.
\begin{align*}
\label{linchpin}\lambda(v_{K}h^{K}) \otimes m_{K} &= u_{K}u^{-|v_K|/2} c^{K}u^{-|K|}c_0^{-l(K)} \otimes m_{K}u^{|m_{K}|/2} u^{-|m_{K}|/2} \\
&= u_{K}u^{-|v_K|/2} c^{K}u^{-|K|}c_0^{-l(K)}\eta_{R}(u^{-|m_{K}|/2}) \otimes m_{K}u^{|m_{K}|/2} \\
&= u_{K}u^{-|v_K|/2} c^{K}u^{-|K|}c_0^{-l(K)}(s_0u)^{-|m_{K}|/2} \otimes m_{K}u^{|m_{K}|/2} \\
&= u_{K}u^{(-|v_K|/2-|m_{K}|/2-|K|)} c^{K}c_0^{-l(K) +|m_{K}|/2} \otimes m_{K}u^{|m_{K}|/2}\\
&= u_{K}u^{(-|v_K|/2-|m_{K}|/2-|K|)} c^{K}c_0^{-l(K) +|m_{K}|/2}\otimes y \end{align*}
where $|y|=0$. In the case where $|m_{K}|$ is odd, multiply and divide on the right by $u^{(|m_{K}|-1)/2}$ resulting in $y$ on the right with $|y|=1$. This motivates the following definition.
\begin{definition}\label{non-negative-comodule} Let $(A,\Gamma)$ denote either the Hopf algebroid $(U,US)$ or
$(E_{n*}, E_{n*}E_n)$. Suppose $M$ is a free $A$-module. Define $V_{\ge 0}(M)$ to be the sub-$A$-module of $\Gamma \otimes_{A} M$ spanned by elements of the form $\gamma \otimes y$ where $y$ is in degree $0$ or $1$, and $\gamma = c_0^{k_0}c^K$ with $k_0 \ge 0$.
Then $V_{\ge 0}$ defines an endofunctor on the category of free $A$-modules. Extend this to an endofunctor on all $A$-modules as in Definition \ref{extend-U}. Now $V_{\ge 0}$ is the functor of a comonad on $A$-modules. Call the coalgebras over $V_{\ge 0}$ the non-negative comodules.
\end{definition}
Recall the category of unstable comodules $\mathcal{V}_{\Gamma}$ defined in \ref{unstable-comodule-categories-Landweber-exact}. \begin{proposition} \label{non-negative-comodules} The categories $\mathcal{V}_{BP_*BP}$ and $\mathcal{V}_{US}$ are both equivalent to the category of non-negative $US$-comodules. The category $\mathcal{V}_{E_{n*}E_n}$ is equivalent to the category of non-negative $E_{n*}E_n$-comodules. \end{proposition}
\begin{proof} For $BP$, by definition a $BP_*BP$-comodule that is free as a $BP_*$-module is unstable if the coaction on each element is in the $BP_*$-span of elements of the form $h^K\otimes m_K$ where
$2l(K) \le |m_K|$. For $(U,US)$ and $E_{n*}(E_n)$ the same condition applies, using the generators $b_i = c_i u^{-(p^i-1)} c_0^{-1}$ which are the images under $\lambda$ of $h_i$. (Refer to \cite{BCM} or \cite{BH}.) The calculation above shows that $V_{\Gamma}(M) = V_{\ge 0}(M)$ for every $M$ that is free as an $A$-module. Since this is true on free modules, it is true on all $A$-modules, and the conclusion for $(A,\Gamma)$ follows. Also, $\mathcal{V}_{BP_*BP}$ is equivalent to $\mathcal{V}_{US}$ by Theorem \ref{unstable-faithflat-equiv} because the map $BP_*\to U$ is faithfully flat. \end{proof}
\noindent {\bf Remark}. This is an analog for height $n$ of a height $\infty$ result which is described in \cite{POW} (Theorem 4.1.4) and \cite{BJ} (Section 4 and Appendix B). It is classical that the dual Steenrod algebra is a group scheme which represents the automorphism group of the additive formal group law. If one considers endomorphisms of the additive formal group law, not necessarily invertible, the representing object of this monoid scheme is a bialgebra, i.e. a `Hopf algebra without an antiautomorphism'. At the prime $2$ this is described explicitly in \cite{BJ} (see Section 4 and Appendix B). Whereas the classical dual Steenrod algebra is expressed as $\mathcal{S}_* = \mathbf{Z}/2[\xi_1,\xi_2,\dots]$, the extended Milnor coalgebra is $\mathcal{A} = \mathbf{Z}/2[a_{0}^{\pm 1},a_1,a_2,\dots]$. One can see that there is an equivalence between the category of graded comodules over $\mathcal{S}_*$ and the category of comodules over $\mathcal{A}$. One can also see that under this equivalence, the category of graded unstable comodules over $\mathcal{S}_*$ is equivalent to the category of `positive' $\mathcal{A}$-comodules, i.e. comodules over the bialgebra $\mathcal{A}^{+} = \mathbf{Z}/2[a_{0},a_1,a_2,\dots]$. In \cite{POW} this result is extended to odd primes and generalized. Our Proposition \ref{non-negative-comodules} is a version for the Landweber--Novikov algebra. This goes back to \cite{MOR}.
We want to translate this to the $\mathbf{Z}/2$-graded case. First consider what happens stably. Let $M$ be an $E_{n*}E_{n}$-comodule, and for simplicity assume for the moment $M$ is concentrated in even degrees.
Let $((E_{n})_{0},(E_{n})_0(E_n))$ be the Hopf algebroid of elements in degree $0$. There is a functor $M \mapsto M_{0}$ from the category of $E_{n*}E_{n}$-comodules to the category of $(E_{n})_0E_{n}$-comodules defined as follows: As an $(E_{n})_0$-module let $M_{0}$ be the elements in $M$ of degree $0$. For $x\in M_{0}$ suppose the $E_{n*}E_{n}$-coproduct is given by $\psi_{M}(x) = \sum \gamma_i \otimes x_i$ and define the ungraded coproduct by
\[ \psi_{M_{0}}(x) = \sum \gamma_i \eta_{R}(u^{-|x_i|/2})\otimes u^{|x_i|/2}x_i.\] This defines an equivalence of categories from $E_{n*}E_{n}$-comodules to $(E_{n})_{0}E_{n}$-comodules and an isomorphism of Ext groups \[ \ext^{s,2t}_{E_{n*}E_{n}}(A,M) \xrightarrow{\cong} \ext^{s,0}_{E_{n*}E_{n}}(A[2t],M) \xrightarrow{\cong} \ext^{s}_{(E_{n})_0(E_{n})}(A[2t]_0, M_{0}).\]
There is an analogous statement for comodules with elements in odd degrees. Combining the two cases we get the functor $M \to M_0 \oplus M_1$ to $\mathbf{Z}/2$-graded $(E_{n})_{0}E_{n}$-comodules.
Looking at $\mathbf{Z}/2$-graded, unstable comodules over the Hopf algebra \begin{equation*}
((E_{n})_{0}/I_{n},(E_{n})_{0}E_n/I_{n})
= (\mathbf{F}_{p^n}, \mathbf{F}_{p^n}[c_0^{\pm 1},c_1,\dots]/ (c_i^{p^{n}}-c_i) ), \end{equation*} denote this Hopf algebra by $(B_0,\Sigma_0)$. The calculation above suggests that we consider the algebra $\mathbf{F}_{p^n}[c_0,c_1,\dots]/ (c_i^{p^{n}}-c_i)$. The coproduct preserves the non-negativity of the exponent of $c_0$ and so this is a bialgebra. It is not a Hopf algebra as there is no anti-automorphism. Soon we'll identify this bialgebra explicitly as a co-monoid object in the category of algebras.
\noindent {\bf Remark}. The map \[ \mathbf{F}_{p^n}[c_0,c_1,\dots]/ (c_i^{p^{n}}-c_i) \xrightarrow{\sigma'} \mathbf{F}_{p^n}[c_0^{\pm 1},c_1,\dots]/ (c_i^{p^{n}}-c_i), \] which corresponds to suspension, is not an injection because $c_{0}^{p^n-1} -1$ is in the kernel. For example, consider the case $n=1$. The module $U_{E(1)_*E(1)}(E(1)_*(S^1)/p)$ contains the element $1 \otimes v_1\iota_1 - v_1\otimes \iota_1$ which is non-zero even though $\eta_R(v_1) = v_1 - ph_1$ because $h_1$ doesn't live on the $1$-sphere. However this element suspends to zero in $E(1)_*E(1)\otimes_{E(1)_*} E(1)_*(S^1)/p$. There is a map \[ U_{E(1)_*E(1)}(E(1)_*(S^1)/p) \xrightarrow{} \mathbf{F}_{p}[c_0,c_1,\dots]/ (c_i^{p}-c_i)\] (see Proposition \ref{bialgebra-comonad} below) and this element goes to $c_{0}^{p-1} -1$.
Let $\Sigma = {E_{n}}_{*}E_n/I_{n}$ and let $N$ denote an unstable $\Sigma$-comodule.
\begin{proposition}\label{bialgebra-comonad} There are isomorphisms \begin{align*} \ext^{s}_{\mathcal{V}_{ \Sigma }}&({E_{n}}_{*}/I_{n}[t], N ) \\ & \cong \ext^{s}_{ \mathcal{V}_{ \Sigma_{0} } } ( (E_{n})_{0}/I_{n}[t] \oplus
(E_{n})_{1}/I_{n}[t] , N_{0}\oplus N_{1} ) \\ &{\cong}
\ext^{s}_{ \mathbf{F}_{p^n}[c_0,c_1,\dots]/ (c_i^{p^{n}}-c_i) }( (E_{n})_{0}/I_{n}[t] \oplus
(E_{n})_{1}/I_{n}[t] ,N_{0}\oplus N_{1}).
\end{align*}
\end{proposition}
\begin{proof} We need to show that the comonad ${V}_{\Sigma_{0}}$ on the category of $\mathbf{Z}/2$-graded $\mathbf{F}_{p^n}$-modules is isomorphic to the comonad given by tensoring with the bialgebra $\mathbf{F}_{p^n}[c_0,c_1,\dots]/ (c_i^{p^{n}}-c_i)$.
It suffices to consider an $\mathbf{F}_{p^n}$-module $N_0$ which has rank one and is assumed to be in degree zero. The degree one case is similar. Recall $V_{\Sigma_0}(N_0)$ is defined as a quotient of $V_{({E_n})_{0}E}(M)$, where $M$ is a free
${(E_n)}_0$-module which maps onto $N_0$, as depicted in the following diagram. We abbreviate $((E_n)_{0},(E_n)_{0}E_n)$ to $(A_{0},\Gamma_0)$ and $\mathbf{F}_{p^n}[c_0,c_1,\dots]/ (c_i^{p^{n}}-c_i)$ to
$\mathbf{F}_{p^n}[c_0,c_1,\dots]/{\sim}$.
\begin{center} \begin{tikzpicture} \node (a) at (0,0) {$\Gamma_0 \otimes_{A_0} M$}; \node (b) at (3,0) {$\Gamma_0 \otimes_{A_0} N_0$}; \node (c) at (6,0) {$\Sigma_0 \otimes_{A_0} N_0$}; \node (d) at (10,0) {$\mathbf{F}_{p^n}[c_0^{\pm 1},c_1,\dots]/{\sim}\otimes N_0$}; \node (e) at (0,-2) {$V_{\Gamma_0}(M)$}; \node (f) at (3,-2) {$V_{\Gamma_0}(N_0)$}; \node (g) at (6,-2) {$B_{0}\otimes_{A_0} V_{\Gamma_0}(N_0) $}; \node (h) at (10,-2) {$\mathbf{F}_{p^n}[c_0,c_1,\dots]/{\sim}\otimes N_0$}; \path[->,font=\scriptsize,>=angle 90] (a) edge node[above] {} (b) (b) edge node[above] {$=$} (c) (c) edge node[above] {$=$} (d) (e) edge node[above] {} (f) (f) edge node[above] {} (g) (g) edge node[above] {$\pi$} (h); \path[right hook->,font=\scriptsize,>=angle 90] (e) edge node[above] {} (a); \path[->,font=\scriptsize,>=angle 90] (f) edge node[above] {} (b) (g) edge node[left] {$\sigma$} (c) (h) edge node[left] {$\sigma'$} (d); \node (k) at (6,-4) {$ V_{\Sigma_0}(N_0) $}; \path[->,font=\scriptsize,>=angle 90] (k) edge node[left] {$=$} (g); \end{tikzpicture} \end{center}
The leftmost vertical map is an injection by definition, since $M$ is a free $A_{0}$-module. The middle vertical maps are not injections because $N_0$ has torsion. The top middle horizontal map is an isomorphism because stably $I_n$ is an invariant ideal. By Proposition \ref{non-negative-comodules} the stabilization map \[B_{0}\otimes_{A_0} V_{\Gamma_0}(N_0) \xrightarrow{\sigma} \mathbf{F}_{p^n}[c_0^{\pm 1},c_1,\dots]/{\sim}\otimes N_0\] factors through a surjective map $\pi$. We need to show that $\pi$ is injective. It is sufficient to show that $\pi\vert_{\ker{\sigma}}:\ker{\sigma} \to \ker{\sigma'}$ is injective. The kernel of $\sigma'$ has an $\mathbf{F}_{p^n}$-vector space basis consisting of the set of monomials $\mathcal{B}' = \{(c_{0}^{p^n-1} - 1)c^K\}$ indexed by finite non-negative sequences $K=(k_1,k_2,\dots)$ satisfying $k_i < p^n$. The corresponding set of elements $\mathcal{B} = \{(c_{0}^{p^n-1}-1)c^K\}$ in $V_{\Gamma_0}(M)$ maps bijectively to $\mathcal{B}'$, and so the vector space span of the image of $\mathcal{B}$ in $V_{\Sigma_0}(N_0)$, call it $S$, is mapped isomorphically to $\ker{\sigma'}$. We just need to check that $S$ is all of $\ker{\sigma}$. If $x\in \ker{\sigma}$, write $x$ as the image of an element $y\in V_{\Gamma_0}(M)$. We can assume that $y$ is a polynomial in the $c_i$'s with coefficients in the Witt ring since any terms containing elements in $I_n$ will map to zero in $V_{\Sigma_0}(N_0)$. Furthermore we can take the exponent of $c_0$ to be non-negative since $y$ satisfies the unstable condition. Finally, we claim that all the exponents of the $c_i$'s in $y$ can be taken to satisfy $k_i < p^n$, because $c_i^{p^n} = c_i$ holds in $V_{\Sigma_0}(N_0)$. Then, since by assumption the image of $y$ in $\Sigma_0 \otimes_{A_0} N_0$ is zero and hence is in the span of $\mathcal{B}'$, this implies that $x \in S$.
In order to verify the claim, we examine the relation $c_{i}^{p^n} = c_i$ which comes from Ravenel's formula (A2.2.5 of \cite{RA2}):
\begin{equation*}
\Sigma^{F^*} h_{k}v_{j}^{p^k} = \Sigma^{F^*} h_{k}^{p^j}\eta_{R}(v_j).
\end{equation*}
This version of the formula is obtained by applying the Hopf algebroid anti-automorphism to the formula in \cite{RA2}.
The coefficients of the formal sum are polynomials in the $\eta_{R}(v)$'s. We are interested in the relation $r$ which is given by the
terms in dimension $|v_{n+i}|$. It is readily verified by the unstable condition of Definition \ref{original-definition} that $r\otimes m$ is defined in $V$ when $|m|=2$. This observation was made by Bendersky and is used in much of his work.
Recall that we are thinking of $N_0$ as the component in degree $0$ of a graded $\Sigma$-comodule $N$. Let $\iota$ denote an $F_{p^n}$-module generator of $N_0$. Then $u^{-1}\iota$ is a generator in dimension $2$. We get \begin{align*} &(v_{n+ i} + h_1v_{n + i -1}^{p} +h_{2}v_{n+i-2}^{p^2} + \dots + h_{n+i}p^{p^{n+i}}) \otimes u^{-1}\iota\\ =&(\eta_{R}(v_{n+i})+h_{1}^{p^{n+i-1}}\eta_{R}(v_{n+i-1})+\dots +h_{n+i}p) \otimes u^{-1}\iota\\
&+(-(\Sigma^{F^*}_{k+j < n+i}h_{k}v_{j}^{p^k})_{|v_{n+i}|} +(\Sigma^{F^*}_{k+j < n+i}h_{k}^{p^j}\eta_{R}(v_{j}))_{|v_{n+i}|}) \otimes u^{-1}\iota \end{align*}
The subscript $|v_{n+i}|$ in the third set refers to the terms in the sum that are in the appropriate dimension.
First consider $i=0$. In $V_{\Sigma}(N)$ we have $v_{n}\otimes u^{-1}\iota = 1\otimes v_{n}u^{-1}\iota$. Then, using $v_n = u^{-(p^n-1)}$ and $\eta_{R}(u^{-1}) = u^{-1}c_0$, we get \begin{align*} u^{-(p^n-1)}\otimes u^{-1}\iota &= 1\otimes u^{-(p^n-1)}u^{-1}\iota\\ u^{-(p^n-1)}\eta_{R}(u^{-1})\otimes \iota &= \eta_{R}(u^{-p^n})\otimes \iota\\ u^{-p^n}c_0 \otimes \iota &= u^{-p^n}c_0^{p^n} \otimes \iota. \end{align*} Since multiplication by $u$ is an isomorphism $N_{i} \cong N_{i-2}$ we conclude
$c_0^{p^n} = c_0$ holds in $V_{\Sigma_0}(N_0)$.
Now suppose $i>0$. By 6.1.13 of \cite{RA2} in $V_{\Sigma}(N)$ we get \[h_{i}v_{n}^{p^i}\otimes u^{-1}\iota = h_i^{p^n}\eta_{R}(v_n) \otimes u^{-1}\iota.\] Substituting $c_iu^{-(p^i-1)}c_0^{-1}$ for $h_i$, and $u^{-(p^n-1)}$ for $v_n$, we get $c_i^{p^n} \otimes \iota = c_i \otimes \iota$, and we conclude that $c_i^{p^n} = c_i$ holds in $V_{\Sigma_0}(N_0)$.
\end{proof}
The next step is to interpret an unstable $(E_{n})_0(E_n)$-comodule in terms of a continuous action of the monoid $\End_n$. The Galois group $\Gal$ acts on $E_{n*}$ by acting on the Witt ring and we have \break $E_{n*}^{\Gal} = \mathbf{Z}_p[[u_1,\dots,u_{n-1}]][u,u^{-1}]$. (In \cite{DEV} this ring is denoted $E_{*}\hat{}$.) According to Morava theory, after completing, there is an isomorphism of Hopf algebroids (Theorem 2.1 of \cite{DH04})
\begin{equation}\label{mapping-space} (E_{n*}^{\Gal} , (E_{n*}^{\Gal} E_{n}^{\Gal} )_{I_n}^{\hat{}})\cong (E_{n*}^{\Gal} ,\text{Map}_c(S_n , W\mathbf{F}_{p^n})^{\Gal}\hat{\otimes}_{\mathbf{Z}_p} E_{n*}^{\Gal} ). \end{equation} The category of graded, complete comodules over this Hopf algebroid is equivalent to the category of continuous, filtered, Galois equivariant twisted $S_n-E_{n*}$ modules. See \cite{DEV}, \cite{DH04} for details.
Mod $I_n$, in degree zero, the Hopf algebroid of equation \ref{mapping-space} becomes \[\label{stable-mod-p-mapping-space}(\mathbf{F}_{p},\text{Map}_c(S_n , \mathbf{F}_{p^n})^{\Gal}) =(\mathbf{F}_{p},\mathbf{F}_{p}[c_0^{\pm 1},c_1,\dots]/(c_i^{p^n}-c_i)).\]
The explicit description of the group scheme of automorphisms of $\Gamma_n$ over an $\mathbf{F}_{p}$-algebra $k$ is as follows. Let $D=\mathbf{F}_{p}[c_0^{\pm 1},c_1,\dots]/(c_i^{p^n}-c_i)$. In \cite{RA2} it is shown that every endomorphism of $\Gamma_n$, i.e. a power series $f$ satisfying \[f(\Gamma_n(x,y)) = \Gamma_n(f(x),f(y)),\] has the form \[f(x) = \sum_{i\ge 0}{}^{\Gamma_n} a_i x^{p^{i}}, \quad a_i\in k \] and this will be an automorphism if and only if $a_0 \in k^{\times}$.
For a ring map $h:D\to k$ let $h$ give the automorphism \[f(x) = \sum_{i\ge 0}{}^{\Gamma_n} h(c_i) x^{p^{i}}.\] If we do not require the coefficient of $x$ to be a unit, then it is apparent that $\spec(\mathbf{F}_{p}[c_0,c_1,\dots]/(c_i^{p^n}-c_i))$ is the monoid scheme whose value on $k$ is the monoid of endomorphisms of $\Gamma_n$ over $k$.
\begin{proposition}\label{unstable-mod-p-mapping-space} There is an isomorphism of bialgebras \[ (\mathbf{F}_{p},\Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal}) =(\mathbf{F}_{p},\mathbf{F}_{p}[c_0,c_1,\dots]/(c_i^{p^n}-c_i)). \] \end{proposition}
\begin{proof} The proof given in Section four of \cite{DEV} applies to $\End_n$ as well. In particular equation (4.14) of \cite{DEV} establishes the result one generator at a time. \end{proof}
So we are studying discrete left comodules over the discrete bialgebra \break $\Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal}$. Still following \cite{DEV}, given a left comodule $M$ with coaction \[ M \xrightarrow{\psi_M} \Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal} {\otimes} M \cong \Map_c(\End_n , \mathbf{F}_{p^n} \otimes M )^{\Gal} \] define a right action of $\End_n$ on $\mathbf{F}_{p^n} \otimes M$ by \[ (a\otimes m)g = a\psi_M(m)(g).\] Note that this is a right action.
\begin{proposition} The functor $M \to \mathbf{F}_{p^n} \otimes M$ is an equivalence from the category of $\mathbf{Z}/2$-graded discrete $\Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal}$-comodules to the category of discrete continuous Galois equivariant right $\End_n$-modules. \end{proposition}
\begin{proof} This is the $\mod I_n$, $\End_n$-analog of Proposition 5.3 of \cite{DEV}, and the same proof applies. The $\End_n$-modules we are considering are modules over $\mathbf{F}_{p^n}$. \end{proof}
Now we complete the proof of Theorem \ref{first-main-theorem}. \begin{proof} Since the $\Gamma$-comodule $M$ is concentrated in odd degrees, \[\ext^{s}_{\mathcal{V}_{\Gamma_B}}(B[t], M) \cong \ext^{s}_{\mathcal{U}_{\Gamma_B}}(B[t], M).\]
By Propositions \ref{bialgebra-comonad} and \ref{unstable-mod-p-mapping-space} it remains to prove that \begin{multline*} \ext^{s}_{\Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal}}((E_{n})_{1}[t]/I_{n} , M) \\ \cong \ext^{s}_{\End_n}( (E_{n})_{1}[t]/I_{n} , \mathbf{F}_{p^n} \otimes M )^{\Gal}. \end{multline*} for a $(\mathbf{F}_{p},\Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal})$-comodule. The group on the right is $\Gal$-equivariant continuous Ext over the monoid $\End_n$.
Again the proof is an adaptation of the proof given in \cite{DEV}. The cohomology of $\End_n$ with coefficients in a right module $N$ can be defined by the cochain complex \[ C^k(\End_n;N) = \Map_c(\End_n \times \dots \times \End_n,N)\] with differential \begin{equation*} \begin{split} df(g_1,\dots, g_{k+1}) &= f(g_2, \dots, g_{k+1}) \\
& \quad + \sum_{j=1}^{k}(-1)^{j}(g_1,\dots,g_jg_{j+1},\dots, g_{k+1}) \\
& \quad + (-1)^{k+1}f(g_1,\dots,g_k)g_{k+1}. \end{split} \end{equation*} The cobar complex for $\Map_c(\End_n , \mathbf{F}_{p^n})^{\Gal}$ is isomorphic to $C^*(\End_n;N)^{\Gal}$, the only difference from \cite{DEV} being that we are interpreting the action as a right action. \end{proof}
\section{Cohomological Dimension}\label{cohomological-dimension}
The purpose of this section is to prove the following proposition, which implies Theorem \ref{second-main-theorem} of the introduction.
\begin{proposition}\label{morava-vanishing-theorem} Let $M$ be a continuous Galois equivariant \break \mbox{$\End_n$-$(E_{n})_0/I_n$}-module, concentrated in degree $1$. Suppose $(p-1) \nmid n$. Then $ \ext^{s}_{\End_n}((E_{n})_{1}[t]/I_{n},M)^{\Gal} = 0$ for $s > n^2+1$. \end{proposition}
To begin, note that \[ (E_{n})_{1}[t]/I_{n} = \begin{cases} \mathbf{F}_{p^n} \,\, \text{if $t$ is odd} \\ 0 \,\, \text{if $t$ is even} \end{cases}. \] The action of $\End_n$ depends on $t$. Since \[\ext^{s}_{\End_n}(\mathbf{F}_{p^n},M)^{\Gal} \subset \ext^{s}_{\End_n}(\mathbf{F}_{p^n},M)\] (see \cite{DEV}), for purposes of studying the vanishing line we can disregard the action of the Galois group.
The proof will be based on a construction used by Bousfield---see for example \cite{BO7}, Subsection 3.1. Here we carry out a version for the stabilizer group. To begin, recall from \cite{BO7} that if $E$ is a monoid which possesses an `absorbing element' $0$, in other words an element such that $0e = e0 = 0$ for all $e\in E$, then any $E$-module $M$ has a decomposition \[ M = M_{\text{red}} \oplus M_{\text{fix}}\] where $M_{\text{red}} = \{ x\in M \,\vert\, x0 =0 \}$ and $M_{\text{fix}} = \{ x\in M \,\vert\, x0 = x \}$. Call a module {\em reduced} if $M = M_{\text{red}}$, and {\em trivial} if $M = M_{\text{fix}}$. Notice that $M_{\text{fix}} = \{ x\in M \,\vert\, xe = x \, \forall e \in E \}$. Let $\mathcal{E}^{\text{dis}}$ denote the category consisting of discrete $\mathbf{F}_{p^n}$-modules with a continuous right action of $\End_n$, and let $\mathcal{E}^{\text{dis}}_{\text{red}}$ denote the full subcategory of reduced modules. Because the functor $M \mapsto M_{\text{red}}$ is right adjoint to the forgetful functor, and the forgetful functor takes monomorphisms to monomorphisms, it follows that if $I$ is injective in $\mathcal{E}^{\text{dis}}$, then $I_{\text{red}}$ is injective in $\mathcal{E}^{\text{dis}}_{\text{red}}$.
The monoid $E=\End_n$ has an absorbing element. If $\mathbf{F}_{p^n}$ is trivial then $\ext^{s}_{\End_n}( \mathbf{F}_{p^n} ,M) = 0$ for $s>0$ because \[\Hom_{\End_n}( \mathbf{F}_{p^n} ,\,\,\,) = (\,\,\,)_{\text{fix}}\] is an exact functor. So we can assume that $\mathbf{F}_{p^n}$ is reduced, from which it follows that \[\ext^{s}_{\mathcal{E}^{\text{dis}}}( \mathbf{F}_{p^n} ,M) =\ext^{s}_{\mathcal{E}^{\text{dis}}_{\text{red}}}( \mathbf{F}_{p^n} ,M_{\text{red}}).\] So we can assume without loss of generality that all of our $\End_n$-modules are reduced.
So far we have been considering Ext groups in the category of discrete $\mathbf{F}_{p^n}$-modules with a continuous action of $\End_n$. The following construction will require us to work in the category of $p$-profinite $\mathbf{F}_{p^n}$-modules with a continuous action of $\End_n$. Pontryagin duality implies that these two categories of $\End_n$-modules are equivalent. Note that Pontryagin duality takes $\mathbf{F}_{p^n}$-modules to $\mathbf{F}_{p^n}$-modules, right $\End_n$-modules to left $\End_n$-modules, and reduced modules to reduced modules.
Let $\mathcal{A}$ denote the category consisting of $p$-profinite $\mathbf{F}_{p^n}$-modules with a continuous left action of $S_n$. Let $\mathcal{E}$ denote the category consisting of $p$-profinite $\mathbf{F}_{p^n}$-modules with a continuous left action of $\End_n$, and let $\mathcal{E}_{\text{red}}$ denote the full subcategory of $\mathcal{E}$ consisting of reduced modules. There is an obvious forgetful functor $J:\mathcal{E} \to \mathcal{A}$.
\begin{definition} We define a functor $\tilde{F}:\mathcal{A} \to \mathcal{E}$ as follows: For an $S_n$-module $M$, let $\tilde{F}(M)$ be $M\times M \times M \dots$ as an abelian group. For $g\in S_n$, $x=(x_1,x_2,\dots) \in \tilde{F}(M)$, define \[gx = (gx_1, g^{\sigma^{-1}}x_2,g^{\sigma^{-2}}x_3,\dots,g^{\sigma^{-(n-1)}}x_n,gx_{n+1}\dots).\] For the element $S\in \End_n$, define $Sx = (0,x_1,x_2,\dots)$. This defines a continuous $\End_n$ action on $\tilde{F}(M)$ as one can readily check the relation $Sgx = g^{\sigma}Sx$. \end{definition}
\begin{proposition}\label{Sn-End-adjunction} The functor $\tilde{F}$ takes values in $\mathcal{E}_{\text{red}}$, and is left adjoint to $J$ restricted to $\mathcal{E}_{\text{red}}$. \end{proposition}
\begin{proof} The unit of the adjunction $M\to J\tilde{F}(M)$ is given by $x\mapsto (x,0,0,\dots)$. The counit of the adjunction $\tilde{F}J(N) \to N$ is given by \[(x_1,x_2,x_3,\dots) \mapsto x_1 + Sx_2 + S^2x_3 + \dots.\] which converges because $N$ is reduced. \end{proof}
Proposition \ref{Sn-End-adjunction} says $\Hom_{\mathcal{E}_{\text{red}}}(\tilde{F}(M),N) \cong \Hom_{\mathcal{A}}(M,JN)$. We would like a similar statement for $\ext$.
\begin{proposition} The functors $\tilde{F}$ and $J$ are exact. It follows that for all~$s$, $\ext^s_{\mathcal{E}_{\text{red}}}(\tilde{F}(M),N) \cong \ext^s_{\mathcal{A}}(M,JN)$.\end{proposition}
\begin{proof}Straightforward.\end{proof}
Now we need a fundamental exact sequence. For an object $M$ in $\mathcal{A}$ define an object $M'$ in $\mathcal{A}$ as follows. Let $M' = M$ as $\mathbf{F}_{p^n}$-modules and for each $g\in S_n$, $x'\in M'$, let $gx' = g^{\sigma^{-1}}x$, where $x=x'$ and the expression on the right is the action on $M$. If $N$ is an object in $\mathcal{E}_{\text{red}}$ there is a map $S:N \to N$. If we think of $S$ as a map $S:(JN)' \to JN$ then one can check that $S$ is a morphism in $\mathcal{A}$. Thus we can define $\tilde{F}(S)$. Also $S:\tilde{F}((JN)') \to \tilde{F}(JN)$ is a morphism in $\mathcal{E}$. This gives a map \[\partial = \tilde{F}(S)-S:\tilde{F}((JN)') \to \tilde{F}(JN)\] in $\mathcal{E}_{\text{red}}$ and we have
\begin{proposition} There is a SES in $\mathcal{E}_{\text{red}}$ \[0 \xrightarrow{} \tilde{F}((JN)') \xrightarrow{\partial} \tilde{F}(JN) \xrightarrow{} N \xrightarrow{} 0.\] \end{proposition}
\begin{corollary} There is a LES for any pair of reduced $\End_n$-modules $N$ and $L$. \[\dots \to \ext^s_{\mathcal{E}}(N,L) \to \ext^s_{\mathcal{A}}(JN,JL)\to \ext^s_{\mathcal{A}}((JN)',JL)\to \ext^{s+1}_{\mathcal{E}}(N,L) \to \dots \] \end{corollary}
To finish the proof of Proposition \ref{morava-vanishing-theorem} we apply the preceding corollary to the case where $L$ is the Pontryagin dual of $(E_{n})_{1}[t]/I_{n} = \mathbf{F}_{p^n}$ and $N$ is the Pontryagin dual of $M$. The forgetful functor $J$, which restricts the action of $\End_n$ to the stabilizer group $S_n$, corresponds to suspension, and $JL$ is dual to $(E_n)_{1}(S^t)/I_{n}$ stably, as an $S_n$-module. The $S_n$-module $(E_n)_{1}(S^t)/I_{n}$ may or may not have the trivial action, depending on $t$. However, we have an extension \[1 \to {S'}_n \to S_n \to \mathbf{F}_{p^n}^{\times} \to 1\] where ${S'}_n$ is the $p$-Sylow subgroup of $S_n$, i.e. ${S'}_n$ is the group of strict automorphisms of the Honda formal group law, which does act trivially on $(E_n)_{1}(S^t)/I_{n}$, and the Lyndon-Hochschild-Serre spectral sequence collapses to give \[\ext^{0}_{\mathbf{F}_{p^n}^{\times}}((E_{n})_{1}[t]/I_{n},H^s({S'}_n;M)) \cong \ext^s_{S_n}((E_{n})_{1}[t]/I_{n},M).\] Here $H^s({S'}_n;M)$ denotes group cohomology. For the stated values of $n$ and $p$, the $p$-Sylow subgroup ${S'}_n$ of the Morava stabilizer group has finite cohomological dimension equal to $n^2$ (see, for example, \cite{MOR} or \cite{RA2}) and Proposition \ref{morava-vanishing-theorem} follows.
We will finish this section by sketching an outline of a second possible proof of Proposition \ref{morava-vanishing-theorem}. This approach, which is conjectural because it presumes a Lyndon-Hochschild-Serre spectral sequence in the case of a monoid that is neither discrete nor profinite, may have a more intuitive appeal.
Since we can assume all modules under consideration are reduced, there is an isomorphism \[\ext^{s}_{\End_n}(\mathbf{F}_{p^n},M) = \ext^{s}_{\End_n-\{0\}}(\mathbf{F}_{p^n},M).\]
Any element in $\End_n-\{0\}$ can be uniquely written in the form $gS^k$ where $g\in S_n$ and $k \ge 0$.
This gives a monoid isomorphism from $\End_n-\{0\}$ to the semidirect product $S_n \rtimes \mathbf{N}$, where $\mathbf{N}$ is the free monoid on one generator.
Apply the Lyndon-Hochschild-Serre spectral sequence to the extension \[ 1 \to S_{n} \to S_{n} \rtimes \mathbf{N} \to \mathbf{N} \to 1\] to get \[\ext^p_{\mathbf{N}}(\mathbf{F}_{p^n},\ext^{q}_{S_n}(\mathbf{F}_{p^n},M)) \Rightarrow \ext^{p+q}_{S_{n} \rtimes \mathbf{N}}(\mathbf{F}_{p^n} ,M).\] The group $S_{n}$ has finite cohomological dimension equal to $n^2$ as noted above. Since the cohomological dimension of $\mathbf{N}$ is 1, the result follows.
{}
\end{document} |
\begin{document}
\maketitle
\begin{abstract} We present a new characterization of the shape equivalent class and the Littlewood-Richardson correspondence of Young tableaux in terms of tropical (ultradiscrete) integrable systems. As an application, an alternative proof of the ``shape change theorem'' is given. \end{abstract}
\section{Introduction}
The {\it tropicalization} is the transformation \[ a+b\mapsto \min[A,B],\qquad ab\mapsto A+B,\qquad a^{-1}\mapsto -A, \] through which the structure of rings $(+,\times,{}^{-1})$ is transformed into the structure of semi-fields $(\min,+,-)$. For example, the tropicalization of a polynomial function is a piecewise linear function. It is known that there are many interesting examples in the fields of combinatorics, mathematical physics, {\it etc.}~where the tropicalization provides new insights and applications.
One of the most significant cases is the {\it Young tableau}. The earliest study on the tropical aspects of Young tableaux was made by A.~N.~Kirillov~\cite{2001phco.conf...82K}, who introduced the {\it geometric RSK correspondence} (originally, tropical RSK correspondence\footnote{The word ``tropical'' nowadays has a different meaning. Many researchers prefer to use the ``geometric RSK correspondence'' instead.}). This correspondence was studied further by M.~Noumi and Y.~Yamada~\cite{noumi2004} by means of tropical (ultradiscrete) integrable systems. In~\cite{noumi2004}, they showed that the {\it row-bumping algorithm} is expressed as a recurrence equation of tropical matrices, which is the tropicalization of the {\it discrete Toda equation}. This technique has been accepted as a fundamental tool for studies on the combinatorics of Young tableaux and related topics. (For recent studies on the geometric RSK correspondence and its applications, see \cite{corwin2014tropical,nguyen2016variants,o2014geometric} and references therein.)
On the other hand, Y.~Mikami~\cite{mikami2012en}, Y.~Katayama, and S.~Kakei~\cite{kakei2015en} introduced another new correspondence between Young tableaux and tropical integrable systems. Interestingly enough, their correspondence is apparently independent of Noumi and Yamada's correspondence. In 2018, Iwao~\cite{iwao2018rims} presented a tropical characterization of the {\it rectification of skew tableaux} based on these correspondences. We would expect that the combination of Noumi-Yamada's geometric tableaux and Katayama-Kakei's correspondence provides rich tropical interpretations of the combinatorics of Young tableaux.
\renewcommand\thesubsubsection{\arabic{section}.\arabic{subsubsection}}
This work is a continuation of~\cite{iwao2018rims}. In Section \ref{sec:2}, we briefly review the previous work~\cite{iwao2018rims}. The jeu de taquin slide, which is a fundamental procedure of the combinatorics of Young tableaux, is expressed by the recurrence formula (\ref{eq:QandW}). This formula is equivalent to the combinatorial procedure $\varphi_k$ (\S \ref{sec:2.3}). The rectification of Young tableaux is now expressed as a composition of finitely many $\varphi_k$'s (see (\ref{eq:diagram-of-rectifiction})). It is diagrammatically expressed by a planar diagram such as (\ref{eq:diagram-of-rectifiction}). From this diagram, one can induce some {\it horizontal} diagram (see (\ref{eq:diagram-of-rectifiction-tate})) by getting each column ``together in one bundle.'' It is shown that these horizontal diagrams can be defined independently of the choice of planar diagrams.
In Section \ref{sec:3}, we deal with the ``dual'' object to the previous section. It would be natural to ask ``what will happen if one gets each {\it row} of the planar diagram together in one bundle?'' The result is some {\it vertical} diagram (see (\ref{eq:diagram-of-rectification-yoko})). We show (Proposition \ref{prop:map-of-UI}) that this vertical diagram is determined independently of the choice of planar diagrams. The proof is based on the fact that the diagram is characterized by some $\mathcal{L}$-formula (\S \ref{sec:1.3}). As an application, we give a proof of the ``shape change theorem~\cite{fulton_1996}'' (Theorem \ref{thm:main1}).
One can do both procedures for rows and columns simultaneously. In Section \ref{sec:4}, we introduce some ``concentrated'' diagram by getting each row and each column together in one bundle (see (\ref{eq:diagram-of-rectification-compact})). It is shown that this diagram is closely related with the {\it Littlewood-Richardson correspondence}. Moreover, we present a new characterization of the Littlewood-Richardson correspondence in terms of the tropical mathematics (Theorem \ref{thm:main2}).
For the convenience of readers, we give a short introduction to the combinatorics of Young tableaux in \S \ref{sec:appA}. The definition of the Takahashi-Satsuma Box-Ball system is given in \S \ref{sec:appB}.
\subsubsection{Notations for Young tableaux}
In this article, we follow the conventions in Fulton's book \cite{fulton_1996}. Let $\lambda=(\lambda_1\geq \lambda_2\geq \dots\geq \lambda_\ell)$ be a Young diagram. A {\it semi-standard tableau of shape $\lambda$} is obtained by filling the boxes in $\lambda$ with a number according to the following rules: (i) in each row, the numbers are weakly increasing from left to right, (ii) in each column, the numbers are strongly increasing from top to bottom. A semi-standard tableau is often referred to as {\it tableau} shortly. A tableau with $n$ boxes is called {\it standard} if it contains distinct $n$ numbers $1,2,\dots,n$. Let $\lambda/\mu$ be a skew diagram.
A {\it skew (semi-standard) tableau of shape $\lambda/\mu$} is obtained by filling the boxes with a number according to the same rule for a tableau. If a skew tableau with $n$ boxes contains distinct $n$ numbers $1,\dots,n$, it is said to be {\it standard}. See \S\ref{sec:appA} for other definitions.
\subsubsection{Notations for tropicalization}\label{sec:1.3}
We use the notations of mathematical logic in order to simplify arguments for tropicalization. For details, see \cite{iwao2018rims}.
Let $\mathcal{L}=\{+,\cdot,{}^{-1},1\}$ be a language, where $+$ and $\cdot$ are binary function symbols, ${}^{-1}$ is a unary function symbol, and $1$ is a constant symbol. Define the $\mathcal{L}$-structures $\mathcal{M}=(M,+,\cdot,{}^{-1},1)$ and $\o{\mathcal{M}}=(\o{M},\o{+},\o{\cdot},\o{{}^{-1}},\o{1})$, and the morphism $M\to \o{M}; x\mapsto \overline{x}$ of $\mathcal{L}$-structures as follows: \begin{itemize} \item $M$ is the set of germs at $\epsilon=0$ of continuous functions $f(\epsilon)$ of $\epsilon>0$ that satisfy $\lim\limits_{\epsilon\to +0}\epsilon\log f(\epsilon)\in \mathord{\mathbb{R}}$. Here $+,\cdot,{}^{-1}$ are standard addition, multiplication, and multiplicative inverse\footnote{Abstractly, $M$ can be replaced with any semi-field $M^{\ast}=(M^\ast,+,\cdot,{}^{-1},1)$ such that (i) there exists a surjection $M^\ast\to \o{M}$ of $\mathcal{L}$-structures, and (ii) $\mathrm{Th}(M^\ast)\supset \mathrm{Th}(M)$. Here $\mathrm{Th}(M)$ is the {\it theory} of $M$, which is the set of $\mathcal{L}$-sentences that are true over $M$. }. \item $\o{M}=\mathord{\mathbb{R}}$, $\o{+}=\min$, $\o{\cdot}=+$, $\o{{}^{-1}}=-$. \item $\overline{f(\epsilon)}=-\lim\limits_{\epsilon\to +0}\epsilon\log f(\epsilon)$. \end{itemize} The word ``$\mathcal{L}$-term'' means a subtraction-free rational function. The morphism $M\to \o{M};f(\epsilon)\mapsto \o{f(\epsilon)}$ is called the {\it ultradiscretization} or {\it tropicalization}.
The correspondence among combinatorial, tropical, and geometric objects is given as follows:
$$ \vbox{\offinterlineskip \halign{\vrule width 0pt height 15pt depth 10pt
$\vcenter{\hbox{#}}$
\vrule&\quad
$\vcenter{\hbox{#}}$
\quad\vrule &\quad
$\vcenter{\hbox{#}}$
\cr
Combinatorial & Tropical ($\simeq$ ultradiscrete) & Geometric \cr \hline\hline
\vbox{\hbox{Jeu de taquin slide} \hbox{starting from $k^\mathrm{th}$ row}} & The map $\varphi_k$ (\S \ref{sec:2.3}) & \vbox{\hbox{The discrete eq.~(\ref{eq:Laxform})} \hbox{$(L_j^t)_j\mapsto (L_j^{t+1})_j$,} \hbox{where $\o{R_0^t}=\o{E}([k])$} } \cr\hline
Rectification & Planer diagram (\ref{eq:diagram-of-rectifiction})
& Composition of (\ref{eq:Laxform})
\cr\hline
\vbox{\hbox{Sub-diagram consisted} \hbox{of empty boxes}} & Associated tableau & Geometric tableau\cr\hline
Shape equivalent class &Associated circled array& \vbox{\hbox{``$F$-matrix version'' of} \hbox{Geometric tableau \hspace{-15pt}\phantom{$k^\mathrm{th}$} (\ref{eq:F-geometric-tableau})}}\cr } } $$
\section{Tropical (ultradiscrete) KP and jeu de taquin}\label{sec:2}
In this section, we briefly review the previous results~\cite{iwao2018rims}. For definitions of the terms {\it ``jeu de taquin slide,'' ``inside corner,'' ``outside corner,'' etc.}, see \S \ref{sec:appA}.
\subsection{Tropical KP equation}\label{sec:2.1}
Let us consider the tropical KP equation: \begin{equation}\label{eq:udKP} F_{i,j}^t+F_{i,j+1}^{t+1}=\max\left[ F_{i+1,j+1}^t+F_{i-1,j}^{t+1}, F_{i,j+1}^t+F_{i,j}^{t+1} \right]. \end{equation} The following theorem is due to~\cite{kakei2015en}: \begin{thm}[\cite{mikami2012en,kakei2015en}] For a sequence of skew tableaux $S^0,S^1,S^2,\dots$, set \[ F_{i,j}^t= \left( \begin{array}{c} \mbox{the number of boxes in $1^\mathrm{st},2^\mathrm{nd},\dots,i^{\mathrm{th}}$ rows in $S^t$ }\\ \mbox{which are indexed by a number smaller than or equal to $j$} \end{array} \right), \] where an empty box is considered as a box indexed by $0$. If each $S^{t+1}$ is obtained from $S^t$ by a jeu de taquin slide, $(F_{i,j}^t)_{i,j,t}$ satisfies the tropical KP equation $(\ref{eq:udKP})$. \end{thm}
\begin{prop}[\cite{iwao2018rims}] By setting \begin{equation}\label{prop:change} \begin{aligned} &Q_{i,j}^t=F_{i,j}^t+F_{i-1,j}^{t+1}-F_{i-1,j}^t-F_{i,j}^{t+1},\\ &W_{i,j}^t=F_{i,j}^t+F_{i,j+1}^{t}-F_{i-1,j}^t-F_{i+1,j+1}^{t}, \end{aligned} \end{equation} we have \begin{equation}\label{eq:QandW} \begin{aligned} &Q_{i+1,j+1}^t=(\min[Q_{i+1,j}^t,W_{i+1,j}^t]-\min[Q_{i,j}^t,W_{i,j}^t])+Q_{i,j}^t,\\ &W_{i,j}^{t+1}=(\min[Q_{i+1,j}^t,W_{i+1,j}^t]-\min[Q_{i,j}^t,W_{i,j}^t])+W_{i,j}^t. \end{aligned} \end{equation} \end{prop}
\subsection{Skew tableau and matrix $W$}\label{sec:2.2}
For a skew tableau, let $F_{i,j}$ be the number of $1,2,\dots,j$'s contained in the $1^{\mathrm{st}},2^{\mathrm{nd}},\dots,i^{\mathrm{th}}$ rows. Define \begin{align*} W_{i,j} &=F_{i,j}+F_{i,j+1}-F_{i-1,j}-F_{i+1,j+1}\\ &=\sharp \{1,2,\dots,j\mbox{'s in the } i^{\mathrm{th}} \mbox{ row}\}-\sharp\{1,2,\dots,(j+1)\mbox{'s in the } (i+1)^{\mathrm{th}} \mbox{ row}\}. \end{align*} By definition of skew tableaux, $W_{i,j}$ must be non-negative. Moreover, $W_{i,j}$ satisfies the following conditions: \begin{gather} \mbox{There exists some $N$ such that $j>N\Rightarrow W_{i,j}=W_{i,j+1}$ for all $i$}.\label{eq:cond1}\\ \mbox{There exists some $d$ such that $i>d\Rightarrow W_{i,j}=0$ for all $j$}.\label{eq:cond2}\\ \textstyle \sum_{p\geq 0}W_{i+p,j+p}\geq \sum_{p\geq 0}W_{i+1+p,j+p}.\label{eq:cond3} \end{gather}
Let $\Omega$ be the set of skew tableaux. Define \begin{equation*} \begin{aligned} \mathfrak{X}:=\left\{(W_{i,j})_{\kasane{i\geq 1}{j\geq 0}}\ \middle\vert\ W_{i,j}\in \mathord{\mathbb{Z}}_{\geq 0},\ \mbox{with conditions (\ref{eq:cond1}), (\ref{eq:cond2}), (\ref{eq:cond3})} \right\}. \end{aligned} \end{equation*} Consider the mapping $W:\Omega\to \mathfrak{X}$ which associates a skew tableau with the matrix $(W_{i,j})_{i,j}$. \begin{prop}[\cite{iwao2018rims}] $W$ is bijective. \end{prop}
We always identify $\Omega$ with $\mathfrak{X}$. While the matrix $W$ is of infinite size, it is ``essentially finite'' because of (\ref{eq:cond1}) and (\ref{eq:cond2}). To simplify notations, we often regard $W$ as a matrix of size $d\times N$.
\subsection{Jeu de taquin slide starting from the $k^{\mathrm{th}}$ row}\label{sec:2.3}
Now we construct the map $\varphi_k:\mathfrak{X}\to \mathfrak{X}$ for any positive integer $k$, which is a tropical counterpart of the jeu de taquin slide starting from the $k^\mathrm{th}$ row. Let $W=(W_{i,j})\in \mathfrak{X}$ and $[k]=(0,\dots,0, \stackrel{\stackrel{k}{\vee}}{1},\allowbreak 0,\dots)$. The definition of $\varphi_k$ is given as follows~\cite{iwao2018rims}: \begin{enumerate} \item Let $\bm{Q}_0=(Q_{1,0},Q_{2,0},\dots):=[k]$. \item Compute $Q_{i,j}$, $W_{i,j}^+$ recursively as follows: If the vector $\bm{Q}_j=(Q_{1,j},Q_{2,j},\dots)$ is already defined for some $j\in \mathord{\mathbb{Z}}_{\geq 0}$, define the new vectors $\bm{Q}_{j+1}=(Q_{1,j+1},Q_{2,j+1},\dots)$ and $\bm{W}^+_j=(W^+_{1,j},W^+_{2,j},\dots)$ by the formula \begin{equation}\label{eq:R-matrix} \begin{cases} Q_{i+1,j+1}=(\min[Q_{i+1,j},W_{i+1,j}]-\min[Q_{i,j},W_{i,j}])+Q_{i,j},\\ W_{i,j}^+=(\min[Q_{i+1,j},W_{i+1,j}]-\min[Q_{i,j},W_{i,j}])+W_{i,j}, \end{cases} \end{equation} where $Q_{0,j}=0$, $W_{0,j}=+\infty$. (Compare with (\ref{eq:QandW})). \end{enumerate} Define $\varphi_k(W)=(W_{i,j}^+)$.
We regard (\ref{eq:R-matrix}) as a recurrence formula whose inputs are $\bm{Q}_j$ and $\bm{W}_j$, and outputs are $\bm{Q}_{j+1}$ and $\bm{W}^+_j$. It is convenient to draw a diagram such as $ \sayou{ & \bm{W}_j & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bm{Q}_j & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}_{j+1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& \bm{W}^+_j }$, where the inputs are written on the left and top sides, and the outputs are on the right and bottom sides. The whole procedure to calculate $\varphi_k(W)$ is diagrammatically displayed as \begin{equation}\label{eq:one-jeu-de-taquin-slide} \sayou{
& \bm{W}_0 & & \bm{W}_1 & & \bm{W}_2 & & \bm{W}_3 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bm{Q}_0=[k] & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}_1 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}_2 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}_3 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ &\cdots\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& \bm{W}^+_0 & & \bm{W}^+_1 & & \bm{W}^+_2 & & \bm{W}^+_3 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} }. \end{equation}
Moreover, the map $\varphi_k$ also admits a combinatorial interpretation as follows: \begin{itemize} \item Draw a path on the matrix $W=(W_{i,j})$ by the following rule (see \S \ref{example:first}): \begin{itemize} \item The path starts at the $(k,0)^{\mathrm{th}}$ position. \item When the path reaches at the $(i,j)^{\mathrm{th}}$ position, extend it to the lower right neighbor if $W_{i,j}=0$, or to the right neighbor if $W_{i,j}\neq 0$. \end{itemize} \item For each non-zero number $W_{i,j} $ on the path, decrease it by one and increase the number at the upper neighbor by one; $W_{i,j}\mapsto W_{i,j}-1$, $W_{i-1,j}\mapsto W_{i-1,j}+1$. The matrix given by this procedure coincides with $\varphi_k(W)$. \item The matrix $Q=(Q_{i,j})_{i,j}$ is given by putting $Q_{i,j}=1$ if the path goes through the $(i,j)^\mathrm{th}$ position, and $Q_{i,j}=0$ otherwise. \end{itemize}
\subsection{Example}\label{example:first}
The jeu de taquin slide \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \bl & 2 & 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray & 1 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} } \quad \Tableau{\bl & \bl & \bl & 2 & 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & \gray & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} } \quad \Tableau{\bl & \bl & \bl & 2 & 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & \gray & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} } \quad \Tableau{\bl & \bl & \bl & 2 & 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 4 & \gray \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} } \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] corresponds with the matrices \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ W= \left( \haiti{
1 & 1 & 1 & 1 & 1 & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1\hbox to 0pt{$\to$}&0\vbox to 0pt{\hbox to 0pt{$\searrow$}}& 0 & 0 & 1 & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
0 & 0 &2\hbox to 0pt{$\to$}&2\hbox to 0pt{$\to$}&3\hbox to 0pt{$\to$}& 3 } \right),\quad Q= \left( \haiti{ 0 & 0 & 0 & 0 & 0 & 0 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 1 & 0 & 0 & 0 & 0 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 0 & 0 & 1 & 1 & 1 & 1 } \right). \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] The matrix $\varphi_k(W)$ is given as $ \left( \haiti{ 2 & 1 & 1 & 1 & 1 & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 0 & 0 & 1 & 1 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 0 & 0 & 1 & 1 & 2 & 2 } \right) $.
\subsection{Rectification}
Any skew tableau reaches a (non-skew) tableau thorough a sequence of finitely many jeu de taquin slides. To repeat jeu de taquin slides is nothing but to choose inside corners repeatedly. By filling numbers in chosen inside corners in decreasing order, one obtains a standard tableau. For example, if we apply the sequence of jeu de taquin slides to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \bl & \bl & \bl & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 1 & 3 & 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 4 }\qquad \mbox{defined by}\qquad \Tableau{ \lgray 1 & \lgray 2 & \lgray 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \lgray 4 }, \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] we obtain the sequence of skew tableaux \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \bl & \bl & \bl & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray & 1 & 3 & 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 4 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl & \bl & \gray & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 2 & 3 & 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 4 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl & \gray & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 2 & 3 & 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 4 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \gray & 2 & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 3 & 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 4 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ 1 & 2 & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} }. 
\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] We call the (non-skew) tableau obtained by this procedure the {\it rectified tableau}. With diagrammatic expressions as in \S \ref{sec:2.3}, this procedure is displayed as \begin{equation}\label{eq:diagram-of-rectifiction} \sayou{ &(1,1,0)&&(1,0,0)&&(1,0,2)&&(1,0,2)&&(1,1,3)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]& \cdots\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &(2,0,0)&&(1,0,0)&&(1,1,1)&&(1,1,1)&&(1,2,2)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]& \cdots\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &(1,0,0)&&(0,0,0)&&(0,1,1)&&(0,1,1)&&(0,2,2)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]& \cdots\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &(0,0,0)&&(0,0,0)&&(1,0,1)&&(1,0,1)&&(1,1,2)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]& \cdots\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &(0,0,0)&&(0,0,0)&&(1,1,0)&&(1,1,0)&&(1,2,1)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} }. \end{equation}
If $[k]$ is placed on the leftmost column, it represents the jeu de taquin slide starting from the $k^\mathrm{th}$ row. The diagram (\ref{eq:diagram-of-rectifiction}) presents a sequence of jeu de taquin slides starting from the $2^\mathrm{nd}$, $1^\mathrm{st}$, $1^\mathrm{st}$, $1^\mathrm{st}$ rows. The vectors at the bottom correspond with the rectified tableau. In fact, \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ 1 & 2 & 2 & 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} }\longleftrightarrow \left( \haiti{ 0 & 0 & 1 & 1 & 1 & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 0 & 0 & 1 & 1 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 0 & 0 & 0 & 0 & 1 & 1 } \right). \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}]
\subsection{Associated tableaux}\label{sec:2.6}
For diagrams such as (\ref{eq:diagram-of-rectifiction}), we assign the tableau $\Tableau{t_1}\leftarrow \Tableau{t_2}\leftarrow \dots\leftarrow \Tableau{t_d}$ to a column whose entries are $[t_1],[t_2],\dots,[t_d]$ from top to bottom. For example, from (\ref{eq:diagram-of-rectifiction}), we induce the new diagram \begin{equation}\label{eq:diagram-of-rectifiction-tate} \sayou{ &(1,1,0)&&(1,0,0)&&(1,0,2)&&(1,0,2)&&(1,1,3)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{1&1&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\2}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&\Tableau{1&1&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\2}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&\Tableau{1&2&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\3}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+& \Tableau{1&2&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\3}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&\Tableau{1&2&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\3}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&\cdots\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &(0,0,0)&&(0,0,0)&&(1,1,0)&&(1,1,0)&&(1,2,1)&\cdots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} } \end{equation} It was proved by Iwao~\cite{iwao2018rims} that if one exchanges the entries in the leftmost column in (\ref{eq:diagram-of-rectifiction}) with $[t'_1],[t'_2],\dots,[t'_d]$ that gives the same tableau, one also obtain the same diagram (\ref{eq:diagram-of-rectifiction-tate}). This means that the diagram (\ref{eq:diagram-of-rectifiction-tate}) is well-defined independently of (\ref{eq:diagram-of-rectifiction}). The tableau associated with each column is called the {\it associated tableau}.
\begin{defi} Let $U(\mu)$ be the tableau of shape $\mu$ where any number in $i^\mathrm{th}$ row is $i$. \end{defi}
\begin{lemma}[Iwao~\protect{\cite[Lemma 5.5]{iwao2018rims}}, (also see Fulton~\protect{\cite[\S 5.2, Lemma 1]{fulton_1996}})] The associated tableau of any standard tableau of shape $\mu$ is $U(\mu)$. \end{lemma} \begin{cor}[The uniqueness of rectification] The rectified tableau of a skew tableau is unique. \end{cor}
\subsection{Lift to $\mathcal{M}$} All procedures in \S \ref{sec:2.1}--\S \ref{sec:2.6} can be expressed in terms of the language $\mathcal{L}$ and the $\mathcal{L}$-structure $\o{\mathcal{M}}$. They can be lifted to $\mathcal{M}$ appropriately, and their ``geometric'' counterparts are expressed as a discrete integrable system.
For example, let \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ R_j^t:=\left( \begin{array}{ccccc} I_{1,j}^t & 1 & & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& I_{2,j}^t & 1 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & I_{3,j}^t & \ddots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & & \ddots \end{array} \right),\quad L_j^t:=\left( \begin{array}{ccccc} 1 & & & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} -V_{1,j}^t & 1 & & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& -V_{2,j}^t & 1 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & -V_{3,j}^t & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & & \ddots &\ddots \end{array} \right)^{-1}. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] We can uniquely define the rational map $$ M^\infty\to M^\infty;\quad (I_{i,j}^t,V_{i,j}^t)_{i=1,2,\dots}\mapsto (I_{i,j+1}^t,V_{i,j}^{t+1})_{i=1,2,\dots} $$ by the equation \begin{equation}\label{eq:Laxform} R_j^tL_j^t=L_j^{t+1}R_{j+1}^{t}, \end{equation} which is a discrete analog of the Toda equation. It is verified that its tropicalization coincides with (\ref{eq:QandW}), where $Q_{i,j}^t=\o{I_{i,j}^t}$ and $W_{i,j}^t=\o{V_{i,j}^t}$ are the tropical variables.
All other procedures and facts can also be lifted to $\o{\mathcal{M}}$ and expressed in terms of the discrete integrable systems. See \cite{iwao2018rims} for details.
\section{Application 1: shape equivalence class}\label{sec:3}
\subsection{Definition of shape equivalence}
The rectification of \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \bl & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 1 & 3 & 6 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7}\qquad \mbox{defined by}\qquad \Tableau{\lgray 1 &\lgray 2 &\lgray 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \lgray 4} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] is expressed as \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \bl & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray & 1 & 3 & 6 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & \bl & \gray & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 3 & 6 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & \gray & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 3 & 6 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\gray & 2 & 6\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1 & 3 & 7 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{1 & 2 & 6\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 5 & 7 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 }. 
\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] On the other hand, the rectification of \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \bl & 1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 2 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4}\qquad \mbox{defined by}\qquad \Tableau{\lgray 1 &\lgray 2 &\lgray 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \lgray 4}, \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] is expressed as \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \bl & 1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray & 2 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & \bl & \gray & 1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & \gray & 1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\gray & 1& 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 2 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{1 & 2& 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] Note that the shapes of these two sequence coincides with each other. In such case, two skew tableaux are said to {\it have the same shape changes by } $ \Tableau{\lgray 1 &\lgray 2 &\lgray 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \lgray 4}. 
$ The following theorem is referred to as the ``shape change theorem''~\cite[Appendix A]{fulton_1996}. \begin{thm}[Shape change theorem]\label{thm:shape-change} If two skew tableaux have the same shape changes by some standard tableau, then they actually have the same shape changes by any standard tableau. \end{thm}
If two skew tableaux have the same shape changes by some standard tableau (therefore, if they have the same shape changes by any standard tableau), they are said to be {\it shape equivalent}.
\begin{example}\label{example:shape-equivalent} If we apply the sequence of jeu de taquin slides defined by $ \Tableau{\lgray 1 &\lgray 3 &\lgray 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \lgray 2} $ to the two skew tableaux above, we have \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \gray & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 1 & 3 & 6 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & \gray & 2 & 6\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 1 & 3 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & 1 & 2 & 6\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray & 3 & 7 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\gray & 1 & 2 & 6\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 5 & 7 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ 1 & 2 & 6\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 5 & 7 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 } \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] and \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\bl & \bl & \gray & 1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 2 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & \gray & 1 & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\bl & 1 & 2 & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray & 2 & 4 \hbox{\lower 
5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{\gray & 1 & 2 & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{1 & 2 & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 3 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] \end{example}
\subsection{Circled array}
In the sequel, we will give a proof of Theorem \ref{thm:shape-change}.
For real vectors $I=(I_1,I_2,\dots)$ and $V=(V_1,V_2,\dots)$, we define the matrices $E(I)$, $F(V)$ of infinite size as \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ E(I)=\left( \begin{array}{cccc} I_1 & 1 & & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& I_2 & 1 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & I_3 & \ddots \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & & \ddots \end{array} \right),\quad F(V)=\left( \begin{array}{cccc} 1 & & & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} -V_1 & 1 & & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& -V_2 & 1 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& & \ddots & \ddots \end{array} \right). \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] Moreover, for a real infinite vector $V$, define the matrix $F_k(V)$ by \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ F_k(V)=\left( \begin{array}{cc} \mathrm{Id}_{k-1} & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& F(V) \end{array} \right). \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}]
Consider the map $M^\infty\to M^\infty;(V_{i,j})\mapsto (U_{i,j})$ uniquely defined by the equation \begin{equation}\label{eq:F-geometric-tableau} F(V_N)\cdots F(V_1)F(V_0)=F_{N+1}(U_{N+1})\cdots F_2(U_2)F_1(U_1). \end{equation} By induction on $N$, we can verify that this map is expressed by $\mathcal{L}$-terms (\S \ref{sec:1.3}). \begin{rem} This map can be regarded as an ``$F$-matrix version'' of Noumi-Yamada's geometric tableau. In fact, the geometric tableau $(I_{i,j})\mapsto (J_{i,j})$ is defined by the equation \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ E(I_\ell)\cdots E(I_2)E(I_1)=E_{1}(J_1)E_2(J_2)\cdots E_\ell(J_\ell), \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] where $E_k(J)$ is a matrix analogously defined to $F_k(V)$. For details, see \cite[Section 5]{iwao2018rims}. \end{rem}
Let $W_{i,j}=\o{V_{i,j}}$ and $L_{i,j}=\o{U_{i,j}}$ be the tropical variables. Through the tropicalization, we obtain the piecewise linear map $\o{M}^\infty\to \o{M}^\infty;\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}{W_{i,j}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}}\mapsto \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}{L_{i,j}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}}$. This map has an interesting combinatorial interpretation, which we explain below.
A {\it circled array} is a collection of finitely many rows that consists of {\it circled numbers}, where the numbers are arranged in increasing ordering. (Empty rows are allowed.) For example, $ \Tableau{ \maru{1} & \maru{2} & \maru{2} & \maru{3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} }$ is a circled array. It consists of $4$ rows, one of which is empty. We define the action of an {\it one-rowed array} (e.g.~ $ \Tableau{1&1&2&3} $) to a circled array, which we will express as \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \maru{1} & \maru{2} & \maru{2} & \maru{3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} }\leftharpoonup \Tableau{1&1&2&3}, \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] by the following manner: \begin{enumerate} \item The action is calculated row by row. Arrange the members of the $1^\mathrm{st}$ row of the circled array and the members of the one-rowed array in the following order: circled $1$'s, boxed $1$'s, circled $2$'s, boxed $2$'s,\dots. For the example above, we have \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\maru{1}&1&1&\maru{2}&\maru{2}&2&\maru{3}&3}. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] \item Move the circles according to the time evolution rule of the {\it Takahashi-Satsuma box ball system}~\cite{takahashi1990soliton}. (See \S \ref{sec:appB}.) Here a circled number is regarded as a ``box with a ball,'' and a boxed number is regarded as an ``empty box.'' We neglect all the balls that go out from the right end. 
\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ & \maru{}& & & & \maru{}& & \maru{}} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] \item Number the circles and boxes according to the following rules: (i) Decrease each number by one at where a circle is replaced with a box. (ii) The numbers at the remaining places do not change. (iii) Delete boxed $0$'s. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{\hbox to 0pt{\hspace{-5pt}\scalebox{1.6}{$\diagup$}} 0& \maru{1}& 1& 1& 1& \maru{2}& 2& \maru{3}} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] \item The sequence of circled numbers in the diagram (e.g.~$\Tableau{\maru{1}&\maru{2}&\maru{3}}$) is the $1^\mathrm{st}$ row of the new circled array. The sequence of boxed numbers (e.g.~$\Tableau{1&1&1&2}$) proceeds to the $2^\mathrm{nd}$ row. \item Repeat recursively the same procedures to $2^\mathrm{nd},3^\mathrm{rd},\dots$ rows until the one-rowed array reaches to the bottom of the array. Finally, the one-rowed array is added to the bottom end as a sequence of circled numbers. 
\end{enumerate} In the exemplified case, the action is calculated as \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \maru{1} & \maru{2} & \maru{2}&\maru{3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} & \maru{3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} }\quad \raise 18pt\hbox {$\leftharpoonup \Tableau{1&1&2&3}$} \qquad \Tableau{ \maru{1} & \maru{2} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} }\quad \raise 6pt\hbox {$\leftharpoonup \Tableau{1&1&1&2}$} \qquad \Tableau{ \maru{1} & \maru{2} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} } \raise -6pt\hbox {$\leftharpoonup \Tableau{1&2&2}$} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \maru{1} & \maru{2} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} } \raise -18pt\hbox {$\leftharpoonup \Tableau{1&2&2}$} \qquad\qquad \Tableau{ \maru{1} & \maru{2} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2} } \raise -30pt\hbox {$\leftharpoonup \Tableau{1&1&2}$} \qquad \Tableau{ \maru{1} & \maru{2} & \maru{3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1} & \maru{1} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \maru{1}&\maru{1}&\maru{2}} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] Let \hbox{\lower 
5pt\hbox{\scalebox{3.5}{$+$}}}[ x_1 \leftharpoonup x_2 \leftharpoonup x_3\leftharpoonup\cdots. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] denote the circled array given by acting the one-rowed arrays $x_1,x_2,x_3,\dots$ to the empty array.
\begin{thm}\label{thm:geometric-circled-tableau} Let $\bm{W}_j=(W_{1,j},W_{2,j},\dots)$ be a sequence of nonnegative integers that satisfies $\sum_{i}W_{i,j}<\infty$. Let $w_j$ denote the one-rowed array that consists of $W_{1,j}$ $1$'s, $W_{2,j}$ $2$'s,\dots. Then the map $(W_{i,j})\mapsto (L_{i,j})$ has the following combinatorial interpretation: The number of $i$'s in the $j^\mathrm{th}$ row of the circled array \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ w_N \leftharpoonup w_{N-1} \leftharpoonup \cdots \leftharpoonup w_0 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] equals $L_{i,j}$. Here $N$ is a sufficiently large integer. \end{thm} \begin{proof} Let $V,V',U,U'\in M^\infty$ be vectors of infinite length. For any $k$, there uniquely exists a map $M^\infty\to M^\infty;(U,V)\mapsto (U',V')$ defined by \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ F_k(U)F_k(V)=F_{k+1}(V')F_k(U'), \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] which is obviously equivalent to $F_1(U)F_1(V)=F_{2}(V')F_1(U')$. This equation is nothing but the Lax formulation of the discrete Toda equation (see, for example, the literature on the discrete Toda equation). It is well known that its tropical counterpart is the Takahashi-Satsuma box-ball system. In this context, the action $w_a\leftharpoonup w_b$ can be seen as the tropical counterpart of the equation $F(V_a)F(V_b)=F_{2}(U^{(2)})F_1(U^{(1)})$, where $V_a,V_b$ are associated with $w_a,w_b$ and $U^{(i)}$ is associated with the $i^\mathrm{th}$ row of the circled array. 
Generally, the action $(w_{a_1}\leftharpoonup w_{a_2}\leftharpoonup\cdots\leftharpoonup w_{a_j})\leftharpoonup w_b$ is the counterpart of \begin{align*} &F_{j}(U^{(j)})\cdots F_3(U^{(3)})F_2(U^{(2)})\underline{F_{1}(U^{(1)})F(V_b)}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &=F_{j}(U^{(j)})\cdots F_3(U^{(3)})\underline{F_2(U^{(2)})F_2(V_{b_1})}F_1(U^{(1)'})\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &=F_{j}(U^{(j)})\cdots \underline{F_3(U^{(3)})F_3(V_{b_2})}F_2(U^{(2)'})F_1(U^{(1)'})\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &=\cdots =F_{j+1}(U^{(j+1)'})\cdots F_3(U^{(3)'})F_2(U^{(2)'})F_1(U^{(1)'}), \end{align*} where the actions $F_k(U^{(k)})F_k(V_{b_{k-1}})=F_{k+1}(V_{b_k})F_k(U^{(k)'})$ are underlined. This proves the relation between (\ref{eq:F-geometric-tableau}) and the circled array. \end{proof}
\subsection{Associated circled array}
Let $N$ be a sufficiently large number. It is verified that one can uniquely define the map $M^\infty\to M^\infty;(V_i,I)_{i=0,1,\dots}\mapsto (V'_i,I')_{i=0,1,\dots}$ by the equation \begin{equation*} F(V_N')\cdots F(V_1')F(V_0')E(I)=E(I')F(V_N)\cdots F(V_1)F(V_0). \end{equation*} It is also verified that this map can be expressed by $\mathcal{L}$-terms. The tropicalization $\o{M}^\infty \to \o{M}^\infty;(W_i,Q)_{i=0,1,\dots}\mapsto (W'_i,Q')_{i=0,1,\dots}$ corresponds with the diagram \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \sayou{
& \bm{W}_0 & & \bm{W}_1 & & \bm{W}_2 & & \bm{W}_N & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} (\bm{Q}=)\bm{Q}_0 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}_1 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}_2 & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \cdots & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ &\bm{Q}_{N+1}(=\bm{Q}')\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}
& \bm{W}'_0 & & \bm{W}'_1 & & \bm{W}'_2 & & \bm{W}'_N & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}]
Let us introduce the new variables $U_{i,j},U_{i,j}'$ by \begin{gather*} F(V_N)\cdots F(V_0)=F_{N+1}(U_{N+1})\cdots F_1(U_1),\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} F(V_N')\cdots F(V_0')=F_{N+1}(U_{N+1}')\cdots F_1(U_1'). \end{gather*} Then we obtain \begin{gather}\label{eq:def-of-UI} F_{N+1}(U_{N+1}')\cdots F_2(U_2')F_1(U_1')E(I)=E(I')F_{N+1}(U_{N+1})\cdots F_2(U_2)F_1(U_1). \end{gather} \begin{prop}\label{prop:map-of-UI} There exists the map $M^\infty\to M^\infty;(U_i,I)\mapsto (U_i',I')$ that is uniquely defined by (\ref{eq:def-of-UI}). Moreover, this map is expressed by $\mathcal{L}$-terms. \end{prop} \begin{proof} It is enough to prove the existence and uniqueness of the map $M^\infty\to M^\infty;(U,I)\mapsto (U',I')$ such that $F_k(U')E(I)=E(I')F_k(U)$ for any $k$, which is easily verified by straightforward calculation. Obviously, this map is expressed by $\mathcal{L}$-terms. \end{proof}
From Proposition \ref{prop:map-of-UI}, the map $(U_i,I)\mapsto (U_i',I')$ induces a tropical map $(L_i,Q)\mapsto (L'_i,Q')$, where $L_{i,j}=\o{U_{i,j}}$ and $Q_{i,j}=\o{I_{i,j}}$. We write this map diagrammatically as \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \sayou{ & (\bm{L}_1,\dots,\bm{L}_{N+1}) & \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bm{Q}& \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+ & \bm{Q}'\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} & (\bm{L}'_1,\dots,\bm{L}'_{N+1})& }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}]
From Theorem \ref{thm:geometric-circled-tableau}, the data $(\bm{L}_1,\dots,\bm{L}_{N+1})$ naturally corresponds with a circled array, which we will call the {\it associated circled array}. Because the data $(\bm{L}_1,\dots,\bm{L}_{N+1})$ is determined by a skew tableau ($\simeq$ an array $W$), there exists a natural correspondence from the set of skew tableaux to the set of circled arrays. \begin{example} The skew tableau \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \bl & \bl & \bl & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 2 & 2 & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4 },\quad W=\left( \haiti{ 2&0&0&0&0\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1&1&2&1&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 0&0&0&2&3 } \right) \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] corresponds with the circled array $(N=4)$ \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{2&3&3&3}\leftharpoonup \Tableau{2&3&3}\leftharpoonup \Tableau{2&2}\leftharpoonup \Tableau{2}\leftharpoonup \Tableau{1&1&2} =\Tableau{\maru{2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\emptyset\bl} \left( =\Tableau{\maru{2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}} \right) . \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] To simplify notations, we often omit to write empty rows at the bottom of an array. We let the sign ``$\bm{\emptyset}$'' denote the array that consists of empty rows. \end{example}
For example, the rectification of \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ \bl & \bl & \bl & 1 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 2 & 2 & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4 }\qquad\mbox{defined by}\qquad \Tableau{ \lgray 1 & \lgray 2 & \lgray 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \lgray 4 } \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] is expressed by the following diagram with circled arrays: \begin{equation}\label{eq:diagram-of-rectification-yoko} \sayou{ &\Tableau{\maru{2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [2]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[2]\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\Tableau{\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[1]\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\Tableau{\emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\Tableau{\emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\emptyset\bl\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} [1]&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&[3]\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\Tableau{\bl\bm{\emptyset}}& }. \end{equation}
\subsection{Proof of Shape change theorem}
We often look at the diagrams ``upside-down.'' In other words, we regard the data at the right and bottom as inputs and the data at the top and left as outputs. Combinatorially, this corresponds with the {\it reverse slide}~\cite[\S 1.2]{fulton_1996}. As its name suggests, the reverse slide is the reverse operation of the jeu de taquin slide.
\begin{lemma}\label{lemma:emptyrow} The associated circled array of any (non-skew) tableau is $\bm{\emptyset}$. \end{lemma} \begin{proof} Let $W=(W_{i,j})$ be the matrix associated with a (non-skew) tableau, and $w_j$ be the one-rowed array associated with the vector $\bm{W}_j=(W_{1,j},W_{2,j},\dots)$. Since a (non-skew) tableau contains no empty boxes, the relation $i>j\Rightarrow W_{i,j}=0$ holds. In particular, the array $w_j$ consists of numbers less than or equal to $j$. For sufficiently large $N$, one can verify by backward induction on $j\leq N$ that all numbers in the circled array \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ w_N\leftharpoonup w_{N-1}\leftharpoonup\dots \leftharpoonup w_j \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] are less than or equal to $j$. Hence the circled array $w_N\leftharpoonup w_{N-1}\leftharpoonup\dots \leftharpoonup w_0$ consists of empty rows. \end{proof}
\begin{prop} Assume that in some tableau, new empty boxes arise in $b_k^\mathrm{th},b_{k-1}^\mathrm{th},\allowbreak\dots,b_1^\mathrm{th}$ rows when a sequence of reverse slides starting from $a_k^{\mathrm{th}},a_{k-1}^{\mathrm{th}},\dots,a_1^{\mathrm{th}}$ rows is operated. Then the sequence $b_1,b_2,\dots,b_k$ depends only on the sequence $a_1,a_2,\allowbreak\dots,a_k$, and is independent of the choice of tableaux. \end{prop} \begin{proof} The situation is expressed by a diagram such as (\ref{eq:diagram-of-rectification-yoko}), where the entries of the left column are $[b_1],\dots,[b_k]$ and the entries of the right column are $[a_1],\dots,[a_k]$. When we see the diagram ``upside-down,'' the sequence $a_1,\dots,a_k$ and $\bm{\emptyset}$ (see Lemma \ref{lemma:emptyrow}) are thought of as the inputs. This implies that $b_1,\dots,b_k$ depend only on $a_1,\dots,a_k$. \end{proof}
\begin{example} When a sequence of reverse slides starting from $(2,1,2,4,3)^{\mathrm{th}}$ rows operated to any tableau, empty boxes must arise in $(1,1,2,3,2)^\mathrm{th}$ rows. For example, see \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \Tableau{ 1&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&1&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 1&2&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&1&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&1&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&\bl&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] and \begin{align*} & \Tableau{ 1&3&4&9&10\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&5&8&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 6&7\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 11 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&1&3&4&10&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&5&8&9\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 
6&7\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 11 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1&3&4&10\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&5&8&9&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 6&7\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 11 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\Tableau{ \bl&\bl&1&3&4&10\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&2&5&8&9\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 6&7\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 11&\gray }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1&3&4&10\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&2&5&8&9\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&7&\gray\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 6&11 }\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \to\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{ \bl&\bl&1&3&4&10\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&\bl&5&8&9\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&2&7\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 6&11 }. \end{align*} \end{example}
\begin{cor}\label{cor:cir} The associated circled array of a skew tableau is determined by the sequence $a_1,a_2,\dots,a_k$. \end{cor} \begin{proof} It suffices to see the diagram (\ref{eq:diagram-of-rectification-yoko}). \end{proof}
Then we have the following theorem, which contains the shape change theorem: \begin{thm}\label{thm:main1} For two skew tableaux of the same shape, the following $($a--c$)$ are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item They are shape equivalent. \item They have the same shape changes by some standard tableau. \item Their associated circled arrays coincide with each other. \end{enumerate} \end{thm} \begin{proof} (a)$\Rightarrow$(b) is obvious. For (b)$\Rightarrow$(c), assume that two skew tableaux admit a sequence of jeu de taquin slides starting from $b_1^{\mathrm{th}},b_2^{\mathrm{th}},\dots,b_k^{\mathrm{th}}$ rows, and reach tableaux where outside corners in $a_1^{\mathrm{th}},a_2^{\mathrm{th}},\dots,a_k^{\mathrm{th}}$ rows have been changed. Since their associated circled arrays depend on the $a_i$'s only (Corollary \ref{cor:cir}), they coincide with each other. For (c)$\Rightarrow$(a), consider a diagram such as (\ref{eq:diagram-of-rectification-yoko}). Since the circled arrays at the top are the same, the changes that occur at the outside corners by the sequence of jeu de taquin slides also coincide. \end{proof}
From Theorem \ref{thm:main1}, it can be said that the shape equivalent class of a skew tableau is completely determined by its associated circled array.
\begin{example} Both associated circled arrays of the skew tableaux $ \Tableau{\bl & \bl & \bl & 2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 1 & 3 & 6 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4 & 5 & 7} $ and $ \Tableau{\bl & \bl & \bl & 1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & 2 & 2 & 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3 & 3 & 4} $ in Example \ref{example:shape-equivalent} coincide with $\Tableau{\maru{2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}}$. \end{example}
\begin{rem} There exists an alternative way to determine the shape equivalent class by calculating the ``$Q$-tableau''~\cite[Appendix A]{fulton_1996}. This can be calculated easily, but essentially depends on the shape of the skew tableau. The circled array, however, depends only on the order of reverse slides (Corollary \ref{cor:cir}). One could say that the circled array presents ``universal'' information of the shape equivalent class. \end{rem}
\section{Application 2: Littlewood-Richardson correspondence}\label{sec:4}
As we have seen in the previous sections, the diagram of rectification (\ref{eq:diagram-of-rectifiction}) induces a new useful diagram if one gets its rows or columns together in one bundle. If we do both procedures, we obtain the following diagram: \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \sayou{&(\bm{L}_1,\dots,\bm{L}_{N+1})&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} (\bm{P}_1,\dots,\bm{P}_k)&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&(\bm{P}'_1,\dots,\bm{P}'_k)\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &(\bm{L}'_1,\dots,\bm{L}'_{N+1})& }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] This diagram defines the one-to-one correspondence $(L_i,P_i)\leftrightarrow (L_i',P_i')$. For example, from the diagram (\ref{eq:diagram-of-rectification-yoko}), we have \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \sayou{ &\Tableau{\maru{2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\maru{1}&\maru{1}}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \Tableau{1&1&1\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\2}&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&\Tableau{1&3&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}\2}\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\bm{\emptyset}& }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}]
In general, for any two Young diagrams $\mu\subset \lambda$, we have \begin{equation}\label{eq:diagram-of-rectification-compact} \sayou{ &M& \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} U(\mu)&\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}+&Z\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} &\bm{\emptyset}& }, \end{equation} where $M$ is a circled array and $Z$ is a tableau of shape $\mu$. We will see that the correspondence is related closely with the {\it Littlewood-Richardson correspondence}~\cite[\S A.1]{fulton_1996}.
\subsection{Definition of the Littlewood-Richardson correspondence}\label{sec:4.1}
We review the definition of the Littlewood-Richardson correspondence. For the definitions of the terms {\it RSK correspondence} and {\it $P$ tableau}, see the standard textbook~\cite{fulton_1996}.
Similar to the case of jeu de taquin slides, repeating reverse slides is nothing but choosing outside corners repeatedly. In other words, this is equivalent to specify a standard skew tableau ``sticking'' outside of the tableau. For example, the sequence of reverse slides to \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ X= \Tableau{ 1 & 3 & 3 & 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2 & 4 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 5\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl } \qquad\mbox{defined by}\qquad R= \Tableau{ \bl & \bl & \bl &\bl & \gray 5\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & \bl & \gray 1 & \gray 2 \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl & \gray 4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \gray 3 } \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] results the skew tableau \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ S= \Tableau{ \bl&\bl&\bl&1&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} \bl&\bl&4&4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 5 }. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] There exists a useful correspondence between sequences of reverse slides and {\it tableaux pairs}. Here we give its outline briefly according to Fulton \cite[\S Appendix A]{fulton_1996}.
Let $R$ be the standard skew tableau sticking outside of the tableau $X$. For an arbitrary tableau $V_\circ$ of shape $\lambda$, one can find a word $w=t_1t_2\dots t_m$ ($m=\vert \mu \vert$) and a tableau $U$ of shape $\mu$ such that \begin{itemize} \item $t_1\to t_2\to\cdots \to t_m\to U=V_\circ$, \item when $t_{m-i}$ is column-bumped, the new box appears at the place of $\Tableau{\gray i}$ \end{itemize} by the ``reverse column bumping algorithm.'' Let $T=P(w)$ denote the $P$-tableau associated with $w$. We call the pair $[T,U]$ the {\it Littlewood-Richardson pair}.
Let $\mathcal{S}(\lambda/\mu,X)$ be the set of skew tableaux of shape $\lambda/\mu$, the rectification of which is $X$. On the other hand, let $\mathcal{T}(\lambda,\mu,V_\circ)$ denote the set of the pairs $[T,U]$ where $T$ is of shape $\lambda$, $U$ is of shape $\mu$, and $T\cdot U=V_\circ$. Let \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \mathcal{S}(\lambda/\mu,X)\to \mathcal{T}(\lambda,\mu,V_\circ);\qquad S\mapsto [T,U] \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] be the map that associates the skew tableau $S$, obtained from $X$ through a sequence of reverse slides defined by $R$, with the Littlewood-Richardson pair $[T,U]$. It is known that this map is bijective \cite[\S 5.1, Proposition 2]{fulton_1996}. This map is called the {\it Littlewood-Richardson rule}. Moreover, for any two tableaux $X,Y$ of the same shape, one can define the bijection \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ \mathcal{S}(\lambda/\mu,X)\to \mathcal{T}(\lambda,\mu,V_\circ)\to \mathcal{S}(\lambda/\mu,Y). \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] This bijection $\mathcal{S}(\lambda/\mu,X)\leftrightarrow\mathcal{S}(\lambda/\mu,Y)$ is called the {\it Littlewood-Richardson correspondence by $V_\circ$}. Later we will see that it is independent of the choice of $V_\circ$.
\begin{example} Let us take $X,R,S$ as in the beginning of \S \ref{sec:4.1}. If one put $V_\circ= \Tableau{ 1&1&2&2&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&3&4&4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 3&4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4} $, the Littlewood-Richardson pair associated with $S$ is \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}[ [T,U] =\left[ \Tableau{ 1&3&3\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4&4}, \Tableau{ 1&2&2&2\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 2&4\hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}} 4} \right]. \hbox{\lower 5pt\hbox{\scalebox{3.5}{$+$}}}] \end{example}
\subsection{Tropical interpretation of Littlewood-Richardson correspondence}
As we have seen in the previous subsection, the Littlewood-Richardson correspondence is essentially defined via the standard skew tableau $R$. Let $\mathcal{R}(\lambda,X)$ be the set of standard skew tableaux $R$ sticking outside of $X$, where the outside of $R$ is of shape $\lambda$. Therefore, we have the commutative diagram: \[ \xymatrix{ &\mathcal{R}(\lambda,X)\ar_f[dl]\ar^g[dr]&\\ \mathcal{S}(\lambda/\mu,X)\ar^{\cong}[rr]& &\mathcal{T}(\lambda,\mu,V_\circ) }, \] where $f$ is the reverse column bumping and $g$ is a composition of $f$ and the Littlewood-Richardson rule.
Note that the equation $f(R)=S$ is equivalent to the diagram \[ \sayou{ &\bm{W}_0& &\bm{W}_1& &\bm{W}_2& \cdots & \bm{W}_N& \\ U(\mu)&\+& U_1 &\+& U_2&\+& \cdots &\+& Z=(\mbox{the associated tableau of $R$})\\ &\bm{W}'_0& &\bm{W}'_1& &\bm{W}'_2& \cdots & \bm{W}'_N& }, \] where the matrices $W=(\bm{W}_j)$ and $W'=(\bm{W}'_j)$ correspond with $S$ and $X$ respectively. By introducing the equivalence relation \[ R_1\sim R_2\iff \mbox{the associated tableaux of $R_1$ and $R_2$ are the same} \] over $\mathcal{R}(\lambda,X)$, we induce the two one-to-one correspondences \[ (\mathcal{R}(\lambda,X)/\sim) \leftrightarrow \mathcal{S}(\lambda/\mu,X),\qquad (\mathcal{R}(\lambda,X)/\sim) \leftrightarrow \mathcal{T}(\lambda,\mu,V_\circ). \] Because the shapes of $U(\mu)$ and $Z$ are the same, each element of $\mathcal{R}(\lambda,X)/\sim$ can be identified with its associated tableau of shape $\mu$. If $\mathcal{T}(\mu)$ denotes the set of tableaux of shape $\mu$, we finally obtain the maps \[ F:\mathcal{S}(\lambda/\mu,X)\hookrightarrow \mathcal{T}(\mu),\qquad G:\mathcal{T}(\lambda,\mu,V_\circ)\hookrightarrow \mathcal{T}(\mu), \] where \[ F(S)=G([T,U])\iff \mbox{$[T,U]$ is the Littlewood-Richardson pair associated with $S$.} \]
From this relation, we obtain a new characterization of the Littlewood-Richardson correspondence. \begin{thm}\label{thm:main2} Let $X,Y$ be two tableaux of the same shape. The two skew tableaux $S_1\in \mathcal{S}(\lambda/\mu,X)$ and $S_2\in \mathcal{S}(\lambda/\mu,Y)$ correspond with each other if and only if $F(S_1)=F(S_2)$. In particular, the Littlewood-Richardson correspondence does not depend on the choice of $V_\circ$. \end{thm} \begin{cor} Two tableaux of the same shape are shape equivalent if and only if they correspond with each other through the Littlewood-Richardson correspondence. \end{cor} \begin{proof} Let $S_1,S_2$ be two tableaux of the same shape and $M_1,M_2$ be their associated circled arrays. From the diagram $ \sayou{ &M_i& \\ U(\mu)&\+& F(S_i)\\ &\bm{\emptyset}& } $ ($i=1,2$), we have $F(S_1)=F(S_2)\iff M_1=M_2 \iff $ $S_1$ and $S_2$ are shape equivalent. \end{proof}
\subsection*{Acknowledgment} This work is partially supported by JSPS KAKENHI:26800062.
\appendix
\section{Basics on the combinatorics of Young tableaux}\label{sec:appA}
A box $B$ in a Young diagram is said to be {\it placed in the corner} if there exists no box below nor on the right of $B$. For a skew diagram $\lambda/\mu$, the corner of $\lambda$ is called the {\it outside corner} and the corner of $\mu$ is called the {\it inside corner}.
For a skew tableau $T$ and an inside corner $b$, the {\it jeu de taquin starting from $b$} is defined as follows: (i) Compare the two numbers in the boxes below and on the right of the hole $b$, and slide a box with smaller number to $b$. If these two numbers are same, slide the box below $b$. (ii) Compare the two numbers in the boxes below and on the right of the hole, and slide a box according to the same rule in (i). (iii) Repeat (ii) until the hole reaches to the outside corner.
The following is an example of jeu de taquin. The grayed boxes denote the holes. \[ \Tableau{ \bl& \bl& 1 & 3\\ \bl& \gray & 2 & 3\\ 1 & 2 & 3& 4\\ 2&4&5} \qquad \Tableau{ \bl& \bl& 1 & 3\\ \bl& 2 & 2 & 3\\ 1 & \gray & 3&4\\ 2&4&5} \qquad \Tableau{ \bl& \bl& 1 & 3\\ \bl& 2 & 2 & 3\\ 1 & 3 & \gray &4\\ 2&4&5} \qquad \Tableau{ \bl& \bl& 1 & 3\\ \bl& 2 & 2 & 3\\ 1 & 3 & 4\\ 2&4&5} \] In this example, a jeu de taquin starts in the $2^\mathrm{nd}$ row, and ends in the $3^\mathrm{rd}$ row.
Let $T$ be a tableau and $t$ be a number. The {\it row bumping} (or {\it row insertion}) of $t$ to $T$ is defined as follows: (i) If $t$ is equal to or greater than all the numbers in the $1^\mathrm{st}$ row of $T$, put a new box with $t$ at the place next to the rightmost box of this row. If not, $t$ ``bumps'' the leftmost number greater than $t$, then the ``bumped'' number goes to the next row. (ii) Apply similar procedure as (i) to the next row of $T$ and the bumped number. (iii) Repeat (ii) until no more number is bumped.
The following diagram presents the row bumping of $3$ to the tableau. \[ \Tableau{1&3&4&5&\bl \quad\leftarrow 3\\ 2&4&6&6\\ 4&5\\ 6} \qquad \Tableau{1&3&3&5\\ 2&4&6&6&\bl \quad\leftarrow 4\\ 4&5\\ 6} \qquad \Tableau{1&3&3&5\\ 2&4&4&6\\ 4&5&\bl& \bl &\bl \quad\leftarrow 6\\ 6} \qquad \Tableau{1&3&3&5\\ 2&4&4&6\\ 4&5&6\\ 6}. \] The tableau obtained by row bumping of $t$ to $T$ is denoted by \[ T\leftarrow t\qquad \mbox{or}\qquad T\leftarrow \Tableau{t}. \]
Moreover, the {\it column bumping} (or {\it column insertion}) of $t$ to $T$ is defined as follows: (i) If $t$ is {\it greater than} all the numbers in the $1^\mathrm{st}$ column of $T$, put a new box with $t$ at the place next to the box at the bottom of this column. If not, $t$ ``bumps'' the number at the highest position among the numbers {\it equal to or greater than} $t$, then the ``bumped'' number goes to the next column. (ii) Apply similar procedure as (i) to the next column of $T$ and the bumped number. (iii) Repeat (ii) until no more number is bumped. The tableau obtained by column bumping of $t$ to $T$ is denoted by \[ t\to T\qquad \mbox{or}\qquad \Tableau{t}\to T. \]
\section{Takahashi-Satsuma Box-Ball system }\label{sec:appB}
Let us consider the map $(U_i,V_i)_{i=1,2,\dots}\mapsto (U'_i,V'_i)_{i=1,2,\dots}$ that is defined by the equation $F_1(U)F_1(V)=F_{2}(V')F_1(U')$. This map is expressed by $\mathcal{L}$-terms. Let $L=\o{U_i}$ and $W=\o{V_i}$ be the tropical variables. Then the tropical map $(L_i,W_i)_{i=1,2,\dots}\mapsto (L'_i,W'_i)_{i=1,2,\dots}$ is expressed as \[ L_i'=\min\left[W_i,L_i+\max_k[0,\textstyle \sum_{j=1}^k(L_{i-j}-W_{i-j})]\right],\qquad W_i'=L_i+W_i-L_i'. \] The {\it Takahashi-Satsuma Box-Ball system}~\cite{takahashi1990soliton} is a combinatorial interpretation of this map, which is expressed as follows.
Let us consider a sequence of infinitely many boxes such as \begin{equation}\label{eq:BoxBall} \cdots \Tableau{&&&\tama{}& & \tama{}&\tama{} &\tama{} & & &\tama{}& & & & }\cdots \end{equation} Here each box either contains a ball or is empty. A string of balls is called a {\it soliton}. For instance, this picture contains three solitons.
Let $L_i$ be the number of balls that are contained in the $i^\mathrm{th}$ soliton from left, and $W_i$ be the distance between the $i^\mathrm{th}$ and $(i+1)^\mathrm{th}$ solitons. Then the time evolution of (\ref{eq:BoxBall}) is given by the following combinatorial rule: (i) Create a copy of each ball. (ii) Move each copy to the nearest empty box on the right. (iii) Delete the original balls. This procedure is called the {\it time evolution rule} of the Takahashi-Satsuma Box-Ball system. The following is an example of the combinatorial expression of the time evolution $(L_i,W_i)_{i=1,2,3}\mapsto (L'_i,W'_i)_{i=1,2,3}$: \begin{gather*} (L_i,W_i);\qquad \cdots \Tableau{&&&\tama{}& & \tama{}&\tama{} &\tama{} & & &\tama{}& & & & }\cdots\\ (L'_i,W'_i);\qquad \cdots \Tableau{&&& & \tama{} & & & &\tama{} &\tama{} & &\tama{}&\tama{} &\tama{} & }\cdots \end{gather*} For more detail, see \cite{takahashi1990soliton}.
\section{The correspondence $Z\leftrightarrow M$}\label{sec:appC}
Here is a short list of examples of the correspondence $Z\leftrightarrow M$: \[ \Tableau{1}\leftrightarrow\Tableau{\maru{1}}\qquad \Tableau{1&1}\leftrightarrow\Tableau{\maru{1}&\maru{1}}\qquad \Tableau{1&1&\cdots\bl&1}\leftrightarrow\Tableau{\maru{1}&\maru{1}&\cdots\bl&\maru{1}} \]
\[ \Tableau{1&1&2\\ 2}\leftrightarrow\Tableau{\maru{1}&\maru{2}\\ \maru{1}&\maru{1}} \quad\Tableau{1&1&2\\ 3}\leftrightarrow\Tableau{\maru{1}&\maru{1}\\ \maru{2}\\ \maru{1}} \quad\Tableau{1&2&2\\ 2}\leftrightarrow\Tableau{\maru{2}\\ \maru{1}&\maru{1}&\maru{1}} \quad\Tableau{1&2&2\\ 3}\leftrightarrow\Tableau{\maru{1}\\ \maru{1}&\maru{2}\\ \maru{1}} \] \[ \Tableau{1&2\\ 3}\leftrightarrow\Tableau{\maru{1}\\ \maru{2}\\ \maru{1}}\qquad \Tableau{2&3\\ 4}\leftrightarrow\Tableau{\emptyset\bl\\ \maru{1}\\ \maru{2}\\ \maru{1}}\qquad \Tableau{1&3\\ 2&5\\ 4}\leftrightarrow\Tableau{\maru{3}\\ \maru{2}\\ \maru{2}\\ \maru{1}\\ \maru{1}}\qquad
\Tableau{1&2&2&3\\ 2&3&3\\ 4}\leftrightarrow\Tableau{\maru{2}\\ \maru{1}&\maru{2}&\maru{3}\\ \maru{1}&\maru{1}&\maru{2}\\ \maru{1}} \] Let $Z^\ast$ denote the {\it Sch\"{u}tzenberger dual tableau}\footnote{See, for example, \cite[Appendix A]{fulton_1996}.} of $Z$, and $i^\ast$ denote the {\it opposite number} of $i$. We have the following conjecture about the correspondence $Z\leftrightarrow M$: \begin{conj} The number of $j$'s in the $i^\mathrm{th}$ row of $M$ equals the number of $i^\ast$'s in the $j^\mathrm{th}$ row of $Z^\ast$. \end{conj}
\end{document} |
\begin{document}
\title[Canards from Chua's circuit] {Canards from Chua's circuit}
\author[J.M. Ginoux, J. Llibre] {Jean-Marc Ginoux$^1$, Jaume Llibre$^2$ and Leon Chua$^3$}
\address{$^1$ Laboratoire {\sc Protee}, I.U.T. de Toulon, Universit\'{e} du Sud, BP 20132, F-83957 La Garde cedex, France} \email{ginoux@univ-tln.fr}
\address{$^2$ Departament de Matem\`{a}tiques, Universitat Aut\`{o}noma de Barcelona, 08193 Bellaterra, Barcelona, Spain} \email{jllibre@mat.uab.cat}
\address{$^3$ DEECS Department, University of California, Berkeley 253 Cory Hall {\#}1770, Berkeley, CA 94720-1770} \email{chua@eecs.berkeley.edu}
\subjclass{}
\keywords{Geometric Singular Perturbation Method; Flow Curvature Method; singularly perturbed dynamical systems; canard solutions.}
\begin{abstract} The first aim of this work is to extend Beno\^{i}t's theorem for the generic existence of ``canard'' solutions in \textit{singularly perturbed dynamical systems} of dimension three with one fast variable to those of dimension four. Then, it is established that this result can also be obtained according to the \textit{Flow Curvature Method}. Applications to Chua's cubic model of dimension three and four enable us to establish the existence of ``canard'' solutions in such systems. \end{abstract}
\maketitle
\section{Introduction} \label{Intro}
Many systems in biology, neurophysiology, chemistry, meteorology and electronics exhibit several time scales in their evolution. Such systems, nowadays called \textit{singularly perturbed dynamical systems}, have been modeled by a system of differential equations (\ref{eq1}) having a small parameter multiplying one or several components of its vector field. Since the works of Andronov \& Chaikin [1937] and Tikhonov [1948], the \textit{singular perturbation method\footnote{For an introduction to the singular perturbation method see O'Malley [1974] and Kaper [1999].}} has been the subject of much research, among which we will quote that of Arg\'{e}mi [1978] who carefully studied the slow motion. According to Tikhonov [1948], Takens [1976], Jones [1994] and Kaper [1999] \textit{singularly perturbed systems} may be defined as:
\begin{equation} \label{eq1} \begin{array}{*{20}c}
{{\vec {x}}' = \varepsilon \vec {f}\left( {\vec {x},\vec {y},\varepsilon } \right),\mbox{ }}
\\
{{\vec {y}}' = \vec {g}\left( {\vec {x},\vec {y},\varepsilon } \right)},
\\ \end{array} \end{equation}
where $\vec {x} \in \mathbb{R}^p$, $\vec {y} \in \mathbb{R}^m$, $\varepsilon \in \mathbb{R}^ + $, and the prime denotes differentiation with respect to the independent variable $t$. The functions $\vec {f}$ and $\vec {g}$ are assumed to be $C^\infty$ functions\footnote{In certain applications these functions will be supposed to be $C^r$, $r \geqslant 1$.} of $\vec {x}$, $\vec {y}$ and $\varepsilon$ in $U\times I$, where $U$ is an open subset of $\mathbb{R}^p\times \mathbb{R}^m$ and $I$ is an open interval containing $\varepsilon = 0$.
In the case when $0 < \varepsilon \ll 1$, i.e., is a small positive number, the variable $\vec {x}$ is called \textit{slow} variable, and $\vec {y}$ is called \textit{fast} variable. Using Landau's notation: $O\left( \varepsilon^k \right)$ represents a function $f$ of $u$ and $\varepsilon $ such that $f(u,\varepsilon) / \varepsilon^k$ is bounded for positive $\varepsilon$ going to zero, uniformly for $u$ in the given domain.
In general it is customary to consider that $\vec {x}$ evolves at an $O\left( \varepsilon \right)$ \textit{slow} rate, while $\vec {y}$ evolves at an $O\left( 1 \right)$ \textit{fast} rate. Reformulating system (\ref{eq1}) in terms of the rescaled variable $\tau = \varepsilon t$, we obtain
\begin{equation} \label{eq2} \begin{aligned} \dot {\vec {x}} & = \vec{f} \left( {\vec{x},\vec{y},\varepsilon} \right), \\ \varepsilon \dot {\vec {y}} & = \vec {g}\left( {\vec{x}, \vec{y},\varepsilon } \right). \end{aligned} \end{equation}
The dot represents the derivative with respect to the new independent variable $\tau$.
The independent variables $t$ and $\tau $ are referred to the \textit{fast} and \textit{slow} times, respectively, and (\ref{eq1}) and (\ref{eq2}) are called the \textit{fast} and \textit{slow} systems, respectively. These systems are equivalent whenever $\varepsilon \ne 0$, and they are labeled \textit{singular perturbation problems} when $0 < \varepsilon \ll 1$. The label ``singular'' stems in part from the discontinuous limiting behavior in system (\ref{eq1}) as $\varepsilon \to 0$.
In such case system (\ref{eq2}) leads to a differential-algebraic system called \textit{reduced slow system} whose dimension decreases from $p + m = n$ to $m$. Then, the \textit{slow} variable $\vec {x} \in \mathbb{R}^p$ partially evolves in the submanifold $M_0$ called the \textit{critical manifold}\footnote{It corresponds to the approximation of the slow invariant manifold, with an error of $O(\varepsilon)$.} and defined by
\begin{equation} \label{eq3} M_0 := \left\{ {\left( {\vec {x},\vec {y}} \right):\vec {g}\left( {\vec {x},\vec {y},0} \right) = {\vec {0}}} \right\}. \end{equation}
When $D_{\vec{x}}\vec{g}$ is invertible, thanks to the Implicit Function Theorem, $M_0 $ is given by the graph of a $C^\infty $ function $\vec {x} = \vec {G}_0 \left( \vec {y} \right)$ for $\vec {y} \in D$, where $D\subseteq \mathbb{R}^p$ is a compact, simply connected domain and the boundary of $D$ is a $(p - 1)$--dimensional $C^\infty$ submanifold\footnote{The set $D$ is overflowing invariant with respect to (\ref{eq2}) when $\varepsilon = 0$. See Kaper [1999] and Jones [1994].}.
According to Fenichel [1971-1979] theory if $0 < \varepsilon \ll 1$ is sufficiently small, then there exists a function $\vec {G}\left( {\vec {y},\varepsilon } \right)$ defined on D such that the manifold
\begin{equation} \label{eq4} M_\varepsilon := \left\{ {\left( {\vec {x},\vec {y}} \right):\vec {x} = \vec {G}\left( {\vec {y},\varepsilon } \right)} \right\}, \end{equation}
is locally invariant under the flow of system (\ref{eq1}). Moreover, there exist perturbed local stable (or attracting) $M_a$ and unstable (or repelling) $M_r$ branches of the \textit{slow invariant manifold} $M_\varepsilon$. Thus, normal hyperbolicity of $M_\varepsilon$ is lost via a saddle-node bifurcation of the \textit{reduced slow system} (\ref{eq2}). Then, it gives rise to solutions of ``canard'' type that have been discovered by a group of French mathematicians (Beno\^{i}t \textit{et al.} [1981]) in the beginning of the eighties while they were studying relaxation oscillations in the classical equation of Van der Pol [1926] (with a constant forcing term). They observed, within a small range of the control parameter, a fast transition for the amplitude of the limit cycle varying suddenly from small amplitude to a large amplitude. Due to the fact that the shape of the limit cycle in the phase plane looks as a duck they called it ``canard cycle''. So a ``canard'' is a solution of a singularly perturbed dynamical system following the \textit{attracting} branch $M_a$ of the \textit{slow invariant manifold}, passing near a bifurcation point located on the fold of the \textit{critical manifold}, and then following the \textit{repelling} branch $M_r$ of the \textit{slow invariant manifold}.
\begin{remark} Geometrically a \textit{maximal canard} corresponds to the intersection of the attracting and repelling branches $M_a \cap M_r$ of the slow manifold in the vicinity of a non-hyperbolic point. Canards are a special class of solution of singularly perturbed dynamical systems for which normal hyperbolicity is lost.\\ Canards in singularly perturbed systems with two or more slow variables {\rm (}$\vec {x} \in \mathbb{R}^p$, $p \geqslant 2$ {\rm )} and one fast variable {\rm (}$\vec {y} \in \mathbb{R}^m$, $m = 1${\rm )} are robust, since maximal canards generically persist under small parameter changes\footnote{See Beno\^{i}t [1983, 2001], Szmolyan \& Wechselberger [2001] and Wechselberger [2005].}.
\end{remark}
In dimension three, Beno\^{i}t [1983] has stated a theorem for the existence of canards (recalled in Sec. 3) in which he proved that if the ``reduced vector field'' has a pseudo-singular point of saddle type (whose definitions are recalled in Sec. 2), then the ``full system'' exhibits a canard solution which evolves from the attractive part of the slow manifold towards its repelling part. So, the first aim of this work (presented in Sec. 4) is to extend this theorem to dimension four.
Then, it is also stated that such condition for the generic existence of the peculiar solutions, called ``canards'', in \textit{singularly perturbed dynamical systems} of dimension three and four with only one fast variable can be found according to the \textit{Flow Curvature Method} developed by Ginoux \textit{et al.} [2008] and Ginoux [2009] and recalled in Sec. 5.
Thus, we will establish that Beno\^{i}t's condition for the generic existence of ``canards'' solutions in such systems is also given by the existence of a pseudo-singular point of saddle type for the \textit{flow curvature manifold} of the ``reduced systems''. This result, presented in Sec. 5, is based on the use of the so-called ``Second derivative test'' involving the Hessian of hypersurfaces. Applications to Chua's cubic model of dimension three and four enables to state existence of ``canards'' solutions in such systems.
\section{Definitions} \label{Sec2}
Let's consider a $n$-dimensional \textit{singularly perturbed dynamical system} which may be written as:
\begin{equation} \label{eq5} \begin{aligned} \dot {\vec {x}} & = \vec{f} \left( {\vec{x},\vec{y},\varepsilon} \right), \\ \varepsilon \dot {\vec {y}} & = \vec {g}\left( {\vec{x}, \vec{y},\varepsilon } \right), \end{aligned} \end{equation}
where $\vec{x}= (x_1, \ldots, x_p)^t \in \mathbb{R}^p$, $\vec{y}= (y_1, \ldots, y_m)^t \in \mathbb{R}^m$, $\vec{f}= (f_1, \ldots, f_p)^t$, $\vec{g}= (g_1, \ldots, g_m)^t$, $\varepsilon \in \mathbb{R}^ + $ such that $0 < \varepsilon \ll 1$, and the dot denotes differentiation with respect to the independent variable $t$. The functions $f_i$ and $g_j$ are assumed to be $C^2$ functions of $x_i$ and $y_j$ (with $1 \leqslant i \leqslant p$ and $1 \leqslant j \leqslant m$).
In order to tackle this problem many analytical approaches such as \textit{asymptotic expansions} and \textit{matching methods} were developed (see Zvonkin \& Schubin [1984] and Rossetto [1986]). According to O'Malley [1991] the asymptotic expansion is expected to diverge. Then, Beno\^{i}t [1982, 1983] used non-standard analysis to study canards in $\mathbb{R}^3$.\\
In the middle of the seventies, a geometric approach developed by Takens [1976] consisted in considering that the following system:
\begin{equation} \label{eq6} \begin{aligned} \dot {\vec {x}} & = \vec{f} \left( {\vec{x},\vec{y},\varepsilon} \right), \\ 0 & = \vec {g}\left( {\vec{x}, \vec{y},\varepsilon } \right), \end{aligned} \end{equation}
which has been called \textit{constrained system} corresponds to the \textit{singular approximation} of system (\ref{eq5}) and where $\vec {g}\left( \vec{x}, \vec{y},\varepsilon \right) = 0$ defines the so-called \textit{slow manifold} $S_0$ or \textit{critical manifold} of the \textit{singular approximation}, \textit{i.e.} the zero order approximation in $\varepsilon$ of the \textit{slow manifold}.
\section{Three-dimensional singularly perturbed systems} \label{Sec3}
In dimension greater than two, it is important to distinguish cases depending on \textit{fast} dimensions $m$ and \textit{slow} dimensions $p$. For three-dimensional \textit{singularly perturbed dynamical systems} we have two cases: $(p,m) = (2,1)$ and $(p,m) = (1,2)$. In this work we will only focus on the former case which has been subject of extensive research led by Eric Beno\^{i}t [1981, 1982, 1983, 2001] and summed up below. So, in the case $(p,m) = (2,1)$ three-dimensional \textit{singularly perturbed dynamical systems} (\ref{eq5}) may be defined as:
\begin{equation} \label{eq7} \begin{aligned}
\dot{x_1} & = f_1 \left( x_1, x_2, y_1 \right),
\\
\dot{x_2} & = f_2 \left( x_1, x_2, y_1 \right),
\\
\varepsilon \dot{y_1} & = g_1 \left( x_1, x_2, y_1 \right),
\end{aligned} \end{equation}
where $\vec{x}= (x_1, x_2)^t \in \mathbb{R}^2$, $\vec{y}= (y_1) \in \mathbb{R}^1$, $0 < \varepsilon \ll 1$ and the functions $f_i$ and $g_1$ are assumed to be $C^2$ functions of $(x_1, x_2, y_1)$.
\subsection{Fold, cusp and pseudo-singular points}
Let's recall the following definitions
\begin{definition}\label{def1}
The location of the points where $\partial_{y_1} g_1\left(x_1,x_2,y_1 \right) = p\left(x_1,x_2,y_1 \right) = 0$ and $g_1\left(x_1,x_2,y_1 \right) = 0$ is called the \textit{fold}. \end{definition}
Following to Arg\'{e}mi [1978], the \textit{cofold} is defined as the projection, if it exists, of the fold line onto $S_0$ along the $y_1$-direction.
According to Beno\^{i}t [1983] system (\ref{eq7}) may have various types of singularities.
\begin{definition} \label{def2}
\begin{itemize}
\item[-] The fold is the set of points where the \textit{slow manifold} is tangent to the $y_1$-direction.
\item[-] The cusp is the set of points where the \textit{fold} is tangent to the $y_1$-direction.
\item[-] The stationary points are not on the \textit{fold} according to genericity assumptions.
\item[-] The pseudo-singular points are defined as the location of the points where
\begin{equation} \label{eq8} \begin{aligned} & g_1\left(x_1,x_2,y_1 \right) = 0,\\ & \frac{\partial g_1\left(x_1,x_2,y_1 \right)}{\partial y_1} = 0, \\ & \frac{\partial g_1\left(x_1,x_2,y_1 \right)}{\partial x_1} f_1\left(x_1, x_2, y_1 \right) + \frac{\partial g_1\left(x_1,x_2,y_1 \right)}{\partial x_2} f_2\left(x_1,x_2,y_1 \right) = 0. \end{aligned} \end{equation}
\end{itemize}
\end{definition}
The concept of \textit{pseudo-singular points} has been originally introduced by Takens [1976] and Arg\'{e}mi [1978]. Again, according to Beno\^{i}t [1983]:\\
\begin{itemize}
\item[-] the first condition indicates that the point belongs to the \textit{slow manifold},
\item[-] the second condition means that the point is on the \textit{fold},
\item[-] the third condition shows that the projection of the vector field on the $(x_1,x_2)$-plane is tangent to the \textit{fold}.
\end{itemize}
\subsection{Reduced vector field}
If $x_1$ can be expressed as an implicit function of $x_2$ and $y_1$ defined by $g_1\left(x_1,x_2,y_1 \right) = 0$, the ``reduced normalized vector field'' reads:
\begin{equation} \label{eq9} \begin{aligned}
\dot{x_2} & = - f_2 \left( x_1, x_2, y_1 \right) \frac{\partial g_1}{\partial y_1}\left( x_1, x_2, y_1 \right),
\\
\dot{y_1} & = \frac{\partial g_1}{\partial x_1} f_1\left(x_1, x_2, y_1 \right) + \frac{\partial g_1}{\partial x_2} f_2\left(x_1, x_2, y_1 \right). \end{aligned} \end{equation}
\subsection{Reduced vector field method}
By using the classification of fixed points of two-dimensional dynamical systems based on the sign of the eigenvalues of the functional Jacobian matrix, Beno\^{i}t [1983] characterized the nature of the \textit{pseudo-singular point} $M$ of the ``reduced vector field'' (\ref{eq9}). Let's note $\Delta$ and $T$ respectively the determinant and the trace of the functional Jacobian matrix associated with system (\ref{eq9}). The \textit{pseudo-singular point} $M$ is:\\
\begin{itemize}
\item a \textit{saddle} if and only if $\Delta < \dfrac{T^2}{4}$ and $\Delta < 0$.
\item a \textit{node} if and only if $0 < \Delta < \dfrac{T^2}{4}$.
\item a \textit{focus} if and only if $\dfrac{T^2}{4} < \Delta $.
\end{itemize}
Then, Beno\^{i}t [1983, p. 171] states the following theorem for the existence of canards:
\begin{theorem}
\label{theo4} If the ``reduced vector field'' {\rm (\ref{eq9})} has a pseudo-singular point of saddle type, then system {\rm (\ref{eq7})} exhibits a canard solution which evolves from the attractive part of the slow manifold towards its repelling part.
\end{theorem}
\begin{proof} See Beno\^{i}t [1983, p. 171]. \end{proof}
\subsection{Chua's system}
Let's consider the system introduced by Itoh \& Chua [1992]:
\begin{equation} \label{eq10} \begin{aligned} \dot{x} & = z - y, \\ \dot{y} & = \alpha(x + y), \\ \varepsilon \dot{z} & = -x - k(z), \end{aligned} \end{equation}
where $k(z) = z^3/3 - z$ and $\alpha$ is a constant.\\
According to Eq. (\ref{eq9}) the reduced vector field reads:
\begin{equation} \label{eq11} \begin{aligned} \dot{y} & = \alpha k'(z)(-k(z) + y) = \alpha (z^2 - 1)\left(- \dfrac{z^3}{3} + z + y\right), \\ \dot{z} & = y - z. \end{aligned} \end{equation}
By Def. \ref{def2} the singularly perturbed dynamical system (\ref{eq10}) admits $M(\pm 2/3, \pm 1, \pm 1)$ as \textit{pseudo-singular points}. The functional Jacobian matrix of reduced vector field (\ref{eq11}) evaluated at $M$ reads:
\begin{equation} \label{eq12} \begin{pmatrix} 0 & \dfrac{10\alpha}{3} \\ 1 & -1 \end{pmatrix} \end{equation}
from which we deduce that: $\Delta = -\dfrac{10\alpha}{3}$ and $T = -1$. So, we have:
\begin{equation} \label{eq13} \dfrac{T^2}{4} - \Delta = \dfrac{1}{12}\left( 3 + 40 \alpha \right). \end{equation}
Thus, according to Th. \ref{theo4}, if $3 + 40 \alpha > 0$ and $\alpha > 0$, then $M$ is a \textit{pseudo-singular saddle point} and so system (\ref{eq10}) exhibits canard solutions. Itoh \& Chua [1992, p. 2791] have also noticed that if $\alpha > 0$, system (\ref{eq10}) has a \textit{pseudo-singular saddle point}.
Nevertheless, the original system (\ref{eq10}) admits, except the origin, two \textit{fixed points} $I(\pm \sqrt{6}, \mp \sqrt{6}, \mp \sqrt{6})$. The functional Jacobian matrix of the ``normalized slow dynamics'' evaluated at $I$ reads:
\begin{equation} \label{eq14} \begin{pmatrix} 0 & -5 & 5 \\ 5\alpha & 5\alpha & 0\\ 0 & 1 & -1 \end{pmatrix} \end{equation}
from which we deduce that there are three eigenvalues:
\[ \lambda_1 = 0 \mbox{ ; } \lambda_{2,3} = \dfrac{1}{2} \left(-1 + 5 \alpha \pm \sqrt{1 - 90 \alpha + 25 \alpha^2} \right) \]
Then, if these eigenvalues are complex conjugate, we have:
\[ 2Re\left( \lambda_{2,3}\right) = -1 + 5\alpha \]
But, according to the theorem of Lyapounov [1892], \textit{fixed points} $I$ are unstable equilibria provided that $-1 + 5\alpha > 0$.\\
Thus, ``canards'' solutions are observed in Chua's system (\ref{eq10}) for $\alpha > 1/5$ as exemplified in Fig. 1 in which such solutions passing through the \textit{pseudo-singular saddle point} $M(2/3, 1, 1)$ have been plotted for parameter set $(\alpha = 0.2571389636, \varepsilon = 1/20)$ in the $(x,y,z)$ phase space.
\begin{figure}
\caption{Canards solutions of Chua's system (\ref{eq10}).}
\label{Fig1}
\end{figure}
In Fig. 2 ``canards solutions'' winding around the \textit{pseudo-singular saddle point} $M(2/3, 1, 1)$ have been plotted for various values of parameter $\alpha$ in the $(z,x)$ phase plane for $\varepsilon = 1/20$.
\begin{figure}
\caption{Canards solutions of Chua's system (\ref{eq10}).}
\label{fig2}
\end{figure}
\begin{remark} Let's notice that we would have obtained the same kind of figures with the \textit{pseudo-singular saddle point} $M(-2/3, -1, -1)$ due to the symmetry of the system {\rm (\ref{eq10})}. \end{remark}
\section{Four-dimensional singularly perturbed systems} \label{Sec4}
For four-dimensional \textit{singularly perturbed dynamical systems} we have three cases: $(p,m) = (3,1)$, $(p,m) = (2,2)$ and $(p,m) = (1,3)$. In this work we will only focus on the first case which will be subject to a special analysis allowing to extend Beno\^{i}t's Theorem \ref{theo4} to dimension four. So, in the case: $(p,m) = (3,1)$ four-dimensional \textit{singularly perturbed dynamical systems} may be defined as:
\begin{equation} \label{eq15} \begin{aligned}
\dot{x_1} & = f_1 \left( x_1, x_2, x_3, y_1 \right),
\\
\dot{x_2} & = f_2 \left( x_1, x_2, x_3, y_1 \right),
\\
\dot{x_3} & = f_3 \left( x_1, x_2, x_3, y_1 \right),
\\
\varepsilon \dot{y_1} & = g_1 \left( x_1, x_2, x_3, y_1 \right),
\end{aligned} \end{equation}
where $\vec{x}= (x_1, x_2, x_3)^t \in \mathbb{R}^3$, $\vec{y}= (y_1) \in \mathbb{R}^1$, $0 < \varepsilon \ll 1$, and the functions $f_i$ and $g_1$ are assumed to be $C^2$ functions of $(x_1, x_2, x_3, y_1)$.\\
The definitions of \textit{fold}, \textit{cusp} and \textit{pseudo-singular fixed points} may be extended to dimension four.
\subsection{Fold, cusp and pseudo-singular points}
\\
Let's propose the following definitions
\begin{definition}\label{def3}
\\
The location of the points where $\partial_{y_1} g_1\left(x_1,x_2, x_3, y_1 \right) = p\left(x_1,x_2, x_3, y_1 \right) = 0$ and $g_1\left(x_1,x_2,x_3,y_1 \right) = 0$ is called the \textit{fold}. \end{definition}
The \textit{cofold} is still defined as the projection, if it exists, of the fold line onto $S_0$ along the $y_1$-direction.
As previously system (\ref{eq15}) may have various types of singularities.
\begin{definition} \label{def4}
\\
\begin{itemize}
\item[-] The fold is the set of points where the \textit{slow manifold} is tangent to the $y_1$-direction.
\item[-] The cusp is the set of points where the \textit{fold} is tangent to the $y_1$-direction.
\item[-] The stationary points are not on the \textit{fold} according to genericity assumptions.
\item[-] The pseudo-singular points are defined as the location of the points where
\begin{equation} \label{eq16} \begin{aligned} & g_1\left(x_1,x_2,x_3,y_1 \right) = 0,\\ & \frac{\partial g_1\left(x_1,x_2,x_3,y_1 \right)}{\partial y_1} = 0, \\ & \frac{\partial g_1}{\partial x_1} f_1 + \frac{\partial g_1}{\partial x_2} f_2 + \frac{\partial g_1}{\partial x_3} f_3= 0. \end{aligned} \end{equation}
\end{itemize}
\end{definition}
Again, following Beno\^{i}t [1983]:\\
\begin{itemize}
\item[-] the first condition indicates that the point belongs to the \textit{slow manifold},
\item[-] the second condition means that the point is on the \textit{fold},
\item[-] the third condition shows that the projection of the vector field on the $(x_1,x_2)$-plane is tangent to the \textit{fold}.
\end{itemize}
\subsection{Reduced vector field}
\\
If $x_1$ can be expressed as an implicit function of $x_2$, $x_3$ and $y_1$ defined by $g_1\left(x_1,x_2, x_3, y_1 \right) = 0$, the ``reduced normalized vector field'' reads:
\begin{equation} \label{eq17} \begin{aligned}
\dot{x_2} & = - f_2 \left( x_1, x_2, x_3, y_1 \right) \frac{\partial g_1}{\partial y_1}\left( x_1, x_2, x_3, y_1 \right),
\\
\dot{x_3} & = - f_3 \left( x_1, x_2, x_3, y_1 \right) \frac{\partial g_1}{\partial y_1}\left( x_1, x_2, x_3, y_1 \right),
\\
\dot{y_1} & = \frac{\partial g_1}{\partial x_1} f_1 + \frac{\partial g_1}{\partial x_2} f_2 + \frac{\partial g_1}{\partial x_3} f_3. \end{aligned} \end{equation}
\subsection{Reduced vector field method}
\\
By using the classification of fixed points of three-dimensional dynamical systems based on the sign of the eigenvalues, we can characterize the nature of the \textit{pseudo-singular point} $M$ of the ``reduced vector field'' (\ref{eq17}). Let's note $\Delta$ and $T$ respectively the determinant and the trace of the functional Jacobian matrix associated with system (\ref{eq17}) and $S = \sum \limits_{i=1}^3 J_{ii} $ where $J_{ii}$ is the minor obtained by removing the $i^{th}$ row and the $i^{th}$ column in the functional Jacobian matrix. The discriminant of the \textit{characteristic polynomial} of the functional Jacobian matrix reads:
\[ R = 4P^3 + 27 Q^2 \mbox{\quad with \quad} P = S - \dfrac{T^2}{3} \mbox{\quad and \quad} Q = -\dfrac{2T^3}{27} + \dfrac{T S}{3} - \Delta \]
Then, the \textit{pseudo-singular point} $M$ is:
\begin{itemize}
\item a \textit{saddle} if and only if $R < 0$, i.e. $S < \dfrac{T^2}{3}$ and $\Delta < 0$.
\item a \textit{node} if and only if $R < 0$ and $\Delta > 0$.
\item a \textit{focus} if and only if $R > 0$.
\end{itemize}
Thus, we can extend Beno\^{i}t's Theorem \ref{theo4} to dimension four.
\begin{theorem} \label{theo5}
\\ If the ``reduced vector field'' {\rm (\ref{eq17})} has a pseudo-singular point of saddle type\footnote{In dimension three, a saddle point is a singular point having its three eigenvalues real but ``not all with the same sign''. See Poincar\'{e} [1886, p. 154].}, then system {\rm (\ref{eq15})} exhibits a canard solution which evolves from the attractive part of the slow manifold towards its repelling part.
\end{theorem}
\begin{proof} Proof is based on the same arguments as previously. \end{proof}
\subsection{Chua's system}
\\
Let's consider the system introduced by Thamilmaran \textit{et al.} [2004]:
\begin{equation} \label{eq18} \begin{aligned}
\dot{x} & = \beta_1 \left( z - x - u \right),
\\
\dot{y} & = \beta_2 z,
\\
\dot{z} & = -\alpha_2 z - y - x,
\\
\varepsilon \dot{u} & = x - k(u),
\end{aligned} \end{equation}
where $k(u) = c_1 u^3 + c_2 u$, $\varepsilon = 1/\alpha_1$, $\alpha_2$, $c_{1,2}$ and $\beta_{1,2}$ are constant.\\
According to Eq. (\ref{eq17}) the reduced vector field reads:
\begin{equation} \label{eq19} \begin{aligned}
\dot{y} & = - \beta_2 \left( - 3c_1 u^2 - c_2 \right)z,
\\
\dot{z} & = - \left( - 3c_1 u^2 - c_2 \right) \left(-y - c_1 u^3 - c_2 u - \alpha_2 z \right),
\\
\dot{u} & = \beta_1 \left( - u + z - c_1 u^3 - c_2 u \right). \end{aligned} \end{equation}
By Def. \ref{def4} the singularly perturbed dynamical system (\ref{eq18}) admits:
\[ M(0, \pm \frac{1}{3} \sqrt{\frac{- c_2}{3c_1}} (3 + 2 c_2), \pm \sqrt{\frac{-c_2}{3c_1}}) \]
as \textit{pseudo-singular points}. From the functional Jacobian matrix of system (\ref{eq19}) evaluated at $M$ we compute the \textit{characteristic polynomial} from which we deduce that:
\[ R=-\dfrac{4}{27} \beta_1^3 c_2^2 (3 \alpha_2 + 2 c_2 (1 + \alpha_2))^2 (8 c_2 (3 \alpha_2 + 2 c_2 (1 + \alpha_2)) + 3 \beta_1). \]
In the parameter set used in system (\ref{eq18}) $\beta_1 > 0$ and $c_2 < 0$.\\
So, $R < 0$ provided that:
\[ 8 c_2 (3 \alpha_2 + 2 c_2 (1 + \alpha_2)) + 3 \beta_1 < 0 \quad \Leftrightarrow \quad \alpha_2 < \dfrac{-16 c_2^2 - 3 \beta_1}{8 c_2 (3 + 2 c_2)} \]
From the functional Jacobian matrix we also deduce that:
\[ \Delta = 0 \]
This implies that one of the three (real) eigenvalues (say $\lambda_1$) is null. So, in order to have a \textit{pseudo-singular saddle point} the two remaining eigenvalues (say $\lambda_2$ and $\lambda_3$) must be of different sign.\\
But, since $S = \lambda_1\lambda_2 + \lambda_1\lambda_3 + \lambda_2\lambda_3$ it means that $S = \lambda_2\lambda_3 < 0$. Thus, we may have
\[ S = \lambda_{2} \lambda_{3} = -\frac{2}{3} \beta_1 c_2 (3 \alpha_2 + 2 c_2 (1 + \alpha_2)) < 0 \quad \Leftrightarrow \quad \alpha_2 < \dfrac{-2 c_2}{3 + 2 c_2} \]
Combining the two required conditions, i.e., $R < 0$ and $\Delta < 0$ ($S < 0$ in this case) we find that:
\[ \alpha_2 < \dfrac{-2 c_2}{3 + 2 c_2} < \dfrac{-16 c_2^2 - 3 \beta_1}{8 c_2 (3 + 2 c_2)} = \dfrac{-2 c_2}{3 + 2 c_2} + \dfrac{- 3 \beta_1}{8 c_2 (3 + 2 c_2)}. \]
So, according to Th. \ref{theo5}, if $\alpha_2 < -2c_2/(3 + 2 c_2)$, then $M$ is a \textit{pseudo-singular saddle point} and so system (\ref{eq18}) exhibits canard solutions.\\
Thus, ``canards'' solutions are observed in Chua's system (\ref{eq18}) for $\alpha_2 < -2c_2 / (3 +2 c_2)$ as exemplified in Fig. 3 in which such solutions passing through the \textit{pseudo-singular saddle point} $M$ have been plotted for parameter set $\varepsilon = 1 / \alpha_1 = 1 / 10.1428 = 0.098592$ ; $\alpha_2 = 0.9$ ; $\beta_1 = 0.121$ ; $\beta_2 = 0.0047$ ; $c_1 = 0.393781$ ; $c_2 = -0.72357$ in the $(u,z,x)$ phase space.
\begin{figure}
\caption{Canards solutions of Chua's system (\ref{eq18}).}
\label{Fig3}
\end{figure}
\section{Flow curvature method} \label{Sec5}
A new approach called \textit{Flow Curvature Method} based on the use of \textit{Differential Geometry} properties of \textit{curvatures} has been recently developed by Ginoux \textit{et al.} [2008] and Ginoux [2009]. According to this method, the highest \textit{curvature of the flow}, i.e. the $(n - 1)^{th}$ \textit{curvature} of \textit{trajectory curve} integral of $n$-dimensional dynamical system defines a \textit{manifold} associated with this system and called \textit{flow curvature manifold}.
\begin{definition} \label{def5}
\\ The location of the points where the $(n - 1)^{th}$ \textit{curvature of the flow}, i.e. the \textit{curvature of the trajectory curve} $\vec {X}$, integral of any $n$-dimensional singularly perturbed dynamical system {\rm (\ref{eq5})} vanishes, defines a $(n - 1)$-dimensional \textit{flow curvature manifold} the equation of which is:
\begin{equation} \label{eq20} \phi ( {\vec {X}} ) = \dot { \vec {X} } \cdot ( {\ddot {\vec {X}} \wedge \dddot{\vec {X}}\wedge \ldots \wedge \mathop {\vec {X}}\limits^{\left( n \right)} } ) = \det( { \dot {\vec {X}},\ddot {\vec {X}}, \dddot{\vec {X}},\ldots ,\mathop {\vec {X}}\limits^{\left( n \right)} } ) = 0 \end{equation}
where $\mathop {\vec {X}}\limits^{\left( n \right)}$ represents the time derivatives up to order $n$ of $\vec {X} = (\vec{x}, \vec{y})^t$.
\end{definition}
\subsection{Three-dimensional singularly perturbed systems}
\\
According to the \textit{Flow Curvature Method} the \textit{flow curvature manifold} of the \textit{reduced vector field} (\ref{eq9}) is defined by:
\begin{equation} \label{eq21} \phi ( \vec{X} ) = \det( \dot {\vec {X}},\ddot {\vec {X}} ) = 0 \end{equation}
where $\vec {X} = (x_2, y_1)^t$.
We suppose that the \textit{flow curvature manifold} $\phi(x_2, y_1)$ admits at $M(x_2^*,y_1^*)$ an \textit{extremum} such that: $\partial_{x_2} \phi = \partial_{y_1} \phi =0$.\\
The Hessian matrix of the manifold $\phi (x_2, y_1)$ is defined, provided that all the second partial derivatives of $\phi$ exist, by
\begin{equation} \label{eq22} H_{\phi (x_2, y_1)} = \begin{pmatrix} \dfrac{\partial^2\phi}{\partial x^2_2 } & \dfrac{\partial^2\phi}{\partial x_2 \partial y_1 }
\\ \dfrac{\partial^2\phi}{\partial y_1 \partial x_2 } & \dfrac{\partial^2\phi}{\partial y^2_1 }\\ \end{pmatrix}. \end{equation}
Then, according to the so-called \textit{Second Derivative Test} (see for example Thomas \& Finney [1992]) and by noticing
\begin{equation} \label{eq23} D_1 = \dfrac{\partial^2 \phi}{\partial x^2_2} \mbox{,} \quad D_2 = \begin{vmatrix} \dfrac{\partial^2 \phi}{\partial x^2_2} & \dfrac{\partial^2 \phi}{\partial x_2 \partial y_1}
\\ \dfrac{\partial^2 \phi}{\partial y_1 \partial x_2} & \dfrac{\partial^2 \phi}{\partial y^2_1} \end{vmatrix} \end{equation}
if $D_2 \neq 0$, the \textit{flow curvature manifold} (\ref{eq21}) admits $M(x_2^*,y_1^*)$ as a \\
\begin{itemize}
\item \textit{local minimum}, if and only if $(D_1,D_2) = (+,+)$,
\item \textit{local maximum}, if and only if $(D_1,D_2) = (-,+)$, and
\item \textit{saddle-point}, if and only if $D_2 < 0$.\\
\end{itemize}
Thus, we have the following proposition:
\begin{proposition} \label{prop1}
\\ If the flow curvature manifold of the ``reduced vector field'' {\rm (\ref{eq9})} admits a pseudo-singular point of saddle-type, then system {\rm (\ref{eq7}) } exhibits a canard solution which evolves from the attractive part of the slow manifold towards its repelling part. \end{proposition}
\begin{proof} According to the theorem of Hartman-Grobman [1964] the flow of any dynamical system (\ref{eq9}) is \textit{locally topologically conjugated} to the flow of the linearized system in the vicinity of \textit{fixed points}. So, let's consider the linearized system in the basis of the eigenvectors:
\[ \begin{aligned}
\dot{x_1} & = \lambda_1 x_1,
\\
\dot{x_2} & = \lambda_2 x_2. \end{aligned} \]
where $\lambda_{1,2}$ are the eigenvalues of the functional Jacobian matrix. The \textit{flow curvature manifold} (\ref{eq21}) associated with this linearized system reads:
\[ \phi ( \vec{X} ) = \det( \dot {\vec {X}},\ddot {\vec {X}} ) = x_1 x_2 \lambda_1 \lambda_2 (\lambda_2 - \lambda_1) \]
Then, it's easy to check that the determinant $D_2$ of the Hessian may be written as:
\[ D_2 = - \Delta^2 \left( T^2 - 4 \Delta \right) \]
from which we deduce that if $D_2 < 0$ then $M$ is a \textit{saddle-point} provided that $T = \lambda_1 + \lambda_2 $ and $\Delta = \lambda_1 \lambda_2$ are not null. \end{proof}
\begin{remark} This idea corresponds to topographic system introduced by Poincar\'{e} {\rm [1881-1886]} in his memoirs entitled: ``Sur les courbes d\'{e}finies par une \'{e}quation diff\'{e}rentielle''. Topographic system consists in using level set such as $f\left(x_1, x_2 \right) = constant$ surrounding fixed points in order to define their nature (node, saddle, foci) and their stability. Moreover, Prop. \ref{prop1} leads to the same kind of result as that obtained by Szmolyan \textit{et al.} {\rm [2001]} but without needing to make a change of variables on system {\rm (\ref{eq7})} other than that proposed by Beno\^{i}t {\rm [1983, 2001]}. \end{remark}
The \textit{Flow Curvature Method} has been successfully used by Ginoux \textit{et al.} [2011] for computing the bifurcation parameter value leading to a canard explosion in dimension two already obtained according to the so-called \textit{Geometric Singular Perturbation Method}.
\subsection{Chua's system}
\\
Let's consider again the system (\ref{eq10}) of Itoh \& Chua [1992]
\[ \begin{aligned} \dot{x} & = z - y, \\ \dot{y} & = \alpha(x + y), \\ \varepsilon \dot{z} & = -x - k(z), \end{aligned} \]
where $k(z) = z^3/3 - z$ and $\alpha$ is a constant.
The reduced vector field (\ref{eq11}):
\[ \begin{aligned} \dot{y} & = \alpha k'(z)(-k(z) + y) = \alpha (z^2 - 1)\left(- \dfrac{z^3}{3} + z + y\right), \\ \dot{z} & = y - z. \end{aligned} \]
The \textit{flow curvature manifold} (\ref{eq21}) associated with this reduced vector field reads:
\[ \begin{aligned} \phi(y,z) = & \dfrac{\alpha}{9} [-3 (y - z) (6 y^2 z + 4 z^3 (-2 + z^2) + y (-6 + 9 z^2 - 5 z^4)) \\ & + z (-6 + z^2) (-1 + z^2)^2 (-3 y - 3 z + z^3) \alpha ] = 0. \end{aligned} \]
Proposition \ref{prop1} enables to state that the determinant of the Hessian evaluated at $(y^*, z^*)=(\pm 1, \pm 1)$ becomes
\[ D_2 = -\dfrac{100}{27} \alpha^2 (3 + 40 \alpha), \]
from which one deduces that if $3 + 40 \alpha > 0$ then $M$ is a \textit{pseudo-singular saddle point} and so system (\ref{eq10}) exhibits a canard solution. Thus, we find Beno\^{i}t's result according to the \textit{Flow Curvature Method}.
\subsection{Four-dimensional singularly perturbed systems}
\\
According to the \textit{Flow Curvature Method} the \textit{flow curvature manifold} of the \textit{reduced vector field} (\ref{eq17}) is defined by:
\begin{equation} \label{eq24} \phi ( \vec{X} ) = \det( \dot {\vec {X}},\ddot {\vec {X}}, \dddot {\vec {X}} ) = 0 \end{equation}
where $\vec {X} = (x_2, x_3, y_1)^t$.
We suppose that the \textit{flow curvature manifold} $\phi(x_2,x_3, y_1)$ admits at $M(x_2^*,x_3^*,y_1^*)$ an \textit{extremum} such that: $\partial_{x_2} \phi = \partial_{x_3} \phi = \partial_{y_1} \phi =0$.\\
The Hessian matrix of the manifold $\phi (x_2,x_3,y_1)$ is defined, provided that all the second partial derivatives of $\phi$ exist, by
\begin{equation} \label{eq25} H_{\phi (x_2,x_3,y_1)} = \begin{vmatrix} \dfrac{\partial^2\phi}{\partial x^2_2 } & \dfrac{\partial^2\phi}{\partial x_2 \partial x_3 } & \dfrac{\partial^2\phi}{\partial x_2 \partial y_1 }
\\ \dfrac{\partial ^2\phi}{\partial x_3 \partial x_2 } & \dfrac{\partial^2\phi}{\partial x^2_3 } & \dfrac{\partial^2\phi}{\partial x_3 \partial y_1 }
\\ \dfrac{\partial ^2\phi}{\partial y_1 \partial x_2 } & \dfrac{\partial^2\phi}{\partial y_1 \partial x_3 } & \dfrac{\partial^2\phi}{\partial y^2_1 } \end{vmatrix}. \end{equation}
Then, according to the so-called \textit{Second Derivative Test} and while noticing $D_1$ the determinant of the upper left $1 \times 1$ submatrix of $H_{\phi}$, $D_2$ the determinant of the $2 \times 2$ matrix of $H_{\phi}$ defined as:
\begin{equation} \label{eq26} D_1 = \dfrac{\partial^2 \phi}{\partial x^2_2} \mbox{,} \quad D_2 = \begin{vmatrix} \dfrac{\partial^2 \phi}{\partial x^2_2} & \dfrac{\partial^2 \phi}{\partial x_2 \partial x_3}
\\ \dfrac{\partial^2 \phi}{\partial x_3 \partial x_2} & \dfrac{\partial^2 \phi}{\partial x^2_3} \end{vmatrix}, \end{equation}
by $D_3$ the determinant of the $3 \times 3$ matrix of $H_{\phi}$ defined as:
\begin{equation} \label{eq27} D_3 = \begin{vmatrix} \dfrac{\partial^2\phi}{\partial x^2_2 } & \dfrac{\partial^2\phi}{\partial x_2 \partial x_3 } & \dfrac{\partial^2\phi}{\partial x_2 \partial y_1 }
\\ \dfrac{\partial ^2\phi}{\partial x_3 \partial x_2 } & \dfrac{\partial^2\phi}{\partial x^2_3 } & \dfrac{\partial^2\phi}{\partial x_3 \partial y_1 }
\\ \dfrac{\partial ^2\phi}{\partial y_1 \partial x_2 } & \dfrac{\partial^2\phi}{\partial y_1 \partial x_3 } & \dfrac{\partial^2\phi}{\partial y^2_1 } \end{vmatrix}, \end{equation}
if $D_3 \neq 0$, the \textit{flow curvature manifold} (\ref{eq24}) admits $M(x_2^*,x_3^*,y_1^*)$ as a \\
\begin{itemize}
\item \textit{local minimum}, if and only if $(D_1,D_2,D_3) = (+,+,+)$
\item \textit{local maximum}, if and only if $(D_1,D_2,D_3) = (-,+,-)$
\item \textit{saddle-point}, in all other cases.\\
\end{itemize}
So, we have the following proposition.
\begin{proposition} \label{prop2}
\\ If the flow curvature manifold of the ``reduced vector field'' {\rm (\ref{eq17})} admits a pseudo-singular saddle-point, then system {\rm (\ref{eq15})} exhibits a canard solution which evolves from the attractive part of the slow manifold towards its repelling part. \end{proposition}
\begin{proof} According to Hartman-Grobman's Theorem [1964] the flow of any dynamical system (\ref{eq17}) is \textit{locally topologically conjugated} to the flow of the linearized system in the vicinity of \textit{fixed points}. So, let's consider the linearized system in the basis of the eigenvectors:
\[ \begin{aligned}
\dot{x_1} & = \lambda_1 x_1,
\\
\dot{x_2} & = \lambda_2 x_2,
\\
\dot{x_3} & = \lambda_3 x_3. \end{aligned} \]
where $\lambda_{1,2,3}$ are the eigenvalues of the functional Jacobian matrix. The \textit{flow curvature manifold} (\ref{eq24}) associated with this linearized system reads:
\[ \phi ( \vec{X} ) = \det( \dot {\vec {X}},\ddot {\vec {X}},\dddot {\vec {X}} ) = x_1 x_2 x_3 \lambda_1 \lambda_2 \lambda_3 (\lambda_2 - \lambda_1)(\lambda_1 - \lambda_3)(\lambda_2 - \lambda_3). \]
Then, it's easy to check that the determinant $D_3$ of the Hessian evaluated at $M$ is such that\footnote{The symbol $\propto$ means proportional to.}:
\[ D_3 \propto - 2\Delta^2 R, \]
from which we deduce that if $D_3$ is positive, i.e. $R < 0$, then $M$ is a \textit{saddle-point} provided that $(D_1,D_2) \neq (+,+)$. \end{proof}
\subsection{Chua's system}
\\
Let's consider again the system (\ref{eq18}) of Thamilmaran \textit{et al.} [2004]:
\[ \begin{aligned}
\dot{x} & = \beta_1 \left( z - x - u \right),
\\
\dot{y} & = \beta_2 z,
\\
\dot{z} & = -\alpha_2 z - y - x,
\\
\varepsilon \dot{u} & = x - k(u).
\end{aligned} \]
where $k(u) = c_1 u^3 + c_2 u$, $\varepsilon = 1/\alpha_1$, $\alpha_2$, $c_{1,2}$ and $\beta_{1,2}$ are constant.\\
The reduced vector field (\ref{eq17}) reads:
\[ \begin{aligned}
\dot{y} & = - \beta_2 \left( - 3c_1 u^2 - c_2 \right)z,
\\
\dot{z} & = - \left( - 3c_1 u^2 - c_2 \right) \left(-y - c_1 u^3 - c_2 u - \alpha_2 z \right),
\\
\dot{u} & = \beta_1 \left( - u + z - c_1 u^3 - c_2 u \right). \end{aligned} \]
The \textit{flow curvature manifold} (\ref{eq24}) associated with this reduced vector field reads\footnote{This equation which is too large to be presented here is available at http://ginoux.univ-tln.fr.}:
\[ \phi ( \vec{X} ) = \det( \dot {\vec {X}},\ddot {\vec {X}}, \dddot {\vec {X}} ) = \phi ( y,z,u ) = 0. \]
By considering that the parameter set of this system is such that $\beta_2 \ll 1$ and according to proposition \ref{prop2} we find that:
\[ D_1 \propto c_2 (3\alpha_2 + 2 c_2 (1 + \alpha_2))^2, \]
\[ D_2 \propto - (6 c_2 \alpha_2 + 4c_2^2(1 + \alpha_2) + \beta_1), \]
\[ D_3 \propto (6 c_2 \alpha_2 + 4c_2^2(1 + \alpha_2))(6 c_2 \alpha_2 + 4c_2^2(1 + \alpha_2) + \beta_1) P(\alpha_2, c_2), \]
where $P(\alpha_2, c_2)$ is a positive quadratic polynomial in $\alpha_2$.\\
Since $c_2 < 0$, we deduce that $M$ is a \textit{saddle point} provided that
\[ \alpha_2 < \dfrac{-2 c_2}{3 + 2 c_2}. \]
Thus, we find Beno\^{i}t's result according to the \textit{Flow Curvature Method}.
\section{Discussion}
In this work Beno\^{i}t's theorem for the generic existence of ``canards'' solutions in \textit{singularly perturbed dynamical systems} of dimension three with one fast variable has been extended to those of dimension four. Then, it has been established that this result can be found according to the \textit{Flow Curvature Method}. The Hessian of the \textit{flow curvature manifold} and the so-called \textit{Second Derivative Test} enabled to characterize the nature of the \textit{pseudo-singular saddle points}. Applications to Chua's cubic model of dimension three and four highlighted the existence of ``canards'' solutions in such systems. According to Prof. Eric Beno\^{i}t (personal communications) the cases $(p,m)=(3,1)$ and $(p,m) = (2,2)$ for which his theorems [Beno\^{i}t, 1983, 2001] for canard existence at pseudo-singular points of saddle-type still holds have been completely analyzed while the case $p = 1$ and $m = 3$ remains an open problem since the fold becomes a two-dimensional manifold and the pseudo-singular fixed points become pseudo-singular curves. In this case, fold and cusps are defined according to the theory of surfaces singularities and are strongly related to Thom's catastrophe theory [Thom, 1989].
\section*{Acknowledgments}
First author would like to thank Prof. Martin Wechselberger for his fruitful advice. Moreover, let's notice that our main result has been already established by Wechselberger [2012] who has extended \textit{canard theory} of singularly perturbed systems to the more general case of $k+m$-dimensional \textit{singularly perturbed systems} with $k$ \textit{slow} and $m$ \textit{fast} dimensions, with $k \geqslant 2$ and $m \geqslant 1$. The second author is supported by the grants MICIIN/FEDER MTM 2008--03437, AGAUR 2009SGR410, and ICREA Academia and FP7-PEOPLE-2012-IRSES-316338.
\section*{References}
\hspace{0.2in} Andronov, A. A. {\&} Khaikin, S. E. [1937] \textit{Theory of oscillators}, I, Moscow (Engl. transl., Princeton Univ. Press, Princeton, N. J., 1949).\\
Arg\'{e}mi, J. [1978] ``Approche qualitative d'un probl\`{e}me de perturbations singuli\`{e}res dans $\mathbb{R}^4$,'' in \textit{Equadiff 1978}, ed. R. Conti, G. Sestini, G. Villari, 330--340.\\
Beno\^{i}t, E., Callot, J.L., Diener, F. \& Diener, M. [1981] ``Chasse au canard,'' \textit{Collectanea Mathematica} {\bf 31--32} (1-3), 37--119.\\
Beno\^{i}t, E. [1982] ``Les canards de $\mathbb{R}^3$,'' {\it C.R.A.S.} t. 294, S\'{e}rie I, 483--488.\\
Beno\^{i}t, E. [1983] ``Syst\`{e}mes lents-rapides dans $\mathbb{R}^3$ et leurs canards,'' \textit{Soci\'{e}t\'{e} Math\'{e}matique de France}, {\it Ast\'{e}risque} (109--110), 159--191.\\
Beno\^{i}t, E. [2001] ``Perturbation singuli\`{e}re en dimension trois~: Canards en un point pseudosingulier noeud,'' \textit{Bulletin de la Soci\'{e}t\'{e} Math\'{e}matique de France}, (129-1), 91--113.\\
Fenichel, N. [1971] ``Persistence and smoothness of invariant manifolds for flows,'' \textit{Ind. Univ. Math. J.} 21, 193--225.\\
Fenichel, N. [1974] ``Asymptotic stability with rate conditions,'' \textit{Ind. Univ. Math. J.} 23, 1109--1137.\\
Fenichel, N. [1977] ``Asymptotic stability with rate conditions II,'' \textit{Ind. Univ. Math. J.} 26, 81--93.\\
Fenichel, N. [1979] ``Geometric singular perturbation theory for ordinary differential equations,'' \textit{J. Diff. Eq.}, 53--98.\\
Ginoux, J.M., Rossetto, B. \& Chua, L.O. [2008] ``Slow Invariant Manifolds as Curvature of the Flow of Dynamical Systems,'' {\it Int. J. Bif. {\&} Chaos} 11 (18), 3409--3430.\\
Ginoux, J.M. [2009] \textit{Differential Geometry applied to Dynamical Systems}, World Scientific Series on Nonlinear Science, Series A {\bf 66} (World Scientific, Singapore).
Ginoux, J.M. \& Llibre, J. [2011] ``Flow Curvature Method applied to Canard Explosion,'' {\it J. Physics A: Math. Theor.}, 465203, 13pp.\\
Hartman, P. [1964] \textit{Ordinary Differential Equations}, J. Wiley and. Sons, New York.\\
Itoh, M. \& Chua, L.O. [1992] ``Canards and Chaos in Nonlinear Systems,'' \textit{Proc. of 1992 IEEE International Symposium on Circuits and Systems}, San Diego, 2789--2792.\\
Jones, C.K.R.T. [1994] ``Geometric Singular Perturbation Theory in Dynamical Systems,'' \textit{Montecatini Terme}, L. Arnold, Lecture Notes in Mathematics, vol. 1609, Springer-Verlag, 44--118.\\
Kaper, T. [1999] ``An Introduction to Geometric Methods and Dynamical Systems Theory for Singular Perturbation Problems,'' in \textit{Analyzing multiscale phenomena using singular perturbation methods}, (Baltimore, MD, 1998), pages 85--131. Amer. Math. Soc., Providence, RI.\\
Lyapounov, A.M. [1892] ``The general problem of the stability of motion,'' Ph-D Thesis, St Petersbourg, (1892), reprinted in ``Probl\`{e}me g\'{e}n\'{e}ral de la stabilit\'{e} du mouvement,'' \emph{Ann. Fac. Sci.}, Toulouse 9, 203-474, (1907), reproduced in \textit{Ann. Math. Stud.}, \textbf{12}, (1949).\\
O'Malley, R.E. [1974] \textit{Introduction to Singular Perturbations}, Academic Press, New York.\\
O'Malley, R.E. [1991] \textit{Singular Perturbations Methods for Ordinary Differential Equations}, Springer-Verlag, New York.\\
Poincar\'{e}, H. [1881] ``Sur les courbes d\'{e}finies par une \'{e}quation diff\'{e}rentielle,'' {\it Journal de Math\'{e}matiques Pures et Appliqu\'{e}es}, 3\textsuperscript{o} s\'{e}rie, {\bf 7}, 375--422.\\
Poincar\'{e}, H. [1882] ``Sur les courbes d\'{e}finies par une \'{e}quation diff\'{e}rentielle,'' {\it Journal de Math\'{e}matiques Pures et Appliqu\'{e}es}, 3\textsuperscript{o} s\'{e}rie, {\bf 8}, 251--296.\\
Poincar\'{e}, H. [1885] ``Sur les courbes d\'{e}finies par une \'{e}quation diff\'{e}rentielle,'' {\it Journal de Math\'{e}matiques Pures et Appliqu\'{e}es}, 4\textsuperscript{o} s\'{e}rie, {\bf 1}, 167--244.\\
Poincar\'{e}, H. [1886] ``Sur les courbes d\'{e}finies par une \'{e}quation diff\'{e}rentielle,'' {\it Journal de Math\'{e}matiques Pures et Appliqu\'{e}es}, 4\textsuperscript{o} s\'{e}rie, {\bf 2}, 151--217.\\
Rossetto, B. [1986] ``Trajectoires lentes de syst\`{e}mes dynamiques lents-rapides,'' \textit{International Conference on Analysis and Optimization}, unpublished notes.\\
Szmolyan, P. \& Wechselberger, M. [2001] ``Canards in $\mathbb{R}^3$,'' \textit{J. Dif. Eqs.} 177, 419--453.\\
Takens, F. [1976] ``Constrained equations, a study of implicit differential equations and their discontinuous solutions,'' in \textit{Structural stability, the theory of catastrophes and applications in the sciences}, \textit{Springer Lecture Notes in Math.}, \textbf{525}, 143--234.\\
Thamilmaran, K., Lakshmanan, M. {\&} Venkatesan, A. [2004] ``Hyperchaos in a modified canonical Chua's circuit,'' {\it Int. J. Bifurcation and Chaos}, vol. 14, 221--243.\\
Thom, R. [1989] \textit{Structural Stability and Morphogenesis: An Outline of a General Theory of Models} Reading, MA: Addison-Wesley.\\
Thomas, Jr. G.B. \& Finney, R.L. [1992] \textit{Maxima, Minima, and Saddle Points}, \S{}12.8 in Calculus and Analytic Geometry, 8$^{th}$ ed. Reading, MA: Addison-Wesley, 881-891.\\
Tikhonov, A.N. [1948] ``On the dependence of solutions of differential equations on a small parameter,'' \textit{Mat. Sbornik N.S.}, 31, 575--586.\\
Van der Pol, B. [1926] ``On relaxation-oscillations,'' {\it The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science}, {\bf 7} (2), 978--992.\\
Wechselberger, M. [2005] ``Existence and Bifurcation of Canards in $\mathbb{R}^3$ in the case of a Folded Node,'' \textit{SIAM J. Applied Dynamical Systems} 4, 101--139.\\
Wechselberger, M. [2012] ``{A} propos de canards,'' \textit{Trans. Amer. Math. Soc.}, \textbf{364} (2012) 3289--3309.\\
Zvonkin, A. K. \& Shubin, M. A. [1984] ``Non-standard analysis and singular perturbations of ordinary differential equations,'' {\it Uspekhi Mat. Nauk.} \textbf{39} 2 (236), 69--131.
\end{document} |
\begin{document}
\setlength{\parindent}{2em}
\newtheorem{thm}{Theorem} \numberwithin{thm}{section} \newtheorem{lem}{Lemma} \newtheorem{cor}{Corollary}
\theoremstyle{definition} \newtheorem{definition}{Definition}
\newtheorem{remark}{Remark}
\numberwithin{lem}{section} \numberwithin{definition}{section} \numberwithin{equation}{section} \numberwithin{cor}{section}
\def{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}{{\mathbb R^{n}}} \def\left\lbrace} \def\rb{\right\rbrace{\left\lbrace} \def\rb{\right\rbrace} \def\mathcal{M}} \def\e{\text{e}{\mathcal{M}} \def\e{\text{e}} \def\displaystyle{\displaystyle} \def{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}{{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}} \def\left\vert} \def\rv{\right\vert} \def\v{\vert{\left\vert} \def\rv{\right\vert} \def\v{\vert} \def\left\Vert} \def\rV{\right\Vert} \def\V{\Vert{\left\Vert} \def\rV{\right\Vert} \def\V{\Vert} \def\left\lbrace} \def\rb{\right\rbrace{\left\lbrace} \def\rb{\right\rbrace} \def\mathcal{K}{\mathcal{K}}
\title[\bf The limiting weak type behaviors and The LOWER BOUND ...]{\bf The limiting weak type behaviors and The LOWER BOUND FOR a new WEAK $L\log L$ TYPE NORM OF STRONG MAXIMAL OPERATORS} \thanks{{\it Key words and phrases}. Lower bound, best constant, limiting weak type behavior, strong maximal operator, multilinear strong maximal operator.
\newline\indent\hspace{1mm} {\it 2010 Mathematics Subject Classification}. Primary 42B20; Secondary 42B25.
\newline\indent\hspace{1mm} The authors were supported partly by NSFC (Nos. 11671039, 11771358, 11871101) and NSFC-DFG (No. 11761131002).}
\date{} \author[M. Qin]{Moyan Qin} \address{Moyan Qin:
School of Mathematical Sciences \\
Beijing Normal University \\
Laboratory of Mathematics and Complex Systems \\
Ministry of Education \\
Beijing 100875 \\
People's Republic of China} \email{myqin@mail.bnu.edu.cn}
\author[ H. Wu]{ Huoxiong Wu} \address{ Huoxiong Wu:
School of Mathematical Sciences\\ Xiamen University\\ Xiamen 361005\\
People's Republic of China} \email{huoxwu@xmu.edu.cn}
\author[Q. Xue]{Qingying Xue$^\ast$} \address{Qingying Xue:
School of Mathematical Sciences \\
Beijing Normal University \\
Laboratory of Mathematics and Complex Systems \\
Ministry of Education \\
Beijing 100875 \\
People's Republic of China} \email{qyxue@bnu.edu.cn}
\thanks {*Corresponding author, E-mail: \texttt{qyxue@bnu.edu.cn}}
\maketitle
\begin{center}
\begin{minipage}{13cm}
{\small {\bf Abstract}\quad It is well known that the weak $(1,1)$ bound does not hold for the strong maximal operator, but this operator still enjoys a certain weak $L\log L$ type norm inequality. Let $\Phi_n(t)=t(1+(\log^+t)^{n-1})$ and let the space $L_{\Phi_n}({\mathbb R^n})$ be the set of all measurable functions on ${\mathbb R^n}$ such that
$\|f\|_{L_{\Phi_n}({\mathbb R^n})} :=\|\Phi_n(|f|)\|_{L^1({\mathbb R^n})}<\infty$. In this paper, we introduce a new weak norm space $L_{\Phi_n}^{1,\infty}({\mathbb R^n})$, which is strictly larger than the space $L^{1,\infty}({\mathbb R^n})$, and establish the corresponding limiting weak type behaviors of the strong maximal operators. As a corollary, we show that $\max\left\lbrace\frac{2^n}{(n-1)!},1\right\rbrace$ is a lower bound for the best constant of the $L_{\Phi_n}\to L_{\Phi_n}^{1,\infty}$ norm of the strong maximal operators. Similar results have been extended to
the multilinear strong maximal operators.} \end{minipage} \end{center}
\section{Introduction}\label{sec1}
As one of the two fundamental operators in harmonic analysis, the Hardy--Littlewood maximal operator has played very important roles in harmonic analysis, ergodic theory and index theory. By the Lebesgue differentiation theorem, it is known that the almost everywhere convergence property of some operators is closely related to whether their associated maximal operators enjoy certain weak type inequalities. Let $B(x,r)$ be a ball in ${\mathbb R^n}$, centered at $x$ with radius $r$. Recall that the Hardy--Littlewood maximal function \begin{equation}\label{eq11}
M(f)(x) = \sup_{r>0}\frac{1}{|B(x,r)|}\int_{B(x,r)}|f(y)|dy \end{equation} and its purpose in differentiation on $\mathbb R$ were introduced by Hardy and Littlewood \cite{HL1930}, and later extended and developed by Wiener \cite{W1939} on ${\mathbb R^n}$. The famous Hardy--Littlewood--Wiener theorem states that $M$ is of weak $(1,1)$ type and $L^p$ bounded for $p>1$. Similar results also hold for the uncentered Hardy--Littlewood maximal operator. In particular, Grafakos and Kinnunen \cite{GK1998} investigated the weak type estimates for the uncentered Hardy--Littlewood maximal operator in general measure spaces of dimension one.
Now, we focus our attention on the best constants problem for the Hardy--Littlewood maximal operator. The best constants problem of weak endpoint estimates for Calder\'{o}n--Zygmund type operators has always attracted lots of attention. For example, for $n=1$, Davis \cite{D1974} obtained the best constant of the weak-type $(1,1)$ norm for the Hilbert transform, and Melas \cite{M2003} proved that $\Vert M\Vert_{L^1\to L^{1,\infty}}=\frac{11+\sqrt{61}}{12}$. However, for $n\ge2$, things become more subtle. The upper bound of $\|M\|_{L^1\to L^{1,\infty}}$ was determined by Stein and Str\"{o}mberg \cite{SS1983}. It was shown that it is less than a constant multiple of $n\log n$. Since then, only tardy progress has been made. For the lower bound, it is easy to check that
$$\lim_{\lambda\to1^-}\lambda|\left\lbrace x\in{\mathbb R^n}:M(\chi_{B(0,1)})(x)>\lambda\right\rbrace| = \|\chi_{B(0,1)}\|_{L^1({\mathbb R^n})},$$
which implies that $\|M\|_{L^1\to L^{1,\infty}}\ge1$.
In 2006, Janakiraman \cite{J2006} investigated the limiting weak type behavior of $M$. He proved that
$$\lim_{\lambda\to0^+}\lambda|\left\lbrace x\in{\mathbb R^n}:M(f)(x)>\lambda\right\rbrace|=\| f\|_{L^1({\mathbb R^n})},$$
which again indicates that $\|M\|_{L^1\to L^{1,\infty}}\ge1$. Therefore, this gives a new way to find the lower bound of the best constant of the maximal operator, as well as some other operators, such as singular integrals, fractional integral operators, etc. See \cite{DL2017,DL2017.,GHW,HGW2019,HW2019,HW2019.,HH2008,J2004} and the references therein.
If the supremum in (\ref{eq11}) is taken over some other kinds of non-trivial bases, such as the translation invariant basis of rectangles in the work of C\'{o}rdoba and Fefferman \cite{CF1975}, the basis formed by convex bodies in \cite{Bo}, or rectangles with a side parallel to some direction (lacunary parabolic set of directions in \cite{NSW}, Cantor set of directions in \cite{KA1}, arbitrary set of directions in \cite{AS,KA2}), then the strong boundedness or the weak type estimate for these new maximal operators may fail to hold.
In this paper, the object of our investigation is the maximal operator associated with the translation invariant basis of rectangles. In 1935, Jessen, Marcinkiewicz and Zygmund \cite{JMZ1935} pointed out that the following strong maximal function is not of weak type $(1,1)$, which is quite different from the classical Hardy--Littlewood maximal operator:
$$\mathcal{M}_n(f)(x) = \sup_{\substack{R \ni x \\ R\in\mathcal{R}}}\frac{1}{|R|} \int_R |f(y)|dy,$$ where $\mathcal{R}$ denotes the family of all rectangles in ${\mathbb R^n}$ with sides parallel to the axes. One may further ask why there is such a big difference between these two operators. This is mainly because the volume of a ball only depends on its one-dimensional radius, while the volume of a rectangle is related to the lengths of $n$ sides. Therefore, $M$ is essentially an operator of one parameter and $\mathcal{M}_n$ is an operator of $n$ parameters.
As a replacement of the weak $(1,1)$ estimate, it was shown in \cite{JMZ1935} that the strong maximal operator enjoys the $L\log L$ weak type estimate as follows: \begin{equation}\label{eq12}
|\left\lbrace x\in{\mathbb R^n}:\mathcal{M}_n(f)(x)>\lambda\right\rbrace|\lesssim\int_{{\mathbb R^n}}\Phi_n\left(\frac{|f(x)|}\lambda\right)dx, \end{equation} where $\Phi_n(t)=t(1+(\log^+t)^{n-1})$ and $\log^+t=\max\left\lbrace\log t,0\right\rbrace$. A geometric proof of inequality (\ref{eq12}) was given by C\'ordoba and Fefferman \cite{CF1975}. It is worth pointing out that their elegant proof relies heavily on a covering lemma they established therein. This covering lemma is very important and has been widely used in many subsequent works. We refer the readers to references \cite{B1983,BK1984/85,Ch,LP2014,M2006}.
This paper is devoted to finding the lower bounds for the best constant of the weak $L\log L$ type norm of the strong maximal operators. This will be done by establishing the limiting weak type behavior of $\mathcal{M}_n$. Since $\mathcal{M}_n$ is not of weak type $(1,1)$, the space $L^{1,\infty}$ and the limit of $\lambda|\left\lbrace x\in{\mathbb R^n}:\mathcal{M}_n(f)(x)>\lambda\right\rbrace|$ are not suitable for our purpose. Therefore, we need to introduce the weak norm space $L_{\Phi_n}^{1,\infty}({\mathbb R^n})$.
\begin{definition}[\bf {New weak norm space $L_{\Phi_n}^{1,\infty}({\mathbb R^n})$}]Let $\Phi_n(t)=t(1+(\log^+t)^{n-1})$ and let the space $L_{\Phi_n}({\mathbb R^n})$ be the set of all measurable functions on ${\mathbb R^n}$ such that
$\|f\|_{L_{\Phi_n}({\mathbb R^n})} :=\|\Phi_n(|f|)\|_{L^1({\mathbb R^n})}<\infty$. The new weak norm space $L_{\Phi_n}^{1,\infty}({\mathbb R^n})$, which is strictly larger than $L^{1,\infty}({\mathbb R^n})$, is defined to be the set of all measurable functions on ${\mathbb R^n}$ such that
\begin{equation*}\|f\|_{L_{\Phi_n}^{1,\infty}({\mathbb R^n})} := \sup_{\lambda>0}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace x\in{\mathbb R^n}:|f(x)|>\lambda\right\rbrace| < \infty.\end{equation*} \end{definition}
Our main results are as follows: \begin{thm}\label{thm1}
If $f\in L_{\Phi_n}({\mathbb R^n})$, then $\mathcal{M}_n(f)\in L_{\Phi_n}^{1,\infty}({\mathbb R^n})$ and enjoys the limiting weak type behaviors as follows:
\begin{enumerate}
\item[\rm{(i)}] $\displaystyle\lim_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace x\in{\mathbb R^n}:\mathcal{M}_n(f)(x)>\lambda\right\rbrace|=\frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n})};$
\item[\rm{(ii)}] $\displaystyle\lim_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace x\in{\mathbb R^n}:\mathcal{M}_n(f)(x)>\lambda\right\rbrace|=0$.
\end{enumerate} \end{thm}
Denote the centered strong maximal operator by $\mathcal{M}_n^c$; then we have \begin{thm}\label{thm2} If $f\in L_{\Phi_n}({\mathbb R^n})$, then $\mathcal{M}_n^c(f)\in L_{\Phi_n}^{1,\infty}({\mathbb R^n})$ and enjoys the limiting weak type behaviors as follows: \begin{enumerate}
\item[\rm{(i)}] $\displaystyle\lim_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace x\in{\mathbb R^n}:\mathcal{M}_n^c(f)(x)>\lambda\right\rbrace|=\frac{1}{(n-1)!}\|f\|_{L^1({\mathbb R^n})};$
\item[\rm{(ii)}] $\displaystyle\lim_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace x\in{\mathbb R^n}:\mathcal{M}_n^c(f)(x)>\lambda\right\rbrace|=0$. \end{enumerate} \end{thm}
From Theorem \ref{thm1} (i), it is easy to deduce the following corollary:
\begin{cor}\label{cor1} The best constants of $\mathcal{M}_n$ and $\mathcal{M}_n^c$ satisfy
$$\|\mathcal{M}_n\|_{L_{\Phi_n}({\mathbb R^n})\to L_{\Phi_n}^{1,\infty}({\mathbb R^n})} \ge \max\left\lbrace\frac{2^n}{(n-1)!},1\right\rbrace;\quad \|\mathcal{M}_n^c\|_{L_{\Phi_n}({\mathbb R^n})\to L_{\Phi_n}^{1,\infty}({\mathbb R^n})} \ge1.$$ \end{cor}
The organization of this paper is as follows. The proofs of Theorem \ref{thm1} and Corollary \ref{cor1} will be presented in Section \ref{sec2}. The method of the proof of Theorem \ref{thm1} also can be applied to prove Theorem \ref{thm2}, so we leave it to the readers. In Section \ref{sec3}, a discussion on multilinear strong maximal operators will be given.
\section{Proof of Theorem \ref{thm1} and Corollary \ref{cor1}}\label{sec2}
For readability, this section will be divided into four subsections. The proof of Theorem \ref{thm1} will be given in the first three subsections, and the proof of Corollary \ref{cor1} will be demonstrated in the last one.
We begin with the following lemma, which provides a foundation for our analysis.
\begin{lem}\label{lem21} Suppose $x=(x_1,\cdots,x_n)\in{\mathbb R^n}$, and let $R_\varepsilon,r_\varepsilon,c$ be three positive numbers satisfying $c>(R_\varepsilon+r_\varepsilon)^n$. Then
$$\left|\left\lbrace x:x_1,\cdots,x_n>R_\varepsilon,\prod_{k=1}^n(x_k+r_\varepsilon)<c\right\rbrace\right| = \sum_{k=1}^nB_{n,k}c(\log c)^{n-k}+(-1)^n(R_\varepsilon+r_\varepsilon)^n,$$ where $B_{n,1}=1/(n-1)!$ and, for $k\ge2$, the $B_{n,k}$ are finite real numbers depending only on $n,k$ and $R_\varepsilon+r_\varepsilon$. \end{lem}
\begin{proof} The proof will be done by induction on $n$. Obviously, Lemma \ref{lem21} holds when $n=1$. Now assume that the result holds for the $(n-1)$-dimensional case; we need to show that it holds for the $n$-dimensional case. By a fundamental calculation, we have \begin{align*}
& \left|\left\lbrace} \def\rb{\right\rbrace x:x_1,\cdots,x_n>R_\varepsilon,\prod_{k=1}^n(x_k+r_\varepsilon)<c\rb\right| \\
&= \int_{R_\varepsilon}^{\frac c{(R_\varepsilon+r_\varepsilon)^{n-1}}-r_\varepsilon}\left|\left\lbrace} \def\rb{\right\rbrace(x_2,\cdots,x_n):x_2,\cdots,x_n>R_\varepsilon,\prod_{k=2}^n(x_k+r_\varepsilon)<\frac c{x_1+r_\varepsilon}\rb\right|dx_1 \\
&= \frac1{(n-2)!}\int_{R_\varepsilon}^{\frac c{(R_\varepsilon+r_\varepsilon)^{n-1}}-r_\varepsilon}\frac c{(x_1+r_\varepsilon)}\left(\log\frac c{x_1+r_\varepsilon}\right)^{n-2}dx_1 \\
&\quad +\sum_{k=3}^nB_{n-1,k}\int_{R_\varepsilon}^{\frac c{(R_\varepsilon+r_\varepsilon)^{n-1}}-r_\varepsilon}\frac c{(x_1+r_\varepsilon)}\left(\log\frac c{x_1+r_\varepsilon}\right)^{n-k}dx_1 \\
&\quad +(-1)^{n-1}(R_\varepsilon+r_\varepsilon)^{n-1}\left(\frac c{(R_\varepsilon+r_\varepsilon)^{n-1}}-r_\varepsilon-R_\varepsilon\right) \\
&= \frac{c(\log c)^{n-1}}{(n-1)!}+\sum_{k=2}^nB_{n,k}c(\log c)^{n-k}+(-1)^n(R_\varepsilon+r_\varepsilon)^n. \end{align*} Therefore the proof of Lemma \ref{lem21} is finished by induction. \end{proof}
Now we are ready to prove Theorem \ref{thm1}. We divide it into three subsections.
\subsection{$\mathcal{M}_n$ is of type $(L_{\Phi_n},L_{\Phi_n}^{1,\infty})$.} \ \newline \indent Note that \begin{equation*}
\begin{aligned}
\log^+\frac{|f(x)|}\lambda &\le
\begin{cases}
\log|f(x)|+\log\frac1\lambda, & |f(x)|\ge\lambda, \\
0, & |f(x)|<\lambda
\end{cases} \\
&\le \log^+|f(x)| + \log^+\frac1\lambda,
\end{aligned} \end{equation*} then we have \begin{align*}
\left(\log^+\frac{|f(x)|}\lambda\right)^{n-1} &\le \left(\log^+|f(x)| + \log^+\frac1\lambda\right)^{n-1} \\
&\le 2^{n-1}\max\left\lbrace} \def\rb{\right\rbrace(\log^+|f(x)|)^{n-1},\left(\log^+\frac1\lambda\right)^{n-1}\rb \\
&\le 2^{n-1}\left(1+(\log^+|f(x)|)^{n-1}\right)\left(1+\left(\log^+\frac1\lambda\right)^{n-1}\right). \end{align*}
Therefore it follows from (\ref{eq12}) that \begin{align*}
& |\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f)(x)>\lambda\rb| \\
&\le C_n'\int_{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\frac{|f(x)|}\lambda\left(1+2^{n-1}\left(1+(\log^+|f(x)|)^{n-1}\right)\left(1+\left(\log^+\frac1\lambda\right)^{n-1}\right)\right)dx \\
&\le 2^nC_n'\frac{1+(\log^+\frac1\lambda)^{n-1}}\lambda\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}, \end{align*} which implies that \begin{equation}\label{eq21}
\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f)(x)>\lambda\rb| \le C_n\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \end{equation} for all $\lambda>0$. This completes the proof that $\mathcal{M}} \def\e{\text{e}_n(f)\in L_{\Phi_n}^{1,\infty}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})$ if $f\in L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})$.
\subsection{Proof of Theorem \ref{thm1} (i)} \ \newline
\indent We may assume $\|f\|_{L^1({\mathbb R^n})}>0$, otherwise there is nothing to prove.
Note that for all $0<\varepsilon\ll\max\left\lbrace} \def\rb{\right\rbrace\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})},1\rb$, there exists a positive real number $r_\varepsilon>1$, such that
$$\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash[-r_\varepsilon,r_\varepsilon]^n)}<\varepsilon.$$
Since $C([-r_\varepsilon,r_\varepsilon]^n)$ is dense in $L_{\Phi_n}([-r_\varepsilon,r_\varepsilon]^n)$, then there exists a continuous function $\widetilde f_1$ defined on $[-r_\varepsilon,r_\varepsilon]^n$ satisfying
$$\|f-\widetilde f_1\|_{L_{\Phi_n}([-r_\varepsilon,r_\varepsilon]^n)}<\varepsilon.$$ Now we denote \begin{align*}
f_1 &= |\widetilde f_1|+\frac\varepsilon{(2r_\varepsilon)^n}\chi_{[-r_\varepsilon,r_\varepsilon]^n}; \\
f_2 &= |f|\chi_{{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash[-r_\varepsilon,r_\varepsilon]^n}; \\
f_3 &= |f\chi_{[-r_\varepsilon,r_\varepsilon]^n}-\widetilde f_1|; \\
f_4 &= \frac\varepsilon{(2r_\varepsilon)^n}\chi_{[-r_\varepsilon,r_\varepsilon]^n}. \end{align*} Therefore
$$f_1-f_3-f_4 \le |f| \le f_1+f_2+f_3$$ and
$$\|f_i\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \le \|f_i\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \le \varepsilon,\quad i=2,3,4.$$ These two facts immediately indicate that \begin{equation}\label{eq221}
\mathcal{M}} \def\e{\text{e}_n(f_1)(x) - \sum_{i=3}^4\mathcal{M}} \def\e{\text{e}_n(f_i)(x) \le \mathcal{M}} \def\e{\text{e}_n(f)(x) \le \mathcal{M}} \def\e{\text{e}_n(f_1)(x) + \sum_{i=2}^3\mathcal{M}} \def\e{\text{e}_n(f_i)(x) \end{equation} and \begin{align*}
\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} - 2\varepsilon \le \|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \le \|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} + 2\varepsilon. \end{align*} To control the weak norm of $\mathcal{M}} \def\e{\text{e}_n$, we need to introduce some notions. Let \begin{align*}
E_\lambda &= \left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f)(x)>\lambda\rb; \\
E_\lambda^i &= \left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_i)(x)>\lambda\rb,\qquad i=1,2,3,4. \end{align*} Thus it follows from (\ref{eq221}) that \begin{equation}\label{eq222}
E_{(1+2\sqrt\varepsilon)\lambda}^1\backslash(E_{\sqrt\varepsilon\lambda}^3\cup E_{\sqrt\varepsilon\lambda}^4) \subset E_\lambda \subset E_{(1-2\sqrt\varepsilon)\lambda}^1\cup E_{\sqrt\varepsilon\lambda}^2\cup E_{\sqrt\varepsilon\lambda}^3. \end{equation}
To prove Theorem \ref{thm1} (i), we need to consider the contribution of each term on both sides of (\ref{eq222}). Here is the main structure of this proof. The upper estimates for $E_{\sqrt\varepsilon\lambda}^2$, $E_{\sqrt\varepsilon\lambda}^3$ and $E_{\sqrt\varepsilon\lambda}^4$ will be given in Step 1. In Step 2, we are going to establish the lower estimate of $E_{(1+2\sqrt\varepsilon)\lambda}^1$. Combining with the upper estimates in Step 1, we may deduce the lower estimate of $E_\lambda$. In Step 3, an upper estimate for $E_{(1-2\sqrt\varepsilon)\lambda}^1$ will be given. Then the results in Step 1 and Step 3 yield an upper estimate for $E_\lambda$.
\noindent{\bf Step 1: Upper estimates for $E_{\sqrt\varepsilon\lambda}^2,E_{\sqrt\varepsilon\lambda}^3,E_{\sqrt\varepsilon\lambda}^4$.}
By the fact that $\|f_i\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}\le\varepsilon$ for $i=2,3,4$, together with (\ref{eq21}), we obtain the upper estimates as follows: \begin{equation}\label{eq22s1}
|E_{\sqrt\varepsilon\lambda}^i| \le C_n\frac{1+(\log^+\frac1{\sqrt\varepsilon\lambda})^{n-1}}{\sqrt\varepsilon\lambda}\varepsilon = C_n\frac{1+(\log^+\frac1{\sqrt\varepsilon\lambda})^{n-1}}{\lambda}\sqrt\varepsilon,\qquad i=2,3,4. \end{equation}
\noindent{\bf Step 2: Lower estimate for $E_{(1+2\sqrt\varepsilon)\lambda}^1$.}
Recalling that $f_1$ is a continuous function on $[-r_\varepsilon,r_\varepsilon]^n$, then for all $y\in[-r_\varepsilon,r_\varepsilon]^n$, we have $$\frac\varepsilon{(2r_\varepsilon)^n} \le f_1(y) \le \max\limits_{y\in[-r_\varepsilon,r_\varepsilon]^n}f_1(y) =: A_\varepsilon <\infty.$$
Let $R_\varepsilon=(2r_\varepsilon)^{n+1}A_\varepsilon/\varepsilon+r_\varepsilon$ and define
$$E' = \left\lbrace} \def\rb{\right\rbrace(x_1,\cdots,x_n):|x_1|,\cdots,|x_n|>R_\varepsilon\rb.$$ From geometric view, $E'$ can be divided into $2^n$ intervals, so we denote $$E_1' = \left\lbrace} \def\rb{\right\rbrace(x_1,\cdots,x_n):x_1,\cdots,x_n>R_\varepsilon\rb,$$ and the others by $E'_2,\cdots,E'_{2^n}$.
For all $x=(x_1,\cdots,x_n)\in E'_1$ and $\vec a=(a_1,\cdots,a_n),\vec b=(b_1,\cdots,b_n)$ satisfy $$a_k\le x_k\le b_k\quad\text{and}\quad a_k<b_k,\qquad k=1,\cdots,n,$$ we define $$F(\vec a,\vec b,x) = \frac1{\prod\limits_{k=1}^n(b_k-a_k)}\int_{a_1}^{b_1}\cdots\int_{a_n}^{b_n}f_1(y)dy.$$ Then we have the following claim.
\noindent{\bf Claim 1:} $F(\vec a,\vec b,x)$ obtains its maximum at $\vec a=(-r_\varepsilon,\cdots,-r_\varepsilon)$ and $\vec b=x$.
Note that $\text{supp}f_1 = [-r_\varepsilon,r_\varepsilon]^n$. Obviously if there exists an $a_j\ge r_\varepsilon$, then $\text{supp}f_1 \cap ([a_1,b_1]\times\cdots\times[a_n,b_n])$ is a set of measure $0$, which means $F(\vec a,\vec b,x)=0$. So we only have to discuss the case all $a_j<r_\varepsilon$. It's also easy to observe that $F(\vec a,\vec b,x)$ is a decreasing function of $b_j$. Since each $b_j\ge x_j>r_\varepsilon$, thus $$F(\vec a,\vec b,x) \le F(\vec a,x,x) = \frac1{\prod\limits_{k=1}^n(x_k-a_k)}\int_{a_1}^{r_\varepsilon}\cdots\int_{a_n}^{r_\varepsilon}f_1(y)dy.$$
For $a_j<r_\varepsilon$, one may find \begin{itemize}
\item If $a_j<-r_\varepsilon$, then
$$\frac{\partial F}{\partial a_j}(\vec a,x,x) = \frac1{x_j-a_j}F(\vec a,x,x)>0;$$
\item If $-r_\varepsilon<a_j<r_\varepsilon$, then
\begin{align*}
& \frac{\partial F}{\partial a_j}(\vec a,x,x) \\
&= \frac1{\prod\limits_{k=1}^n(b_k-a_k)}\Bigg(\frac1{b_j-a_j}\int_{a_1}^{r_\varepsilon}\cdots\int_{a_n}^{r_\varepsilon}f_1(y)dy \\
&\quad -\int\limits_{a_1}^{r_\varepsilon}\cdots\int\limits_{a_{j-1}}^{r_\varepsilon}\int\limits_{a_{j+1}}^{r_\varepsilon}\cdots\int\limits_{a_n}^{r_\varepsilon}f_1(y_1,\cdots,y_{j-1},a_j,y_{j+1},\cdots,y_n)dy_n\cdots dy_{j+1}dy_{j-1}\cdots dy_1\Bigg) \\
&\le \frac{\prod\limits_{k\neq j}^n(r_\varepsilon-a_k)}{\prod\limits_{k=1}^n(b_k-a_k)}\left(\frac{r_\varepsilon-a_j}{x_j-a_j}A_\varepsilon-\frac{\varepsilon}{(2r_\varepsilon)^n}\right) \le \frac{\prod\limits_{k\neq j}^n(r_\varepsilon-a_k)}{\prod\limits_{k=1}^n(b_k-a_k)}\left(\frac{2r_\varepsilon}{R_\varepsilon-r_\varepsilon}A_\varepsilon-\frac{\varepsilon}{(2r_\varepsilon)^n}\right) < 0.
\end{align*} \end{itemize} These arguments deduce that $F(\vec a,x,x) \le F((-r_\varepsilon,\cdots,-r_\varepsilon),x,x)$. Therefore Claim 1 is proved.
For $x\in E'_1$, it follows from Claim 1 that $$\mathcal{M}} \def\e{\text{e}_n(f_1)(x) = \sup_{\vec a,\vec b}F(\vec a,\vec b,x) = \frac1{\prod\limits_{k=1}^n(x_k+r_\varepsilon)}\int_{[-r_\varepsilon,r_\varepsilon]^n}f_1(y)dy.$$
For any $0 < \lambda < {\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}/{((1+2\sqrt\varepsilon)(R_\varepsilon+r_\varepsilon)^n)}$, Lemma \ref{lem21} yields that \begin{align*}
|E_{(1+2\sqrt\varepsilon)\lambda}^1 \cap E'_1|
&= \left|\left\lbrace} \def\rb{\right\rbrace x:x_1,\cdots,x_n>R_\varepsilon,\prod_{k=1}^n(x_k+r_\varepsilon)<\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\rb\right| \\
&= \sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\right)^{n-k} + (-1)^n(R_\varepsilon+r_\varepsilon)^n. \end{align*}
Repeated applications of the same technique to each $E'_i$ lead to the equation
$$|E_{(1+2\sqrt\varepsilon)\lambda}^1 \cap E'_i| = \sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\right)^{n-k} + (-1)^n(R_\varepsilon+r_\varepsilon)^n.$$ Combining with (\ref{eq222}) and (\ref{eq22s1}), we obtain that \begin{align*}
|E_\lambda|
&\ge |E_{(1+2\sqrt\varepsilon)\lambda}^1| - |E_{\sqrt\varepsilon\lambda}^3| - |E_{\sqrt\varepsilon\lambda}^4|
\ge \sum_{i=1}^{2^n}|E_{(1+2\sqrt\varepsilon)\lambda}^1 \cap E'_i| - |E_{\sqrt\varepsilon\lambda}^3| - |E_{\sqrt\varepsilon\lambda}^4| \\
&\ge 2^n\sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1+2\sqrt\varepsilon)\lambda}\right)^{n-k} + (-2)^n(R_\varepsilon+r_\varepsilon)^n \\
&\quad - 2C_n\frac{1+(\log^+\frac1{\sqrt\varepsilon\lambda})^{n-1}}{\lambda}\sqrt\varepsilon. \end{align*} Multiplying $\lambda/(1+(\log^+\frac1\lambda)^{n-1})$ on both sides and letting $\lambda\to0^+$, we conclude that \begin{align*}
\varliminf_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| &\ge \frac{2^nB_{n,1}}{1+2\sqrt\varepsilon}\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} - 2C_n\sqrt\varepsilon \\
&\ge \frac{2^n}{(n-1)!(1+2\sqrt\varepsilon)}\left(\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}-2\varepsilon\right) - 2C_n\sqrt\varepsilon. \end{align*} By the arbitrariness of $\varepsilon$, we deduce that \begin{equation}\label{eq22s2}
\varliminf_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| \ge \frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}. \end{equation}
\noindent{\bf Step 3: Upper estimate for $E_{(1-2\sqrt\varepsilon)\lambda}^1$.}
The argument used in Step 2 also works for $|E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E'|$, one may obtain \begin{equation}\label{eq22s31}
|E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E'| = 2^n\sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1-2\sqrt\varepsilon)\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1-2\sqrt\varepsilon)\lambda}\right)^{n-k} + (-2)^n(R_\varepsilon+r_\varepsilon)^n. \end{equation}
Now we only need to consider the contribution of $|E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')|$.
Note that ${\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E'$ can be written as \begin{equation}\label{eq22s32}
\begin{aligned}
{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E' &= \bigcup_{i=1}^n\bigcup_{\tilde x\in\mathcal A}\left\lbrace} \def\rb{\right\rbrace x:|x_{l_1}|,\cdots,|x_{l_i}|\le R_\varepsilon,|x_{l_{i+1}}|,\cdots,|x_{l_n}|>R_\varepsilon\rb \\
&=: \left(\bigcup_{i=1}^{n-1}\bigcup_{\tilde x\in\mathcal A}E''_{i,\tilde x}\right) \cup [-R_\varepsilon,R_\varepsilon]^n,
\end{aligned} \end{equation} where $\tilde x=(x_{l_1},\cdots,x_{l_n})$, $\mathcal A$ is the family of all permutations of $(x_1,\cdots,x_n)$, and the cardinality of $\mathcal A$ is $n!$.
Similar as in Step 2, we may split $E''_{i,\tilde x}$ into $2^{n-i}$ sets and denote
$$E''_{i,\tilde x,1} = \left\lbrace} \def\rb{\right\rbrace x:|x_{l_1}|,\cdots,|x_{l_i}|\le R_\varepsilon,x_{l_{i+1}},\cdots,x_{l_n}>R_\varepsilon\rb,$$ and the others by $E''_{i,\tilde x,2},\cdots,E''_{i,\tilde x,2^{n-i}}$. See Figure \ref{figE} for $3$-dimensional case.
\begin{figure}
\caption{Part of $E'$,$E''_{1,\widetilde x}$ and $E''_{2,\widetilde x}$ in dimension 3.}
\label{figE}
\end{figure}
Now for $x\in E''_{i,\tilde x,1}$, we define an auxiliary function $h$ which depends on $\varepsilon$ and $\widetilde x$ as
$$h(x) = A_\varepsilon\cdot\chi_{\left\lbrace} \def\rb{\right\rbrace x:|x_{l_1}|,\cdots,|x_{l_i}|\le R_\varepsilon,|x_{l_{i+1}}|,\cdots,|x_{l_n}|\le r_\varepsilon\rb}(x).$$ It is easy to see that $0<f_1\le h$. Then we denote \begin{align*}
& H(\vec a,\vec b,x) = \frac1{\prod\limits_{k=1}^n(b_{l_k}-a_{l_k})}\int_{a_{l_1}}^{b_{l_1}}\cdots\int_{a_{l_n}}^{b_{l_n}}h(y)dy \\
&= A_\varepsilon\prod_{k=1}^i\frac{\min\left\lbrace} \def\rb{\right\rbrace R_\varepsilon,b_{l_k}\rb - \max\left\lbrace} \def\rb{\right\rbrace -R_\varepsilon,a_{l_k}\rb}{b_{l_k}-a_{l_k}}\cdot\prod_{k=i+1}^n\frac{\max\left\lbrace} \def\rb{\right\rbrace r_\varepsilon-a_{l_k},0\rb - \max\left\lbrace} \def\rb{\right\rbrace -r_\varepsilon-a_{l_k},0\rb}{b_{l_k}-a_{l_k}}, \end{align*} and claim that:
\noindent{\bf Claim 2:} $H(\vec a,\vec b,x)$ obtains its maximum at $-R_\varepsilon\le a_{l_k}<b_{l_k}\le R_\varepsilon$ for $1\le k\le i$ and $a_{l_k}=-r_\varepsilon,b_{l_k}=x_{l_k}$ for $i+1\le k\le n$.
In fact, for $1\le k\le i$, then it is obvious that $$\frac{\min\left\lbrace} \def\rb{\right\rbrace R_\varepsilon,b_{l_k}\rb - \max\left\lbrace} \def\rb{\right\rbrace -R_\varepsilon,a_{l_k}\rb}{b_{l_k}-a_{l_k}} \le 1,$$ and the equal sign works only if $-R_\varepsilon\le a_{l_k}<b_{l_k}\le R_\varepsilon$. On the other hand, for $i+1\le j\le n$, the following inequality holds: $$\frac{\max\left\lbrace} \def\rb{\right\rbrace r_\varepsilon-a_{l_k},0\rb - \max\left\lbrace} \def\rb{\right\rbrace -r_\varepsilon-a_{l_k},0\rb}{b_{l_k}-a_{l_k}} \le \frac{2r_\varepsilon}{x_{l_k}+r_\varepsilon},$$ and the equality works only if $a_{l_k}=-r_\varepsilon$ and $b_{l_k}=x_{l_k}$. Then Claim 2 was proved.
By Claim 2, it follows that $$\mathcal{M}} \def\e{\text{e}_n(f_1)(x) \le \mathcal{M}} \def\e{\text{e}_n(h)(x) = \sup_{\vec a,\vec b}H(\vec a,\vec b,x) = A_\varepsilon(2r_\varepsilon)^{n-i}\prod_{k=i+1}^n\frac1{x_{l_k}+r_\varepsilon}.$$ Therefore for $\lambda < A_\varepsilon(2r_\varepsilon)^{n-i}/((1-2\sqrt\varepsilon)(R_\varepsilon+r_\varepsilon)^{n-i})$, we have \begin{align*}
& |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E''_{i,\tilde x,1}| \\
&\le \left|\left\lbrace} \def\rb{\right\rbrace x:|x_{l_1}|,\cdots,|x_{l_i}|\le R_\varepsilon,x_{l_{i+1}},\cdots,x_{l_n}>R_\varepsilon,\prod_{k=i+1}^n{x_{l_k}+r_\varepsilon}<\frac{A_\varepsilon(2r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\rb\right| \\
&= (2R_\varepsilon)^i\cdot\Bigg[\sum_{k=1}^{n-i}B_{n-i,k}\frac{A_\varepsilon(2r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\left(\log\frac{A_\varepsilon(2r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\right)^{n-i-k} + (-1)^{n-i}(R_\varepsilon+r_\varepsilon)^{n-i}\Bigg]. \end{align*}
Similarly, each of $|E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E''_{i,\tilde x,2}|,\cdots,|E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E''_{i,\tilde x,2^{n-i}}|$ enjoys the same bound. Therefore \begin{align*}
& |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E''_{i,\tilde x}| \\
&\le (2R_\varepsilon)^i\cdot\Bigg[\sum_{k=1}^{n-i}B_{n-i,k}\frac{A_\varepsilon(4r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\left(\log\frac{A_\varepsilon(2r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\right)^{n-i-k} + (-2)^{n-i}(R_\varepsilon+r_\varepsilon)^{n-i}\Bigg]. \end{align*} Hence by (\ref{eq22s32}), we get \begin{equation}\label{eq22s33}
\begin{aligned}
& |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')| \le \sum_{i=1}^{n-1}\sum_{\widetilde x\in\mathcal A}|E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E''_{i,\tilde x}| + |[-R_\varepsilon,R_\varepsilon]^n| \\
&\le \sum_{i=1}^{n-1}n!(2R_\varepsilon)^i\cdot\Bigg[\sum_{k=1}^{n-i}B_{n-i,k}\frac{A_\varepsilon(4r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\left(\log\frac{A_\varepsilon(2r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\right)^{n-i-k}\Bigg] \\
&\quad + \sum_{i=1}^{n-1}n!(2R_\varepsilon)^i(-2)^{n-i}(R_\varepsilon+r_\varepsilon)^{n-i} +(2R_\varepsilon)^n.
\end{aligned} \end{equation}
Now it follows from (\ref{eq222}), (\ref{eq22s1}), (\ref{eq22s31}) and (\ref{eq22s33}) that \begin{align*}
|E_\lambda| &\le |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap E'| + |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap ({\mathbb R^n}\backslash E')| + |E_{\sqrt\varepsilon\lambda}^2| + |E_{\sqrt\varepsilon\lambda}^3| \\
&\le 2^n\sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1-2\sqrt\varepsilon)\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{(1-2\sqrt\varepsilon)\lambda}\right)^{n-k} + (-2)^n(R_\varepsilon+r_\varepsilon)^n \\
&\quad + \sum_{i=1}^{n-1}n!(2R_\varepsilon)^i\cdot\Bigg[\sum_{k=1}^{n-i}B_{n-i,k}\frac{A_\varepsilon(4r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\left(\log\frac{A_\varepsilon(2r_\varepsilon)^{n-i}}{(1-2\sqrt\varepsilon)\lambda}\right)^{n-i-k}\Bigg] \\
&\quad + \sum_{i=1}^{n-1}n!(2R_\varepsilon)^i(-2)^{n-i}(R_\varepsilon+r_\varepsilon)^{n-i} + (2R_\varepsilon)^n + 2C_n\frac{1+(\log^+\frac1{\sqrt\varepsilon\lambda})^{n-1}}\lambda\sqrt\varepsilon. \end{align*} Multiplying both sides by $\lambda/(1+(\log^+\frac1\lambda)^{n-1})$ and letting $\lambda\to0^+$, we conclude that \begin{align*}
\varlimsup_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| &\le \frac{2^nB_{n,1}}{1-2\sqrt\varepsilon}\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} + 2C_n\sqrt\varepsilon \\
&\le \frac{2^n}{(n-1)!(1-2\sqrt\varepsilon)}\left(\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}+2\varepsilon\right) + 2C_n\sqrt\varepsilon. \end{align*} Since $\varepsilon$ is arbitrary, it holds that \begin{equation}\label{eq22s34}
\varlimsup_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| \le \frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}. \end{equation}
Finally, combining (\ref{eq22s2}) and (\ref{eq22s34}), we deduce that
$$\lim_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| = \frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}.$$ Then we finish the proof of Theorem \ref{thm1} (i).
\subsection{Proof of Theorem \ref{thm1} (ii)} \ \newline \indent Since $\mathcal{M}} \def\e{\text{e}_n$ is bounded from $L^\infty$ to $L^\infty$, and apparently the best constant is $1$, then for all $\lambda>A_\varepsilon/(1-2\sqrt\varepsilon)$, it is easy to see
$$|E_{(1-2\sqrt\varepsilon)\lambda}^1| = 0.$$
Therefore for $\lambda>\max\left\lbrace} \def\rb{\right\rbrace A_\varepsilon/(1-2\sqrt\varepsilon),1/\sqrt\varepsilon\rb$, it follows from (\ref{eq33}), (\ref{eq34}) that
$$|E_\lambda| \le |E_{(1-2\sqrt\varepsilon)\lambda}^1| + \sum_{i=2}^3|E_{\sqrt\varepsilon\lambda}^i| \le 2C_n\frac{1+(\log^+\frac1{\sqrt\varepsilon\lambda})^{n-1}}{\lambda}\sqrt\varepsilon \le 2C_n\frac{\sqrt\varepsilon}\lambda.$$ Multiplying both sides by $\lambda/(1+(\log^+\frac1\lambda)^{n-1})$ and letting $\lambda\to\infty$, we have
$$\varlimsup_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| \le 2C_n\sqrt\varepsilon.$$ By the arbitrariness of $\varepsilon$, it follows that
$$\lim_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|E_\lambda| = 0.$$ This completes the proof of Theorem \ref{thm1} (ii).
\subsection{Proof of Corollary \ref{cor1}} \ \newline
\indent Now we are ready to prove Corollary \ref{cor1}. Since the family of functions satisfying $f\in L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})$ and $\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} = \|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}$ is nonempty, we have \begin{align*}
\|\mathcal{M}} \def\e{\text{e}_n\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})\to L_{\Phi_n}^{1,\infty}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} &= \sup_{f\in L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}\frac{\|\mathcal{M}} \def\e{\text{e}_nf\|_{L_{\Phi_n}^{1,\infty}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}} \\
&\ge \sup_{\substack{f\in L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}) \\ \|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} = \|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}}\frac{\|\mathcal{M}} \def\e{\text{e}_nf\|_{L_{\Phi_n}^{1,\infty}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}}{\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}} \ge \frac{2^n}{(n-1)!}, \end{align*} where the last inequality is a direct consequence of Theorem \ref{thm1} (i).
On the other hand, note that
$$\lim_{\lambda\to1^-}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(\chi_{B(0,1)})(x)>\lambda\rb|=\|\chi_{B(0,1)}\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})},$$ then it follows that
$$\|\mathcal{M}} \def\e{\text{e}_n\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})\to L_{\Phi_n}^{1,\infty}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \ge \max\left\lbrace} \def\rb{\right\rbrace\frac{2^n}{(n-1)!},1\rb.$$
It is easy to verify that
$$\lim_{\lambda\to1^-}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^c(\chi_{B(0,1)})(x)>\lambda\rb|=\|\chi_{B(0,1)}\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})},$$ which indicates that
$$\|\mathcal{M}} \def\e{\text{e}_n^c\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})\to L_{\Phi_n}^{1,\infty}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \ge1.$$
\section{Results for multilinear strong maximal operators}\label{sec3}
As a natural generalization of the linear case, the multilinear strong maximal operator has attracted a lot of attention. It was first introduced by Grafakos et al. in \cite{GLPT2011}:
$$\mathcal{M}} \def\e{\text{e}_n^{(m)}(f_1,\cdots,f_m)(x) = \sup_{\substack{R \ni x \\ R\in\mathcal{R}}}\prod_{i=1}^m\frac1{|R|}\int_R|f_i(y)|dy.$$ The strong boundedness, endpoint weak type boundedness and weighted boundedness have been established. Subsequently, similar results were extended to the multilinear fractional strong maximal operator by Cao et al. \cite{CXY2017,CXY2018,CXY2019}. For more works about $\mathcal{M}} \def\e{\text{e}_n^{(m)}$, we refer the readers to \cite{LXY2020,ZSX2019,ZX2020}.
It is quite natural to ask the following question:
\noindent{\bf Question}: what kinds of limiting weak type behavior does the multilinear strong maximal operator enjoy?
In this section, we are devoted to study this question. Since the difference between $m$-linear case and bilinear case is not essential, we only demonstrate the bilinear case.
\begin{thm}\label{thm3} Let $f,g\in L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})$, then we have \begin{equation}\label{eq31}
\begin{aligned}
\lim_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}| & \lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g)(x)>\lambda^2\rbrace| \\
&= \frac{2^n}{(n-1)!}(\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})})^{1/2};
\end{aligned} \end{equation} and \begin{equation}\label{eq32}
\lim_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g)(x)>\lambda^2\rbrace|=0. \end{equation} \end{thm}
\begin{proof}
The notations in Section \ref{sec2} will continue to be used in this proof. We may still assume $\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})},\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}>0$. There also exist functions $g_1,g_2,g_3,g_4$ for $g$ similarly as $f_1,f_2,f_3,f_4$ for $f$. We may assume $\max\limits_{y\in[-r_\varepsilon,r_\varepsilon]^n}g_1(y) \le A_\varepsilon$, otherwise we can take $A_\varepsilon = \max\limits_{y\in[-r_\varepsilon,r_\varepsilon]^n}g_1(y)$.
By the sublinearity of $\mathcal{M}} \def\e{\text{e}_n^{(2)}$, it is easy to see \begin{align*}
\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g)(x) \le \mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_1)(x) &+ \sum_{i=2}^3\left(\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_i)(x) + \mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_1)(x)\right) \\
&+ \sum_{i=2}^3\sum_{j=2}^3\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_j)(x) \end{align*} and \begin{align*}
\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g)(x) \ge \mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_1)(x) &- \sum_{i=3}^4\left(\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g_i)(x) + \mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g)(x)\right) \\
&- \sum_{i=3}^4\sum_{j=3}^4\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_j)(x). \end{align*}
So we define \begin{align*}
& \widetilde E_\lambda = \lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g)(x) > \lambda\rbrace; \\
& \widetilde E_\lambda^1 = \lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_1) > \lambda\rbrace; \\
& \widetilde E_\lambda^2 = \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\sum_{i=2}^3\left(\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_i)(x) + \mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_1)(x)\right) > \lambda\bigg\rbrace; \\
& \widetilde E_\lambda^3 = \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\sum_{i=3}^4\left(\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g_i)(x) + \mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g)(x)\right) > \lambda\bigg\rbrace; \\
& \widetilde E_\lambda^4 = \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\sum_{i=2}^4\sum_{j=2}^4\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_j)(x) >\lambda\bigg\rbrace. \end{align*} Therefore the following inclusion relations hold: \begin{equation}\label{eq33}
\widetilde E_{(1+2\sqrt\varepsilon)\lambda^2}^1 \backslash (\widetilde E_{\sqrt\varepsilon\lambda^2}^3 \cup \widetilde E_{\sqrt\varepsilon\lambda^2}^4) \subset \widetilde E_{\lambda^2} \subset \widetilde E_{(1-2\sqrt\varepsilon)\lambda^2}^1 \cup \widetilde E_{\sqrt\varepsilon\lambda^2}^2 \cup \widetilde E_{\sqrt\varepsilon\lambda^2}^4 \end{equation}
We also divide this proof into four parts. The upper estimates for $\widetilde E_{\sqrt\varepsilon\lambda^2}^2$, $\widetilde E_{\sqrt\varepsilon\lambda^2}^3$ and $\widetilde E_{\sqrt\varepsilon\lambda^2}^4$ will be given in Step 1 and Step 2. Step 3 and Step 4 are devoted to demonstrate the lower and upper estimates of $\widetilde E_{(1+2\sqrt\varepsilon)\lambda^2}^1$ and $\widetilde E_{(1-2\sqrt\varepsilon)\lambda^2}^1$.
\noindent{\bf Step 1: Upper estimate for $\widetilde E_{\sqrt\varepsilon\lambda^2}^4$.}
A basic fact $\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_i)(x) \le \mathcal{M}} \def\e{\text{e}_n(f_i)(x)\cdot\mathcal{M}} \def\e{\text{e}_n(g_i)(x)$ yields that \begin{align*}
\widetilde E_{\sqrt\varepsilon\lambda^2}^4 &\subset \bigcup_{i=2}^4\bigcup_{j=2}^4\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_j)>\frac{\sqrt\varepsilon\lambda^2}9\rb \\
&\subset \bigcup_{i=2}^4\bigcup_{j=2}^4\left(\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_i)(x)>\frac{\varepsilon^{1/4}\lambda}3\rb \cup \left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_j)(x)>\frac{\varepsilon^{1/4}\lambda}3\rb\right) \\
&= \bigcup_{i=2}^4\left(\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_i)(x)>\frac{\varepsilon^{1/4}\lambda}3\rb \cup \left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_i)(x)>\frac{\varepsilon^{1/4}\lambda}3\rb\right). \end{align*}
Recall that for $i=2,3,4$, $\|f_i\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})},\|g_i\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} \le \varepsilon$. Thus it follows from (\ref{eq21}) that \begin{equation}\label{eq34}
\begin{aligned}
& |\widetilde E_{\sqrt\varepsilon\lambda^2}^4| \\
&\le \sum_{i=2}^4\left(\left|\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_i)(x)>\frac{\varepsilon^{1/4}\lambda}3\rb\right| + \left|\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_i)(x)>\frac{\varepsilon^{1/4}\lambda}3\rb\right|\right) \\
&\le 6C_n\frac{1+(\log^+\frac3{\varepsilon^{1/4}\lambda})^{n-1}}{\varepsilon^{1/4}\lambda/3}\varepsilon \le 18C_n\frac{1+(\log^+\frac3{\varepsilon^{1/4}\lambda})^{n-1}}\lambda\varepsilon^{3/4} \\
&\le 18C_n\frac{1+(\log^+\frac3{\varepsilon^{3/4}\lambda})^{n-1}}\lambda\varepsilon^{1/4}.
\end{aligned} \end{equation} So we get the upper estimate for $\widetilde E_{\sqrt\varepsilon\lambda^2}^4$.
\noindent{\bf Step 2: Upper estimates for $\widetilde E_{\sqrt\varepsilon\lambda^2}^2$ and $\widetilde E_{\sqrt\varepsilon\lambda^2}^3$.}
Since $f_1$ is controlled by $|f|+f_3+f_4$, it holds that \begin{align*}
\|f_1\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}
&\le \int_{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\Phi_n(|f(y)|+f_3(y)+f_4(y))dy \\
&\le \int_{|f|=\max\left\lbrace} \def\rb{\right\rbrace|f|,f_3,f_4\rb}\Phi_n(3|f(y)|)dy + \sum_{i=3}^4\int_{|f_i|=\max\left\lbrace} \def\rb{\right\rbrace|f|,f_3,f_4\rb}\Phi_n(3|f_i(y)|)dy. \end{align*} The same reasoning as in the beginning of Section \ref{sec2} yields that \begin{align*}
\|f_1\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} &\le 2^n3(1+(\log3)^{n-1})\left(\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} + \sum_{i=3}^4\|f_i\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}\right) \\
&\le 2^{2n+2}\left(\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} + 2\varepsilon\right)
\le 2^{2n+3}\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}, \end{align*}
where the last inequality follows from $0<\varepsilon\ll\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}$. A similar inequality also holds for $g_1$.
It is easy to see that \begin{align*}
\widetilde E_{\sqrt\varepsilon\lambda^2}^2
&\subset \bigcup_{i=2}^3\bigg(\bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_i)(x)>\frac{\sqrt\varepsilon\lambda^2}4\bigg\rbrace \cup \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_i,g_1)(x)>\frac{\sqrt\varepsilon\lambda^2}4\bigg\rbrace\bigg) \\
&\subset \bigcup_{i=2}^3\Bigg(\bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_1)(x)>\frac\lambda{2\varepsilon^{1/4}}\bigg\rbrace \cup \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_i)(x)>\frac{\varepsilon^{3/4}\lambda}2\bigg\rbrace \\
&\qquad\qquad \cup \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_i)(x)>\frac{\varepsilon^{3/4}\lambda}2\bigg\rbrace \cup \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_1)(x)>\frac\lambda{2\varepsilon^{1/4}}\bigg\rbrace\Bigg) \\
&= \left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_1)(x)>\frac\lambda{2\varepsilon^{1/4}}\rb \cup \left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_1)(x)>\frac\lambda{2\varepsilon^{1/4}}\rb \\
&\qquad \cup \bigcup_{i=2}^3\bigg(\bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(f_i)(x)>\frac{\varepsilon^{3/4}\lambda}2\bigg\rbrace \cup \bigg\lbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_i)(x)>\frac{\varepsilon^{3/4}\lambda}2\bigg\rbrace\bigg). \end{align*} Therefore by Lemma \ref{lem21} we can get the upper estimate for $\widetilde E_{\sqrt\varepsilon\lambda^2}^2$: \begin{equation}\label{eq35}
\begin{aligned}
|\widetilde E_{\sqrt\varepsilon\lambda^2}^2|
&\le 2C_n(\|f_1\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} + \|g_1\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})})\frac{1+(\log^+\frac{2\varepsilon^{1/4}}\lambda)^{n-1}}\lambda\varepsilon^{1/4} \\
&\quad + 8C_n\frac{1+(\log^+\frac2{\varepsilon^{3/4}\lambda})^{n-1}}\lambda\varepsilon^{1/4} \\
&\le (2^{2n+4}\widetilde C_n+8C_n)\frac{1+(\log^+\frac3{\varepsilon^{3/4}\lambda})^{n-1}}\lambda\varepsilon^{1/4},
\end{aligned} \end{equation}
where $\widetilde C_n = C_n\left(\|f\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})} + \|g\|_{L_{\Phi_n}({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}\right)$.
Applying the same method, we also obtain the upper estimate for $\widetilde E_{\sqrt\varepsilon\lambda^2}^3$: \begin{equation}\label{eq36}
|\widetilde E_{\sqrt\varepsilon\lambda^2}^3| \le (2\widetilde C_n+8C_n)\frac{1+(\log^+\frac3{\varepsilon^{3/4}\lambda})^{n-1}}\lambda\varepsilon^{1/4}. \end{equation}
\noindent{\bf Step 3: Lower estimate for $\widetilde E_{(1+2\sqrt\varepsilon)\lambda^2}^1$.}
Define $G(\vec a,\vec b,x)$ by $$G(\vec a,\vec b,x) = \frac1{\prod\limits_{k=1}^n(b_k-a_k)}\int_{a_1}^{b_1}\cdots\int_{a_n}^{b_n}g_1(y)dy.$$
Since for $x\in E_1'$, it holds that $$\max_{\vec a,\vec b}F(\vec a,\vec b,x) = F((-r_\varepsilon,\cdots,-r_\varepsilon),x,x),$$ $$\max_{\vec a,\vec b}G(\vec a,\vec b,x) = G((-r_\varepsilon,\cdots,-r_\varepsilon),x,x),$$ thus we have \begin{align*}
\mathcal{M}} \def\e{\text{e}_n^{(2)}(f_1,g_1)(x) &= \sup_{\vec a,\vec b}F(\vec a,\vec b,x)G(\vec a,\vec b,x) \\
&= \frac1{\prod\limits_{k=1}^n(x_k+r_\varepsilon)^2}\int_{[-r_\varepsilon,r_\varepsilon]^n}f_1(y)dy\int_{[-r_\varepsilon,r_\varepsilon]^n}g_1(y)dy. \end{align*} This implies that for $\lambda$ small enough, we obtain \begin{align*}
& |\widetilde E_{(1+2\sqrt\varepsilon)\lambda^2}^1 \cap E_1'|
= \left|\left\lbrace} \def\rb{\right\rbrace x:x_1,\cdots,x_n>R_\varepsilon,\prod_{k=1}^n(x_k+r_\varepsilon)<\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{\sqrt{1+2\sqrt\varepsilon}\lambda}\rb\right| \\
&= \sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{(n-1)!\sqrt{1+2\sqrt\varepsilon}\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{\sqrt{1+2\sqrt\varepsilon}\lambda}\right)^{n-k}+(-1)^n(R_\varepsilon+r_\varepsilon)^n. \end{align*}
So does $|\widetilde E_{(1+2\sqrt\varepsilon)\lambda^2}^1 \cap E_i'|$ for $i=2,\cdots,2^n$.
Combining these with (\ref{eq33}), (\ref{eq34}) and (\ref{eq36}) yields that \begin{align*}
|\widetilde E_{\lambda^2}|
&\ge 2^n\sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{(n-1)!\sqrt{1+2\sqrt\varepsilon}\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{\sqrt{1+2\sqrt\varepsilon}\lambda}\right)^{n-k} \\
&\quad + (-1)^n(R_\varepsilon+r_\varepsilon)^n - (2\widetilde C_n + 26C_n)\frac{1+(\log^+\frac3{\varepsilon^{3/4}\lambda})^{n-1}}\lambda\varepsilon^{1/4}. \end{align*} Multiplying both sides by $\lambda/(1+(\log^+\frac1\lambda)^{n-1})$ and letting $\lambda\to0^+$, we deduce that \begin{align*}
& \varliminf_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\widetilde E_{\lambda^2}| \ge \frac{2^nB_{n,1}\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{\sqrt{1+2\sqrt\varepsilon}} - (2\widetilde C_n+26C_n)\varepsilon^{1/4} \\
&\ge \frac{2^n}{(n-1)!\sqrt{1+2\sqrt\varepsilon}}\left(\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}-2\varepsilon\right)^{1/2}\left(\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}-2\varepsilon\right)^{1/2} - (2\widetilde C_n+26C_n)\varepsilon^{1/4}. \end{align*} By the arbitrariness of $\varepsilon$, we get the lower estimate as follows: \begin{equation}\label{eq37}
\varliminf_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\widetilde E_{\lambda^2}| \ge \frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}. \end{equation}
\noindent{\bf Step 4: Upper estimate for $\widetilde E_{(1-2\sqrt\varepsilon)\lambda^2}^1$.}
The same arguments as in Step 3 of Section \ref{sec2} imply that \begin{equation}\label{eq38}
\begin{aligned}
& |\widetilde E_{(1-2\sqrt\varepsilon)\lambda^2}^1 \cap E'| \\
&= 2^n\sum_{k=1}^nB_{n,k}\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{(n-1)!\sqrt{1-2\sqrt\varepsilon}\lambda}\left(\log\frac{\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{\sqrt{1-2\sqrt\varepsilon}\lambda}\right)^{n-k} \\
&\quad + (-1)^n(R_\varepsilon+r_\varepsilon)^n.
\end{aligned} \end{equation}
It is easy to verify that \begin{align*}
& |\widetilde E_{(1-2\sqrt\varepsilon)\lambda^2}^1 \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')| \le |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')| + |\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_1)(x)>\lambda\rb \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')| \\
&\le |E_{(1-2\sqrt\varepsilon)\lambda}^1 \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')| + |\left\lbrace} \def\rb{\right\rbrace x\in{\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}:\mathcal{M}} \def\e{\text{e}_n(g_1)(x)>(1-2\sqrt\varepsilon)\lambda\rb \cap ({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}}\backslash E')|. \end{align*} From Step 3 of Section \ref{sec2} we know that the right-hand side, multiplied by $\lambda/(1+(\log^+\frac1\lambda)^{n-1})$, converges to $0$ as $\lambda\to0^+$. Then, by (\ref{eq33}), (\ref{eq34}), (\ref{eq35}) and (\ref{eq38}), we have \begin{align*}
& \varlimsup_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\widetilde E_{\lambda^2}| \le \frac{2^nB_{n,1}\|f_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g_1\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}}{\sqrt{1-2\sqrt\varepsilon}} + (2^{2n+4}\widetilde C_n+26C_n)\varepsilon^{1/4} \\
&\le \frac{2^n}{(n-1)!\sqrt{1-2\sqrt\varepsilon}}\left(\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}+2\varepsilon\right)^{1/2}\left(\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}+2\varepsilon\right)^{1/2} + (2^{2n+4}\widetilde C_n+26C_n)\varepsilon^{1/4}. \end{align*} The upper estimate follows from the arbitrariness of $\varepsilon$: \begin{equation}\label{eq39}
\varlimsup_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\widetilde E_{\lambda^2}| \le \frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}. \end{equation}
Combining (\ref{eq37}) and (\ref{eq39}), we deduce that
$$\lim_{\lambda\to0^+}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}|\widetilde E_{\lambda^2}| = \frac{2^n}{(n-1)!}\|f\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}\|g\|_{L^1({\mathbb R^n}} \def\sn{{\mathbb S^{n-1}})}^{1/2}.$$ The proof of (\ref{eq31}) is finished.
Finally, by Theorem \ref{thm1} (ii), (\ref{eq32}) follows from \begin{align*}
0 &\le \lim_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}\left|\left\lbrace} \def\rb{\right\rbrace x:\mathcal{M}} \def\e{\text{e}_n^{(2)}(f,g)(x)>\lambda^2\rb\right| \\
&\le \lim_{\lambda\to\infty}\frac\lambda{1+(\log^+\frac1\lambda)^{n-1}}\left(\left|\left\lbrace} \def\rb{\right\rbrace x:\mathcal{M}} \def\e{\text{e}_n(f)(x)>\lambda\rb\right| + \left|\left\lbrace} \def\rb{\right\rbrace x:\mathcal{M}} \def\e{\text{e}_ng(x)>\lambda\rb\right|\right) =0. \end{align*}
\end{proof}
\end{document} |
\begin{document}
\vspace*{-3mm}
\section*{The McMillan theorem for colored branching\\[-2pt] processes and dimensions of random fractals}
\centerline{\Large\it Victor I. Bakhtin}
\centerline{\large bakhtin@tut.by}
\renewcommand{\abstractname}{} \begin{abstract} For simplest colored branching processes we prove an analog to the McMillan theorem and calculate Hausdorff dimensions of random fractals defined in terms of the limit behavior of empirical measures generated by finite genetic lines. In this setting the role of Shannon's entropy is played by the Kullback--Leibler divergence and the Hausdorff dimensions are computed by means of the so-called Billingsley--Kullback entropy, defined in the paper.
{\bf Keywords:} {\it random fractal, Hausdorff dimension, colored branching process, basin of the empirical measure, spectral potential, Billingsley--Kullback entropy, Kullback action, maximal dimension principle}
{\bf 2010 Mathematics Subject Classification:\,} 28A80, 37F35, 60J80
\end{abstract}
Let us consider the finite set $X =\{1,\dots,r\}$, whose elements denote different colors, and a vector $(\mu(1),\dots,\mu(r)) \in [0,1]^r$. A simplest colored branching process can be defined as an evolution of a population in which all individuals live the same fixed time and then, when the lifetime ends, each individual generates (independently of others) a random set of ``children'' containing individuals of colors $1$, \dots, $r$ with probabilities $\mu(1)$, \dots, $\mu(r)$ respectively. We will suppose that the evolution starts with a unique initial individual. It is suitable to represent this process as a random genealogical tree with individuals as vertices and each vertex connected by edges with its children. Denote by $X_n$ the set of all genetic lines of length $n$ (that survive up to generation $n$). The colored branching process can degenerate (when it turns out that starting from some $n$ all the sets $X_n$ are empty) or, otherwise, evolve endlessly. Every genetic line $x =(x_1,\dots,x_n)\in X_n$ generates an empirical measure $\delta_{x,n}$ on the set of colors $X$ by the following rule: for each $i\in X$ the value of $\delta_{x,n}(i)$ is the fraction of those coordinates of the vector $(x_1,\dots,x_n)$ that coincide with $i$.
Let $\nu$ be an arbitrary probability measure on $X$. The analog to the McMillan theorem that will be proved below asserts that under condition of nondegeneracy of the colored branching process the cardinality of the set $\{\pin x\in X_n\mid \delta_{x,n}\approx\nu\pin\}$ has an almost sure asymptotics of order $e^{-n\rho(\nu,\mu)}$, where \begin{equation*}
\rho(\nu,\mu) =\sum_{i\in X} \nu(i)\ln\frac{\nu(i)}{\mu(i)}. \end{equation*} Formally, the value of $\rho(\nu,\mu)$ coincides with the usual Kullback--Leibler divergence and differs from the latter only in the fact that in our setting the measure $\mu$ is not probability and so $\rho(\nu,\mu)$ can be negative.
In the paper we also investigate random fractals defined in terms of the limit behavior of the sequence of empirical measures $\delta_{x,n}$. Let $X_\infty$ be the set of infinite genetic lines. Fix an arbitrary vector $\theta =(\theta(1),\dots,\theta(r)) \in (0,1)^r$ and define the following metric on $X_\infty$: \begin{equation*}
\dist(x,y) =\prod_{t=1}^n \theta(x_t),\quad\ \text{where}\ \
n=\inf\pin\{\pin t\mid x_t\ne y_t\pin\} -1. \end{equation*}
Denote by $V$ any set of probability measures on $X$. It will be proved, in particular, that under the condition of nondegeneracy of the colored branching process \begin{equation*}
\dim_H\pin\{\pin x\in X_\infty \mid \delta_{x,n}\to V\pin\} \pin=\pin
\sup_{\nu\in V} d(\nu,\mu,\theta) \end{equation*} almost surely, where $d(\nu,\mu,\theta)$ is the \emph{Billingsley--Kullback entropy} defined below.
The paper can be divided into two parts. The first one (sections 1--5) contains known results; some of them have been modified in a certain way for the convenience of use in what follows. Anyway, most of them are proved below for the completeness and convenience of the reader. The second part (sections 6--9) contains new results.
In addition, we note that all the results of the paper can be easily extended to Moran's self-similar geometric constructions in $\mathbb R^n$, but we will not do that.
\section{The spectral potential}\label{1..}
Let $X$ be an arbitrary finite set. Denote by $B(X)$ the space of all real-valued functions on $X$, by $M(X)$ the set of all positive measures on $X$, and by $M_1(X)$ the collection of all probability distributions on $X$.
Every measure $\mu\in M(X)$ determines a linear functional on $B(X)$ of the form \begin{equation*}
\mu[f] = \int_X f\,d\mu =\sum_{x\in X} f(x)\mu(x). \end{equation*} It is easily seen that this functional is \emph{positive} (i.\,e., takes nonnegative values on nonnegative functions). If, in addition, the measure $\mu$ is probability then this functional is normalized (takes the value $1$ on the unit function).
Consider the nonlinear functional \begin{equation}\label{1,,1}
\lambda(\varphi,\mu) =\ln \mu[e^\varphi], \end{equation} where $\varphi\in B(X)$ and $\mu\in M(X)$. We will call it the \emph{spectral potential}. Evidently, it is monotone (if $\varphi\ge \psi$ then $\lambda(\varphi,\mu)\ge \lambda(\psi,\mu)$), additively homogeneous (that is, $\lambda(\varphi+t,\mu) =\lambda(\varphi,\mu) +t$ for each constant $t$), and analytic in $\varphi$.
Define a family of probability measures $\mu_\varphi$ on $X$, depending on the functional parameter $\varphi\in B(X)$, by means of the formula \begin{equation*}
\mu_{\varphi}[f] = \frac{\mu[e^\varphi f]}{\mu[e^\varphi]}, \qquad f\in B(X). \end{equation*} Evidently, each measure $\mu_\varphi$ is equivalent to $\mu$ and has the density $e^{\varphi - \lambda(\varphi,\mu)}$ with respect to $\mu$.
Let us compute the first two derivatives of the spectral potential with respect to the argument $\varphi$. Introduce the notation \begin{equation*}
\lambda'(\varphi,\mu)[f] =\frac{d\lambda(\varphi+tf,\mu)}{d\pin t}\biggr|_{t=0}. \end{equation*} This is nothing more than the derivative of the spectral potential in the direction $f$ at the point $\varphi$. An elementary computation shows that \begin{equation}\label{1,,2}
\lambda'(\varphi,\mu)[f] =\frac{d\ln \mu\bigl[e^{\varphi+tf}\bigr]}{d\pin t}\biggr|_{t=0} =
\frac{\mu[e^\varphi f]}{\mu[e^\varphi]} = \mu_{\varphi}[f]. \end{equation} In other words, the derivative $\lambda'(\varphi,\mu)$ coincides with the probability measure $\mu_\varphi$. Then put \begin{equation*}
\lambda''(\varphi,\mu)[f,g] =
\frac{\partial^2\lambda(\varphi+tf+sg,\mu)}{\partial s\,\partial\pin t}\biggr|_{s,t=0} \end{equation*}
\noindent and compute this derivative using just obtained formula \eqref{1,,2}: \begin{equation*}
\lambda''(\varphi,\mu)[f,g] =\frac{\partial}{\partial s}
\biggl(\frac{\mu[e^{\varphi+sg}f]}{\mu[e^{\varphi+sg}]}\pin\biggr)\biggr|_{s=0} =
\frac{\mu[e^\varphi fg]}{\mu[e^\varphi]} -\frac{\mu[e^\varphi f]\pin
\mu[e^\varphi g]}{(\mu[e^\varphi])^2} = \mu_{\varphi}[fg] -\mu_{\varphi}[f]\pin \mu_{\varphi}[g]. \end{equation*}
In probability theory the expression $\mu_\varphi[f]$ is usually called the expectation of the random variable $f$ with respect to the probability distribution $\mu_\varphi$, and the expression $\mu_{\varphi}[fg] -\mu_{\varphi}[f]\pin \mu_{\varphi}[g]$ is called the covariance of random variables $f$ and $g$. In particular, the second derivative \begin{equation*}
\frac{d^2\lambda(\varphi+tf,\mu)}{d\pin t^2}\biggr|_{t=0} =
\mu_{\varphi}\bigl[f^2\bigr] -\mu_{\varphi}[f]^2
=\mu_\varphi\bigl[(f -\mu_{\varphi}[f])^2\bigr] \end{equation*} is equal to the variance of the random variable $f$ with respect to the distribution $\mu_\varphi$. Since the variance is nonnegative it follows that the spectral potential is convex in $\varphi$.
\section{The Kullback action}\label{2..}
Denote by $B^*(X)$ the space of all linear functionals on $B(X)$. Then, obviously, \begin{equation*}
M_1(X)\subset M(X)\subset B^*(X). \end{equation*}
The following functional of two arguments $\nu\in B^*(X)$ and $\mu\in M(X)$ will be called the \emph{Kullback action}:
\begin{equation} \label{2,,1}
\rho(\nu,\mu) =
\begin{cases}
\mu[\varphi\ln\varphi] =\nu[\ln\varphi], &\text{if \,$\nu\in M_1(X)$ \,and \,$\nu=\varphi \mu$},
\\[2pt]
+\infty &\text{in all other cases}.
\end{cases} \end{equation} To be more precise, the ``all other cases'' fit into at least one of the three categories: \,a) singular w.\,r.\,t. $\mu$ probability measures $\nu$, \,b) nonnormalized functionals $\nu$, and \,c)~nonpositive functionals $\nu$.
In the literature, as far as I know, this functional has been defined only for probability measures $\nu$ and $\mu$. Different authors call it differently: the relative entropy, the deviation function, the Kullback--Leibler information function, the Kullback--Leibler divergence.
When $\nu$ is a probability measure the Kullback action can be defined by the explicit formula \begin{equation} \label{2,,2}
\rho(\nu,\mu) =\sum_{x\in X} \nu(x)\ln\frac{\nu(x)}{\mu(x)}. \end{equation}
\noindent In particular, if $\mu(x) \equiv 1$ then the Kullback action differs only in sign from Shannon's entropy \begin{equation} \label{2,,3}
H(\nu) = -\sum_{x\in X} \nu(x)\ln\nu(x). \end{equation}
In the case of probability measure $\mu$ the Kullback action is nonnegative and vanishes only if $\nu =\mu$. Indeed, if the functional $\nu$ is not an absolutely continuous with respect to $\mu$ probability measure then $\rho(\nu,\mu) =+\infty$. Otherwise, if $\nu$ is a probability measure of the form $\nu =\varphi\mu$ then from Jensen's inequality and strict convexity of the function $f(x) =x\ln x$ it follows that \begin{equation*}
\rho(\nu,\mu) =\mu[f(\varphi)] \ge f\bigl(\mu[\varphi]\bigr) =0 \end{equation*} (so long as $\mu[\varphi] =\nu[1] =1$), and the equality $\rho(\nu,\mu) =0$ holds if and only if $\varphi$ is constant almost everywhere and, respectively, $\nu$ coincides with $\mu$.
Every measure $\mu\in M(X)$ can be put down in the form $\mu =c\mu_1$, where $c =\mu[1]$ and $\mu_1\in M_1(X)$. If $\nu\in M_1(X)$ then \eqref{2,,2} implies \begin{equation} \label{2,,4}
\rho(\nu,\mu) =\rho(\nu,\mu_1) -\ln c \ge -\ln\mu[1]. \end{equation} In case $\nu\notin M_1(X)$ this inequality holds all the more since the Kullback action is infinite.
\begin{theorem}\label{2..1} The spectral potential and the Kullback action satisfy the Young inequality \begin{equation}\label{2,,5}
\rho(\nu,\mu)\ge \nu[\psi] -\lambda(\psi,\mu), \end{equation}
\noindent that turns into equality if and only if\/ $\nu =\mu_\psi$. \end{theorem}
\emph{Proof.} If $\rho(\nu,\mu) =+\infty$ then the Young inequality is trivial. If $\rho(\nu,\mu) <+\infty$ then by the definition of Kullback action the functional $\nu$ is an absolutely continuous probability measure of the form $\nu =\varphi\mu$, where $\varphi$ is a nonnegative density. In this case \begin{equation*}
\lambda(\psi,\mu) \pin=\pin\ln \mu[e^\psi] \pin\ge\pin \ln\! \intop_{\varphi>0} \! e^\psi\, d\mu
\pin=\pin \ln\! \intop_{\varphi>0}\! e^{\psi-\ln\varphi}\,d\nu \pin=\pin
\ln \nu\bigl[e^{\psi -\ln\varphi}\bigr] \pin\ge\pin \nu[\psi-\ln\varphi]
\end{equation*} (at the last step we have used Jensen's inequality and concavity of the logarithm function). Since $\rho(\nu,\mu) =\nu[\ln \varphi]$, this formula implies inequality \eqref{2,,5}.
Recall that $\mu_\psi =e^{\psi-\lambda(\psi,\mu)}\mu$. So if $\nu =\mu_\psi$ then by definition \begin{equation*}
\rho(\nu,\mu) =\nu[\psi-\lambda(\psi,\mu)] =\nu[\psi]-\lambda(\psi,\mu). \end{equation*} Vice versa, assume that $\rho(\nu,\mu) =\nu[\psi] -\lambda(\psi,\mu)$. Then subtract from the above equality the Young inequality $\rho(\nu,\mu)\ge \nu[\varphi] -\lambda(\varphi,\mu)$. We obtain \begin{equation*}
\lambda(\varphi,\mu) -\lambda(\psi,\mu) \ge \nu[\varphi-\psi]. \end{equation*} Since this inequality holds for every $\varphi\in B(X)$, it follows that $\nu =\lambda'(\psi,\mu)$. Finally, $\lambda'(\psi,\mu)$ coincides with $\mu_\psi$. \qed
\begin{theorem}\label{2..2} The Kullback action\/ $\rho(\nu,\mu)$ is the Legendre transform w. r. t.\/ $\nu$ of the spectral potential\/$:$ \begin{equation}\label{2,,6}
\rho(\nu,\mu) \pin= \sup_{\psi\in B(X)}\bigl\{\nu[\psi] -\lambda(\psi,\mu)\bigr\}, \qquad
\nu\in B^*(X),\ \ \mu\in M(X). \end{equation} \end{theorem}
\emph{Proof.} By the Young inequality the left hand side of \eqref{2,,6} is not less than the right one. Therefore it is enough to associate with any functional $\nu\in B^*(X)$ a family of functions $\psi_t$, depending on the real-valued parameter $t$, on which the equality in \eqref{2,,6} is attained.
At first, suppose that $\nu$ is an absolutely continuous with respect to $\mu$ probability measure of the form $\nu =\varphi\mu$, where $\varphi$ is a nonnegative density. Consider the family of functions \begin{equation*}
\psi_t(x) \,=\,
\begin{cases}
\ln\varphi(x),&\text{if}\ \ \varphi(x)> 0,\\[2pt]
-t,&\text{if}\ \ \varphi(x) =0.
\end{cases} \end{equation*}
\noindent When $t\to +\infty$ we have the following relations \begin{gather*}
\mu\bigl[e^{\psi_t}\bigr] \pin=\intop_{\varphi>0} \! \varphi\,d\mu\pin
+\intop_{\varphi=0} \!\pin e^{-t}\,d\mu\pin\ \longrightarrow\
\int_X \varphi\,d\mu \,=\,1,\\[6pt]
\nu[\psi_t] \pin=\intop_{\varphi>0} \!\varphi\ln\varphi\,d\mu\pin
+\intop_{\varphi=0} \! -t\varphi\,d\mu \pin=\pin \mu[\varphi\ln\varphi],\\[6pt]
\nu[\psi_t] -\lambda(\psi_t,\mu) \pin=\pin \nu[\psi_t] -\ln \mu\bigl[e^{\psi_t}\bigr]\,
\longrightarrow\, \mu[\varphi\ln\varphi] \pin=\pin \rho(\nu,\mu), \end{gather*} and so \eqref{2,,6} is proved.
In all the other cases, when $\nu$ is not an absolutely continuous probability measure, by definition $\rho(\nu,\mu) =+\infty$. Let us examine these cases one after another.
If $\nu$ is a singular relative to $\mu$ probability measure, then there exists $x_0\in X$ such that $\mu(x_0) =0$ and $\nu(x_0) >0$. In this case consider the family of functions \begin{equation*}
\psi_t(x) \,=\,
\begin{cases}
t, &\text{if}\ \ x=x_0, \\[2pt]
0, &\text{if}\ \ x\ne x_0.
\end{cases} \end{equation*} It is easily seen that \begin{equation*}
\nu[\psi_t] -\lambda(\psi_t,\mu) \pin\ge\pin t\nu(x_0) -\ln\mu\bigl[e^{\psi_t}\bigr]
\pin\ge\pin t\nu(x_0) - \ln\mu[1]. \end{equation*} The right hand side of the above formula goes to $+\infty$ while $t$ increases and \eqref{2,,6} holds again.
If the functional $\nu$ is not normalized then put $\psi_t =t$. Then the expression \begin{equation*}
\nu[\psi_t] -\lambda(\psi_t,\mu)\pin=\pin\nu[t] -\ln\mu[e^t]\pin=\pin t\pin(\nu[1]-1)-\ln\mu[1] \end{equation*} is unbounded from the above and hence \eqref{2,,6} is still valid.
Finally, if the functional $\nu$ is not positive then there exists a nonnegative function $\varphi$ such that $\nu[\varphi] <0$. Consider the family $\psi_t =-t\varphi$, where $t>0$. For it \begin{equation*}
\nu[\psi_t] -\lambda(\psi_t,\mu) \pin\ge\pin -t\nu[\varphi] -\lambda(0,\mu)
\, \longrightarrow\, +\infty \end{equation*} as $t\to +\infty$, and \eqref{2,,6} remains in force. \qed
\begin{corollary}\label{2..3} The functional\/ $\rho(\,\cdot\,,\mu)$ is convex and lower semicontinuous on\/ $B^*(X)$. \end{corollary}
\emph{Proof.} These are properties of the Legendre transform. \qed
\section{The local large deviations principle and\\[-2pt] the McMillan theorem}\label{3..}
As above, we keep to the following notation: $X$ is a finite set, $B(X)$ stands for the space of real-valued functions on $X$, $B^*(X)$ is the space of linear functionals on $B(X)$, $M_1(X)$ is the set of all probability measures on $X$, and $M(X)$ is the set of all positive measures on $X$.
To each finite sequence $x =(x_1,\dots,x_n)\in X^n$ let us correspond an \emph{empirical measure} $\delta_{x,n}\in M_1(X)$ which is supported on the set $\{x_1,\dots,x_n\}$ and assigns to every point $x_i$ the measure $1/n$. The integral of any function $f$ with respect to $\delta_{x,n}$ looks like \begin{equation*}
\delta_{x,n}[f] =\frac{f(x_1)+\,\dotsc\,+f(x_n)}{n}. \end{equation*}
Denote by $\mu^n$ Cartesian power of a measure $\mu\in M(X)$, which is defined on $X^n$.
\begin{theorem}[\hbox spread -4pt {the local large deviations principle}] \label{3..1} For any measure\/ $\mu\in M(X)$, any functional\/ $\nu\in B^*(X)$, and\/ $\eps>0$ there exists a neighborhood\/ $O(\nu)$ such that \begin{equation}\label{3,,1}
\mu^n\bigl\{x\in X^n\bigm| \delta_{x,n}\in O(\nu)\bigr\} \pin\le\pin e^{-n(\rho(\nu,\mu) -\eps)}. \end{equation} On the other hand, for any\/ $\eps>0$ and any neighborhood\/ $O(\nu)$ the following asymptotic estimate holds\/$:$ \begin{equation}\label{3,,2}
\mu^n\bigl\{x\in X^n\bigm| \delta_{x,n}\in O(\nu)\bigr\} \pin\ge\pin e^{-n(\rho(\nu,\mu) +\eps)},
\qquad n\to\infty. \end{equation} \end{theorem}
If $\rho(\nu,\mu) =+\infty$, then by the difference $\rho(\nu,\mu)-\eps$ in \eqref{3,,1} we mean an arbitrary positive number.
In the case of probability measure $\mu$ Theorem \ref{3..1} is a partial case of Varadhan's large deviations principle (whose explicit formulation can be found, e.\,g., in \cite{Deuschel-Stroock} and \cite{Varadhan}). Therefore, this theorem can be deduced from Varadhan's large deviations principle by means of mere renormalization of $\mu$. Nevertheless, we will prove it independently for the purpose of completeness.
\emph{Proof.} By Theorem \ref{2..2} for any $\eps>0$ there exists $\psi\in B(X)$ such that \begin{equation}\label{3,,3}
\rho(\nu,\mu) -\eps/2 \pin<\pin \nu[\psi] -\lambda(\psi,\mu). \end{equation} Consider the probability measure $\mu_\psi =e^{\psi-\lambda(\psi,\mu)}\mu$. Obviously, \begin{equation}\label{3,,4}
\frac{d\mu^n(x)}{d\mu_\psi^n(x)} \pin=\pin \prod_{i=1}^n \frac{d\mu(x_i)}{d\mu_\psi(x_i)}
\pin=\pin \prod_{i=1}^n e^{\lambda(\psi,\mu) -\psi(x_i)} \pin=\pin
e^{n(\lambda(\psi,\mu) -\delta_{x,n}[\psi])}. \end{equation} Define a neighborhood of the functional $\nu$ as follows: \begin{equation*}
O(\nu) \pin=\pin \bigl\{\pin \delta\in B^*(X)\bigm| \delta[\psi] >\nu[\psi] -\eps/2\pin\bigr\}. \end{equation*} Then it follows from \eqref{3,,4} and \eqref{3,,3} that under the condition $\delta_{x,n}\in O(\nu)$ \begin{equation*}
\frac{d\mu^n(x)}{d\mu_\psi^n(x)} \pin<\pin
e^{n(\lambda(\psi,\mu)-\nu[\psi] +\eps/2)} \pin<\pin e^{n(-\rho(\nu,\mu)+\eps)}. \end{equation*} Consequently, \begin{equation*}
\mu^n\bigl\{x\in X^n\bigm| \delta_{x,n}\in O(\nu)\bigr\} \, =
\intop_{\delta_{x,n}\in O(\nu)}\hspace{-1 em} d\mu^n(x) \,\le
\intop_{\delta_{x,n}\in O(\nu)}\hspace{-1 em} e^{n(-\rho(\nu,\mu)+\eps)}\,d\mu_\psi^n(x)
\,\le\, e^{-n(\rho(\nu,\mu)-\eps)}. \end{equation*} Thus the first part of Theorem \ref{3..1} is proved.
The estimate \eqref{3,,2} is trivial if $\rho(\nu,\mu) =+\infty$. So it is enough to prove it only in the case when $\nu$ is a probability measure of the form $\nu =\varphi\mu$ and the Kullback action $\rho(\nu,\mu) =\nu[\ln\varphi]$ is finite. Fix any number $\eps>0$ and neighborhood $O(\nu)$. Define the sets \begin{equation*}
Y_n \pin=\pin \bigl\{\pin x\in X^n\bigm| \delta_{x,n}\in O(\nu),\ \ \big|\delta_{x,n}[\ln\varphi]
-\nu[\ln\varphi]\big| <\eps/2\pin\bigr\} \end{equation*}
\noindent (the last inequality in the braces means that $\varphi(x_i)>0$ at each point of the sequence $x =(x_1,\dots,x_n)$). Note that for $x\in Y_n$ \begin{equation*}
\frac{d\mu^n(x)}{d\nu^n(x)} \pin=\pin \prod_{i=1}^n \frac{d\mu(x_i)}{d\nu(x_i)} \pin=\pin
\prod_{i=1}^n \frac{1}{\varphi(x_i)} \pin=\pin e^{-n\delta_{x,n}[\ln\varphi]} \pin>\pin
e^{-n(\nu[\ln\varphi]+\eps/2)}. \end{equation*} Consequently, \begin{equation}\label{3,,5}
\mu^n(Y_n) \pin=\pin \int_{Y_n} d\mu^n(x) \pin\ge\pin
\int_{Y_n} e^{-n(\nu[\ln\varphi]+\eps/2)}\,d\nu^n(x) \pin=\pin
e^{-n\rho(\nu,\mu) -n\eps/2}\pin\nu^n(Y_n). \end{equation} By the Law of large numbers $\nu^n(Y_n)\to 1$. Hence \eqref{3,,5} implies \eqref{3,,2}. \qed
\begin{corollary}[the McMillan theorem] \label{3..2} For any probability measure\/ $\nu\in M_1(X)$ and\/ $\eps>0$ there exists a neighborhood\/ $O(\nu)$ such that \begin{equation*}
\#\{\pin x =(x_1,\dots,x_n)\in X^n\mid \delta_{x,n}\in O(\nu)\pin\} \pin\le\pin e^{n(H(\nu) +\eps)}. \end{equation*} On the other hand, for any neighborhood\/ $O(\nu)$ and\/ $\eps>0$ \begin{equation*}
\#\{\pin x \in X^n\mid \delta_{x,n}\in O(\nu)\pin\} \pin\ge\pin e^{n(H(\nu) -\eps)}
\quad\ \text{as}\ \ n\to\infty. \end{equation*} Here\/ $H(\nu)$ denotes Shannon's entropy defined in\/ \eqref{2,,3}. \end{corollary}
\emph{Proof.} This follows from equalities \eqref{2,,2}, \eqref{2,,3}, and the previous theorem, if we set $\mu(x) =1$ for all $x\in X$. \qed
\section{Hausdorff dimension and the maximal dimension principle} \label{4..}
Let us define the Hausdorff dimension of an arbitrary metric space $\Omega$.
Suppose that $\Omega$ is covered by at most countable collection of subsets \pin$\cal U =\{U_i\}$. Denote by $|\cal U|$ the diameter of this covering: $|\cal U| =\sup |U_i|$, where $|U_i|$ is the diameter of $U_i$. For every $\alpha\in \mathbb R$ put \begin{equation*}
\mathrm{mes}\kern 0.04em(\cal U,\alpha) =\sum_i |U_i|^\alpha. \end{equation*}
The \emph{Hausdorff measure} (of dimension $\alpha$) of the metric space $\Omega$ is \begin{equation*}
\mathrm{mes}\kern 0.04em(\Omega,\alpha) \pin=\pin \varliminf_{|\kern.04em\cal U|\to 0} \mathrm{mes}\kern 0.04em(\cal U,\alpha), \end{equation*} where \pin$\cal U$ is at most countable covering of $\Omega$. Obviously, \begin{equation*}
\mathrm{mes}\kern 0.04em(\cal U,\beta) \le \mathrm{mes}\kern 0.04em(\cal U,\alpha)\pin |\cal U|^{\beta-\alpha}
\quad \text{if}\ \ \beta\ge\alpha. \end{equation*} This implies the following property of the Hausdorff measure: if $\mathrm{mes}\kern 0.04em(\Omega,\alpha) < \infty$ for some $\alpha$, then $\mathrm{mes}\kern 0.04em(\Omega,\beta) =0$ for all $\beta> \alpha$.
The \emph{Hausdorff dimension} of the space $\Omega$ is the number \begin{equation}\label{4,,1}
\dim_H \Omega \pin=\pin\inf\pin\{\pin \alpha \mid \mathrm{mes}\kern 0.04em(\Omega,\alpha) =0\pin\}. \end{equation} In other words, $\dim_H \Omega =\alpha_0$ if $\mathrm{mes}\kern 0.04em(\Omega,\alpha) =0$ for all $\alpha>\alpha_0$ and $\mathrm{mes}\kern 0.04em(\Omega,\alpha) =\infty$ for all $\alpha<\alpha_0$.
Below we will consider the space of sequences \begin{equation*}
X^{\mathbb N} =\{\pin x=(x_1,x_2,x_3,\dots)\pin\}, \quad\ \text{where}\ \ x_i\in X =\{1,\dots,r\}. \end{equation*}
Let $x =(x_1,x_2,\dots)\in X^{\mathbb N}$. Denote by $Z_n(x)$ the set of sequences $y=(y_1,y_2,\dots)$ whose first $n$ coordinates coincide with the same coordinates of $x$. This set will be called a \emph{cylinder of rank} $n$. The collection of all cylinders generates the \emph{Tychonoff topology} on the space $X^{\mathbb N}$ and the \emph{cylinder $\sigma$-algebra} of subsets in $X^{\mathbb N}$.
Take an arbitrary positive function $\eta$ on the set of all cylinders that possesses the following two properties: first, if $Z_n(x) \subset Z_m(y)$ then $\eta(Z_n(x))\le \eta(Z_m(y))$ and, second, $\eta(Z_n(x)) \to 0$ as $n\to \infty$ at each point $x\in X^{\mathbb N}$. Define the \emph{cylinder metrics} on $X^{\mathbb N}$ by means of the formula \begin{equation} \label{4,,2}
\dist(x,y) =\eta(Z_n(x)), \quad\ \text{where} \ \
n=\max\pin\{\pin m\mid Z_m(x) =Z_m(y)\pin\}. \end{equation} Evidently, the diameter of $Z_n(x)$ in this metrics coincides with $\eta(Z_n(x))$.
Suppose on $X^{\mathbb N}$, besides the cylinder metrics \eqref{4,,2}, a Borel measure $\mu$ is given. The function \begin{equation*}
d_\mu(x) =\varliminf_{n\to\infty} \frac{\ln \mu(Z_n(x))}{\ln |Z_n(x)|} \end{equation*}
\noindent is called \emph{$($lower\/$)$ pointwise dimension of the measure\/ $\mu$}.
The next theorem provides an effective tool for computing the Hausdorff dimensions of various subsets of $X^{\mathbb N}$.
\begin{theorem} \label{4..1} Suppose\/ $A\subset X^{\mathbb N}$. If there exists a finite Borel measure\/ $\mu$ on\/ $X^{\mathbb N}$ such that\/ $d_\mu(x) \le d$ for each point\/ $x\in A$, then\/ $\dim_H A\le d$. On the contrary, if\/ $d_\mu(x) \ge d$ for each\/ $x\in A$ and the outer measure\/ $\mu^*(A)$ is positive, then\/ $\dim_H A \ge d$. \end{theorem}
It follows that if $d_\mu(x)\equiv d$ on the whole subset $A\subset X^{\mathbb N}$ then its dimension is equal to $d$.
A weakened version of the second part of Theorem \ref{4..1} in which the condition $d_\mu(x) \ge d$
is replaced by the stronger condition $\mu(Z_n(x))\le |Z_n(x)|^d$ is usually called the \emph{mass distribution principle.}
\emph{Proof.} Every cylinder $Z_n(x)$ is, in fact, a ball in the metrics \eqref{4,,2}, whose radius equals to its diameter, and vice versa, any ball in this metrics coincides with a cylinder. Besides, any two cylinders $Z_n(x)$ and $Z_m(y)$ either have empty intersection or one of them is embedded into other. Therefore, while computing the Hausdorff measure and dimension of a subset $A\subset X^{\mathbb N}$ it is enough to operate with only disjoint coverings of $A$ by cylinders.
Suppose first that $d_\mu(x) <\alpha$ for all points $x\in A$. Then for each $x\in A$ there exist arbitrarily small cylinders $Z_n(x)$ satisfying the condition $|Z_n(x)|^\alpha < \mu(Z_n(x))$. Using this kind of cylinders we can put together a disjoint covering \pin$\cal U$ of the set $A$ of arbitrarily small diameter. For this covering we have the inequalities \begin{equation*}
\mathrm{mes}\kern 0.04em(\cal U,\alpha) \pin=\sum_{Z_n(x)\in \cal U} |Z_n(x)|^\alpha \pin\le
\sum_{Z_n(x)\in \cal U} \mu(Z_n(x)) \pin\le\pin \mu\bigl(X^{\mathbb N}\bigr), \end{equation*} and hence $\dim_H A\le \alpha$. Thus the first part of the theorem is proved.
Suppose now that $d_\mu(x) >\alpha$ for all points $x\in A$. Define the sets \begin{equation*}
A_\eps \pin=\pin \bigl\{ x\in A\bigm| |Z_n(x)|^\alpha > \mu(Z_n(x))\ \ \text{whenever}\ \
|Z_n(x)| <\eps\bigr\}. \end{equation*} Obviously, $A =\bigcup_{\eps>0} A_\eps$. Hence there exists an $\eps$ such that $\mu^*(A_\eps)>0$. Let \pin$\cal U$ be a disjoint covering of $A$ by cylinders of diameters less than $\eps$. From the definition of $A_\eps$ it follows that $\mathrm{mes}\kern 0.04em(\cal U,\alpha) \ge \mu^*(A_\eps)$. Therefore $\dim_H A \ge \alpha$, and thus the second part of the theorem is proved. \qed
Theorem \ref{4..1} was first proved by Billingsley in the case when the function $\eta$ in \eqref{4,,2} is a probability measure on $X^{\mathbb N}$ (see \cite[Theorems 2.1 and 2.2]{Billingsley II}). An analog to this theorem for subsets $A\subset \mathbb R^r$ was proved in \cite{Young} and \cite{Pesin}.
Each point $x =(x_1,x_2,\dots) \in X^{\mathbb N}$ generates a sequence of empirical measures $\delta_{x,n}$ on the set $X$: \begin{equation*}
\delta_{x,n}(i) =\frac{\#\{\pin t\mid x_t=i,\pin\ t\le n\pin\}}{n}, \qquad i\in X. \end{equation*}
\noindent In other words, $\delta_{x,n}(i)$ is the fraction of those coordinates of the vector $(x_1,\dots,x_n)$ that coincide with $i$.
For every probability measure $\nu\in M_1(X)$ let us define its \emph{basin} $B(\nu)$ as the set of all points $x\in X^{\mathbb N}$ such that $\delta_{x,n}$ converges to $\nu$.
Evidently, basins of different measures do not intersect each other and are nonempty. If $x\in B(\nu)$, and $y\in X^{\mathbb N}$ differs from $x$ in only finite number of coordinates, then $y\in B(\nu)$. This implies density of each basin in $X^{\mathbb N}$.
Every measure $\nu\in M_1(X)$ generates Bernoulli distribution $P_\nu = \nu^{\mathbb N}$ on the space $X^{\mathbb N}$. By the strong law of large numbers the basin $B(\nu)$ has probability one with respect to Bernoulli distribution $P_\nu$, and its complement has zero probability $P_\nu$. In particular, any basin different from $B(\nu)$ has zero probability.
Points that do not belong to the union of all basins will be called \emph{irregular}. The set of irregular points has zero probability with respect to any distribution $P_\nu$, where $\nu\in M_1(X)$. As a result, $X^{\mathbb N}$ turns out to be decomposed into the disjoint union of different basins and the set of irregular points.
Let us fix some numbers $\theta(i)\in (0,1)$ for all elements $i\in X =\{1,\dots,r\}$, and define a \emph{cylinder\/ $\theta$-metrics} on $X^{\mathbb N}$ by the rule \begin{equation}\label{4,,3}
\dist(x,y) =\prod_{t=1}^n \theta(x_t),\quad\ \text{where}\ \
n=\inf\pin\{\pin t\mid x_t\ne y_t\pin\} -1. \end{equation} It is a partial case of the cylinder metrics \eqref{4,,2}.
For each measure $\nu\in M_1(X)$ and $\theta$-metrics \eqref{4,,3} define the quantity \begin{equation}\label{4,,4}
S(\nu,\theta) \pin=\,\frac{\sum_{i=1}^r \nu(i)\ln \nu(i)}{\sum_{i=1}^r \nu(i)\ln \theta(i)}.
\end{equation} We will call it the \emph{Billingsley entropy} because he was the first who wrote down this formula and applied it for the computation of Hausdorff dimensions \cite{Billingsley}. He expressed also this quantity in terms of Shannon's entropy and the Kullback action: \begin{equation*}
S(\nu,\theta) \,=\, \frac{H(\nu)}{H(\nu)+\rho(\nu,\theta)}. \end{equation*}
\begin{theorem} \label{4..2} Hausdorff dimension of any basin\/ $B(\nu)$ relative to the\/ $\theta$-metrics\/ \eqref{4,,3} is equal to the Billingsley entropy\/ $S(\nu,\theta)$. \end{theorem}
A partial case of this theorem in which $\theta(1) =\ldots =\theta(r) =1/r$ was first proved by Eggleston \cite{Eggleston}. In the complete form this theorem and its generalizations were proved by Billingsley in \cite{Billingsley,Billingsley II}.
\emph{Proof.} Assume first that $\nu(i)>0$ for every $i=1,\,\dots,\,r$. Obviously, \begin{equation*}
\frac{\ln P_\nu(Z_n(x))}{\ln |Z_n(x)|} \pin=\pin
\frac{\sum_{t=1}^n \ln \nu(x_t)}{\sum_{t=1}^n \ln \theta(x_t)} \pin=\pin
\frac{\sum_{i=1}^r n\delta_{x,n}(i) \ln \nu(i)}{\sum_{i=1}^r n\delta_{x,n}(i)\ln \theta(i)}.
\end{equation*} Hence for each point $x\in B(\nu)$ we have \begin{equation} \label{4,,5}
d_{P_\nu}(x) \pin=\pin
\varliminf_{n\to\infty} \frac{\ln P_\nu(Z_n(x))}{\ln |Z_n(x)|}
\pin=\,\frac{\sum_{i=1}^r \nu(i)\ln \nu(i)}{\sum_{i=1}^r \nu(i)\ln \theta(i)} \pin=\pin S(\nu,\theta). \end{equation} Applying Theorem \ref{4..1} to the set $A =B(\nu)$ and measure $\mu =P_\nu$, we obtain the statement of Theorem \ref{4..2}.
In the general case the same argument provides only lower bound $d_{P_\nu}(x)\ge S(\nu,\theta)$, that implies the lower bound $\dim_H B(\nu) \ge S(\nu,\theta)$. The inverse inequality is provided by the next lemma. \qed
\begin{lemma}\label{4..3} Suppose the space\/ $X^{\mathbb N}$ is equipped with the metrics\/ \eqref{4,,3}. Then for any measure\/ $\nu\in M_1(X)$ and\/ $\eps>0$ there exists a neighborhood\/ $O(\nu)$ such that Hausdorff dimension of the set \begin{equation*}
A=\bigl\{ x\in X^{\mathbb N}\bigm| \forall\,N\ \exists\,n>N\!:\, \delta_{x,n}\in O(\nu)\bigr\} \end{equation*} does not exceed\/ $S(\nu,\theta)+\eps$. \end{lemma}
\emph{Proof.} Fix a measure $\nu\in M_1(X)$ and an arbitrary positive number $\kappa$. By McMillan's theorem there exists a neighborhood $O(\nu)$ such that for each positive integer $n$ \begin{equation}\label{4,,6}
\#\bigl\{ Z_n(x)\bigm| \delta_{x,n}\in O(\nu)\bigr\} \pin\le\pin e^{n(H(\nu) +\kappa)}. \end{equation} Decrease this neighborhood in such a way that, in addition, for every measure $\delta\in O(\nu)$ the next inequality holds: \begin{equation*}
\sum_{i=1}^r \delta(i)\ln \theta(i) \pin<\pin \sum_{i=1}^r \nu(i) \ln \theta(i) +\kappa. \end{equation*} Then for every cylinder $Z_n(x)$ satisfying the condition $\delta_{x,n}\in O(\nu)$ we have the estimate \begin{align} \notag
|Z_n(x)| \pin=\pin \prod_{t=1}^n \theta(x_t) \pin&=\pin\exp\biggl\{\sum_{t=1}^n \ln \theta(x_t)\biggr\}
\pin=\pin \exp\biggl\{n\sum_{i=1}^r \delta_{x,n}(i)\ln \theta(i)\biggr\} \pin<\pin\\[3pt]
\pin&<\pin \exp\biggl\{n\sum_{i=1}^r \nu(i)\ln \theta(i) +n\kappa\biggr\}. \label{4,,7} \end{align}
For any positive integer $N$ the set $A$ is covered by the collection of cylinders \begin{equation*}
\cal U_N \pin=\pin\bigcup_{n=N}^\infty \bigl\{ Z_n(x)\bigm| \delta_{x,n}\in O(\nu)\bigr\}. \end{equation*} Evidently, the diameter of this covering goes to zero when $N$ increases. Now we can evaluate $\mathrm{mes}\kern 0.04em(\cal U_N,\alpha)$ by means of formulas \eqref{4,,6} and \eqref{4,,7}: \begin{align} \notag
\mathrm{mes}\kern 0.04em(\cal U_N,\alpha) \pin&=\sum_{Z_n(x)\in\pin \cal U_N}\hspace{-0.5em} |Z_n(x)|^\alpha
\pin\le\pin \sum_{n=N}^\infty e^{n(H(\nu) +\kappa)} \exp\biggl\{\alpha n\sum_{i=1}^r
\nu(i)\ln \theta(i) +\alpha n\kappa\biggr\} \pin=\pin \\[3pt] \label{4,,8}
\pin&=\pin \sum_{n=N}^\infty \exp\biggl\{n\pin\biggl(-\sum_{i=1}^r \nu(i)\ln \nu(i)
+\alpha\sum_{i=1}^r \nu(i)\ln \theta(i) +\kappa +\alpha\kappa\biggr)\!\pin\biggr\}. \end{align} If $\alpha > S(\nu,\theta)$, then we can choose so small $\kappa>0$ that the last exponent in braces is negative, and all the sum \eqref{4,,8} goes to zero as $N\to \infty$. Therefore Hausdorff measure (of dimension $\alpha$) of the set $A$ is zero, and hence $\dim_H A$ does not exceed $\alpha$. \qed
\proofskip
We will say that a sequence of empirical measures $\delta_{x,n}$ \emph{condenses} on a subset $V\subset M_1(X)$ (notation $\delta_{x,n}\succ V$) if it has at least one limit point in $V$.
Similarly to the famous large deviations principle by Varadhan \cite{Deuschel-Stroock,Varadhan}, it is natural that the next theorem be named the \emph{maximal dimension principle.}
\begin{theorem} \label{4..4} Let the space\/ $X^{\mathbb N}$ be equipped with the cylinder\/ $\theta$-metrics\/ \eqref{4,,3}. Then for any nonempty subset\/ $V\subset M_1(X)$ \begin{equation} \label{4,,9}
\dim_H\pin\bigl\{x\in X^{\mathbb N} \bigm| \delta_{x,n}\succ V\bigr\} \pin=\pin
\sup_{\nu\in V} S(\nu,\theta). \end{equation} \end{theorem}
\emph{Proof.} The set $A =\{\pin x\in X^{\mathbb N} \mid \delta_{x,n}\succ V\pin\}$ contains basins of all measures $\nu\in V$. So by Theorem \ref{4..2} its dimension is not less than the right hand side of \eqref{4,,9}.
It is easily seen from the definition \eqref{4,,4} of the Billingsley entropy $S(\nu,\theta)$ that it depends continuously on the measure $\nu\in M_1(X)$. Consider the closure $\barV$ of $V$. Obviously, it is compact. Fix any $\eps>0$. By Lemma \ref{4..3} for any measure $\nu\in\barV$ there exists a neighborhood $O(\nu)$ such that \begin{equation} \label{4,,10}
\dim_H\pin\bigl\{x\in X^{\mathbb N}\bigm| \delta_{x,n}\succ O(\nu)\bigr\} \pin\le\pin S(\nu,\theta)+\eps
\pin\le\pin \sup_{\nu\in V} S(\nu,\theta)+\eps. \end{equation} Pick out a finite covering of $\barV$ composed of neighborhoods of this sort. Then the set $A =\{\pin x\in X^{\mathbb N} \mid \delta_{x,n}\succ V\pin\}$ will be covered by a finite collection of sets of the form $\{\pin x\in X^{\mathbb N}\mid \delta_{x,n}\succ O(\nu)\pin\}$ satisfying \eqref{4,,10}. By the arbitrariness of $\eps$ this implies the statement of Theorem \ref{4..4}. \qed
\proofskip
A very similar to Theorem \ref{4..4} result was proved by Billingsley in \cite[Theorem 7.1]{Billingsley}.
Suppose that a certain subset \pin$\Xi\subset X$ is specified in the set $X =\{1,\dots,r\}$. In this case the subset \pin$\Xi^{\mathbb N}\subset X^{\mathbb N}$ will be named the \emph{generalized Cantor set.} It consists of those sequences $x =(x_1,x_2,\dots)$ in which all $x_t\in\Xi$.
\begin{theorem} \label{4..5} If the space\/ $X^{\mathbb N}$ is equipped with the\/ $\theta$-metrics\/ \eqref{4,,3} then Hausdorff dimension of the generalized Cantor set\/ \pin$\Xi^{\mathbb N}$ coincides with the unique solution of Moran's equation \begin{equation} \label{4,,11}
\sum_{i\pin\in\pin\Xi} \theta(i)^s =1. \end{equation} \end{theorem}
This theorem was first proved by Moran in 1946 \cite{Moran} for generalized Cantor subsets of the real axis and afterwards it was extended by Hutchinson \cite{Hutchinson} to the attractors of self-similar geometric constructions in $\mathbb R^r$. Let us show how it can be derived from the maximal dimension principle.
\emph{Proof.} Let $s$ be the solution to Moran's equation. Introduce a probability distribution $\nu$ on $X$, setting $\nu(i) =\theta(i)^s$ for $i\in\Xi$ and $\nu(i) =0$ for $i\notin\Xi$. Then \begin{equation} \label{4,,12}
S(\nu,\theta) \pin=\pin \frac{\sum_{i=1}^r \nu(i)\ln \nu(i)}{\sum_{i=1}^r \nu(i)\ln \theta(i)} \pin=\pin
\frac{\sum_{i\in\Xi}\theta(i)^s\ln \theta(i)^s}{\sum_{i\in\Xi}\theta(i)^s\ln \theta(i)} \pin=\pin s. \end{equation}
Consider the set $B(\nu)\cap\Xi^{\mathbb N}$. It has the unit measure with respect to the distribution $P_\nu =\nu^{\mathbb N}$. Besides, for every point $x\in B(\nu)\cap\Xi^{\mathbb N}$ by \eqref{4,,5} we have the equality $d_{P_\nu}(x) =S(\nu,\theta)$. In this setting it follows from Theorem \ref{4..1} and formula \eqref{4,,12} that \begin{equation} \label{4,,13}
\dim_H \bigl(B(\nu)\cap\Xi^{\mathbb N}\bigr) =S(\nu,\theta) =s. \end{equation}
Denote by $V$ the collection of all probability measures on $X$ supported on \pin$\Xi\subset X$. Evidently, for each point $x\in\Xi^{\mathbb N}$ all the limit points of the sequence $\delta_{x,n}$ belong to $V$. Hence, we can apply Theorem \ref{4..4} that implies \begin{equation} \label{4,,14}
\dim_H \pin\Xi^{\mathbb N} \pin\le\pin
\dim_H\pin\bigl\{x\in X^{\mathbb N} \bigm| \delta_{x,n}\succ V\bigr\} \pin=\pin
\sup_{\nu\in V} S(\nu,\theta) \pin=\pin
\sup_{\nu\in V}\frac{\sum_{i\in\Xi} \nu(i)\ln \nu(i)}{\sum_{i\in\Xi} \nu(i)\ln \theta(i)}\pin. \end{equation} Note that for every measure $\nu\in V$ \begin{equation*}
s\sum_{i\pin\in\pin\Xi} \nu(i)\ln \theta(i) \pin-\sum_{i\pin\in\pin\Xi} \nu(i)\ln \nu(i) \pin=\pin
\sum_{\nu(i)>0}\hspace{-0.3em} \nu(i)\ln\frac{\theta(i)^s}{\nu(i)} \pin\le\pin
\ln\Biggl\{\sum_{\nu(i)>0}\hspace{-0.3em} \nu(i)\frac{\theta(i)^s}{\nu(i)}\!\pin\Biggr\} \pin\le\pin
0, \end{equation*} where we have used concavity of the logarithm function. It follows that the right hand side in \eqref{4,,14} does not exceed $s$. Finally, comparing \eqref{4,,13} and \eqref{4,,14}, we obtain the desired equality $\dim_H \pin\Xi^{\mathbb N} =s$. \qed
\section{Branching processes} \label{5..}
First let us introduce the basic notions about the simplest Galton--Watson branching process.
Suppose that a random variable $Z$ takes nonnegative values $k\in\mathbb Z_+$ with probabilities $p_k$. The \emph{Galton--Watson branching process} is a sequence of integer-valued random variables $Z_0$, $Z_1$, $Z_2$, \dots{} such that $Z_0\equiv 1$, \,$Z_1 =Z$, and further each $Z_{n+1}$ is defined as the sum of $Z_n$ independent counterparts of the random variable $Z$. In particular, if $Z_n =0$ then $Z_{n+1} =0$ as well. Usually $Z_n$ is thought of as the total number of descendants in $n$-th generation of a unique common ancestor under the condition that each descendant independently of others gives birth to $Z$ children.
It is known that in some cases the posterity of the initial ancestor may degenerate (when starting from a certain $n$ all $Z_n$ are zeros) and in other cases it can ``flourish'' (when $Z_n$ grows exponentially). The type of behavior of the branching process depends on the mean number of children of any individual \begin{equation*}
m =\mathsf{E}\kern 0.07em Z = \sum_{k=0}^\infty kp_k \end{equation*} and on the generating function of that number \begin{equation*}
f(s) =f_1(s) =\sum_{k=0}^\infty p_k s^k. \end{equation*} Obviously, the restriction of the function $f(s)$ to the segment $[0,1]$ is nonnegative, nondecreasing, convex, and satisfies $f(1) =1$ and $f'(1) =m$.
In the theory of branching processes (see, for instance, \cite{Athreya-Ney,Harris}) the following statements have been proved.
\begin{theorem} \label{5..1} The generating functions of the number of descendants in\/ $n$-th generation \begin{equation*}
f_n(s) =\sum_{k=0}^\infty \mathsf{P}\{Z_n=k\}\pin s^k \end{equation*}
\noindent satisfy the recursion relation\/ $f_{n+1}(s) =f(f_n(s))$. \end{theorem}
\begin{theorem} \label{5..2} If\/ $m\le 1$ then the branching process degenerates almost surely\/ $($except the case when each individual gives birth to exactly one child\/$)$. If\/ $m>1$ then the probability\/ $q$ of degeneration is less than\/ $1$ and coincides with a unique nonunit root of the equation\/ $f(s) =s$ on the segment\/ $[0,1]$. \end{theorem}
\begin{theorem} \label{5..3} If\/ $m>1$ and\/ $\mathsf{E}\kern 0.07em Z^2<\infty$ then the sequence\/ $W_n =Z_n/m^n$ converges almost surely to a random variable\/ $W$ such that\/ $\mathsf{P}\{W>0\} =1-q$. If\/ $m>1$ and\/ $\mathsf{E}\kern 0.07em Z^2 =\infty$ then for any number\/ $m'<m$ with probability\/ $1-q$ \begin{equation*}
\lim_{n\to\infty} Z_n/m^n <\infty, \qquad \lim_{n\to\infty} Z_n\big/(m')^n =\infty \end{equation*} $($here\/ $q$ is the probability of degeneration of the branching process\/$)$. \end{theorem}
Thereby, in the case $m>1$ there is an alternative for the total number of descendants $Z_n$: either it vanishes at a certain moment $n_0$ (with probability $q<1$) or it is asymptotically equivalent to $W m^n$ (with the complementary probability $1-q$), where the random variable $W>0$ does not depend on $n$ (except the case $\mathsf{E}\kern 0.07em Z^2 =\infty$, when only the logarithmic equivalence $\ln Z_n \sim \ln m^n$ is guaranteed). All other types of the descendants' number behavior have zero probability.
We will exploit these theorems in the study of colored branching processes.
Suppose now that each individual may give birth to children of $r$ different colors (or $r$ different genders, if one likes). We will suppose that the posterity of each individual in the first generation represents a random set $X$ containing random number $k_1$ of children of the first color, random number $k_2$ of children of the second color, and so on up to $k_r$ children of color $r$. All elements of $X$ (including elements of the same color) are treated as different. The ordered array $k=(k_1,k_2,\dots,k_r) \in \mathbb Z_+^r$ will be called the \emph{color structure} of the set of children $X$. Denote by $p_k$ the probability of birth of the set $X$ with color structure $k=(k_1,k_2,\dots,k_r)$. Naturally, all the probabilities $p_k$ are nonnegative and \begin{equation*}
\sum_{k\in\mathbb Z_+^r} p_k =1. \end{equation*}
\noindent If an individual $x_1$ gave birth to $x_2$, then $x_2$ gave birth to $x_3$, and so on up to an individual $x_n$, then the sequence $x =(x_1,\dots,x_n)$ will be called the \emph{genetic line} of length $n$.
Let us construct a new branching process taking into account not only the total number of descendants but also the color of each individual and all its upward and downward lineal relations. This process may be thought of as a random \emph{genealogical tree} with a common ancestor in the root and all its descendants in the vertices, where each parent is linked with all its children. In the case of degenerating population its genealogical tree is finite, and in the case of ``flourishing'' one the tree is infinite.
Formally it is convenient to define such a process as a sequence of random sets $X_n$ containing all genetic lines of length $n$. As the first set $X_1$ we take $X$. The subsequent $X_n$ are built up by induction: if $X_n$ is already known, then for all genetic lines $(x_1,\dots,x_n)\in X_n$ define disjoint independent random sets of children $X(x_1,\dots,x_n)$, each with color structure distribution as in $X$, and put \begin{equation*}
X_{n+1} \pin=\pin \bigl\{ (x_1,\dots,x_n,x_{n+1})\bigm| (x_1,\dots,x_n)\in X_n,\ \, x_{n+1}\in
X(x_1,\dots,x_n) \bigr\}. \end{equation*} The stochastic process $X_1$, $X_2$, $X_3$, \dots\ built in this way will be referred to as the \emph{colored branching process} (or \emph{unconditional} colored branching process if one wishes to emphasize that the posterity of any individual is independent of its color and genealogy).
\section{The McMillan theorem for colored branching\\[-2pt] processes} \label{6..}
Consider a colored branching process $X_1$, $X_2$, \dots\ determined by a finite collection of colors $\Omega =\{1,\dots,r\}$ and a probability distribution $\{\pin p_k\mid k\in\mathbb Z^r_+\pin\}$, where $k=(k_1,\dots,k_r)$ is the color structure of each individual's set of children $X$. We will always think that $X_1$ is generated by a unique initial individual.
For any genetic line $x =(x_1,\dots,x_n)\in X_n$ define the \emph{spectrum} $\delta_{x,n}$ as the corresponding empirical measure on $\Omega$ by the rule \begin{equation} \label{6,,1}
\delta_{x,n}(i) =\frac{\#\{\pin t\mid g(x_t) =i\pin\}}{n}, \qquad i\in\Omega, \end{equation} where $g(x_t)$ denotes the color of $x_t$. In other words, $\delta_{x,n}(i)$ is the fraction of individuals of color $i$ in the genetic line $x$. Our next goal is to obtain asymptotical estimates for cardinalities of the random sets \begin{equation*}
\bigl\{ x =(x_1,\dots,x_n)\in X_n\bigm| \delta_{x,n}\in O(\nu)\bigr\}, \end{equation*} where $O(\nu)$ is a small neighborhood of the distribution $\nu$ on the set of colors $\Omega$.
Denote by $\mu(i)$ the expectation of members of color $i$ in $X$: \begin{equation} \label{6,,2}
\mu(i) =\sum_{k\in\mathbb Z^r_+} k_ip_k, \qquad i=1,\,\dots,\,r. \end{equation} Provided all $\mu(i)$ are finite, the vector $\mu =(\mu(1),\dots,\mu(r))$ can be regarded as a measure on the set of colors $\Omega$. This measure generates the measure $\mu^n$ on $\Omega^n$ as Cartesian product.
Define a mapping $G:X_n\to\Omega^n$ by means of the formula \begin{equation*}
G(x_1,\dots,x_n) =\bigl(g(x_1),\pin\dots,\pin g(x_n)\bigr), \end{equation*} where $g(x_t)$ is the color of $x_t$.
\begin{lemma} \label{6..1} For any\/ $\omega =(\omega_1,\dots,\omega_n) \in \Omega^n$ we have \begin{equation} \label{6,,3}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x\in X_n\mid G(x) =\omega\pin\} \pin=\pin \prod_{t=1}^n \mu(\omega_t)
\pin=\pin \mu^n(\omega). \end{equation} \end{lemma}
\emph{Proof.} Cast out the last coordinate in $\omega$ and let $\omega' = (\omega_1, \dots, \omega_{n-1})$. For any genetic line $(x_1,\dots,x_{n-1})\in X_{n-1}$, by virtue of the definition of unconditional colored branching process we have \begin{equation*}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x_{n}\in X(x_1,\dots,x_{n-1})\mid g(x_{n}) =\omega_{n}\pin\}
\pin=\pin \mu(\omega_{n}). \end{equation*} Evidently, this expression does not depend on $x' =(x_1,\dots,x_{n-1})$. Therefore, \begin{equation*}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x\in X_{n}\mid G(x) =\omega\pin\} \pin=\pin
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x'\in X_{n-1}\mid G(x') =\omega'\pin\}\pin \mu(\omega_{n}). \end{equation*} Repeated application of the latter equality gives \eqref{6,,3}. \qed
\proofskip
Define for the measure $\mu$ from \eqref{6,,2} the Kullback action \begin{equation*}
\rho(\nu,\mu) =\sum_{i\in\Omega} \nu(i)\ln\frac{\nu(i)}{\mu(i)}, \qquad \nu\in M_1(\Omega), \end{equation*} where $M_1(\Omega)$ is the set of all probability measures on $\Omega$. This formula is a copy of \eqref{2,,2}.
\begin{theorem} \label{6..2} Suppose\/ $X_1,\, X_2,\, \dots$ is an unconditional colored branching process with finite collection of colors\/ $\Omega$. Then for any\/ $\eps>0$ and probability measure\/ $\nu\in M_1(\Omega)$ there exists a neighborhood\/ $O(\nu)\subset M_1(\Omega)$ such that for all natural\/ $n$ \begin{equation} \label{6,,4}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \pin\le\pin
e^{n(-\rho(\nu,\mu)+\eps)}. \end{equation} On the other hand, for any\/ $\eps>0$ and any neighborhood\/ $O(\nu)$ \begin{equation} \label{6,,5}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \pin\ge\pin
e^{n(-\rho(\nu,\mu)-\eps)} \quad\ \text{as}\ \ n\to\infty. \end{equation} \end{theorem}
If $\rho(\nu,\mu) =+\infty$, the expression $-\rho(\nu,\mu)+\eps$ in \eqref{6,,4} should be treated as an arbitrary negative real number.
\emph{Proof.} It follows from \eqref{6,,1} that for every genetic line $x\in X_n$ its spectrum $\delta_{x,n}$ coincides with the empirical measure $\delta_{\omega,n}$, where $\omega =G(x)$. Therefore, \begin{equation} \label{6,,6}
\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \,=
\sum_{\omega\in\Omega^n:\pin \delta_{\omega,n}\in O(\nu)}
\#\{\pin x\in X_n\mid G(x) =\omega\pin\}. \\[-6pt] \end{equation} It follows from \eqref{6,,3} and \eqref{6,,6} that \begin{equation*}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \pin=\pin
\mu^n\{\pin\omega\in\Omega^n\mid \delta_{\omega,n}\in O(\nu)\pin\}. \end{equation*} The latter equality converts estimates \eqref{6,,4} and \eqref{6,,5} into already proved estimates \eqref{3,,1}, \eqref{3,,2} from the large deviations principle. \qed
\proofskip
It is remarkable that the last reference to the large deviations principle serves as the unique ``umbilical cord'' linking the first three sections of the paper with the others.
Now we are ready to state an analog of the McMillan theorem for colored branching processes. Let $q^*$ be the probability of degeneration of the process (the probability of the event that, starting from a certain number $n$, all the sets $X_n$ turn out to be empty).
\begin{theorem} \label{6..3} Suppose\/ $X_1,\, X_2,\, \dots$ is an unconditional colored branching process with finite collection of colors\/ $\Omega$. Then for any\/ $\eps>0$ and any probability measure\/ $\nu\in M_1(\Omega)$ there exists a neighborhood\/ $O(\nu)\subset M_1(\Omega)$ such that almost surely \begin{equation} \label{6,,7}
\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \pin<\pin e^{n(-\rho(\nu,\mu)+\eps)} \quad\
\text{as}\ \ n\to\infty. \end{equation} On the other hand, if\/ $\rho(\nu,\mu) <0$ then for any neighborhood\/ $O(\nu)$ and positive\/ $\eps$ the estimate \begin{equation} \label{6,,8}
\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \pin>\pin e^{n(-\rho(\nu,\mu)-\eps)}
\quad\ \text{as}\ \ n\to\infty \end{equation}
\noindent holds with probability\/ $1-q^*$ $($or almost surely under the condition that our branching process does not degenerate\/$)$. \end{theorem}
\emph{Proof.} Application of Chebyshev's inequality to \eqref{6,,4} gives \begin{equation*}
\mathsf{P}\bigl\{\#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \ge
e^{n(-\rho(\nu,\mu)+2\eps)}\bigr\} \pin\le\pin e^{-n\eps}. \end{equation*} Sum up these inequalities over all $n\ge N$: \begin{equation*}
\mathsf{P}\bigl\{\exists\, n\ge N\!:\ \#\{\pin x\in X_n\mid \delta_{x,n}\in O(\nu)\pin\} \ge
e^{n(-\rho(\nu,\mu)+2\eps)}\bigr\} \pin\le\pin \frac{e^{-N\eps}}{1-e^{-\eps}}. \end{equation*} This implies \eqref{6,,7} with constant $2\eps$ instead of $\eps$, that does not change its sense.
Proceed to the second part of the theorem. Let $\kappa = -\rho(\nu,\mu) -\eps$ and the number $\eps$ be so small that $\kappa>0$. By the second part of Theorem \ref{6..2} for any neighborhood $O(\nu)$ there exists $N$ such that \begin{equation*}
\mathsf{E}\kern 0.07em\kern 0.05em\#\{\pin x\in X_N\mid \delta_{x,N}\in O(\nu)\pin\} \pin>\pin e^{N\kappa}. \end{equation*} Without loss of generality we may assume that $O(\nu)$ is convex.
Construct a Galton--Watson branching process satisfying the conditions \begin{gather} \label{6,,9}
Z_1 \pin=\pin \#\{\pin x\in X_N\mid \delta_{x,N}\in O(\nu)\pin\}, \\[6pt] \label{6,,10}
Z_n \pin\le\pin \#\{\pin x\in X_{nN}\mid \delta_{x,nN}\in O(\nu)\pin\},
\qquad n=2,\,3,\,\dots \end{gather} Let the random variable $Z_1$ be defined by \eqref{6,,9}. For $n>1$ define $Z_n$ as a total number of genetic lines \begin{equation*}
(x_1,\pin\dots,\pin x_N,\ \ .\ \ .\ \ .\ \ ,\pin x_{(n-1)N+1},\pin\dots,\pin x_{nN})
\pin\in\pin X_{nN} \end{equation*} such that the spectrum of each segment $(x_{kN+1},\pin\dots,\pin x_{(k+1)N})$ belongs to $O(\nu)$. In other words, we will treat as ``individuals'' of the process $Z_1$, $Z_2$, $Z_3$, \dots\ those segments $(x_{kN+1},\pin\dots,\pin x_{(k+1)N})$ of genetic lines of the initial process whose spectrum lies in $O(\nu)$. Then \eqref{6,,10} follows from convexity of $O(\nu)$, and from unconditionality of the initial colored branching process it can be concluded that the sequence $Z_1$, $Z_2$, \dots\ in fact forms a Galton--Watson branching process.
By construction, $\mathsf{E}\kern 0.07em Z_1 >e^{N\kappa}$. In this setting Theorem \ref{5..3} asserts that there is an alternative for the sequence $Z_n$: either it tends to zero with a certain probability $q<1$ or it grows faster than $e^{nN\kappa}$ with probability $1-q$. In the second case, by virtue of \eqref{6,,10}, \begin{equation} \label{6,,11}
\#\{\pin x\in X_{nN}\mid \delta_{x,nN}\in O(\nu)\pin\}\, e^{-nN\kappa}
\pin\rightarrow\pin \infty \quad\ \text{as}\ \ n\to\infty. \end{equation}
To finish the proof we have to do two things: verify that in fact \eqref{6,,11} is valid with probability $1-q^*$ and get rid of the multiplier $N$ there. To do this we will exploit two ideas. First, if the colored branching process $X_1$, $X_2$, \dots\ were generated by $m$ initial individuals instead of the unique one, then \eqref{6,,11} would be valid with probability at least $1-q^m$. Second, if one genetic line is a part of another and the ratio of their lengths is close to $1$ then their spectra are close as well.
Obviously, the total number of individuals in the $n$-th generation of the initial branching process $X_1$, $X_2$, $X_3$, \dots\ equals $|X_n|$. The sequence of random variables $|X_n|$ forms a Galton--Watson branching process with probability of degeneration $q^*$, which does not exceed
$q$. Therefore, the sequence $|X_n|$ grows exponentially with probability $1-q^*$.
Consider the colored branching process $X_{k+1}$, $X_{k+2}$, $X_{k+3}$, \dots\ obtained from the initial one by virtue of truncation of the first $k$ generations. It represents a union of $|X_k|$ independent branching processes generated by all individuals of the $k$-th generation. It satisfies
\eqref{6,,11} with probability at least $1- q^{|X_k|}$. Hence for the initial process with even greater probability we obtain the condition \begin{equation} \label{6,,12}
\#\{\pin x\in X_{k+nN}\mid \delta_{x,k+nN}\in O^*(\nu)\pin\}\, e^{-nN\kappa}
\pin\rightarrow\pin \infty \quad\ \text{as}\ \ n\to\infty, \end{equation} where $O^*(\nu)$ is an arbitrary neighborhood of $\nu$ containing the closure of $O(\nu)$.
Suppose the sequence $|X_n|$ grows exponentially. Then for every $m\in\mathbb N$ define the numbers \begin{equation*}
k_i \pin=\pin\min\pin\{\pin k: |X_k|\ge m,\ \ k\equiv i \pmod{N}\pin\},
\qquad i=0,\,1,\,\dots,\,N-1. \end{equation*} For each $k=k_i$, the condition \eqref{6,,12} holds with probability at least $1-q^m$, and in common they give the estimate \begin{equation*}
\#\{\pin x\in X_n\mid \delta_{x,n}\in O^*(\nu)\pin\} \pin>\pin e^{n\kappa}
\quad\ \text{as}\ \ n\to\infty \end{equation*}
with probability at least $1-Nq^m$. By virtue of the arbitrariness of $m$ this estimate is valid almost surely (under the condition $|X_n|\to\infty$, which takes place with probability $1-q^*$). It is equivalent to \eqref{6,,8}. \qed
\section{Dimensions of random fractals (upper bounds)}\label{7..}
We proceed with the investigation of the colored branching process $X_1$, $X_2$, \dots\ with finite collection of colors $\Omega =\{1,\dots,r\}$. Let us consider the corresponding set of infinite genetic lines \begin{equation*}
X_\infty \pin=\pin\bigl\{ x=(x_1,x_2,x_3,\dots)\bigm| (x_1,\dots,x_n)\in X_n\ \
\forall\,n\in \mathbb N \bigr\}. \end{equation*}
Define the cylinder $\theta$-metrics on $X_\infty$ \begin{equation}\label{7,,1}
\dist(x,y) =\prod_{t=1}^n\theta(x_t), \qquad n=\inf\pin\{\pin t\mid x_t\ne y_t\pin\} -1, \end{equation} where the numbers $\theta(1)$, \dots, $\theta(r)$ are taken from $(0,1)$.
We will be interested in Hausdorff dimensions of both the space $X_\infty$ and its various subsets defined in terms of partial limits of empirical measures on $\Omega$ (those measures are called spectra and denoted $\delta_{x,n}$). If the colored branching process degenerates then $X_\infty$
is empty. Therefore of interest is only the case when $m =\mathsf{E}\kern 0.07em |X_1| >1$ and the cardinality of $X_n$ increases with rate of order $m^n$.
As before, denote by $\mu(i)$, where $i\in \Omega$, the expectation of individuals of color $i$ in the random set $X_1$. It will be always supposed that $\mu(i)<\infty$. Consider any probability measure $\nu\in M_1(\Omega)$. It will be proved below that the dimension of $\{\pin x\in X_\infty\mid \delta_{x,n}\to \nu\pin\}$ can be computed by means of the function \begin{equation} \label{7,,2}
d(\nu,\mu,\theta) \pin=\pin \frac{\rho(\nu,\mu)\strut}{\sum_{i=1}^r \nu(i)\ln \theta(i)\strut}
\pin=\pin \frac{\sum_{i=1}^r \nu(i)\ln\frac{\textstyle \nu(i)}{\textstyle \mu(i)}}{\sum_{i=1}^r
\nu(i)\ln \theta(i)\strut}. \end{equation}
\noindent We will name it the \emph{Billingsley--Kullback entropy.}
In \eqref{7,,2} the numerator is the Kullback action and the denominator is negative. If $\mu$ is a probability measure on $\Omega$ then the Kullback action is nonnegative. But in our setting this is not the case since $m =\mu(1)+\ldots+ \mu(r) > 1$. In particular, if $\mu(i)>\nu(i)$ for all $i\in \Omega$ then the Kullback action will be negative, and the Billingsley--Kullback entropy positive. Note, in addition, that if $\mu(1) =\ldots =\mu(r) =1$ then $-\rho(\nu,\mu)$ is equal to Shannon's entropy $H(\nu)$, and the whole of Billingsley--Kullback entropy turns into the Billingsley entropy \eqref{4,,4}.
\begin{lemma}\label{7..1} Let the space\/ $X_\infty$ of infinite genetic lines be equipped with the metrics\/ \eqref{7,,1}. Then for any probability measure\/ $\nu\in M_1(\Omega)$ and any\/ $\eps>0$ there exists a neighborhood\/ $O(\nu)$ such that Hausdorff dimension of the set \begin{equation*}
A =\bigl\{ x\in X_\infty\bigm| \forall\,N\ \exists\,n>N\!:\, \delta_{x,n}\in O(\nu)\bigr\} \end{equation*} does not exceed\/ $d(\nu,\mu,\theta)+\eps$ almost surely. \end{lemma}
\emph{Proof} is carried out in the same manner as in Lemma \ref{4..3}. Take any $\kappa>0$. By Theorem \ref{6..3} there exists a neighborhood $O(\nu)$ such that almost surely \begin{equation}\label{7,,3}
\#\bigl\{ x\in X_n\bigm| \delta_{x,n}\in O(\nu)\bigr\} \pin\le\pin e^{n(-\rho(\nu,\mu) +\kappa)}
\quad\ \text{as}\ \ n\to\infty. \end{equation} Reduce this neighborhood in such a way that in addition for all measures $\delta\in O(\nu)$, \begin{equation*}
\sum_{i=1}^r \delta(i)\ln \theta(i) \pin<\pin \sum_{i=1}^r \nu(i) \ln \theta(i) +\kappa. \end{equation*} Then for each cylinder $Z_n(x)$ satisfying the condition $\delta_{x,n}\in O(\nu)$ we have the estimate \begin{align} \notag
|Z_n(x)| \pin=\pin \prod_{t=1}^n \theta(x_t) \pin&=\pin\exp\biggl\{\sum_{t=1}^n \ln \theta(x_t)\biggr\}
\pin=\pin \exp\biggl\{n\sum_{i=1}^r \delta_{x,n}(i)\ln \theta(i)\biggr\} \pin<\pin\\[3pt]
\pin&<\pin \exp\biggl\{n\sum_{i=1}^r \nu(i)\ln \theta(i) +n\kappa\biggr\}. \label{7,,4} \end{align}
For every natural $N$ the set $A$ is covered by the collection of cylinders \begin{equation*}
\cal U_N \pin=\pin\bigcup_{n=N}^\infty \bigl\{ Z_n(x)\bigm| \delta_{x,n}\in O(\nu)\bigr\}. \end{equation*} Evidently, the diameter of this covering tends to zero as $N\to \infty$. Hence $\mathrm{mes}\kern 0.04em(\cal U_N,\alpha)$ can be estimated by virtue of formulas \eqref{7,,3} and \eqref{7,,4}: \begin{align} \notag
\mathrm{mes}\kern 0.04em(\cal U_N,\alpha) \pin&=\sum_{Z_n(x)\in\pin \cal U_N}\hspace{-0.5em} |Z_n(x)|^\alpha
\pin\le\pin \sum_{n=N}^\infty e^{n(-\rho(\nu,\mu) +\kappa)}
\exp\biggl\{\alpha n\sum_{i=1}^r \nu(i)\ln \theta(i) +\alpha n\kappa\biggr\} \pin=\pin \\[3pt]
\pin&=\pin \sum_{n=N}^\infty \exp\biggl\{n\pin\biggl(-\sum_{i=1}^r \nu(i)\ln\frac{\nu(i)}{\mu(i)}
+\alpha\sum_{i=1}^r \nu(i)\ln \theta(i)+\kappa+\alpha\kappa\biggr)\!\pin\biggr\}. \label{7,,5} \end{align} If $\alpha > d(\nu,\mu,\theta)$ then $\kappa$ can be chosen so small that the last exponent in braces is negative, and all the sum \eqref{7,,5} tends to zero as $N\to \infty$. Therefore Hausdorff measure (of dimension $\alpha$) of the set $A$ is zero, and its dimension does not exceed $\alpha$. \qed
\proofskip
As before, we say that the sequence of empirical measures $\delta_{x,n}$ condenses on a subset $V\subset M_1(\Omega)$ (notation $\delta_{x,n}\succ V$) if it has a limit point in $V$.
\begin{theorem} \label{7..2} Let\/ $X_1,\, X_2,\, X_3,\, \dots$ be an unconditional colored branching process with finite set of colors\/ $\Omega$, and the set\/ $X_\infty$ of all infinite genetic lines equipped with the cylinder metrics\/ \eqref{7,,1}. Then for any subset\/ $V\subset M_1(\Omega)$ almost surely \begin{equation}\label{7,,6}
\dim_H\pin\{\pin x\in X_\infty \mid \delta_{x,n}\succ V\pin\} \pin\le\pin
\sup_{\nu\in V} d(\nu,\mu,\theta). \end{equation} In particular, $\dim_H X_\infty\le s$ almost surely, where\/ $s$ is the unique root of the ``Bowen equation'' \begin{equation} \label{7,,7}
\sum_{i=1}^r \mu(i) \theta(i)^s =1. \end{equation} \end{theorem}
\emph{Proof.} It follows from the definition of the Billingsley--Kullback entropy $d(\nu,\mu,\theta)$ that it depends continuously on the measure $\nu\in M_1(\Omega)$. Let $\barV$ be the closure of $V$. Obviously, it is compact. Take an arbitrary $\eps>0$. By Lemma \ref{7..1} for any measure $\nu\in\barV$ there exists a neighborhood $O(\nu)$ such that almost surely \begin{equation} \label{7,,8}
\dim_H\pin\bigl\{x\in X_\infty\bigm| \delta_{x,n}\succ O(\nu)\bigr\} \pin\le\pin
d(\nu,\mu,\theta)+\eps \pin\le\pin \sup_{\nu\in V} d(\nu,\mu,\theta)+\eps. \end{equation} Choose a finite covering of $\barV$ by neighborhoods of this kind. Then the set $\{\pin x\in X_\infty \mid \delta_{x,n}\succ V\pin\}$ will be covered by a finite collection of sets of the form $\{\pin x\in X_\infty \mid \delta_{x,n}\succ O(\nu)\pin\}$ satisfying \eqref{7,,8}. By the arbitrariness of $\eps$ this implies the first statement of Theorem \ref{7..2}.
Let $s$ be a solution of equation \eqref{7,,7}. Note that for any measure $\nu\in M_1(\Omega)$, since the logarithm function is concave, \begin{gather*}
s\sum_{i=1}^r \nu(i)\ln \theta(i) \pin-\sum_{i=1}^r \nu(i)\ln\frac{\nu(i)}{\mu(i)} \pin=\pin
\sum_{i=1}^r \nu(i)\ln\frac{\mu(i)\theta(i)^{s}}{\nu(i)} \pin\le\pin \\[6pt]
\pin\le\pin \ln\Biggl\{\sum_{\nu(i)>0}\hspace{-0.3em}
\nu(i)\frac{\mu(i)\theta(i)^{s}}{\nu(i)}\!\pin\Biggr\} \pin\le\pin 0. \end{gather*} Consequently, $d(\nu,\mu,\theta)\le s$. Now the second part of our theorem follows from the first one if we take $V=M_1(\Omega)$. \qed
\proofskip
{\bf Remark.\,} In fact the ``Bowen equation'' is an equation of the form $P(s\varphi) =0$, where $P(s\varphi)$ is the topological pressure of a weight function $s\varphi$ in a dynamical system (more detailed explanations can be found in \cite{Pesin}). If we replace the topological pressure $P(s\varphi)$ by the spectral potential \begin{equation*}
\lambda(s\varphi,\mu) = \ln\sum_{i=1}^r e^{s\varphi(i)}\mu(i),
\quad\ \text{where}\ \ \varphi(i) =\ln \theta(i), \end{equation*} then the Bowen equation turns into the equation $\lambda(s\varphi,\mu) =0$, which is equivalent to \eqref{7,,7}.
\section{Block selections of colored branching processes} \label{8..}
Let $\xi_1$, $\xi_2$, $\xi_3$, \dots\ be a sequence of independent identically distributed random variables taking values $0$ or $1$ (independent Bernoulli trials).
\begin{lemma} \label{8..1} If\/ $0<p'<p<1$ and\/ $\mathsf{P}\{\xi_i=1\}\ge p$, then \begin{equation} \label{8,,1}
\mathsf{P}\{\pin\xi_1+\ldots+\xi_k \ge p'k\pin\} \pin\to\pin 1 \quad \text{as}\ \ k\to\infty \end{equation} uniformly with respect to the probability\/ $\mathsf{P}\{\xi_i=1\}\ge p$. \end{lemma}
\emph{Proof.} In the case $\mathsf{P}\{\xi_i=1\} =p$ this follows from the law of large numbers. If $\mathsf{P}\{\xi_i=1\}$ increases then the probability in the left hand side of \eqref{8,,1} increases as well. \qed
\proofskip
Consider a colored branching process $X_1$, $X_2$, \dots\ with finite set of colors $\Omega = \{1,\dots,r\}$. Each $X_n$ consists of genetic lines $(x_1,x_2,\dots,x_n)$ of length $n$, in which every subsequent individual has been born by the previous. Fix a (large enough) natural $N$. We will split genetic lines of length divisible by $N$ into blocks of length $N$: \begin{equation*}
(x_1,x_2,\dots,x_{nN}) =(y_1,\dots,y_n), \quad\ \text{where}\quad
y_{k} =(x_{(k-1)N+1},\pin\dots,\pin x_{kN}). \end{equation*} Each block $y_k$ generates an empirical measure $\delta_{y_k}$ (spectrum) on $\Omega$ by the rule \begin{equation*}
\delta_{y_k}(i) =\frac{\#\{\pin t\mid g(x_t)=i,\ \, (k-1)N <t\le kN\pin\}}{N}, \end{equation*} where $g(x_t)$ denotes the color of $x_t$.
A \emph{block selection of order\/ $N$} from a colored brunching process $X_1$, $X_2$, \dots\ is any sequence of random subsets $Y_n\subset X_{nN}$ with the following property: if $(y_1,\dots,y_{n+1})\in Y_{n+1}$ then $(y_1,\dots,y_n)\in Y_n$. In this case the sequence of blocks $(y_1,\dots,y_{n+1})$ will be called a \emph{prolongation} of the sequence $(y_1,\dots,y_n)$.
As above (see \eqref{6,,2}), denote by $\mu(i)$ the expectation of children of color $i$ born by each individual, and by $\mu$ the corresponding measure on $\Omega$.
\begin{theorem} \label{8..2} Let\/ $X_1,\, X_2,\, X_3,\, \dots$ be an unconditional colored branching process with finite set of colors\/ $\Omega$ and probability of degeneration\/ $q^*<1$. If a measure\/ $\nu\in M_1(\Omega)$ satisfies the condition\/ $\rho(\nu,\mu) <0$, then for any its neighborhood\/ $O(\nu)\subset M_1(\Omega)$ and any number\/ $\eps>0$ with probability\/ $1-q^*$ one can extract from the branching process a block selection\/ $Y_1$, $Y_2$, \dots\ of an order\/ $N$ such that each sequence of blocks\/ $(y_1,\dots,y_n)\in Y_n$ has at least\/ $l(N)$ prolongations in\/ $Y_{n+1}$, where \begin{equation} \label{8,,2}
l(N) = e^{N(-\rho(\nu,\mu)-\eps)}, \end{equation} and the spectra of all blocks belong to\/ $O(\nu)$. \end{theorem}
\emph{Proof.} Fix any numbers $p$ and $\eps$ satisfying the conditions \begin{equation*}
0<p<p+\eps<1-q^*, \qquad \rho(\nu,\mu)+\eps<0. \end{equation*} By the second part of Theorem \ref{6..3} for all large enough $N$ we have \begin{equation} \label{8,,3}
\mathsf{P}\bigl\{ \#\{\pin x\in X_N \mid \delta_{x,N}\in O(\nu)\pin\} >
e^{N(-\rho(\nu,\mu)-\eps/2)} \bigr\} \pin\ge\pin p +\eps. \end{equation}
Further we will consider finite sequences of random sets $X_1$, \dots, $X_{nN}$ and extract from them block selections $Y_1$, \dots, $Y_n$ of order $N$ such that the spectra of all their blocks belong to $O(\nu)$ and each sequence of blocks $(y_1,\dots,y_k)\in Y_k$ has at least $l(N)$ prolongations in $Y_{k+1}$. Denote by $A_n$ the event of existence of a block selection with these properties. Define one more event $A$ by the condition \begin{equation*}
\#\{\pin x\in X_N \mid \delta_{x,N}\in O(\nu)\pin\} \pin>\pin l(N)\pin e^{N\eps/2}. \end{equation*}
It follows from \eqref{8,,2} and \eqref{8,,3} that $\mathsf{P}(A)\ge p+\eps$. Evidently, $A\subset A_1$. Therefore, $\mathsf{P}(A_1)\ge p+\eps$. Now we are going to prove by induction that
$\mathsf{P}(A_n)\ge p$ whenever the order $N$ of selection is large enough. Let us perform the step of induction. Assume that $\mathsf{P}(A_n)\ge p$ is valid for some $n$. Consider the conditional probability $\mathsf{P}(A_{n+1}|A)$. By the definition of events $A_{n+1}$ and $A$ it cannot be less than the probability of the following event: there are at least $l(N)$ wins in a sequence of $[l(N) e^{N\eps/2}]$ independent Bernoulli trials with probability of win $\mathsf{P}(A_n)$ in each. Using Lemma \ref{8..1} (with $p' =p/2$ and $k=[l(N) e^{N\eps/2}]$) one can make this probability greater than $1-\eps$ at the expense of increasing~$N$. Then, \begin{equation*}
\mathsf{P}(A_{n+1}) \ge \mathsf{P}(A)\pin\mathsf{P}(A_{n+1}|A) > (p+\eps)(1-\eps) >p. \end{equation*} Thus the inequality $\mathsf{P}(A_n) >p$ is proved for all $n$.
It means that with probability greater than $p$ one can extract from the sequence $X_1$, \dots, $X_{nN}$ a block selection $Y_1$, \dots, $Y_n$ of order $N$ such that the spectra of all blocks belong to the neighborhood $O(\nu)$ and each sequence of blocks $(y_1,\dots,y_k)\in Y_k$ has at least $l(N)$ prolongations in $Y_{k+1}$.
To obtain a block selection of infinite length with the same properties, we will construct finite block selections $Y_1$, \dots, $Y_n$ in the following manner. Initially, suppose that every $Y_k$, where $k\le n$, consists of all sequences of blocks $(y_1,\dots,y_k)\in X_{kN}$ such that the spectrum of each block lies in $O(\nu)$. At the first step we exclude from $Y_{n-1}$ all sequences of blocks having less than $l(N)$ prolongations in $Y_n$, and then exclude from $Y_n$ all prolongations of the sequences that have been excluded from $Y_{n-1}$. At the second step we exclude from $Y_{n-2}$ all sequences of blocks having after the first step less than $l(N)$ prolongations in the modified $Y_{n-1}$, and then exclude from $Y_{n-1}$ and $Y_{n}$ all prolongations of the sequences that have been excluded from $Y_{n-2}$. Proceeding further in the same manner, after $n$ steps we will obtain a block selection $Y_1$, \dots, $Y_n$ such that each sequence of blocks from any $Y_k$ has at least $l(N)$ prolongations in $Y_{k+1}$. Evidently, this selection will be the maximal among all selections of order $N$ having the mentioned property. Therefore with probability at least $p$ all the sets $Y_k$ are nonempty.
For every $n$ let us construct, as is described above, the maximal block selection $Y^{(n)}_1$, \dots, $Y^{(n)}_n$. From the maximality of these selections it follows that \begin{equation*}
Y^{(n)}_n\supset Y^{(n+1)}_n\supset Y^{(n+2)}_n\supset\ldots \end{equation*} Define the sets $Y_n =\bigcap_{k\ge n} Y^{(k)}_n$. Then with probability at least $p$ all of them are nonempty and compose an infinite block selection from Theorem \ref{8..2}. Since $p$ may be chosen arbitrarily close to $1-q^*$, such selections do exist with probability $1-q^*$. \qed
\proofskip
Theorem \ref{8..2} can be strengthened by taking several measures in place of a unique measure $\nu\in M_1(\Omega)$.
\begin{theorem} \label{8..3} Let\/ $X_1,\, X_2,\, X_3,\, \dots$ be an unconditional colored branching process with finite set of colors\/ $\Omega$ and probability of degeneration\/ $q^*<1$. If a finite collection of measures\/ $\nu_i\in M_1(\Omega)$, where\/ $i=1,\,\dots,\,k$, satisfy the inequalities\/ $\rho(\nu_i,\mu) <0$, then for any neighborhoods\/ $O(\nu_i)\subset M_1(\Omega)$ and any\/ $\eps>0$ with probability\/ $1-q^*$ one can extract from the branching process a block selection\/ $Y_1$, $Y_2$, \dots\ of an order\/ $N$ such that for every\/ $i=1,\,\dots,\,k$ each sequence of blocks\/ $(y_1,\dots,y_n)\in Y_n$ has at least\/ \begin{equation*}
l_i(N) = e^{N(-\rho(\nu_i,\mu)-\eps)} \end{equation*} prolongations\/ $(y_1,\dots,y_n,y)\in Y_{n+1}$ with the property\/ $\delta_{y}\in O(\nu_i)$. \end{theorem}
It can be proved in the same manner as the previous one, only now the event $A_n$ should be understood as existence of a finite block selection $Y_1$, \dots, $Y_n$ satisfying the conclusion of Theorem \ref{8..3} and the event $A$ should be defined by the system of inequalities \begin{equation*}
\#\{\pin x\in X_N \mid \delta_{x,N}\in O(\nu_i)\pin\} \pin>\pin l_i(N)\pin e^{N\eps/2},
\qquad i=1,\,\dots,\,k. \end{equation*} We leave details to the reader.
\section{Dimensions of random fractals (lower bounds)}\label{9..}
Now we proceed with the investigation of the space of infinite genetic lines \begin{equation*}
X_\infty \pin=\pin\bigl\{ x=(x_1,x_2,x_3,\dots)\bigm| (x_1,\dots,x_n)\in X_n\ \
\forall\,n\in \mathbb N \bigr\}, \end{equation*} which is generated by an unconditional colored branching process $X_1$, $X_2$, \dots\ with finite set of colors $\Omega =\{1,\dots,r\}$. It is supposed that there is a measure \begin{equation*}
\mu = (\mu(1),\,\dots,\,\mu(r)) \end{equation*} on $\Omega$, where $\mu(i)$ denotes the expectation of children of color $i$ born by each individual, and $X_\infty$ is equipped with the cylinder $\theta$-metrics \eqref{7,,1}.
\begin{theorem} \label{9..1} Let\/ $X_1,\, X_2,\, X_3,\, \dots$ be an unconditional colored branching process with finite set of colors\/ $\Omega$ and probability of degeneration\/ $q^*<1$. If a measure\/ $\nu\in M_1(\Omega)$ satisfies the condition\/ $d(\nu,\mu,\theta) >0$, then with probability\/ $1-q^*$ for any neighborhood\/ $O(\nu)$ we have the lower bound \begin{equation} \label{9,,1}
\dim_H\pin \{\pin x\in X_\infty\mid \exists\,N\ \forall\,n>N\ \, \delta_{x,n}\in O(\nu)\pin\}
\,\ge\, d(\nu,\mu,\theta). \end{equation} \end{theorem}
\emph{Proof.} Fix any number $\alpha <d(\nu,\mu,\theta)$ and so small $\eps>0$ that \begin{equation} \label{9,,2}
d(\nu,\mu,\theta) \pin=\pin \frac{\rho(\nu,\mu)\strut}{\sum_{i=1}^r \nu(i)\ln \theta(i)\strut} \pin>\pin
\frac{\rho(\nu,\mu) +2\eps\strut}{\sum_{i=1}^r \nu(i)\ln \theta(i) -\eps\strut} \pin>\pin \alpha. \end{equation} Then choose a convex neighborhood $O^*(\nu)$ whose closure lies in $O(\nu)$ such that for any measure $\delta\in O^*(\nu)$ \begin{equation} \label{9,,3}
\sum_{i=1}^r \delta(i)\ln \theta(i) >\sum_{i=1}^r \nu(i)\ln \theta(i)-\eps. \end{equation}
By Theorem \ref{8..2} with probability $1-q^*$ one can extract from the branching process under consideration a block selection $Y_1$, $Y_2$, \dots\ of order $N$ such that any sequence of blocks $(y_1,\dots,y_n)\in Y_n$ has at least $l(N)$ prolongations in $Y_{n+1}$, where \begin{equation*}
l(N) = e^{N(-\rho(\nu,\mu)-\eps)}, \end{equation*} and for each block $y_k =(x_{(k-1)N+1},\dots,x_{kN})$ the corresponding empirical measure $\delta_{y_k}$ (spectrum) belongs to $O^*(\nu)$. Exclude from this selection a certain part of genetic lines in such a way that each of the remaining sequences of blocks $(y_1,\dots,y_n)\in Y_n$ would have exactly $[l(N)]$ prolongations in $Y_{n+1}$.
Define the random set \begin{equation*}
Y_\infty \pin=\pin \bigl\{ y=(y_1,y_2,\dots) \bigm| (y_1,\dots,y_n)\in Y_n, \ \,
n=1,\,2,\,\dots\bigr\}. \end{equation*} Any sequence $y=(y_1,y_2,\dots)\in Y_\infty$ consists of blocks of length $N$. Having written down in order the elements of all these blocks, we obtain from $y$ an infinite genetic line $x = (x_1,x_2,\dots)\in X_\infty$. Denote it as $\pi(y)$. By the definition of $Y_\infty$ the spectrum of each block $y_k$ belongs to $O^*(\nu)$. For every point $x=\pi(y)$, where $y\in Y_\infty$, the empirical measure $\delta_{x,nN}$ is an arithmetical mean of empirical measures corresponding to the first $n$ blocks of $y$, and so belongs to $O^*(\nu)$ as well. It follows that \begin{equation} \label{9,,4}
\pi(Y_\infty) \pin\subset\pin \{\pin x\in X_\infty\mid \exists\,N\ \forall\,n>N\ \,
\delta_{x,n}\in O(\nu)\pin\}. \end{equation}
The family of all cylinders of the form $Z_{nN}(x)$, where $x\in \pi(Y_\infty)$, generates some $\sigma$-algebra on $\pi(Y_\infty)$. Define a probability measure $P$ on this $\sigma$-algebra such that \begin{equation*}
P\bigl(Z_{nN}(x)\bigr) =[l(N)]^{-n}. \end{equation*} Then for all large enough $N$, all $x \in \pi(Y_\infty)$, and all natural $n$ \begin{equation*}
P\bigl(Z_{nN}(x)\bigr) \le e^{nN(\rho(\nu,\mu)+2\eps)}. \end{equation*} On the other hand, by \eqref{9,,3} \begin{gather*}
|Z_{nN}(x)| \pin=\pin \prod_{t=1}^{nN} \theta(x_t) \pin=\pin
\exp\biggl\{\sum_{t=1}^{nN} \ln \theta(x_t)\biggr\} \pin=\pin
\exp\biggl\{\sum_{i=1}^r nN\delta_{x,nN}(i)\ln \theta(i)\biggr\} \pin\ge\pin \\[3pt]
\pin\ge\pin \exp\biggl\{nN\biggl(\pin\sum_{i=1}^r \nu(i)\ln \theta(i) -\eps\biggr)\biggr\}. \end{gather*} It follows from the last two formulas and \eqref{9,,2} that \begin{equation*}
|Z_{nN}(x)|^\alpha \pin\ge\pin
\exp\biggl\{nN\alpha\biggl(\pin\sum_{i=1}^r \nu(i)\ln \theta(i)-\eps\biggr)\biggr\} \pin\ge\pin
e^{nN(\rho(\nu,\mu)+2\eps)} \pin\ge\pin P\bigl(Z_{nN}(x)\bigr). \end{equation*}
Now we are ready to compute the Hausdorff measure of dimension $\alpha$ of the set $\pi(Y_\infty)$. If, while computing the Hausdorff measure, we used coverings of $\pi(Y_\infty)$ not with any cylinders, but with only cylinders of orders divisible by $N$, then the last formula would imply that such a measure would be at least $P(\pi(Y_\infty)) =1$. Any cylinder can be put in a cylinder of order divisible by $N$ such that the difference of their orders will be less than $N$ and the ratio of their diameters greater than $\min \theta(i)^{N}$. Therefore, \begin{equation*}
\mathrm{mes}\kern 0.04em\bigl(\pi(Y_\infty),\alpha\bigr) \ge \min \theta(i)^{N\alpha} \end{equation*} and hence $\dim_H \pi(Y_\infty) \ge \alpha$.
The set defined in the right-hand side of \eqref{9,,4} contains $\pi(Y_\infty)$. Then its dimension is at least $\alpha$ too. Recall that we have proved this fact by means of a block selection that exists with probability $1-q^*$. By the arbitrariness of $\alpha<d(\nu,\mu,\theta)$ this implies the desired bound \eqref{9,,1} with the same probability. \qed
\begin{theorem} \label{9..2} Let\/ $s$ be a root of the Bowen equation \begin{equation*}
\sum_{i=1}^r \mu(i) \theta(i)^s =1. \end{equation*} If\/ $s\le 0$, then\/ $X_\infty =\emptyset$ almost surely. Otherwise, if\/ $s>0$, then\/ $X_\infty$ is nonempty with a positive probability, and with the same probability its dimension equals\/ $s$. \end{theorem}
\emph{Proof.} The expectation of the total number of children of each individual in the branching process generating the set $X_\infty$ is equal to $m=\mu(1)+\,\ldots\,+\mu(r)$. If $s\le 0$, then $m\le 1$. In this case by Theorem \ref{5..2} our branching process degenerates almost surely, and $X_\infty =\emptyset$.
If $s>0$, then $m>1$. In this case by Theorem \ref{5..2} our branching process is nondegenerate with a positive probability, and $X_\infty$ is nonempty. Define a measure $\nu\in M_1(\Omega)$ by means of the equality \begin{equation*}
\nu(i) =\mu(i)\theta(i)^{s}, \quad\ i\in \Omega. \end{equation*} Then, evidently, $d(\nu,\mu,\theta) =s$. By the previous Theorem $\dim_H X_\infty \ge s$ with the same probability with which $X_\infty \ne\emptyset$. On the other hand, by Theorem \ref{7..2} the inverse inequality holds almost surely. \qed
\proofskip
A more general version of Theorem \ref{9..2}, in which the similarity coefficients $\theta(1)$, \dots, $\theta(r)$ are random, is proved in \cite{Falconer-article,Falconer-book,Graf,Mauldin-Williams}.
For every probability measure $\nu\in M_1(\Omega)$ define a basin $B(\nu)\subset X_\infty$ as the set of all infinite genetic lines $x =(x_1,x_2,x_3,\dots)$ such that the corresponding sequence of empirical measures $\delta_{x,n}$ converges to $\nu$. What is the dimension of $B(\nu)$? By Theorem \ref{7..2} it does not exceed the Billingsley--Kullback entropy $d(\nu,\mu,\theta)$ with probability $1$. On the other hand, the inverse inequality does not follow from the previous results (and, in particular, from Theorem \ref{9..1}). To obtain it, we ought to enhance the machinery of block selections.
\begin{lemma} \label{9..3} Let\/ $Q_1$, \dots, $Q_{2^r}$ be vertices of a cube in\/ $\mathbb R^r$. Then there exists a choice law\/ $i\colon \mathbb R^r\to \{1,\dots,2^r\}$ such that if neighborhoods\/ $O(Q_i)$ are small enough, sequences \begin{equation*}
\delta_n\in\mathbb R^r \quad \text{and}\quad
\Delta_n =\frac{\delta_1+\ldots+\delta_n}{n} \end{equation*} satisfy the conditions\/ $\delta_{n+1}\in O\bigl(Q_{i(\Delta_n)}\bigr)$ and\/ $\delta_1\in O(Q_1) \cup \ldots\cup O(Q_{2^r})$, then the sequence\/ $\Delta_n$ converges to the center of the cube. \end{lemma}
\emph{Proof.} First consider the case $r=1$, when the cube turns to a segment. Let, for definiteness, $Q_1 =-1$ and $Q_2 =1$. Set \begin{equation} \label{9,,5}
i(\Delta) =
\begin{cases}
1,& \text{if}\ \ \Delta\ge 0,\\[1pt]
2,& \text{if}\ \ \Delta<0.
\end{cases} \end{equation} Take any neighborhoods $O(Q_1)$ and $O(Q_2)$ with radii at most $1$. Then for any sequence $\delta_n$ satisfying the conditions $\delta_{n+1}\in O\bigl(Q_{i(\Delta_n)}\bigr)$ and
$|\delta_1|<2$ we have the estimate $|\Delta_n|<2/n$. It may be easily proved by induction. Thus in the one-dimensional case the lemma is proved. To prove it in the multidimensional case one should choose a coordinate system with origin at the center of the cube and axes parallel to edges of the cube and apply the choice law \eqref{9,,5} to each of the coordinates independently. \qed
\begin{theorem} \label{9..4} Let\/ $X_1,\, X_2,\, X_3,\, \dots$ be an unconditional colored branching process with a finite set of colors\/ $\Omega$ and probability of degeneration\/ $q^*<1$. If a measure\/ $\nu\in M_1(\Omega)$ satisfies the condition\/ $d(\nu,\mu,\theta) >0$, then with probability\/ $1-q^*$ \begin{equation*}
\dim_H B(\nu) = d(\nu,\mu,\theta). \end{equation*} \end{theorem}
\emph{Proof.} Fix any number $\alpha <d(\nu,\mu,\theta)$ and so small $\eps>0$ that \begin{equation*}
d(\nu,\mu,\theta) \pin=\pin \frac{\rho(\nu,\mu)\strut}{\sum_{i=1}^r \nu(i)\ln \theta(i)\strut} \pin>\pin
\frac{\rho(\nu,\mu) +3\eps\strut}{\sum_{i=1}^r \nu(i)\ln \theta(i) -\eps\strut} \pin>\pin \alpha. \end{equation*}
The set $M_1(\Omega)$ is in fact a simplex of dimension $r-1$, where $r =|\Omega|$. Suppose first that $\nu$ is an inner point of this simplex (in other words, $\nu(i)>0$ for all $i\in \Omega$). Take a small convex neighborhood $O(\nu)\subset M_1(\Omega)$ such that for any measure $\delta\in O(\nu)$ \begin{gather*}
\rho(\delta,\mu) < \rho(\nu,\mu) +\eps,\\[3pt]
\sum_{i=1}^r \delta(i)\ln \theta(i) >\sum_{i=1}^r \nu(i)\ln \theta(i)-\eps. \end{gather*}
Let $Q_1$, \dots, $Q_{2^{r-1}}$ be vertices of some cube in $O(\nu)$ with center at $\nu$. Define for them small neighborhoods $O(Q_i)\subset O(\nu)$ as in Lemma \ref{9..3}. By Theorem \ref{8..3} with probability $1-q^*$ one can extract from the colored branching process $X_1$, $X_2$, \dots\ a block selection $Y_1$, $Y_2$, \dots\ of order $N$ such that for every $i\le 2^{r-1}$ each sequence of blocks $(y_1,\dots,y_n)\in Y_n$ has at least \begin{equation*}
l(N) = e^{N(-\rho(\nu,\mu)-2\eps)} \end{equation*} prolongations $(y_1,\dots,y_n,y)\in Y_{n+1}$ possessing the property $\delta_{y}\in O(Q_i)$.
Exclude from this block selection a certain part of genetic lines so that each of the remaining sequences of blocks $(y_1,\dots,y_n)\in Y_n$ would have exactly $[l(N)]$ prolongations $(y_1,\dots,y_n,y)\in Y_{n+1}$, and all these prolongations would satisfy the choice law from Lemma \ref{9..3}, namely, \begin{equation*}
\delta_y\in O\bigl(Q_{i(\Delta_n)}\bigr), \quad\ \text{where} \quad
\Delta_n =\frac{\delta_{y_1}+\ldots+\delta_{y_n}}{n}. \end{equation*}
Denote by $\pi(Y_\infty)$ the set of all infinite genetic lines $(x_1,x_2,\dots)\in X_\infty$ for which every initial segment of length $nN$, having been partitioned into blocks of length $N$, turns into an element of $Y_n$. Then by Lemma \ref{9..3} we have the inclusion $\pi(Y_\infty) \subset B(\nu)$.
Reproducing reasoning from the proof of Theorem \ref{9..1} one can ascertain that the dimension of $\pi(Y_\infty)$ is greater than $\alpha$. Since $\alpha$ can be taken arbitrarily close to $d(\nu,\mu,\theta)$, we obtain the lower bound $\dim_H B(\nu) \ge d(\nu,\mu,\theta)$. The inverse inequality, as was mentioned above, follows from Theorem \ref{7..2}. Thus in the case of inner point $\nu\in M_1(\Omega)$ the theorem is proved.
If the measure $\nu$ belongs to the boundary of the simplex $M_1(\Omega)$, then one should exclude from $\Omega$ all elements $i$ with $\nu(i) =0$, and consider the set \begin{equation*}
\Omega' =\{\pin i\in \Omega \mid \nu(i)>0\pin\}. \end{equation*} Exclude from the branching process $X_1$, $X_2$, \dots\ all genetic lines containing elements of colors not in $\Omega'$ and denote as $X'_1$, $X'_2$, \dots\ the resulting branching process (with the set of colors $\Omega'$). The corresponding set of infinite genetic lines $X'_\infty$ is contained in $X_\infty$. It follows from the definition of Billingsley--Kullback entropy $d(\nu,\mu,\theta)$ that it is the same for the sets of colors $\Omega$ and $\Omega'$. Besides, the measure $\nu$ lies in the interior of the simplex $M_1(\Omega')$. Therefore, $\dim_H \bigl(B(\nu)\cap X'_\infty\bigr) =d(\nu,\mu,\theta)$ with the same probability as $X'_\infty\ne \emptyset$.
The theorem would be completely proved if the probability of the event $X'_\infty\ne\emptyset$ was equal to $1-q^*$. But it may be less than $1-q^*$. This obstacle can be overcome as follows.
Let $m' =\sum_{i\in \Omega'} \mu(i)$. This is nothing more than the expectation of each individual's number of children in the branching process $X'_1$, $X'_2$, \dots. If $m'\le 1$ then \eqref{2,,4} implies the inequality $\rho(\nu,\mu) \ge 0$, which contradicts the condition $d(\nu,\mu,\theta) >0$ of our theorem. Therefore $m'>1$ and, respectively, the probability of the event $X'_\infty = \emptyset$ is strictly less than $1$. Let us denote it $q'$.
If the branching process $X'_1$, $X'_2$, \dots\ were generated not by a single initial element, but by
$k$ initial elements, then the probability of $X'_\infty =\emptyset$ would be equal to~$(q')^k$. Recall that the cardinality of $X_n$ grows exponentially with probability $1-q^*$. If this is the case, one can first wait for the event $|X_n|\ge k$, and then consider separately $|X_n|$
independent counterparts of the branching process $X'_1$, $X'_2$, \dots\ generated by different elements of $X_n$. This trick allows us to obtain the bound $\dim_H B(\nu) \ge d(\nu,\mu,\theta)$ with conditional probability at least $1-(q')^{k}$ under the condition $|X_n|\to \infty$. Since $k$ is arbitrary, the above-mentioned conditional probability is in fact one, and the complete probability cannot be less than $1-q^*$. \qed
\end{document}
\begin{document}
\date{} \title{More results on weighted independent domination\footnote{Extended abstract of this paper appeared in the proceedings of
WG 2017 -- the 43rd International Workshop on Graph-Theoretic Concepts in Computer Science \cite{WG2017}.}}
\begin{abstract} Weighted independent domination is an NP-hard graph problem, which remains computationally intractable in many restricted graph classes. In particular, the problem is NP-hard in the classes of sat-graphs and chordal graphs. We strengthen these results by showing that the problem is NP-hard in a proper subclass of the intersection of sat-graphs and chordal graphs. On the other hand, we identify two new classes of graphs where the problem admits polynomial-time solutions. \end{abstract}
\section{Introduction}
\textsc{Independent domination} is the problem of finding in a graph an inclusionwise maximal independent set of minimum cardinality. This is one of the hardest problems of combinatorial optimization and it remains difficult under substantial restrictions. In particular, it is NP-hard for so-called sat-graphs, where the problem is equivalent to {\sc satisfiability} \cite{Zverovich06}. It is also NP-hard for planar graphs, triangle-free graphs, graphs of vertex degree at most 3 \cite{BL03}, line graphs \cite{YG80}, chordal bipartite graphs \cite{DMK90}, etc.
The weighted version of the problem (abbreviated WID) deals with vertex-weighted graphs and asks to find an inclusionwise maximal independent set of minimum total weight. This version is provenly harder, as it remains NP-hard even for chordal graphs \cite{Chang2004}, where {\sc independent domination} can be solved in polynomial time \cite{Farber}. In the present paper, we strengthen two NP-hardness results by showing that WID is NP-hard in a proper subclass of the intersection of sat-graphs and chordal graphs.
On the positive side, it is known that the problem is polynomial-time solvable for interval graphs, permutation graphs \cite{poly}, graphs of bounded clique-width \cite{CW}, etc.
Let us observe that all classes mentioned above are hereditary, i.e. closed under taking induced subgraphs. It is well-known (and not difficult to see) that a class of graphs is hereditary if and only if it can be characterized in terms of minimal forbidden induced subgraphs. Unfortunately, not much is known about efficient solutions for the WID problem on graph classes defined by {\it finitely many} forbidden induced subgraphs. Among rare examples of this type, let us mention cographs and split graphs. \begin{itemize} \item A {\it cograph} is a graph in which every induced subgraph with at least two vertices is either disconnected or the complement of a disconnected graph. The cographs are precisely $P_4$-free graphs, i.e. graphs containing no induced $P_4$. In the case of cographs, the problem can be solved efficiently by means of modular decomposition. \item A {\it split graph} is a graph whose vertices can be partitioned into a clique and an independent set. In terms of forbidden induced subgraphs, the split graphs are the graphs which are free of $2K_2, C_{4}$ and $C_5$. The only available way to solve WID efficiently for a split graph is to examine all its inclusionwise maximal independent sets, of which there are polynomially many. \end{itemize}
The class of sat-graphs, mentioned earlier, consists of graphs whose vertices can be partitioned into a clique and a graph of vertex degree at most 1. Therefore, sat-graphs form an extension of split graphs. With this extension the complexity status of the problem jumps from polynomial-time solvability to NP-hardness. In the present paper, we study two other extensions of split graphs and show polynomial-time solvability in both of them.
The first of them deals with the class of $(P_5,\overline{P}_5)$-free graphs, which also extends the cographs. From an algorithmic point of view, this extension is resistant to any available technique. To crack the puzzle for $(P_5,\overline{P}_5)$-free graphs, we develop a new decomposition scheme combining several algorithmic tools. This enables us to show that the WID problem can be solved for $(P_5,\overline{P}_5)$-free graphs in polynomial time.
The second extension of split graphs studied in this paper deals with the class of $(P_5, \overline{P_3+P_2})$-free graphs. To solve the problem in this case, we develop a tricky reduction allowing us to reduce the problem to the first class.
Let us emphasize that in both cases the presence of $P_5$ among the forbidden graphs is necessary, because each of $\overline{P}_5$ and $\overline{P_3+P_2}$ contains a $C_4$ and by forbidding $C_4$ alone we obtain a class where the problem is NP-hard \cite{BL03}. Whether the presence of $P_5$ among the forbidden graphs is sufficient for polynomial-time solvability of WID is a big open question. For the related problem of finding a maximum weight independent set (WIS), this question was answered only recently \cite{P5} after several decades of attacking the problem on subclasses of $P_5$-free graphs (see e.g. \cite{gem,PP,Kar}). In particular, prior to solving the problem for $P_5$-free graphs, it was solved for $(P_5,H)$-free graphs for all graphs $H$ with at most 5 vertices, except for $H=C_5$.
WID is a more stubborn problem, as it remains NP-hard in many classes where WIS can be solved in polynomial time, such as line graphs, chordal graphs, bipartite graphs, etc. In \cite{LozMosPur2015}, the problem was solved in polynomial time for many subclasses of $P_5$-free graphs, including $(P_5,H)$-free graphs for all graphs $H$ with at most 5 vertices, except for $H=\overline{P}_5$, $H=\overline{P_3+P_2}$ and $H=C_5$. In the present paper, we solve the first two of them, leaving the case of $(P_5,C_5)$-free graphs open. We believe that WID in $(P_5,C_5)$-free graphs is polynomially equivalent to WID in $P_5$-free graphs. Determining the complexity status of the problem in both classes is a challenging open question. We discuss this and related open questions in the concluding section of the paper.
The rest of the paper is organized as follows. In the remainder of the present section, we introduce basic terminology and notation. In Section~\ref{sec:house} we solve the problem for $(P_5,\overline{P}_5)$-free graphs, and in Section~\ref{sec:new} we solve it for $(P_5, \overline{P_3+P_2})$-free graphs.
All graphs in this paper are finite, undirected, without loops and multiple edges. The vertex set and the edge set of a graph $G$ are denoted by $V(G)$ and $E(G)$, respectively. A subset $S\subseteq V(G)$ is \begin{itemize} \item[--] \textit{independent} if no two vertices of $S$ are adjacent, \item[--] a \textit{clique} if every two vertices of $S$ are adjacent, \item[--] \textit{dominating} if every vertex not in $S$ is adjacent to a vertex in $S$. \end{itemize}
For a vertex-weighted graph $G$ with a weight function $w$, by $id_w(G)$ we denote the minimum weight of an independent dominating set in $G$.
If $v$ is a vertex of $G$, then $N(v)$ is the {\it neighbourhood} of $v$ (i.e. the set of vertices adjacent to $v$) and $V(G) \setminus N(v)$ is the {\it antineighbourhood} of $v$. We say that $v$ is \textit{simplicial} if its neighbourhood is a clique, and $v$ is \textit{antisimplicial} if its antineighbourhood is an independent set.
Let $S$ be a subset of $V(G)$. We say that a vertex $v \in V(G) \setminus S$ \textit{dominates} $S$ if $S\subseteq N(v)$. Also, $v$ \textit{distinguishes} $S$ if $v$ has both a neighbour and a non-neighbour in $S$. By $G[S]$ we denote the subgraph of $G$ induced by $S$ and by $G - S$ the subgraph $G[V \setminus S]$. If $S$ consists of a single element, say $S = \{ v \}$, we write $G - v$, omitting the brackets.
If $G$ is a connected graph but $G-S$ is not, then $S$ is a \textit{separator} (also known as a cut-set). A \textit{clique separator} is a separator which is also a clique.
As usual, $P_n,C_n$ and $K_n$ denote a chordless path, a chordless cycle and a complete graph on $n$ vertices, respectively. Given two graphs $G$ and $H$, we denote by $G+H$ the disjoint union of $G$ and $H$, and by $mG$ the disjoint union of $m$ copies of $G$.
We say that a graph $G$ contains a graph $H$ as an induced subgraph if $H$ is isomorphic to an induced subgraph of $G$. Otherwise, $G$ is $H$-free.
A class $\mathcal{Z}$ of graphs is hereditary if it is closed under taking induced subgraphs, i.e. if $G \in \mathcal{Z}$ implies that every induced subgraph of $G$ belongs to $\mathcal{Z}$. It is well-known that $\mathcal{Z}$ is hereditary if and only if graphs in $\mathcal{Z}$ do not contain induced subgraphs from a set $M$, in which case we say that $M$ is the set of forbidden induced subgraphs for $\mathcal{Z}$.
For an initial segment of natural numbers $\{ 1, 2, \ldots, n \}$ we will often use the notation $[n]$.
\section{An NP-hardness result} \label{sec:NP}
As we mentioned in the introduction, the WID problem is NP-hard in the classes of sat-graphs and chordal graphs. A graph is {\it chordal} if it is $(C_4,C_5,C_6,\ldots)$-free. A graph $G$ is called a \textit{sat-graph} if there exists a partition $A \cup B = V(G)$ such that \begin{enumerate}
\item $A$ is a clique (possibly, $A = \emptyset$);
\item $G[B]$ is an induced matching, i.e. an induced 1-regular graph (possibly, $B = \emptyset$);
\item there are no triangles $(a,b,b')$, where $a \in A$ and $b,b' \in B$. \end{enumerate} We shall refer to the pair $(A,B)$ as a \textit{sat-partition} of $G$.
Below we show that WID is NP-hard in the class of $(C_4, Sun_3)$-free sat-graphs, where $Sun_3$ is the graph shown in Figure~\ref{fig:T_domino}. Since cycles $C_k$ with $k\ge 5$ are not sat-graphs (which is easy to see), this class also is a subclass of chordal graphs. Moreover, $Sun_3$ is both a sat-graph and a chordal graph. Therefore, $(C_4, Sun_3)$-free sat-graphs form a proper subclass of the intersection of sat-graphs and chordal graphs.
\begin{figure}
\caption{Graph $Sun_3$}
\label{fig:T_domino}
\end{figure}
Before we prove the main result of this section, let us make the following useful observation.
\begin{observation}\label{obs:domino_T}
Let $G$ be a sat-graph with a sat-partition $(A,B)$. If $G$ contains $Sun_3$
as an induced subgraph, then $1,2,3 \in A$ and $4,5,6 \in B$. \end{observation}
\begin{theorem}
The WID problem is NP-hard in the class of $(C_4, Sun_3)$-free sat-graphs. \end{theorem} \begin{proof} We prove the theorem by transforming the decision version of the {\sc minimum dominating set} problem in $(C_3,C_4,C_5,C_6)$-free graphs to the WID problem in $(C_4, Sun_3)$-free graphs. Since the former problem is NP-complete (see \cite{Kor90}), this will prove that the latter is NP-hard.
For an $n$-vertex graph $G = (V,E)$ let us define the graph $G' = (V',E')$ with vertex set $V' = \{v_1,v_2,v_3 : v \in V\}$ and edge set
$E' = \{ (v_1,v_2), (v_2,v_3): v \in V\} \cup \{ (w_2,v_3), (w_3,v_2) : (w,v) \in E\} \cup \{(w_3, v_3) : w, v \in V, w \neq v\}$.
\begin{figure}
\caption{Graphs $P_4$ (top) and $P'_4$ (bottom)}
\label{fig:transformationP4}
\end{figure}
Figure~\ref{fig:transformationP4} illustrates the transformation of $P_4$ into $P'_4$. It is easy to see that for every graph $G$, the graph $G'$ is a sat-graph. Moreover, it is $C_4$-free, i.e. $G'$ is a chordal graph. Also using the fact that $Sun_3$ has a unique sat-partition (see Observation \ref{obs:domino_T}) it is not hard to check that if $G'$ contains $Sun_3$ as an induced subgraph, then $G$ has a cycle of length at most 6. Therefore, for any $(C_3,C_4,C_5,C_6)$-free graph $G$, the graph $G'$ is a $(C_4, Sun_3)$-free sat-graph.
Further, for every $v \in V$ we assign weight 1 to vertex $v_1$, weight $2$ to vertex $v_2$, and weight $2n$ to vertex $v_3$.
Now, we claim that $G$ has a dominating set of size at most $k$ if and only if $G'$ has an independent dominating set of total weight at most $n + k$. First, suppose $G$ has a dominating set $D$ of size at most $k$. Then $D' = \{v_2 : v \in D\} \cup \{v_1 : v \in V \setminus D\}$ is clearly an independent dominating set of $G'$ with total weight at most $n + k$. On the other hand, suppose $G'$ has an independent dominating set $D'$ of total weight at most
$n + k$. If $k \geq n$, then $V$ is a dominating set of $G$ of size at most $k$. If $k < n$, then $D'$ cannot contain any of the vertices of weight $2n$ and hence $D'$ is of the form $\{v_2 : v \in D\} \cup \{v_1 : v \in V \setminus D\}$ for some subset $D$ of $V$. For any vertex $u \in V$, since $u_3$ is dominated in $G'$ by some $v_2 \in D'$, we have that in $G$ vertex $u$ is dominated by $v \in D$. Hence, $D$ is a dominating set of $G$. Moreover, the total weight of $D'$ is $n + |D|$ implying that $D$ is of size at most $k$.
\end{proof}
\section{WID in $(P_5,\overline{P}_5)$-free graphs} \label{sec:house}
To solve the problem for $(P_5,\overline{P}_5)$-free graphs, we first develop a new decomposition scheme in Section~\ref{sec:had} that combines modular decomposition (Section~\ref{subsec:modular}) and antineighborhood decomposition (Section~\ref{subsec:anti}). Then in Section~\ref{sec:P5} we apply it to $(P_5,\overline{P}_5)$-free graphs.
\subsection{Graph decompositions}
\subsubsection{Modular decomposition} \label{subsec:modular}
Let $G=(V,E)$ be a graph. A set $M \subseteq V$ is a \emph{module} in $G$ if no vertex outside of $M$ distinguishes $M$. Obviously, $V(G)$, $\emptyset$ and any vertex of $G$ are modules and we call them {\it trivial}. A non-trivial module is also known as a \textit{homogeneous set}. A graph without homogeneous sets is called {\it prime}. The notion of a prime graph plays a crucial role in {\em modular decomposition}, which allows one to reduce various algorithmic and combinatorial problems in a hereditary class $\mathcal{Z}$ to prime graphs in $\mathcal{Z}$ (see e.g. \cite{MoeRad1984/1} for more details on modular decomposition and its applications). In particular, it was shown in \cite{BL03} that the WID problem can be solved in polynomial time in $\mathcal{Z}$ whenever it is polynomially solvable for prime graphs in $\mathcal{Z}$.
In our solution, we will use homogeneous sets in order to reduce the problem from a graph $G$ to two proper induced subgraphs of $G$ as follows. Let $M \subset V$ be a homogeneous set in $G$. Denote by $H$ the graph obtained from $G$ by contracting $M$ into a single vertex $m$ (or equivalently, by removing all but one vertex $m$ from $M$). We define the weight function $w'$ on the vertices of $H$ as follows: $w'(v) = w(v)$ for every $v \ne m$, and $w'(m) = id_w(G[M])$. Then it is not difficult to see that \begin{equation}
id_w(G) = id_{w'}(H). \end{equation} In other words, to solve the problem for $G$ we first solve the problem for the subgraph $G[M]$, construct a new weighted graph $H$, and solve the problem for the graph $H$.
\subsubsection{Antineighborhood decomposition} \label{subsec:anti}
One of the simplest branching algorithms for the maximum weight independent set problem is based on the following obvious fact. For any graph $G=(V,E)$ and any vertex $v \in V$, $$
is_w(G) = \max \{ is_w(G - N(v)), is_w(G - v) \}, $$ where $w$ is a weight function on the vertices of $G$, and $is_w(G)$ stands for the maximum weight of an independent set in $G$. We want to use a similar branching rule for the WID problem, i.e. \begin{equation}\label{eq:anti_WID}
id_w(G) = \min \{ id_w(G - N(v)), id_w(G - v) \}. \end{equation}
However, formula (\ref{eq:anti_WID}) is not necessarily true, because an independent dominating set in the graph $G - v$ is not necessarily dominating in the whole graph $G$. To overcome this difficulty, we introduce the following notion. \begin{definition} A vertex $v$ is {\em permissible} if formula (\ref{eq:anti_WID}) is valid for $v$. \end{definition}
An obvious sufficient condition for a vertex to be permissible can be stated as follows: if every independent dominating set in $G - v$ contains at least one neighbour of $v$, then $v$ is permissible.
Applying (\ref{eq:anti_WID}) to a permissible vertex $v$ of $G$, we reduce the problem from $G$ to two subgraphs $G - v$ and $G - N(v)$. Such a branching procedure results in a decision tree. In general, this approach does not provide a polynomial-time solution, since the decision tree may have exponentially many nodes (subproblems). However, under some conditions this procedure may lead to a polynomial-time algorithm. In particular, this is true for graphs in hereditary classes possessing the following property.
\begin{definition}\label{def:2} A graph class ${\cal G}$ has the {\em antineighborhood property} if there is a subclass ${\cal F} \subseteq {\cal G}$, and polynomial algorithms $P, Q$ and $R$, such that \begin{enumerate}
\item[(i)] Given a graph $G$ the algorithm $P$ decides whether $G$ belongs to ${\cal F}$ or not;
\item[(ii)] $Q$ finds a permissible vertex $v$ in any input graph $G \in {\cal G} \setminus {\cal F}$
such that the graph $G-N(v)$ induced by the antineighborhood of $v$ belongs to ${\cal F}$;
we call $v$ a {\em good vertex};
\item[(iii)] $R$ solves the WID problem for (every induced subgraph of) any input graph from ${\cal F}$. \end{enumerate} \end{definition}
Directly from the definition we derive the following conclusion.
\begin{theorem}\label{theo: anti} Let ${\cal G}$ be a hereditary class possessing the antineighborhood property. Then WID can be solved in polynomial time for graphs in ${\cal G}$. \end{theorem}
\subsubsection{Decomposition scheme} \label{sec:had}
Let ${\cal G}$ be a hereditary class such that the class ${\cal G}_p$ of prime graphs in ${\cal G}$ has the antineighborhood property. We define the decomposition procedure by describing the corresponding decomposition tree $T(G)$ for a graph $G=(V,E) \in {\cal G}$. In the description, we use notions and notations introduced in Definition~\ref{def:2}.
\begin{enumerate}
\item If $G$ belongs to ${\cal F}$, then the node of $T(G)$ corresponding to $G$ is a leaf.
\item If $G \not\in {\cal F}$ and $G$ has a homogeneous set $M$,
then $G$ is decomposed into subgraphs $G_1 = G[M]$ and $G_2 = G[(V \setminus M) \cup \{m\}]$
for some vertex $m$ in $M$.
The node of $T(G)$ corresponding to $G$ is called a \textit{homogeneous node}, and it has
two children corresponding to $G_1$ and $G_2$. These children are in turn the roots of subtrees
representing possible decompositions of $G_1$ and $G_2$.
\item If $G \not\in {\cal F}$ and $G$ has no homogeneous set, then $G$ is prime and by the
antineighborhood property of ${\cal G}_p$ there exists a good vertex $v \in V$.
Then $G$ is decomposed into subgraphs $G_1 = G - N(v)$ and $G_2 = G - v$.
The node of $T(G)$ corresponding to $G$
is called an \textit{antineighborhood node}, and it has two children corresponding to
$G_1$ and $G_2$.
The graph $G_1$ belongs to ${\cal F}$ and the node corresponding to $G_1$ is a leaf. The node
corresponding to $G_2$ is the root of a subtree representing a possible decomposition of $G_2$.
\end{enumerate}
\begin{lemma}\label{tree}
Let $G$ be an $n$-vertex graph in ${\cal G}$. Then the tree $T(G)$ contains $O(n^2)$ nodes. \end{lemma} \begin{proof} Since $T(G)$ is a binary tree, it is sufficient to show that the number of internal nodes is $O(n^2)$. To this end, we prove that the internal nodes of $T(G)$ can be labeled by pairwise different pairs $(a,b)$, where $a,b \in V(G)$.
Let $G' = (V',E')$ be an induced subgraph of $G$ that corresponds to an internal node $X$ of $T(G)$. If $X$ is a homogeneous node, then $G'$ is decomposed into subgraphs $G_1 = G'[M]$ and $G_2 = G'[(V' \setminus M) \cup \{m\}]$, where $M \subset V'$ is a homogeneous set of $G'$ and $m$ is a vertex in $M$. In this case, we label $X$ with $(a,b)$, where $a \in M \setminus \{m\}$ and $b \in V' \setminus M$. If $X$ is an antineighborhood node, then $G'$ is decomposed into subgraphs $G_1 = G' - N(v)$ and $G_2 = G' - v$, where $v$ is a good vertex of $G'$. In this case, $X$ is labeled with $(v,b)$, where $b \in N(v)$.
Suppose, to the contrary, that there are two internal nodes $A$ and $B$ in $T(G)$ with the same label $(a,b)$. By construction, this means that $a,b$ are vertices of both $G_A$ and $G_B$, the subgraphs of $G$ corresponding to the nodes $A$ and $B$, respectively. Assume first that $B$ is a descendant of $A$. The choice of the labels implies that regardless of the type of node $A$ (homogeneous or antineighborhood), the label of $A$ has at least one vertex that is not a vertex of $G_B$, a contradiction. Now, assume that neither $A$ is a descendant of $B$ nor $B$ is a descendant of $A$. Let $X$ be the lowest common ancestor of $A$ and $B$ in $T(G)$. If $X$ is a homogeneous node, then $G_A$ and $G_B$ can have at most one vertex in common, and thus $A$ and $B$ cannot have the same label. If $X$ is an antineighborhood node, then one of its children is a leaf, contradicting the assumption that both $A$ and $B$ are internal nodes. \end{proof}
\begin{lemma}\label{lem:construct}
Let $G$ be an $n$-vertex graph in ${\cal G}$. If time complexities of the algorithms $P$ and $Q$ are
$O(n^p)$ and $O(n^q)$, respectively, then $T(G)$ can be constructed in time
$O(n^{2 + \max\{ 2, p, q \}})$. \end{lemma} \begin{proof}
The time needed to construct $T(G)$ is the sum of times required to identify types of nodes of $T(G)$
and to decompose graphs corresponding to internal nodes of $T(G)$. To determine the type
of a given node $X$ of $T(G)$, we first use the algorithm $P$ to establish whether the graph
$G_X$ corresponding to $X$ belongs to ${\cal F}$ or not. In the former case $X$ is a leaf node, in the
latter case we further try to find in $G_X$ a homogeneous set, which can be performed
in $O(n+m)$ time \cite{McCSpi1999}. If $G_X$ has a homogeneous set, then $X$ is
a homogeneous node and we decompose $G_X$ into the graphs induced by the vertices in and outside
the homogeneous set, respectively. If $G_X$ does not have a homogeneous set, then $X$
is an antineighborhood node, and the decomposition of $G_X$ is equivalent to finding a
good vertex, which can be done by means of the algorithm $Q$.
Since there are $O(n^2)$ nodes in $T(G)$, the total time complexity for constructing
$T(G)$ is $O(n^{2 + \max\{ 2, p, q \}})$.
\end{proof}
Now we are ready to prove the main result of this section.
\begin{theorem}\label{theo:decomposition}
If ${\cal G}$ is a hereditary class such that the class ${\cal G}_p$ of prime graphs in ${\cal G}$
has the antineighborhood property, then the WID problem
can be solved in polynomial time for graphs in ${\cal G}$. \end{theorem} \begin{proof}
Let $G$ be an $n$-vertex graph in ${\cal G}$. To solve the WID problem for $G$, we construct
$T(G)$ and then traverse it bottom-up, deriving a solution for each node of $T(G)$ from the solutions
corresponding to the children of that node.
The construction of $T(G)$ requires a polynomial time by Lemma~\ref{lem:construct}.
For the instances corresponding to leaf-nodes of $T(G)$, the problem can be solved in polynomial time
by the antineighborhood property.
According to the discussion in Sections~\ref{subsec:modular} and~\ref{subsec:anti}, the solution for
an instance corresponding to an internal node can be derived from the solutions of its children
in polynomial time.
Finally, as there are $O(n^2)$ nodes in $T(G)$ (Lemma~\ref{tree}), the total running time to solve
the problem for $G$ is polynomial. \end{proof}
\subsection{Application to $(P_5,\overline{P_5})$-free graphs} \label{sec:P5}
In this section, we show that the WID problem can be solved efficiently for $(P_5,\overline{P_5})$-free graphs by means of the decomposition scheme described in Section~\ref{sec:had}. To this end, we will prove that the class of prime $(P_5,\overline{P_5})$-free graphs has the antineighborhood property. We start with several auxiliary results. The first of them is simple and we omit its proof.
\begin{observation}\label{obs:distAdj}
Let $G=(V,E)$ be a graph, and let $W \subset V$ induce a connected
subgraph in $G$. If a vertex $v \in V \setminus W$ distinguishes $W$, then $v$ distinguishes two adjacent vertices of $W$. \end{observation}
\begin{proposition}\label{st:distNonadj}
Let $G=(V,E)$ be a prime graph. If a subset $W\subset V$ has at least two vertices and is not a clique, then there exists a vertex $v \in V \setminus W$ which distinguishes two non-adjacent vertices of $W$. \end{proposition} \begin{proof}
Suppose, to the contrary, that none of the vertices in $V \setminus W$ distinguishes a pair of
non-adjacent vertices in $W$. If $G[W]$ has more than one connected component, then it is
easy to see that no vertex outside of $W$ distinguishes $W$. Hence,
$W$ is a homogeneous set in $G$, which contradicts the primality of $G$.
If $G[W]$ is connected, then $\overline{G[W]}$ has a connected component $C$ with at least two vertices, since
$W$ is not a clique. Then, by our assumption and Observation~\ref{obs:distAdj}, no vertex outside of $W$ distinguishes $C$. Also, by the choice of $C$, no vertex of $W$ outside of $C$ distinguishes $C$. Therefore, $V(C)$
is a homogeneous set in $G$. This contradiction completes the proof of the proposition. \end{proof}
\begin{lemma}\label{lem:atom}
If a $(P_5, \overline{P_5})$-free prime graph contains an induced copy of $2K_2$, then it has a
clique separator. \end{lemma} \begin{proof}
Let $G=(V,E)$ be a $(P_5, \overline{P_5})$-free prime graph containing an induced copy of $2K_2$.
Let $S \subseteq V$ be a minimal separator with the property that $G-S$ contains at least two non-trivial connected components, i.e. connected components with at least two vertices. Such a separator necessarily exists, since $G$ contains an induced $2K_2$.
It follows from the choice of $S$ that
\begin{itemize}
\item $G - S$ has $k \geq 2$ connected components $C_1, \ldots, C_k$;
\item $r \geq 2$ of these components, say $C_1, \ldots, C_r$, have at least two vertices, and
all the other components $C_{r+1}, \ldots, C_k$ are trivial;
\item every vertex in $S$ has a neighbour in each of the non-trivial components
$C_1, \ldots, C_r$ (since $S$ is minimal);
\item for every $i \in \{ r+1, \ldots, k \}$, the unique vertex of the trivial component $C_i$
has a neighbour in $S$ (since $G$ is connected).
\end{itemize}
In the remaining part of the proof, we show that $G$ has a clique separator.
Let us denote
$U_i = V(C_i)$ for $i = 1, \ldots, k$. We first observe the following.
\vskip1ex
\textbf{Claim 1.} \textit{Any vertex in $S$ distinguishes at most one of the sets $U_1, \ldots, U_r$.}
\vskip1ex
\textit{Proof.} Assume $v \in S$ distinguishes $U_i$ and $U_j$ for distinct $i,j \in [r]$. Then by
Observation~\ref{obs:distAdj} $v$ distinguishes two adjacent vertices $a,b$ in $U_i$ and two adjacent
vertices $c,d$ in $U_j$. But then $a,b,v,c,d$ induce a forbidden $P_5$.
\vskip1ex
According to Claim 1, the set $S$ can be partitioned into subsets $S_0, S_1, \ldots, S_r$,
where the vertices of $S_0$
dominate every member of $\{ U_1, \ldots, U_r \}$, and for each $i \in [r]$, the vertices
of $S_i$ distinguish $U_i$ and dominate $U_j$ for all $j$ different from $i$.
Moreover, for each $i \in [r]$ the set $S_i$ is non-empty, as the graph $G$ is prime.
Now we prove two more auxiliary claims.
\vskip1ex
\textbf{Claim 2.} \textit{For $0 \leq i < j \leq r$, every vertex in $S_i$ is adjacent to every vertex in $S_j$.}
\vskip1ex
\textit{Proof.} Assume that the claim is false, i.e. there exist two non-adjacent vertices $s_i \in S_i$
and $s_j \in S_j$. By Observation \ref{obs:distAdj} there exist two adjacent vertices $a,b \in U_j$ that
are distinguished by $s_j$. But then $s_i, s_j, a, b$ and any vertex in $N(s_i) \cap U_i$ induce
a forbidden $\overline{P_5}$, a contradiction.
\vskip1ex
\textbf{Claim 3.} \textit{For $i \in [r]$, no vertex in $U_i$ distinguishes two non-adjacent
vertices in $S_i$.}
\vskip1ex
\textit{Proof.} Assume that there exists a pair of non-adjacent vertices $x,y \in S_i$ that are
distinguished by a vertex $u_i \in U_i$. Let $j \in [r] \setminus \{ i \}$, and let $s_j \in S_j$
and $u_j \in U_j \setminus N(s_j)$. Then, since $s_j$ dominates $S_i$, we have that $u_j, x, y, s_j, u_i$
induce a forbidden $\overline{P_5}$, a contradiction.
\vskip1ex
We split further analysis into two cases.
\textit{Case 1}: there is at least one trivial component in $G \setminus S$, i.e. $k > r$.
For $i \in \{ r+1, \ldots, k \}$ we denote by $u_i$ the unique vertex of $U_i$.
Let $U = \{ u_{r+1}, \ldots, u_k \}$ and let $u^*$ be a vertex in $U$ with a minimal (under inclusion)
neighbourhood. We will show that $N(u^*)$ is a clique,
and hence is a clique separator in $G$.
By Claim 2, it suffices to show that $N(u^*) \cap S_i$ is a clique for each $i \in \{0, 1, \ldots, r\}$.
Suppose that for some $i$ the set $N(u^*) \cap S_i$ is not a clique.
Then, by Proposition~\ref{st:distNonadj}, there are two nonadjacent vertices $x,y \in N(u^*) \cap S_i$
distinguished by a vertex $z \in V \setminus (N(u^*) \cap S_i)$. It follows from Claims 2 and 3
that either $z \in S_i \setminus N(u^*)$ or $z \in U$. If $z \in S_i \setminus N(u^*)$, then
$u^*, x, y, z,$ and any vertex in $U_j$, $j \in [r] \setminus \{ i \}$ induce a forbidden
$\overline{P_5}$, a contradiction.
Hence, assume that none of the vertices in $S \setminus (N(u^*) \cap S_i)$
distinguishes two nonadjacent vertices in $N(u^*) \cap S_i$.
If $z \in U$, with $z$ being nonadjacent to $x$ and adjacent to $y$, then by the minimality
of $N(u^*)$
there is a vertex $s \in N(z)$ that is not adjacent to $u^*$. Since $N(z) \subseteq S$, vertex $s$
does not distinguish $x$ and $y$. But then $x, u^*, y, z, s$ induce either a $P_5$ (if $s$ is adjacent
neither to $x$ nor to $y$) or a $\overline{P_5}$ (if $s$ is adjacent to both $x$ and $y$), a contradiction.
\vskip1ex
\textit{Case 2}: there are no trivial components in $G \setminus S$, i.e. $k = r$.
First, observe that $|S_0| \leq 1$, since $G$ is prime and no vertex outside of $S_0$ distinguishes $S_0$
(which follows from the definition of $S_0$, Claim 2 and the fact that $k = r$). Further,
Claims 2 and 3 imply that for each $i \in [r]$ no vertex in $V \setminus S_i$ distinguishes two
nonadjacent vertices in $S_i$. Therefore, applying Proposition~\ref{st:distNonadj} we conclude that
$S_i$ is a clique. Hence $S = \bigcup_{i=0}^{r} S_i$ is a clique separator in $G$. \end{proof}
\begin{lemma}\label{lem:perm}
Let $G$ be a $(P_5, \overline{P_5})$-free prime graph containing an induced copy of $2K_2$.
Then $G$ contains a permissible antisimplicial vertex. \end{lemma} \begin{proof}
By Lemma \ref{lem:atom} graph $G$ has a clique separator, and therefore it also
has a minimal clique separator $S$.
Let $C_1, \ldots, C_k$, $k \geq 2$, be connected components of $G-S$, and $U_i = V(C_i)$,
$i = 1, \ldots, k$.
Since $S$ is a minimal separator, every vertex in $S$ has at least one neighbour in each of the sets
$U_1, \ldots, U_k$.
By Claim 1 in the proof of Lemma~\ref{lem:atom}, any vertex in $S$ distinguishes at most one of
the sets $U_1, \ldots, U_k$, and therefore, the set $S$
partitions into subsets $S_0, S_1, \ldots, S_k$, where the vertices of $S_0$
dominate every member of $\{ U_1, \ldots, U_k \}$, and for each $i \in [k]$ the vertices
of $S_i$ distinguish $U_i$ and dominate $U_j$ for all $j$ different from $i$.
If $S_0 \neq \emptyset$, then any vertex in $S_0$ is adjacent to all the other vertices in the graph,
and therefore it is permissible and antisimplicial. Hence, without loss of generality, assume that
$S_0 = \emptyset$ and $S_1 \neq \emptyset$.
Let $s$ be a vertex in $S_1$ with a maximal (under inclusion) neighbourhood in $U_1$.
We will show that $s$ is antisimplicial and permissible.
Suppose that the graph induced by the antineighbourhood of $s$ contains a
connected component $C$ with at least two vertices. Since $G$ is prime, by Observation~\ref{obs:distAdj} it must
contain a vertex $p$ outside of $C$ distinguishing two adjacent vertices $q$ and $t$ in $C$.
Then $p$ does not belong to $N(s) \cap U_1$, since otherwise $q, t, p, s$ together with any
vertex in $U_2$ would induce a $P_5$. Therefore, $p$ belongs to $S_1$.
Since the set $N(s) \cap U_1$ is maximal, it contains a vertex $y$ nonadjacent to $p$.
But now $t, q, p, s, y$ induce either a $P_5$ or its complement, as $y$ does not distinguish
$q$ and $t$.
This contradiction shows that every component in the graph induced by the antineighbourhood
of $s$ is trivial, i.e. $s$ is antisimplicial.
Assume now that $s$ is not permissible, i.e. there exists an independent dominating set $I$ in
$G - s$ that does not contain a neighbour of $s$. Since $s$ dominates $U_2 \cup \ldots \cup U_k$,
the set $I$ is a subset of $U_1 \setminus N(s)$. But then $I$ is not dominating, since no vertex of $U_2$ has a neighbour in $I$. This contradiction completes the proof of the lemma. \end{proof}
\begin{lemma}\label{lemm:anti}
The class of prime $(P_5,\overline{P_5})$-free graphs has the antineighborhood
property. \end{lemma} \begin{proof}
Let ${\cal F}$ be the class of $(2K_2,\overline{P_5})$-free graphs (this is a subclass of
$(P_5,\overline{P_5})$-free graphs, since $2K_2$ is an induced subgraph of $P_5$).
Clearly, graphs in ${\cal F}$ can be recognized in polynomial time.
Moreover, the WID problem can be solved in polynomial time for graphs in ${\cal F}$,
because the problem is polynomially solvable on $2K_2$-free graphs (according to \cite{BY},
these graphs have polynomially many maximal independent sets).
If a prime $(P_5,\overline{P_5})$-free graph $G=(V,E)$ does not belong to ${\cal F}$, then
by Lemma \ref{lem:perm} it contains a permissible vertex $v$ whose antineighbourhood is
an independent set, and therefore, $G - N(v) \in {\cal F}$.
It remains to check that a permissible antisimplicial vertex in $G$ can be found in polynomial time.
It follows from the proof of Lemma~\ref{lem:perm} that in a minimal clique separator of $G$
any vertex with a maximal neighbourhood is permissible and antisimplicial. A minimal
clique separator in a graph can be found in polynomial time \cite{Whitesides1981}, and therefore
the desired vertex can also be computed efficiently. \end{proof}
Now the main result of the section follows from Theorem~\ref{theo:decomposition} and Lemma~\ref{lemm:anti}.
\begin{theorem}\label{thm:house}
The WID problem is polynomial-time solvable in the class of $(P_5,\overline{P_5})$-free graphs. \end{theorem}
\section{WID in $(P_5, \overline{P_3+P_2})$-free graphs} \label{sec:new}
To solve the problem for $(P_5, \overline{P_3+P_2})$-free graphs, let us introduce the following notation: for an arbitrary graph $F$, we denote by $F^*$ the graph obtained from $F$ by adding three new vertices, say $b,c,d$, such that $b$ dominates (is adjacent to each vertex of) $F$, while $c$ is adjacent to $b$ and $d$ only (see Figure~\ref{fig:coP5star} for an illustration in the case $F=\overline{P}_5$). The importance of this notation is due to the following result proved in \cite{LozMosPur2015}. \begin{theorem}\label{th:P5Fstar}
Let $F$ be any connected graph. If the WID problem can be solved in polynomial time for
$(P_5,F)$-free graphs, then this problem can also be solved in polynomial time for $(P_5, F^*)$-free
graphs. \end{theorem}
This result together with Theorem~\ref{thm:house} leads to the following conclusion. \begin{corollary}\label{cor:star} The WID problem is polynomial-time solvable in the class of $(P_5,\overline{P_5}^*)$-free graphs. \end{corollary}
To solve the problem for $(P_5, \overline{P_3+P_2})$-free graphs, in this section we reduce it to $(P_5, \overline{P_3+P_2}, \compPfiveStar)$-free graphs, where the problem is solvable in polynomial time by Corollary~\ref{cor:star}.
Let $G$ be a $(P_5, \overline{P_3+P_2})$-free graph containing a copy of $\overline{P_5}^*$ induced by vertices $a_1, a_2, a_3, a_4, a_5, b, c, d$, as shown in Figure~\ref{fig:coP5star}. \begin{figure}
\caption{The graph $\overline{P_5}^*$}
\label{fig:coP5star}
\end{figure}
\noindent Denote by $U$ the set of vertices in $G$ that have at least one neighbour in $\{ a_1, a_2, a_3, a_4, a_5\}$, that is, $U = N(a_1) \cup \ldots \cup N(a_5)$. In particular, $\{ a_1, a_2, a_3, a_4, a_5,b\}$ is a subset of $U$. We assume that \begin{itemize} \item[(**)] the copy of $\overline{P_5}^*$ in $G$ is chosen in such a way that $U$ has the minimum number of elements. \end{itemize} Now we prove several auxiliary results about the structure of $G$.
\begin{proposition}\label{prop:U2sep}
If a vertex $x \in U$ has a neighbour $y$ outside of $U$, then $x$ is adjacent to each of the vertices
$a_1, a_2, a_3, a_4$. \end{proposition} \begin{proof}
Let $A = \{ a_1, a_2, a_3, a_4 \}$.
Note that if $x$ is adjacent to $a_5$, then it must be adjacent to at least one vertex in $A$,
since otherwise a forbidden $P_5$ arises.
If $x$ is adjacent to exactly one or to exactly two adjacent vertices in $A$, then $\{ x,y \} \cup A$
induces a subgraph containing a forbidden $P_5$.
If $x$ is adjacent to exactly two non-adjacent vertices in $A$, say $a_1$ and $a_3$, then $x$
must be adjacent to $a_5$, since otherwise $y,x,a_3,a_2,a_5$ induce a $P_5$. But this is
impossible, since in this case $x,a_1,a_2,a_3,a_5$ induce a $\overline{P_3+P_2}$.
Finally, if $x$ has exactly three neighbours in $A$, then $\{x\} \cup A$ induces a forbidden
$\overline{P_3+P_2}$. Therefore, $x$ must be adjacent to every vertex in $A$. \end{proof}
\noindent Taking into account Proposition~\ref{prop:U2sep}, we partition the set $U$ into three subsets as follows:
\begin{itemize}
\item[$U_1$] consists of the vertices of $U$ that are adjacent to each of the vertices $a_1, a_2, a_3, a_4$,
and have at least one neighbour outside of $U$;
\item[$U_2$] consists of the vertices of $U$ that are adjacent to each of the vertices $a_1, a_2, a_3, a_4$,
but have no neighbours outside of $U$;
\item[$U_3$]$= U \setminus (U_1 \cup U_2)$. \end{itemize} Notice that $U_1$ is non-empty as it contains $b$. Also $\{ a_1, a_2, a_3, a_4, a_5 \} \subseteq U_3$, and no vertex in $U_3$ has a neighbour outside of $U$.
\begin{proposition}\label{prop:U2clique}
$U_1$ is a clique in $G$. \end{proposition} \begin{proof}
Suppose to the contrary that $U_1$ contains two non-adjacent vertices $x_1$ and $x_2$. Also, let $y_1$ and $y_2$ be neighbours of $x_1$ and $x_2$ outside of $U$,
respectively. Vertex $y_1$ is not adjacent to $x_2$, since otherwise $x_1,x_2,a_1,a_2,y_1$
induce a $\overline{P_3+P_2}$. Similarly, $y_2$ is not adjacent to $x_1$. Hence $y_1 \neq y_2$,
and therefore, to avoid a copy of $P_5$ induced by $y_1,x_1,a_1,x_2,y_2$, vertices $y_1$ and
$y_2$ must be adjacent. For the same reason, $a_5$ should be adjacent to both $x_1$ and $x_2$.
But then $x_1,x_2,a_3,a_4,a_5$ induce a copy of the forbidden $\overline{P_3+P_2}$, a contradiction. \end{proof}
\begin{proposition}\label{prop:coP5starFree}
The graph $G[U_2 \cup U_3]$ is $\overline{P_5}^*$-free. \end{proposition} \begin{proof}
Suppose to the contrary that $G[U_2 \cup U_3]$ contains vertices $a_1',a_2',a_3',a_4',a_5',b',c',d'$
inducing a $\overline{P_5}^*$ (similarly to Figure~\ref{fig:coP5star}).
Since no vertex in $U_2 \cup U_3$ has a neighbour outside of $U$ in $G$, and $c',d'$ are not adjacent
to any of the vertices $a_1',a_2',a_3',a_4',a_5'$, we conclude that
$|N(a_1') \cup \ldots \cup N(a_5')| \leq |U|-2$, which contradicts the minimality of $|U|$. \end{proof}
Now we describe a reduction from the graph $G$ with a weight function $w$ to a graph $G'$
with a weight function $w'$, where $|V(G')| \leq |V(G)|-4$, $G'$ is $(P_5, \overline{P_3+P_2})$-free, and $id_w(G) = id_{w'}(G')$. First, we define $G'$ as the graph obtained from $G$ by \begin{enumerate}
\item removing the vertices of $U_3$;
\item adding edges between any two non-adjacent vertices in $U_1 \cup U_2$;
\item adding a new vertex $u$ adjacent to every vertex in $U_1 \cup U_2$. \end{enumerate}
Clearly, $|V(G')| \leq |V(G)|-4$, as the set $U_3$ of the removed vertices contains at least 5 elements and we add exactly one new vertex $u$. In the next proposition, we show that the above reduction does not produce any of the forbidden subgraphs.
\begin{proposition}\label{prop:GprimeForb}
The graph $G'$ is $(P_5, \overline{P_3+P_2})$-free. \end{proposition} \begin{proof}
Note that the graph $G' - (U_2 \cup \{u\})$ is isomorphic to $G - (U_2 \cup U_3)$, and therefore it contains
no $P_5$ or $\overline{P_3+P_2}$ as an induced subgraph.
Hence, if $G'$ contains a forbidden subgraph, then at least one of the vertices of this subgraph
should lie in $U_2 \cup \{ u \}$.
By construction of $G'$ and the definition of $U_2$, the set $U_2 \cup \{ u \}$ is a clique, and every vertex
in this set is simplicial in $G'$.
Therefore, no vertex of $U_2 \cup \{ u \}$ can be a part of an induced copy of $\overline{P_3+P_2}$.
Also, $U_2 \cup \{ u \}$ can contain at most one vertex of an induced copy of $P_5$,
and if $U_2 \cup \{ u \}$ contains such a vertex, it must be a degree-one vertex of the $P_5$.
Suppose to the contrary that $G'$ contains a copy of $P_5$ induced by $v_1,v_2,v_3,v_4,v_5$
with $v_1 \in U_2 \cup \{ u \}$ and $\{ v_2,v_3,v_4,v_5 \} \subseteq V(G') \setminus (U_2 \cup \{ u \})$.
But then $a_1,v_2,v_3,v_4,v_5$ induce a forbidden $P_5$ in $G$, a contradiction. \end{proof}
\noindent Now we define a weight function $w'$ on the vertex set of $G'$ as follows: \begin{enumerate}
\item $w'(x) = w(x)$, for every $x \in V(G') \setminus (\{ u \} \cup U_1 \cup U_2)$;
\item $w'(u) = id_w(G[U_3])$;
\item $w'(x) = w(x) + id_w(G[U \setminus N[x]])$, for every $x \in U_1$;
\item $w'(x) = w(x) + id_w(G[U \setminus (U_1 \cup N[x])])$, for every $x \in U_2$. \end{enumerate}
\begin{lemma}\label{lem:polyGprime}
Given a weighted graph $(G,w)$, the weighted graph $(G',w')$ can be constructed in polynomial time. \end{lemma} \begin{proof}
To construct $G'$ we need to find in $G$ an induced copy of $\overline{P_5}^*$ that minimizes $|U|$. Clearly, this can be done in polynomial time.
To show that $w'$ can be computed in polynomial time we observe that each of the graphs
$G[U_3]$,
$G[U \setminus (U_1 \cup N[x])]$ for $x \in U_2$, and
$G[U \setminus N[x]]$ for $x \in U_1$ is an induced subgraph of $G[U_2 \cup U_3]$.
This observation together with Proposition~\ref{prop:coP5starFree} and Corollary~\ref{cor:star}
imply the desired conclusion and finish the proof of the lemma. \end{proof}
Now let us show that $id_w(G) = id_{w'}(G')$. For this, we will need two auxiliary propositions.
\begin{proposition}\label{prop:U3}
Any independent dominating set in $G[U_3]$ dominates $U_1 \cup U_2$. \end{proposition} \begin{proof}
Let $A = \{ a_1, a_2, a_3, a_4 \}$, and
let $I$ be an independent dominating set in $G[U_3]$.
If $I$ contains at least one of the vertices from $A$, then $I$ dominates $U_1 \cup U_2$,
so we assume that $I \subseteq U_3 \setminus A$. Note that a vertex $x \in U_3 \setminus A$
has at most two neighbours in $A$. Indeed, $x$ cannot have four neighbours by the definition of $U_3$,
and it cannot have three neighbours, since otherwise $\{x\} \cup A$ induces a forbidden
$\overline{P_3+P_2}$.
Now, if $I$ contains a vertex $x \in U_3 \setminus A$ that is adjacent to
$a_1$ and $a_3$, then $I$ dominates $U_1 \cup U_2$, since otherwise $x$ together with
$a_1, a_2, a_3$ and a non-neighbour of $x$ in $U_1 \cup U_2$ induce a forbidden
$\overline{P_3+P_2}$.
Assume that $I$ contains none of the above vertices. Then there exist vertices
$x,y \in I$ such that $x$ is adjacent to $a_1$ and non-adjacent to $a_3$, and $y$
is adjacent to $a_3$ and non-adjacent to $a_1$. If $I$ does not dominate $U_1 \cup U_2$,
then there exists a vertex $z \in U_1 \cup U_2$ that is adjacent neither to $x$ nor to $y$.
But then $x,a_1, z, a_3, y$ induce a forbidden $P_5$.
\end{proof}
\begin{proposition}\label{prop:U1}
For every vertex $x \in U_2$, any independent dominating set in the graph $G - U$ dominates
$U_1 \setminus N(x)$. \end{proposition} \begin{proof}
Suppose to the contrary that there exists an independent dominating set $I$ in the graph $G-U$
that does not dominate a vertex $y \in U_1 \setminus N(x)$.
By the definition of $U_1$, vertex $y$ has a neighbour $z$ in $V(G) \setminus U$.
Since $I$ is dominating in $G-U$, there exists a vertex $v \in I$ that is adjacent to $z$.
But then $v,z,y,a_1,x$ induce a forbidden $P_5$, a contradiction. \end{proof}
\begin{lemma}\label{lem:weight}
For any weighted graph $(G,w)$, we have $id_w(G) = id_{w'}(G')$. \end{lemma} \begin{proof}
First, we show that $id_w(G) \geq id_{w'}(G')$.
Let $I$ be an independent dominating set of the minimum weight in $G$. We distinguish between
the following three cases:
\begin{enumerate}
\item $I \cap U_1 \neq \emptyset$. \\
By Propositions~\ref{prop:U2sep} and \ref{prop:U2clique}, the set $U_1$ is a clique separating
$V(G) \setminus U$ from $U \setminus U_1$. Therefore, $I$ has only one element in $U_1$,
say $x$, and:
$$
id_w(G) = w(x) + id_w(G[U \setminus N[x]]) + id_w(G - (U \cup N[x])).
$$
Consequently
$$
id_w(G) = w'(x) + id_{w'}(G' - N[x]) \geq id_{w'}(G').
$$
\item $I \cap U_1 = \emptyset$ and $I \cap U_2 \neq \emptyset$. \\
Let $x \in I \cap U_2$. Then using Proposition~\ref{prop:U1}
$$
id_w(G) = w(x) + id_w(G[U \setminus (U_1 \cup N[x])]) + id_w(G - U) =
w'(x) + id_{w'}(G' - N[x]) \geq id_{w'}(G').
$$
\item $I \cap (U_2 \cup U_1) = \emptyset$. \\
In this case, taking into account Proposition \ref{prop:U3}, we conclude that
$$
id_w(G) = id_w(G[U_3]) + id_w(G - U) = w'(u) + id_{w'}(G' - N[u]) \geq id_{w'}(G').
$$
\end{enumerate}
Let us now prove the reverse inequality $id_w(G) \leq id_{w'}(G')$.
Let $I$ be an independent dominating set of the minimum weight in $G'$. Since $u$ does not have
neighbours outside of $U_1 \cup U_2$, and $\{ u \} \cup U_1 \cup U_2$ is a clique in $G'$, the set $I$
has exactly one element in $\{ u \} \cup U_1 \cup U_2$, which we denote by $x$.
Similarly to the first part of the proof, we consider three cases:
\begin{enumerate}
\item $x \in U_1$. \\
In this case
$$
id_{w'}(G') = w'(x) + id_{w'}(G' - N[x]) = w(x) + id_{w}(G[U \setminus N[x]]) +
id_w(G - (U \cup N[x])) \geq id_w(G).
$$
\item $x \in U_2$.\\
In this case, by Proposition~\ref{prop:U1},
$$
id_{w'}(G') = w'(x) + id_{w'}(G'-N[x]) =
w(x) + id_{w}(G[U \setminus (U_1 \cup N[x]) ]) + id_w(G-U) \geq id_w(G).
$$
\item $x = u$.\\
In this case, by Proposition~\ref{prop:U3},
$$
id_{w'}(G') = w'(x) + id_{w'}(G'-N[x]) = id_w(G[U_3]) + id_w(G-U) \geq id_w(G).
$$
\end{enumerate} \end{proof}
\noindent Now we are ready to prove the main result of this section.
\begin{theorem}
The WID problem is solvable in polynomial time for $(P_5, \overline{P_3+P_2})$-free graphs. \end{theorem} \begin{proof}
Let $(G,w)$ be an $n$-vertex $(P_5, \overline{P_3+P_2})$-free weighted graph.
If $G$ contains an induced copy of $\overline{P_5}^*$, then by Proposition~\ref{prop:GprimeForb}, and
Lemmas~\ref{lem:polyGprime} and~\ref{lem:weight}, the graph $(G,w)$ can be transformed in polynomial
time into a $(P_5, \overline{P_3+P_2})$-free weighted graph $(G',w')$ with at most $n-4$ vertices such that
$id_w(G) = id_{w'}(G')$.
Repeating this procedure at most $\lfloor n/4 \rfloor$ times we obtain a $(P_5, \overline{P_3+P_2}, \compPfiveStar)$-free weighted
graph $(H,\sigma)$ such that $id_w(G) = id_{\sigma}(H)$.
By Corollary~\ref{cor:star} the WID problem for $(H,\sigma)$ can be solved in polynomial time.
Finally, it is not difficult to see that a polynomial-time procedure computing $id_w(G)$ can be easily transformed into a polynomial-time algorithm finding an independent dominating set of weight $id_w(G)$. \end{proof}
\section{Concluding remarks and open problems}
In this paper, we proved that \textsc{weighted independent domination} can be solved in polynomial time for $(P_5,\overline{P}_5)$-free graphs and $(P_5, \overline{P_3+P_2})$-free graphs. A natural question to ask is whether these results can be extended to a class defined by one forbidden induced subgraph.
From the results in \cite{BL03} it follows that in the case of one forbidden induced subgraph $H$ the problem is solvable in polynomial time {\it only if} $H$ is a linear forest, i.e. a graph every connected component of which is a path. On the other hand, it is known that this necessary condition is not sufficient, since {\sc independent domination} is NP-hard in the class of $2P_3$-free graphs. This follows from the fact that all sat-graphs are $2P_3$-free \cite{Zverovich06}.
In the case of a {\it disconnected} forbidden graph $H$, polynomial-time algorithms to solve {\sc weighted independent domination} are known only for $mP_2$-free graphs for any fixed value of $m$. This follows from a polynomial bound on the number of maximal independent sets in these graphs \cite{BY}. The unweighted version of the problem can also be solved for $P_2+P_3$-free graphs \cite{LozMosPur2015}. However, for weighted graphs in this class the complexity status of the problem is unknown.
\begin{problem} Determine the complexity status of {\sc weighted independent domination} in the class of $P_2+P_3$-free graphs. \end{problem}
In the case of a {\it connected} forbidden graph $H$, i.e. in the case when $H=P_k$, the complexity status is known for $k\ge 7$ (as $P_7$ contains a $2P_3$) and for $k\le 4$ (as $P_4$-free graphs are precisely the cographs). Therefore, the only open cases are $P_5$-free and $P_6$-free graphs. As we mentioned in the introduction, the related problem of finding a maximum weight independent set (WIS) has been recently solved for $P_5$-free graphs \cite{P5}. This result makes the class of $P_5$-free graphs of particular interest for {\sc weighted independent domination} and we formally state it as an open problem.
\begin{problem} Determine the complexity status of {\sc weighted independent domination} in the class of $P_5$-free graphs. \end{problem}
We also mentioned earlier that a polynomial-time solution for WIS in a hereditary class $\cal X$ does not necessarily imply the same conclusion for WID in $\cal X$. However, in the reverse direction such examples are not known. We believe that such examples do not exist and propose this idea as a conjecture.
\begin{conjecture} If WID admits a polynomial-time solution in a hereditary class $\cal X$, then so does WIS. \end{conjecture}
\end{document} |
\begin{document}
\allowdisplaybreaks
\newcommand{1909.13211}{1909.13211}
\renewcommand{\thefootnote}{}
\renewcommand{009}{009}
\FirstPageHeading
\ShortArticleName{New Examples of Irreducible Local Diffusion of Hyperbolic PDE's}
\ArticleName{New Examples of Irreducible Local Diffusion\\ of Hyperbolic PDE's\footnote{This paper is a~contribution to the Special Issue on Algebra, Topology, and Dynamics in Interaction in honor of Dmitry Fuchs. The full collection is available at \href{https://www.emis.de/journals/SIGMA/Fuchs.html}{https://www.emis.de/journals/SIGMA/Fuchs.html}}}
\Author{Victor A. VASSILIEV~$^{\dag\ddag}$}
\AuthorNameForHeading{V.A.~Vassiliev}
\Address{$^\dag$~Steklov Mathematical Institute of Russian Academy of Sciences, Moscow, Russia} \EmailD{\href{mailto:vva@mi-ras.ru}{vva@mi-ras.ru}} \URLaddressD{\url{http://www.mi-ras.ru/~vva/}}
\Address{$^\ddag$~National Research University Higher School of Economics, Moscow, Russia}
\ArticleDates{Received September 29, 2019, in final form February 18, 2020; Published online February 24, 2020}
\Abstract{Local diffusion of strictly hyperbolic higher-order PDE's with constant coefficients at all {\em simple} singularities of corresponding wavefronts can be explained and recognized by only two local geometrical features of these wavefronts. We radically disprove the obvious conjecture extending this fact to arbitrary singularities: namely, we present examples of diffusion at all non-simple singularity classes of generic wavefronts in odd-dimensional spaces, which are not reducible to diffusion at simple singular points.}
\Keywords{wavefront; discriminant; critical point; morsification; vanishing cycle; hyperbolic PDE; fundamental solution; lacuna; sharp front; diffusion; Petrovskii condition}
\Classification{35L67; 58K05; 32-04}
\renewcommand{\thefootnote}{\arabic{footnote}} \setcounter{footnote}{0}
\section{Introduction} This is a work in {\em lacuna theory} of hyperbolic PDE's initiated by I.G.~Petrovskii~\cite{Petrovskii45} and expanded significantly by J.~Leray~\cite{Leray}, M.~Atiyah, R.~Bott and L.~G{\aa}rding \cite{ABG70, ABG73, Gording77}, and others. However, the methods of the work belong mainly to singularity theory of differentiable functions.
There are two known sources of local diffusion (i.e., the local irregularity of continuations) of solutions of strictly hyperbolic partial differential equations with constant coefficients: non-singular points of wavefronts, at which the Davydova signature condition fails, and cuspidal edges at which the investigated local connected component of the complement of a wavefront is the ``bigger'' one. All cases of local diffusion of waves at {\em simple} (i.e., of classes $A_k$, $D_k$, $E_6$, $E_7$ or $E_8$) singular points of wavefronts of generic hyperbolic PDE's can be reduced to these two: the diffusion arises in such a component in a neighbourhood of a simple singularity if and only if the boundary of this component contains points of one of these two basic types. In particular, this is true for all generic hyperbolic PDE's in the $\leq 7$-dimensional spaces.
We show that for non-simple singular points of wavefronts in ${\mathbb R}^N$, $N$ odd, this is no longer the case. Namely, we indicate local components of complements of generic wavefronts at their points of all ``parabolic'' singularity classes $P_8$, $X_9$, and $J_{10}$, such that solutions of the corresponding PDE's have diffusion at the most singular points of their boundaries only (i.e., at the points of these parabolic types), but are sharp at all simpler points. The singularities of these three classes occur close to all other non-simple singularities of generic wavefronts, therefore such additional examples of diffusion also occur at all of them.
The proofs use a program counting topological types of morsifications of critical points of real analytic functions.
\subsection{Hyperbolic PDE's of higher orders (after \cite{ABG70, ABG73}, see also \cite{GP,APLT})}
Let $F$ be a linear partial differential operator with constant coefficients: $F$ has the form of a~polynomial (generally with complex coefficients) in variables $\partial/\partial x_j$ where $x_j$ are the coordinates in~${\mathbb R}^N$. It is convenient to identify these variables $\partial/\partial x_j$ with coordinates~$\xi_j$ in dual space~$\check {\mathbb R}^N$ of our space ${\mathbb R}^N$, with pairing operation \begin{gather*} \langle (x_1, \dots, x_N),(\xi_1, \dots, \xi_N)\rangle =\sum_{j=1}^N x_j \xi_j. \end{gather*} The projectivization of this dual space is denoted by $\check {\mathbb R}P^{N-1}$; its $k$-dimensional subspaces correspond via the {\em projective duality} to $(N-k-2)$-dimensional subspaces in ${\mathbb R}P^{N-1}$.
Let $d$ be the degree of this polynomial $F$. The following {\em Cauchy problems} in the half-space ${\mathbb R}^N_+= \{x\,|\,x_1>0\}$ for this operator are considered. Given any regular function $\varphi$ in this half-space, and a collection of $d$ regular functions $\psi_k$, $k=0, 1, \dots, d-1,$ defined on its boundary hyperplane ${\mathbb R}^{N-1}_1 \equiv \{x_1 =0\}$ of this half-space, the task is to find a function $u$ in ${\mathbb R}^N_+$, such that $F(u) \equiv \varphi$ in ${\mathbb R}^N_+$, and $\frac{\partial^k}{\partial x_1^k} u \equiv \psi_k$ on hyperplane ${\mathbb R}^{N-1}_1$ for any $k=0, \dots, d-1$.
A {\em fundamental solution} of our operator $F$ is any distribution in ${\mathbb R}^N$ such that this operator applied to it is equal to the delta function at $0$. Operator $F$ is called {\em hyperbolic} (or sometimes {\em hyperbolic in the sense of Petrovskii}) if it admits a fundamental solution whose support belongs to a proper cone $S(F)$ in the half-space ${\mathbb R}^N_+$ with the vertex $0$ (i.e., this vertex is the unique intersection point of this cone with ${\mathbb R}^{N-1}_1$). Such a fundamental solution (if it exists) is uniquely determined by $F$; it is called {\em principal fundamental solution} of $F$ and is denoted by $E(F)$. It may be considered as a (generalized) wave caused by an instant pointwise perturbation and propagating in the future ($x_1>0$) part of our space-time only.
If $F$ is hyperbolic, then any Cauchy problem of the described form has a unique solution, which is regular and depends continuously on the data $\{\varphi; \psi_0, \dots, \psi_{d-1}\}$ of the problem; moreover, the value of this solution at any point $x \in {\mathbb R}^N_+$ depends only on the values of these data in the capsized cone $x-S(F)$ with the vertex at point $x$. Indeed, such a solution is provided by the convolution of $E(F)$ with the data of our problem.
The hyperbolicity property implies strong conditions on the {\em principal symbol} of $F$ (i.e., the homogeneous part $F_d$ of highest degree of polynomial $F$). First of all, if the operator $F$ is hyperbolic then this principal part is necessarily a real (up to a constant factor) polynomial; therefore studying solutions of the corresponding equation we can and will assume that it is just real. Further, the cone $A(F) \subset \check {\mathbb R}^N$ of zeros of this principal part $F_d$ should have exactly $d$ real intersection points (maybe counted with multiplicities) with any line in $\check {\mathbb R}^N$ parallel to the $\xi_1$ axis (i.e., to the line orthogonal to the hyperplane ${\mathbb R}^{N-1}_1$).
If this cone $A(F)$ is a non-singular manifold outside the origin in $\check {\mathbb R}^N$, then this condition is also sufficient for hyperbolicity of entire operator $F$. Otherwise some additional conditions on the lower terms of $F$ should be satisfied.
Hyperbolic operators with such smooth cones $A(F)$ are called {\em strictly hyperbolic}. They form a dense subset in the space of all hyperbolic operators. We consider in this work only strictly hyperbolic operators.
The principal fundamental solution of any hyperbolic operator is regular (i.e., locally coincides with appropriate non-singular analytic functions) everywhere outside the {\em wavefront} $W(F)$, which is a conic (i.e., invariant under positive dilations) closed semi-algebraic subvariety of positive codimension in ${\mathbb R}^N_+$; moreover the support of this principal fundamental solution lies in the convex hull of the wavefront.
Namely, the wavefront of a {\em strictly} hyperbolic operator consists of points $x \in {\mathbb R}^N_+$ such that their orthogonal hyperplanes in the space $\check {\mathbb R}^N$ are tangent to the cone $A(F)$. For general hyperbolic operators also some points corresponding to the hyperplanes ``not in general position'' with the singular set of $A(F)$ should be added.
Given a point $x \in W(F)$, the principal fundamental solution $E(F)$ is called {\em holomorphically sharp} in a local (close to $x$) connected component $C$ of the regularity domain ${\mathbb R}^N \setminus W(F)$ if there is a smooth analytic function in a neighbourhood of $x$ in ${\mathbb R}^N$ coinciding with $E(F)$ in the intersection of this neighbourhood and this component $C$. $E(F)$ is {\em $C^\infty$-sharp} in such a~component if it can be extended to a $C^\infty$-smooth function on the closure of this component.
\begin{Example} The wavefront of the standard wave operator \begin{gather*} \frac{\partial^2}{\partial x_1^2} - c^2\left(\frac{\partial^2}{\partial x_2^2} +\frac{\partial^2}{\partial x_3^2} \right) \end{gather*}
in ${\mathbb R}^3$ is the cone \begin{gather*}c^2x_1^2= x_2^2+x_3^2, \qquad x_1 \geq 0; \end{gather*}
the principal fundamental solution of this operator is sharp at all its points in the ``exterior'' component of the complement of $W(F)$, and is not in the interior one (growing there asymptotically as $\text{(distance from the wavefront)}^{-1/2}$ close to the regular points of this cone). On the contrary, principal fundamental solutions of wave operators in space-time of even dimension greater than~2 are sharp from both sides of the cone. \end{Example}
A local component of ${\mathbb R}^N \setminus W(F)$, in which the fundamental solution is sharp, is called a~(holomorphic or $C^\infty$-) {\em local lacuna}. The negation of sharpness is called a {\em diffusion} of waves.
\begin{Remark}[on terminology] Roughly, elementary wave~$E(F)$ is sharp at a point of its front, if its singularity behaves like a squall, not predictable by the behaviour of the regular part of the wave before meeting with it. Diffusion is an opposite situation: in this case the shock front spreads some signs of its occurrence around it. The word ``lacuna'' was originally applied by Petrovskii to the domains where the fundamental solution is just equal to zero; so it indicated also the areas in~${\mathbb R}^N_+$ and~${\mathbb R}^{N-1}_1$ such that the values of the data of Cauchy problem in them are not important for the behavior of the solution of the problem in a given point~$x$. The {\em local} lacunas were introduced in \cite{ABG70, ABG73}. \end{Remark}
Wavefronts can be singular along the rays in ${\mathbb R}^N_+$, which correspond by projective duality to {\it parabolic points} of the projectivization $A^*(F) \subset \check {\mathbb R}P^{N-1}$ of the cone $A(F) \subset \check {\mathbb R}^N$, i.e., the points at which the second fundamental form of this hypersurface in $\check {\mathbb R}P^{N-1}$ is degenerate.
\begin{figure}
\caption{Projectivized zero set and wavefront of a cubic operator in ${\mathbb R}^3$.}
\label{cubic}
\end{figure}
\begin{Example} The projectivization of the zero set of a non-singular homogeneous polynomial in $\check {\mathbb R}^3$ is a cubic curve in the projective plane $\check {\mathbb R}P^2$, which can consist either of two components as in Fig.~\ref{cubic} (left) or only the right-hand one of these components. The one-component case never is hyperbolic, and the two-component one is hyperbolic if the $\xi_1$-axis is directed inside the domain bounded by its left-hand component. The wavefront of the latter operator looks as a~cone in~${\mathbb R}^3_+$ whose image in ${\mathbb R}P^2$ is shown in Fig.~\ref{cubic} (right): its three singular points come from three inflection points of the cubic curve (one of which is in our picture placed at infinity). \end{Example}
\begin{figure}
\caption{Cuspidal edge and swallowtail.}
\label{swallowtail}
\end{figure}
The properties of sharpness and diffusion at the points of a wavefront are invariant under dilations of ${\mathbb R}^N$. Therefore we will consider projectivizations $W^*(F) \subset {\mathbb R}P^{N-1}$ of wavefronts and speak on the diffusion or sharpness in a local connected component of ${\mathbb R}P^{N-1} \setminus W^*(F)$ if it holds in the preimage of this component under the obvious projection ${\mathbb R}^N_+ \setminus W(F) \to {\mathbb R}P^{N-1} \setminus W^*(F)$. These properties are related with the local geometry of the projectivized wavefront $W^*(F)$, which by construction is the variety projective dual to the projectivization $A^*(F) \subset \check {\mathbb R}P^{N-1}$ of the cone~$A(F)$. Two basic types of points of wavefronts spreading the diffusion in such components are described in the following two subsections.
A classification of singular points of projectivized wavefronts of strictly hyperbolic operators (and, more generally, of hypersurfaces projective dual to smooth ones) coincides with a classification of parabolic points of smooth hypersurfaces, and hence with a classification of critical points of smooth functions, see, e.g.,~\cite{AVG82}. The relation between them is provided by {\em generating functions} of these fronts, see Section~\ref{go} below. In particular, non-singular points of wavefronts correspond to non-parabolic points of $A^*(F)$ and to Morse critical points of generating functions, while the simplest standard singularities, {\em semicubic cuspidal edges} and {\em swallowtails}, shown correspondingly in the left and the right parts of Fig.~\ref{swallowtail}, are related with critical points of types~$A_2$ and~$A_3$. For the surfaces in ${\mathbb R}P^{N-1}$ with arbitrary $N\geq 4$, these pictures should be multiplied by a piece of ${\mathbb R}^{N-4}$. The singularities shown in Fig.~\ref{cubic} (right) for case $N=3$ have type~$A_2$: the cubic inflection points of curve $A^*(F)$ with local equation $\xi_0 = \xi_1^3 + O\big(\xi_1^4\big)$ in $\check {\mathbb R}P^2$ result in cusps of projective dual curve, which are locally ambient diffeomorphic to the semicubic parabola $\big\{x^3 + y^2 =0\big\}$; they appear as transversal slices of the picture of Fig.~\ref{swallowtail}.
\subsection{Davydova condition at non-singular points of a wavefront} \label{db}
A non-singular piece of a projectivized wavefront locally divides ${\mathbb R}P^{N-1}$ into two parts. Choose a normal direction to this wavefront $W^*(F)$ at a point of such a piece and consider the second fundamental form of the wavefront with respect to this direction. This quadratic form in $N-2$ variables is non-degenerate as the operator is strictly hyperbolic.
\begin{Definition}The {\em Davydova condition} \cite{Dav} is satisfied if the positive inertia index of this quadratic form is even. \end{Definition}
If this condition is not satisfied, then we surely have local diffusion in the component of the complement of the wavefront, indicated by the chosen normal direction (see \cite{Dav}). Moreover, the analytic continuation of the fundamental solution from this component to a neighbourhood of such a point of the wavefront in ${\mathbb C}^N$ has a two-fold ramification along the (complexified) wavefront. Conversely, if the Davydova condition holds, then the wavefront is sharp in this local component: this was proved in \cite{Bor} by analytic estimates, and explained in \cite{ABG73} in terms of monodromy and removable singularities.
\subsection{Cuspidal edges and generating functions} \label{go}
The simplest singularities of a wavefront in ${\mathbb R}P^{N-1}$ are its {\em cuspidal edges}, at whose points it is locally ambient diffeomorphic to the product of the semicubical parabola and ${\mathbb R}^{N-3}$, see Figs. \ref{cubic} (right) and \ref{swallowtail} (left). Space ${\mathbb R}P^{N-1}$ is locally divided by such a hypersurface into two parts, one of which is ``bigger'', and the other one ``smaller''. According to \cite{Gording77}, the principal fundamental solution is never sharp in the bigger component, but can be sharp in the smaller one under some additional conditions. To describe these conditions, we need the following notion of generating functions of wavefronts.
Let $X\in {\mathbb R}P^{N-1}$ be a point of a projectivized wavefront. The corresponding hyperplane in~$\check {\mathbb R}P^{N-1}$ (i.e., projectivization of the hyperplane in~$\check {\mathbb R}^N$ orthogonal to the line $\{X\} \subset {\mathbb R}^N$) is tangent to projectivized zero set $A^*(F) $ of the principal symbol of $F$ at a point $\xi$. Choose affine co\-or\-di\-nates $(\xi_0, \dots, \xi_{N-2})$ in $\check {\mathbb R}P^{N-1}$ with the origin in this point in such a way that this tangent plane is defined by equation $\xi_0=0$. Hypersurface~$A^*(F)$ can be then specified close to~$\xi$ by an equation of the form \begin{gather}\label{genfun} \xi_0=f(\xi_1, \dots, \xi_{N-2}), \end{gather} where $f(0)=0$, ${\rm d}f(0)=0.$ Variables $\xi_1, \dots, \xi_{N-2}$ form a system of local coordinates on $A^*(F)$. The projective duality map $A^*(F) \to {\mathbb R}P^{N-1}$, and also the germ of its image at~$X$, are then determined by our function~$f$: for any point of the variety $A^*(F)$ we draw the tangent hyperplane at this point, and mark the corresponding point in~${\mathbb R}P^{N-1}$.
\begin{Definition}\label{genf}The function germ~$f$ defined in this way is called a {\em projective generating function} of the wavefront at point $X \in {\mathbb R}P^{N-1}$ corresponding to the hyperplane $\{\xi_0=0\} \subset \check {\mathbb R}P^{N-1}$. \end{Definition}
It follows easily from this definition that any two generating functions of the same germ of the wavefront, defined by different choices of affine coordinates $\xi_i$, can be obtained one from the other by a diffeomorphism of space of arguments and a dilation of the target line by a non-zero (maybe negative) coefficient.
\begin{Remark} If occasionally the hyperplane corresponding to $X$ is tangent to hypersur\-fa\-ce~$A^*(F)$ at several of its points, then such a map is defined in a neighbourhood of any of these points; the germ of the wavefront at point~$X$ will then consist of several locally irreducible branches corresponding to all these tangency points and described by their projective generating functions. \end{Remark}
If generating function $f$ is Morse, then the corresponding piece of the wavefront is smooth (and was considered in the previous subsection). A cuspidal edge of the wavefront occurs when~$f$ has the simplest non-Morse singularity of type~$A_2$, see, e.g., \cite{ALGV, AVG82}. According to L.~G{\aa}rding~\cite{Gording77}, a wavefront is sharp at its cuspidal edge if and only if dimension~$N$ is odd, the inertia indices of the quadratic part of the function germ $f$ are even, and the investigated component of the complement of the wavefront is the ``smaller'' one. (The rank of the quadratic part of a function of this class depending on $N-2$ variables is equal to $N-3$, hence in the case of odd $N$ both inertia indices are of the same parity.) For example, these conditions are satisfied for the interior component in Fig.~\ref{cubic} (right).
\begin{Remark}\looseness=1 The part ``only if'' of this G{\aa}rding's result claims that diffusion at cuspidal edges appears in some seven cases, depending on the parities of~$N$ and inertia indices of the quadratic part of~$f$, and on the choice of a component. In some six out of these seven cases, diffusion follows already from the Davydova's obstruction: we can indicate a smooth piece of wavefront approaching the cuspidal edge, such that sharpness in our component fails at the points of this piece. The only exceptional case is that of odd~$N$ and odd inertia indices of~$f$: in this case the wavefront is sharp for the ``bigger'' component at all neighboring smooth points of its surface, however regularity of the fundamental solution fails when we approach its singular stratum. \end{Remark}
\subsection{Other simple singularities}
The most common equivalence relation of singularity classes of germs of functions $\big({\mathbb R}^n,0\big) \to {\mathbb R}$ is provided by the group $\operatorname{Diff}_0$ of local diffeomorphisms $\big({\mathbb R}^n, 0\big) \to \big({\mathbb R}^n,0\big)$ acting on functions by composition with these diffeomorphisms. The space of function germs splits into the orbits of this action. This splitting is not discrete: some singularity classes can depend on continuous parameters separating continuously many orbits of this action.
\looseness=1 A natural primary segment of the classification of singularities of smooth functions (and hence also of wavefronts) is formed by so-called {\em simple singularities} $A_k$, $D_k$, $E_6$, $E_7$ or~$E_8$, see~\cite{AVG82}. For this part of the classification the splitting into the $\operatorname{Diff}_0$-orbits is still discrete: by definition these are the singularities such that a neighbourhood of any of them in the function space is covered by representatives of only finitely many orbits of this action. The name ``simple'' and canonical notation of these classes come from a deep and multiform relation of these classes with the simple Lie algebras. All local lacunas close to singular points of wavefronts of these types were counted in~\cite{Vassiliev86} (except for cases~$A_2$ and~$A_3$ of cuspidal edges and swallowtails, which were in detail described by L.~G{\aa}rding~\cite{Gording77}). In~\cite{Vassiliev92} an easy geometric characterization of these lacunas was found: it turned out that all cases of the diffusion close to any simple singular points of generic wavefronts can be reduced to the two cases described above. Namely, the following theorem was proved there (the technical notion of projective versality used in it and satisfied for generic wavefronts will be defined in Section~\ref{prove}).
\begin{Theorem}[see \cite{Vassiliev92}] \label{v92} If projectivized wavefront $W^*(F)$ of a strictly hyperbolic operator $F$ has a simple singularity at its point $X$ and is {\em projective versal} at this point, then the principal fundamental solution of $F$ is sharp at point $X$ in a certain local connected component of the complement of $W^*(F)$ if and only if \begin{enumerate}\itemsep=0pt \item[$1)$] the Davydova condition is satisfied at all smooth points of $W^*(F)$ in the boundary of this component, and \item[$2)$] this boundary does not contain the cuspidal edges of $W^*(F)$, close to which our component is the ``bigger'' one. \end{enumerate} \end{Theorem}
\begin{Example}[see~\cite{Gording77}] Close to a swallowtail (see Fig.~\ref{swallowtail}), only the following components of the complement of the wavefront are local lacunas: component~2 if $N$ is odd and the positive inertia index $i_+$ of the quadratic part of the generating function is even; component~3 if $i_+$ is odd and $N$ is arbitrary. \end{Example}
\begin{Remark}\quad \begin{enumerate}\itemsep=0pt \item If $N$ is even, then condition 2) in this theorem is unnecessary: if it is not satisfied at an edge point, then condition 1) also fails at some neighboring smooth points of the wavefront. \item The restrictions from Sections~\ref{db} and~\ref{go} prove the part ``only if'' of this theorem (which holds also for not necessarily projective versal wavefronts); a proof of the part ``if'' is based on the properness of the so-called Lyashko--Looijenga maps of miniversal deformations of simple singularities, see \cite{Loo,Loor}. \end{enumerate} \end{Remark}
A natural question arising from this theorem (and formulated explicitly by V.P.~Pa\-la\-modov about 1991, see~\cite{Vassiliev92}) asks whether it can be extended to all singularity classes of wavefronts of strictly hyperbolic operators. We show below that the answer is negative in the case of odd~$N$ for {\em all} non-simple singularity classes.
\subsection{Generating families of wavefronts and versality}\label{prove}
The projectivized wavefront $W^*(F) \subset {\mathbb R}P^{N-1}$ can be considered close to any its point $X$ as the real discriminant variety of a deformation of the corresponding generating function~(\ref{genfun}). Namely, this deformation depends on the $(N-1)$-dimensional parameter ${\bf x} =(x_0, x_1, \dots, x_{N-2})$ and consists of functions $f_{\bf x}$ defined by formula \begin{gather} f_{\bf x} \equiv f(\xi_1, \dots, \xi_{N-2}) - x_0 - \sum_{j=1}^{N-2} x_j \xi_j . \label{prover} \end{gather} The parameters $x_0, \dots, x_{N-2}$ of this deformation form a coordinate system in an affine chart of space ${\mathbb R}P^{N-1}$ with the origin at the point $X$: this chart consists of hyperplanes in $\check {\mathbb R}P^{N-1}$ defined by equation \begin{gather}\label{exex} \xi_0 =x_0+ \sum_{j=1}^{N-2} x_j \xi_j \end{gather} in local coordinates $\xi_i$. Such a point ${\bf x}\in {\mathbb R}P^{N-1}$ belongs to the {\em real discriminant} of deformation~(\ref{prover}) (i.e., $f_{\bf x}$ has a real critical point close to $0$ with critical value $0$) if and only if the hyperplane in $\check {\mathbb R} P^{N-1}$ given by~(\ref{exex}) is tangent to~$A^*(F)$; so the real discriminant of (\ref{prover}) is exactly the variety projective dual to~$A^*(F)$.
We will consider the case when this deformation is sufficiently representative, namely is a~{\em versal deformation} of~$f$. Let us remind this notion (for an expanded description see~\cite{AVG82}).
Suppose we have a smooth function $\varphi(y)\colon {\mathbb R}^n \to {\mathbb R}$ with $d\varphi(0)=0$, and its deformation $\Phi(y, \lambda) \colon \big({\mathbb R}^n \times {\mathbb R}^l, 0\big) \to ({\mathbb R},0)$, i.e., a family of functions $\varphi_\lambda\colon {\mathbb R}^n \to {\mathbb R}$ depending on the parameter $\lambda \in {\mathbb R}^l$, $\varphi_0 \equiv \varphi$. This deformation is called {\em versal} if any other deformation $\Psi\colon \big({\mathbb R}^n \times {\mathbb R}^k ,0\big) \to ({\mathbb R},0) $ of the same function $\varphi$ can be reduced to it by an appropriate map of parameters and a family of local diffeomorphisms of the space ${\mathbb R}^n$ depending on these parameters: in formulas, there should be a smooth map $\theta\colon \big({\mathbb R}^k,0\big) \to \big({\mathbb R}^l,0\big)$ and a family of local (i.e., defined in a neighbourhood of the origin) diffeomorphisms $H_{\varkappa}\colon {\mathbb R}^n \to {\mathbb R}^n$ smoothly depending on the parameter $\varkappa \in {\mathbb R}^k$, $H_0 \equiv \mbox{\{the identity map\}}$, such that \begin{gather}\label{versdef} \Psi(y,\varkappa) = \Phi(H_{\varkappa}(y),\theta(\varkappa)) \ \mbox{ for any $y$ and $\varkappa$ sufficiently close to the origin} . \end{gather}
It is known that \begin{itemize}\itemsep=0pt \item For any function singularity $\varphi$ with finite Milnor number $\mu(\varphi)$, almost all its deformations depending on $\geq \mu(\varphi)$ parame\-ters are versal; \item such a singularity does not have versal deformations depending on less than $\mu(\varphi)$ parame\-ters (versal deformations depending on exactly $\mu(\varphi)$ parameters are called {\em miniversal}), \item if $\Phi$ is a versal deformation of $\varphi$ then for any sufficiently small perturbation $\tilde \varphi$ of our singularity $\varphi$ there is a point $\lambda \in {\mathbb R}^l$ close to the origin such that the functions $\tilde \varphi$ and $\varphi_\lambda$ can be transformed one into the other by a local diffeomorphism in~${\mathbb R}^n$; \item if $\Phi$ is a miniversal deformation of $\varphi$ (thus depending on $\mu(\varphi)$ parameters), and $\Psi$ an arbitrary versal deformation of~$\varphi$ depending on~$k$ parameters, then the corresponding map~$\theta$ in~(\ref{versdef}) is a~submersion close to the origin in ${\mathbb R}^k$, sending the discriminant of deformation~$\Psi$ to the discriminant of deformation $\Phi$. \end{itemize}
Similar notion and facts hold for singularities of holomorphic functions of complex variables.
\begin{Definition}Deformation (\ref{prover}) is called a (projective) generating family of the wave\-front~$W^*(F)$ at point~$X$. The hypersurface $A(F)$ is {\em projective versal} at its point $X$ if the corresponding deformation~(\ref{prover}) is versal, see~\cite{AVG82}. \end{Definition}
\begin{Example} Suppose that generating function $f$ has a singularity of type~$A_k$ at a point $\Xi \in \check{\mathbb R} P^{N-1}$. Then there is a smooth parametric curve $u\colon \big({\mathbb R}^1,0\big) \to \big({\mathbb R}^{N-2},0\big)$ such that the function $|{\rm grad}\, f|$ grows as the $k$th degree of the parameter along it. The linear hull of the first~$j$ derivatives of this curve at~$0$ is uniquely defined by this condition for any $j \leq k-1$. The hypersurface with equation $\xi_0 = f(\xi_1, \dots, \xi_{N-2})$ is projective versal at point~$\Xi$ if and only if all these first $k-1$ derivatives are linearly independent in~${\mathbb R}^{N-2}$. \end{Example}
The projective versality at simple singular points is a condition of general position, however for more complicated singularity classes, which split into continuous families of orbits of the group $\operatorname{Diff}_0$ of local changes of variables $\big({\mathbb R}^n,0\big) \to \big({\mathbb R}^n,0\big)$, this is generally not true. For instance, a generic hyperbolic operator of order $\geq 3$ in~${\mathbb R}^8$ can have singularities of type~$P_8$ (see~\cite{AVG82} and the next Section~\ref{parara}) in some discrete set of points of the projectivized wavefront; the generating family~(\ref{prover}) is in this case transversal to the entire stratum~$\{P_8\}$ in the space of function singularities, but not to its particular orbits, and hence is not versal.
\subsection{Parabolic singularities and main result}\label{parara}
Parabolic singularities of function germs (see \cite{AVG82}) form the next natural family of singularity classes after the simple ones. Besides some nice algebraic properties distinguishing them, it is important for us that they are {\em confining} for the family of simple singularities (see \cite{AVG82}), that is, any non-simple singularity can be turned to a parabolic one by an arbitrarily small perturbation. Therefore, proving that some property (e.g., diffusion) holds close to any non-simple singularity, it is enough to prove it for these classes only.
The parabolic classes are listed in Table \ref{t4}. Any function germ ${\mathbb R}^n \to {\mathbb R}$ of this class can be reduced by a smooth change of variables to one of the normal forms indicated in this table. The letter $Q$ in it denotes non-degenerate quadratic forms in the variables missing in the first parts of these formulas, e.g.,~$\pm \xi_4^2 \pm \dots \pm \xi_n^2$ for singularities $P_8^1$ and $P_8^2$, and $\pm \xi_3^2 \pm \dots \pm \xi_n^2$ for five other singularity types. So, the {\em corank} of a parabolic singularity (i.e., the number of essential variables) is equal to 3 in the case of $P_8$ singularities, and to 2 for $X_9$ and $J_{10}$.
\begin{table}[t]\centering \caption{Real parabolic singularities}\label{t4}
\begin{tabular}{|c|l|l|} \hline Notation & \qquad \qquad Normal form & Restrictions \\ \hline $P_8^1$ & $\xi_1^3 + \alpha \xi_1 \xi_2 \xi_3 + \xi_2^2\xi_3 + \xi_2\xi_3^2+ Q$ & $\alpha > -3 $\tsep{2pt} \\ $P_8^2$ & $\xi_1^3 + \alpha \xi_1 \xi_2 \xi_3 + \xi_2^2\xi_3 + \xi_2\xi_3^2+ Q$ & $\alpha < -3 $ \tsep{2pt} \\ $\pm X_9$ & $\pm \big(\xi_1^4 + \alpha \xi_1^2 \xi_2^2 + \xi_2^4 + Q\big)$ & $\alpha > -2$ \tsep{2pt} \\ $X_9^1$ & $ \xi_1\xi_2\big(\xi_1^2 + \alpha \xi_1\xi_2 + \xi_2^2\big) + Q$ & $\alpha^2 < 4 $ \tsep{2pt} \\ $X_9^2$ & $ \xi_1\xi_2(\xi_1 + \xi_2)(\xi_1 + \alpha \xi_2) + Q$ & $\alpha \in (0,1) $ \tsep{2pt} \\ $J_{10}^3$ & $\xi_1\big(\xi_1 - \xi_2^2\big)\big(\xi_1 - \alpha \xi_2^2\big) + Q$ & $\alpha \in (0,1) $ \tsep{2pt} \\ $J_{10}^1$ & $\xi_1\big(\xi_1^2 + \alpha \xi_1\xi_2^2 + \xi_2^4\big) + Q$ & $\alpha^2 < 4 $ \bsep{2pt}\\ \hline \end{tabular} \end{table}
\begin{Proposition}[see \cite{AVG82}] \label{prim} All sufficiently small perturbations of a function germ with a~para\-bo\-lic critical point have only simple critical points or parabolic critical points of the same class as the perturbed one. \end{Proposition}
Indeed, a small perturbation cannot increase the Milnor number, or the corank or the multiplicity of a singularity.
\begin{Theorem} \label{maint} Suppose that $N$ is odd, the projectivized wavefront $W^*(F)\subset {\mathbb R}P^{N-1}$ of a~strict\-ly hyperbolic operator $F$ has a singularity of one of classes indicated in Table~{\rm \ref{t4}} at a point $X \in W^*(F)$, and one of two conditions is satisfied: \begin{enumerate}\itemsep=0pt \item[$a)$] the positive inertia index of the quadratic part $Q$ of the corresponding generating function is odd; \item[$b)$] the singularity type of $W^*(F)$ at point $X$ is one of the following four: $X_9^1$, $X_9^2$, $J_{10}^3$, or~$J_{10}^1$. \end{enumerate} Then \begin{enumerate}\itemsep=0pt \item[$1)$] the principal fundamental solution $E(F)$ has a holomorphic diffusion in {\em all} local components of the complement of the wavefront $W^*(F)$ at the point~$X$; \item[$2)$] if the singularity of the wavefront at point $X$ is projective versal, then there is a local $($close to the point~$X)$ connected component of the complement of the wavefront, such that the fundamental solution of~$F$ is sharp in this component at all points of the wavefront in the boundary of this component, except for the points of the most singular stratum $($i.e., for parabolic singular points of the corresponding class$)$. \end{enumerate} \end{Theorem}
Both statements of this theorem in Case~(b) follow from the same statements of Case~(a) for the same singularity types, because the four classes mentioned there are invariant under the multiplication of functions by~$-1$, which preserves also the condition of sharpness, but changes (in the case of odd~$N$) the parities of inertia indices of quadratic forms of rank~$N-4$. Therefore we will consider in our proofs only Case~(a), i.e., assume that the positive inertia index of quadratic form~$Q$ is odd. For a proof of Statement~1 of this theorem see~\cite{APLT} for all parabolic singularity classes except for~$P_8^2$, and~\cite{V16} for~$P_8^2$; see also Remark~\ref{rem3} on p.~\pageref{rem3} below. Statement~2 in Case~(a) will be proved in Section~\ref{p8} for singularities~$P_8^1$ and~$P_8^2$, and in Section~\ref{cor2} for all the other parabolic singularity types.
\begin{Remark} The singular points of wavefronts of three parabolic classes not mentioned in Case~(b) of this theorem in the case of odd~$N$ and even positive inertia index of form~$Q$ do have local lacunas in their neighborhoods, see~\cite{V16}. \end{Remark}
\section{Topological reformulations}
\subsection{Sharpness and the local Petrovskii condition}
Holomorphic sharpness of wavefronts can be detected by a topological criterion, the {\em local Petrovskii condition}, introduced in~\cite{ABG73} and reformulated in~\cite{Vassiliev86} in terms of the topology of Milnor fibers of generating functions. This condition generalizes a global homological condition from~\cite{Petrovskii45}.
This condition makes sense for all real holomorphic function singularities with finite {\em Milnor numbers}. Let us recall the needed topological objects (see~\cite{AVG82,Milnor}). Let $f\colon \big({\mathbb C}^n,{\mathbb R}^n,0\big) \to ({\mathbb C},{\mathbb R},0)$ be a holomorphic function germ with $df(0)=0$; suppose that $0$ is an isolated critical point of~$f$, so that its Milnor number~$\mu(f)$ is finite. Let $B \subset {\mathbb C}^n$ be a sufficiently small ball centered at point~$0$, and $f_\lambda$ a very small perturbation of function $f$ which is non-discriminant (i.e., the variety $f_\lambda^{-1}(0)$ is non-singular in $B$). The manifold $V_\lambda \equiv f^{-1}_\lambda(0) \cap B$ is then called the {\em Milnor fiber} corresponding to the perturbation $f_\lambda$. It is a smooth $(2n-2)$-dimensional manifold with boundary $\partial V_\lambda \equiv V_\lambda \cap \partial B$, and is homotopy equivalent to the wedge of $\mu(f)$ spheres of dimension $n-1$. In particular $\tilde H_{n-1}(V_\lambda) \simeq {\mathbb Z}^{\mu(f)}$ and
\begin{gather}\tilde H_{n-1}(V_\lambda, \partial V_\lambda) \simeq {\mathbb Z}^{\mu(f)}; \label{miln} \end{gather} here $\tilde H$ denotes the reduced homology groups, so that $\tilde H_{n-1} \equiv H_{n-1}$ if $n>1$.
If perturbation $f_\lambda$ is {\em real} (that is, $f_\lambda({\mathbb R}^n) \subset {\mathbb R}$), then there are two important elements of the group~(\ref{miln}): the {\em even} and the {\em odd local Petrovskii classes}.
The {\it even Petrovskii class} is realized by the fundamental class of the manifold $V_\lambda \cap {\mathbb R}^n$ of real points of the Milnor fiber, oriented as the boundary of the domain where $f_\lambda\leq 0$. For example, if the function $f$ has a local minimum at its critical point, then the even Petrovskii class of its perturbation $f+ \varepsilon$, $\varepsilon>0$, is trivial: it is represented by the empty cycle.
In this work we will deal with the {\it odd Petrovskii class} whose construction is more tricky, see, e.g.,~\cite[Chapter~IV]{APLT}. However, we will be mainly interested not in this class itself but in the condition of its triviality, which has an easy characterization: the odd Petrovskii class associated with the perturbation $f_\lambda(x_1, \dots, x_n)$ of function $f$ is equal to zero if and only if the even Petrovskii class of perturbation $f_\lambda(x_1, \dots, x_n)+ x_{n+1}^2-x_{n+2}^2$ of function $f(x_1, \dots, x_n)+ x_{n+1}^2-x_{n+2}^2$ is equal to zero.
\begin{Definition} \rm The {\it local Petrovskii class} in the group (\ref{miln}) is defined as the even local Petrovskii class if $n$ is even or the odd local Petrovskii class if $n$ is odd. A non-discriminant point $\lambda$ satisfies the {\it local Petrovskii condition} if the corresponding local Petrovskii class is equal to $0$. \end{Definition}
\begin{Proposition}\label{sence}If $f$ is the generating function of the projectivized wavefront~$W^*(F)$ of a~strictly hyperbolic operator at its point~$X$, the corresponding critical point of~$f$ is isolated, and~$f_{\bf x}$ is a small non-discriminant real perturbation of~$f$ of the form~\eqref{prover}, then the principal fundamental solution~$E(F)$ is holomorphically sharp at~$X$ in the local $($close to $X)$ component of ${\mathbb R}P^{N-1}\setminus W^*(F)$ containing point~${\bf x}$ if and only if the corresponding local Petrovskii class is equal to~$0$ in group $\tilde H_{N-3}(V_{\bf x}, \partial V_{\bf x})$. \end{Proposition}
Part ``if'' of this statement is proved in \cite{ABG73}, part ``only if'' in \cite{Vassiliev86}.
\subsection{Functoriality of local Petrovskii classes under adjacencies of singularities}
Let $\Phi\colon \big({\mathbb C}^n \times {\mathbb C}^l, {\mathbb R}^n \times {\mathbb R}^l, 0\big) \to ({\mathbb C}, {\mathbb R}, 0)$ be a deformation of our function germ $f$, parameterized by points $\lambda \in {\mathbb C}^l$, i.e., $\Phi$ is a family of functions $f_\lambda \equiv \Phi(\cdot,\lambda) \colon {\mathbb C}^n \to {\mathbb C}$ such that $f_0 \equiv f$ and $f_\lambda\big({\mathbb R}^n\big) \subset {\mathbb R}$ if $\lambda \in {\mathbb R}^l \subset {\mathbb C}^l$.
Let $\Sigma \subset {\mathbb R}^l$ be the {\em discriminant} of this deformation, i.e., the set of parameters $\lambda \in {\mathbb R}^l$ such that variety $V_\lambda$ is singular in~$B$. This set contains the {\em real discriminant} (see Section~\ref{prove}) but generally can be greater than it, including also parameter values $\lambda$ of functions $f_\lambda$ having imaginary critical points in~$B$ with zero critical values. This set divides parameter space ${\mathbb R}^l$ into several connected components in a neighbourhood of the origin.
By the construction of Petrovskii classes, the local Petrovskii condition is satisfied or is not satisfied simultaneously for all points from any such component.
Consider some such component~$C$; let $\tilde \lambda \in \Sigma \cap \bar C$ be a discriminant value of the parameter, which lies in the boundary of~$C$. By definition of the discriminant, the corresponding function~$f_{\tilde \lambda}$ has several (at least one) critical points $a_j\in B$ with critical value~$0$. Consider a set of very small non-intersecting balls $B_j \subset B$ around all these points.
Let $\overline{\lambda} \in C$ be a non-discriminant value of the parameter which is very close to $\tilde \lambda$, so that the corresponding level set $V_{\overline{\lambda}}$ is transversal to the boundaries of all balls~$B_j$, and varieties $V_{\overline{\lambda}} \cap B_j$ can be considered as the Milnor fibers of the corresponding critical points~$a_j$ of function $f_{\tilde \lambda}$.
\begin{Proposition}\label{funct}\quad \begin{enumerate}\itemsep=0pt \item[$1.$] For any real critical point $a_j \in B$ of function $f_{\tilde \lambda}$ with $f_{\tilde \lambda}(a_j)=0$, the odd $($respectively, even$)$ Petrovskii class of singularity $(f_{\tilde \lambda},a_j)$ in the homology group $\tilde H_{n-1}(V_{\overline{\lambda}} \cap B_j, V_{\overline{\lambda}} \cap \partial B_j)$ of the corresponding Milnor fiber is equal to the image of the odd $($respectively, even$)$ Petrovskii class of initial singularity $(f,0)$ under the obvious map \begin{gather}\label{functor} \tilde H_{n-1}(V_{\overline{\lambda}}, \partial V_{\overline{\lambda}}) \to \tilde H_{n-1}\big(V_{\overline{\lambda}}, V_{\overline{\lambda}} \cap \overline{(B\setminus B_j)}\big) \equiv \tilde H_{n-1}(V_{\overline{\lambda}} \cap B_j, V_{\overline{\lambda}} \cap \partial B_j). \end{gather} \item[$2.$] For any imaginary critical point $a_j \in B$ of $f_{\tilde \lambda}$ with $f_{\tilde \lambda}(a_j)=0$, the images of the odd and even local Petrovskii classes under the map~\eqref{functor} are equal to~$0$. \item[$3.$] If deformation $\Phi$ has the form \eqref{prover}, then the corresponding principal fundamental solution~$E(F)$ is holomorphically sharp at point $\tilde \lambda$ of the wavefront in component $C$ if and only if the images of the local Petrovskii class of $f_{\overline{\lambda}}$ under all maps \eqref{functor} corresponding to all such critical points $a_j$ are equal to zero in all groups $\tilde H_{N-3}(V_{\overline{\lambda}} \cap B_j)$. \end{enumerate} \end{Proposition}
Statements~1 and~2 of this proposition follow immediately from the construction of Petrovskii classes, and Statement~3 from the multisingularity version of Proposition~\ref{sence} also proved in~\cite{Vassiliev86}.
If $f$ and $g$ are two germs of functions $\big({\mathbb C}^n,{\mathbb R}^n,0\big) \to ({\mathbb C}, {\mathbb R}, 0)$ with ${\rm d}f(0)=0={\rm d}g(0)$, which are $\operatorname{Diff}_0$-equivalent (i.e., can be transformed one to another by the composition with a local diffeomorphism $\big({\mathbb C}^n,{\mathbb R}^n,0\big) \to \big({\mathbb C}^n,{\mathbb R}^n,0\big)$), and $\Phi$ and $\Gamma$ are some of their real versal deformations, then there is a one-to-one correspondence between the local connected components of the complements of the discriminants of these deformations. This correspondence is defined by maps inducing deformations equivalent to~$\Phi$ and~$\Gamma$ one from the other in accordance with the definition of versal deformations (see~\cite{AVG82} and map~$\theta$ in~(\ref{versdef})). The Petrovskii conditions are respected by this correspondence. Therefore to prove Statement~2 of Theorem~\ref{maint} for a parabolic singularity class it is enough to present a component of the complement of the discriminant set of an arbitrary real versal deformation of an arbitrary singularity of this class, such that the local Petrovskii condition will be satisfied in this component at all the discriminant points of its boundary except for the most singular points corresponding to the parabolic singularity class itself. For instance, we can consider only singularities~$f(\xi)$ given by the polynomials from Table~\ref{t4}, and their monomial versal deformations of form \begin{gather}\label{mondef} \Phi(\xi,\lambda)\equiv f(\xi)+ \sum_{\alpha \in M} \lambda_\alpha \xi^\alpha , \end{gather} where $M \subset {\mathbb Z}^n_+$ is a finite set of multiindices $\alpha=(\alpha_1, \dots, \alpha_n)$, $\xi^\alpha \equiv \xi_1^{\alpha_1} \cdots \xi_n^{\alpha_n}$.
If $\Phi(\xi_1, \dots, \xi_n; \lambda)$ is a versal deformation of function $f(\xi_1, \dots, \xi_n)$ of the form (\ref{mondef}), then $\Phi(\xi_1, \dots, \xi_n; \lambda)+\big(\xi_{n+1}^2 + \xi_{n+2}^2\big)$ and $\Phi(\xi_1, \dots, \xi_n; \lambda)-\big(\xi_{n+1}^2 + \xi_{n+2}^2\big)$ are versal deformations respectively of functions $f(\xi_1, \dots, \xi_n)+\big(\xi_{n+1}^2 + \xi_{n+2}^2\big)$ and $f(\xi_1, \dots, \xi_n)-\big(\xi_{n+1}^2 + \xi_{n+2}^2\big)$ in $n+2$ variables, and have the same discriminant sets in the spaces of parameters $\lambda = \{\lambda_\alpha\}$. The homology groups (both absolute and modulo the boundary) of the middle dimensions of Milnor fibers of these three functions corresponding to the same non-discriminant parameter values~$\lambda$ are naturally isomorphic to one another. These isomorphisms preserve the Petrovskii condition (see \cite[Section~V.1.7]{APLT}). Therefore proving Theorem~\ref{maint} (in its Case~(a)) we can assume that for singularity classes $P_8$ we have $n=5$, $Q=\xi_4^2- \xi_5^2$, and for singularities $X_9$ or $J_{10}$ we have $n=3$, $Q=\xi_3^2$.
\begin{Proposition}\label{lem11}If function $f\colon \big({\mathbb C}^n,{\mathbb R}^n,0\big) \to ({\mathbb C}, {\mathbb R}, 0)$ is given by one of normal forms from Table~{\rm \ref{t4}}, the dimension $n$ is odd and the positive inertia index of quadratic form $Q$ also is odd, then the boundary of the odd Petrovskii class of any non-discriminant small perturbation~$f_\lambda$ of~$f$ is a non-trivial element of group $H_{n-2}(V_{\lambda} \cap \partial B)$. \end{Proposition}
\begin{proof}[Scheme of the proof] A basis in $H_{n-1}(V_\lambda)$ can be composed of {\em vanishing cycles} $\Delta_i$, $i=1, \dots, \mu(f)$, see, e.g., \cite{AVG84,APLT}. By the Poincar\'e duality and exact sequence of pair $(V_\lambda, \partial V_\lambda)$, the desired non-triviality of the boundary for a morsification $f_\lambda$ is equivalent to the following condition: vector $\Pi(\lambda)$ of intersection indices of the odd Petrovskii class with these basic vanishing cycles is not an integer linear combination of rows of intersection matrix $\{\langle \Delta_i,\Delta_j\rangle \}$ of these cycles.
It is enough to prove the latter property for an arbitrary single non-discriminant real morsification $f_\lambda$. Indeed, by the exact formulas for the local Petrovskii classes (see \cite[Sections~V.1 and~V.4]{APLT}) expressing them in terms of intersection indices and the Morse indices of real critical points, vectors $\Pi(\lambda)$ for morsifications~$f_\lambda$ from the neighbouring components of the complement of the discriminant differ by adding or subtracting a row of the intersection matrix.
So, it remains to calculate all these data for one arbitrary morsification of any of our singularity classes. Vectors~$\Pi(\lambda)$ for these morsifications follow after that by the above-mentioned explicit formulas, and give the promised result.
The intersection indices of vanishing cycles for corank~2 singularities $X_9$ and $J_{10}$ can be found by the Gusein-Zade--A'Campo method, see~\cite{Gusein-Zade74} and~\cite{A'Campo75}, starting, e.g., from the morsifications discussed in Proposition~\ref{perts} below. For singularity class~$P_8^1$ (represented by function $\xi_1^3 + \xi_2^3+ \xi_3^3 + Q$) these indices follow from the main theorem of~\cite{Gab}, and for class $P_8^2$ they were calculated in \cite[Section~7]{V16}. \end{proof}
\begin{Remark}\label{rem3} Statement 1 of Theorem~\ref{maint} follows immediately from this proposition and Proposition~\ref{sence}. \end{Remark}
\section[Proof of Statement 2 of Theorem \ref{maint} for parabolic singularities of corank 2]{Proof of Statement 2 of Theorem \ref{maint}\\ for parabolic singularities of corank 2}\label{cor2}
\subsection{Examples}\label{theex}
\begin{figure}
\caption{$+X_9$ (left), $X_9^1$ (right).}
\label{X90}
\end{figure}
\begin{figure}
\caption{$X_9^2$ (left), $J_{10}^3$ (right).}
\label{X92}
\end{figure}
\begin{figure}
\caption{$J_{10}^1$.}
\label{J101}
\end{figure}
\begin{Proposition}\label{perts} Any function $\varphi(\xi_1,\xi_2)$ in two variables defined by one of normal forms indicated in Table~{\rm \ref{t4}} for singularities of types $+X_9$, $X_9^1$, $X_9^2$, $J_{10}^3$, or $J_{10}^1$ has an arbitrarily small perturbation, whose set of zeros and domains of constant signs of values look topologically as shown in Figs.~{\rm \ref{X90}} $($left and right$)$, {\rm \ref{X92}} $($left and right$)$, or~{\rm \ref{J101}} respectively. $($The non-closed curves in these pictures are assumed to be continued to the infinity in~${\mathbb R}^2.)$
In particular, any versal deformation of $\varphi$ contains perturbations with this topological picture. \end{Proposition}
The proof is elementary.
For any of these five functions $\varphi(\xi_1, \xi_2)$ let us add a small positive constant to its perturbation shown in the corresponding picture in such a way that all critical values at the minima points will remain negative. All the other critical values will become positive, therefore the obtained function is non-discriminant. Denote this function by $\tilde \varphi$ and define function $\tilde f\colon {\mathbb C}^3 \to {\mathbb C}$ by \begin{gather}\label{tfi} \tilde f(\xi_1, \xi_2, \xi_3) \equiv \tilde \varphi(\xi_1,\xi_2)+\xi^2_3 ; \end{gather} this is a perturbation of function \begin{gather}\label{fi} f \equiv \varphi(\xi_1,\xi_2)+\xi^2_3 . \end{gather}
\begin{Proposition}\label{imag} If $f$ is the function \eqref{fi}, where $\varphi$ is one of our five polynomials from Table~{\rm \ref{t4}}, and~$\tilde f$ is the perturbation of~$f$ just defined, then any Morse perturbation $f_\lambda$ of $f$ lying in the same component of the complement of the discriminant variety of a deformation of~$f$ as~$\tilde f$ can have no more than one pair of imaginary critical points $($so that the number of its real critical points is equal to either~$\mu(f)$ or~$\mu(f)-2)$. \end{Proposition}
\begin{proof} The sum of local indices of vector field $\operatorname{grad} f_\lambda$ at all real critical points of $f_\lambda$ with negative (respectively, positive) critical values is an invariant of components of the complement of the discriminant. For our five perturbations these sums are equal to: 4 and $-3$ for $+X_9$, 3 and $-4$ for $X_9^1$, 2 and $-5$ for $X_9^2$, 3 and $-5$ for $J_{10}^3$, 4 and $-4$ for $J_{10}^1$. The index of any Morse critical point is equal to $1$ or $-1$, hence $f_\lambda$ should have at least 7 real critical points in the case of any $X_9$ singularity and at least 8 in the case of $J_{10}$. \end{proof}
\begin{Proposition}\label{mp} Let $f$ be the function \eqref{fi} where $\varphi$ is given by one of formulas of Table~{\rm \ref{t4}} for one of five singularity classes $+X_9$, $X_9^1$, $X_9^2$, $J_{10}^3$ or $J_{10}^1$; let $C \subset {\mathbb R}^l$ be the component of the complement of discriminant variety $\Sigma$ in the parameter space of a deformation of $f$ containing the corresponding perturbation~\eqref{tfi}; let $\tilde \lambda \in \Sigma$ be an arbitrary discriminant point in the boundary of this component $C$ which is sufficiently close to the origin in the parameter space of the deformation, and all critical points of $f_{\tilde \lambda}$ with zero critical value are not parabolic $($i.e., do not belong to the same singularity class as~$f)$. Then for any parameter value $\overline{\lambda} \in C$, which is sufficiently close to $\tilde \lambda$, and any critical point $a_j\in B$ of $f_{\tilde \lambda}$ with critical value~$0$ the corresponding localized odd Petrovskii class in $\tilde H_{2}(V_{\overline{\lambda}} \cap B_j , V_{\overline{\lambda}} \cap \partial B_j)$ is equal to~$0$. \end{Proposition}
Statement 2 (Case~(a)) of Theorem~\ref{maint} for these five singularity classes follows immediately from this proposition and Proposition~\ref{funct}.
Proof of Proposition \ref{mp} takes the remaining part of this section. Everywhere in it we assume that $f_{\tilde \lambda}$ satisfies the conditions of this proposition.
We can and will assume that our deformation is versal, otherwise we can expand it to a versal one. Let $l$ be the dimension of its parameter space.
Let $\mu_j$, $j=1,\dots,$ be the Milnor numbers of all critical points $a_j$ of $f_{\tilde \lambda}$ with critical value 0. Suppose that for one of these points, say $a_1$, the localized Petrovskii class in $H_{2}(V_\lambda \cap B_1, V_\lambda \cap \partial B_1)$, where $\lambda \in C$ is very close to $\tilde \lambda$, is non-trivial. In Section~\ref{condit} we will show that in this case component~$C$ should contain functions $f_{\overline{\lambda}}$ satisfying at least one of two conditions described there; in Sections~\ref{propr} and~\ref{modif} we present a combinatorial program proving that such functions actually do not exist.
\subsection{Conditions} \label{condit} By Statement~2 of Proposition~\ref{funct}, critical point~$a_1$ mentioned in the previous paragraph is real.
\begin{Lemma}\label{lem8} We can assume that $f_{\tilde \lambda}$ has only one such critical point $a_1$ with critical value~$0$ $($i.e., if there are several such critical points of~$f_{\tilde \lambda}$, then there is another point $\tilde \lambda'$, $\tilde \lambda' \approx \tilde \lambda,$ in the boundary of~$C$ with a unique such critical point and non-trivial localization of the Petrovskii class at neighboring points of~$C)$. \end{Lemma}
\begin{proof} \looseness=-1 Suppose first that all critical points of $f_{\tilde \lambda}$ with critical value 0 are real. If $\tilde \lambda$ is sufficiently close to the origin in parameter space ${\mathbb R}^l$ of our versal deformation of function $f$, then a small open ball $U$ in this space with the center at point $\tilde \lambda$ is also the parameter space of a versal deformation of the multisingularity of function $f_{\tilde \lambda}$, formed by all these function germs $(f_{\tilde \lambda}, a_j)$. A miniversal deformation of such a multisingularity splits into the product of independent versal deformations of all singularities composing it, in particular its number of parameters is equal to $\sum \mu_j$. Therefore our component $C \cap U$ of the complement of the discriminant set of this versal deformation of multisingularity $f_{\tilde \lambda}$ is ambient diffeomorphic to the direct product of some components $C_j$ of the complements of discriminant sets of all these singularities $(f_{\tilde \lambda},a_j)$, and additionally of space ${\mathbb R}^{l - \sum \mu_j}$. This diffeomorphism is realized by the restriction to $C \cap U$ of a~map inducing a deformation equivalent to our versal deformation of $f_{\tilde \lambda}$ from this miniversal one. (More precisely, this map is the projection map of a trivial fiber bundle, all whose fibers are diffeomorphic to ${\mathbb R}^{l - \sum \mu_j}$ and consist of functions, obtained one from another by diffeomorphisms of the argument space.) Point $\tilde \lambda$ is sent by this diffeomorphism to the product of origins in the space ${\mathbb R}^{l - \sum \mu_j}$ and in all parameter spaces ${\mathbb R}^{\mu_j}$ of these deformations. Let us shift for any $j \neq 1$ the corresponding origin point of ${\mathbb R}^{\mu_j}$ to a non-discriminant point inside the corresponding component $C_j$. 
The obtained point of the product corresponds via our diffeomorphism to a point~$\tilde \lambda'$ in the boundary of our component $C \cap U$, such that $f_{\tilde \lambda'}$ has a single critical point with critical value~0, with the same simple singularity class as $(f_{\tilde \lambda},a_1)$, and the localized Petrovskii class for a neighboring point $\overline{\lambda'} \in C$ again will be not equal to zero in group $\tilde H_{n-1}(V_{\overline{\lambda'}} \cap B_1 , V_{\overline{\lambda'}} \cap \partial B_1)$.
Suppose now that our function $f_{\tilde \lambda}$, $\tilde \lambda \in \partial C$, has imaginary critical points with value 0. By Proposition \ref{imag} there can be only one pair of such points, and these two critical points should be Morse. In this case we consider a very similar splitting of a neighbourhood of point $\tilde \lambda$, in which one factor ${\mathbb R}^2$ is not the parameter space of a real singularity of $f_{\tilde \lambda}$, but the real part of the product of parameter spaces of both its complex conjugate critical points; the rest of the consideration remains as previously. \end{proof}
So, we can and will assume that $f_{\tilde \lambda}$ has only one critical point $a_1 \in B$ with critical value~0. By Proposition~\ref{prim} this critical point~$a_1$ is simple. A neighbourhood of point $\tilde \lambda$ in ${\mathbb R}^l$ can be considered as the base of a versal deformation of critical point~$a_1$ of function $f_{\tilde \lambda}$. Then by Theorem~\ref{v92} we may assume that either critical point~$a_1$ is Morse or it is of type~$A_2$ and our component~$C$ is the ``bigger'' one with respect to the corresponding cuspidal edge.
If this point is Morse, then component~$C$ contains a point $\overline{\lambda}$ such that function $f_{\overline{\lambda}}$ is Morse and satisfies the following
\begin{Condition}\label{cond1} The intersection index of the Petrovskii class in $\tilde H_{2}(V_{\overline{\lambda}}, \partial V_{\overline{\lambda}})$ with a vanishing cycle in $\tilde H_{2}(V_{\overline{\lambda}})$ defined by the segment in ${\mathbb R}^1 \subset {\mathbb C}^1$ connecting the non-critical value~$0$ with either the smallest positive or the largest negative critical value of $f_{\overline{\lambda}}$ is not equal to~$0$. \end{Condition}
Indeed, this is true for all values $\overline{\lambda} \in C$ which are sufficiently close to $\tilde \lambda$ and correspond to Morse functions.
According to the explicit formulas for the Petrovskii classes (see \cite{Vassiliev86, APLT}) this condition is equivalent to the following one.
\begin{ConditionN}
Either the positive inertia index of the critical point of $f_{\overline{\lambda}}$ with the smallest positive critical value is odd, or the positive inertia index of the critical point of $f_{\overline{\lambda}}$ with the largest negative critical value is even. \end{ConditionN}
In the second case of $A_2$ singularity (assuming that Condition~\ref{cond1} is {\em not} satisfied for all points of $C$) our component $C$ contains points $\overline{\lambda}$ arbitrarily close to $\tilde \lambda$, such that $f_{\overline{\lambda}}$ is Morse and
\begin{Condition}\label{cond2}\quad \begin{enumerate}\itemsep=0pt \item[$1)$] $f_{\overline{\lambda}}$ has at least two real critical points with positive critical values; \item[$2)$] positive inertia index of the quadratic part of the critical point of $f_{\overline{\lambda}}$ with the smallest positive critical value is even; \item[$3)$] positive inertia index of the quadratic part of the critical point of $f_{\overline{\lambda}}$ with the next smallest positive critical value is exactly by~$1$ smaller than that for the previous one;
\item[$4)$] intersection index in $H_{2}(V_{\overline{\lambda}})$ of two vanishing cycles defined by simplest paths \mbox{\begin{picture}(25,5) \put(1,1){\circle{2}} \put(-1.5,-6.5){\footnotesize $0$}
\put(13,1){\circle*{2}} \put(25,1){\circle*{2}} \put(12,1){\vector(-1,0){9.3}} \bezier{120}(24,1.2)(13,6)(2.2,1) \end{picture}} connecting these critical values with $0$ in ${\mathbb C}^1$ is equal to $1$ or $-1$;
\item[$5)$] the intersection index of the odd Petrovskii class in $H_2(V_{\overline{\lambda}},\partial V_{\overline{\lambda}})$ with the vanishing cycle corresponding to the smallest positive critical value $($respectively, to the next smallest one$)$ is equal to~$0$ $($respectively, to~$1$ or $-1)$. \end{enumerate} \end{Condition}
It remains to prove that our component $C$ does not contain points~$\overline{\lambda}$ satisfying either of these two sets of conditions. To do it we apply the program~\cite{pro2} counting all topological types which the non-dis\-cri\-mi\-nant morsifications of function~$f$ can have.
\subsection{Description of the program} \label{propr} For an extended description of this program see \cite[Section~V.8]{APLT} (although the version of the program quoted there is now obsolete, for the actual version see~\cite{pro2}). Let us recall its basic ideas.
Any generic real morsification $f_\lambda$ of a function singularity $f$ of corank $\leq 2$ in $n$ variables is characterized by a set of its topological invariants, including \begin{enumerate}\itemsep=0pt \item[a)] intersection indices of appropriately oriented and ordered vanishing cycles in $H_{n-1}(V_\lambda)$ corresponding to all its critical points (including the imaginary ones), \item[b)] intersection indices of both local Petrovskii classes with all these vanishing cycles, \item[c)] Morse indices of all real critical points (more precisely, the {\em positive} inertia indices of their quadratic parts) ordered by increase of their critical values, \item[d)] the number of negative critical values, and \item[e)] the number $n$ of variables (in fact only its residue modulo 4 is important). \end{enumerate}
Any possible set of such data is called a {\em virtual morsification}.
If we know these data for an actual morsification~$f_\lambda$, then we can calculate them for all morsifications which can be obtained from~$f_\lambda$ by the standard surgeries, such as Morse surgeries of death/birth of real critical points of neighbouring indices, the change of the order in~${\mathbb R}^1$ of critical values at distant critical points, the passage of values at two distant imaginary critical points through the real axis, the jump of a critical value through~0 (this is the unique surgery changing the component of the complement of the discriminant), and the change of the choice of paths defining the cycles vanishing in imaginary critical points. Our algorithm starts from the virtual morsification corresponding to the initial perturbation $f_{\lambda} \in C$ and applies to it all possible chains of transformations of virtual morsifications modelling the possible surgeries of actual morsifications. The newborn virtual morsifications are compared with all previously found ones, and are added to the list if they are new. A priori this algorithm can provide virtual morsifications not corresponding to any actual one; however it surely finds all virtual morsifications corresponding to all actual ones.
\subsection{Modification of program \cite{pro2} for our purposes}\label{modif}
To apply this program to any of our five singularities, we calculate (by hand) data (a)--(d) for real morsification $f_\lambda$ from our component $C$ described in Section~\ref{theex}, put these data into our program, switch off in it the surgery of jumping of critical values through~$0$ (since we are interested only in the morsifications from component~$C$) and additionally include alarm operators aborting the program and typing the corresponding message if a newborn virtual morsification satisfies one of two conditions listed in Section~\ref{condit}.
More precisely, we take program \cite{pro2} and do the following: \begin{enumerate}\itemsep=0pt \item[a)] make in lines 1--7 changes described in lines 12--18 (the Milnor number of all $X_9$ singularities is equal to 9, and that of $J_{10}$ is equal to 10), \item[b)] put the command ${\rm NPOZC}=4$ in line 37 for singularities $+X_9$ and $J_{10}^1$, ${\rm NPOZC}=3$ for~$X_9^1$ and~$J_{10}^3$, and ${\rm NPOZC}=2$ for $X_9^2$: this is the number of critical points with negative critical values for our starting morsification specifying component~$C$, \item[c)] uncomment operators ${\rm L(MD+1)}=1$ and ${\rm L(MDD)}=1$ in lines~115,~116 (i.e., we disable the surgeries of crossing the discriminant, which correspond to the jumps of real critical values through~0), \item[d)] insert the next text immediately after operator 2318 CONTINUE: \begin{gather} \begin{split}
&\mbox{IF(PC1(NPOZC).NE.0)\ GOTO \ 7343}\\
&\mbox{NPO1=NPOZC+1}\\
& \mbox{IF(PC1(NPO1).NE.0)\ GOTO \ 7343}\\
& \mbox{NPO2=NPOZC+2}\\
& \mbox{IF(PC1(NPO2).NE.0.AND.INDC(NPO2).LT.INDC(NPO1)) \ GOTO \ 7343}
\end{split}\label{pro3} \end{gather} (the first two operators ``IF'' here check Condition~\ref{cond1} above, and the third one detects items~3 and (partly)~5 of Condition~\ref{cond2}; on the address 7343 the program types the word ALARM and data of the virtual morsification for which one of these conditions was detected, and then terminates its work); \item[e)] calculate the intersection indices of vanishing cycles of morsification~$f_\lambda$ described in Section~\ref{theex} using the Gusein-Zade--A'Campo method, and substitute all obtained {\em non-zero} indices $\langle\Delta_i,\Delta_j\rangle$, $i<j$, into the subroutine DATA at the very end of the program: e.g., if $\langle\Delta_3,\Delta_5\rangle=1$ then we insert operator $C(3,5)=1$ there. Also, if the positive inertia index of the quadratic part of the~$i$th (in the order of increase of critical values) critical point of our morsification is equal to some number $q \neq 2$, then we put ${\rm INDC}(i)=q$ in the last part of this subroutine. These indices for the critical points of functions~(\ref{tfi}) for our corank~2 singularities shown in Figs.~\ref{X90}--\ref{J101} are equal to~3 at minima of $\tilde \varphi$, 1 at maxima, and~2 at saddlepoints. \end{enumerate}
All programs modified in this way and corresponding to our five corank~2 parabolic singularities can be found by the address \url{https://drive.google.com/drive/folders/1L5p_HOvrBbcyBv-o4Ov4S_sZBbJytdxr?usp=sharing}.
The result of their work is negative: in all five cases the program detects that component $C$ does not contain points satisfying either of Conditions~\ref{cond1} or~\ref{cond2}. This proves Proposition~\ref{mp}, and hence also Statement~2 of Theorem~\ref{maint} for our five singularities of corank~2.
\section[Singularities $P_8$]{Singularities $\boldsymbol{P_8}$}\label{p8}
In the case of singularities of corank $>2$ we need to apply a different program~\cite{pro}. Indeed, we generally cannot predict the Morse indices of the newborn real critical points in the Morse birth surgery, if we know only the data of the virtual morsification before the birth: we can predict only parities of these indices. In the case of functions of corank 2, when the choice is between the pairs of neighboring Morse indices~$(0,1)$ or~$(1,2)$ of the new critical points of essential part~$\varphi_\lambda$ of the morsification, this is enough to obtain the exact values of the indices, but for the functions of greater coranks we need to use a program in which the Morse indices take only the values odd/even.
Fortunately, this is enough for our problem concerning the simplest singularities of corank 3. Other changes and features in these cases are as follows.
\subsection[Singularity class $P_8^1$]{Singularity class $\boldsymbol{P_8^1}$}
This class is represented by function $f \equiv \varphi(\xi_1, \xi_2, \xi_3) + \xi_4^2- \xi_5^2$, where $\varphi=\xi_1^3 + \xi_2^3 + \xi_3^3$. We consider the standard monomial versal deformation of this function depending on parameter $\lambda=(\lambda_1, \dots, \lambda_8)$, \begin{gather}\label{verp8} f_\lambda(\xi) \equiv f(\xi) + \lambda_1 + \lambda_2\xi_1+\lambda_3\xi_2+\lambda_4\xi_3+\lambda_5\xi_1\xi_2 +\lambda_6 \xi_1\xi_3 + \lambda_7 \xi_2\xi_3 +\lambda_8 \xi_1\xi_2\xi_3 . \end{gather}
The stratum of points of type $P_8$ is represented in it by the $\{\lambda_8\}$ axis. The multiplicative group of positive numbers acts on parameter space~${\mathbb R}^8$ of this deformation: any number $t$ sends a function $f_\lambda(x)$ to $t^{3}f_\lambda(x/t)$. In other words, $t(\lambda_1, \lambda_2, \lambda_3, \lambda_4, \lambda_5, \lambda_6, \lambda_7, \lambda_8)=\big(t^3\lambda_1, t^2\lambda_2, t^2\lambda_3, t^2 \lambda_4, t\lambda_5, t\lambda_6, t\lambda_7, \lambda_8\big)$. This action preserves the discriminant and allows us not to take care of the smallness of the perturbations except for parameter $\lambda_8$.
Let us construct a perturbation of function $f$, contained in component $C$ of the complement of the discriminant of this deformation, which will satisfy Statement~2 of Theorem~\ref{maint}.
Function $\varphi$ has a convenient morsification $\tilde \varphi \equiv \xi_1^3 + \xi_2^3 + \xi_3^3 - 3 (\xi_1 + \xi_2 +\xi_3)$ with eight real critical points: namely, these are all the points with $\xi_i = \pm 1$. Let $\tilde f \equiv \tilde \varphi + \xi_4^2-\xi_5^2$. The intersection indices of vanishing cycles in $H_{4}\big(\tilde f^{-1}(0)\big)$ associated with these critical points can be calculated by the method of \cite{Gab}.
Consider family $\varphi_{(\tau)}$ of perturbations of $\varphi$ depending on parameter $\tau \in [0,1]$, \begin{gather}\label{famil} \varphi_{(\tau)} \equiv \varphi(\xi_1, \xi_2, \xi_3) + 6\tau(\xi_1\xi_2 +\xi_1\xi_3 + \xi_2\xi_3) - (3+12\tau)(\xi_1+\xi_2+\xi_3) . \end{gather} All functions of this family are invariant under the permutations of coordinates $\xi_1$, $\xi_2$, $\xi_3$, and have the fixed critical point~$(1,1,1)$. This family connects the morsification $\tilde \varphi \equiv \varphi_{(0)}$ with a~function having a singularity of class~$D_4^-$ at this point. The topological type of morsifications does not change along this path (except for the very last point), in particular the intersection indices of vanishing cycles and the Morse indices of critical points of the corresponding functions $f_{(\tau)} \equiv \varphi_{(\tau)} + \xi_4^2-\xi_5^2$ for nearly final values of $\tau \approx 1$ will be the same as for $\tau=0$.
A neighbourhood of the final function $\varphi_{(1)}$ in the parameter space of the versal deformation~(\ref{verp8}) can be considered as the parameter space of a versal deformation of the corresponding multisingularity consisting of this point of type $D_4^-$ and four distant Morse points, so we can deform these critical points independently.
Four Morse critical points of $\varphi_{(\tau)}$, $\tau =1-\varepsilon$, tending to the collision in this point of type~$D_4^-$, consist of three points with signature~$(2,1)$ and equal critical values, and one minimum point with a slightly smaller critical value tending to $-24$ when $\tau$ tends to~$1$. We can perform two standard Morse surgeries over them, first colliding this minimum point with one point of signature $(2,1)$, and then returning these critical points to the real domain as two points with signatures $(2,1)$ and $(1,2)$ and critical values slightly greater than these at two points of signature $(2,1)$ not participating in these surgeries. Composition of these two surgeries realizes the passage {\small \unitlength=0.8mm $\mbox{\begin{picture}(52,13) \put(0,10){\line(1,0){16}} \put(3,13){\line(1,-2){7}} \put(13,13){\line(-1,-2){7}} \put(6.8,6.2){{\footnotesize $-$}} {\Large \put(21,5){$\leftrightarrow$}} \put(32,1){\line(1,0){15}} \put(36,13){\line(1,-2){7}} \put(41,13){\line(-1,-2){7}} \put(37,2.5){{\footnotesize $+$}} \end{picture}}$} between the perturbations of a $D_4^-$-singularity. Other four critical points of $\varphi_{(1-\varepsilon)}$ can be perturbed very little during this surgery.
The resulting function has exactly three critical points of signature~$(2,1)$, which have the smallest critical values among all critical points of this function. Let us add a constant to this function in such a way that these three critical values of the obtained function $\overline{\varphi}$ will remain negative, and all the other ones become or remain positive. We claim that the component of the complement of the discriminant of the deformation~(\ref{famil}) containing function $\overline{f}\equiv \overline{\varphi} + \xi_4^2-\xi_5^2$ satisfies an analog of Proposition~\ref{mp}: the local Petrovskii condition holds in this component at all discriminant points of its boundary except for the most singular points of type~$P_8^1$.
The proof of this claim almost repeats that of Proposition~\ref{mp}.
In particular, the exact analogs of Proposition~\ref{imag} and Lemma~\ref{lem8} hold in this case together with their proofs: sums of indices of the vector field $\operatorname{grad} \overline{\varphi}$ over the critical points with negative (respectively, positive) critical values are equal to~$-3$ and~$3$.
The topological data of the obtained morsification~$\overline{f}$ can be easily derived from these for morsification $\varphi_{(1-\varepsilon)}$ (e.g., with the help of our program~\cite{pro}). Then
we substitute these data to a modification of our program, see the concluding subroutine DATA in program mP81 from the folder quoted in Section~\ref{modif}. This program is obtained from program~\cite{pro} by almost the same changes by which we obtained in Section~\ref{modif} similar programs for corank~2 singularities from program~\cite{pro2}, with only the following exceptions. The operator detecting cuspidal edge (see the fifth line in~(\ref{pro3})) should be slightly changed, because we generally do not know the integer values of Morse indices of critical points. Instead we write the command \[\mbox{IF(PC1(NPO2).NE.0.AND.C(NPO1,NPO2).EQ.1) GOTO 7343}\] checking items~4 and~5 of Condition~\ref{cond2} (we use a definite choice of orientations of vanishing cycles corresponding to real critical points, which allows us to fix the sign of their intersection index). Also, we write ${\rm INDC}(q)=-1$ in the concluding subprogram DATA for all numbers~$q$ of critical values of the function $\overline{\varphi}$ corresponding to critical points with {\em odd} positive inertia indices of quadratic parts; we write ${\rm NPOZC}=3$ in line~37 and activate operators ${\rm L}({\rm MD}+1)=1$ and ${\rm L(MDD)}=1$ in lines~115 and~116. At the beginning of this subprogram we preserve operator $\mbox{N}=-1$ (which means that number $n= 5$ of variables is as odd as the number~3 considered previously) but then write $\mbox{N2}=1$ (which means that number $n(n-1)/2$ for $n=5$ is even, unlike the case of $n=3$).
The obtained program proves that our component $C$ of the complement of the discriminant does not contain any morsifications satisfying Conditions~\ref{cond1} or~\ref{cond2} from Section~\ref{condit}, thus proving Statement~2 of Theorem~\ref{maint} for singularity class~$P_8^1$.
\subsection[Singularity $P_8^2$]{Singularity $\boldsymbol{P_8^2}$}
In this case we take initial morsification $\varphi_{\tilde \lambda} (\xi_1, \xi_2, \xi_3) + \xi_4^2-\xi_5^2$, where function $\varphi_{\tilde \lambda}$ in three variables is described in Section~7 of~\cite{V16} (and is called~$f_2$ there). This function $\varphi_{\tilde \lambda}$ has four critical points with signature of the quadratic part equal to $(2,1)$ and critical values equal to~0,~0,~0, and~1. In addition it has four critical points with signature $(1,2)$; the values at three of them are slightly greater than~$0$, and the value at the fourth one is also equal to~1. Let us subtract a very small constant from this function, making critical values at the first three critical points negative, and leaving the remaining five values positive. According to~\cite{V16}, the intersection matrix of corresponding vanishing cycles is then expressed by formula~(5) from~\cite{V16} with $X=0$, $Y=1$, $Z=0$, $W=-2$. Let us substitute these topological data into the same version of program~\cite{pro} which was used for $P_8^1$, again write $\mbox{NPOZC}=3$ in line~37, and run the obtained program~mP82.
It gives us a disappointing answer: one can see the word ALARM in the print-out of this program, followed by the data of a virtual morsification, for which Condition~\ref{cond1}$'$ is satisfied: namely, the critical point with the smallest positive critical value has an odd positive inertia index of the quadratic part.
We will show now that this is not dangerous for us. Indeed, there are the following two possible interpretations of this calculation.
1. All chains of admissible virtual surgeries leading to the virtual morsifications satisfying Condition~\ref{cond1}$'$, are fake (i.e., cannot be realized by chains of real surgeries arising along any generic paths in the parameter space of a~versal deformation). In this case the component of the complement of the discriminant, which contains our morsifications, is the desired one. Indeed, let us modify our program~mP82 in such a way that it checks only Condition~\ref{cond2} but forgives Condition~\ref{cond1}$'$ (i.e., we disable the first two operators IF in~(\ref{pro3})). The obtained program assures us that the morsifications satisfying this Condition~\ref{cond2} indeed do not occur in this component (while the morsifications satisfying Condition~\ref{cond1}$'$ do not occur by our conjecture).
2. If the conjecture of the previous paragraph is wrong, and our component contains real morsifications satisfying Condition~\ref{cond1}$'$, then there is a neighbouring component satisfying the assertion analogous to Proposition~\ref{mp} (i.e., such that the localized Petrovskii condition is satisfied in this component at all discriminant points of its boundary except for the most singular points of class~$P_8^2$). Indeed, any morsification of~$f$ from our component has at least three critical points with negative critical values and odd positive inertia indices, and also at least three points with positive critical values and even negative inertia indices (see the proof of Proposition~\ref{imag}). Moreover, quantities of all critical points with negative (or positive) critical values are odd (and hence equal to~3 or~5). Therefore if we have a real morsification in this component, satisfying Condition~\ref{cond1}$'$, then we can add to it a constant function in such a way that the resulting morsification has exactly four critical points with negative values and odd positive inertia indices, and four critical points with positive critical values and even positive inertia indices.
Any component of the complement of the discriminant, which contains such a morsification, is a desired one. Indeed, no Morse surgery can be performed over functions inside this component, hence {\em all} morsifications from it have the same signatures, and Conditions~\ref{cond1}$'$ or~\ref{cond2} cannot be satisfied for them.
\LastPageEnding
\end{document} |
\begin{document}
{\bf Comment on ``Weak value amplification is suboptimal for estimation and detection'' }
In a recent Letter, Ferrie and Combes \cite{FC} defined the practical tasks ``detect'' and ``estimate'' and concluded that ``Post-selection cannot aid in detect and estimate for any interaction parameter''. In particular, they argued that ``there is no sense in which WVA [Weak Value Amplification] provides an ``amplification'' for quantum metrology''.
In 1988 Aharonov, Albert and Vaidman \cite{AAV} discovered that a sufficiently weak coupling to any observable of a pre- and postselected quantum system is a coupling to the ``weak value'' of this observable and, since the weak value can be much larger than the eigenvalues of the observable, this method provides an effective amplification of the weak coupling. This amplification is the WVA discussed in the Letter of Ferrie and Combes. The WVA method has been implemented in several experiments in recent years. The spin Hall effect for light was first detected using this method \cite{HK}. A record precision of a mirror angle estimation was obtained using WVA in another experiment \cite{How}. So, definitely, the post-selection aided in detecting and estimating interaction parameters.
How can it then be that the ``statistically rigorous arguments'' of Ferrie and Combes contradict these experimental results? The explanation is that the assumptions in their statistical analysis are irrelevant for realistic experimental situations. I found the main erroneous assumption which led Ferrie and Combes to their incorrect conclusions thanks to my direct involvement in two weak measurement experiments \cite{Xi,Danan}. The limiting factor in these and other experiments is not the number of preselected quantum systems (photons) considered by Ferrie and Combes, but the number of detected, post-selected photons. The saturation of the detectors generally happens well before the power limitation of the laser source kicks in. Thus, the low probability of the postselection, the main negative factor in experiments with large weak values, is not relevant. This then undermines the conclusions of Ferrie and Combes.
In their Letter, Ferrie and Combes quote other recent papers analyzing the limitations of the WVA method \cite{KnBr,TaYa,Zhu,KnGa}, which they improve and complement.
These limitations were obtained by using the same assumptions, but the authors of these works specify (some of them maybe not clearly enough) that their conclusions are conditioned on these assumptions. Zhu et al. \cite{Zhu} do it very precisely. They conclude: ``We have shown that weak measurements cannot effectively improve the SNR [Signal to Noise Ratio] and the MS [measurement sensitivity] when the probability decrease due to postselection needs to be considered; while for practical cases when the probability reduced by postselection need not be considered, weak measurements can significantly improve both the SNR and the MS.''
This work has been supported in part by grant number 32/08 of the Binational Science Foundation and the Israel Science Foundation Grant No. 1125/10.
L. Vaidman\\
Raymond and Beverly Sackler School of Physics and Astronomy\\
Tel-Aviv University, Tel-Aviv 69978, Israel
\end{document} |
\begin{document}
\title{Graded Betti numbers of path ideals of cycles and lines}
\author{Ali Alilooee} \address{University of Wisconsin-Stout, Department of Mathematics and Statistics, Jarvis Hall-Science Wing, Menomonie, WI, USA } \email{ a-alilooeedolatabad@wiu.edu}
\author{Sara Faridi} \address{Dalhousie University, Department of Mathematics, 6316 Coburg Rd. Halifax, NS, Canada B3H 4R2 } \email{faridi@mathstat.dal.ca}
\maketitle
\begin{abstract}
We use purely combinatorial arguments to give a formula to compute all graded Betti numbers of path ideals of paths and cycles. As a consequence we can give new and short proofs for the known formulas of regularity and projective dimensions of path ideals of paths.
\end{abstract}
\section{Introduction}
Path complexes are simplicial complexes whose facets encode paths of a fixed length in a graph. These simplicial complexes in turn correspond to monomial ideals called ``path ideals''. Path ideals of graphs were first introduced by~\cite{Conca1999} in a different algebraic context, but the study of algebraic invariants corresponding to their minimal free resolutions has become popular, with works of~\cite{R.Bouchat2010} and~\cite{He2010}, and the authors~\cite{AF2012}. The papers cited above give partial information on Betti numbers of path ideals. In this paper we use purely combinatorial arguments based on our results in~\cite{AF2012} to give an explicit formula for all the graded Betti numbers of path ideals of paths and cycles. As a consequence we can give new and short proofs for the known formulas of regularity and projective dimensions of path ideals of path graphs.
\section{Preliminaries}
A \textbf{simplicial complex} on vertex set ${\mathcal{X}}=\{x_1,\dots,x_n\}$ is a collection $\Delta$ of subsets of ${\mathcal{X}}$ such that $\{x_i\}\in \Delta$ for all i, and if $F \in \Delta$ and $G \subset F$, then $G \in \Delta$. The elements of $\Delta$ are called \textbf{faces} of $\Delta$ and the maximal faces under inclusion are called \textbf{facets} of $\Delta$. We denote the simplicial complex $\Delta$ with facets $F_1,\dots,F_s$ by $\langle F_1,\dots,F_s\rangle$. We call $\{F_1,\dots,F_s\}$ the facet set of $\Delta$ and is denoted by $F(\Delta)$. The set ${\mathcal{X}}$ is the vertex set of $\Delta$ and is denoted by $\mbox{Vert}(\Delta)$. A \textbf{subcollection} of $\Delta$ is a simplicial complex whose facet set is a subset of the facet set of $\Delta$. For ${\mathcal{Y}}\subseteq{\mathcal{X}}$, an \textbf{induced subcollection} of $\Delta $ on ${\mathcal{Y}}$, denoted by $\Delta_{{\mathcal{Y}}}$, is the simplicial complex whose vertex set is a subset of ${\mathcal{Y}}$ and facet set is $\{F\in F(\Delta) \ | \ F \subseteq {{\mathcal{Y}}}\}.$ If $F$ is a face of $\Delta=\langle F_1,\dots,F_s\rangle$ and ${\mathcal{X}}=\mbox{Vert}(\Delta)$, the \textbf{complements} of a facet $F_i$ and of the simplicial complex $\Delta$ are \begin{eqnarray*} (F_i)_{\mathcal{X}}^c={\mathcal{X}}\setminus {F_i} & \mbox{and}& \Delta_{\mathcal{X}}^c=\langle (F_1)^c_{\mathcal{X}},\dots,(F_s)^c_{\mathcal{X}} \rangle. \end{eqnarray*} Note that if ${\mathcal{Y}} \subsetneqq \mbox{Vert} (\Delta)$, then $(\Delta_{{\mathcal{X}}}^{c})_{\mathcal{Y}}=(\Delta_{\mathcal{Y}})^{c}_{\mathcal{Y}}$.
From now on we assume that $R=K\left[x_1,\dots,x_n\right]$ is a polynomial ring over a field $K$. Suppose that $I$ is an ideal in $R$ minimally generated by square-free monomials $M_1,\ldots,M_s$. The \textbf{facet complex} $\Delta(I)$ associated to $I$ has vertex set $\{x_1,\dots,x_n\}$ and is defined as $\Delta(I)=\langle F_1,\ldots,F_s
\rangle \mbox{ where } F_i=\{x_j \ | \ x_j | M_i,\ 1\leq j\leq n\}, \ 1 \leq i \leq s.$ Conversely if $\Delta$ is a simplicial complex with vertices labeled $x_1,\ldots,x_n$, the \textbf{facet ideal} of $\Delta$ is defined as $I(\Delta)=( \prod_{x \in F}x \ | \ \ F \mbox{ is a facet of} \Delta).$
Given a homogeneous ideal $I$ of the polynomial ring $R$ there exists a \textbf{graded minimal finite free resolution} $$0\rightarrow {\displaystyle
\bigoplus_{d}}R(-d)^{\beta_{p,d}}\rightarrow\cdots{\displaystyle
\rightarrow\bigoplus_{d}}R(-d)^{\beta_{1,d}}\rightarrow
R\rightarrow R /I \rightarrow 0$$ of $R /I $ in which $R(-d)$ denotes
the graded free module obtained by shifting the degrees of elements
in $R$ by $d$. The numbers $\beta_{i,d}$ are the $i$-th
$\mathbb{N}$-\textbf{graded Betti numbers} of degree $d$ of $R /I$,
and are independent of the choice of graded minimal finite free
resolution.
By a reformulation of Hochster's formula in~\cite[Theorem~2.8]{AF2012}, to compute Betti numbers we only need to consider induced subcollections $\Gamma=\Delta_{\mathcal{Y}}$ of a simplicial complex $\Delta$ with ${\mathcal{Y}}=\mbox{Vert} (\Gamma)$.
\section{Path complexes and runs}
\begin{defn}
Let $G=({\mathcal{X}},E)$ be a finite simple graph and let $t$ be an integer such that $t\geq 2$. If $x$ and $y$ are two vertices of $G$, a \textbf{path} of length $(t-1)$ from $x$ to $y$ is a sequence of distinct vertices $x=x_{i_1},\dots, x_{i_{t}}=y$ of $G$ such that $\{x_{i_{j}},x_{i_{j+1}}\}\in E$ for all $j= 1,2,\dots,t-1$. We define the {\bf path ideal} of $G$, denoted by $I_t(G)$ to be the ideal of $K[x_1,\dots,x_n]$ generated by the monomials of the form $x_{i_1}x_{i_2}\dots x_{i_t}$ where $x_{i_1},x_{i_2},\dots,x_{i_t}$ is a path in $G$. The facet complex of $I_t(G)$, denoted by $\Delta_t(G)$, is called the {\bf path complex} of the graph $G$. \end{defn} Two special cases that we will be considering in this paper are when $G$ is a {\bf cycle} $C_n$, or a {\bf path graph} (or {\bf path}) $L_n$ on vertices $\{x_1,\dots,x_n\}$. $$C_n=\langle x_1x_2,\ldots,x_{n-1}x_n,x_nx_1\rangle \mbox{\ and \ } L_n=\langle x_1x_2,\ldots,x_{n-1}x_n\rangle.$$ \begin{example} Consider the cycle $C_5$ with vertex set ${\mathcal{X}}=\{x_1,\dots,x_5\}$. Then $$I_4(C_5)= (x_1x_2x_3x_4, x_2x_3x_4x_5, x_3x_4x_5x_1, x_4x_5x_1x_2, x_5x_1x_2x_3).$$ \end{example}
\begin{notation} Let $i$ and $n$ be two positive integers. For (a set of) labeled objects we use the notation $\mod n$ to denote $$x_i \mod n \ =\{x_j \ | \ 1\leq j \leq n, i\equiv j \mod n\}$$ and $$\{x_{u_1},x_{u_2},\dots,x_{u_t}\} \mod n\ =\{x_{u_j}\mod n \ | \ j=1,2,\dots,t\}.$$ \end{notation} Let $C_n$ be a cycle on vertex set ${\mathcal{X}}=\{x_1,\dots,x_n\}$ and $t< n$. The {\bf standard labeling} of the facets of $\Delta_t(C_n)$ is as follows.
We let $\Delta_t(C_n)=\langle F_1,\dots,F_n\rangle$ where $F_i=\{x_i,x_{i+1},\dots,x_{i+t-1}\}\mod n$ for all $1\leq i \leq n$. Since for each $1\leq i \leq n$ we have $$\begin{array}{llll}
F_{i+1}\setminus F_{i}=\{x_{t+i}\}&\mbox{and}& F_{i}\setminus
F_{i+1}=\{x_{i}\}&\mod n, \end{array}$$
it follows that $\begin{array}{lllll} \left|F_i\setminus
F_{i+1}\right|=1&\mbox{and}& \left|F_{i+1}\setminus
F_{i}\right|=1&\mod n&\mbox{for all $1\leq i\leq
n-1$}. \end{array}$
\begin{defn}\label{defn:defn3.5}
Given an integer $t$, we define a {\bf run} to be the path complex of a path graph. A run which has $p$ facets is called a \textbf{run of length $p$} and corresponds to $\Delta_t(L_{p+t-1})$. Therefore, a run of length $p$ has $p+t-1$ vertices. \end{defn}
\begin{example} Consider the cycle $C_7$ on vertex set ${\mathcal{X}}=\{x_1,\dots,x_7\}$ and the simplicial complex $\Delta_4(C_7)$. The following induced subcollections are two runs in $\Delta_4(C_7)$ $$\begin{array}{lll} \Delta_1&=&\langle
\{x_1,x_2,x_3,x_4\},\{x_2,x_3,x_4,x_5\}\rangle\\ \Delta_2&=&\langle
\{x_1,x_2,x_6,x_7\},\{x_1,x_2,x_3,x_7\},\{x_1,x_2,x_3,x_4\}\rangle. \end{array}$$ \end{example} In~\cite{AF2012} we show that every induced subcollection of the path complex of a cycle is a disjoint union of runs \cite[Proposition~3.6]{AF2012}, and that two induced subcollections of the path complex of a cycle composed of the same number of runs of the same lengths are homeomorphic \cite[Lemma~3.2.9]{AThesis2015}. Then all the information we need to compute the homologies of induced subcollections of $\Delta_t(C_n)$ depends on the number and the lengths of the runs. \begin{defn}\label{defn:e(s_1)} For a fixed integer $t \geq 2$, let the pure $(t-1)$-dimensional simplicial complex $\Gamma=\langle F_1,\ldots,F_s\rangle$ be a disjoint union of runs of length $s_1,\ldots,s_r$. Then the sequence of positive integers $s_1,\ldots,s_r$ is called a \textbf{run sequence} on ${\mathcal{Y}}=\mbox{Vert}(\Gamma)$, and we use the notation $$E(s_1,\dots,s_r)=\Gamma^c_{\mathcal{Y}}=\langle(F_1)_{{{\mathcal{Y}}}}^{c},\dots, (F_s)_{{\mathcal{Y}}}^{c}\rangle.$$ \end{defn}
\section{Graded Betti numbers of path ideals}
We focus on Betti numbers of degree less than $n$, as those of degree $n$ were computed in~\cite{AF2012}. By~\cite[Theorem~2.8]{AF2012} we need to count induced subcollections.
\begin{defn}\label{d:eligible} Let $i$ and $j$ be positive integers. We call an induced subcollection $\Gamma$ of $\Delta_t(C_n)$ an {\bf $(i,j)$-eligible subcollection} of $\Delta_t(C_n)$ if $\Gamma$ is composed of disjoint runs of lengths
\begin{eqnarray} (t+1)p_1+1,\dots, (t+1)p_{\alpha}+1, (t+1)q_1+2,
\ldots, (t+1)q_{\beta}+2\label{eqn:length1}
\end{eqnarray} for nonnegative integers $\alpha, \beta,p_1,p_2,\dots,p_{\alpha},q_1,q_2,\dots,q_{\beta}$, which satisfy the following conditions $$\begin{array}{lll} j&=&(t+1)(P+Q)+t(\alpha+\beta)+\beta
\\ i&=&2(P+Q)+2\beta+\alpha,
\end{array}$$ where $P=\sum_{i=1}^{\alpha} p_i$ and $Q=\sum_{i=1}^{\beta} q_i$. \end{defn} Eligible subcollections count the graded Betti numbers. \begin{theorem}[~\cite{AF2012} Theorem~5.3]\label{lem:lem15} Let $I=I(\Lambda)$ be the facet ideal of an induced subcollection $\Lambda$ of $\Delta_t(C_n)$. Suppose that $i$ and $j$ are integers with $i\leq j<n$. Then the ${\mathbb N}$-graded Betti number $\beta_{i,j}(R/I)$ is the number of $(i,j)$-eligible subcollections of $\Lambda$. \end{theorem}
The following corollary is a special case of Theorem~\ref{lem:lem15}. \begin{col}\label{col:col16} Let $I=I(\Lambda)$ be the facet ideal of an induced subcollection $\Lambda$ of $\Delta_t(C_n)$. Then for every $i$, $\beta_{i,ti}(R/I)$, is the number of induced subcollections of $\Lambda$ which are composed of $i$ runs of length 1. \end{col} \begin{proof} From Theorem~\ref{lem:lem15} we have $\beta_{i,ti}(R/I)$ is the number of $(i,ti)$-eligible subcollections of $\Lambda$. With
notation as in Definition~\ref{d:eligible} we have
$$\left\{\begin{array}{ll}
ti=(t+1)(P+Q)+t(\alpha+\beta)+\beta&\\
i=2(P+Q)+(\alpha+\beta)+\beta&\Rightarrow
ti=2t(P+Q)+t(\alpha+\beta)+t\beta\\
\end{array} \right.$$ Putting the two equations for $ti$ together, we conclude that $(t-1)(P+Q+\beta )=0$. But $\beta$, $P$, $Q\geq 0$ and $t\geq 2$, so we must have $$\beta=P=Q=0 \Rightarrow p_1=p_2=\dots=p_{\alpha}=0.$$ So $\alpha=i$ and $\Gamma$ is composed of $i$ runs of length one.
\end{proof} Theorem~\ref{lem:lem15} holds in particular for $\Lambda=\Delta_t(L_{m})$ and $\Lambda=\Delta_t(C_{n})$ for any integers
$m,n$. Our next statement is in a sense a converse to Theorem~\ref{lem:lem15}. \begin{prop}\label{prop:tree}Let $t$ and $n$ be integers such that $2 \leq t \leq n$ and $I=I(\Lambda)$ be the facet ideal of $\Lambda$
where $\Lambda$ is an induced subcollection of $\Delta_t(C_n)$. Then for each $i,j\in\mathbb{N}$, if $i <n$ and $\beta_{i,j}(R/I)\neq0$, there exist nonnegative integers $\ell,d$ such that $i\leq d<n$ and $$\left\{\begin{array}{lll} i&=&\ell+d\\ j&=&t\ell+d \end{array}\right.$$ \end{prop} \begin{proof} From Theorem~\ref{lem:lem15} we know $\beta_{i,j}$ is equal to the number of $(i,j)$-eligible subcollections of $\Lambda$,
where with notation as in Definition~\ref{d:eligible} we have
$$\left\{\begin{array}{lcr}
j=(t+1)(P+Q)+t(\alpha+\beta)+\beta
\\ i=2(P+Q)+(\alpha+\beta)+\beta.
\end{array}\right.$$
It follows that
\begin{eqnarray}
j-i=(t-1)(P+Q+\alpha+\beta)&\mbox{and}&
ti-j=(t-1)(P+Q+\beta)\label{eqn:ell}.
\end{eqnarray} We now show that there exist nonnegative integers $\ell,d$ such that
$i=\ell+d$ and $j=t\ell+d$. $$\begin{array}{lll}
\left\{\begin{array}{lcr} i=\ell+d \\ j=t\ell+d
\end{array}\right. \Rightarrow
\begin{array}{lll} \ell= \displaystyle \frac{j-i}{t-1}&
\mbox{and}& d=\displaystyle \frac{ti-j}{t-1}
\end{array}.
\end{array}$$ From (\ref{eqn:ell}) we can see that $\ell$ and $d$ as described above
are nonnegative integers.
\end{proof} Corollary~\ref{col:col16} tells us that to compute Betti numbers $\beta_{i,ti}$ of induced subcollections of $\Delta_t(C_n)$ we need to count the number
of its induced subcollections which consist of disjoint runs of length one. The next few pages are dedicated to counting
such subcollections. We use some combinatorial methods to generalize a helpful formula which can be found in Stanley's
book~\cite[page~73]{R.P.Stanley1994}.
\begin{lem}\label{lem:lem5.6} Consider a collection of $n$ points arranged on a line. The number of ways to color $k$ points, when there are at least $t$ uncolored points on the line between each two consecutive colored points, is $${{n-(k-1)t}\choose{k}}.$$
\end{lem} \begin{proof} First label the points by $1,2,\dots,n$ from left to right, and let $a_1<a_2<\dots<a_k$ be the colored points. For $1\leq i\leq k-1$, we define $x_i$ to be the
number of points, including $a_{i}$, which are between $a_i$
and $a_{i+1}$, and $x_0$ to be the number of points which
exist before $a_1$, and $x_k$ the number of points,
including $a_k$, which are after $a_k$.
$$\begin{array}{llllll}
\overbrace{\cdots}^{x_0} &\overbrace{\bullet\ \cdots}^{x_1} &
\overbrace{\bullet\ \cdots}^{x_2} &
{\bullet}\ \cdots &\overbrace{\bullet\ \cdots}^{x_{k-1}} &
\overbrace{\bullet\ \cdots}^{x_{k}}\\
1&a_1&a_2&a_3&a_{k-1}&a_k\ \ n\\
\end{array}$$
If we consider the sequence $x_0,x_1,\dots,x_k$ it is not
difficult to see that there is a one to one correspondence
between the integer solutions of the following
equation and the ways of coloring $k$ points of $n$ points on
a line with at least $t$ uncolored points between each two
colored points.
\begin{eqnarray*} x_0+x_1+\dots+x_k=n&\mbox{$x_0\geq 0$, $x_i > t$,
for $1\leq i \leq k-1 $, and $x_k\geq 1$}.
\end{eqnarray*}
So we only need to find the number of such integer
solutions of this equation. Consider the following
equation $$(x_0+1)+(x_1-t)+\dots+(x_{k-1}-t)+x_k=n-(k-1)t+1$$
where $x_0+1\geq 1$, $x_i-t \geq 1$ for $i=1,\dots,k-1$, and
$x_k\geq 1$. The number of positive integer solutions of this
equation is (see for example~\cite {R.P.Grimaldi2003} page
29) $${{n-(k-1)t}\choose{k}}.$$
\end{proof}
\begin{col}\label{col:mycol} Let $C_n$ be a graph cycle and with the standard labeling let $\Gamma$ be a proper subcollection of $\Delta_t(C_n)$ with $k$ facets $F_a, \ldots,F_{a+k-1} \mod n$. The number of induced subcollections of $\Gamma$ which are composed of $m$ runs of length one is $${k-(m-1)t\choose m}.$$ \end{col}
\begin{proof} To compute the number of induced
subcollections of $\Gamma$ which are composed of $m$ runs of
length one, it is enough to consider the facets $F_a,\ldots,F_{a+k-1}$ as points arranged on a line and compute the
number of ways which we can color $m$ points of these $k$
arranged points with at least $t$ uncolored points between each
two consecutive colored points. Therefore, by
Lemma~\ref{lem:lem5.6} we have the number of induced
subcollections of $\Gamma$ which are composed of $m$ runs of
length one is ${k-(m-1)t\choose m}.$
\end{proof}
\begin{prop}\label{lem:lem1} Let $C_n$ be a graph cycle with vertex
set ${\mathcal{X}}=\{x_1,\dots,x_n\}$. The number of induced subcollections of
$\Delta_t(C_n)$ which are composed of $m$ runs of length one
is $$\frac{n}{n-mt}{n-mt \choose m}.$$ \end{prop}
\begin{proof} Recall that $\Delta_t(C_n)=\langle F_1,\dots,F_n\rangle$
with standard labeling. First we compute the number of induced
subcollections of $\Delta_t(C_{n})$ which consist of $m$ runs of
length one and do not contain the vertex $x_n$. There are $t$
facets of $\Delta_t(C_n)$ which contain $x_n$, the remaining facets
are $F_1,\dots,F_{n-t}$, and so by Corollary~\ref{col:mycol} the
number we are looking for is
\begin{eqnarray} {n-t-(m-1)t \choose m}
= {n-mt \choose m}.\label{eqn:number}
\end{eqnarray}
Now we are going to compute the number of induced subcollections
$\Gamma$ which consist of $m$ runs of length one and include
$x_n$. We have $t$ facets which contain $x_n$, they are
$F_{n-t+1},\dots,F_n$. Each such $\Gamma$ will contain one $F_i\in
\{F_{n-t+1},\dots,F_n\}$ as the run containing $x_n$, and $m-1$
other runs of length one which have to be chosen so that they are
disjoint from $F_i$. So we are looking for $m-1$ runs of length
one in the subcollection $\Gamma'=\langle
F_{i+t},\ldots,F_{i-t}\rangle \mod n$. The subcollection
$\Gamma'$ has $n-2t-1$ facets, so by Corollary~\ref{col:mycol} it
has $${n-2t-1-(m-2)t \choose m-1}={n-mt-1 \choose m-1}$$ induced
subcollections that consist of runs of length one.
Putting this together with the number of ways to choose $F_i$ and
with (\ref{eqn:number}) we conclude that the number of induced
subcollections of $\Delta_t(C_n)$ which are composed of $m$ runs of
length one is $$t{n-mt-1 \choose m-1}+ {n-mt \choose m}=
\frac{n}{n-mt} {n-mt \choose m}.$$ \end{proof}
We apply these counting facts to find Betti numbers in specific degrees.
\begin{col}\label{col:col4.2} Let $n\geq 2$ and $t$ be an integer such that $2\leq t \leq n$. Then for the cycle $C_n$ we
have
$$\beta_{i,ti}(R/I_t(C_n))=\frac{n}{n-ti}{ n-ti \choose i }.$$ \end{col}
\begin{proof} From Corollary~\ref{col:col16} we have $\beta_{i,ti}(R/I_t(C_n))$
in each of the three cases is the number of induced
subcollections of $\Delta_t(C_n)$ which are composed of $i$ runs of
length 1. The formula now follows from Proposition~\ref{lem:lem1}.
\end{proof}
The following lemma is the core of our counting later on in this section. \begin{lem}\label{lem:addingfacet}
Let $\Delta_t(C_n)=\langle F_1,F_2,\dots, F_n \rangle$, $2\leq t \leq n$,
be the standard labeling of the path complex of a cycle $C_n$ on
vertex set ${\mathcal{X}}=\{x_1,\ldots,x_n\}$. Let $i$ be a positive integer
and $\Gamma=\langle F_{c_1},F_{c_2},\dots,F_{c_i}\rangle$ be an
induced subcollection of $\Delta_{t}(C_n)$ consisting of $i$ runs of
length 1, with $1\leq c_1<c_2<\dots<c_i\leq n$. Suppose that $\Sigma$ is
the induced subcollection on $\mbox{Vert}(\Gamma)\cup \{x_{c_u+t}\}$ for some $1\leq u \leq i$. Then
$$|\Sigma|= \left\{ \begin{array}{lll}
|\Gamma|+ t & u <i \ \mbox{ and}& c_{u+1}=c_u+t+1\\
|\Gamma|+ 1 & u =i \ \mbox{ or}& c_{u+1}>c_u+t+1 \end{array}\right.$$ \end{lem}
\begin{proof} Since $\Gamma$ consists of runs of length one and
each $F_{c_u}=\{x_{c_u},x_{c_u+1},\dots,x_{c_u+t-1}\}$ we must
have $c_{u+1}>c_u+t \mod n$ for $u\in \{1,2,\dots,i-1\}$. There
are two ways that $x_{c_u+t}$ could add facets to $\Gamma$ to
obtain $\Sigma$. \begin{enumerate}
\item If $c_{u+1}=c_u+t+1$ then
$F_{c_u},F_{c_u+1},\dots,F_{c_u+t+1}=F_{c_{u+1}}\in\Sigma$ or in
other words, we have added $t$ new facets to $\Gamma$.
\item If $c_{u+1}>c_u+t+1$ or $u=i$ then $F_{c_u+1}\in \Sigma$,
and therefore one new facet is added to $\Gamma$.
\end{enumerate}
\end{proof}
The following propositions, which generalize Lemma~7.4.22 in~\cite{Jacques2004}, will help us to compute the remaining Betti numbers.
\begin{prop}\label{lem:lem4.3} Let $\Delta_t(C_n)=\langle F_1,F_2,\dots,
F_n \rangle$, $2\leq t \leq n$, be the standard labeling of the path
complex of a cycle $C_n$ on vertex set ${\mathcal{X}}=\{x_1,\ldots,x_n\}$. Also
let $i$, $j$ be positive integers such that $j\leq i$ and
$\Gamma=\langle F_{c_1},F_{c_2},\dots,F_{c_i}\rangle$ be an induced
subcollection of $\Delta_{t}(C_n)$ consisting of $i$ runs of length 1,
with $1\leq c_1<c_2<\dots<c_i\leq n$. Suppose that $W=\mbox{Vert}(\Gamma)\cup A \subsetneq {\mathcal{X}}$ for
some subset $A$ of $\{x_{c_1+t},\dots,x_{c_i+t}\} \mod n$ with
$|A|=j$. Then the induced subcollection $\Sigma$ of $\Delta_t(C_n)$ on
$W$ is an $(i+j,ti+j)$-eligible subcollection. \end{prop}
\begin{proof} Since $\Gamma$ consists of runs of length one and
each $F_{c_u}=\{x_{c_u},x_{c_u+1},\dots,x_{c_u+t-1}\}$ we must
have $c_{u+1}>c_u+t \mod n$ for $u\in \{1,2,\dots,i-1\}$.
The runs (or connected components) of $\Sigma$ are of the form
$\Sigma^{\prime}=\Sigma_{U}$ where $U\subseteq W$, and can have
one of the following possible forms.
\renewcommand{\theenumi}{\alph{enumi}}
\renewcommand{\theenumii}{\Roman{enumii}}
\begin {enumerate}
\item For some $a\leq i$: $$U=F_{c_a},$$ and therefore
$\Sigma^{\prime}=\langle F_{c_a}\rangle$ is a run of length 1.
\item For some $a\leq i$: $$U=F_{c_a}\cup\{x_{{c_a}+t}\},$$ and
therefore $c_{a+1}> c_a+t+1$, so from Lemma~\ref{lem:addingfacet} we have
$\Sigma^{\prime}=\langle F_{c_a},F_{c_a+1}\rangle$ is a run of
length 2.
\item For some $a\leq i$: $$U=F_{c_a}\cup
F_{c_{a+1}}\cup\dots\cup
F_{c_{a+r}}\cup\{x_{c_a+t},x_{c_{a+1}+t},\dots,x_{c_{a+r-1}+t}\}\hspace{.1
in} \mod n$$ and $F_{c_{a+j}}=F_{{c_a}+j(t+1)}$ for
$j=0,1,\dots,r$ and $r\geq 1$. Then from
Lemma~\ref{lem:addingfacet} above we know $\Sigma^{\prime}$ is
a run of length $r+1+tr=(t+1)r+1$.
\item For some $a\leq i$: $$U=F_{c_a}\cup
F_{c_{a+1}}\cup\dots\cup
F_{c_{a+r}}\cup\{x_{c_a+t},x_{c_{a+1}+t},\dots,x_{c_{a+r}+t}\} \hspace{.1
in} \mod n$$ and $F_{c_{a+j}}=F_{{c_a}+j(t+1)}$ for
$j=0,1,\dots,r$ and $r\geq 1$, and $c_{a+r+1}>c_{a+r}+t+1$ or
${a+r}=i$. Then from Lemma~\ref{lem:addingfacet} we have
$\Sigma^{\prime}$ is a run of length $r+1+tr+1=(t+1)r+2$.
\end{enumerate}
So we have shown that $\Sigma$ consists of runs of length $1$ and
$2$ $\mod t+1$.
Suppose that the runs in $\Sigma$ are of the form
described in (\ref{eqn:length1}). By
Definition~\ref{defn:defn3.5} we have
$$\begin{array}{ll} |\mbox{Vert}
(\Sigma)|&=(t+1)p_1+t+\dots+(t+1)p_\alpha+t+(t+1)q_1+t+1+\dots+(t+1)
q_{\beta}+t+1~
\\
&=(t+1)P+t\alpha+(t+1)Q+t\beta+\beta ~
\\
&=(t+1)(P+Q)+t(\alpha+\beta)+\beta.
\end{array}$$
On the other hand by the definition of $\Sigma$ we know that,
$\Sigma$ has $ti+j$ vertices and
therefore $$ti+j=(t+1)(P+Q)+t(\alpha+\beta)+\beta.$$ It remains to
show that $i+j=2(P+Q)+(\alpha+\beta)+\beta$.
Note that if $j=0$ then $\beta=P=Q=0$ and hence
\begin{eqnarray}
j=0&\Longrightarrow& P+Q+\beta=0\label{eqn:neweqn}.
\end{eqnarray}
Moreover each vertex $x_{{c_v}+t}\in A$ either increases the length
of a run in $\Gamma$ by one and hence increases $\beta$ (the number
of runs of length 2 in $\Gamma$) by one, or increases the length of a run by $t+1$, in which case $P+Q$ increases by 1. We can conclude that if we add $j$ vertices to $\Gamma$, $P+Q+\beta$ increases by
$j$. From this and (\ref{eqn:neweqn}) we have $j=P+Q+\beta$.
Now we solve the following system
$$\left\{ \begin{array}{rllll}
ti+j&=&(t+1)(P+Q)+t(\alpha+\beta)+\beta &
\Longrightarrow& ti=t(P+Q)+t(\alpha+\beta)\\
j&=&P+Q+\beta&\Longrightarrow& i=P+Q+\alpha+\beta
\end{array} \right.$$
$$ \Longrightarrow \left\{\begin{array}{lll}
i&=&P+Q+\alpha+\beta \\
j&=&P+Q+\beta
\end{array} \right.
\Longrightarrow i+j=2(P+Q)+(\alpha+\beta)+\beta. $$
\end{proof} \begin{prop}\label{prop:newprop} Let $C_n$ be a cycle, $2\leq t \leq n$, and $i$ and $j$ be positive integers. Suppose that $\Sigma$ is an $(i+j,ti+j)$-eligible subcollection of $\Delta_t(C_n)$, $2\leq t \leq n$. Then with notation as in Definition~\ref{d:eligible}, there exists a unique induced subcollection $\Gamma$ of $\Delta_t(C_n)$ of the form
$\langle F_{c_1},F_{c_2},\dots,F_{c_i}\rangle$ with $1\leq c_1<c_2<\dots<c_i\leq n$ consisting of $i$ runs of length $1$, and a subset $A$ of $\{x_{c_1+t},\dots,x_{c_i+t}\}$ $\mod \ n$, with $|A|=j$ such that $\Sigma=\Delta_t(C_n)_{W}$ where $W=\mbox{Vert}(\Gamma)\cup A.$
Moreover if ${\mathcal{R}}=\langle F_{h},F_{h+1},\dots,F_{h+m}\rangle \mod n$ is a run in $\Sigma$ with $|{\mathcal{R}}|=2 \mod (t+1)$, then $F_{h+m} \notin \Gamma \mod n$. \end{prop}
\begin{proof} Suppose that $\Sigma$ consists of runs $R_1^{\prime},
R_2^{\prime},\ldots,R_{\alpha+\beta}^{\prime}$
where for $k=1,2,\ldots,\alpha+\beta$
$$\begin{array}{ll} R_{k}^{\prime}=\langle
F_{h_k},F_{h_k+1},\dots,F_{h_k+m_k-1}\rangle & \mod n
\\
\mbox{Vert}(R_{k}^{\prime})=\{x_{h_k},x_{h_k+1},\dots,x_{h_k+m_k+t-2}\} &
\mod n
\\
h_{k+1}\geq t+h_k+m_k & \mod n
\end{array}$$
and
\begin{eqnarray}
m_k=\left\{\begin{array}{lll}
(t+1)p_k+1&\mbox{for}& k=1,2,\dots,\alpha\\
(t+1)q_{k-\alpha}+2&\mbox{for}& k=\alpha+1,\alpha+2,\dots,\alpha+\beta.
\end{array}
\right. \label{eqn:mk}
\end{eqnarray}
For each $k$, we remove the following vertices from $\mbox{Vert}
(R_{k}^{\prime})$
\begin{eqnarray}
\begin{array}{lll}
x_{h_k+t},x_{h_k+2t+1},\dots,x_{h_k+p_k t+(p_k-1)}&\mod n&
\mbox{ if } 1 \leq k \leq \alpha
\mbox{ and } p_k\neq0 \\
x_{h_k+t},x_{h_k+2t+1},\dots,x_{h_k+(q_{k-\alpha}+1) t+q_{k-\alpha}}& \mod n&
\mbox{ if } \alpha+1\leq k \leq \alpha+\beta.
\end{array}\label{eqn:deleted}
\end{eqnarray}
Let $\Gamma=\langle R_1,R_2,\dots R_{\alpha+\beta} \rangle$ be the induced subcollection on the remaining vertices of $\Sigma$, where
\begin{eqnarray}
R_k=\left\{
\begin{array}{lll}
\langle F_{h_k},F_{h_k+t+1},\dots,F_{h_k+(t+1)p_k}\rangle&
\mod n &\mbox{for } 1 \leq k \leq \alpha\\
\langle F_{h_k},F_{h_k+t+1},\dots,F_{h_k+(t+1)q_{k-\alpha}}\rangle&
\mod n &\mbox{for } \alpha+1\leq k \leq \alpha+\beta.
\end{array}\right.\label{eqn:mohem}
\end{eqnarray}
In other words, $\mod n$, $\Gamma$ has facets
$$F_{h_1},F_{h_1+t+1},\dots,F_{h_1+(t+1)p_1},F_{h_2},F_{h_2+t+1},
\dots,F_{h_2+(t+1)p_2},\dots,F_{h_{\alpha+\beta}},\dots,F_{h_{\alpha+\beta}+
(t+1)q_{\beta}}. $$ It is clear that each $R_k$ consists of
runs of length one. Since $\Gamma$ is a subcollection of
$\Sigma$, no runs of $R_k$ and $R_{k^{\prime}}$ are connected to
one another if $k\neq k^{\prime}$, and hence we can conclude that
$\Gamma$ is an induced subcollection of $\Delta_t(C_n)$ which is
composed of runs of length one.
From (\ref{eqn:mohem}) we have the number of runs of length 1 in
$\Gamma$ (or the number of facets of $\Gamma$) is equal
to $$(p_1+1)+(p_2+1)+\dots+(p_{\alpha}+1)+(q_1+1)+\dots+(q_{\beta}+1)=
P+Q+\alpha+\beta=i.$$ Therefore, $\Gamma$ is an induced
subcollection of $\Delta_t(C_n)$ which is composed of $i$ runs of
length 1. We relabel the facets of $\Gamma$ as $\Gamma=\langle
F_{c_1},\dots,F_{c_i}\rangle$.
Now consider the following subset of
$\{x_{c_1+t},\dots,x_{c_i+t}\}$ as $A$
$$\bigcup_{k=1,p_k\neq
0}^{\alpha}\{x_{h_k+t},x_{h_k+2t+1},\dots,x_{h_k+p_k
t+(p_k-1)}\} \cup
\bigcup_{k={\alpha+1}}^{\alpha+\beta}\{x_{h_k+t},x_{h_k+2t+1},
\dots,x_{h_k+(q_{k-\alpha}+1)t+q_{k-\alpha}}\}$$ by
(\ref{eqn:deleted}) we
have: $$|A|=(p_1+p_2+\dots+p_{\alpha})+(q_1+1)+\dots+(q_{\beta}+1)=P+Q+\beta=j.$$
Then if we set $$W=(\bigcup_{h=1}^{i}F_{c_h})\cup A$$ we clearly
have $\Sigma=(\Delta_t(C_n))_{W}$.
This proves the existence of $\Gamma$. We now prove its
uniqueness. Let $\Lambda=\langle
F_{s_1},F_{s_2},\dots,F_{s_i}\rangle$ be an induced subcollection
of $\Delta_t(C_n)$ which is composed of $i$ runs of length 1 such that
$1\leq s_1<s_2<\dots<s_i\leq n$. Also let $B$ be a $j$-subset of
the set
$\begin{array}{lll}\{x_{s_1+t},x_{s_2+t},\dots,x_{s_i+t}\}&\mod
n\end{array}$ such that
\begin{equation}
\Sigma=({\Delta_t(C_n)})_{\mbox{Vert} (\Lambda)\cup B}.\label{eqn:sum2}
\end{equation}
Suppose that $\Lambda=\langle S_1,S_2,\dots,S_{\alpha+\beta}\rangle$,
such that for $k=1,2,\dots,\alpha+\beta$, $S_k$ is an induced
subcollection of $R_k^{\prime}$ which consists of $y_k$ runs of
length one. By (\ref{eqn:sum2}) we have $y_k\neq0$ for all
$k$. Now we prove the following claims for each
$k\in\{1,2,\dots,\alpha+\beta\}$.
\renewcommand{\theenumi}{\alph{enumi}}
\begin{enumerate}
\item \emph{$F_{h_k}\in \Lambda$}.
Suppose that $1\leq k \leq \alpha+\beta$. If $p_k=0$ we are clearly done, so consider the case $p_k\neq0$.
Assume that $F_{h_k}\notin \Lambda$. Since $F_{h_k}$ is the only facet
of $\Sigma$ which contains $x_{h_k}$ we can conclude that
$x_{h_k}\notin \mbox{Vert} (\Lambda)$. From (\ref{eqn:sum2}), it follows
that $x_{h_k}\in \{x_{s_1+t},x_{s_2+t},\dots,x_{s_i+t}\}$, so
\begin{eqnarray} x_{h_k}=x_{s_a+t} \mod n \mbox{ for some $a$}.\label{eqn:s-a}
\end{eqnarray}
On the other hand we know
\begin{eqnarray*}
F_{s_a}=\{x_{s_a},x_{s_a+1},\dots,x_{s_a+t-1}\} &\mod \ n\\
F_{s_a+1}=\{x_{s_a+1},x_{s_a+2},\dots,x_{s_a+t}\} &\mod \ n.
\end{eqnarray*}
Since $R_k^{\prime}$ is an induced connected component of
$\Sigma$, by (\ref{eqn:s-a}) we can conclude that $x_{h_k}\in
F_{s_a+1}$ and $F_{s_a},F_{s_a+1}\in R_k^{\prime}$. However, we
know $F_{h_k}$ is the only facet of $R_k^{\prime}$ which contains
$x_{h_k}$ and so $F_{s_a+1}=F_{h_k}$ and then $\begin{array}{ll}
s_a+1=h_k& \mod n\end{array}$. This and (\ref{eqn:s-a}) imply
that $t=1 \mod n$, which contradicts our assumption $2\leq t
\leq n$.
\item \emph{If $F_u\in S_k$ and $F_{u+t+1}\in R_k^{\prime}$, then $F_{u+t+1}\in S_k$.}
Assume that $F_{u+t+1}\notin S_k$ and $F_{u+t+1}\in
R_k^{\prime}$. Let $$r_0=\min\{r: r>u , F_r\in S_k \mod n\}.$$
Since $S_k$ consists of runs of length one we can conclude
that $r_0\geq u+t+1$. Since $r_0\neq u+t+1$ we have $r_0\geq u+t+2$. But
then $$x_{u+t+1}\notin \mbox{Vert} (\Lambda)\cup
\{x_{s_1+t},x_{s_2+t},\dots,x_{s_i+t}\}$$ and therefore $x_{u+t+1}\notin
\mbox{Vert} (\Sigma)$ which is a contradiction. \end{enumerate}
Now for each $k$, by (a) we have $F_{h_k}\in \Lambda$ and from
repeated applications of (b) we find that
$$F_{h_k+f(t+1)}\in S_k \hspace{.1 in}\mbox{ for }
f=\left \{ \begin{array}{ll}
1,2,\dots,p_k & 1\leq k \leq \alpha\\
1,2,\dots,q_{k-\alpha}& \alpha+1\leq k \leq \alpha+\beta.
\end{array}\right.$$
So $R_k \subseteq S_k$. On the other hand $S_k$ consists of runs of
length one, so no other facet of $R'_k$ can be added to it, and
therefore $S_k=R_k$ for all $k$. We conclude that $\Lambda=\Gamma$
and we are therefore done.
The last claim of the proposition is also apparent from this proof.
\end{proof}
We are now ready to compute the remaining Betti numbers. \begin{theorem}\label{theorem:theorem4}
Let $n$, $i$, $j$ and $t$ be integers such that $n\geq 2$, $2\leq t
\leq n$, and $ti+j < n$. Then \[ \beta_{i+j,ti+j}(R/I_t(C_n))= \frac{n}{n-it}{i \choose j}{ n-it \choose
i } \]
\end{theorem}
\begin{proof} If $I=I_t(C_n)$, from Theorem~\ref{lem:lem15}, $\beta_{i+j,ti+j}(R/I)$ is the
number of $(i+j,ti+j)$-eligible subcollections of $\Delta_t(C_n)$.
Suppose that ${\mathcal{R}}_{(i)}$ denotes the set of
all induced subcollections of $\Delta_t(C_n)$ which are composed of
$i$ runs of length one. By Propositions~\ref{lem:lem4.3}
and~\ref{prop:newprop} there exists a one to one correspondence
between the set of all $(i+j,ti+j)$-eligible subcollections of
$\Delta_t(C_n)$ and the set $${\mathcal{R}}_{(i)}\times {\left[i\right]\choose
j}$$ where ${\left[i\right]\choose j}$ is the set of all
$j$-subsets of a set with $i$ elements.
By Corollary~\ref{col:col16} we have
$|{\mathcal{R}}_{(i)}|=\beta_{i,ti}(R/I_t(C_n))$ and
$|{\left[i\right]\choose j}|={i \choose j}$, so we apply
Corollary~\ref{col:col4.2} to observe that
\[ \beta_{i+j,ti+j}(R/I_t(C_n))={i \choose j}\beta_{i,ti}(R/I_t(C_n))= \frac{n}{n-ti}{i \choose j}{ n-ti
\choose i }.
\]
\end{proof}
Finally, we put together Theorem~\ref{lem:lem15}, Proposition~\ref{prop:tree} and~\cite[Theorem~5.1 and
Theorem~2.8]{AF2012}. Note that the case $t=2$ is the case of graphs which appears in~\cite{Jacques2004}. Also note that $\beta_{i,j}(R/I_t(C_n))=0$ for all $i\geq 1$ and $j>ti$; see for example~\cite[3.3.4]{Jacques2004}.
\begin{theorem}[{\bf Betti numbers of path ideals of cycles}]\label{t:maintheorem} Let $n$, $t$, $p$ and $d$ be integers such that $n\geq 2$, $2\leq t \leq n$, $n=(t+1)p+d$, where $p\geq 0$, $0\leq d
\leq t$. Then the $\mathbb{N}$-graded Betti numbers of the path ideal of the
graph cycle $C_n$ are given by $$\beta_{i ,j}(R/I_t(C_n))= \left \{
\begin{array}{ll}
t & j=n,\ d= 0,\ \displaystyle i=2\left (\frac{n}{t+1}\right )\\
&\\
1 & j=n,\ d\neq 0, \ \displaystyle i=2\left (\frac{n-d}{t+1}\right )+1 \\
&\\ \displaystyle \frac{n}{{n-t\left(\frac{j-i}{t-1}\right)}}
{\frac{j-i}{t-1}\choose \frac{ti-j}{t-1}}
{n-t\left(\frac{j-i}{t-1}\right)\choose \frac{j-i}{t-1}} &
\left \{\begin{array}{l}
j< n, \ i\leq j \leq ti, \mbox{ and }\\ \\
\displaystyle 2p\geq \frac{2(j-i)}{t-1}\geq i
\end{array} \right.
\\ &\\
0&\mbox{otherwise.}
\end{array} \right.
$$ \end{theorem} \begin{proof}
We only need to make sure we have the correct conditions for the Betti numbers to be nonzero. When $j<n$, $\beta_{i,j}(R/I_t(C_n))\neq 0 \Longleftrightarrow$
$$\begin{array}{lll}
& \Longleftrightarrow \left\{
\begin{array}{l}
\displaystyle\frac{j-i}{t-1}\geq \frac{ti-j}{t-1}~
\\
\displaystyle n-\frac{t(j-i)}{t-1}\geq \frac{j-i}{t-1}
\end{array}\right.
& \\
&& \\
& \Longleftrightarrow \left\{
\begin{array}{l}
\displaystyle 2j\geq (t+1)i~
\\
\displaystyle n\geq \left(\frac{t+1}{t-1}\right)(j-i)
\end{array}\right.
&\\
&& \\
&\Longleftrightarrow \left\{
\begin{array}{l}
\displaystyle 2j\geq (t+1)i~
\\
\displaystyle (t+1)p+d\geq \left(\frac{t+1}{t-1}\right)(j-i)
\end{array}\right.
& \\
&&\\
&\Longleftrightarrow \left\{
\begin{array}{l}
\displaystyle 2j\geq (t+1)i~
\\
\displaystyle p+\frac{d}{t+1}\geq \frac{j-i}{t-1}
\end{array}\right.
&\\
&& \\
&\Longleftrightarrow
\displaystyle 2p\geq \frac{2(j-i)}{t-1}\geq i
& \mbox{ as } d<t+1 \\ \end{array}$$
\end{proof}
\begin{theorem}[{\bf Betti numbers of path ideals of lines }]\label{newtheorem} Let $n$, $t$, $p$ and $d$ be integers such that $n\geq 2$, $2\leq t \leq n$, $n=(t+1)p+d$, where $p\geq 0$, $0\leq d
\leq t$. Then \renewcommand{\theenumi}{\roman{enumi}} the $\mathbb{N}$-graded Betti numbers of the path ideal of the
path graph $L_n$ are nonzero and equal to $$\beta_{i ,j}(R/I_t(L_n))= \displaystyle{\frac{j-i}{t-1}\choose \frac{ti-j}{t-1}} {n-t\left(\frac{j-i}{t-1}\right)\choose \frac{j-i}{t-1}} + {\frac{j-i}{t-1}-1\choose \frac{ti-j}{t-1}} {n-t\left(\frac{j-i}{t-1}\right)\choose \frac{j-i}{t-1}-1}$$ if and only if \begin{enumerate} \item $j\leq n$ and $i\leq j \leq ti$; \item If $d<t$ then $ \displaystyle p\geq \frac{j-i}{t-1} \geq i/2$ where both inequalities cannot be $=$ at the same time; \item If $d=t$ then $\displaystyle (p+1)\geq \frac{j-i}{t-1} \geq i/2$ where both inequalities cannot be $=$ at the same time. \end{enumerate} \end{theorem}
\begin{proof} We use induction on $n$. Suppose $n=2$
so $t=2$. Then we have $I_t(L_n)=(x_1x_2)$ and it is clear that the only nonzero Betti number of $R/I_t(L_n)$ is $\beta_{1,2}=1$. So the assertion is clear. So suppose $n>2$. To continue we will use the
following formula, valid for all integers $N,R>1$
\[
\binom{N}{R}+\binom{N}{R-1}=\binom{N+1}{R}. \] From~\cite{R.Bouchat2010} we have the recursive formula $$\beta_{i,j}(R/I_t(L_n))= \beta_{i,j}(R/I_t(L_{n-1}))+\beta_{i-1,j-t}(R/I_t(L_{n-(t+1)}))+\beta_{i-2,j-t-1}(R/I_t(L_{n-(t+1)}))$$ which using the induction hypothesis leads to the following calculation for $\beta_{i,j}(R/I_t(L_n))$ $$\begin{array}{l} \displaystyle\binom{\frac{j-i}{t-1}}{\frac{ti-j}{t-1}}
\binom{n-1-t\left(\frac{j-i}{t-1}\right)}{\frac{j-i}{t-1}}
+ \binom{\frac{j-i}{t-1}-1}{\frac{ti-j}{t-1}} \binom{n-1-t\left(\frac{j-i}{t-1}\right)}{\frac{j-i}{t-1}-1} \\ \\ +\displaystyle\binom{\frac{j-t-i+1}{t-1}}{\frac{ti-t-j+t}{t-1}} \binom{n-(t+1)-t\left(\frac{j-t-i+1}{t-1}\right)}{\frac{j-t-i+1}{t-1}} +\displaystyle\binom{\frac{j-t-i+1}{t-1}-1}{\frac{ti-t-j+t}{t-1}} \binom{n-(t+1)-t\left(\frac{j-t-i+1}{t-1}\right)}{\frac{j-t-i+1}{t-1}-1}\\ \\ +\displaystyle\binom{\frac{j-t-1-i+2}{t-1}}{\frac{ti-2t-j+t+1}{t-1}} \binom{n-(t+1)-t\left(\frac{j-t-1-i+2}{t-1}\right)}{\frac{j-t-1-i+2}{t-1}} +\displaystyle\binom{\frac{j-t-1-i+2}{t-1}-1}{\frac{ti-2t-j+t+1}{t-1}} \binom{n-(t+1)-t\left(\frac{j-t-1-i+2}{t-1}\right)}{\frac{j-t-1-i+2}{t-1}-1}. \end{array}$$ For ease of writing, we set $A=\frac{j-i}{t-1}$ and $B=\frac{ti-j}{t-1}$, so we have that $$\begin{array}{l}
\beta_{i,j}(R/I_t(L_n))= \binom{A}{B} \binom{n-1-tA}{A} + \binom{A-1}{B} \binom{n-1-tA}{A-1} \\ \\ +\binom{A-1}{B}\binom{n-1-tA}{A-1} +\binom{A-2}{B} \binom{n-1-tA}{A-2} + \binom{A-1}{B-1} \binom{n-1-tA}{A-1}+\binom{A-2}{B-1} \binom{n-1-tA}{A-2} \\ \\ =\left[\binom{A-1}{B}+ \binom{A-1}{B-1} \right]\binom{n-1-tA}{A-1} +\left[\binom{A-2}{B}+ \binom{A-2}{B-1}\right]\binom{n-1-tA}{A-2} +\binom{A}{B}\binom{n-1-tA}{A} +\binom{A-1}{B}\binom{n-1-tA}{A-1} \\ \\ =\binom{A}{B}\binom{n-1-tA}{A-1}+ \binom{A-1}{B}\binom{n-1-tA}{A-2} +\binom{A}{B}\binom{n-1-tA}{A} +\binom{A-1}{B}\binom{n-1-tA}{A-1} \\ \\ =\binom{A}{B}\left[\binom{n-1-tA}{A-1} +\binom{n-1-tA}{A}\right] +\binom{A-1}{B}\left[\binom{n-1-tA}{A-2}+ \binom{n-1-tA}{A-1}\right]\\ \\ =\binom{A}{B}\binom{n-tA}{A} +\binom{A-1}{B}\binom{n-tA}{A-1}. \end{array}$$
Using the notation above, we see that $\beta_{i,j}(R/I_t(L_n))\neq 0$ if and only if $$[B\leq A \mbox{ and } A \leq n-tA ] \mbox{ or } [B\leq A-1 \mbox{ and } A-1 \leq n-tA ]$$ which is equivalent to saying that $$B\leq A \mbox{ and } A-1 \leq n-tA \mbox{ where both $\geq$ cannot be $=$ at the same time}.$$ In other words $\beta_{i,j}(R/I_t(L_n))\neq 0$ if and only if $$ \begin{array}{ll}
\displaystyle\frac{j-i}{t-1}\geq \frac{ti-j}{t-1} \mbox{ and }
\displaystyle n-\frac{t(j-i)}{t-1}\geq \frac{j-i}{t-1}-1
& \iff \\
\displaystyle 2j\geq (t+1)i \mbox{ and }
\displaystyle n+1\geq \left(\frac{t+1}{t-1}\right)(j-i)
& \iff \\
\displaystyle 2j\geq (t+1)i \mbox{ and }
\displaystyle (t+1)p+d+1\geq \left(\frac{t+1}{t-1}\right)(j-i)
& \iff \\
\displaystyle 2j\geq (t+1)i \mbox{ and }
\displaystyle p+\frac{d+1}{t+1}\geq \frac{j-i}{t-1} & \\
\end{array} $$ where in each line both $\geq$ cannot be $=$ at the same time. This is equivalent to $$ \left\{
\begin{array}{ll}
\displaystyle 2p\geq \frac{2(j-i)}{t-1}\geq i
& \mbox{ if } d<t \\
\displaystyle 2(p+1)\geq \frac{2(j-i)}{t-1}\geq i
& \mbox{ if } d=t
\end{array}\right.$$ (note that $\frac{j-i}{t-1}$ from Theorem~\ref{lem:lem15} and Definition~\ref{d:eligible} is an integer) where both $\geq$ cannot be $=$ at the same time in the second line.
\end{proof}
We can now easily derive the projective dimension and regularity of path ideals of paths, which were known before. The projective dimension of paths (Part i below) was computed in~\cite{He2010} using different methods. The case $t=2$ is the case of graphs which appears in \cite{Jacques2004}. Part ii of the following Corollary reproves \cite[Theorem~5.3]{R.Bouchat2010} which computes the Castelnuovo-Mumford regularity of path ideal of a path. The case of cycles was done in~\cite{AF2012}. \begin{col}[{\bf Projective dimension and regularity of path ideals of paths}]\label{c:pdr}
Let $n$, $t$, $p$ and $d$ be integers such that $n\geq 2$, $2\leq t
\leq n$, $n=(t+1)p+d$, where $p\geq 0$, $0\leq d \leq t$. Then
\renewcommand{\theenumi}{\roman{enumi}} \begin{enumerate} \item The projective dimension of the path ideal of a path $L_n$ is given by $$pd(R/I_t(L_n))=\left\{\begin{array}{ll} 2p & d\neq t~
\\ 2p+1&d=t\\ \end{array}\right.$$
\item The regularity of the path ideal of a path $L_n$ is given by $$reg(R/I_t(L_n))=\left\{\begin{array}{ll} p(t-1) & d<t
\\ (p+1)(t-1) & d=t\\ \end{array}\right.$$ \end{enumerate} \end{col}
\begin{proof} \renewcommand{\theenumi}{\roman{enumi}}
\begin{enumerate}
\item By using Theorem~\ref{newtheorem} we know that if $\beta_{i,j}(R/I_t(L_n))\neq 0$ then $i\leq 2p+1$ when $d=t$ and therefore $pd(R/I_t(L_n))\leq 2p+1$.
On the other hand, by applying Theorem~\ref{newtheorem} we have
$$\beta_{2p+1,n}(R/I_t(L_n))= \displaystyle{p+1\choose p} {p\choose
p+1} + {p\choose p}{p\choose p}=1\neq 0.$$ Then we can conclude that $pd(R/I_t(L_n))=2p+1$. Now we suppose that $d\neq t$. From Theorem~\ref{newtheorem} we can see
that if $\beta_{i,j}(R/I_t(L_n))\neq 0$ then $2p\geq i$ and
therefore $pd(R/I_t(L_n))\leq 2p$. On the other hand, by applying
Theorem~\ref{newtheorem}, we can see that
\begin{equation*}
\beta_{2p,p(t+1)}(R/I_t(L_n))= \displaystyle{p\choose p}
{p+d\choose p} + {p-1\choose p}{p\choose p}={p+d\choose p}\neq 0.
\end{equation*}
Therefore $pd(R/I_t(L_n))\geq 2p$ and we have $pd(R/I_t(L_n))= 2p$.
\item By definition, the regularity of a module $M$ is $\max \{j-i
\ | \ \beta_{i,j}(M)\neq 0\}$. By Theorem~\ref{newtheorem}, we
know exactly when the graded Betti numbers of $R/I_t(L_n)$ are
nonzero, and the formula follows directly.
\end{enumerate}
\end{proof}
\end{document}